Compare commits: v1.9.4 ... (1623 commits)
[Commit table (Author | SHA1 | Date): only the SHA1 column survived extraction. The 1623 commits run from b55dafa1dd through d6f22433d0; authors, dates, and commit messages were not captured.]
.buildkite/coverage-in-disk.sh | 19 (new file)
@@ -0,0 +1,19 @@
+#!/usr/bin/env bash
+#
+# This script is used to upload the full buildkite pipeline. The steps defined
+# in the buildkite UI should simply be:
+#
+#   steps:
+#     - command: ".buildkite/pipeline-upload.sh"
+#
+
+set -e
+cd "$(dirname "$0")"/..
+source ci/_
+sudo chmod 0777 ci/buildkite-pipeline-in-disk.sh
+
+_ ci/buildkite-pipeline-in-disk.sh pipeline.yml
+echo +++ pipeline
+cat pipeline.yml
+
+_ buildkite-agent pipeline upload pipeline.yml
(file name not captured) | modified
@@ -12,7 +12,14 @@ export PS4="++"
 # Restore target/ from the previous CI build on this machine
 #
 eval "$(ci/channel-info.sh)"
-export CARGO_TARGET_CACHE=$HOME/cargo-target-cache/"$CHANNEL"-"$BUILDKITE_LABEL"
+eval "$(ci/sbf-tools-info.sh)"
+source "ci/rust-version.sh"
+HOST_RUST_VERSION="$rust_stable"
+pattern='^[0-9]+\.[0-9]+\.[0-9]+$'
+if [[ ${HOST_RUST_VERSION} =~ ${pattern} ]]; then
+  HOST_RUST_VERSION="${rust_stable%.*}"
+fi
+export CARGO_TARGET_CACHE=$HOME/cargo-target-cache/"$CHANNEL"-"$BUILDKITE_LABEL"-"$SBF_TOOLS_VERSION"-"$HOST_RUST_VERSION"
 (
   set -x
   MAX_CACHE_SIZE=18 # gigabytes
.github/ISSUE_TEMPLATE.md (vendored) | 6 (deleted)
@@ -1,6 +0,0 @@
-#### Problem
-
-
-
-#### Proposed Solution
-
.github/ISSUE_TEMPLATE/0-general.md (vendored) | 12 (new file)
@@ -0,0 +1,12 @@
+---
+name: General Issue
+about: Create a report describing a problem and a proposed solution
+title: ''
+assignees: ''
+---
+
+#### Problem
+
+
+
+#### Proposed Solution
.github/ISSUE_TEMPLATE/1-feature-gate.yml (vendored) | 70 (new file)
@@ -0,0 +1,70 @@
+name: Feature Gate Tracker
+description: Track the development and status of an on-chain feature
+title: "Feature Gate: "
+labels: ["feature-gate"]
+body:
+  - type: markdown
+    attributes:
+      value: >
+        Steps to add a new feature are outlined below. Note that these steps only cover
+        the process of getting a feature into the core Solana code.
+
+        - For features that are unambiguously good (ie bug fixes), these steps are sufficient.
+
+        - For features that should go up for community vote (ie fee structure changes), more
+        information on the additional steps to follow can be found at:
+        <https://spl.solana.com/feature-proposal#feature-proposal-life-cycle>
+
+        1. Generate a new keypair with `solana-keygen new --outfile feature.json --no-passphrase`
+          - Keypairs should be held by core contributors only. If you're a non-core contributor going
+          through these steps, the PR process will facilitate a keypair holder being picked. That
+          person will generate the keypair, provide pubkey for PR, and ultimately enable the feature.
+
+        2. Add a public module for the feature, specifying keypair pubkey as the id with
+        `solana_sdk::declare_id!()` within the module. Additionally, add an entry to `FEATURE_NAMES` map.
+
+        3. Add desired logic to check for and switch on feature availability.
+  - type: textarea
+    id: description
+    attributes:
+      label: Description
+      placeholder: Describe why the new feature gate is needed and any necessary conditions for its activation
+    validations:
+      required: true
+  - type: input
+    id: id
+    attributes:
+      label: Feature ID
+      description: The public key of the feature account
+    validations:
+      required: true
+  - type: dropdown
+    id: activation-method
+    attributes:
+      label: Activation Method
+      options:
+        - Single Core Contributor
+        - Staked Validator Vote
+    validations:
+      required: true
+  - type: input
+    id: testnet
+    attributes:
+      label: Testnet Activation Epoch
+      placeholder: Edit this response when feature is activated on this cluster
+    validations:
+      required: false
+  - type: input
+    id: devnet
+    attributes:
+      label: Devnet Activation Epoch
+      placeholder: Edit this response when feature is activated on this cluster
+    validations:
+      required: false
+  - type: input
+    id: mainnet-beta
+    attributes:
+      label: Mainnet-Beta Activation Epoch
+      placeholder: Edit this response when feature is activated on this cluster
+    validations:
+      required: false
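Steps 2 and 3 of the template above are easiest to see in code. The sketch below is a minimal illustration only: the feature name `my_new_feature`, the placeholder pubkey, and the `behavior` function are assumptions for illustration, not part of this diff; a real change would use the pubkey generated in step 1 and be added alongside the SDK's existing feature entries.

```rust
use solana_sdk::feature_set::FeatureSet;

// Step 2: a public module whose id() is the feature keypair's pubkey
// (placeholder address shown; substitute the pubkey from step 1).
pub mod my_new_feature {
    solana_sdk::declare_id!("Feature111111111111111111111111111111111111");
}

// Step 2, continued: the feature must also be registered in the SDK's
// FEATURE_NAMES map, e.g. with an entry like:
//     (my_new_feature::id(), "short description of my new feature"),

// Step 3: gate runtime logic on the feature's activation status.
fn behavior(feature_set: &FeatureSet) -> &'static str {
    if feature_set.is_active(&my_new_feature::id()) {
        "new behavior"
    } else {
        "legacy behavior"
    }
}
```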
.github/PULL_REQUEST_TEMPLATE.md (vendored) | 4
@@ -1,5 +1,9 @@
 #### Problem
 
 
 #### Summary of Changes
 
+
+Fixes #
+<!-- OPTIONAL: Feature Gate Issue: # -->
+<!-- Don't forget to add the "feature-gate" label -->
.github/workflows/autolock_bot_PR.txt (vendored) | 37 (new file)
@@ -0,0 +1,37 @@
+name: 'Autolock RitBot for PR'
+
+on:
+  schedule:
+    - cron: '0 0 * * *'
+  workflow_dispatch:
+
+permissions:
+  issues: write
+  pull-requests: write
+
+concurrency:
+  group: lock
+
+jobs:
+  action:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: dessant/lock-threads@v3
+        with:
+
+          github-token: ${{ github.token }}
+          pr-inactive-days: '14'
+          exclude-pr-created-before: ''
+          exclude-pr-created-after: ''
+          exclude-pr-created-between: ''
+          exclude-pr-closed-before: ''
+          exclude-pr-closed-after: ''
+          exclude-pr-closed-between: ''
+          include-any-pr-labels: 'automerge'
+          include-all-pr-labels: ''
+          exclude-any-pr-labels: ''
+          add-pr-labels: 'locked PR'
+          remove-pr-labels: ''
+          pr-comment: 'This PR has been automatically locked since there has not been any activity in past 14 days after it was merged.'
+          pr-lock-reason: 'resolved'
+          log-output: true
.github/workflows/autolock_bot_closed_issue.txt (vendored) | 38 (new file)
@@ -0,0 +1,38 @@
+name: 'Autolock NaviBot for closed issue'
+
+on:
+  schedule:
+    - cron: '0 0 * * *'
+  workflow_dispatch:
+
+permissions:
+  issues: write
+  pull-requests: write
+
+concurrency:
+  group: lock
+
+jobs:
+  action:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: dessant/lock-threads@v3
+        with:
+
+          github-token: ${{ github.token }}
+          issue-inactive-days: '7'
+          exclude-issue-created-before: ''
+          exclude-issue-created-after: ''
+          exclude-issue-created-between: ''
+          exclude-issue-closed-before: ''
+          exclude-issue-closed-after: ''
+          exclude-issue-closed-between: ''
+          include-any-issue-labels: ''
+          include-all-issue-labels: ''
+          exclude-any-issue-labels: ''
+          add-issue-labels: 'locked issue'
+          remove-issue-labels: ''
+          issue-comment: 'This issue has been automatically locked since there has not been any activity in past 7 days after it was closed. Please open a new issue for related bugs.'
+          issue-lock-reason: 'resolved'
+          process-only: 'issues'
+          log-output: true
.github/workflows/client-targets.yml (vendored) | 66 (new file)
@@ -0,0 +1,66 @@
+name: client_targets
+
+on:
+  pull_request:
+    branches:
+      - master
+    paths:
+      - "client/**"
+      - "sdk/**"
+      - ".github/workflows/client-targets.yml"
+
+env:
+  CARGO_TERM_COLOR: always
+
+jobs:
+  check_compilation:
+    name: Client compilation
+    runs-on: ${{ matrix.os }}
+    strategy:
+      matrix:
+        target: [aarch64-apple-ios, x86_64-apple-ios, aarch64-apple-darwin, x86_64-apple-darwin, aarch64-linux-android, armv7-linux-androideabi, i686-linux-android, x86_64-linux-android]
+        include:
+          - target: aarch64-apple-ios
+            platform: ios
+            os: macos-latest
+          - target: x86_64-apple-ios
+            platform: ios
+            os: macos-latest
+          - target: aarch64-apple-darwin
+            platform: ios
+            os: macos-latest
+          - target: x86_64-apple-darwin
+            platform: ios
+            os: macos-latest
+          - target: aarch64-linux-android
+            platform: android
+            os: ubuntu-latest
+          - target: armv7-linux-androideabi
+            platform: android
+            os: ubuntu-latest
+          - target: i686-linux-android
+            platform: android
+            os: ubuntu-latest
+          - target: x86_64-linux-android
+            platform: android
+            os: ubuntu-latest
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v2
+      - uses: actions-rs/toolchain@v1
+        with:
+          toolchain: stable
+          target: ${{ matrix.target }}
+      - name: Install cargo-ndk
+        if: ${{ matrix.platform == 'android' }}
+        run: cargo install cargo-ndk
+      - uses: actions-rs/cargo@v1
+        if: ${{ matrix.platform == 'android' }}
+        with:
+          command: ndk
+          args: --target ${{ matrix.target }} build -p solana-client
+      - uses: actions-rs/cargo@v1
+        if: ${{ matrix.platform == 'ios' }}
+        with:
+          command: build
+          args: -p solana-client --target ${{ matrix.target }}
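The workflow above only checks that `solana-client` compiles for each mobile and desktop target; it runs nothing. For context, a minimal consumer of the crate looks roughly like the following sketch (the devnet endpoint choice and error handling are assumptions, not taken from this diff):

```rust
use solana_client::rpc_client::RpcClient;

fn main() {
    // Synchronous JSON-RPC client pointed at a public devnet endpoint.
    let client = RpcClient::new("https://api.devnet.solana.com".to_string());
    match client.get_slot() {
        Ok(slot) => println!("current slot: {slot}"),
        Err(err) => eprintln!("rpc error: {err}"),
    }
}
```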
.github/workflows/explorer_preview.yml (vendored) | 42
@@ -10,11 +10,49 @@ jobs:
     if: ${{ github.event.workflow_run.conclusion == 'success' }}
     steps:
      - uses: actions/checkout@v2
        with:
          ref: ${{ github.event.pull_request.head.sha }}
      - uses: amondnet/vercel-action@v20
        with:
          vercel-token: ${{ secrets.VERCEL_TOKEN }} # Required
          github-token: ${{ secrets.GITHUB_TOKEN }} #Optional
          vercel-org-id: ${{ secrets.ORG_ID}}  #Required
-          vercel-project-id: ${{ secrets.PROJECT_ID}} #Required
          working-directory: ./explorer
+          vercel-project-id: ${{ secrets.PROJECT_ID}} #Required
+          scope: ${{ secrets.TEAM_ID }}
+
+      - name: vercel url
+        run: |
+          touch vercelfile.txt
+          vercel --token ${{secrets.VERCEL_TOKEN}} ls explorer --scope team_8A2WD7p4uR7tmKX9M68loHXI > vercelfile.txt
+          touch vercelfile1.txt
+          head -n 2 vercelfile.txt > vercelfile1.txt
+          touch vercelfile2.txt
+          tail -n 1 vercelfile1.txt > vercelfile2.txt
+          filtered_url7=$(cut -f7 -d" " vercelfile2.txt)
+          echo "filtered_url7 is: $filtered_url7"
+          touch .env.preview1
+          echo "$filtered_url7" > .env.preview1
+          #filtered_url=$(cat vercelfile2.txt )
+          #echo "$filtered_url" >> .env.preview1
+
+      - name: Fetching Vercel Preview Deployment Link
+        uses: mathiasvr/command-output@v1
+        id: test1
+        with:
+          run: |
+            echo "$(cat .env.preview1)"
+      - name: Fetching PR commit URL
+        uses: mathiasvr/command-output@v1
+        id: test2
+        with:
+          run: |
+            HEAD_SHA=$(curl -H "Accept: application/vnd.github.v3+json" https://api.github.com/repos/solana-labs/solana/pulls | jq .[0] | jq -r .head.sha)
+            USER_NAME=$(curl -H "Accept: application/vnd.github.v3+json" https://api.github.com/repos/solana-labs/solana/pulls | jq .[0] | jq -r .head.user.login)
+            echo "github.com/$USER_NAME/solana/commit/$HEAD_SHA"
+
+      - name: Slack Notification4
+        uses: rtCamp/action-slack-notify@master
+        env:
+          SLACK_MESSAGE: ' Vercel Link: ${{ steps.test1.outputs.stdout }} PR Commit: ${{steps.test2.outputs.stdout}}'
+          SLACK_TITLE: Vercel "Explorer" Preview Deployment Link , PR Commit
+          SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
.github/workflows/explorer_production.yml (vendored) | 6
@@ -30,7 +30,8 @@ jobs:
     runs-on: ubuntu-latest
     defaults:
       run:
-        working-directory: explorer
+        working-directory: explorer
+
     steps:
      - uses: actions/checkout@v2
        with:
@@ -38,9 +39,8 @@ jobs:
      - uses: amondnet/vercel-action@v20
        with:
          vercel-token: ${{ secrets.VERCEL_TOKEN }} # Required
-          github-token: ${{ secrets.GITHUB_TOKEN }} #Optional
+          github-token: ${{ secrets.PAT }} #Optional
          vercel-args: '--prod' #for production
          vercel-org-id: ${{ secrets.ORG_ID}} #Required
          vercel-project-id: ${{ secrets.PROJECT_ID}} #Required
-          working-directory: ./explorer
          scope: ${{ secrets.TEAM_ID }}
.github/workflows/web3.yml (vendored) | 19
@@ -65,20 +65,7 @@ jobs:
          node-version: ${{ matrix.node }}
          cache: 'npm'
          cache-dependency-path: web3.js/package-lock.json
-      - run: npm i -g npm@7
-      - run: npm ci
-      - run: npm run lint
-      - run: |
-          npm run build
-          ls -l lib
-          test -r lib/index.iife.js
-          test -r lib/index.cjs.js
-          test -r lib/index.esm.js
-      - run: npm run doc
-      - run: npm run codecov
-      - run: |
-          sh -c "$(curl -sSfL https://release.solana.com/edge/install)"
-          echo "$HOME/.local/share/solana/install/active_release/bin" >> $GITHUB_PATH
-          PATH="$HOME/.local/share/solana/install/active_release/bin:$PATH"
-          solana --version
-      - run: npm run test:live-with-test-validator
+      - run: |
+          source .travis/before_install.sh
+          npm install
+          source .travis/script.sh
.mergify.yml | 44
@@ -24,6 +24,7 @@ pull_request_rules:
       - "#approved-reviews-by=0"
       - "#commented-reviews-by=0"
       - "#changes-requested-reviews-by=0"
+      - "#review-requested=0"
     actions:
       request_reviews:
         teams:
@@ -34,6 +35,7 @@ pull_request_rules:
       - status-success=buildkite/solana
       - status-success=ci-gate
       - label=automerge
       - label!=no-automerge
+      - author≠@dont-squash-my-commits
       - or:
         # only require travis success if docs files changed
@@ -60,6 +62,7 @@ pull_request_rules:
       - status-success=Travis CI - Pull Request
       - status-success=ci-gate
       - label=automerge
       - label!=no-automerge
+      - author=@dont-squash-my-commits
       - or:
         # only require explorer checks if explorer files changed
@@ -93,23 +96,56 @@ pull_request_rules:
       - author=mergify[bot]
       - head~=^mergify/bp/
       - "#status-failure=0"
       - "-merged"
+      - label!=no-automerge
     actions:
       label:
         add:
          - automerge
-  - name: v1.8 backport
+  - name: v1.9 feature-gate backport
    conditions:
-      - label=v1.8
+      - label=v1.9
+      - label=feature-gate
    actions:
      backport:
        ignore_conflicts: true
+        labels:
+          - feature-gate
        branches:
-          - v1.8
-  - name: v1.9 backport
+          - v1.9
+  - name: v1.9 non-feature-gate backport
    conditions:
      - label=v1.9
+      - label!=feature-gate
    actions:
      backport:
        ignore_conflicts: true
        branches:
          - v1.9
+  - name: v1.10 feature-gate backport
+    conditions:
+      - label=v1.10
+      - label=feature-gate
+    actions:
+      backport:
+        ignore_conflicts: true
+        labels:
+          - feature-gate
+        branches:
+          - v1.10
+  - name: v1.10 non-feature-gate backport
+    conditions:
+      - label=v1.10
+      - label!=feature-gate
+    actions:
+      backport:
+        ignore_conflicts: true
+        branches:
+          - v1.10
 
+commands_restrictions:
+  # The author of copied PRs is the Mergify user.
+  # Restrict `copy` access to Core Contributors
+  copy:
+    conditions:
+      - author=@core-contributors
.travis.yml | 18
@@ -78,6 +78,24 @@ jobs:
 # - sudo apt-get install libssl-dev libudev-dev
 
 # docs pull request
+# - name: "explorer"
+#   if: type = pull_request AND branch = master
+
+#   language: node_js
+#   node_js:
+#     - "lts/*"
+
+#   cache:
+#     directories:
+#       - ~/.npm
+
+#   before_install:
+#     - .travis/affects.sh explorer/ .travis || travis_terminate 0
+#     - cd explorer
+
+#   script:
+#     - npm run build
+#     - npm run format
 - name: "docs"
   if: type IN (push, pull_request) OR tag IS present
   language: node_js
(file name not captured) | modified
@@ -74,7 +74,7 @@ minutes to execute. Use that time to write a detailed problem description. Once
 the description is written and CI succeeds, click the "Ready to Review" button
 and add reviewers. Adding reviewers before CI succeeds is a fast path to losing
 reviewer engagement. Not only will they be notified and see the PR is not yet
-ready for them, they will also be bombarded them with additional notifications
+ready for them, they will also be bombarded with additional notifications
 each time you push a commit to get past CI or until they "mute" the PR. Once
 muted, you'll need to reach out over some other medium, such as Discord, to
 request they have another look. When you use draft PRs, no notifications are
@@ -146,6 +146,31 @@ the subject lines of the git commits contained in the PR. It's especially
 generous (and not expected) to rebase or reword commits such that each change
 matches the logical flow in your PR description.
 
+### The PR / Issue Labels
+Labels make it easier to manage and track PRs / issues. Below are some common labels
+that we use in Solana. For the complete list of labels, please refer to the
+[label page](https://github.com/solana-labs/solana/issues/labels):
+
+* "feature-gate": when you add a new feature gate or modify the behavior of
+  an existing feature gate, please add the "feature-gate" label to your PR.
+  New feature gates should also always have a corresponding tracking issue
+  (go to "New Issue" -> "Feature Gate Tracker [Get Started](https://github.com/solana-labs/solana/issues/new?assignees=&labels=feature-gate&template=1-feature-gate.yml&title=Feature+Gate%3A+)")
+  and should be updated each time the feature is activated on a cluster.
+
+* "automerge": When a PR is labelled with "automerge", the PR will be
+  automatically merged once CI passes. In general, this label should only
+  be used for small hot-fixes (fewer than 100 lines) or automatically generated
+  PRs. If you're uncertain, it's usually the case that the PR is not
+  qualified as "automerge".
+
+* "good first issue": If you happen to find an issue that is non-urgent and
+  self-contained with moderate scope, you might want to consider attaching
+  "good first issue" to it as it might be a good practice for newcomers.
+
+* "rust": this pull request updates Rust code.
+
+* "javascript": this pull request updates JavaScript code.
+
 ### When will my PR be reviewed?
 
 PRs are typically reviewed and merged in under 7 days. If your PR has been open
Cargo.lock (generated) | 3201 (diff suppressed: file too large)
Cargo.toml | 49
@@ -1,48 +1,48 @@
 [workspace]
 members = [
-    "accountsdb-plugin-interface",
-    "accountsdb-plugin-manager",
-    "accountsdb-plugin-postgres",
-    "accounts-cluster-bench",
-    "bench-streamer",
-    "bench-tps",
+    "account-decoder",
     "accounts-bench",
+    "accounts-cluster-bench",
     "banking-bench",
     "banks-client",
     "banks-interface",
     "banks-server",
+    "bench-streamer",
+    "bench-tps",
     "bloom",
     "bucket_map",
     "clap-utils",
+    "cli",
     "cli-config",
     "cli-output",
     "client",
+    "client-test",
     "core",
     "dos",
     "download-utils",
     "entry",
     "faucet",
     "frozen-abi",
-    "perf",
-    "validator",
     "genesis",
     "genesis-utils",
+    "geyser-plugin-interface",
+    "geyser-plugin-manager",
     "gossip",
     "install",
     "keygen",
     "ledger",
     "ledger-tool",
     "local-cluster",
-    "logger",
     "log-analyzer",
+    "logger",
+    "measure",
     "merkle-root-bench",
     "merkle-tree",
-    "storage-bigtable",
-    "storage-proto",
-    "streamer",
-    "measure",
     "metrics",
     "net-shaper",
+    "net-utils",
     "notifier",
+    "perf",
     "poh",
     "poh-bench",
     "program-test",
@@ -52,11 +52,15 @@ members = [
     "programs/bpf_loader/gen-syscall-list",
     "programs/compute-budget",
     "programs/config",
+    "programs/ed25519-tests",
     "programs/stake",
     "programs/vote",
+    "programs/zk-token-proof",
+    "rayon-threadlimit",
     "rbpf-cli",
     "remote-wallet",
     "rpc",
+    "rpc-test",
     "runtime",
     "runtime/store-tool",
     "sdk",
@@ -64,29 +68,24 @@ members = [
     "sdk/cargo-test-bpf",
     "send-transaction-service",
     "stake-accounts",
+    "storage-bigtable",
+    "storage-proto",
+    "streamer",
     "sys-tuner",
+    "test-validator",
     "tokens",
     "transaction-dos",
     "transaction-status",
-    "account-decoder",
     "upload-perf",
-    "net-utils",
+    "validator",
     "version",
-    "cli",
-    "rayon-threadlimit",
     "watchtower",
-    "replica-node",
-    "replica-lib",
-    "test-validator",
-    "rpc-test",
-    "client-test",
+    "zk-token-sdk",
 ]
 
 exclude = [
     "programs/bpf",
 ]
 
 # TODO: Remove once the "simd-accel" feature from the reed-solomon-erasure
 # dependency is supported on Apple M1. v2 of the feature resolver is needed to
 # specify arch-specific features.
 # This prevents a Travis CI error when building for Windows.
 resolver = "2"
LICENSE | 2
@@ -1,4 +1,4 @@
-Copyright 2020 Solana Foundation.
+Copyright 2022 Solana Foundation.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
README.md | 16
@@ -35,13 +35,7 @@ On Linux systems you may need to install libssl-dev, pkg-config, zlib1g-dev, etc
 ```bash
 $ sudo apt-get update
-$ sudo apt-get install libssl-dev libudev-dev pkg-config zlib1g-dev llvm clang make
-```
-
-On Mac M1s, make sure you set up your terminal & homebrew [to use](https://5balloons.info/correct-way-to-install-and-use-homebrew-on-m1-macs/) Rosetta. You can install it with:
-
-```bash
-$ softwareupdate --install-rosetta
+$ sudo apt-get install libssl-dev libudev-dev pkg-config zlib1g-dev llvm clang cmake make
 ```
 
 ## **2. Download the source code.**
@@ -74,7 +68,7 @@ devnet.solana.com. Runs 24/7. Learn more about the [public clusters](https://doc
 
 # Benchmarking
 
-First install the nightly build of rustc. `cargo bench` requires use of the
+First, install the nightly build of rustc. `cargo bench` requires the use of the
 unstable features only available in the nightly build.
 
 ```bash
@@ -121,12 +115,12 @@ the reader to check and validate their accuracy and truthfulness.
 Furthermore, nothing in this project constitutes a solicitation for
 investment.
 
-Any content produced by SF or developer resources that SF provides, are
+Any content produced by SF or developer resources that SF provides are
 for educational and inspirational purposes only. SF does not encourage,
 induce or sanction the deployment, integration or use of any such
 applications (including the code comprising the Solana blockchain
 protocol) in violation of applicable laws or regulations and hereby
-prohibits any such deployment, integration or use. This includes use of
+prohibits any such deployment, integration or use. This includes the use of
 any such applications by the reader (a) in violation of export control
 or sanctions laws of the United States or any other applicable
 jurisdiction, (b) if the reader is located in or ordinarily resident in
@@ -139,7 +133,7 @@ prohibitions.
 The reader should be aware that U.S. export control and sanctions laws
 prohibit U.S. persons (and other persons that are subject to such laws)
 from transacting with persons in certain countries and territories or
-that are on the SDN list. As a project based primarily on open-source
+that are on the SDN list. As a project-based primarily on open-source
 software, it is possible that such sanctioned persons may nevertheless
 bypass prohibitions, obtain the code comprising the Solana blockchain
 protocol (or other project code or applications) and deploy, integrate,
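The benchmarking note in the README hunk above exists because `cargo bench` relies on the unstable `test` crate, which only compiles on nightly rustc. A minimal benchmark of that kind looks like the following sketch (illustrative only, not a benchmark from this repository):

```rust
#![feature(test)] // nightly-only, hence the README's requirement
extern crate test;

#[cfg(test)]
mod benches {
    use test::{black_box, Bencher};

    #[bench]
    fn bench_sum(b: &mut Bencher) {
        let v: Vec<u64> = (0..1024).collect();
        // black_box keeps the optimizer from deleting the measured work.
        b.iter(|| black_box(v.iter().sum::<u64>()));
    }
}
```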
(file name not captured) | modified
@@ -94,7 +94,7 @@ Alternatively use the Github UI.
 ```
 1. Confirm that your freshly cut release branch is shown as `BETA_CHANNEL` and the previous release branch as `STABLE_CHANNEL`:
 ```
-ci/channel_info.sh
+ci/channel-info.sh
 ```
 
 ## Steps to Create a Release
@@ -152,5 +152,5 @@ appearing. To check for progress:
 [Crates.io](https://crates.io/crates/solana) should have an updated Solana version. This can take 2-3 hours, and sometimes fails in the `solana-secondary` job.
 If this happens and the error is non-fatal, click "Retry" on the "publish crate" job
 
-### Update software on devnet.solana.com/testnet.solana.com/mainnet-beta.solana.com
-See the documentation at https://github.com/solana-labs/cluster-ops/
+### Update software on testnet.solana.com
+See the documentation at https://github.com/solana-labs/cluster-ops/. devnet.solana.com and mainnet-beta.solana.com run stable releases that have been tested on testnet. Do not update devnet or mainnet-beta with a beta release.
(file name not captured) | modified
@@ -11,7 +11,7 @@
 email to security@solana.com and provide your github username so we can add you
 to a new draft security advisory for further discussion.
 
-Expect a response as fast as possible, within one business day at the latest.
+Expect a response as fast as possible, typically within 72 hours.
 
 <a name="bounty"></a>
 ## Security Bug Bounties
(file name not captured) | modified
@@ -1,6 +1,6 @@
 [package]
 name = "solana-account-decoder"
-version = "1.9.4"
+version = "1.11.0"
 description = "Solana account decoder"
 authors = ["Solana Maintainers <maintainers@solana.foundation>"]
 repository = "https://github.com/solana-labs/solana"
@@ -10,21 +10,21 @@ license = "Apache-2.0"
 edition = "2021"
 
 [dependencies]
-base64 = "0.12.3"
+Inflector = "0.11.4"
+base64 = "0.13.0"
 bincode = "1.3.3"
 bs58 = "0.4.0"
 bv = "0.11.1"
-Inflector = "0.11.4"
 lazy_static = "1.4.0"
-serde = "1.0.130"
+serde = "1.0.136"
 serde_derive = "1.0.103"
-serde_json = "1.0.72"
-solana-config-program = { path = "../programs/config", version = "=1.9.4" }
-solana-sdk = { path = "../sdk", version = "=1.9.4" }
-solana-vote-program = { path = "../programs/vote", version = "=1.9.4" }
+serde_json = "1.0.79"
+solana-config-program = { path = "../programs/config", version = "=1.11.0" }
+solana-sdk = { path = "../sdk", version = "=1.11.0" }
+solana-vote-program = { path = "../programs/vote", version = "=1.11.0" }
 spl-token = { version = "=3.2.0", features = ["no-entrypoint"] }
 thiserror = "1.0"
-zstd = "0.9.0"
+zstd = "0.11.1"
 
 [package.metadata.docs.rs]
 targets = ["x86_64-unknown-linux-gnu"]
@@ -33,7 +33,7 @@ pub type StringDecimals = String;
 pub const MAX_BASE58_BYTES: usize = 128;

 /// A duplicate representation of an Account for pretty JSON serialization
-#[derive(Serialize, Deserialize, Clone, Debug)]
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
 #[serde(rename_all = "camelCase")]
 pub struct UiAccount {
     pub lamports: u64,
@@ -136,16 +136,13 @@ impl UiAccount {
             UiAccountData::Binary(blob, encoding) => match encoding {
                 UiAccountEncoding::Base58 => bs58::decode(blob).into_vec().ok(),
                 UiAccountEncoding::Base64 => base64::decode(blob).ok(),
-                UiAccountEncoding::Base64Zstd => base64::decode(blob)
-                    .ok()
-                    .map(|zstd_data| {
-                        let mut data = vec![];
-                        zstd::stream::read::Decoder::new(zstd_data.as_slice())
-                            .and_then(|mut reader| reader.read_to_end(&mut data))
-                            .map(|_| data)
-                            .ok()
-                    })
-                    .flatten(),
+                UiAccountEncoding::Base64Zstd => base64::decode(blob).ok().and_then(|zstd_data| {
+                    let mut data = vec![];
+                    zstd::stream::read::Decoder::new(zstd_data.as_slice())
+                        .and_then(|mut reader| reader.read_to_end(&mut data))
+                        .map(|_| data)
+                        .ok()
+                }),
                 UiAccountEncoding::Binary | UiAccountEncoding::JsonParsed => None,
             },
         }?;
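The `Base64Zstd` change above is a pure refactor: an `Option::map` whose closure itself returns an `Option` (leaving an `Option<Option<Vec<u8>>>` to flatten) collapses into a single `and_then`. A minimal, self-contained sketch of the equivalence, independent of the Solana types:

```rust
fn main() {
    let blob = Some("data");

    // map + flatten: the closure returns an Option, so map yields Option<Option<usize>>.
    let a: Option<usize> = blob.map(|s| s.find('t')).flatten();

    // and_then expresses the same chain in one step (clippy's map_flatten lint
    // suggests exactly this rewrite).
    let b: Option<usize> = blob.and_then(|s| s.find('t'));

    assert_eq!(a, b);
}
```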
@@ -5,7 +5,7 @@ use {
         parse_nonce::parse_nonce,
         parse_stake::parse_stake,
         parse_sysvar::parse_sysvar,
-        parse_token::{parse_token, spl_token_id},
+        parse_token::{parse_token, spl_token_ids},
         parse_vote::parse_vote,
     },
     inflector::Inflector,
@@ -21,7 +21,6 @@ lazy_static! {
     static ref STAKE_PROGRAM_ID: Pubkey = stake::program::id();
     static ref SYSTEM_PROGRAM_ID: Pubkey = system_program::id();
     static ref SYSVAR_PROGRAM_ID: Pubkey = sysvar::id();
-    static ref TOKEN_PROGRAM_ID: Pubkey = spl_token_id();
     static ref VOTE_PROGRAM_ID: Pubkey = solana_vote_program::id();
     pub static ref PARSABLE_PROGRAM_IDS: HashMap<Pubkey, ParsableAccount> = {
         let mut m = HashMap::new();
@@ -31,7 +30,9 @@ lazy_static! {
         );
         m.insert(*CONFIG_PROGRAM_ID, ParsableAccount::Config);
         m.insert(*SYSTEM_PROGRAM_ID, ParsableAccount::Nonce);
-        m.insert(*TOKEN_PROGRAM_ID, ParsableAccount::SplToken);
+        for spl_token_id in spl_token_ids() {
+            m.insert(spl_token_id, ParsableAccount::SplToken);
+        }
         m.insert(*STAKE_PROGRAM_ID, ParsableAccount::Stake);
         m.insert(*SYSVAR_PROGRAM_ID, ParsableAccount::Sysvar);
         m.insert(*VOTE_PROGRAM_ID, ParsableAccount::Vote);
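The `PARSABLE_PROGRAM_IDS` hunk swaps one hard-wired `insert` for a loop over every id returned by `spl_token_ids()`, so the map keeps working if more token programs are recognized later. A self-contained sketch of the registry pattern (toy `u8` ids in place of `Pubkey`):

```rust
use std::collections::HashMap;

#[derive(Clone, Copy, Debug, PartialEq)]
enum ParsableAccount {
    Config,
    Nonce,
    SplToken,
}

// Toy stand-in for spl_token_ids(); the real function returns Vec<Pubkey>.
fn spl_token_ids() -> Vec<u8> {
    vec![3, 4]
}

fn main() {
    let mut m: HashMap<u8, ParsableAccount> = HashMap::new();
    m.insert(1, ParsableAccount::Config);
    m.insert(2, ParsableAccount::Nonce);
    // One entry per known token program, instead of a single hard-coded id.
    for id in spl_token_ids() {
        m.insert(id, ParsableAccount::SplToken);
    }
    assert_eq!(m.get(&4), Some(&ParsableAccount::SplToken));
}
```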
@@ -15,16 +15,31 @@ use {

 // A helper function to convert spl_token::id() as spl_sdk::pubkey::Pubkey to
 // solana_sdk::pubkey::Pubkey
-pub fn spl_token_id() -> Pubkey {
+fn spl_token_id() -> Pubkey {
     Pubkey::new_from_array(spl_token::id().to_bytes())
 }

+// Returns all known SPL Token program ids
+pub fn spl_token_ids() -> Vec<Pubkey> {
+    vec![spl_token_id()]
+}
+
+// Check if the provided program id is a known SPL Token program id
+pub fn is_known_spl_token_id(program_id: &Pubkey) -> bool {
+    *program_id == spl_token_id()
+}
+
 // A helper function to convert spl_token::native_mint::id() as spl_sdk::pubkey::Pubkey to
 // solana_sdk::pubkey::Pubkey
 pub fn spl_token_native_mint() -> Pubkey {
     Pubkey::new_from_array(spl_token::native_mint::id().to_bytes())
 }

+// The program id of the `spl_token_native_mint` account
+pub fn spl_token_native_mint_program_id() -> Pubkey {
+    spl_token_id()
+}
+
 // A helper function to convert a solana_sdk::pubkey::Pubkey to spl_sdk::pubkey::Pubkey
 pub fn spl_token_pubkey(pubkey: &Pubkey) -> SplTokenPubkey {
     SplTokenPubkey::new_from_array(pubkey.to_bytes())
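The helpers above exist because `spl-token` and `solana-sdk` each define their own `Pubkey` type; the code bridges them by round-tripping through the raw 32-byte array. A self-contained sketch of that conversion shape, with two stand-in newtypes:

```rust
// Two structurally identical newtypes standing in for the two distinct Pubkey
// types; the compiler treats them as unrelated, so conversion goes via bytes.
#[derive(Debug, PartialEq)]
struct SplPubkey([u8; 32]);
#[derive(Debug, PartialEq)]
struct SdkPubkey([u8; 32]);

impl SplPubkey {
    fn to_bytes(&self) -> [u8; 32] {
        self.0
    }
}

impl SdkPubkey {
    fn new_from_array(bytes: [u8; 32]) -> Self {
        SdkPubkey(bytes)
    }
}

fn main() {
    let spl = SplPubkey([7; 32]);
    // Same shape as Pubkey::new_from_array(spl_token::id().to_bytes()).
    let sdk = SdkPubkey::new_from_array(spl.to_bytes());
    assert_eq!(sdk, SdkPubkey([7; 32]));
}
```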
@@ -2,21 +2,21 @@
 authors = ["Solana Maintainers <maintainers@solana.foundation>"]
 edition = "2021"
 name = "solana-accounts-bench"
-version = "1.9.4"
+version = "1.11.0"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
 publish = false

 [dependencies]
+clap = "2.33.1"
 log = "0.4.14"
 rayon = "1.5.1"
-solana-logger = { path = "../logger", version = "=1.9.4" }
-solana-runtime = { path = "../runtime", version = "=1.9.4" }
-solana-measure = { path = "../measure", version = "=1.9.4" }
-solana-sdk = { path = "../sdk", version = "=1.9.4" }
-solana-version = { path = "../version", version = "=1.9.4" }
-clap = "2.33.1"
+solana-logger = { path = "../logger", version = "=1.11.0" }
+solana-measure = { path = "../measure", version = "=1.11.0" }
+solana-runtime = { path = "../runtime", version = "=1.11.0" }
+solana-sdk = { path = "../sdk", version = "=1.11.0" }
+solana-version = { path = "../version", version = "=1.11.0" }

 [package.metadata.docs.rs]
 targets = ["x86_64-unknown-linux-gnu"]
@@ -6,12 +6,18 @@ use {
     rayon::prelude::*,
     solana_measure::measure::Measure,
     solana_runtime::{
-        accounts::{create_test_accounts, update_accounts_bench, Accounts},
+        accounts::{
+            test_utils::{create_test_accounts, update_accounts_bench},
+            Accounts,
+        },
         accounts_db::AccountShrinkThreshold,
         accounts_index::AccountSecondaryIndexes,
         ancestors::Ancestors,
+        rent_collector::RentCollector,
     },
-    solana_sdk::{genesis_config::ClusterType, pubkey::Pubkey},
+    solana_sdk::{
+        genesis_config::ClusterType, pubkey::Pubkey, sysvar::epoch_schedule::EpochSchedule,
+    },
     std::{env, fs, path::PathBuf},
 };

@@ -114,7 +120,12 @@ fn main() {
     } else {
         let mut pubkeys: Vec<Pubkey> = vec![];
         let mut time = Measure::start("hash");
-        let results = accounts.accounts_db.update_accounts_hash(0, &ancestors);
+        let results = accounts.accounts_db.update_accounts_hash(
+            0,
+            &ancestors,
+            &EpochSchedule::default(),
+            &RentCollector::default(),
+        );
         time.stop();
         let mut time_store = Measure::start("hash using store");
         let results_store = accounts.accounts_db.update_accounts_hash_with_index_option(
@@ -124,7 +135,8 @@ fn main() {
             &ancestors,
             None,
             false,
+            None,
+            &EpochSchedule::default(),
+            &RentCollector::default(),
             false,
         );
         time_store.stop();
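`Measure` is used in this bench as a simple stopwatch around each hashing pass. A short sketch of the same start/stop pattern, assuming solana-measure's `Measure::start`/`stop`/`as_ms` API as used above:

```rust
use solana_measure::measure::Measure;

fn main() {
    let mut time = Measure::start("hash");
    let _sum: u64 = (0..1_000_000u64).sum(); // stand-in for update_accounts_hash
    time.stop();
    println!("hash took {} ms", time.as_ms());
}
```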
@@ -2,7 +2,7 @@
 authors = ["Solana Maintainers <maintainers@solana.foundation>"]
 edition = "2021"
 name = "solana-accounts-cluster-bench"
-version = "1.9.4"
+version = "1.11.0"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -13,25 +13,25 @@ clap = "2.33.1"
 log = "0.4.14"
 rand = "0.7.0"
 rayon = "1.5.1"
-solana-account-decoder = { path = "../account-decoder", version = "=1.9.4" }
-solana-clap-utils = { path = "../clap-utils", version = "=1.9.4" }
-solana-client = { path = "../client", version = "=1.9.4" }
-solana-core = { path = "../core", version = "=1.9.4" }
-solana-faucet = { path = "../faucet", version = "=1.9.4" }
-solana-gossip = { path = "../gossip", version = "=1.9.4" }
-solana-logger = { path = "../logger", version = "=1.9.4" }
-solana-measure = { path = "../measure", version = "=1.9.4" }
-solana-net-utils = { path = "../net-utils", version = "=1.9.4" }
-solana-runtime = { path = "../runtime", version = "=1.9.4" }
-solana-sdk = { path = "../sdk", version = "=1.9.4" }
-solana-streamer = { path = "../streamer", version = "=1.9.4" }
-solana-test-validator = { path = "../test-validator", version = "=1.9.4" }
-solana-transaction-status = { path = "../transaction-status", version = "=1.9.4" }
-solana-version = { path = "../version", version = "=1.9.4" }
+solana-account-decoder = { path = "../account-decoder", version = "=1.11.0" }
+solana-clap-utils = { path = "../clap-utils", version = "=1.11.0" }
+solana-client = { path = "../client", version = "=1.11.0" }
+solana-faucet = { path = "../faucet", version = "=1.11.0" }
+solana-gossip = { path = "../gossip", version = "=1.11.0" }
+solana-logger = { path = "../logger", version = "=1.11.0" }
+solana-measure = { path = "../measure", version = "=1.11.0" }
+solana-net-utils = { path = "../net-utils", version = "=1.11.0" }
+solana-runtime = { path = "../runtime", version = "=1.11.0" }
+solana-sdk = { path = "../sdk", version = "=1.11.0" }
+solana-streamer = { path = "../streamer", version = "=1.11.0" }
+solana-transaction-status = { path = "../transaction-status", version = "=1.11.0" }
+solana-version = { path = "../version", version = "=1.11.0" }
 spl-token = { version = "=3.2.0", features = ["no-entrypoint"] }

 [dev-dependencies]
-solana-local-cluster = { path = "../local-cluster", version = "=1.9.4" }
+solana-core = { path = "../core", version = "=1.11.0" }
+solana-local-cluster = { path = "../local-cluster", version = "=1.11.0" }
+solana-test-validator = { path = "../test-validator", version = "=1.11.0" }

 [package.metadata.docs.rs]
 targets = ["x86_64-unknown-linux-gnu"]
@@ -116,7 +116,7 @@ fn make_create_message(

     let instructions: Vec<_> = (0..num_instructions)
         .into_iter()
-        .map(|_| {
+        .flat_map(|_| {
             let program_id = if mint.is_some() {
                 inline_spl_token::id()
             } else {
@@ -148,7 +148,6 @@ fn make_create_message(

             instructions
         })
-        .flatten()
         .collect();

     Message::new(&instructions, Some(&keypair.pubkey()))
@@ -674,7 +673,7 @@ pub mod test {
     #[test]
     fn test_accounts_cluster_bench() {
         solana_logger::setup();
-        let validator_config = ValidatorConfig::default();
+        let validator_config = ValidatorConfig::default_for_test();
         let num_nodes = 1;
         let mut config = ClusterConfig {
             cluster_lamports: 10_000_000,
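The first two hunks are the iterator twin of the `Option` rewrite earlier: each closure yields a batch of instructions, and `flat_map` splices the batches directly instead of collecting nested `Vec`s and calling `.flatten()` afterwards. A self-contained sketch with strings standing in for real instructions:

```rust
fn main() {
    let num_instructions = 3;

    // flat_map = map + flatten in one pass; each per-iteration Vec is spliced
    // into the output instead of nesting.
    let instructions: Vec<String> = (0..num_instructions)
        .flat_map(|i| vec![format!("create #{i}"), format!("fund #{i}")])
        .collect();

    assert_eq!(instructions.len(), 6);
}
```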
@@ -1,20 +0,0 @@
<p align="center">
  <a href="https://solana.com">
    <img alt="Solana" src="https://i.imgur.com/IKyzQ6T.png" width="250" />
  </a>
</p>

# Solana AccountsDb Plugin Interface

This crate enables an AccountsDb plugin to be plugged into the Solana Validator runtime to take actions
at the time of each account update; for example, saving the account state to an external database. The plugin must implement the `AccountsDbPlugin` trait. Please see `accountsdb_plugin_interface.rs` for the interface definition.

The plugin should produce a `cdylib` dynamic library, which must expose a `C` function `_create_plugin()` that
instantiates the implementation of the interface.

The `solana-accountsdb-plugin-postgres` crate provides an example of how to create a plugin which saves the accounts data into an
external PostgreSQL database.

More information about Solana is available in the [Solana documentation](https://docs.solana.com/).

Still have questions? Ask us on [Discord](https://discordapp.com/invite/pquxPsq)
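For orientation, here is a rough sketch of the shape such a plugin takes. The trait and type names follow the interface named in this README, but the exact set of required methods varies across releases, so treat this as an illustration rather than a drop-in implementation:

```rust
// Hypothetical no-op plugin; assumes solana-accountsdb-plugin-interface as a
// dependency and crate-type = ["cdylib"] so the validator can dlopen it.
use solana_accountsdb_plugin_interface::accountsdb_plugin_interface::{
    AccountsDbPlugin, ReplicaAccountInfoVersions, Result, SlotStatus,
};

#[derive(Debug, Default)]
struct NoopPlugin;

impl AccountsDbPlugin for NoopPlugin {
    fn name(&self) -> &'static str {
        "NoopPlugin"
    }

    fn on_load(&mut self, _config_file: &str) -> Result<()> {
        Ok(()) // a real plugin parses its JSON config file here
    }

    fn on_unload(&mut self) {}

    fn update_account(
        &mut self,
        _account: ReplicaAccountInfoVersions,
        _slot: u64,
        _is_startup: bool,
    ) -> Result<()> {
        Ok(()) // e.g. write the account update to an external database
    }

    fn update_slot_status(
        &mut self,
        _slot: u64,
        _parent: Option<u64>,
        _status: SlotStatus,
    ) -> Result<()> {
        Ok(())
    }
}

/// The C-ABI entry point the validator's plugin manager looks up in the cdylib.
#[no_mangle]
#[allow(improper_ctypes_definitions)]
pub unsafe extern "C" fn _create_plugin() -> *mut dyn AccountsDbPlugin {
    let plugin: Box<dyn AccountsDbPlugin> = Box::new(NoopPlugin::default());
    Box::into_raw(plugin)
}
```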
@@ -1 +0,0 @@
pub mod accountsdb_plugin_interface;
@@ -1,31 +0,0 @@
[package]
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-accountsdb-plugin-manager"
description = "The Solana AccountsDb plugin manager."
version = "1.9.4"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
documentation = "https://docs.rs/solana-validator"

[dependencies]
bs58 = "0.4.0"
crossbeam-channel = "0.5"
libloading = "0.7.2"
log = "0.4.11"
serde = "1.0.130"
serde_derive = "1.0.103"
serde_json = "1.0.72"
solana-accountsdb-plugin-interface = { path = "../accountsdb-plugin-interface", version = "=1.9.4" }
solana-logger = { path = "../logger", version = "=1.9.4" }
solana-measure = { path = "../measure", version = "=1.9.4" }
solana-metrics = { path = "../metrics", version = "=1.9.4" }
solana-rpc = { path = "../rpc", version = "=1.9.4" }
solana-runtime = { path = "../runtime", version = "=1.9.4" }
solana-sdk = { path = "../sdk", version = "=1.9.4" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.4" }
thiserror = "1.0.30"

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
@@ -1,39 +0,0 @@
[package]
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
edition = "2021"
name = "solana-accountsdb-plugin-postgres"
description = "The Solana AccountsDb plugin for PostgreSQL database."
version = "1.9.4"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
documentation = "https://docs.rs/solana-validator"

[lib]
crate-type = ["cdylib", "rlib"]

[dependencies]
bs58 = "0.4.0"
chrono = { version = "0.4.11", features = ["serde"] }
crossbeam-channel = "0.5"
log = "0.4.14"
postgres = { version = "0.19.2", features = ["with-chrono-0_4"] }
postgres-types = { version = "0.2.2", features = ["derive"] }
serde = "1.0.130"
serde_derive = "1.0.103"
serde_json = "1.0.72"
solana-accountsdb-plugin-interface = { path = "../accountsdb-plugin-interface", version = "=1.9.4" }
solana-logger = { path = "../logger", version = "=1.9.4" }
solana-measure = { path = "../measure", version = "=1.9.4" }
solana-metrics = { path = "../metrics", version = "=1.9.4" }
solana-runtime = { path = "../runtime", version = "=1.9.4" }
solana-sdk = { path = "../sdk", version = "=1.9.4" }
solana-transaction-status = { path = "../transaction-status", version = "=1.9.4" }
thiserror = "1.0.30"
tokio-postgres = "0.7.4"

[dev-dependencies]
solana-account-decoder = { path = "../account-decoder", version = "=1.9.4" }

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
@@ -1,5 +0,0 @@
This is an example implementation of the AccountsDb plugin for a PostgreSQL database.
Please see `src/accountsdb_plugin_postgres.rs` for the format of the plugin's configuration file.

To create the schema objects for the database, please use `scripts/create_schema.sql`.
`scripts/drop_schema.sql` can be used to tear down the schema objects.
@@ -1,201 +0,0 @@
/**
 * This plugin implementation for PostgreSQL requires the following tables
 */
-- The table storing accounts

CREATE TABLE account (
    pubkey BYTEA PRIMARY KEY,
    owner BYTEA,
    lamports BIGINT NOT NULL,
    slot BIGINT NOT NULL,
    executable BOOL NOT NULL,
    rent_epoch BIGINT NOT NULL,
    data BYTEA,
    write_version BIGINT NOT NULL,
    updated_on TIMESTAMP NOT NULL
);

-- The table storing slot information
CREATE TABLE slot (
    slot BIGINT PRIMARY KEY,
    parent BIGINT,
    status VARCHAR(16) NOT NULL,
    updated_on TIMESTAMP NOT NULL
);

-- Types for Transactions

CREATE TYPE "TransactionErrorCode" AS ENUM (
    'AccountInUse',
    'AccountLoadedTwice',
    'AccountNotFound',
    'ProgramAccountNotFound',
    'InsufficientFundsForFee',
    'InvalidAccountForFee',
    'AlreadyProcessed',
    'BlockhashNotFound',
    'InstructionError',
    'CallChainTooDeep',
    'MissingSignatureForFee',
    'InvalidAccountIndex',
    'SignatureFailure',
    'InvalidProgramForExecution',
    'SanitizeFailure',
    'ClusterMaintenance',
    'AccountBorrowOutstanding',
    'WouldExceedMaxAccountCostLimit',
    'WouldExceedMaxBlockCostLimit',
    'UnsupportedVersion',
    'InvalidWritableAccount',
    'WouldExceedMaxAccountDataCostLimit',
    'TooManyAccountLocks',
    'AddressLookupError',
    'AddressLookupTableNotFound',
    'InvalidAddressLookupTableOwner',
    'InvalidAddressLookupTableData',
    'InvalidAddressLookupTableIndex'
);

CREATE TYPE "TransactionError" AS (
    error_code "TransactionErrorCode",
    error_detail VARCHAR(256)
);

CREATE TYPE "CompiledInstruction" AS (
    program_id_index SMALLINT,
    accounts SMALLINT[],
    data BYTEA
);

CREATE TYPE "InnerInstructions" AS (
    index SMALLINT,
    instructions "CompiledInstruction"[]
);

CREATE TYPE "TransactionTokenBalance" AS (
    account_index SMALLINT,
    mint VARCHAR(44),
    ui_token_amount DOUBLE PRECISION,
    owner VARCHAR(44)
);

CREATE TYPE "RewardType" AS ENUM (
    'Fee',
    'Rent',
    'Staking',
    'Voting'
);

CREATE TYPE "Reward" AS (
    pubkey VARCHAR(44),
    lamports BIGINT,
    post_balance BIGINT,
    reward_type "RewardType",
    commission SMALLINT
);

CREATE TYPE "TransactionStatusMeta" AS (
    error "TransactionError",
    fee BIGINT,
    pre_balances BIGINT[],
    post_balances BIGINT[],
    inner_instructions "InnerInstructions"[],
    log_messages TEXT[],
    pre_token_balances "TransactionTokenBalance"[],
    post_token_balances "TransactionTokenBalance"[],
    rewards "Reward"[]
);

CREATE TYPE "TransactionMessageHeader" AS (
    num_required_signatures SMALLINT,
    num_readonly_signed_accounts SMALLINT,
    num_readonly_unsigned_accounts SMALLINT
);

CREATE TYPE "TransactionMessage" AS (
    header "TransactionMessageHeader",
    account_keys BYTEA[],
    recent_blockhash BYTEA,
    instructions "CompiledInstruction"[]
);

CREATE TYPE "TransactionMessageAddressTableLookup" AS (
    account_key BYTEA,
    writable_indexes SMALLINT[],
    readonly_indexes SMALLINT[]
);

CREATE TYPE "TransactionMessageV0" AS (
    header "TransactionMessageHeader",
    account_keys BYTEA[],
    recent_blockhash BYTEA,
    instructions "CompiledInstruction"[],
    address_table_lookups "TransactionMessageAddressTableLookup"[]
);

CREATE TYPE "LoadedAddresses" AS (
    writable BYTEA[],
    readonly BYTEA[]
);

CREATE TYPE "LoadedMessageV0" AS (
    message "TransactionMessageV0",
    loaded_addresses "LoadedAddresses"
);

-- The table storing transactions
CREATE TABLE transaction (
    slot BIGINT NOT NULL,
    signature BYTEA NOT NULL,
    is_vote BOOL NOT NULL,
    message_type SMALLINT, -- 0: legacy, 1: v0 message
    legacy_message "TransactionMessage",
    v0_loaded_message "LoadedMessageV0",
    signatures BYTEA[],
    message_hash BYTEA,
    meta "TransactionStatusMeta",
    updated_on TIMESTAMP NOT NULL,
    CONSTRAINT transaction_pk PRIMARY KEY (slot, signature)
);

-- The table storing block metadata
CREATE TABLE block (
    slot BIGINT PRIMARY KEY,
    blockhash VARCHAR(44),
    rewards "Reward"[],
    block_time BIGINT,
    block_height BIGINT,
    updated_on TIMESTAMP NOT NULL
);

/**
 * The following is for keeping historical data for accounts and is not required for the plugin to work.
 */
-- The table storing historical data for accounts
CREATE TABLE account_audit (
    pubkey BYTEA,
    owner BYTEA,
    lamports BIGINT NOT NULL,
    slot BIGINT NOT NULL,
    executable BOOL NOT NULL,
    rent_epoch BIGINT NOT NULL,
    data BYTEA,
    write_version BIGINT NOT NULL,
    updated_on TIMESTAMP NOT NULL
);

CREATE INDEX account_audit_account_key ON account_audit (pubkey, write_version);

CREATE FUNCTION audit_account_update() RETURNS trigger AS $audit_account_update$
    BEGIN
        INSERT INTO account_audit (pubkey, owner, lamports, slot, executable, rent_epoch, data, write_version, updated_on)
            VALUES (OLD.pubkey, OLD.owner, OLD.lamports, OLD.slot,
                    OLD.executable, OLD.rent_epoch, OLD.data, OLD.write_version, OLD.updated_on);
        RETURN NEW;
    END;

$audit_account_update$ LANGUAGE plpgsql;

CREATE TRIGGER account_update_trigger AFTER UPDATE OR DELETE ON account
    FOR EACH ROW EXECUTE PROCEDURE audit_account_update();
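The `account` table above is what the plugin's `postgres`/`tokio-postgres` dependencies write into: one row per pubkey, with newer `write_version`s overwriting older ones. A hedged sketch of such an upsert with the blocking `postgres` client (connection string and values are placeholders; the plugin's real write path is more involved):

```rust
// Sketch only: assumes the `postgres` crate from the deleted Cargo.toml and a
// server provisioned with scripts/create_schema.sql (port 5433 per the conf file).
use postgres::{Client, NoTls};

fn main() -> Result<(), postgres::Error> {
    let mut client = Client::connect("host=localhost port=5433 user=solana", NoTls)?;

    // Mirrors the plugin's per-update write: upsert keyed on pubkey.
    client.execute(
        "INSERT INTO account (pubkey, owner, lamports, slot, executable, rent_epoch, data, write_version, updated_on) \
         VALUES ($1, $2, $3, $4, $5, $6, $7, $8, now()) \
         ON CONFLICT (pubkey) DO UPDATE SET \
           owner = excluded.owner, lamports = excluded.lamports, slot = excluded.slot, \
           executable = excluded.executable, rent_epoch = excluded.rent_epoch, \
           data = excluded.data, write_version = excluded.write_version, updated_on = excluded.updated_on",
        &[
            &vec![0u8; 32],    // pubkey BYTEA (placeholder)
            &vec![0u8; 32],    // owner BYTEA (placeholder)
            &0i64,             // lamports BIGINT
            &0i64,             // slot BIGINT
            &false,            // executable BOOL
            &0i64,             // rent_epoch BIGINT
            &Vec::<u8>::new(), // data BYTEA
            &0i64,             // write_version BIGINT
        ],
    )?;
    Ok(())
}
```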
@@ -1,26 +0,0 @@
/**
 * Script for cleaning up the schema for PostgreSQL used for the AccountsDb plugin.
 */

DROP TRIGGER account_update_trigger ON account;
DROP FUNCTION audit_account_update;
DROP TABLE account_audit;
DROP TABLE account;
DROP TABLE slot;
DROP TABLE transaction;
DROP TABLE block;

DROP TYPE "TransactionError" CASCADE;
DROP TYPE "TransactionErrorCode" CASCADE;
DROP TYPE "LoadedMessageV0" CASCADE;
DROP TYPE "LoadedAddresses" CASCADE;
DROP TYPE "TransactionMessageV0" CASCADE;
DROP TYPE "TransactionMessage" CASCADE;
DROP TYPE "TransactionMessageHeader" CASCADE;
DROP TYPE "TransactionMessageAddressTableLookup" CASCADE;
DROP TYPE "TransactionStatusMeta" CASCADE;
DROP TYPE "RewardType" CASCADE;
DROP TYPE "Reward" CASCADE;
DROP TYPE "TransactionTokenBalance" CASCADE;
DROP TYPE "InnerInstructions" CASCADE;
DROP TYPE "CompiledInstruction" CASCADE;
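Because `drop_schema.sql` is a multi-statement script, running it from Rust would go through `batch_execute`, which executes a semicolon-separated script in one call. A sketch under the same assumed connection parameters (the `include_str!` path is illustrative):

```rust
use postgres::{Client, NoTls};

fn teardown() -> Result<(), postgres::Error> {
    let mut client = Client::connect("host=localhost port=5433 user=solana", NoTls)?;
    // batch_execute runs a script containing multiple statements.
    client.batch_execute(include_str!("../scripts/drop_schema.sql"))
}

fn main() {
    teardown().unwrap();
}
```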
@@ -1,802 +0,0 @@
# This is a reference configuration file for the PostgreSQL database version 14.

# -----------------------------
# PostgreSQL configuration file
# -----------------------------
#
# This file consists of lines of the form:
#
#   name = value
#
# (The "=" is optional.) Whitespace may be used. Comments are introduced with
# "#" anywhere on a line. The complete list of parameter names and allowed
# values can be found in the PostgreSQL documentation.
#
# The commented-out settings shown in this file represent the default values.
# Re-commenting a setting is NOT sufficient to revert it to the default value;
# you need to reload the server.
#
# This file is read on server startup and when the server receives a SIGHUP
# signal. If you edit the file on a running system, you have to SIGHUP the
# server for the changes to take effect, run "pg_ctl reload", or execute
# "SELECT pg_reload_conf()". Some parameters, which are marked below,
# require a server shutdown and restart to take effect.
#
# Any parameter can also be given as a command-line option to the server, e.g.,
# "postgres -c log_connections=on". Some parameters can be changed at run time
# with the "SET" SQL command.
#
# Memory units:  B  = bytes            Time units:  us  = microseconds
#                kB = kilobytes                     ms  = milliseconds
#                MB = megabytes                     s   = seconds
#                GB = gigabytes                     min = minutes
#                TB = terabytes                     h   = hours
#                                                   d   = days


#------------------------------------------------------------------------------
# FILE LOCATIONS
#------------------------------------------------------------------------------

# The default values of these variables are driven from the -D command-line
# option or PGDATA environment variable, represented here as ConfigDir.

data_directory = '/var/lib/postgresql/14/main'    # use data in another directory
                                                  # (change requires restart)

hba_file = '/etc/postgresql/14/main/pg_hba.conf'      # host-based authentication file
                                                      # (change requires restart)
ident_file = '/etc/postgresql/14/main/pg_ident.conf'  # ident configuration file
                                                      # (change requires restart)

# If external_pid_file is not explicitly set, no extra PID file is written.
external_pid_file = '/var/run/postgresql/14-main.pid'  # write an extra PID file
                                                       # (change requires restart)


#------------------------------------------------------------------------------
# CONNECTIONS AND AUTHENTICATION
#------------------------------------------------------------------------------

# - Connection Settings -

#listen_addresses = 'localhost'    # what IP address(es) to listen on;
                                   # comma-separated list of addresses;
                                   # defaults to 'localhost'; use '*' for all
                                   # (change requires restart)
listen_addresses = '*'
port = 5433                        # (change requires restart)
max_connections = 200              # (change requires restart)
#superuser_reserved_connections = 3    # (change requires restart)
unix_socket_directories = '/var/run/postgresql'    # comma-separated list of directories
                                   # (change requires restart)
#unix_socket_group = ''            # (change requires restart)
#unix_socket_permissions = 0777    # begin with 0 to use octal notation
                                   # (change requires restart)
#bonjour = off                     # advertise server via Bonjour
                                   # (change requires restart)
#bonjour_name = ''                 # defaults to the computer name
                                   # (change requires restart)

# - TCP settings -
# see "man tcp" for details

#tcp_keepalives_idle = 0           # TCP_KEEPIDLE, in seconds;
                                   # 0 selects the system default
#tcp_keepalives_interval = 0       # TCP_KEEPINTVL, in seconds;
                                   # 0 selects the system default
#tcp_keepalives_count = 0          # TCP_KEEPCNT;
                                   # 0 selects the system default
#tcp_user_timeout = 0              # TCP_USER_TIMEOUT, in milliseconds;
                                   # 0 selects the system default

#client_connection_check_interval = 0    # time between checks for client
                                   # disconnection while running queries;
                                   # 0 for never

# - Authentication -

#authentication_timeout = 1min     # 1s-600s
#password_encryption = scram-sha-256    # scram-sha-256 or md5
#db_user_namespace = off

# GSSAPI using Kerberos
#krb_server_keyfile = 'FILE:${sysconfdir}/krb5.keytab'
#krb_caseins_users = off

# - SSL -

ssl = on
#ssl_ca_file = ''
ssl_cert_file = '/etc/ssl/certs/ssl-cert-snakeoil.pem'
#ssl_crl_file = ''
#ssl_crl_dir = ''
ssl_key_file = '/etc/ssl/private/ssl-cert-snakeoil.key'
#ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL'    # allowed SSL ciphers
#ssl_prefer_server_ciphers = on
#ssl_ecdh_curve = 'prime256v1'
#ssl_min_protocol_version = 'TLSv1.2'
#ssl_max_protocol_version = ''
#ssl_dh_params_file = ''
#ssl_passphrase_command = ''
#ssl_passphrase_command_supports_reload = off


#------------------------------------------------------------------------------
# RESOURCE USAGE (except WAL)
#------------------------------------------------------------------------------

# - Memory -

shared_buffers = 1GB               # min 128kB
                                   # (change requires restart)
#huge_pages = try                  # on, off, or try
                                   # (change requires restart)
#huge_page_size = 0                # zero for system default
                                   # (change requires restart)
#temp_buffers = 8MB                # min 800kB
#max_prepared_transactions = 0     # zero disables the feature
                                   # (change requires restart)
# Caution: it is not advisable to set max_prepared_transactions nonzero unless
# you actively intend to use prepared transactions.
#work_mem = 4MB                    # min 64kB
#hash_mem_multiplier = 1.0         # 1-1000.0 multiplier on hash table work_mem
#maintenance_work_mem = 64MB       # min 1MB
#autovacuum_work_mem = -1          # min 1MB, or -1 to use maintenance_work_mem
#logical_decoding_work_mem = 64MB  # min 64kB
#max_stack_depth = 2MB             # min 100kB
#shared_memory_type = mmap         # the default is the first option
                                   # supported by the operating system:
                                   #   mmap
                                   #   sysv
                                   #   windows
                                   # (change requires restart)
dynamic_shared_memory_type = posix # the default is the first option
                                   # supported by the operating system:
                                   #   posix
                                   #   sysv
                                   #   windows
                                   #   mmap
                                   # (change requires restart)
#min_dynamic_shared_memory = 0MB   # (change requires restart)

# - Disk -

#temp_file_limit = -1              # limits per-process temp file space
                                   # in kilobytes, or -1 for no limit

# - Kernel Resources -

#max_files_per_process = 1000      # min 64
                                   # (change requires restart)

# - Cost-Based Vacuum Delay -

#vacuum_cost_delay = 0             # 0-100 milliseconds (0 disables)
#vacuum_cost_page_hit = 1          # 0-10000 credits
#vacuum_cost_page_miss = 2         # 0-10000 credits
#vacuum_cost_page_dirty = 20       # 0-10000 credits
#vacuum_cost_limit = 200           # 1-10000 credits

# - Background Writer -

#bgwriter_delay = 200ms            # 10-10000ms between rounds
#bgwriter_lru_maxpages = 100       # max buffers written/round, 0 disables
#bgwriter_lru_multiplier = 2.0     # 0-10.0 multiplier on buffers scanned/round
#bgwriter_flush_after = 512kB      # measured in pages, 0 disables

# - Asynchronous Behavior -

#backend_flush_after = 0           # measured in pages, 0 disables
effective_io_concurrency = 1000    # 1-1000; 0 disables prefetching
#maintenance_io_concurrency = 10   # 1-1000; 0 disables prefetching
#max_worker_processes = 8          # (change requires restart)
#max_parallel_workers_per_gather = 2    # taken from max_parallel_workers
#max_parallel_maintenance_workers = 2   # taken from max_parallel_workers
#max_parallel_workers = 8          # maximum number of max_worker_processes that
                                   # can be used in parallel operations
#parallel_leader_participation = on
#old_snapshot_threshold = -1       # 1min-60d; -1 disables; 0 is immediate
                                   # (change requires restart)


#------------------------------------------------------------------------------
# WRITE-AHEAD LOG
#------------------------------------------------------------------------------

# - Settings -

wal_level = minimal                # minimal, replica, or logical
                                   # (change requires restart)
fsync = off                        # flush data to disk for crash safety
                                   # (turning this off can cause
                                   # unrecoverable data corruption)
synchronous_commit = off           # synchronization level;
                                   # off, local, remote_write, remote_apply, or on
#wal_sync_method = fsync           # the default is the first option
                                   # supported by the operating system:
                                   #   open_datasync
                                   #   fdatasync (default on Linux and FreeBSD)
                                   #   fsync
                                   #   fsync_writethrough
                                   #   open_sync
full_page_writes = off             # recover from partial page writes
#wal_log_hints = off               # also do full page writes of non-critical updates
                                   # (change requires restart)
#wal_compression = off             # enable compression of full-page writes
#wal_init_zero = on                # zero-fill new WAL files
#wal_recycle = on                  # recycle WAL files
#wal_buffers = -1                  # min 32kB, -1 sets based on shared_buffers
                                   # (change requires restart)
#wal_writer_delay = 200ms          # 1-10000 milliseconds
#wal_writer_flush_after = 1MB      # measured in pages, 0 disables
#wal_skip_threshold = 2MB

#commit_delay = 0                  # range 0-100000, in microseconds
#commit_siblings = 5               # range 1-1000

# - Checkpoints -

#checkpoint_timeout = 5min         # range 30s-1d
#checkpoint_completion_target = 0.9    # checkpoint target duration, 0.0 - 1.0
#checkpoint_flush_after = 256kB    # measured in pages, 0 disables
#checkpoint_warning = 30s          # 0 disables
max_wal_size = 1GB
min_wal_size = 80MB

# - Archiving -

#archive_mode = off                # enables archiving; off, on, or always
                                   # (change requires restart)
#archive_command = ''              # command to use to archive a logfile segment
                                   # placeholders: %p = path of file to archive
                                   #               %f = file name only
                                   # e.g. 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f'
#archive_timeout = 0               # force a logfile segment switch after this
                                   # number of seconds; 0 disables

# - Archive Recovery -

# These are only used in recovery mode.

#restore_command = ''              # command to use to restore an archived logfile segment
                                   # placeholders: %p = path of file to restore
                                   #               %f = file name only
                                   # e.g. 'cp /mnt/server/archivedir/%f %p'
#archive_cleanup_command = ''      # command to execute at every restartpoint
#recovery_end_command = ''         # command to execute at completion of recovery

# - Recovery Target -

# Set these only when performing a targeted recovery.

#recovery_target = ''              # 'immediate' to end recovery as soon as a
                                   # consistent state is reached
                                   # (change requires restart)
#recovery_target_name = ''         # the named restore point to which recovery will proceed
                                   # (change requires restart)
#recovery_target_time = ''         # the time stamp up to which recovery will proceed
                                   # (change requires restart)
#recovery_target_xid = ''          # the transaction ID up to which recovery will proceed
                                   # (change requires restart)
#recovery_target_lsn = ''          # the WAL LSN up to which recovery will proceed
                                   # (change requires restart)
#recovery_target_inclusive = on    # Specifies whether to stop:
                                   # just after the specified recovery target (on)
                                   # just before the recovery target (off)
                                   # (change requires restart)
#recovery_target_timeline = 'latest'    # 'current', 'latest', or timeline ID
                                   # (change requires restart)
#recovery_target_action = 'pause'  # 'pause', 'promote', 'shutdown'
                                   # (change requires restart)


#------------------------------------------------------------------------------
# REPLICATION
#------------------------------------------------------------------------------

# - Sending Servers -

# Set these on the primary and on any standby that will send replication data.

max_wal_senders = 0                # max number of walsender processes
                                   # (change requires restart)
#max_replication_slots = 10        # max number of replication slots
                                   # (change requires restart)
#wal_keep_size = 0                 # in megabytes; 0 disables
#max_slot_wal_keep_size = -1       # in megabytes; -1 disables
#wal_sender_timeout = 60s          # in milliseconds; 0 disables
#track_commit_timestamp = off      # collect timestamp of transaction commit
                                   # (change requires restart)

# - Primary Server -

# These settings are ignored on a standby server.

#synchronous_standby_names = ''    # standby servers that provide sync rep
                                   # method to choose sync standbys, number of sync standbys,
                                   # and comma-separated list of application_name
                                   # from standby(s); '*' = all
#vacuum_defer_cleanup_age = 0      # number of xacts by which cleanup is delayed

# - Standby Servers -

# These settings are ignored on a primary server.

#primary_conninfo = ''             # connection string to sending server
#primary_slot_name = ''            # replication slot on sending server
#promote_trigger_file = ''         # file name whose presence ends recovery
#hot_standby = on                  # "off" disallows queries during recovery
                                   # (change requires restart)
#max_standby_archive_delay = 30s   # max delay before canceling queries
                                   # when reading WAL from archive;
                                   # -1 allows indefinite delay
#max_standby_streaming_delay = 30s # max delay before canceling queries
                                   # when reading streaming WAL;
                                   # -1 allows indefinite delay
#wal_receiver_create_temp_slot = off    # create temp slot if primary_slot_name
                                   # is not set
#wal_receiver_status_interval = 10s    # send replies at least this often
                                   # 0 disables
#hot_standby_feedback = off        # send info from standby to prevent
                                   # query conflicts
#wal_receiver_timeout = 60s        # time that receiver waits for
                                   # communication from primary
                                   # in milliseconds; 0 disables
#wal_retrieve_retry_interval = 5s  # time to wait before retrying to
                                   # retrieve WAL after a failed attempt
#recovery_min_apply_delay = 0      # minimum delay for applying changes during recovery

# - Subscribers -

# These settings are ignored on a publisher.

#max_logical_replication_workers = 4    # taken from max_worker_processes
                                   # (change requires restart)
#max_sync_workers_per_subscription = 2  # taken from max_logical_replication_workers


#------------------------------------------------------------------------------
# QUERY TUNING
#------------------------------------------------------------------------------

# - Planner Method Configuration -

#enable_async_append = on
#enable_bitmapscan = on
#enable_gathermerge = on
#enable_hashagg = on
#enable_hashjoin = on
#enable_incremental_sort = on
#enable_indexscan = on
#enable_indexonlyscan = on
#enable_material = on
#enable_memoize = on
#enable_mergejoin = on
#enable_nestloop = on
#enable_parallel_append = on
#enable_parallel_hash = on
#enable_partition_pruning = on
#enable_partitionwise_join = off
#enable_partitionwise_aggregate = off
#enable_seqscan = on
#enable_sort = on
#enable_tidscan = on

# - Planner Cost Constants -

#seq_page_cost = 1.0               # measured on an arbitrary scale
#random_page_cost = 4.0            # same scale as above
#cpu_tuple_cost = 0.01             # same scale as above
#cpu_index_tuple_cost = 0.005      # same scale as above
#cpu_operator_cost = 0.0025        # same scale as above
#parallel_setup_cost = 1000.0      # same scale as above
#parallel_tuple_cost = 0.1         # same scale as above
#min_parallel_table_scan_size = 8MB
#min_parallel_index_scan_size = 512kB
#effective_cache_size = 4GB

#jit_above_cost = 100000           # perform JIT compilation if available
                                   # and query more expensive than this;
                                   # -1 disables
#jit_inline_above_cost = 500000    # inline small functions if query is
                                   # more expensive than this; -1 disables
#jit_optimize_above_cost = 500000  # use expensive JIT optimizations if
                                   # query is more expensive than this;
                                   # -1 disables

# - Genetic Query Optimizer -

#geqo = on
#geqo_threshold = 12
#geqo_effort = 5                   # range 1-10
#geqo_pool_size = 0                # selects default based on effort
#geqo_generations = 0              # selects default based on effort
#geqo_selection_bias = 2.0         # range 1.5-2.0
#geqo_seed = 0.0                   # range 0.0-1.0

# - Other Planner Options -

#default_statistics_target = 100   # range 1-10000
#constraint_exclusion = partition  # on, off, or partition
#cursor_tuple_fraction = 0.1       # range 0.0-1.0
#from_collapse_limit = 8
#jit = on                          # allow JIT compilation
#join_collapse_limit = 8           # 1 disables collapsing of explicit
                                   # JOIN clauses
#plan_cache_mode = auto            # auto, force_generic_plan or
                                   # force_custom_plan


#------------------------------------------------------------------------------
# REPORTING AND LOGGING
#------------------------------------------------------------------------------

# - Where to Log -

#log_destination = 'stderr'        # Valid values are combinations of
                                   # stderr, csvlog, syslog, and eventlog,
                                   # depending on platform. csvlog
                                   # requires logging_collector to be on.

# This is used when logging to stderr:
#logging_collector = off           # Enable capturing of stderr and csvlog
                                   # into log files. Required to be on for
                                   # csvlogs.
                                   # (change requires restart)

# These are only used if logging_collector is on:
#log_directory = 'log'             # directory where log files are written,
                                   # can be absolute or relative to PGDATA
#log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log'    # log file name pattern,
                                   # can include strftime() escapes
#log_file_mode = 0600              # creation mode for log files,
                                   # begin with 0 to use octal notation
#log_rotation_age = 1d             # Automatic rotation of logfiles will
                                   # happen after that time. 0 disables.
#log_rotation_size = 10MB          # Automatic rotation of logfiles will
                                   # happen after that much log output.
                                   # 0 disables.
#log_truncate_on_rotation = off    # If on, an existing log file with the
                                   # same name as the new log file will be
                                   # truncated rather than appended to.
                                   # But such truncation only occurs on
                                   # time-driven rotation, not on restarts
                                   # or size-driven rotation. Default is
                                   # off, meaning append to existing files
                                   # in all cases.

# These are relevant when logging to syslog:
#syslog_facility = 'LOCAL0'
#syslog_ident = 'postgres'
#syslog_sequence_numbers = on
#syslog_split_messages = on

# This is only relevant when logging to eventlog (Windows):
# (change requires restart)
#event_source = 'PostgreSQL'

# - When to Log -

#log_min_messages = warning        # values in order of decreasing detail:
                                   #   debug5
                                   #   debug4
                                   #   debug3
                                   #   debug2
                                   #   debug1
                                   #   info
                                   #   notice
                                   #   warning
                                   #   error
                                   #   log
                                   #   fatal
                                   #   panic

#log_min_error_statement = error   # values in order of decreasing detail:
                                   #   debug5
                                   #   debug4
                                   #   debug3
                                   #   debug2
                                   #   debug1
                                   #   info
                                   #   notice
                                   #   warning
                                   #   error
                                   #   log
                                   #   fatal
                                   #   panic (effectively off)

#log_min_duration_statement = -1   # -1 is disabled, 0 logs all statements
                                   # and their durations, > 0 logs only
                                   # statements running at least this number
                                   # of milliseconds

#log_min_duration_sample = -1      # -1 is disabled, 0 logs a sample of statements
                                   # and their durations, > 0 logs only a sample of
                                   # statements running at least this number
                                   # of milliseconds;
                                   # sample fraction is determined by log_statement_sample_rate

#log_statement_sample_rate = 1.0   # fraction of logged statements exceeding
                                   # log_min_duration_sample to be logged;
                                   # 1.0 logs all such statements, 0.0 never logs


#log_transaction_sample_rate = 0.0 # fraction of transactions whose statements
                                   # are logged regardless of their duration; 1.0 logs all
                                   # statements from all transactions, 0.0 never logs

# - What to Log -

#debug_print_parse = off
#debug_print_rewritten = off
#debug_print_plan = off
#debug_pretty_print = on
#log_autovacuum_min_duration = -1  # log autovacuum activity;
                                   # -1 disables, 0 logs all actions and
                                   # their durations, > 0 logs only
                                   # actions running at least this number
                                   # of milliseconds.
#log_checkpoints = off
#log_connections = off
#log_disconnections = off
#log_duration = off
#log_error_verbosity = default     # terse, default, or verbose messages
#log_hostname = off
log_line_prefix = '%m [%p] %q%u@%d '    # special values:
                                   #   %a = application name
                                   #   %u = user name
                                   #   %d = database name
                                   #   %r = remote host and port
                                   #   %h = remote host
                                   #   %b = backend type
                                   #   %p = process ID
                                   #   %P = process ID of parallel group leader
                                   #   %t = timestamp without milliseconds
                                   #   %m = timestamp with milliseconds
                                   #   %n = timestamp with milliseconds (as a Unix epoch)
                                   #   %Q = query ID (0 if none or not computed)
                                   #   %i = command tag
                                   #   %e = SQL state
                                   #   %c = session ID
                                   #   %l = session line number
                                   #   %s = session start timestamp
                                   #   %v = virtual transaction ID
                                   #   %x = transaction ID (0 if none)
                                   #   %q = stop here in non-session
                                   #        processes
                                   #   %% = '%'
                                   # e.g. '<%u%%%d> '
#log_lock_waits = off              # log lock waits >= deadlock_timeout
#log_recovery_conflict_waits = off # log standby recovery conflict waits
                                   # >= deadlock_timeout
#log_parameter_max_length = -1     # when logging statements, limit logged
                                   # bind-parameter values to N bytes;
                                   # -1 means print in full, 0 disables
#log_parameter_max_length_on_error = 0    # when logging an error, limit logged
                                   # bind-parameter values to N bytes;
                                   # -1 means print in full, 0 disables
#log_statement = 'none'            # none, ddl, mod, all
#log_replication_commands = off
#log_temp_files = -1               # log temporary files equal or larger
                                   # than the specified size in kilobytes;
                                   # -1 disables, 0 logs all temp files
log_timezone = 'Etc/UTC'


#------------------------------------------------------------------------------
# PROCESS TITLE
#------------------------------------------------------------------------------

cluster_name = '14/main'           # added to process titles if nonempty
                                   # (change requires restart)
#update_process_title = on


#------------------------------------------------------------------------------
# STATISTICS
#------------------------------------------------------------------------------

# - Query and Index Statistics Collector -

#track_activities = on
#track_activity_query_size = 1024  # (change requires restart)
#track_counts = on
#track_io_timing = off
#track_wal_io_timing = off
#track_functions = none            # none, pl, all
stats_temp_directory = '/var/run/postgresql/14-main.pg_stat_tmp'


# - Monitoring -

#compute_query_id = auto
#log_statement_stats = off
#log_parser_stats = off
#log_planner_stats = off
#log_executor_stats = off


#------------------------------------------------------------------------------
# AUTOVACUUM
#------------------------------------------------------------------------------

#autovacuum = on                   # Enable autovacuum subprocess? 'on'
                                   # requires track_counts to also be on.
#autovacuum_max_workers = 3        # max number of autovacuum subprocesses
                                   # (change requires restart)
#autovacuum_naptime = 1min         # time between autovacuum runs
#autovacuum_vacuum_threshold = 50  # min number of row updates before
                                   # vacuum
#autovacuum_vacuum_insert_threshold = 1000    # min number of row inserts
                                   # before vacuum; -1 disables insert
                                   # vacuums
#autovacuum_analyze_threshold = 50 # min number of row updates before
                                   # analyze
#autovacuum_vacuum_scale_factor = 0.2    # fraction of table size before vacuum
#autovacuum_vacuum_insert_scale_factor = 0.2    # fraction of inserts over table
                                   # size before insert vacuum
#autovacuum_analyze_scale_factor = 0.1    # fraction of table size before analyze
#autovacuum_freeze_max_age = 200000000    # maximum XID age before forced vacuum
                                   # (change requires restart)
#autovacuum_multixact_freeze_max_age = 400000000    # maximum multixact age
                                   # before forced vacuum
                                   # (change requires restart)
#autovacuum_vacuum_cost_delay = 2ms    # default vacuum cost delay for
                                   # autovacuum, in milliseconds;
                                   # -1 means use vacuum_cost_delay
#autovacuum_vacuum_cost_limit = -1 # default vacuum cost limit for
                                   # autovacuum, -1 means use
                                   # vacuum_cost_limit


#------------------------------------------------------------------------------
# CLIENT CONNECTION DEFAULTS
#------------------------------------------------------------------------------

# - Statement Behavior -

#client_min_messages = notice      # values in order of decreasing detail:
                                   #   debug5
                                   #   debug4
                                   #   debug3
                                   #   debug2
                                   #   debug1
                                   #   log
                                   #   notice
                                   #   warning
                                   #   error
#search_path = '"$user", public'   # schema names
#row_security = on
#default_table_access_method = 'heap'
#default_tablespace = ''           # a tablespace name, '' uses the default
#default_toast_compression = 'pglz'    # 'pglz' or 'lz4'
#temp_tablespaces = ''             # a list of tablespace names, '' uses
                                   # only default tablespace
#check_function_bodies = on
#default_transaction_isolation = 'read committed'
#default_transaction_read_only = off
#default_transaction_deferrable = off
#session_replication_role = 'origin'
#statement_timeout = 0             # in milliseconds, 0 is disabled
#lock_timeout = 0                  # in milliseconds, 0 is disabled
#idle_in_transaction_session_timeout = 0    # in milliseconds, 0 is disabled
#idle_session_timeout = 0          # in milliseconds, 0 is disabled
#vacuum_freeze_table_age = 150000000
#vacuum_freeze_min_age = 50000000
#vacuum_failsafe_age = 1600000000
#vacuum_multixact_freeze_table_age = 150000000
#vacuum_multixact_freeze_min_age = 5000000
#vacuum_multixact_failsafe_age = 1600000000
#bytea_output = 'hex'              # hex, escape
#xmlbinary = 'base64'
#xmloption = 'content'
#gin_pending_list_limit = 4MB

# - Locale and Formatting -

datestyle = 'iso, mdy'
#intervalstyle = 'postgres'
timezone = 'Etc/UTC'
#timezone_abbreviations = 'Default'    # Select the set of available time zone
                                   # abbreviations. Currently, there are
                                   #   Default
                                   #   Australia (historical usage)
                                   #   India
                                   # You can create your own file in
                                   # share/timezonesets/.
#extra_float_digits = 1            # min -15, max 3; any value >0 actually
                                   # selects precise output mode
#client_encoding = sql_ascii       # actually, defaults to database
                                   # encoding

# These settings are initialized by initdb, but they can be changed.
lc_messages = 'C.UTF-8'            # locale for system error message
                                   # strings
lc_monetary = 'C.UTF-8'            # locale for monetary formatting
lc_numeric = 'C.UTF-8'             # locale for number formatting
lc_time = 'C.UTF-8'                # locale for time formatting

# default configuration for text search
default_text_search_config = 'pg_catalog.english'

# - Shared Library Preloading -

#local_preload_libraries = ''
#session_preload_libraries = ''
#shared_preload_libraries = ''     # (change requires restart)
#jit_provider = 'llvmjit'          # JIT library to use

# - Other Defaults -

#dynamic_library_path = '$libdir'
#extension_destdir = ''            # prepend path when loading extensions
                                   # and shared objects (added by Debian)
#gin_fuzzy_search_limit = 0


#------------------------------------------------------------------------------
# LOCK MANAGEMENT
#------------------------------------------------------------------------------

#deadlock_timeout = 1s
#max_locks_per_transaction = 64    # min 10
                                   # (change requires restart)
#max_pred_locks_per_transaction = 64    # min 10
                                   # (change requires restart)
#max_pred_locks_per_relation = -2  # negative values mean
                                   # (max_pred_locks_per_transaction
                                   #  / -max_pred_locks_per_relation) - 1
#max_pred_locks_per_page = 2       # min 0


#------------------------------------------------------------------------------
# VERSION AND PLATFORM COMPATIBILITY
#------------------------------------------------------------------------------

# - Previous PostgreSQL Versions -

#array_nulls = on
#backslash_quote = safe_encoding   # on, off, or safe_encoding
#escape_string_warning = on
#lo_compat_privileges = off
#quote_all_identifiers = off
#standard_conforming_strings = on
#synchronize_seqscans = on

# - Other Platforms and Clients -

#transform_null_equals = off


#------------------------------------------------------------------------------
# ERROR HANDLING
#------------------------------------------------------------------------------

#exit_on_error = off               # terminate session on any error?
#restart_after_crash = on          # reinitialize after backend crash?
#data_sync_retry = off             # retry or panic on failure to fsync
                                   # data?
                                   # (change requires restart)
#recovery_init_sync_method = fsync # fsync, syncfs (Linux 5.8+)


#------------------------------------------------------------------------------
# CONFIG FILE INCLUDES
#------------------------------------------------------------------------------

# These options allow settings to be loaded from files other than the
# default postgresql.conf. Note that these are directives, not variable
# assignments, so they can usefully be given more than once.

include_dir = 'conf.d'             # include files ending in '.conf' from
                                   # a directory, e.g., 'conf.d'
#include_if_exists = '...'         # include file only if it exists
#include = '...'                   # include file


#------------------------------------------------------------------------------
# CUSTOMIZED OPTIONS
#------------------------------------------------------------------------------

# Add settings for extensions here
@@ -1,74 +0,0 @@
use {log::*, std::collections::HashSet};

#[derive(Debug)]
pub(crate) struct AccountsSelector {
    pub accounts: HashSet<Vec<u8>>,
    pub owners: HashSet<Vec<u8>>,
    pub select_all_accounts: bool,
}

impl AccountsSelector {
    pub fn default() -> Self {
        AccountsSelector {
            accounts: HashSet::default(),
            owners: HashSet::default(),
            select_all_accounts: true,
        }
    }

    pub fn new(accounts: &[String], owners: &[String]) -> Self {
        info!(
            "Creating AccountsSelector from accounts: {:?}, owners: {:?}",
            accounts, owners
        );

        let select_all_accounts = accounts.iter().any(|key| key == "*");
        if select_all_accounts {
            return AccountsSelector {
                accounts: HashSet::default(),
                owners: HashSet::default(),
                select_all_accounts,
            };
        }
        let accounts = accounts
            .iter()
            .map(|key| bs58::decode(key).into_vec().unwrap())
            .collect();
        let owners = owners
            .iter()
            .map(|key| bs58::decode(key).into_vec().unwrap())
            .collect();
        AccountsSelector {
            accounts,
            owners,
            select_all_accounts,
        }
    }

    pub fn is_account_selected(&self, account: &[u8], owner: &[u8]) -> bool {
        self.select_all_accounts || self.accounts.contains(account) || self.owners.contains(owner)
    }

    /// Check if any account is of interest at all
    pub fn is_enabled(&self) -> bool {
        self.select_all_accounts || !self.accounts.is_empty() || !self.owners.is_empty()
    }
}

#[cfg(test)]
pub(crate) mod tests {
    use super::*;

    #[test]
    fn test_create_accounts_selector() {
        AccountsSelector::new(
            &["9xQeWvG816bUx9EPjHmaT23yvVM2ZWbrrpZb9PusVFin".to_string()],
            &[],
        );

        AccountsSelector::new(
            &[],
            &["9xQeWvG816bUx9EPjHmaT23yvVM2ZWbrrpZb9PusVFin".to_string()],
        );
    }
}
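For orientation, a minimal usage sketch of the selector above; this is not part of the change set, the key bytes are illustrative, and it assumes `AccountsSelector` and the `bs58` crate are in scope:

// Sketch only: assumes `AccountsSelector` (above) and the `bs58` crate are in scope.
fn accounts_selector_demo() {
    // Select every account owned by one program (the key string is illustrative).
    let owner = "9xQeWvG816bUx9EPjHmaT23yvVM2ZWbrrpZb9PusVFin".to_string();
    let selector = AccountsSelector::new(&[], &[owner.clone()]);
    assert!(selector.is_enabled());

    // Membership checks take raw key bytes, so decode the base58 form first.
    let owner_bytes = bs58::decode(&owner).into_vec().unwrap();
    assert!(selector.is_account_selected(&[0u8; 32], &owner_bytes));

    // The "*" wildcard matches every account regardless of owner.
    let select_all = AccountsSelector::new(&["*".to_string()], &[]);
    assert!(select_all.is_account_selected(&[0u8; 32], &[1u8; 32]));
}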
@@ -1,466 +0,0 @@
use solana_measure::measure::Measure;
/// Main entry for the PostgreSQL plugin
use {
    crate::{
        accounts_selector::AccountsSelector,
        postgres_client::{ParallelPostgresClient, PostgresClientBuilder},
        transaction_selector::TransactionSelector,
    },
    bs58,
    log::*,
    serde_derive::{Deserialize, Serialize},
    serde_json,
    solana_accountsdb_plugin_interface::accountsdb_plugin_interface::{
        AccountsDbPlugin, AccountsDbPluginError, ReplicaAccountInfoVersions,
        ReplicaBlockInfoVersions, ReplicaTransactionInfoVersions, Result, SlotStatus,
    },
    solana_metrics::*,
    std::{fs::File, io::Read},
    thiserror::Error,
};

#[derive(Default)]
pub struct AccountsDbPluginPostgres {
    client: Option<ParallelPostgresClient>,
    accounts_selector: Option<AccountsSelector>,
    transaction_selector: Option<TransactionSelector>,
}

impl std::fmt::Debug for AccountsDbPluginPostgres {
    fn fmt(&self, _: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        Ok(())
    }
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AccountsDbPluginPostgresConfig {
    pub host: Option<String>,
    pub user: Option<String>,
    pub port: Option<u16>,
    pub connection_str: Option<String>,
    pub threads: Option<usize>,
    pub batch_size: Option<usize>,
    pub panic_on_db_errors: Option<bool>,
    /// Indicates whether to store historical data for accounts
    pub store_account_historical_data: Option<bool>,
}

#[derive(Error, Debug)]
pub enum AccountsDbPluginPostgresError {
    #[error("Error connecting to the backend data store. Error message: ({msg})")]
    DataStoreConnectionError { msg: String },

    #[error("Error preparing data store schema. Error message: ({msg})")]
    DataSchemaError { msg: String },

    #[error("Error in configuration. Error message: ({msg})")]
    ConfigurationError { msg: String },
}

impl AccountsDbPlugin for AccountsDbPluginPostgres {
    fn name(&self) -> &'static str {
        "AccountsDbPluginPostgres"
    }

    /// Do initialization for the PostgreSQL plugin.
    ///
    /// # Format of the config file:
    /// * The `accounts_selector` section allows the user to control account selection.
    ///     "accounts_selector" : {
    ///         "accounts" : \["pubkey-1", "pubkey-2", ..., "pubkey-n"\],
    ///     }
    /// or:
    ///     "accounts_selector" : {
    ///         "owners" : \["pubkey-1", "pubkey-2", ..., "pubkey-m"\]
    ///     }
    /// Accounts satisfying either the accounts condition or the owners condition will be selected.
    /// When only owners is specified,
    /// all accounts belonging to the owners will be streamed.
    /// The accounts field supports a wildcard to select all accounts:
    ///     "accounts_selector" : {
    ///         "accounts" : \["*"\],
    ///     }
    /// * "host", optional, specifies the PostgreSQL server.
    /// * "user", optional, specifies the PostgreSQL user.
    /// * "port", optional, specifies the PostgreSQL server's port.
    /// * "connection_str", optional, the custom PostgreSQL connection string.
    ///   Please refer to https://docs.rs/postgres/0.19.2/postgres/config/struct.Config.html for the connection configuration.
    ///   When `connection_str` is set, the values in "host", "user" and "port" are ignored. If `connection_str` is not given,
    ///   `host` and `user` must be given.
    /// * "store_account_historical_data", optional, set it to 'true' to store historical account data in the account_audit
    ///   table.
    /// * "threads", optional, specifies the number of worker threads for the plugin. A thread
    ///   maintains a PostgreSQL connection to the server. The default is '10'.
    /// * "batch_size", optional, specifies the batch size of bulk insert when the AccountsDb is created
    ///   from restoring a snapshot. The default is '10'.
    /// * "panic_on_db_errors", optional, controls whether to panic when there are errors replicating data to the
    ///   PostgreSQL database. The default is 'false'.
    /// * "transaction_selector", optional, controls if and which transactions to store. If this field is missing,
    ///   none of the transactions are stored.
    ///     "transaction_selector" : {
    ///         "mentions" : \["pubkey-1", "pubkey-2", ..., "pubkey-n"\],
    ///     }
    /// The `mentions` field supports a wildcard to select all transactions or all 'vote' transactions.
    /// For example, to select all transactions:
    ///     "transaction_selector" : {
    ///         "mentions" : \["*"\],
    ///     }
    /// To select all vote transactions:
    ///     "transaction_selector" : {
    ///         "mentions" : \["all_votes"\],
    ///     }
    /// # Examples
    ///
    /// {
    ///    "libpath": "/home/solana/target/release/libsolana_accountsdb_plugin_postgres.so",
    ///    "host": "host_foo",
    ///    "user": "solana",
    ///    "threads": 10,
    ///    "accounts_selector" : {
    ///       "owners" : ["9oT9R5ZyRovSVnt37QvVoBttGpNqR3J7unkb567NP8k3"]
    ///    }
    /// }
    fn on_load(&mut self, config_file: &str) -> Result<()> {
        solana_logger::setup_with_default("info");
        info!(
            "Loading plugin {:?} from config_file {:?}",
            self.name(),
            config_file
        );
        let mut file = File::open(config_file)?;
        let mut contents = String::new();
        file.read_to_string(&mut contents)?;

        let result: serde_json::Value = serde_json::from_str(&contents).unwrap();
        self.accounts_selector = Some(Self::create_accounts_selector_from_config(&result));
        self.transaction_selector = Some(Self::create_transaction_selector_from_config(&result));

        let result: serde_json::Result<AccountsDbPluginPostgresConfig> =
            serde_json::from_str(&contents);
        match result {
            Err(err) => {
                return Err(AccountsDbPluginError::ConfigFileReadError {
                    msg: format!(
                        "The config file is not in the JSON format expected: {:?}",
                        err
                    ),
                })
            }
            Ok(config) => {
                let client = PostgresClientBuilder::build_pararallel_postgres_client(&config)?;
                self.client = Some(client);
            }
        }

        Ok(())
    }

    fn on_unload(&mut self) {
        info!("Unloading plugin: {:?}", self.name());

        match &mut self.client {
            None => {}
            Some(client) => {
                client.join().unwrap();
            }
        }
    }

    fn update_account(
        &mut self,
        account: ReplicaAccountInfoVersions,
        slot: u64,
        is_startup: bool,
    ) -> Result<()> {
        let mut measure_all = Measure::start("accountsdb-plugin-postgres-update-account-main");
        match account {
            ReplicaAccountInfoVersions::V0_0_1(account) => {
                let mut measure_select =
                    Measure::start("accountsdb-plugin-postgres-update-account-select");
                if let Some(accounts_selector) = &self.accounts_selector {
                    if !accounts_selector.is_account_selected(account.pubkey, account.owner) {
                        return Ok(());
                    }
                } else {
                    return Ok(());
                }
                measure_select.stop();
                inc_new_counter_debug!(
                    "accountsdb-plugin-postgres-update-account-select-us",
                    measure_select.as_us() as usize,
                    100000,
                    100000
                );

                debug!(
                    "Updating account {:?} with owner {:?} at slot {:?} using account selector {:?}",
                    bs58::encode(account.pubkey).into_string(),
                    bs58::encode(account.owner).into_string(),
                    slot,
                    self.accounts_selector.as_ref().unwrap()
                );

                match &mut self.client {
                    None => {
                        return Err(AccountsDbPluginError::Custom(Box::new(
                            AccountsDbPluginPostgresError::DataStoreConnectionError {
                                msg: "There is no connection to the PostgreSQL database."
                                    .to_string(),
                            },
                        )));
                    }
                    Some(client) => {
                        let mut measure_update =
                            Measure::start("accountsdb-plugin-postgres-update-account-client");
                        let result = { client.update_account(account, slot, is_startup) };
                        measure_update.stop();

                        inc_new_counter_debug!(
                            "accountsdb-plugin-postgres-update-account-client-us",
                            measure_update.as_us() as usize,
                            100000,
                            100000
                        );

                        if let Err(err) = result {
                            return Err(AccountsDbPluginError::AccountsUpdateError {
                                msg: format!("Failed to persist the update of account to the PostgreSQL database. Error: {:?}", err)
                            });
                        }
                    }
                }
            }
        }

        measure_all.stop();

        inc_new_counter_debug!(
            "accountsdb-plugin-postgres-update-account-main-us",
            measure_all.as_us() as usize,
            100000,
            100000
        );

        Ok(())
    }

    fn update_slot_status(
        &mut self,
        slot: u64,
        parent: Option<u64>,
        status: SlotStatus,
    ) -> Result<()> {
        info!("Updating slot {:?} with status {:?}", slot, status);

        match &mut self.client {
            None => {
                return Err(AccountsDbPluginError::Custom(Box::new(
                    AccountsDbPluginPostgresError::DataStoreConnectionError {
                        msg: "There is no connection to the PostgreSQL database.".to_string(),
                    },
                )));
            }
            Some(client) => {
                let result = client.update_slot_status(slot, parent, status);

                if let Err(err) = result {
                    return Err(AccountsDbPluginError::SlotStatusUpdateError{
                        msg: format!("Failed to persist the update of slot to the PostgreSQL database. Error: {:?}", err)
                    });
                }
            }
        }

        Ok(())
    }

    fn notify_end_of_startup(&mut self) -> Result<()> {
        info!("Notifying the end of startup for accounts notifications");
        match &mut self.client {
            None => {
                return Err(AccountsDbPluginError::Custom(Box::new(
                    AccountsDbPluginPostgresError::DataStoreConnectionError {
                        msg: "There is no connection to the PostgreSQL database.".to_string(),
                    },
                )));
            }
            Some(client) => {
                let result = client.notify_end_of_startup();

                if let Err(err) = result {
                    return Err(AccountsDbPluginError::SlotStatusUpdateError{
                        msg: format!("Failed to notify the end of startup for accounts notifications. Error: {:?}", err)
                    });
                }
            }
        }
        Ok(())
    }

    fn notify_transaction(
        &mut self,
        transaction_info: ReplicaTransactionInfoVersions,
        slot: u64,
    ) -> Result<()> {
        match &mut self.client {
            None => {
                return Err(AccountsDbPluginError::Custom(Box::new(
                    AccountsDbPluginPostgresError::DataStoreConnectionError {
                        msg: "There is no connection to the PostgreSQL database.".to_string(),
                    },
                )));
            }
            Some(client) => match transaction_info {
                ReplicaTransactionInfoVersions::V0_0_1(transaction_info) => {
                    if let Some(transaction_selector) = &self.transaction_selector {
                        if !transaction_selector.is_transaction_selected(
                            transaction_info.is_vote,
                            transaction_info.transaction.message().account_keys_iter(),
                        ) {
                            return Ok(());
                        }
                    } else {
                        return Ok(());
                    }

                    let result = client.log_transaction_info(transaction_info, slot);

                    if let Err(err) = result {
                        return Err(AccountsDbPluginError::SlotStatusUpdateError{
                            msg: format!("Failed to persist the transaction info to the PostgreSQL database. Error: {:?}", err)
                        });
                    }
                }
            },
        }

        Ok(())
    }

    fn notify_block_metadata(&mut self, block_info: ReplicaBlockInfoVersions) -> Result<()> {
        match &mut self.client {
            None => {
                return Err(AccountsDbPluginError::Custom(Box::new(
                    AccountsDbPluginPostgresError::DataStoreConnectionError {
                        msg: "There is no connection to the PostgreSQL database.".to_string(),
                    },
                )));
            }
            Some(client) => match block_info {
                ReplicaBlockInfoVersions::V0_0_1(block_info) => {
                    let result = client.update_block_metadata(block_info);

                    if let Err(err) = result {
                        return Err(AccountsDbPluginError::SlotStatusUpdateError{
                            msg: format!("Failed to persist the update of block metadata to the PostgreSQL database. Error: {:?}", err)
                        });
                    }
                }
            },
        }

        Ok(())
    }

    /// Check if the plugin is interested in account data.
    /// Default is true -- if the plugin is not interested in
    /// account data, please return false.
    fn account_data_notifications_enabled(&self) -> bool {
        self.accounts_selector
            .as_ref()
            .map_or_else(|| false, |selector| selector.is_enabled())
    }

    /// Check if the plugin is interested in transaction data.
    fn transaction_notifications_enabled(&self) -> bool {
        self.transaction_selector
            .as_ref()
            .map_or_else(|| false, |selector| selector.is_enabled())
    }
}

impl AccountsDbPluginPostgres {
    fn create_accounts_selector_from_config(config: &serde_json::Value) -> AccountsSelector {
        let accounts_selector = &config["accounts_selector"];

        if accounts_selector.is_null() {
            AccountsSelector::default()
        } else {
            let accounts = &accounts_selector["accounts"];
            let accounts: Vec<String> = if accounts.is_array() {
                accounts
                    .as_array()
                    .unwrap()
                    .iter()
                    .map(|val| val.as_str().unwrap().to_string())
                    .collect()
            } else {
                Vec::default()
            };
            let owners = &accounts_selector["owners"];
            let owners: Vec<String> = if owners.is_array() {
                owners
                    .as_array()
                    .unwrap()
                    .iter()
                    .map(|val| val.as_str().unwrap().to_string())
                    .collect()
            } else {
                Vec::default()
            };
            AccountsSelector::new(&accounts, &owners)
        }
    }

    fn create_transaction_selector_from_config(config: &serde_json::Value) -> TransactionSelector {
        let transaction_selector = &config["transaction_selector"];

        if transaction_selector.is_null() {
            TransactionSelector::default()
        } else {
            let accounts = &transaction_selector["mentions"];
            let accounts: Vec<String> = if accounts.is_array() {
                accounts
                    .as_array()
                    .unwrap()
                    .iter()
                    .map(|val| val.as_str().unwrap().to_string())
                    .collect()
            } else {
                Vec::default()
            };
            TransactionSelector::new(&accounts)
        }
    }

    pub fn new() -> Self {
        Self::default()
    }
}

#[no_mangle]
#[allow(improper_ctypes_definitions)]
/// # Safety
///
/// This function returns the AccountsDbPluginPostgres pointer as trait AccountsDbPlugin.
pub unsafe extern "C" fn _create_plugin() -> *mut dyn AccountsDbPlugin {
    let plugin = AccountsDbPluginPostgres::new();
    let plugin: Box<dyn AccountsDbPlugin> = Box::new(plugin);
    Box::into_raw(plugin)
}

#[cfg(test)]
pub(crate) mod tests {
    use {super::*, serde_json};

    #[test]
    fn test_accounts_selector_from_config() {
        let config = "{\"accounts_selector\" : { \
           \"owners\" : [\"9xQeWvG816bUx9EPjHmaT23yvVM2ZWbrrpZb9PusVFin\"] \
        }}";

        let config: serde_json::Value = serde_json::from_str(config).unwrap();
        AccountsDbPluginPostgres::create_accounts_selector_from_config(&config);
    }
}
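To make `on_load`'s two-pass parse concrete, here is a small self-contained sketch; it is not from the change set, and the `PluginConfig` struct and field values are hypothetical stand-ins for `AccountsDbPluginPostgresConfig`:

// Sketch only: mirrors the two-pass parse in on_load with illustrative values.
use serde::Deserialize;

#[derive(Debug, Deserialize)]
struct PluginConfig {
    host: Option<String>,
    user: Option<String>,
    threads: Option<usize>,
}

fn parse_demo() {
    let contents = r#"{
        "host": "host_foo",
        "user": "solana",
        "threads": 10,
        "accounts_selector": { "owners": ["9oT9R5ZyRovSVnt37QvVoBttGpNqR3J7unkb567NP8k3"] }
    }"#;

    // Pass 1: an untyped Value, used to pull out the selector sections.
    let value: serde_json::Value = serde_json::from_str(contents).unwrap();
    assert!(!value["accounts_selector"].is_null());

    // Pass 2: the typed struct; unknown keys like accounts_selector are ignored by default.
    let config: PluginConfig = serde_json::from_str(contents).unwrap();
    assert_eq!(config.threads, Some(10));
}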
@@ -1,4 +0,0 @@
pub mod accounts_selector;
pub mod accountsdb_plugin_postgres;
pub mod postgres_client;
pub mod transaction_selector;
File diff suppressed because it is too large
@@ -1,97 +0,0 @@
use {
    crate::{
        accountsdb_plugin_postgres::{
            AccountsDbPluginPostgresConfig, AccountsDbPluginPostgresError,
        },
        postgres_client::{
            postgres_client_transaction::DbReward, SimplePostgresClient, UpdateBlockMetadataRequest,
        },
    },
    chrono::Utc,
    log::*,
    postgres::{Client, Statement},
    solana_accountsdb_plugin_interface::accountsdb_plugin_interface::{
        AccountsDbPluginError, ReplicaBlockInfo,
    },
};

#[derive(Clone, Debug)]
pub struct DbBlockInfo {
    pub slot: i64,
    pub blockhash: String,
    pub rewards: Vec<DbReward>,
    pub block_time: Option<i64>,
    pub block_height: Option<i64>,
}

impl<'a> From<&ReplicaBlockInfo<'a>> for DbBlockInfo {
    fn from(block_info: &ReplicaBlockInfo) -> Self {
        Self {
            slot: block_info.slot as i64,
            blockhash: block_info.blockhash.to_string(),
            rewards: block_info.rewards.iter().map(DbReward::from).collect(),
            block_time: block_info.block_time,
            block_height: block_info
                .block_height
                .map(|block_height| block_height as i64),
        }
    }
}

impl SimplePostgresClient {
    pub(crate) fn build_block_metadata_upsert_statement(
        client: &mut Client,
        config: &AccountsDbPluginPostgresConfig,
    ) -> Result<Statement, AccountsDbPluginError> {
        let stmt =
            "INSERT INTO block (slot, blockhash, rewards, block_time, block_height, updated_on) \
            VALUES ($1, $2, $3, $4, $5, $6)";

        let stmt = client.prepare(stmt);

        match stmt {
            Err(err) => {
                return Err(AccountsDbPluginError::Custom(Box::new(AccountsDbPluginPostgresError::DataSchemaError {
                    msg: format!(
                        "Error in preparing for the block metadata update PostgreSQL database: ({}) host: {:?} user: {:?} config: {:?}",
                        err, config.host, config.user, config
                    ),
                })));
            }
            Ok(stmt) => Ok(stmt),
        }
    }

    pub(crate) fn update_block_metadata_impl(
        &mut self,
        block_info: UpdateBlockMetadataRequest,
    ) -> Result<(), AccountsDbPluginError> {
        let client = self.client.get_mut().unwrap();
        let statement = &client.update_block_metadata_stmt;
        let client = &mut client.client;
        let updated_on = Utc::now().naive_utc();

        let block_info = block_info.block_info;
        let result = client.query(
            statement,
            &[
                &block_info.slot,
                &block_info.blockhash,
                &block_info.rewards,
                &block_info.block_time,
                &block_info.block_height,
                &updated_on,
            ],
        );

        if let Err(err) = result {
            let msg = format!(
                "Failed to persist the update of block metadata to the PostgreSQL database. Error: {:?}",
                err);
            error!("{}", msg);
            return Err(AccountsDbPluginError::AccountsUpdateError { msg });
        }

        Ok(())
    }
}
File diff suppressed because it is too large
@@ -1,194 +0,0 @@
/// The transaction selector is responsible for filtering transactions
/// in the plugin framework.
use {log::*, solana_sdk::pubkey::Pubkey, std::collections::HashSet};

pub(crate) struct TransactionSelector {
    pub mentioned_addresses: HashSet<Vec<u8>>,
    pub select_all_transactions: bool,
    pub select_all_vote_transactions: bool,
}

#[allow(dead_code)]
impl TransactionSelector {
    pub fn default() -> Self {
        Self {
            mentioned_addresses: HashSet::default(),
            select_all_transactions: false,
            select_all_vote_transactions: false,
        }
    }

    /// Create a selector based on the mentioned addresses
    /// To select all transactions use ["*"] or ["all"]
    /// To select all vote transactions, use ["all_votes"]
    /// To select transactions mentioning specific addresses use ["<pubkey1>", "<pubkey2>", ...]
    pub fn new(mentioned_addresses: &[String]) -> Self {
        info!(
            "Creating TransactionSelector from addresses: {:?}",
            mentioned_addresses
        );

        let select_all_transactions = mentioned_addresses
            .iter()
            .any(|key| key == "*" || key == "all");
        if select_all_transactions {
            return Self {
                mentioned_addresses: HashSet::default(),
                select_all_transactions,
                select_all_vote_transactions: true,
            };
        }
        let select_all_vote_transactions = mentioned_addresses.iter().any(|key| key == "all_votes");
        if select_all_vote_transactions {
            return Self {
                mentioned_addresses: HashSet::default(),
                select_all_transactions,
                select_all_vote_transactions: true,
            };
        }

        let mentioned_addresses = mentioned_addresses
            .iter()
            .map(|key| bs58::decode(key).into_vec().unwrap())
            .collect();

        Self {
            mentioned_addresses,
            select_all_transactions: false,
            select_all_vote_transactions: false,
        }
    }

    /// Check if a transaction is of interest.
    pub fn is_transaction_selected(
        &self,
        is_vote: bool,
        mentioned_addresses: Box<dyn Iterator<Item = &Pubkey> + '_>,
    ) -> bool {
        if !self.is_enabled() {
            return false;
        }

        if self.select_all_transactions || (self.select_all_vote_transactions && is_vote) {
            return true;
        }
        for address in mentioned_addresses {
            if self.mentioned_addresses.contains(address.as_ref()) {
                return true;
            }
        }
        false
    }

    /// Check if any transaction is of interest at all
    pub fn is_enabled(&self) -> bool {
        self.select_all_transactions
            || self.select_all_vote_transactions
            || !self.mentioned_addresses.is_empty()
    }
}

#[cfg(test)]
pub(crate) mod tests {
    use super::*;

    #[test]
    fn test_select_transaction() {
        let pubkey1 = Pubkey::new_unique();
        let pubkey2 = Pubkey::new_unique();

        let selector = TransactionSelector::new(&[pubkey1.to_string()]);

        assert!(selector.is_enabled());

        let addresses = [pubkey1];

        assert!(selector.is_transaction_selected(false, Box::new(addresses.iter())));

        let addresses = [pubkey2];
        assert!(!selector.is_transaction_selected(false, Box::new(addresses.iter())));

        let addresses = [pubkey1, pubkey2];
        assert!(selector.is_transaction_selected(false, Box::new(addresses.iter())));
    }

    #[test]
    fn test_select_all_transaction_using_wildcard() {
        let pubkey1 = Pubkey::new_unique();
        let pubkey2 = Pubkey::new_unique();

        let selector = TransactionSelector::new(&["*".to_string()]);

        assert!(selector.is_enabled());

        let addresses = [pubkey1];

        assert!(selector.is_transaction_selected(false, Box::new(addresses.iter())));

        let addresses = [pubkey2];
        assert!(selector.is_transaction_selected(false, Box::new(addresses.iter())));

        let addresses = [pubkey1, pubkey2];
        assert!(selector.is_transaction_selected(false, Box::new(addresses.iter())));
    }

    #[test]
    fn test_select_all_transaction_all() {
        let pubkey1 = Pubkey::new_unique();
        let pubkey2 = Pubkey::new_unique();

        let selector = TransactionSelector::new(&["all".to_string()]);

        assert!(selector.is_enabled());

        let addresses = [pubkey1];

        assert!(selector.is_transaction_selected(false, Box::new(addresses.iter())));

        let addresses = [pubkey2];
        assert!(selector.is_transaction_selected(false, Box::new(addresses.iter())));

        let addresses = [pubkey1, pubkey2];
        assert!(selector.is_transaction_selected(false, Box::new(addresses.iter())));
    }

    #[test]
    fn test_select_all_vote_transaction() {
        let pubkey1 = Pubkey::new_unique();
        let pubkey2 = Pubkey::new_unique();

        let selector = TransactionSelector::new(&["all_votes".to_string()]);

        assert!(selector.is_enabled());

        let addresses = [pubkey1];

        assert!(!selector.is_transaction_selected(false, Box::new(addresses.iter())));

        let addresses = [pubkey2];
        assert!(selector.is_transaction_selected(true, Box::new(addresses.iter())));

        let addresses = [pubkey1, pubkey2];
        assert!(selector.is_transaction_selected(true, Box::new(addresses.iter())));
    }

    #[test]
    fn test_select_no_transaction() {
        let pubkey1 = Pubkey::new_unique();
        let pubkey2 = Pubkey::new_unique();

        let selector = TransactionSelector::new(&[]);

        assert!(!selector.is_enabled());

        let addresses = [pubkey1];

        assert!(!selector.is_transaction_selected(false, Box::new(addresses.iter())));

        let addresses = [pubkey2];
        assert!(!selector.is_transaction_selected(true, Box::new(addresses.iter())));

        let addresses = [pubkey1, pubkey2];
        assert!(!selector.is_transaction_selected(true, Box::new(addresses.iter())));
    }
}
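A compact sketch of the three selector modes documented above; not part of the change set, and it assumes `TransactionSelector` and `solana_sdk::pubkey::Pubkey` are in scope:

// Sketch only: the three modes of TransactionSelector::new, per the docs above.
fn selector_modes_demo() {
    let program = Pubkey::new_unique();

    // Mode 1: explicit mentions -- only transactions touching `program` match.
    let by_mention = TransactionSelector::new(&[program.to_string()]);
    assert!(by_mention.is_transaction_selected(false, Box::new([program].iter())));

    // Mode 2: "*" (or "all") -- every transaction matches, vote or not.
    let all = TransactionSelector::new(&["*".to_string()]);
    assert!(all.is_transaction_selected(false, Box::new(std::iter::empty())));

    // Mode 3: "all_votes" -- only vote transactions match.
    let votes = TransactionSelector::new(&["all_votes".to_string()]);
    assert!(votes.is_transaction_selected(true, Box::new(std::iter::empty())));
    assert!(!votes.is_transaction_selected(false, Box::new(std::iter::empty())));
}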
@@ -2,7 +2,7 @@
 authors = ["Solana Maintainers <maintainers@solana.foundation>"]
 edition = "2021"
 name = "solana-banking-bench"
-version = "1.9.4"
+version = "1.11.0"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -14,17 +14,17 @@ crossbeam-channel = "0.5"
 log = "0.4.14"
 rand = "0.7.0"
 rayon = "1.5.1"
-solana-core = { path = "../core", version = "=1.9.4" }
-solana-gossip = { path = "../gossip", version = "=1.9.4" }
-solana-ledger = { path = "../ledger", version = "=1.9.4" }
-solana-logger = { path = "../logger", version = "=1.9.4" }
-solana-measure = { path = "../measure", version = "=1.9.4" }
-solana-perf = { path = "../perf", version = "=1.9.4" }
-solana-poh = { path = "../poh", version = "=1.9.4" }
-solana-runtime = { path = "../runtime", version = "=1.9.4" }
-solana-streamer = { path = "../streamer", version = "=1.9.4" }
-solana-sdk = { path = "../sdk", version = "=1.9.4" }
-solana-version = { path = "../version", version = "=1.9.4" }
+solana-core = { path = "../core", version = "=1.11.0" }
+solana-gossip = { path = "../gossip", version = "=1.11.0" }
+solana-ledger = { path = "../ledger", version = "=1.11.0" }
+solana-logger = { path = "../logger", version = "=1.11.0" }
+solana-measure = { path = "../measure", version = "=1.11.0" }
+solana-perf = { path = "../perf", version = "=1.11.0" }
+solana-poh = { path = "../poh", version = "=1.11.0" }
+solana-runtime = { path = "../runtime", version = "=1.11.0" }
+solana-sdk = { path = "../sdk", version = "=1.11.0" }
+solana-streamer = { path = "../streamer", version = "=1.11.0" }
+solana-version = { path = "../version", version = "=1.11.0" }
 
 [package.metadata.docs.rs]
 targets = ["x86_64-unknown-linux-gnu"]
@@ -1,7 +1,7 @@
 #![allow(clippy::integer_arithmetic)]
 use {
     clap::{crate_description, crate_name, value_t, App, Arg},
-    crossbeam_channel::unbounded,
+    crossbeam_channel::{unbounded, Receiver},
     log::*,
     rand::{thread_rng, Rng},
     rayon::prelude::*,
@@ -11,6 +11,7 @@ use {
         blockstore::Blockstore,
         genesis_utils::{create_genesis_config, GenesisConfigInfo},
         get_tmp_ledger_path,
+        leader_schedule_cache::LeaderScheduleCache,
     },
     solana_measure::measure::Measure,
     solana_perf::packet::to_packet_batches,
@@ -28,7 +29,7 @@ use {
     },
     solana_streamer::socket::SocketAddrSpace,
     std::{
-        sync::{atomic::Ordering, mpsc::Receiver, Arc, Mutex, RwLock},
+        sync::{atomic::Ordering, Arc, Mutex, RwLock},
         thread::sleep,
         time::{Duration, Instant},
     },
@@ -174,6 +175,11 @@ fn main() {
     let mut bank_forks = BankForks::new(bank0);
     let mut bank = bank_forks.working_bank();
 
+    // set cost tracker limits to MAX so it will not filter out TXs
+    bank.write_cost_tracker()
+        .unwrap()
+        .set_limits(std::u64::MAX, std::u64::MAX, std::u64::MAX);
+
     info!("threads: {} txs: {}", num_threads, total_num_transactions);
 
     let same_payer = matches.is_present("same_payer");
@@ -218,8 +224,13 @@ fn main() {
     let blockstore = Arc::new(
         Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger"),
     );
-    let (exit, poh_recorder, poh_service, signal_receiver) =
-        create_test_recorder(&bank, &blockstore, None);
+    let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank));
+    let (exit, poh_recorder, poh_service, signal_receiver) = create_test_recorder(
+        &bank,
+        &blockstore,
+        None,
+        Some(leader_schedule_cache.clone()),
+    );
     let cluster_info = ClusterInfo::new(
         Node::new_localhost().info,
         Arc::new(Keypair::new()),
@@ -329,9 +340,17 @@ fn main() {
             bank = bank_forks.working_bank();
             insert_time.stop();
 
+            // set cost tracker limits to MAX so it will not filter out TXs
+            bank.write_cost_tracker().unwrap().set_limits(
+                std::u64::MAX,
+                std::u64::MAX,
+                std::u64::MAX,
+            );
+
             poh_recorder.lock().unwrap().set_bank(&bank);
             assert!(poh_recorder.lock().unwrap().bank().is_some());
             if bank.slot() > 32 {
+                leader_schedule_cache.set_root(&bank);
                 bank_forks.set_root(root, &AbsRequestSender::default(), None);
                 root += 1;
             }
@@ -1,6 +1,6 @@
 [package]
 name = "solana-banks-client"
-version = "1.9.4"
+version = "1.11.0"
 description = "Solana banks client"
 authors = ["Solana Maintainers <maintainers@solana.foundation>"]
 repository = "https://github.com/solana-labs/solana"
@@ -10,19 +10,19 @@ documentation = "https://docs.rs/solana-banks-client"
 edition = "2021"
 
 [dependencies]
-borsh = "0.9.1"
+borsh = "0.9.3"
 futures = "0.3"
-solana-banks-interface = { path = "../banks-interface", version = "=1.9.4" }
-solana-program = { path = "../sdk/program", version = "=1.9.4" }
-solana-sdk = { path = "../sdk", version = "=1.9.4" }
+solana-banks-interface = { path = "../banks-interface", version = "=1.11.0" }
+solana-program = { path = "../sdk/program", version = "=1.11.0" }
+solana-sdk = { path = "../sdk", version = "=1.11.0" }
 tarpc = { version = "0.27.2", features = ["full"] }
 thiserror = "1.0"
 tokio = { version = "1", features = ["full"] }
 tokio-serde = { version = "0.8", features = ["bincode"] }
 
 [dev-dependencies]
-solana-runtime = { path = "../runtime", version = "=1.9.4" }
-solana-banks-server = { path = "../banks-server", version = "=1.9.4" }
+solana-banks-server = { path = "../banks-server", version = "=1.11.0" }
+solana-runtime = { path = "../runtime", version = "=1.11.0" }
 
 [lib]
 crate-type = ["lib"]
@@ -1,5 +1,8 @@
 use {
-    solana_sdk::{transaction::TransactionError, transport::TransportError},
+    solana_sdk::{
+        transaction::TransactionError, transaction_context::TransactionReturnData,
+        transport::TransportError,
+    },
     std::io,
     tarpc::client::RpcError,
     thiserror::Error,
@@ -25,6 +28,7 @@ pub enum BanksClientError {
         err: TransactionError,
         logs: Vec<String>,
         units_consumed: u64,
+        return_data: Option<TransactionReturnData>,
     },
 }
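For context, a sketch of how the enlarged variant might be consumed; the hunk above shows only the fields, so the variant name `SimulationError` used here is an assumption, not confirmed by the diff:

// Sketch only: the variant name `SimulationError` is assumed; the hunk above
// shows only its fields (err, logs, units_consumed, return_data).
fn report(err: BanksClientError) {
    match err {
        BanksClientError::SimulationError {
            err,
            logs,
            units_consumed,
            return_data,
        } => {
            eprintln!("simulation failed: {:?} ({} compute units)", err, units_consumed);
            for line in logs {
                eprintln!("log: {}", line);
            }
            // `return_data` is the field added by this change.
            if let Some(data) = return_data {
                eprintln!("returned {} bytes from {}", data.data.len(), data.program_id);
            }
        }
        other => eprintln!("client error: {:?}", other),
    }
}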
@@ -5,9 +5,11 @@
 //! but they are undocumented, may change over time, and are generally more
 //! cumbersome to use.
 
-pub use solana_banks_interface::{BanksClient as TarpcClient, TransactionStatus};
-use {
+pub use {
+    crate::error::BanksClientError,
+    solana_banks_interface::{BanksClient as TarpcClient, TransactionStatus},
+};
+use {
     borsh::BorshDeserialize,
     futures::{future::join_all, Future, FutureExt, TryFutureExt},
     solana_banks_interface::{BanksRequest, BanksResponse, BanksTransactionResultWithSimulation},
@@ -21,9 +23,7 @@ use {
         message::Message,
         signature::Signature,
         transaction::{self, Transaction},
-        transport,
     },
-    std::io,
     tarpc::{
         client::{self, NewClient, RequestDispatch},
         context::{self, Context},
@@ -60,10 +60,9 @@ impl BanksClient {
         &mut self,
         ctx: Context,
         transaction: Transaction,
-    ) -> impl Future<Output = io::Result<()>> + '_ {
+    ) -> impl Future<Output = Result<(), BanksClientError>> + '_ {
         self.inner
             .send_transaction_with_context(ctx, transaction)
-            .map_err(BanksClientError::from) // Remove this when return Err type updated to BanksClientError
             .map_err(Into::into)
     }
 
@@ -75,11 +74,10 @@ impl BanksClient {
         &mut self,
         ctx: Context,
         commitment: CommitmentLevel,
-    ) -> impl Future<Output = io::Result<(FeeCalculator, Hash, u64)>> + '_ {
+    ) -> impl Future<Output = Result<(FeeCalculator, Hash, u64), BanksClientError>> + '_ {
         #[allow(deprecated)]
         self.inner
             .get_fees_with_commitment_and_context(ctx, commitment)
-            .map_err(BanksClientError::from) // Remove this when return Err type updated to BanksClientError
             .map_err(Into::into)
     }
 
@@ -87,10 +85,9 @@ impl BanksClient {
         &mut self,
         ctx: Context,
         signature: Signature,
-    ) -> impl Future<Output = io::Result<Option<TransactionStatus>>> + '_ {
+    ) -> impl Future<Output = Result<Option<TransactionStatus>, BanksClientError>> + '_ {
         self.inner
             .get_transaction_status_with_context(ctx, signature)
-            .map_err(BanksClientError::from) // Remove this when return Err type updated to BanksClientError
             .map_err(Into::into)
     }
 
@@ -98,10 +95,9 @@ impl BanksClient {
         &mut self,
         ctx: Context,
         commitment: CommitmentLevel,
-    ) -> impl Future<Output = io::Result<Slot>> + '_ {
+    ) -> impl Future<Output = Result<Slot, BanksClientError>> + '_ {
         self.inner
             .get_slot_with_context(ctx, commitment)
-            .map_err(BanksClientError::from) // Remove this when return Err type updated to BanksClientError
             .map_err(Into::into)
     }
 
@@ -109,10 +105,9 @@ impl BanksClient {
         &mut self,
         ctx: Context,
         commitment: CommitmentLevel,
-    ) -> impl Future<Output = io::Result<Slot>> + '_ {
+    ) -> impl Future<Output = Result<Slot, BanksClientError>> + '_ {
         self.inner
             .get_block_height_with_context(ctx, commitment)
-            .map_err(BanksClientError::from) // Remove this when return Err type updated to BanksClientError
             .map_err(Into::into)
     }
 
@@ -121,10 +116,9 @@ impl BanksClient {
         ctx: Context,
         transaction: Transaction,
         commitment: CommitmentLevel,
-    ) -> impl Future<Output = io::Result<Option<transaction::Result<()>>>> + '_ {
+    ) -> impl Future<Output = Result<Option<transaction::Result<()>>, BanksClientError>> + '_ {
         self.inner
             .process_transaction_with_commitment_and_context(ctx, transaction, commitment)
-            .map_err(BanksClientError::from) // Remove this when return Err type updated to BanksClientError
             .map_err(Into::into)
     }
 
@@ -149,10 +143,9 @@ impl BanksClient {
         ctx: Context,
         address: Pubkey,
         commitment: CommitmentLevel,
-    ) -> impl Future<Output = io::Result<Option<Account>>> + '_ {
+    ) -> impl Future<Output = Result<Option<Account>, BanksClientError>> + '_ {
         self.inner
             .get_account_with_commitment_and_context(ctx, address, commitment)
-            .map_err(BanksClientError::from) // Remove this when return Err type updated to BanksClientError
             .map_err(Into::into)
     }
 
@@ -162,7 +155,7 @@ impl BanksClient {
     pub fn send_transaction(
         &mut self,
         transaction: Transaction,
-    ) -> impl Future<Output = io::Result<()>> + '_ {
+    ) -> impl Future<Output = Result<(), BanksClientError>> + '_ {
         self.send_transaction_with_context(context::current(), transaction)
     }
 
@@ -175,27 +168,25 @@ impl BanksClient {
     )]
     pub fn get_fees(
         &mut self,
-    ) -> impl Future<Output = io::Result<(FeeCalculator, Hash, u64)>> + '_ {
+    ) -> impl Future<Output = Result<(FeeCalculator, Hash, u64), BanksClientError>> + '_ {
         #[allow(deprecated)]
         self.get_fees_with_commitment_and_context(context::current(), CommitmentLevel::default())
     }
 
     /// Return the cluster Sysvar
-    pub fn get_sysvar<T: Sysvar>(&mut self) -> impl Future<Output = io::Result<T>> + '_ {
+    pub fn get_sysvar<T: Sysvar>(
+        &mut self,
+    ) -> impl Future<Output = Result<T, BanksClientError>> + '_ {
         self.get_account(T::id()).map(|result| {
-            let sysvar = result?
-                .ok_or(BanksClientError::ClientError("Sysvar not present"))
-                .map_err(io::Error::from)?; // Remove this map when return Err type updated to BanksClientError
-            from_account::<T, _>(&sysvar)
-                .ok_or(BanksClientError::ClientError(
-                    "Failed to deserialize sysvar",
-                ))
-                .map_err(Into::into) // Remove this when return Err type updated to BanksClientError
+            let sysvar = result?.ok_or(BanksClientError::ClientError("Sysvar not present"))?;
+            from_account::<T, _>(&sysvar).ok_or(BanksClientError::ClientError(
+                "Failed to deserialize sysvar",
+            ))
         })
     }
 
     /// Return the cluster rent
-    pub fn get_rent(&mut self) -> impl Future<Output = io::Result<Rent>> + '_ {
+    pub fn get_rent(&mut self) -> impl Future<Output = Result<Rent, BanksClientError>> + '_ {
         self.get_sysvar::<Rent>()
     }
 
@@ -203,7 +194,9 @@ impl BanksClient {
     /// transactions with a blockhash that has not yet expired. Use the `get_fees`
     /// method to get both a blockhash and the blockhash's last valid slot.
     #[deprecated(since = "1.9.0", note = "Please use `get_latest_blockhash` instead")]
-    pub fn get_recent_blockhash(&mut self) -> impl Future<Output = io::Result<Hash>> + '_ {
+    pub fn get_recent_blockhash(
+        &mut self,
+    ) -> impl Future<Output = Result<Hash, BanksClientError>> + '_ {
         #[allow(deprecated)]
         self.get_fees().map(|result| Ok(result?.1))
     }
@@ -214,7 +207,7 @@ impl BanksClient {
         &mut self,
         transaction: Transaction,
         commitment: CommitmentLevel,
-    ) -> impl Future<Output = transport::Result<()>> + '_ {
+    ) -> impl Future<Output = Result<(), BanksClientError>> + '_ {
         let mut ctx = context::current();
         ctx.deadline += Duration::from_secs(50);
         self.process_transaction_with_commitment_and_context(ctx, transaction, commitment)
@@ -224,7 +217,6 @@ impl BanksClient {
             )),
             Some(transaction_result) => Ok(transaction_result?),
         })
-        .map_err(Into::into) // Remove this when return Err type updated to BanksClientError
     }
 
     /// Send a transaction and return any preflight (sanitization or simulation) errors, or return
@@ -255,6 +247,7 @@ impl BanksClient {
                     err,
                     logs: simulation_details.logs,
                     units_consumed: simulation_details.units_consumed,
+                    return_data: simulation_details.return_data,
                 }),
                 BanksTransactionResultWithSimulation {
                     result: Some(result),
@@ -279,7 +272,7 @@ impl BanksClient {
     pub fn process_transaction(
         &mut self,
         transaction: Transaction,
-    ) -> impl Future<Output = transport::Result<()>> + '_ {
+    ) -> impl Future<Output = Result<(), BanksClientError>> + '_ {
         self.process_transaction_with_commitment(transaction, CommitmentLevel::default())
     }
 
@@ -287,7 +280,7 @@ impl BanksClient {
         &mut self,
         transactions: Vec<Transaction>,
         commitment: CommitmentLevel,
-    ) -> transport::Result<()> {
+    ) -> Result<(), BanksClientError> {
         let mut clients: Vec<_> = transactions.iter().map(|_| self.clone()).collect();
         let futures = clients
             .iter_mut()
@@ -303,19 +296,21 @@ impl BanksClient {
     pub fn process_transactions(
         &mut self,
         transactions: Vec<Transaction>,
-    ) -> impl Future<Output = transport::Result<()>> + '_ {
+    ) -> impl Future<Output = Result<(), BanksClientError>> + '_ {
         self.process_transactions_with_commitment(transactions, CommitmentLevel::default())
     }
 
     /// Return the most recent rooted slot. All transactions at or below this slot
     /// are said to be finalized. The cluster will not fork to a higher slot.
-    pub fn get_root_slot(&mut self) -> impl Future<Output = io::Result<Slot>> + '_ {
+    pub fn get_root_slot(&mut self) -> impl Future<Output = Result<Slot, BanksClientError>> + '_ {
         self.get_slot_with_context(context::current(), CommitmentLevel::default())
     }
 
     /// Return the most recent rooted block height. All transactions at or below this height
     /// are said to be finalized. The cluster will not fork to a higher block height.
-    pub fn get_root_block_height(&mut self) -> impl Future<Output = io::Result<Slot>> + '_ {
+    pub fn get_root_block_height(
+        &mut self,
+    ) -> impl Future<Output = Result<Slot, BanksClientError>> + '_ {
         self.get_block_height_with_context(context::current(), CommitmentLevel::default())
     }
 
@@ -325,7 +320,7 @@ impl BanksClient {
         &mut self,
         address: Pubkey,
         commitment: CommitmentLevel,
-    ) -> impl Future<Output = io::Result<Option<Account>>> + '_ {
+    ) -> impl Future<Output = Result<Option<Account>, BanksClientError>> + '_ {
         self.get_account_with_commitment_and_context(context::current(), address, commitment)
     }
 
@@ -334,7 +329,7 @@ impl BanksClient {
     pub fn get_account(
         &mut self,
         address: Pubkey,
-    ) -> impl Future<Output = io::Result<Option<Account>>> + '_ {
+    ) -> impl Future<Output = Result<Option<Account>, BanksClientError>> + '_ {
         self.get_account_with_commitment(address, CommitmentLevel::default())
     }
 
@@ -343,14 +338,11 @@ impl BanksClient {
     pub fn get_packed_account_data<T: Pack>(
         &mut self,
         address: Pubkey,
-    ) -> impl Future<Output = io::Result<T>> + '_ {
+    ) -> impl Future<Output = Result<T, BanksClientError>> + '_ {
         self.get_account(address).map(|result| {
-            let account = result?
-                .ok_or(BanksClientError::ClientError("Account not found"))
-                .map_err(io::Error::from)?; // Remove this map when return Err type updated to BanksClientError
+            let account = result?.ok_or(BanksClientError::ClientError("Account not found"))?;
             T::unpack_from_slice(&account.data)
                 .map_err(|_| BanksClientError::ClientError("Failed to deserialize account"))
-                .map_err(Into::into) // Remove this when return Err type updated to BanksClientError
         })
     }
 
@@ -359,7 +351,7 @@ impl BanksClient {
     pub fn get_account_data_with_borsh<T: BorshDeserialize>(
         &mut self,
         address: Pubkey,
-    ) -> impl Future<Output = io::Result<T>> + '_ {
+    ) -> impl Future<Output = Result<T, BanksClientError>> + '_ {
         self.get_account(address).map(|result| {
             let account = result?.ok_or(BanksClientError::ClientError("Account not found"))?;
             T::try_from_slice(&account.data).map_err(Into::into)
@@ -372,14 +364,17 @@ impl BanksClient {
         &mut self,
         address: Pubkey,
         commitment: CommitmentLevel,
-    ) -> impl Future<Output = io::Result<u64>> + '_ {
+    ) -> impl Future<Output = Result<u64, BanksClientError>> + '_ {
         self.get_account_with_commitment_and_context(context::current(), address, commitment)
             .map(|result| Ok(result?.map(|x| x.lamports).unwrap_or(0)))
     }
 
     /// Return the balance in lamports of an account at the given address at the time
     /// of the most recent root slot.
-    pub fn get_balance(&mut self, address: Pubkey) -> impl Future<Output = io::Result<u64>> + '_ {
+    pub fn get_balance(
+        &mut self,
+        address: Pubkey,
+    ) -> impl Future<Output = Result<u64, BanksClientError>> + '_ {
         self.get_balance_with_commitment(address, CommitmentLevel::default())
     }
 
@@ -391,7 +386,7 @@ impl BanksClient {
     pub fn get_transaction_status(
         &mut self,
         signature: Signature,
-    ) -> impl Future<Output = io::Result<Option<TransactionStatus>>> + '_ {
+    ) -> impl Future<Output = Result<Option<TransactionStatus>, BanksClientError>> + '_ {
         self.get_transaction_status_with_context(context::current(), signature)
     }
 
@@ -399,7 +394,7 @@ impl BanksClient {
     pub async fn get_transaction_statuses(
         &mut self,
         signatures: Vec<Signature>,
-    ) -> io::Result<Vec<Option<TransactionStatus>>> {
+    ) -> Result<Vec<Option<TransactionStatus>>, BanksClientError> {
         // tarpc futures oddly hold a mutable reference back to the client so clone the client upfront
         let mut clients_and_signatures: Vec<_> = signatures
             .into_iter()
@@ -416,7 +411,9 @@ impl BanksClient {
         statuses.into_iter().collect()
     }
 
-    pub fn get_latest_blockhash(&mut self) -> impl Future<Output = io::Result<Hash>> + '_ {
+    pub fn get_latest_blockhash(
+        &mut self,
+    ) -> impl Future<Output = Result<Hash, BanksClientError>> + '_ {
         self.get_latest_blockhash_with_commitment(CommitmentLevel::default())
             .map(|result| {
                 result?
@@ -429,7 +426,7 @@ impl BanksClient {
     pub fn get_latest_blockhash_with_commitment(
         &mut self,
         commitment: CommitmentLevel,
-    ) -> impl Future<Output = io::Result<Option<(Hash, u64)>>> + '_ {
+    ) -> impl Future<Output = Result<Option<(Hash, u64)>, BanksClientError>> + '_ {
         self.get_latest_blockhash_with_commitment_and_context(context::current(), commitment)
     }
 
@@ -437,10 +434,9 @@ impl BanksClient {
         &mut self,
         ctx: Context,
         commitment: CommitmentLevel,
-    ) -> impl Future<Output = io::Result<Option<(Hash, u64)>>> + '_ {
+    ) -> impl Future<Output = Result<Option<(Hash, u64)>, BanksClientError>> + '_ {
         self.inner
             .get_latest_blockhash_with_commitment_and_context(ctx, commitment)
-            .map_err(BanksClientError::from) // Remove this when return Err type updated to BanksClientError
             .map_err(Into::into)
     }
 
@@ -449,15 +445,14 @@ impl BanksClient {
         ctx: Context,
         commitment: CommitmentLevel,
         message: Message,
-    ) -> impl Future<Output = io::Result<Option<u64>>> + '_ {
+    ) -> impl Future<Output = Result<Option<u64>, BanksClientError>> + '_ {
         self.inner
             .get_fee_for_message_with_commitment_and_context(ctx, commitment, message)
-            .map_err(BanksClientError::from) // Remove this when return Err type updated to BanksClientError
             .map_err(Into::into)
     }
 }
 
-pub async fn start_client<C>(transport: C) -> io::Result<BanksClient>
+pub async fn start_client<C>(transport: C) -> Result<BanksClient, BanksClientError>
 where
     C: Transport<ClientMessage<BanksRequest>, Response<BanksResponse>> + Send + 'static,
 {
@@ -466,7 +461,7 @@ where
     })
 }
 
-pub async fn start_tcp_client<T: ToSocketAddrs>(addr: T) -> io::Result<BanksClient> {
+pub async fn start_tcp_client<T: ToSocketAddrs>(addr: T) -> Result<BanksClient, BanksClientError> {
     let transport = tcp::connect(addr, Bincode::default).await?;
     Ok(BanksClient {
         inner: TarpcClient::new(client::Config::default(), transport).spawn(),
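A consumer-side sketch of the unified signatures; the helper itself is hypothetical and assumes an already-started `BanksClient` (for example from solana-program-test):

// Sketch only: with the change above, every method yields BanksClientError,
// so a single `?` propagates errors without io::Error round-trips.
async fn check_balance(
    client: &mut BanksClient,
    address: Pubkey,
) -> Result<u64, BanksClientError> {
    let root = client.get_root_slot().await?;
    let balance = client.get_balance(address).await?;
    println!("balance at root slot {}: {} lamports", root, balance);
    Ok(balance)
}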
@@ -1,6 +1,6 @@
 [package]
 name = "solana-banks-interface"
-version = "1.9.4"
+version = "1.11.0"
 description = "Solana banks RPC interface"
 authors = ["Solana Maintainers <maintainers@solana.foundation>"]
 repository = "https://github.com/solana-labs/solana"
@@ -10,8 +10,8 @@ documentation = "https://docs.rs/solana-banks-interface"
 edition = "2021"
 
 [dependencies]
-serde = { version = "1.0.130", features = ["derive"] }
-solana-sdk = { path = "../sdk", version = "=1.9.4" }
+serde = { version = "1.0.136", features = ["derive"] }
+solana-sdk = { path = "../sdk", version = "=1.11.0" }
 tarpc = { version = "0.27.2", features = ["full"] }
 
 [lib]
@@ -12,6 +12,7 @@ use {
         pubkey::Pubkey,
         signature::Signature,
         transaction::{self, Transaction, TransactionError},
+        transaction_context::TransactionReturnData,
     },
 };
 
@@ -35,6 +36,7 @@ pub struct TransactionStatus {
 pub struct TransactionSimulationDetails {
     pub logs: Vec<String>,
     pub units_consumed: u64,
+    pub return_data: Option<TransactionReturnData>,
 }
 
 #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
@@ -1,6 +1,6 @@
 [package]
 name = "solana-banks-server"
-version = "1.9.4"
+version = "1.11.0"
 description = "Solana banks server"
 authors = ["Solana Maintainers <maintainers@solana.foundation>"]
 repository = "https://github.com/solana-labs/solana"
@@ -11,11 +11,12 @@ edition = "2021"
 
 [dependencies]
 bincode = "1.3.3"
+crossbeam-channel = "0.5"
 futures = "0.3"
-solana-banks-interface = { path = "../banks-interface", version = "=1.9.4" }
-solana-runtime = { path = "../runtime", version = "=1.9.4" }
-solana-sdk = { path = "../sdk", version = "=1.9.4" }
-solana-send-transaction-service = { path = "../send-transaction-service", version = "=1.9.4" }
+solana-banks-interface = { path = "../banks-interface", version = "=1.11.0" }
+solana-runtime = { path = "../runtime", version = "=1.11.0" }
+solana-sdk = { path = "../sdk", version = "=1.11.0" }
+solana-send-transaction-service = { path = "../send-transaction-service", version = "=1.11.0" }
 tarpc = { version = "0.27.2", features = ["full"] }
 tokio = { version = "1", features = ["full"] }
 tokio-serde = { version = "0.8", features = ["bincode"] }
@@ -1,5 +1,6 @@
 use {
     bincode::{deserialize, serialize},
+    crossbeam_channel::{unbounded, Receiver, Sender},
     futures::{future, prelude::stream::StreamExt},
     solana_banks_interface::{
         Banks, BanksRequest, BanksResponse, BanksTransactionResultWithSimulation,
@@ -23,17 +24,14 @@ use {
         transaction::{self, SanitizedTransaction, Transaction},
     },
     solana_send_transaction_service::{
-        send_transaction_service::{SendTransactionService, TransactionInfo},
+        send_transaction_service::{SendTransactionService, TransactionInfo, DEFAULT_TPU_USE_QUIC},
         tpu_info::NullTpuInfo,
     },
     std::{
         convert::TryFrom,
         io,
         net::{Ipv4Addr, SocketAddr},
-        sync::{
-            mpsc::{channel, Receiver, Sender},
-            Arc, RwLock,
-        },
+        sync::{Arc, RwLock},
         thread::Builder,
         time::Duration,
     },
@@ -96,7 +94,7 @@ impl BanksServer {
         block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
         poll_signature_status_sleep_duration: Duration,
     ) -> Self {
-        let (transaction_sender, transaction_receiver) = channel();
+        let (transaction_sender, transaction_receiver) = unbounded();
         let bank = bank_forks.read().unwrap().working_bank();
         let slot = bank.slot();
         {
@@ -268,6 +266,7 @@ impl Banks for BanksServer {
             logs,
             post_simulation_accounts: _,
             units_consumed,
+            return_data,
         } = self
             .bank(commitment)
             .simulate_transaction_unchecked(sanitized_transaction)
@@ -277,6 +276,7 @@ impl Banks for BanksServer {
             simulation_details: Some(TransactionSimulationDetails {
                 logs,
                 units_consumed,
+                return_data,
             }),
         };
     }
@@ -392,7 +392,7 @@ pub async fn start_tcp_server(
     // serve is generated by the service attribute. It takes as input any type implementing
     // the generated Banks trait.
     .map(move |chan| {
-        let (sender, receiver) = channel();
+        let (sender, receiver) = unbounded();
 
         SendTransactionService::new::<NullTpuInfo>(
             tpu_addr,
@@ -401,6 +401,7 @@ pub async fn start_tcp_server(
             receiver,
             5_000,
             0,
+            DEFAULT_TPU_USE_QUIC,
         );
 
         let server = BanksServer::new(
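The recurring std::sync::mpsc to crossbeam swap in this change set follows one simple pattern; a minimal standalone illustration, not taken from the diff:

// Sketch only: the std::sync::mpsc -> crossbeam_channel migration pattern.
use crossbeam_channel::unbounded;

fn channel_demo() {
    // Before: let (tx, rx) = std::sync::mpsc::channel();
    // After: crossbeam's unbounded() is a near drop-in replacement whose
    // Receiver is Clone (multi-consumer), which std's mpsc Receiver is not.
    let (tx, rx) = unbounded();
    let rx2 = rx.clone(); // legal with crossbeam; impossible with std mpsc

    tx.send(42u64).unwrap();
    tx.send(43u64).unwrap();
    assert_eq!(rx.recv().unwrap(), 42);
    assert_eq!(rx2.recv().unwrap(), 43);
}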
@@ -2,19 +2,18 @@
 authors = ["Solana Maintainers <maintainers@solana.foundation>"]
 edition = "2021"
 name = "solana-bench-streamer"
-version = "1.9.4"
+version = "1.11.0"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
 publish = false
 
 [dependencies]
-clap = "2.33.1"
-solana-clap-utils = { path = "../clap-utils", version = "=1.9.4" }
-solana-streamer = { path = "../streamer", version = "=1.9.4" }
-solana-logger = { path = "../logger", version = "=1.9.4" }
-solana-net-utils = { path = "../net-utils", version = "=1.9.4" }
-solana-version = { path = "../version", version = "=1.9.4" }
+crossbeam-channel = "0.5"
+clap = { version = "3.1.5", features = ["cargo"] }
+solana-net-utils = { path = "../net-utils", version = "=1.11.0" }
+solana-streamer = { path = "../streamer", version = "=1.11.0" }
+solana-version = { path = "../version", version = "=1.11.0" }
 
 [package.metadata.docs.rs]
 targets = ["x86_64-unknown-linux-gnu"]
@@ -1,6 +1,7 @@
 #![allow(clippy::integer_arithmetic)]
 use {
-    clap::{crate_description, crate_name, App, Arg},
+    clap::{crate_description, crate_name, Arg, Command},
+    crossbeam_channel::unbounded,
     solana_streamer::{
         packet::{Packet, PacketBatch, PacketBatchRecycler, PACKET_DATA_SIZE},
         streamer::{receiver, PacketBatchReceiver},
@@ -10,7 +11,6 @@ use {
     net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket},
     sync::{
         atomic::{AtomicBool, AtomicUsize, Ordering},
-        mpsc::channel,
         Arc,
     },
     thread::{sleep, spawn, JoinHandle, Result},
@@ -57,23 +57,32 @@ fn sink(exit: Arc<AtomicBool>, rvs: Arc<AtomicUsize>, r: PacketBatchReceiver) ->
 fn main() -> Result<()> {
     let mut num_sockets = 1usize;

-    let matches = App::new(crate_name!())
+    let matches = Command::new(crate_name!())
         .about(crate_description!())
         .version(solana_version::version!())
         .arg(
-            Arg::with_name("num-recv-sockets")
+            Arg::new("num-recv-sockets")
                 .long("num-recv-sockets")
                 .value_name("NUM")
                 .takes_value(true)
                 .help("Use NUM receive sockets"),
         )
+        .arg(
+            Arg::new("num-producers")
+                .long("num-producers")
+                .value_name("NUM")
+                .takes_value(true)
+                .help("Use this many producer threads."),
+        )
         .get_matches();

     if let Some(n) = matches.value_of("num-recv-sockets") {
         num_sockets = max(num_sockets, n.to_string().parse().expect("integer"));
     }

-    let mut port = 0;
+    let num_producers: u64 = matches.value_of_t("num_producers").unwrap_or(4);
+
+    let port = 0;
     let ip_addr = IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0));
     let mut addr = SocketAddr::new(ip_addr, 0);

@@ -82,14 +91,17 @@ fn main() -> Result<()> {
     let mut read_channels = Vec::new();
     let mut read_threads = Vec::new();
     let recycler = PacketBatchRecycler::default();
-    for _ in 0..num_sockets {
-        let read = solana_net_utils::bind_to(ip_addr, port, false).unwrap();
+    let (_port, read_sockets) = solana_net_utils::multi_bind_in_range(
+        ip_addr,
+        (port, port + num_sockets as u16),
+        num_sockets,
+    )
+    .unwrap();
+    for read in read_sockets {
         read.set_read_timeout(Some(Duration::new(1, 0))).unwrap();

         addr = read.local_addr().unwrap();
-        port = addr.port();
-
-        let (s_reader, r_reader) = channel();
+        let (s_reader, r_reader) = unbounded();
         read_channels.push(r_reader);
         read_threads.push(receiver(
             Arc::new(read),
@@ -102,9 +114,10 @@
         ));
     }

-    let t_producer1 = producer(&addr, exit.clone());
-    let t_producer2 = producer(&addr, exit.clone());
-    let t_producer3 = producer(&addr, exit.clone());
+    let producer_threads: Vec<_> = (0..num_producers)
+        .into_iter()
+        .map(|_| producer(&addr, exit.clone()))
+        .collect();

     let rvs = Arc::new(AtomicUsize::new(0));
     let sink_threads: Vec<_> = read_channels
@@ -124,9 +137,9 @@
     for t_reader in read_threads {
         t_reader.join()?;
     }
-    t_producer1.join()?;
-    t_producer2.join()?;
-    t_producer3.join()?;
+    for t_producer in producer_threads {
+        t_producer.join()?;
+    }
     for t_sink in sink_threads {
         t_sink.join()?;
     }
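The hunks above are a clap 2 -> 3 migration: App becomes Command and Arg::with_name becomes Arg::new, while the builder chain keeps its shape. A small sketch under those assumptions (the program and argument names are made up):

use clap::{Arg, Command};

fn main() {
    let matches = Command::new("demo") // App::new in clap 2.x
        .arg(
            Arg::new("count") // Arg::with_name in clap 2.x
                .long("count")
                .value_name("NUM")
                .takes_value(true),
        )
        .get_matches();

    // value_of_t parses and converts in one step, as used above for the
    // producer count; unwrap_or supplies the default.
    let count: u64 = matches.value_of_t("count").unwrap_or(1);
    println!("count = {}", count);
}

Note the lookup key "num_producers" versus the declared name "num-producers" in the hunk above; clap matches by declared name, so that lookup likely always falls back to the unwrap_or default of 4.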
@@ -2,7 +2,7 @@
 authors = ["Solana Maintainers <maintainers@solana.foundation>"]
 edition = "2021"
 name = "solana-bench-tps"
-version = "1.9.4"
+version = "1.11.0"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -10,27 +10,33 @@ publish = false

 [dependencies]
 clap = "2.33.1"
+crossbeam-channel = "0.5"
 log = "0.4.14"
 rayon = "1.5.1"
-serde_json = "1.0.72"
-serde_yaml = "0.8.21"
-solana-core = { path = "../core", version = "=1.9.4" }
-solana-genesis = { path = "../genesis", version = "=1.9.4" }
-solana-client = { path = "../client", version = "=1.9.4" }
-solana-faucet = { path = "../faucet", version = "=1.9.4" }
-solana-gossip = { path = "../gossip", version = "=1.9.4" }
-solana-logger = { path = "../logger", version = "=1.9.4" }
-solana-metrics = { path = "../metrics", version = "=1.9.4" }
-solana-measure = { path = "../measure", version = "=1.9.4" }
-solana-net-utils = { path = "../net-utils", version = "=1.9.4" }
-solana-runtime = { path = "../runtime", version = "=1.9.4" }
-solana-sdk = { path = "../sdk", version = "=1.9.4" }
-solana-streamer = { path = "../streamer", version = "=1.9.4" }
-solana-version = { path = "../version", version = "=1.9.4" }
+serde_json = "1.0.79"
+serde_yaml = "0.8.23"
+solana-clap-utils = { path = "../clap-utils", version = "=1.11.0" }
+solana-cli-config = { path = "../cli-config", version = "=1.11.0" }
+solana-client = { path = "../client", version = "=1.11.0" }
+solana-core = { path = "../core", version = "=1.11.0" }
+solana-faucet = { path = "../faucet", version = "=1.11.0" }
+solana-genesis = { path = "../genesis", version = "=1.11.0" }
+solana-gossip = { path = "../gossip", version = "=1.11.0" }
+solana-logger = { path = "../logger", version = "=1.11.0" }
+solana-measure = { path = "../measure", version = "=1.11.0" }
+solana-metrics = { path = "../metrics", version = "=1.11.0" }
+solana-net-utils = { path = "../net-utils", version = "=1.11.0" }
+solana-rpc = { path = "../rpc", version = "=1.11.0" }
+solana-runtime = { path = "../runtime", version = "=1.11.0" }
+solana-sdk = { path = "../sdk", version = "=1.11.0" }
+solana-streamer = { path = "../streamer", version = "=1.11.0" }
+solana-version = { path = "../version", version = "=1.11.0" }
+thiserror = "1.0"

 [dev-dependencies]
-serial_test = "0.5.1"
-solana-local-cluster = { path = "../local-cluster", version = "=1.9.4" }
+serial_test = "0.6.0"
+solana-local-cluster = { path = "../local-cluster", version = "=1.11.0" }
+solana-test-validator = { path = "../test-validator", version = "=1.11.0" }

 [package.metadata.docs.rs]
 targets = ["x86_64-unknown-linux-gnu"]
@@ -1,19 +1,21 @@
 use {
-    crate::cli::Config,
+    crate::{
+        bench_tps_client::*,
+        cli::Config,
+        perf_utils::{sample_txs, SampleStats},
+    },
     log::*,
     rayon::prelude::*,
-    solana_client::perf_utils::{sample_txs, SampleStats},
     solana_core::gen_keys::GenKeys,
-    solana_faucet::faucet::request_airdrop_transaction,
     solana_measure::measure::Measure,
     solana_metrics::{self, datapoint_info},
     solana_sdk::{
-        client::Client,
         clock::{DEFAULT_MS_PER_SLOT, DEFAULT_S_PER_SLOT, MAX_PROCESSING_AGE},
         commitment_config::CommitmentConfig,
         hash::Hash,
         instruction::{AccountMeta, Instruction},
         message::Message,
+        native_token::Sol,
         pubkey::Pubkey,
         signature::{Keypair, Signer},
         system_instruction, system_transaction,
@@ -22,7 +24,6 @@ use {
     },
     std::{
         collections::{HashSet, VecDeque},
-        net::SocketAddr,
         process::exit,
         sync::{
             atomic::{AtomicBool, AtomicIsize, AtomicUsize, Ordering},
@@ -38,16 +39,9 @@ const MAX_TX_QUEUE_AGE: u64 = (MAX_PROCESSING_AGE as f64 * DEFAULT_S_PER_SLOT) a

 pub const MAX_SPENDS_PER_TX: u64 = 4;

-#[derive(Debug)]
-pub enum BenchTpsError {
-    AirdropFailure,
-}
-
-pub type Result<T> = std::result::Result<T, BenchTpsError>;
-
 pub type SharedTransactions = Arc<RwLock<VecDeque<Vec<(Transaction, u64)>>>>;

-fn get_latest_blockhash<T: Client>(client: &T) -> Hash {
+fn get_latest_blockhash<T: BenchTpsClient>(client: &T) -> Hash {
     loop {
         match client.get_latest_blockhash_with_commitment(CommitmentConfig::processed()) {
             Ok((blockhash, _)) => return blockhash,
@@ -61,7 +55,7 @@ fn get_latest_blockhash<T: Client>(client: &T) -> Hash {

 fn wait_for_target_slots_per_epoch<T>(target_slots_per_epoch: u64, client: &Arc<T>)
 where
-    T: 'static + Client + Send + Sync,
+    T: 'static + BenchTpsClient + Send + Sync,
 {
     if target_slots_per_epoch != 0 {
         info!(
@@ -91,7 +85,7 @@ fn create_sampler_thread<T>(
     maxes: &Arc<RwLock<Vec<(String, SampleStats)>>>,
 ) -> JoinHandle<()>
 where
-    T: 'static + Client + Send + Sync,
+    T: 'static + BenchTpsClient + Send + Sync,
 {
     info!("Sampling TPS every {} second...", sample_period);
     let exit_signal = exit_signal.clone();
@@ -110,7 +104,7 @@ fn generate_chunked_transfers(
     shared_txs: &SharedTransactions,
     shared_tx_active_thread_count: Arc<AtomicIsize>,
     source_keypair_chunks: Vec<Vec<&Keypair>>,
-    dest_keypair_chunks: &mut Vec<VecDeque<&Keypair>>,
+    dest_keypair_chunks: &mut [VecDeque<&Keypair>],
     threads: usize,
     duration: Duration,
     sustained: bool,
@@ -169,7 +163,7 @@ fn create_sender_threads<T>(
     shared_tx_active_thread_count: &Arc<AtomicIsize>,
 ) -> Vec<JoinHandle<()>>
 where
-    T: 'static + Client + Send + Sync,
+    T: 'static + BenchTpsClient + Send + Sync,
 {
     (0..threads)
         .map(|_| {
@@ -197,7 +191,7 @@ where

 pub fn do_bench_tps<T>(client: Arc<T>, config: Config, gen_keypairs: Vec<Keypair>) -> u64
 where
-    T: 'static + Client + Send + Sync,
+    T: 'static + BenchTpsClient + Send + Sync,
 {
     let Config {
         id,
@@ -391,7 +385,7 @@ fn generate_txs(
     }
 }

-fn get_new_latest_blockhash<T: Client>(client: &Arc<T>, blockhash: &Hash) -> Option<Hash> {
+fn get_new_latest_blockhash<T: BenchTpsClient>(client: &Arc<T>, blockhash: &Hash) -> Option<Hash> {
     let start = Instant::now();
     while start.elapsed().as_secs() < 5 {
         if let Ok(new_blockhash) = client.get_latest_blockhash() {
@@ -407,7 +401,7 @@ fn get_new_latest_blockhash<T: Client>(client: &Arc<T>, blockhash: &Hash) -> Opt
     None
 }

-fn poll_blockhash<T: Client>(
+fn poll_blockhash<T: BenchTpsClient>(
     exit_signal: &Arc<AtomicBool>,
     blockhash: &Arc<RwLock<Hash>>,
     client: &Arc<T>,
@@ -449,7 +443,7 @@ fn poll_blockhash<T: Client>(
     }
 }

-fn do_tx_transfers<T: Client>(
+fn do_tx_transfers<T: BenchTpsClient>(
     exit_signal: &Arc<AtomicBool>,
     shared_txs: &SharedTransactions,
     shared_tx_thread_count: &Arc<AtomicIsize>,
@@ -467,14 +461,11 @@ fn do_tx_transfers<T: Client>(
         };
         if let Some(txs0) = txs {
             shared_tx_thread_count.fetch_add(1, Ordering::Relaxed);
-            info!(
-                "Transferring 1 unit {} times... to {}",
-                txs0.len(),
-                client.as_ref().tpu_addr(),
-            );
+            info!("Transferring 1 unit {} times...", txs0.len());
             let tx_len = txs0.len();
             let transfer_start = Instant::now();
             let mut old_transactions = false;
+            let mut transactions = Vec::<_>::new();
             for tx in txs0 {
                 let now = timestamp();
                 // Transactions that are too old will be rejected by the cluster Don't bother
@@ -483,10 +474,13 @@ fn do_tx_transfers<T: Client>(
                     old_transactions = true;
                     continue;
                 }
-                client
-                    .async_send_transaction(tx.0)
-                    .expect("async_send_transaction in do_tx_transfers");
+                transactions.push(tx.0);
             }
+
+            if let Err(error) = client.send_batch(transactions) {
+                warn!("send_batch_sync in do_tx_transfers failed: {}", error);
+            }
+
             if old_transactions {
                 let mut shared_txs_wl = shared_txs.write().expect("write lock in do_tx_transfers");
                 shared_txs_wl.clear();
@@ -510,7 +504,11 @@ fn do_tx_transfers<T: Client>(
         }
     }

-fn verify_funding_transfer<T: Client>(client: &Arc<T>, tx: &Transaction, amount: u64) -> bool {
+fn verify_funding_transfer<T: BenchTpsClient>(
+    client: &Arc<T>,
+    tx: &Transaction,
+    amount: u64,
+) -> bool {
     for a in &tx.message().account_keys[1..] {
         match client.get_balance_with_commitment(a, CommitmentConfig::processed()) {
             Ok(balance) => return balance >= amount,
@@ -521,7 +519,7 @@ fn verify_funding_transfer<T: Client>(client: &Arc<T>, tx: &Transaction, amount:
     }

 trait FundingTransactions<'a> {
-    fn fund<T: 'static + Client + Send + Sync>(
+    fn fund<T: 'static + BenchTpsClient + Send + Sync>(
         &mut self,
         client: &Arc<T>,
         to_fund: &[(&'a Keypair, Vec<(Pubkey, u64)>)],
@@ -529,12 +527,16 @@ trait FundingTransactions<'a> {
     );
     fn make(&mut self, to_fund: &[(&'a Keypair, Vec<(Pubkey, u64)>)]);
     fn sign(&mut self, blockhash: Hash);
-    fn send<T: Client>(&self, client: &Arc<T>);
-    fn verify<T: 'static + Client + Send + Sync>(&mut self, client: &Arc<T>, to_lamports: u64);
+    fn send<T: BenchTpsClient>(&self, client: &Arc<T>);
+    fn verify<T: 'static + BenchTpsClient + Send + Sync>(
+        &mut self,
+        client: &Arc<T>,
+        to_lamports: u64,
+    );
 }

 impl<'a> FundingTransactions<'a> for Vec<(&'a Keypair, Transaction)> {
-    fn fund<T: 'static + Client + Send + Sync>(
+    fn fund<T: 'static + BenchTpsClient + Send + Sync>(
         &mut self,
         client: &Arc<T>,
         to_fund: &[(&'a Keypair, Vec<(Pubkey, u64)>)],
@@ -603,16 +605,20 @@ impl<'a> FundingTransactions<'a> for Vec<(&'a Keypair, Transaction)> {
         debug!("sign {} txs: {}us", self.len(), sign_txs.as_us());
     }

-    fn send<T: Client>(&self, client: &Arc<T>) {
+    fn send<T: BenchTpsClient>(&self, client: &Arc<T>) {
         let mut send_txs = Measure::start("send_txs");
         self.iter().for_each(|(_, tx)| {
-            client.async_send_transaction(tx.clone()).expect("transfer");
+            client.send_transaction(tx.clone()).expect("transfer");
         });
         send_txs.stop();
         debug!("send {} txs: {}us", self.len(), send_txs.as_us());
     }

-    fn verify<T: 'static + Client + Send + Sync>(&mut self, client: &Arc<T>, to_lamports: u64) {
+    fn verify<T: 'static + BenchTpsClient + Send + Sync>(
+        &mut self,
+        client: &Arc<T>,
+        to_lamports: u64,
+    ) {
         let starting_txs = self.len();
         let verified_txs = Arc::new(AtomicUsize::new(0));
         let too_many_failures = Arc::new(AtomicBool::new(false));
@@ -687,7 +693,7 @@ impl<'a> FundingTransactions<'a> for Vec<(&'a Keypair, Transaction)> {
 /// fund the dests keys by spending all of the source keys into MAX_SPENDS_PER_TX
 /// on every iteration. This allows us to replay the transfers because the source is either empty,
 /// or full
-pub fn fund_keys<T: 'static + Client + Send + Sync>(
+pub fn fund_keys<T: 'static + BenchTpsClient + Send + Sync>(
     client: Arc<T>,
     source: &Keypair,
     dests: &[Keypair],
@@ -729,75 +735,6 @@ pub fn fund_keys<T: 'static + Client + Send + Sync>(
     }
 }

-pub fn airdrop_lamports<T: Client>(
-    client: &T,
-    faucet_addr: &SocketAddr,
-    id: &Keypair,
-    desired_balance: u64,
-) -> Result<()> {
-    let starting_balance = client.get_balance(&id.pubkey()).unwrap_or(0);
-    metrics_submit_lamport_balance(starting_balance);
-    info!("starting balance {}", starting_balance);
-
-    if starting_balance < desired_balance {
-        let airdrop_amount = desired_balance - starting_balance;
-        info!(
-            "Airdropping {:?} lamports from {} for {}",
-            airdrop_amount,
-            faucet_addr,
-            id.pubkey(),
-        );
-
-        let blockhash = get_latest_blockhash(client);
-        match request_airdrop_transaction(faucet_addr, &id.pubkey(), airdrop_amount, blockhash) {
-            Ok(transaction) => {
-                let mut tries = 0;
-                loop {
-                    tries += 1;
-                    let signature = client.async_send_transaction(transaction.clone()).unwrap();
-                    let result = client.poll_for_signature_confirmation(&signature, 1);
-
-                    if result.is_ok() {
-                        break;
-                    }
-                    if tries >= 5 {
-                        panic!(
-                            "Error requesting airdrop: to addr: {:?} amount: {} {:?}",
-                            faucet_addr, airdrop_amount, result
-                        )
-                    }
-                }
-            }
-            Err(err) => {
-                panic!(
-                    "Error requesting airdrop: {:?} to addr: {:?} amount: {}",
-                    err, faucet_addr, airdrop_amount
-                );
-            }
-        };
-
-        let current_balance = client
-            .get_balance_with_commitment(&id.pubkey(), CommitmentConfig::processed())
-            .unwrap_or_else(|e| {
-                info!("airdrop error {}", e);
-                starting_balance
-            });
-        info!("current balance {}...", current_balance);
-
-        metrics_submit_lamport_balance(current_balance);
-        if current_balance - starting_balance != airdrop_amount {
-            info!(
-                "Airdrop failed! {} {} {}",
-                id.pubkey(),
-                current_balance,
-                starting_balance
-            );
-            return Err(BenchTpsError::AirdropFailure);
-        }
-    }
-    Ok(())
-}
-
 fn compute_and_report_stats(
     maxes: &Arc<RwLock<Vec<(String, SampleStats)>>>,
     sample_period: u64,
@@ -881,15 +818,33 @@ pub fn generate_keypairs(seed_keypair: &Keypair, count: u64) -> (Vec<Keypair>, u
     (rnd.gen_n_keypairs(total_keys), extra)
 }

-pub fn generate_and_fund_keypairs<T: 'static + Client + Send + Sync>(
+pub fn generate_and_fund_keypairs<T: 'static + BenchTpsClient + Send + Sync>(
     client: Arc<T>,
-    faucet_addr: Option<SocketAddr>,
     funding_key: &Keypair,
     keypair_count: usize,
     lamports_per_account: u64,
 ) -> Result<Vec<Keypair>> {
+    let rent = client.get_minimum_balance_for_rent_exemption(0)?;
+    let lamports_per_account = lamports_per_account + rent;
+
     info!("Creating {} keypairs...", keypair_count);
     let (mut keypairs, extra) = generate_keypairs(funding_key, keypair_count as u64);
+    fund_keypairs(client, funding_key, &keypairs, extra, lamports_per_account)?;
+
+    // 'generate_keypairs' generates extra keys to be able to have size-aligned funding batches for fund_keys.
+    keypairs.truncate(keypair_count);
+
+    Ok(keypairs)
+}
+
+pub fn fund_keypairs<T: 'static + BenchTpsClient + Send + Sync>(
+    client: Arc<T>,
+    funding_key: &Keypair,
+    keypairs: &[Keypair],
+    extra: u64,
+    lamports_per_account: u64,
+) -> Result<()> {
+    let rent = client.get_minimum_balance_for_rent_exemption(0)?;
     info!("Get lamports...");

     // Sample the first keypair, to prevent lamport loss on repeated solana-bench-tps executions
@@ -897,7 +852,7 @@ pub fn generate_and_fund_keypairs<T: 'static + Client + Send + Sync>(
     let first_keypair_balance = client.get_balance(&first_key).unwrap_or(0);

     // Sample the last keypair, to check if funding was already completed
-    let last_key = keypairs[keypair_count - 1].pubkey();
+    let last_key = keypairs[keypairs.len() - 1].pubkey();
     let last_keypair_balance = client.get_balance(&last_key).unwrap_or(0);

     // Repeated runs will eat up keypair balances from transaction fees. In order to quickly
@@ -926,24 +881,35 @@ pub fn generate_and_fund_keypairs<T: 'static + Client + Send + Sync>(
             funding_key_balance, max_fee, lamports_per_account, extra, total
         );

-        if client.get_balance(&funding_key.pubkey()).unwrap_or(0) < total {
-            airdrop_lamports(client.as_ref(), &faucet_addr.unwrap(), funding_key, total)?;
+        if funding_key_balance < total + rent {
+            error!(
+                "funder has {}, needed {}",
+                Sol(funding_key_balance),
+                Sol(total)
+            );
+            let latest_blockhash = get_latest_blockhash(client.as_ref());
+            if client
+                .request_airdrop_with_blockhash(
+                    &funding_key.pubkey(),
+                    total + rent - funding_key_balance,
+                    &latest_blockhash,
+                )
+                .is_err()
+            {
+                return Err(BenchTpsError::AirdropFailure);
+            }
         }

         fund_keys(
             client,
             funding_key,
-            &keypairs,
+            keypairs,
             total,
             max_fee,
             lamports_per_account,
         );
     }

-    // 'generate_keypairs' generates extra keys to be able to have size-aligned funding batches for fund_keys.
-    keypairs.truncate(keypair_count);
-
-    Ok(keypairs)
+    Ok(())
 }

 #[cfg(test)]
@@ -952,14 +918,14 @@ mod tests {
         super::*,
         solana_runtime::{bank::Bank, bank_client::BankClient},
         solana_sdk::{
-            client::SyncClient, fee_calculator::FeeRateGovernor,
-            genesis_config::create_genesis_config,
+            fee_calculator::FeeRateGovernor, genesis_config::create_genesis_config,
+            native_token::sol_to_lamports,
         },
     };

     #[test]
     fn test_bench_tps_bank_client() {
-        let (genesis_config, id) = create_genesis_config(10_000);
+        let (genesis_config, id) = create_genesis_config(sol_to_lamports(10_000.0));
         let bank = Bank::new_for_tests(&genesis_config);
         let client = Arc::new(BankClient::new(bank));
@@ -972,48 +938,49 @@ mod tests {

         let keypair_count = config.tx_count * config.keypair_multiplier;
         let keypairs =
-            generate_and_fund_keypairs(client.clone(), None, &config.id, keypair_count, 20)
-                .unwrap();
+            generate_and_fund_keypairs(client.clone(), &config.id, keypair_count, 20).unwrap();

         do_bench_tps(client, config, keypairs);
     }

     #[test]
     fn test_bench_tps_fund_keys() {
-        let (genesis_config, id) = create_genesis_config(10_000);
+        let (genesis_config, id) = create_genesis_config(sol_to_lamports(10_000.0));
         let bank = Bank::new_for_tests(&genesis_config);
         let client = Arc::new(BankClient::new(bank));
         let keypair_count = 20;
         let lamports = 20;
+        let rent = client.get_minimum_balance_for_rent_exemption(0).unwrap();

         let keypairs =
-            generate_and_fund_keypairs(client.clone(), None, &id, keypair_count, lamports).unwrap();
+            generate_and_fund_keypairs(client.clone(), &id, keypair_count, lamports).unwrap();

         for kp in &keypairs {
             assert_eq!(
                 client
                     .get_balance_with_commitment(&kp.pubkey(), CommitmentConfig::processed())
                     .unwrap(),
-                lamports
+                lamports + rent
             );
         }
     }

     #[test]
     fn test_bench_tps_fund_keys_with_fees() {
-        let (mut genesis_config, id) = create_genesis_config(10_000);
+        let (mut genesis_config, id) = create_genesis_config(sol_to_lamports(10_000.0));
         let fee_rate_governor = FeeRateGovernor::new(11, 0);
         genesis_config.fee_rate_governor = fee_rate_governor;
         let bank = Bank::new_for_tests(&genesis_config);
         let client = Arc::new(BankClient::new(bank));
         let keypair_count = 20;
         let lamports = 20;
+        let rent = client.get_minimum_balance_for_rent_exemption(0).unwrap();

         let keypairs =
-            generate_and_fund_keypairs(client.clone(), None, &id, keypair_count, lamports).unwrap();
+            generate_and_fund_keypairs(client.clone(), &id, keypair_count, lamports).unwrap();

         for kp in &keypairs {
-            assert_eq!(client.get_balance(&kp.pubkey()).unwrap(), lamports);
+            assert_eq!(client.get_balance(&kp.pubkey()).unwrap(), lamports + rent);
         }
     }
 }
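The dominant change in bench.rs above is mechanical: every T: 'static + Client + Send + Sync bound becomes T: 'static + BenchTpsClient + Send + Sync, decoupling the benchmark from solana_sdk's Client. A compact sketch of the pattern with stand-in names (not the real trait):

use std::sync::Arc;

// Crate-local trait in the spirit of BenchTpsClient; any backend that can
// answer balance queries can drive the benchmark.
pub trait BenchClient {
    fn get_balance(&self, account: &str) -> Result<u64, String>;
}

struct MockClient;

impl BenchClient for MockClient {
    fn get_balance(&self, _account: &str) -> Result<u64, String> {
        Ok(42)
    }
}

// Same bound shape as fund_keys / do_bench_tps: 'static + Send + Sync lets
// the Arc<T> be cloned into sampler and sender threads.
fn check_funding<T: 'static + BenchClient + Send + Sync>(client: Arc<T>) -> bool {
    client.get_balance("funder").map(|b| b > 0).unwrap_or(false)
}

fn main() {
    assert!(check_funding(Arc::new(MockClient)));
}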
bench-tps/src/bench_tps_client.rs (new file, 87 lines)
@@ -0,0 +1,87 @@
+use {
+    solana_client::{client_error::ClientError, tpu_client::TpuSenderError},
+    solana_sdk::{
+        commitment_config::CommitmentConfig, epoch_info::EpochInfo, hash::Hash, message::Message,
+        pubkey::Pubkey, signature::Signature, transaction::Transaction, transport::TransportError,
+    },
+    thiserror::Error,
+};
+
+#[derive(Error, Debug)]
+pub enum BenchTpsError {
+    #[error("Airdrop failure")]
+    AirdropFailure,
+    #[error("IO error: {0:?}")]
+    IoError(#[from] std::io::Error),
+    #[error("Client error: {0:?}")]
+    ClientError(#[from] ClientError),
+    #[error("TpuClient error: {0:?}")]
+    TpuSenderError(#[from] TpuSenderError),
+    #[error("Transport error: {0:?}")]
+    TransportError(#[from] TransportError),
+    #[error("Custom error: {0}")]
+    Custom(String),
+}
+
+pub(crate) type Result<T> = std::result::Result<T, BenchTpsError>;
+
+pub trait BenchTpsClient {
+    /// Send a signed transaction without confirmation
+    fn send_transaction(&self, transaction: Transaction) -> Result<Signature>;
+
+    /// Send a batch of signed transactions without confirmation.
+    fn send_batch(&self, transactions: Vec<Transaction>) -> Result<()>;
+
+    /// Get latest blockhash
+    fn get_latest_blockhash(&self) -> Result<Hash>;
+
+    /// Get latest blockhash and its last valid block height, using explicit commitment
+    fn get_latest_blockhash_with_commitment(
+        &self,
+        commitment_config: CommitmentConfig,
+    ) -> Result<(Hash, u64)>;
+
+    /// Get transaction count
+    fn get_transaction_count(&self) -> Result<u64>;
+
+    /// Get transaction count, using explicit commitment
+    fn get_transaction_count_with_commitment(
+        &self,
+        commitment_config: CommitmentConfig,
+    ) -> Result<u64>;
+
+    /// Get epoch info
+    fn get_epoch_info(&self) -> Result<EpochInfo>;
+
+    /// Get account balance
+    fn get_balance(&self, pubkey: &Pubkey) -> Result<u64>;
+
+    /// Get account balance, using explicit commitment
+    fn get_balance_with_commitment(
+        &self,
+        pubkey: &Pubkey,
+        commitment_config: CommitmentConfig,
+    ) -> Result<u64>;
+
+    /// Calculate the fee for a `Message`
+    fn get_fee_for_message(&self, message: &Message) -> Result<u64>;
+
+    /// Get the rent-exempt minimum for an account
+    fn get_minimum_balance_for_rent_exemption(&self, data_len: usize) -> Result<u64>;
+
+    /// Return the address of client
+    fn addr(&self) -> String;
+
+    /// Request, submit, and confirm an airdrop transaction
+    fn request_airdrop_with_blockhash(
+        &self,
+        pubkey: &Pubkey,
+        lamports: u64,
+        recent_blockhash: &Hash,
+    ) -> Result<Signature>;
+}
+
+mod bank_client;
+mod rpc_client;
+mod thin_client;
+mod tpu_client;
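The new error enum above leans on thiserror's #[from] attribute: each wrapped source error gets a generated From impl, which is what lets the trait impls below convert with map_err(|err| err.into()) or a plain ?. A minimal sketch of that mechanism (DemoError and read_len are illustrative):

use thiserror::Error;

#[derive(Error, Debug)]
pub enum DemoError {
    #[error("IO error: {0:?}")]
    Io(#[from] std::io::Error),
    #[error("Custom error: {0}")]
    Custom(String),
}

pub type Result<T> = std::result::Result<T, DemoError>;

fn read_len(path: &str) -> Result<usize> {
    // std::io::Error -> DemoError conversion happens implicitly via #[from].
    let bytes = std::fs::read(path)?;
    Ok(bytes.len())
}

fn main() {
    match read_len("/definitely/missing") {
        Ok(n) => println!("{} bytes", n),
        Err(e) => println!("failed: {}", e), // prints the #[error] string
    }
}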
bench-tps/src/bench_tps_client/bank_client.rs (new file, 85 lines)
@@ -0,0 +1,85 @@
+use {
+    crate::bench_tps_client::{BenchTpsClient, BenchTpsError, Result},
+    solana_runtime::bank_client::BankClient,
+    solana_sdk::{
+        client::{AsyncClient, SyncClient},
+        commitment_config::CommitmentConfig,
+        epoch_info::EpochInfo,
+        hash::Hash,
+        message::Message,
+        pubkey::Pubkey,
+        signature::Signature,
+        transaction::Transaction,
+    },
+};
+
+impl BenchTpsClient for BankClient {
+    fn send_transaction(&self, transaction: Transaction) -> Result<Signature> {
+        AsyncClient::async_send_transaction(self, transaction).map_err(|err| err.into())
+    }
+    fn send_batch(&self, transactions: Vec<Transaction>) -> Result<()> {
+        AsyncClient::async_send_batch(self, transactions).map_err(|err| err.into())
+    }
+    fn get_latest_blockhash(&self) -> Result<Hash> {
+        SyncClient::get_latest_blockhash(self).map_err(|err| err.into())
+    }
+
+    fn get_latest_blockhash_with_commitment(
+        &self,
+        commitment_config: CommitmentConfig,
+    ) -> Result<(Hash, u64)> {
+        SyncClient::get_latest_blockhash_with_commitment(self, commitment_config)
+            .map_err(|err| err.into())
+    }
+
+    fn get_transaction_count(&self) -> Result<u64> {
+        SyncClient::get_transaction_count(self).map_err(|err| err.into())
+    }
+
+    fn get_transaction_count_with_commitment(
+        &self,
+        commitment_config: CommitmentConfig,
+    ) -> Result<u64> {
+        SyncClient::get_transaction_count_with_commitment(self, commitment_config)
+            .map_err(|err| err.into())
+    }
+
+    fn get_epoch_info(&self) -> Result<EpochInfo> {
+        SyncClient::get_epoch_info(self).map_err(|err| err.into())
+    }
+
+    fn get_balance(&self, pubkey: &Pubkey) -> Result<u64> {
+        SyncClient::get_balance(self, pubkey).map_err(|err| err.into())
+    }
+
+    fn get_balance_with_commitment(
+        &self,
+        pubkey: &Pubkey,
+        commitment_config: CommitmentConfig,
+    ) -> Result<u64> {
+        SyncClient::get_balance_with_commitment(self, pubkey, commitment_config)
+            .map_err(|err| err.into())
+    }
+
+    fn get_fee_for_message(&self, message: &Message) -> Result<u64> {
+        SyncClient::get_fee_for_message(self, message).map_err(|err| err.into())
+    }
+
+    fn get_minimum_balance_for_rent_exemption(&self, data_len: usize) -> Result<u64> {
+        SyncClient::get_minimum_balance_for_rent_exemption(self, data_len).map_err(|err| err.into())
+    }
+
+    fn addr(&self) -> String {
+        "Local BankClient".to_string()
+    }
+
+    fn request_airdrop_with_blockhash(
+        &self,
+        _pubkey: &Pubkey,
+        _lamports: u64,
+        _recent_blockhash: &Hash,
+    ) -> Result<Signature> {
+        // BankClient doesn't support airdrops
+        Err(BenchTpsError::AirdropFailure)
+    }
+}
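Note the fully qualified calls above (SyncClient::get_balance(self, ...) rather than self.get_balance(...)): BankClient already implements SyncClient and AsyncClient, whose method names collide with BenchTpsClient's, so each call must name the trait it means; a bare self call inside the impl would be rejected as ambiguous. A toy reproduction of the situation (trait and type names are made up):

trait Sdk {
    fn value(&self) -> u64;
}

trait Bench {
    fn value(&self) -> u64;
}

struct Both;

impl Sdk for Both {
    fn value(&self) -> u64 {
        1
    }
}

impl Bench for Both {
    fn value(&self) -> u64 {
        // Fully qualified: delegate to the Sdk impl, not to ourselves.
        Sdk::value(self) + 1
    }
}

fn main() {
    let b = Both;
    assert_eq!(Sdk::value(&b), 1);
    assert_eq!(Bench::value(&b), 2);
}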
bench-tps/src/bench_tps_client/rpc_client.rs (new file, 83 lines)
@@ -0,0 +1,83 @@
+use {
+    crate::bench_tps_client::{BenchTpsClient, Result},
+    solana_client::rpc_client::RpcClient,
+    solana_sdk::{
+        commitment_config::CommitmentConfig, epoch_info::EpochInfo, hash::Hash, message::Message,
+        pubkey::Pubkey, signature::Signature, transaction::Transaction,
+    },
+};
+
+impl BenchTpsClient for RpcClient {
+    fn send_transaction(&self, transaction: Transaction) -> Result<Signature> {
+        RpcClient::send_transaction(self, &transaction).map_err(|err| err.into())
+    }
+    fn send_batch(&self, transactions: Vec<Transaction>) -> Result<()> {
+        for transaction in transactions {
+            BenchTpsClient::send_transaction(self, transaction)?;
+        }
+        Ok(())
+    }
+    fn get_latest_blockhash(&self) -> Result<Hash> {
+        RpcClient::get_latest_blockhash(self).map_err(|err| err.into())
+    }
+
+    fn get_latest_blockhash_with_commitment(
+        &self,
+        commitment_config: CommitmentConfig,
+    ) -> Result<(Hash, u64)> {
+        RpcClient::get_latest_blockhash_with_commitment(self, commitment_config)
+            .map_err(|err| err.into())
+    }
+
+    fn get_transaction_count(&self) -> Result<u64> {
+        RpcClient::get_transaction_count(self).map_err(|err| err.into())
+    }
+
+    fn get_transaction_count_with_commitment(
+        &self,
+        commitment_config: CommitmentConfig,
+    ) -> Result<u64> {
+        RpcClient::get_transaction_count_with_commitment(self, commitment_config)
+            .map_err(|err| err.into())
+    }
+
+    fn get_epoch_info(&self) -> Result<EpochInfo> {
+        RpcClient::get_epoch_info(self).map_err(|err| err.into())
+    }
+
+    fn get_balance(&self, pubkey: &Pubkey) -> Result<u64> {
+        RpcClient::get_balance(self, pubkey).map_err(|err| err.into())
+    }
+
+    fn get_balance_with_commitment(
+        &self,
+        pubkey: &Pubkey,
+        commitment_config: CommitmentConfig,
+    ) -> Result<u64> {
+        RpcClient::get_balance_with_commitment(self, pubkey, commitment_config)
+            .map(|res| res.value)
+            .map_err(|err| err.into())
+    }
+
+    fn get_fee_for_message(&self, message: &Message) -> Result<u64> {
+        RpcClient::get_fee_for_message(self, message).map_err(|err| err.into())
+    }
+
+    fn get_minimum_balance_for_rent_exemption(&self, data_len: usize) -> Result<u64> {
+        RpcClient::get_minimum_balance_for_rent_exemption(self, data_len).map_err(|err| err.into())
+    }
+
+    fn addr(&self) -> String {
+        self.url()
+    }
+
+    fn request_airdrop_with_blockhash(
+        &self,
+        pubkey: &Pubkey,
+        lamports: u64,
+        recent_blockhash: &Hash,
+    ) -> Result<Signature> {
+        RpcClient::request_airdrop_with_blockhash(self, pubkey, lamports, recent_blockhash)
+            .map_err(|err| err.into())
+    }
+}
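One wrinkle above: get_balance_with_commitment maps .value out of the response before returning. RpcClient's commitment-aware getters return the value wrapped together with the slot context it was observed at, while the plain getters return the bare value. A toy model of that wrapper shape (types below are stand-ins, not the solana-client ones):

struct ResponseContext {
    slot: u64,
}

struct Response<T> {
    context: ResponseContext,
    value: T,
}

fn get_balance_with_commitment() -> Result<Response<u64>, String> {
    Ok(Response {
        context: ResponseContext { slot: 1234 },
        value: 5_000,
    })
}

fn main() {
    // Callers that only want the payload discard the context, exactly as the
    // impl above does with .map(|res| res.value).
    let balance = get_balance_with_commitment().map(|res| res.value);
    assert_eq!(balance.unwrap(), 5_000);

    // Callers that care about staleness could read the slot instead.
    let slot = get_balance_with_commitment().map(|res| res.context.slot);
    assert_eq!(slot.unwrap(), 1234);
}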
bench-tps/src/bench_tps_client/thin_client.rs (new file, 86 lines)
@@ -0,0 +1,86 @@
+use {
+    crate::bench_tps_client::{BenchTpsClient, Result},
+    solana_client::thin_client::ThinClient,
+    solana_sdk::{
+        client::{AsyncClient, Client, SyncClient},
+        commitment_config::CommitmentConfig,
+        epoch_info::EpochInfo,
+        hash::Hash,
+        message::Message,
+        pubkey::Pubkey,
+        signature::Signature,
+        transaction::Transaction,
+    },
+};
+
+impl BenchTpsClient for ThinClient {
+    fn send_transaction(&self, transaction: Transaction) -> Result<Signature> {
+        AsyncClient::async_send_transaction(self, transaction).map_err(|err| err.into())
+    }
+    fn send_batch(&self, transactions: Vec<Transaction>) -> Result<()> {
+        AsyncClient::async_send_batch(self, transactions).map_err(|err| err.into())
+    }
+    fn get_latest_blockhash(&self) -> Result<Hash> {
+        SyncClient::get_latest_blockhash(self).map_err(|err| err.into())
+    }
+
+    fn get_latest_blockhash_with_commitment(
+        &self,
+        commitment_config: CommitmentConfig,
+    ) -> Result<(Hash, u64)> {
+        SyncClient::get_latest_blockhash_with_commitment(self, commitment_config)
+            .map_err(|err| err.into())
+    }
+
+    fn get_transaction_count(&self) -> Result<u64> {
+        SyncClient::get_transaction_count(self).map_err(|err| err.into())
+    }
+
+    fn get_transaction_count_with_commitment(
+        &self,
+        commitment_config: CommitmentConfig,
+    ) -> Result<u64> {
+        SyncClient::get_transaction_count_with_commitment(self, commitment_config)
+            .map_err(|err| err.into())
+    }
+
+    fn get_epoch_info(&self) -> Result<EpochInfo> {
+        SyncClient::get_epoch_info(self).map_err(|err| err.into())
+    }
+
+    fn get_balance(&self, pubkey: &Pubkey) -> Result<u64> {
+        SyncClient::get_balance(self, pubkey).map_err(|err| err.into())
+    }
+
+    fn get_balance_with_commitment(
+        &self,
+        pubkey: &Pubkey,
+        commitment_config: CommitmentConfig,
+    ) -> Result<u64> {
+        SyncClient::get_balance_with_commitment(self, pubkey, commitment_config)
+            .map_err(|err| err.into())
+    }
+
+    fn get_fee_for_message(&self, message: &Message) -> Result<u64> {
+        SyncClient::get_fee_for_message(self, message).map_err(|err| err.into())
+    }
+
+    fn get_minimum_balance_for_rent_exemption(&self, data_len: usize) -> Result<u64> {
+        SyncClient::get_minimum_balance_for_rent_exemption(self, data_len).map_err(|err| err.into())
+    }
+
+    fn addr(&self) -> String {
+        Client::tpu_addr(self)
+    }
+
+    fn request_airdrop_with_blockhash(
+        &self,
+        pubkey: &Pubkey,
+        lamports: u64,
+        recent_blockhash: &Hash,
+    ) -> Result<Signature> {
+        self.rpc_client()
+            .request_airdrop_with_blockhash(pubkey, lamports, recent_blockhash)
+            .map_err(|err| err.into())
+    }
+}
bench-tps/src/bench_tps_client/tpu_client.rs (new file, 99 lines)
@@ -0,0 +1,99 @@
+use {
+    crate::bench_tps_client::{BenchTpsClient, Result},
+    solana_client::tpu_client::TpuClient,
+    solana_sdk::{
+        commitment_config::CommitmentConfig, epoch_info::EpochInfo, hash::Hash, message::Message,
+        pubkey::Pubkey, signature::Signature, transaction::Transaction,
+    },
+};
+
+impl BenchTpsClient for TpuClient {
+    fn send_transaction(&self, transaction: Transaction) -> Result<Signature> {
+        let signature = transaction.signatures[0];
+        self.try_send_transaction(&transaction)?;
+        Ok(signature)
+    }
+    fn send_batch(&self, transactions: Vec<Transaction>) -> Result<()> {
+        for transaction in transactions {
+            BenchTpsClient::send_transaction(self, transaction)?;
+        }
+        Ok(())
+    }
+    fn get_latest_blockhash(&self) -> Result<Hash> {
+        self.rpc_client()
+            .get_latest_blockhash()
+            .map_err(|err| err.into())
+    }
+
+    fn get_latest_blockhash_with_commitment(
+        &self,
+        commitment_config: CommitmentConfig,
+    ) -> Result<(Hash, u64)> {
+        self.rpc_client()
+            .get_latest_blockhash_with_commitment(commitment_config)
+            .map_err(|err| err.into())
+    }
+
+    fn get_transaction_count(&self) -> Result<u64> {
+        self.rpc_client()
+            .get_transaction_count()
+            .map_err(|err| err.into())
+    }
+
+    fn get_transaction_count_with_commitment(
+        &self,
+        commitment_config: CommitmentConfig,
+    ) -> Result<u64> {
+        self.rpc_client()
+            .get_transaction_count_with_commitment(commitment_config)
+            .map_err(|err| err.into())
+    }
+
+    fn get_epoch_info(&self) -> Result<EpochInfo> {
+        self.rpc_client().get_epoch_info().map_err(|err| err.into())
+    }
+
+    fn get_balance(&self, pubkey: &Pubkey) -> Result<u64> {
+        self.rpc_client()
+            .get_balance(pubkey)
+            .map_err(|err| err.into())
+    }
+
+    fn get_balance_with_commitment(
+        &self,
+        pubkey: &Pubkey,
+        commitment_config: CommitmentConfig,
+    ) -> Result<u64> {
+        self.rpc_client()
+            .get_balance_with_commitment(pubkey, commitment_config)
+            .map(|res| res.value)
+            .map_err(|err| err.into())
+    }
+
+    fn get_fee_for_message(&self, message: &Message) -> Result<u64> {
+        self.rpc_client()
+            .get_fee_for_message(message)
+            .map_err(|err| err.into())
+    }
+
+    fn get_minimum_balance_for_rent_exemption(&self, data_len: usize) -> Result<u64> {
+        self.rpc_client()
+            .get_minimum_balance_for_rent_exemption(data_len)
+            .map_err(|err| err.into())
+    }
+
+    fn addr(&self) -> String {
+        self.rpc_client().url()
+    }
+
+    fn request_airdrop_with_blockhash(
+        &self,
+        pubkey: &Pubkey,
+        lamports: u64,
+        recent_blockhash: &Hash,
+    ) -> Result<Signature> {
+        self.rpc_client()
+            .request_airdrop_with_blockhash(pubkey, lamports, recent_blockhash)
+            .map_err(|err| err.into())
+    }
+}
@@ -1,6 +1,7 @@
 use {
     clap::{crate_description, crate_name, App, Arg, ArgMatches},
-    solana_faucet::faucet::FAUCET_PORT,
+    solana_clap_utils::input_validators::{is_url, is_url_or_moniker},
+    solana_cli_config::{ConfigInput, CONFIG_FILE},
     solana_sdk::{
         fee_calculator::FeeRateGovernor,
         pubkey::Pubkey,
@@ -11,10 +12,28 @@ use {

 const NUM_LAMPORTS_PER_ACCOUNT_DEFAULT: u64 = solana_sdk::native_token::LAMPORTS_PER_SOL;

+pub enum ExternalClientType {
+    // Submits transactions to an Rpc node using an RpcClient
+    RpcClient,
+    // Submits transactions directly to leaders using a ThinClient, broadcasting to multiple
+    // leaders when num_nodes > 1
+    ThinClient,
+    // Submits transactions directly to leaders using a TpuClient, broadcasting to upcoming leaders
+    // via TpuClient default configuration
+    TpuClient,
+}
+
+impl Default for ExternalClientType {
+    fn default() -> Self {
+        Self::ThinClient
+    }
+}
+
 /// Holds the configuration for a single run of the benchmark
 pub struct Config {
     pub entrypoint_addr: SocketAddr,
-    pub faucet_addr: SocketAddr,
+    pub json_rpc_url: String,
+    pub websocket_url: String,
     pub id: Keypair,
     pub threads: usize,
     pub num_nodes: usize,
@@ -31,13 +50,16 @@ pub struct Config {
     pub num_lamports_per_account: u64,
     pub target_slots_per_epoch: u64,
     pub target_node: Option<Pubkey>,
+    pub external_client_type: ExternalClientType,
+    pub use_quic: bool,
 }

 impl Default for Config {
     fn default() -> Config {
         Config {
             entrypoint_addr: SocketAddr::from(([127, 0, 0, 1], 8001)),
-            faucet_addr: SocketAddr::from(([127, 0, 0, 1], FAUCET_PORT)),
+            json_rpc_url: ConfigInput::default().json_rpc_url,
+            websocket_url: ConfigInput::default().websocket_url,
             id: Keypair::new(),
             threads: 4,
             num_nodes: 1,
@@ -54,6 +76,8 @@ impl Default for Config {
             num_lamports_per_account: NUM_LAMPORTS_PER_ACCOUNT_DEFAULT,
             target_slots_per_epoch: 0,
             target_node: None,
+            external_client_type: ExternalClientType::default(),
+            use_quic: false,
         }
     }
 }
@@ -62,6 +86,42 @@ impl Default for Config {
 pub fn build_args<'a, 'b>(version: &'b str) -> App<'a, 'b> {
     App::new(crate_name!()).about(crate_description!())
         .version(version)
+        .arg({
+            let arg = Arg::with_name("config_file")
+                .short("C")
+                .long("config")
+                .value_name("FILEPATH")
+                .takes_value(true)
+                .global(true)
+                .help("Configuration file to use");
+            if let Some(ref config_file) = *CONFIG_FILE {
+                arg.default_value(config_file)
+            } else {
+                arg
+            }
+        })
+        .arg(
+            Arg::with_name("json_rpc_url")
+                .short("u")
+                .long("url")
+                .value_name("URL_OR_MONIKER")
+                .takes_value(true)
+                .global(true)
+                .validator(is_url_or_moniker)
+                .help(
+                    "URL for Solana's JSON RPC or moniker (or their first letter): \
+                       [mainnet-beta, testnet, devnet, localhost]",
+                ),
+        )
+        .arg(
+            Arg::with_name("websocket_url")
+                .long("ws")
+                .value_name("URL")
+                .takes_value(true)
+                .global(true)
+                .validator(is_url)
+                .help("WebSocket URL for the solana cluster"),
+        )
         .arg(
             Arg::with_name("entrypoint")
                 .short("n")
@@ -76,7 +136,8 @@ pub fn build_args<'a, 'b>(version: &'b str) -> App<'a, 'b> {
                 .long("faucet")
                 .value_name("HOST:PORT")
                 .takes_value(true)
-                .help("Location of the faucet; defaults to entrypoint:FAUCET_PORT"),
+                .hidden(true)
+                .help("Deprecated. BenchTps no longer queries the faucet directly"),
         )
         .arg(
             Arg::with_name("identity")
@@ -191,6 +252,27 @@ pub fn build_args<'a, 'b>(version: &'b str) -> App<'a, 'b> {
                     "Wait until epochs are this many slots long.",
                 ),
         )
+        .arg(
+            Arg::with_name("rpc_client")
+                .long("use-rpc-client")
+                .conflicts_with("tpu_client")
+                .takes_value(false)
+                .help("Submit transactions with a RpcClient")
+        )
+        .arg(
+            Arg::with_name("tpu_client")
+                .long("use-tpu-client")
+                .conflicts_with("rpc_client")
+                .takes_value(false)
+                .help("Submit transactions with a TpuClient")
+        )
+        .arg(
+            Arg::with_name("tpu_use_quic")
+                .long("tpu-use-quic")
+                .takes_value(false)
+                .help("Submit transactions via QUIC; only affects ThinClient (default) \
+                    or TpuClient sends"),
+        )
 }

 /// Parses a clap `ArgMatches` structure into a `Config`
@@ -201,6 +283,45 @@ pub fn build_args<'a, 'b>(version: &'b str) -> App<'a, 'b> {
 pub fn extract_args(matches: &ArgMatches) -> Config {
     let mut args = Config::default();

+    let config = if let Some(config_file) = matches.value_of("config_file") {
+        solana_cli_config::Config::load(config_file).unwrap_or_default()
+    } else {
+        solana_cli_config::Config::default()
+    };
+    let (_, json_rpc_url) = ConfigInput::compute_json_rpc_url_setting(
+        matches.value_of("json_rpc_url").unwrap_or(""),
+        &config.json_rpc_url,
+    );
+    args.json_rpc_url = json_rpc_url;
+
+    let (_, websocket_url) = ConfigInput::compute_websocket_url_setting(
+        matches.value_of("websocket_url").unwrap_or(""),
+        &config.websocket_url,
+        matches.value_of("json_rpc_url").unwrap_or(""),
+        &config.json_rpc_url,
+    );
+    args.websocket_url = websocket_url;
+
+    let (_, id_path) = ConfigInput::compute_keypair_path_setting(
+        matches.value_of("identity").unwrap_or(""),
+        &config.keypair_path,
+    );
+    if let Ok(id) = read_keypair_file(id_path) {
+        args.id = id;
+    } else if matches.is_present("identity") {
+        panic!("could not parse identity path");
+    }
+
+    if matches.is_present("tpu_client") {
+        args.external_client_type = ExternalClientType::TpuClient;
+    } else if matches.is_present("rpc_client") {
+        args.external_client_type = ExternalClientType::RpcClient;
+    }
+
+    if matches.is_present("tpu_use_quic") {
+        args.use_quic = true;
+    }
+
     if let Some(addr) = matches.value_of("entrypoint") {
         args.entrypoint_addr = solana_net_utils::parse_host_port(addr).unwrap_or_else(|e| {
             eprintln!("failed to parse entrypoint address: {}", e);
@@ -208,18 +329,6 @@ pub fn extract_args(matches: &ArgMatches) -> Config {
             exit(1)
         });
     }

-    if let Some(addr) = matches.value_of("faucet") {
-        args.faucet_addr = solana_net_utils::parse_host_port(addr).unwrap_or_else(|e| {
-            eprintln!("failed to parse faucet address: {}", e);
-            exit(1)
-        });
-    }
-
-    if matches.is_present("identity") {
-        args.id = read_keypair_file(matches.value_of("identity").unwrap())
-            .expect("can't read client identity");
-    }
-
     if let Some(t) = matches.value_of("threads") {
         args.threads = t.to_string().parse().expect("can't parse threads");
     }
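The three new flags above use conflicts_with to make --use-rpc-client and --use-tpu-client mutually exclusive at parse time, with ThinClient remaining the default when neither is present. A trimmed sketch of the same pattern in the clap 2.x style this file uses (the program name is made up):

use clap::{App, Arg};

fn main() {
    let matches = App::new("demo")
        .arg(
            Arg::with_name("rpc_client")
                .long("use-rpc-client")
                .conflicts_with("tpu_client")
                .takes_value(false),
        )
        .arg(
            Arg::with_name("tpu_client")
                .long("use-tpu-client")
                .conflicts_with("rpc_client")
                .takes_value(false),
        )
        .get_matches();

    // Mirrors extract_args above: flags override the ThinClient default.
    if matches.is_present("tpu_client") {
        println!("using TpuClient");
    } else if matches.is_present("rpc_client") {
        println!("using RpcClient");
    } else {
        println!("using ThinClient (default)");
    }
}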
bench-tps/src/keypairs.rs (new file, 72 lines)
@@ -0,0 +1,72 @@
+use {
+    crate::{
+        bench::{fund_keypairs, generate_and_fund_keypairs},
+        bench_tps_client::BenchTpsClient,
+    },
+    log::*,
+    solana_genesis::Base64Account,
+    solana_sdk::signature::{Keypair, Signer},
+    std::{collections::HashMap, fs::File, path::Path, process::exit, sync::Arc},
+};
+
+pub fn get_keypairs<T>(
+    client: Arc<T>,
+    id: &Keypair,
+    keypair_count: usize,
+    num_lamports_per_account: u64,
+    client_ids_and_stake_file: &str,
+    read_from_client_file: bool,
+) -> Vec<Keypair>
+where
+    T: 'static + BenchTpsClient + Send + Sync,
+{
+    if read_from_client_file {
+        let path = Path::new(client_ids_and_stake_file);
+        let file = File::open(path).unwrap();
+
+        info!("Reading {}", client_ids_and_stake_file);
+        let accounts: HashMap<String, Base64Account> = serde_yaml::from_reader(file).unwrap();
+        let mut keypairs = vec![];
+        let mut last_balance = 0;
+
+        accounts
+            .into_iter()
+            .for_each(|(keypair, primordial_account)| {
+                let bytes: Vec<u8> = serde_json::from_str(keypair.as_str()).unwrap();
+                keypairs.push(Keypair::from_bytes(&bytes).unwrap());
+                last_balance = primordial_account.balance;
+            });
+
+        if keypairs.len() < keypair_count {
+            eprintln!(
+                "Expected {} accounts in {}, only received {} (--tx_count mismatch?)",
+                keypair_count,
+                client_ids_and_stake_file,
+                keypairs.len(),
+            );
+            exit(1);
+        }
+        // Sort keypairs so that do_bench_tps() uses the same subset of accounts for each run.
+        // This prevents the amount of storage needed for bench-tps accounts from creeping up
+        // across multiple runs.
+        keypairs.sort_by_key(|x| x.pubkey().to_string());
+        fund_keypairs(
+            client,
+            id,
+            &keypairs,
+            keypairs.len().saturating_sub(keypair_count) as u64,
+            last_balance,
+        )
+        .unwrap_or_else(|e| {
+            eprintln!("Error could not fund keys: {:?}", e);
+            exit(1);
+        });
+        keypairs
+    } else {
+        generate_and_fund_keypairs(client, id, keypair_count, num_lamports_per_account)
+            .unwrap_or_else(|e| {
+                eprintln!("Error could not fund keys: {:?}", e);
+                exit(1);
+            })
+    }
+}
@@ -1,3 +1,6 @@
 #![allow(clippy::integer_arithmetic)]
 pub mod bench;
+pub mod bench_tps_client;
 pub mod cli;
+pub mod keypairs;
+mod perf_utils;
@@ -2,15 +2,19 @@
 use {
     log::*,
     solana_bench_tps::{
-        bench::{do_bench_tps, generate_and_fund_keypairs, generate_keypairs},
-        cli,
+        bench::{do_bench_tps, generate_keypairs},
+        cli::{self, ExternalClientType},
+        keypairs::get_keypairs,
     },
+    solana_client::{
+        connection_cache,
+        rpc_client::RpcClient,
+        tpu_client::{TpuClient, TpuClientConfig},
+    },
     solana_genesis::Base64Account,
     solana_gossip::gossip_service::{discover_cluster, get_client, get_multi_client},
     solana_sdk::{
-        fee_calculator::FeeRateGovernor,
-        signature::{Keypair, Signer},
-        system_program,
+        commitment_config::CommitmentConfig, fee_calculator::FeeRateGovernor, system_program,
     },
     solana_streamer::socket::SocketAddrSpace,
     std::{collections::HashMap, fs::File, io::prelude::*, path::Path, process::exit, sync::Arc},
@@ -21,14 +25,15 @@ pub const NUM_SIGNATURES_FOR_TXS: u64 = 100_000 * 60 * 60 * 24 * 7;

 fn main() {
     solana_logger::setup_with_default("solana=info");
-    solana_metrics::set_panic_hook("bench-tps");
+    solana_metrics::set_panic_hook("bench-tps", /*version:*/ None);

     let matches = cli::build_args(solana_version::version!()).get_matches();
     let cli_config = cli::extract_args(&matches);

     let cli::Config {
         entrypoint_addr,
-        faucet_addr,
+        json_rpc_url,
+        websocket_url,
         id,
         num_nodes,
         tx_count,
@@ -40,6 +45,8 @@ fn main() {
         multi_client,
         num_lamports_per_account,
         target_node,
+        external_client_type,
+        use_quic,
         ..
     } = &cli_config;

@@ -75,83 +82,93 @@ fn main() {
     }

     info!("Connecting to the cluster");
-    let nodes = discover_cluster(entrypoint_addr, *num_nodes, SocketAddrSpace::Unspecified)
-        .unwrap_or_else(|err| {
-            eprintln!("Failed to discover {} nodes: {:?}", num_nodes, err);
-            exit(1);
-        });
-
-    let client = if *multi_client {
-        let (client, num_clients) = get_multi_client(&nodes, &SocketAddrSpace::Unspecified);
-        if nodes.len() < num_clients {
-            eprintln!(
-                "Error: Insufficient nodes discovered. Expecting {} or more",
-                num_nodes
-            );
-            exit(1);
-        }
-        Arc::new(client)
-    } else if let Some(target_node) = target_node {
-        info!("Searching for target_node: {:?}", target_node);
-        let mut target_client = None;
-        for node in nodes {
-            if node.id == *target_node {
-                target_client = Some(Arc::new(get_client(&[node], &SocketAddrSpace::Unspecified)));
-                break;
-            }
-        }
-        target_client.unwrap_or_else(|| {
-            eprintln!("Target node {} not found", target_node);
-            exit(1);
-        })
-    } else {
-        Arc::new(get_client(&nodes, &SocketAddrSpace::Unspecified))
-    };
-
-    let keypairs = if *read_from_client_file {
-        let path = Path::new(&client_ids_and_stake_file);
-        let file = File::open(path).unwrap();
-
-        info!("Reading {}", client_ids_and_stake_file);
-        let accounts: HashMap<String, Base64Account> = serde_yaml::from_reader(file).unwrap();
-        let mut keypairs = vec![];
-        let mut last_balance = 0;
-
-        accounts
-            .into_iter()
-            .for_each(|(keypair, primordial_account)| {
-                let bytes: Vec<u8> = serde_json::from_str(keypair.as_str()).unwrap();
-                keypairs.push(Keypair::from_bytes(&bytes).unwrap());
-                last_balance = primordial_account.balance;
-            });
-
-        if keypairs.len() < keypair_count {
-            eprintln!(
-                "Expected {} accounts in {}, only received {} (--tx_count mismatch?)",
-                keypair_count,
-                client_ids_and_stake_file,
-                keypairs.len(),
-            );
-            exit(1);
-        }
-        // Sort keypairs so that do_bench_tps() uses the same subset of accounts for each run.
-        // This prevents the amount of storage needed for bench-tps accounts from creeping up
-        // across multiple runs.
-        keypairs.sort_by_key(|x| x.pubkey().to_string());
-        keypairs
-    } else {
-        generate_and_fund_keypairs(
-            client.clone(),
-            Some(*faucet_addr),
-            id,
-            keypair_count,
-            *num_lamports_per_account,
-        )
-        .unwrap_or_else(|e| {
-            eprintln!("Error could not fund keys: {:?}", e);
-            exit(1);
-        })
-    };
-
-    do_bench_tps(client, cli_config, keypairs);
+    match external_client_type {
+        ExternalClientType::RpcClient => {
+            let client = Arc::new(RpcClient::new_with_commitment(
+                json_rpc_url.to_string(),
+                CommitmentConfig::confirmed(),
+            ));
+            let keypairs = get_keypairs(
+                client.clone(),
+                id,
+                keypair_count,
+                *num_lamports_per_account,
+                client_ids_and_stake_file,
+                *read_from_client_file,
+            );
+            do_bench_tps(client, cli_config, keypairs);
+        }
+        ExternalClientType::ThinClient => {
+            let nodes = discover_cluster(entrypoint_addr, *num_nodes, SocketAddrSpace::Unspecified)
+                .unwrap_or_else(|err| {
+                    eprintln!("Failed to discover {} nodes: {:?}", num_nodes, err);
+                    exit(1);
+                });
+            if *use_quic {
+                connection_cache::set_use_quic(true);
+            }
+            let client = if *multi_client {
+                let (client, num_clients) = get_multi_client(&nodes, &SocketAddrSpace::Unspecified);
+                if nodes.len() < num_clients {
+                    eprintln!(
+                        "Error: Insufficient nodes discovered. Expecting {} or more",
+                        num_nodes
+                    );
+                    exit(1);
+                }
+                Arc::new(client)
+            } else if let Some(target_node) = target_node {
+                info!("Searching for target_node: {:?}", target_node);
+                let mut target_client = None;
+                for node in nodes {
+                    if node.id == *target_node {
+                        target_client =
+                            Some(Arc::new(get_client(&[node], &SocketAddrSpace::Unspecified)));
+                        break;
+                    }
+                }
+                target_client.unwrap_or_else(|| {
+                    eprintln!("Target node {} not found", target_node);
+                    exit(1);
+                })
+            } else {
+                Arc::new(get_client(&nodes, &SocketAddrSpace::Unspecified))
+            };
+            let keypairs = get_keypairs(
+                client.clone(),
+                id,
+                keypair_count,
+                *num_lamports_per_account,
+                client_ids_and_stake_file,
+                *read_from_client_file,
+            );
+            do_bench_tps(client, cli_config, keypairs);
+        }
+        ExternalClientType::TpuClient => {
+            let rpc_client = Arc::new(RpcClient::new_with_commitment(
+                json_rpc_url.to_string(),
+                CommitmentConfig::confirmed(),
+            ));
+            if *use_quic {
+                connection_cache::set_use_quic(true);
+            }
+            let client = Arc::new(
+                TpuClient::new(rpc_client, websocket_url, TpuClientConfig::default())
+                    .unwrap_or_else(|err| {
+                        eprintln!("Could not create TpuClient {:?}", err);
+                        exit(1);
+                    }),
+            );
+            let keypairs = get_keypairs(
+                client.clone(),
+                id,
+                keypair_count,
+                *num_lamports_per_account,
+                client_ids_and_stake_file,
+                *read_from_client_file,
+            );
+            do_bench_tps(client, cli_config, keypairs);
+        }
+    }
 }
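The rewritten main() dispatches on ExternalClientType, and each arm builds its own concrete client before calling the same generic entry point, so do_bench_tps is monomorphized per client type and no Box<dyn ...> indirection is needed. The shape, reduced to stand-in types:

use std::sync::Arc;

trait Client {
    fn describe(&self) -> String;
}

struct RpcLike;
struct TpuLike;

impl Client for RpcLike {
    fn describe(&self) -> String {
        "rpc".to_string()
    }
}

impl Client for TpuLike {
    fn describe(&self) -> String {
        "tpu".to_string()
    }
}

enum ClientType {
    Rpc,
    Tpu,
}

// Generic like do_bench_tps: one copy is compiled per concrete T.
fn run_bench<T: 'static + Client + Send + Sync>(client: Arc<T>) {
    println!("benching over {}", client.describe());
}

fn main() {
    match ClientType::Tpu {
        // Each arm owns its construction; the Arc<T> type differs per arm.
        ClientType::Rpc => run_bench(Arc::new(RpcLike)),
        ClientType::Tpu => run_bench(Arc::new(TpuLike)),
    }
}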
@@ -1,6 +1,7 @@
use {
crate::bench_tps_client::BenchTpsClient,
log::*,
solana_sdk::{client::Client, commitment_config::CommitmentConfig, timing::duration_as_s},
solana_sdk::{commitment_config::CommitmentConfig, timing::duration_as_s},
std::{
sync::{
atomic::{AtomicBool, Ordering},
@@ -27,7 +28,7 @@ pub fn sample_txs<T>(
sample_period: u64,
client: &Arc<T>,
) where
T: Client,
T: BenchTpsClient,
{
let mut max_tps = 0.0;
let mut total_elapsed;
@@ -43,18 +44,16 @@ pub fn sample_txs<T>(
total_elapsed = start_time.elapsed();
let elapsed = now.elapsed();
now = Instant::now();
let mut txs;
match client.get_transaction_count_with_commitment(CommitmentConfig::processed()) {
Err(e) => {
// ThinClient with multiple options should pick a better one now.
info!("Couldn't get transaction count {:?}", e);
sleep(Duration::from_secs(sample_period));
continue;
}
Ok(tx_count) => {
txs = tx_count;
}
}
let mut txs =
match client.get_transaction_count_with_commitment(CommitmentConfig::processed()) {
Err(e) => {
// ThinClient with multiple options should pick a better one now.
info!("Couldn't get transaction count {:?}", e);
sleep(Duration::from_secs(sample_period));
continue;
}
Ok(tx_count) => tx_count,
};

if txs < last_txs {
info!("Expected txs({}) >= last_txs({})", txs, last_txs);
@@ -83,10 +82,7 @@ pub fn sample_txs<T>(
elapsed: total_elapsed,
txs: total_txs,
};
sample_stats
.write()
.unwrap()
.push((client.tpu_addr(), stats));
sample_stats.write().unwrap().push((client.addr(), stats));
return;
}
sleep(Duration::from_secs(sample_period));
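For reference, the arithmetic this sampler performs each window reduces to a transaction-count delta over elapsed time. A hedged, self-contained sketch (names are illustrative, not the crate's API):

// Sketch: per-window TPS from two cumulative transaction counts.
fn window_tps(txs: u64, last_txs: u64, elapsed_secs: f32) -> f32 {
    // Guard against a count that went backwards (e.g. a different node answered).
    let sample = txs.saturating_sub(last_txs);
    if elapsed_secs > 0.0 {
        sample as f32 / elapsed_secs
    } else {
        0.0
    }
}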
@@ -1,37 +1,64 @@
#![allow(clippy::integer_arithmetic)]
use {
crossbeam_channel::unbounded,
serial_test::serial,
solana_bench_tps::{
bench::{do_bench_tps, generate_and_fund_keypairs},
cli::Config,
},
solana_client::thin_client::create_client,
solana_client::{
rpc_client::RpcClient,
thin_client::create_client,
tpu_client::{TpuClient, TpuClientConfig},
},
solana_core::validator::ValidatorConfig,
solana_faucet::faucet::run_local_faucet_with_port,
solana_gossip::cluster_info::VALIDATOR_PORT_RANGE,
solana_faucet::faucet::{run_local_faucet, run_local_faucet_with_port},
solana_local_cluster::{
local_cluster::{ClusterConfig, LocalCluster},
validator_configs::make_identical_validator_configs,
},
solana_sdk::signature::{Keypair, Signer},
solana_streamer::socket::SocketAddrSpace,
std::{
sync::{mpsc::channel, Arc},
time::Duration,
solana_rpc::rpc::JsonRpcConfig,
solana_sdk::{
commitment_config::CommitmentConfig,
signature::{Keypair, Signer},
},
solana_streamer::socket::SocketAddrSpace,
solana_test_validator::TestValidator,
std::{sync::Arc, time::Duration},
};

fn test_bench_tps_local_cluster(config: Config) {
let native_instruction_processors = vec![];

solana_logger::setup();

let faucet_keypair = Keypair::new();
let faucet_pubkey = faucet_keypair.pubkey();
let (addr_sender, addr_receiver) = unbounded();
run_local_faucet_with_port(faucet_keypair, addr_sender, None, 0);
let faucet_addr = addr_receiver
.recv_timeout(Duration::from_secs(2))
.expect("run_local_faucet")
.expect("faucet_addr");

const NUM_NODES: usize = 1;
let mut validator_config = ValidatorConfig::default_for_test();
validator_config.rpc_config = JsonRpcConfig {
faucet_addr: Some(faucet_addr),
..JsonRpcConfig::default_for_test()
};
let cluster = LocalCluster::new(
&mut ClusterConfig {
node_stakes: vec![999_990; NUM_NODES],
cluster_lamports: 200_000_000,
validator_configs: make_identical_validator_configs(
&ValidatorConfig::default(),
&ValidatorConfig {
rpc_config: JsonRpcConfig {
faucet_addr: Some(faucet_addr),
..JsonRpcConfig::default_for_test()
},
..ValidatorConfig::default_for_test()
},
NUM_NODES,
),
native_instruction_processors,
@@ -40,31 +67,55 @@ fn test_bench_tps_local_cluster(config: Config) {
SocketAddrSpace::Unspecified,
);

let faucet_keypair = Keypair::new();
cluster.transfer(
&cluster.funding_keypair,
&faucet_keypair.pubkey(),
100_000_000,
);
cluster.transfer(&cluster.funding_keypair, &faucet_pubkey, 100_000_000);

let client = Arc::new(create_client(
(cluster.entry_point_info.rpc, cluster.entry_point_info.tpu),
VALIDATOR_PORT_RANGE,
cluster.entry_point_info.rpc,
cluster.entry_point_info.tpu,
));

let (addr_sender, addr_receiver) = channel();
run_local_faucet_with_port(faucet_keypair, addr_sender, None, 0);
let faucet_addr = addr_receiver
.recv_timeout(Duration::from_secs(2))
.expect("run_local_faucet")
.expect("faucet_addr");

let lamports_per_account = 100;

let keypair_count = config.tx_count * config.keypair_multiplier;
let keypairs = generate_and_fund_keypairs(
client.clone(),
Some(faucet_addr),
&config.id,
keypair_count,
lamports_per_account,
)
.unwrap();

let _total = do_bench_tps(client, config, keypairs);

#[cfg(not(debug_assertions))]
assert!(_total > 100);
}

fn test_bench_tps_test_validator(config: Config) {
solana_logger::setup();

let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();

let faucet_addr = run_local_faucet(mint_keypair, None);

let test_validator =
TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified);

let rpc_client = Arc::new(RpcClient::new_with_commitment(
test_validator.rpc_url(),
CommitmentConfig::processed(),
));
let websocket_url = test_validator.rpc_pubsub_url();

let client =
Arc::new(TpuClient::new(rpc_client, &websocket_url, TpuClientConfig::default()).unwrap());

let lamports_per_account = 100;

let keypair_count = config.tx_count * config.keypair_multiplier;
let keypairs = generate_and_fund_keypairs(
client.clone(),
&config.id,
keypair_count,
lamports_per_account,
@@ -86,3 +137,13 @@ fn test_bench_tps_local_cluster_solana() {
..Config::default()
});
}

#[test]
#[serial]
fn test_bench_tps_tpu_client() {
test_bench_tps_test_validator(Config {
tx_count: 100,
duration: Duration::from_secs(10),
..Config::default()
});
}

bloom/Cargo.toml (Normal file, 32 lines)
@@ -0,0 +1,32 @@
[package]
name = "solana-bloom"
version = "1.11.0"
description = "Solana bloom filter"
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
documentation = "https://docs.rs/solana-bloom"
edition = "2021"

[dependencies]
bv = { version = "0.11.1", features = ["serde"] }
fnv = "1.0.7"
log = "0.4.14"
rand = "0.7.0"
rayon = "1.5.1"
serde = { version = "1.0.136", features = ["rc"] }
serde_derive = "1.0.103"
solana-frozen-abi = { path = "../frozen-abi", version = "=1.11.0" }
solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.11.0" }
solana-sdk = { path = "../sdk", version = "=1.11.0" }

[lib]
crate-type = ["lib"]
name = "solana_bloom"

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

[build-dependencies]
rustc_version = "0.4"
@@ -5,7 +5,7 @@ use {
bv::BitVec,
fnv::FnvHasher,
rand::Rng,
solana_runtime::bloom::{AtomicBloom, Bloom, BloomHashIndex},
solana_bloom::bloom::{AtomicBloom, Bloom, BloomHashIndex},
solana_sdk::{
hash::{hash, Hash},
signature::Signature,
bloom/build.rs (Symbolic link, 1 line)
@@ -0,0 +1 @@
../frozen-abi/build.rs
@@ -101,7 +101,7 @@ impl<T: BloomHashIndex> Bloom<T> {
}
}
fn pos(&self, key: &T, k: u64) -> u64 {
key.hash_at_index(k) % self.bits.len()
key.hash_at_index(k).wrapping_rem(self.bits.len())
}
pub fn clear(&mut self) {
self.bits = BitVec::new_fill(false, self.bits.len());
@@ -111,7 +111,7 @@ impl<T: BloomHashIndex> Bloom<T> {
for k in &self.keys {
let pos = self.pos(key, *k);
if !self.bits.get(pos) {
self.num_bits_set += 1;
self.num_bits_set = self.num_bits_set.saturating_add(1);
self.bits.set(pos, true);
}
}
@@ -164,21 +164,26 @@ impl<T: BloomHashIndex> From<Bloom<T>> for AtomicBloom<T> {

impl<T: BloomHashIndex> AtomicBloom<T> {
fn pos(&self, key: &T, hash_index: u64) -> (usize, u64) {
let pos = key.hash_at_index(hash_index) % self.num_bits;
let pos = key.hash_at_index(hash_index).wrapping_rem(self.num_bits);
// Divide by 64 to figure out which of the
// AtomicU64 bit chunks we need to modify.
let index = pos >> 6;
let index = pos.wrapping_shr(6);
// (pos & 63) is equivalent to mod 64 so that we can find
// the index of the bit within the AtomicU64 to modify.
let mask = 1u64 << (pos & 63);
let mask = 1u64.wrapping_shl(u32::try_from(pos & 63).unwrap());
(index as usize, mask)
}

pub fn add(&self, key: &T) {
/// Adds an item to the bloom filter and returns true if the item
/// was not in the filter before.
pub fn add(&self, key: &T) -> bool {
let mut added = false;
for k in &self.keys {
let (index, mask) = self.pos(key, *k);
self.bits[index].fetch_or(mask, Ordering::Relaxed);
let prev_val = self.bits[index].fetch_or(mask, Ordering::Relaxed);
added = added || prev_val & mask == 0u64;
}
added
}

pub fn contains(&self, key: &T) -> bool {
@@ -189,6 +194,12 @@ impl<T: BloomHashIndex> AtomicBloom<T> {
})
}

pub fn clear_for_tests(&mut self) {
self.bits.iter().for_each(|bit| {
bit.store(0u64, Ordering::Relaxed);
});
}

// Only for tests and simulations.
pub fn mock_clone(&self) -> Self {
Self {
@@ -320,7 +331,9 @@ mod test {
assert_eq!(bloom.keys.len(), 3);
assert_eq!(bloom.num_bits, 6168);
assert_eq!(bloom.bits.len(), 97);
hash_values.par_iter().for_each(|v| bloom.add(v));
hash_values.par_iter().for_each(|v| {
bloom.add(v);
});
let bloom: Bloom<Hash> = bloom.into();
assert_eq!(bloom.keys.len(), 3);
assert_eq!(bloom.bits.len(), 6168);
@@ -362,7 +375,9 @@ mod test {
}
// Round trip, re-inserting the same hash values.
let bloom: AtomicBloom<_> = bloom.into();
hash_values.par_iter().for_each(|v| bloom.add(v));
hash_values.par_iter().for_each(|v| {
bloom.add(v);
});
for hash_value in &hash_values {
assert!(bloom.contains(hash_value));
}
@@ -380,7 +395,9 @@ mod test {
let bloom: AtomicBloom<_> = bloom.into();
assert_eq!(bloom.num_bits, 9731);
assert_eq!(bloom.bits.len(), (9731 + 63) / 64);
more_hash_values.par_iter().for_each(|v| bloom.add(v));
more_hash_values.par_iter().for_each(|v| {
bloom.add(v);
});
for hash_value in &hash_values {
assert!(bloom.contains(hash_value));
}
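A quick worked example of the chunk/bit split in AtomicBloom::pos above: bit position 200 lives in AtomicU64 chunk 200 >> 6 == 3, at bit 200 & 63 == 8 within that chunk, i.e. mask 1u64 << 8. A self-contained check of the same arithmetic:

// Sketch: map a global bit position to (chunk index, bit mask).
fn split_bit_pos(pos: u64) -> (usize, u64) {
    let index = (pos >> 6) as usize; // divide by 64: which u64 chunk
    let mask = 1u64 << (pos & 63); // mod 64: which bit inside that chunk
    (index, mask)
}

fn main() {
    assert_eq!(split_bit_pos(200), (3, 1u64 << 8));
}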
bloom/src/lib.rs (Normal file, 5 lines)
@@ -0,0 +1,5 @@
#![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(min_specialization))]
pub mod bloom;

#[macro_use]
extern crate solana_frozen_abi_macro;
@@ -1,6 +1,6 @@
[package]
name = "solana-bucket-map"
version = "1.9.4"
version = "1.11.0"
description = "solana-bucket-map"
homepage = "https://solana.com/"
documentation = "https://docs.rs/solana-bucket-map"
@@ -11,15 +11,18 @@ license = "Apache-2.0"
edition = "2021"

[dependencies]
rayon = "1.5.0"
solana-logger = { path = "../logger", version = "=1.9.4" }
solana-sdk = { path = "../sdk", version = "=1.9.4" }
memmap2 = "0.5.0"
log = { version = "0.4.11" }
solana-measure = { path = "../measure", version = "=1.9.4" }
memmap2 = "0.5.3"
modular-bitfield = "0.11.2"
rand = "0.7.0"
solana-measure = { path = "../measure", version = "=1.11.0" }
solana-sdk = { path = "../sdk", version = "=1.11.0" }
tempfile = "3.3.0"

[dev-dependencies]
fs_extra = "1.2.0"
tempfile = "3.2.0"
rayon = "1.5.0"
solana-logger = { path = "../logger", version = "=1.11.0" }

[lib]
crate-type = ["lib"]

@@ -3,7 +3,7 @@ use {
bucket_item::BucketItem,
bucket_map::BucketMapError,
bucket_stats::BucketMapStats,
bucket_storage::{BucketStorage, Uid, DEFAULT_CAPACITY_POW2, UID_UNLOCKED},
bucket_storage::{BucketStorage, Uid, DEFAULT_CAPACITY_POW2},
index_entry::IndexEntry,
MaxSearch, RefCount,
},
@@ -17,7 +17,7 @@ use {
ops::RangeBounds,
path::PathBuf,
sync::{
atomic::{AtomicUsize, Ordering},
atomic::{AtomicU64, AtomicUsize, Ordering},
Arc, Mutex,
},
},
@@ -81,6 +81,7 @@ impl<T: Clone + Copy> Bucket<T> {
drives: Arc<Vec<PathBuf>>,
max_search: MaxSearch,
stats: Arc<BucketMapStats>,
count: Arc<AtomicU64>,
) -> Self {
let index = BucketStorage::new(
Arc::clone(&drives),
@@ -88,6 +89,7 @@ impl<T: Clone + Copy> Bucket<T> {
std::mem::size_of::<IndexEntry>() as u64,
max_search,
Arc::clone(&stats.index),
count,
);
Self {
random: thread_rng().gen(),
@@ -100,14 +102,10 @@ impl<T: Clone + Copy> Bucket<T> {
}
}

pub fn bucket_len(&self) -> u64 {
self.index.used.load(Ordering::Relaxed)
}

pub fn keys(&self) -> Vec<Pubkey> {
let mut rv = vec![];
for i in 0..self.index.capacity() {
if self.index.uid(i) == UID_UNLOCKED {
if self.index.is_free(i) {
continue;
}
let ix: &IndexEntry = self.index.get(i);
@@ -120,10 +118,10 @@ impl<T: Clone + Copy> Bucket<T> {
where
R: RangeBounds<Pubkey>,
{
let mut result = Vec::with_capacity(self.index.used.load(Ordering::Relaxed) as usize);
let mut result = Vec::with_capacity(self.index.count.load(Ordering::Relaxed) as usize);
for i in 0..self.index.capacity() {
let ii = i % self.index.capacity();
if self.index.uid(ii) == UID_UNLOCKED {
if self.index.is_free(ii) {
continue;
}
let ix: &IndexEntry = self.index.get(ii);
@@ -156,7 +154,7 @@ impl<T: Clone + Copy> Bucket<T> {
let ix = Self::bucket_index_ix(index, key, random);
for i in ix..ix + index.max_search() {
let ii = i % index.capacity();
if index.uid(ii) == UID_UNLOCKED {
if index.is_free(ii) {
continue;
}
let elem: &mut IndexEntry = index.get_mut(ii);
@@ -175,7 +173,7 @@ impl<T: Clone + Copy> Bucket<T> {
let ix = Self::bucket_index_ix(index, key, random);
for i in ix..ix + index.max_search() {
let ii = i % index.capacity();
if index.uid(ii) == UID_UNLOCKED {
if index.is_free(ii) {
continue;
}
let elem: &IndexEntry = index.get(ii);
@@ -187,26 +185,23 @@ impl<T: Clone + Copy> Bucket<T> {
}

fn bucket_create_key(
index: &BucketStorage,
index: &mut BucketStorage,
key: &Pubkey,
elem_uid: Uid,
random: u64,
is_resizing: bool,
) -> Result<u64, BucketMapError> {
let ix = Self::bucket_index_ix(index, key, random);
for i in ix..ix + index.max_search() {
let ii = i as u64 % index.capacity();
if index.uid(ii) != UID_UNLOCKED {
if !index.is_free(ii) {
continue;
}
index.allocate(ii, elem_uid).unwrap();
let mut elem: &mut IndexEntry = index.get_mut(ii);
elem.key = *key;
// These will be overwritten after allocation by callers.
index.allocate(ii, elem_uid, is_resizing).unwrap();
let elem: &mut IndexEntry = index.get_mut(ii);
// These fields will be overwritten after allocation by callers.
// Since this part of the mmapped file could have previously been used by someone else, there can be garbage here.
elem.ref_count = 0;
elem.storage_offset = 0;
elem.storage_capacity_when_created_pow2 = 0;
elem.num_slots = 0;
elem.init(key);
//debug!( "INDEX ALLOC {:?} {} {} {}", key, ii, index.capacity, elem_uid );
return Ok(ii);
}
@@ -225,8 +220,14 @@ impl<T: Clone + Copy> Bucket<T> {
Some(elem.ref_count)
}

fn create_key(&self, key: &Pubkey) -> Result<u64, BucketMapError> {
Self::bucket_create_key(&self.index, key, IndexEntry::key_uid(key), self.random)
fn create_key(&mut self, key: &Pubkey) -> Result<u64, BucketMapError> {
Self::bucket_create_key(
&mut self.index,
key,
IndexEntry::key_uid(key),
self.random,
false,
)
}

pub fn read_value(&self, key: &Pubkey) -> Option<(&[T], RefCount)> {
@@ -256,16 +257,17 @@ impl<T: Clone + Copy> Bucket<T> {
Some(res) => res,
};
elem.ref_count = ref_count;
let elem_uid = self.index.uid(elem_ix);
let elem_uid = self.index.uid_unchecked(elem_ix);
let bucket_ix = elem.data_bucket_ix();
let current_bucket = &self.data[bucket_ix as usize];
let num_slots = data.len() as u64;
if best_fit_bucket == bucket_ix && elem.num_slots > 0 {
// in place update
let elem_loc = elem.data_loc(current_bucket);
let slice: &mut [T] = current_bucket.get_mut_cell_slice(elem_loc, data.len() as u64);
assert!(current_bucket.uid(elem_loc) == elem_uid);
elem.num_slots = data.len() as u64;
slice.clone_from_slice(data);
assert_eq!(current_bucket.uid(elem_loc), Some(elem_uid));
elem.num_slots = num_slots;
slice.copy_from_slice(data);
Ok(())
} else {
// need to move the allocation to a best fit spot
@@ -275,18 +277,21 @@ impl<T: Clone + Copy> Bucket<T> {
let pos = thread_rng().gen_range(0, cap);
for i in pos..pos + self.index.max_search() {
let ix = i % cap;
if best_bucket.uid(ix) == UID_UNLOCKED {
if best_bucket.is_free(ix) {
let elem_loc = elem.data_loc(current_bucket);
if elem.num_slots > 0 {
let old_slots = elem.num_slots;
elem.set_storage_offset(ix);
elem.set_storage_capacity_when_created_pow2(best_bucket.capacity_pow2);
elem.num_slots = num_slots;
if old_slots > 0 {
let current_bucket = &mut self.data[bucket_ix as usize];
current_bucket.free(elem_loc, elem_uid);
}
elem.storage_offset = ix;
elem.storage_capacity_when_created_pow2 = best_bucket.capacity_pow2;
elem.num_slots = data.len() as u64;
//debug!( "DATA ALLOC {:?} {} {} {}", key, elem.data_location, best_bucket.capacity, elem_uid );
if elem.num_slots > 0 {
best_bucket.allocate(ix, elem_uid).unwrap();
let slice = best_bucket.get_mut_cell_slice(ix, data.len() as u64);
if num_slots > 0 {
let best_bucket = &mut self.data[best_fit_bucket as usize];
best_bucket.allocate(ix, elem_uid, false).unwrap();
let slice = best_bucket.get_mut_cell_slice(ix, num_slots);
slice.copy_from_slice(data);
}
return Ok(());
@@ -298,10 +303,12 @@ impl<T: Clone + Copy> Bucket<T> {

pub fn delete_key(&mut self, key: &Pubkey) {
if let Some((elem, elem_ix)) = self.find_entry(key) {
let elem_uid = self.index.uid(elem_ix);
let elem_uid = self.index.uid_unchecked(elem_ix);
if elem.num_slots > 0 {
let data_bucket = &self.data[elem.data_bucket_ix() as usize];
let ix = elem.data_bucket_ix() as usize;
let data_bucket = &self.data[ix];
let loc = elem.data_loc(data_bucket);
let data_bucket = &mut self.data[ix];
//debug!( "DATA FREE {:?} {} {} {}", key, elem.data_location, data_bucket.capacity, elem_uid );
data_bucket.free(loc, elem_uid);
}
@@ -319,7 +326,7 @@ impl<T: Clone + Copy> Bucket<T> {
//increasing the capacity by ^4 reduces the
|
||||
//1 in 2^32
|
||||
let index = BucketStorage::new_with_capacity(
|
||||
let mut index = BucketStorage::new_with_capacity(
|
||||
Arc::clone(&self.drives),
|
||||
1,
|
||||
std::mem::size_of::<IndexEntry>() as u64,
|
||||
@@ -327,14 +334,16 @@ impl<T: Clone + Copy> Bucket<T> {
|
||||
self.index.capacity_pow2 + i, // * 2,
|
||||
self.index.max_search,
|
||||
Arc::clone(&self.stats.index),
|
||||
Arc::clone(&self.index.count),
|
||||
);
|
||||
let random = thread_rng().gen();
|
||||
let mut valid = true;
|
||||
for ix in 0..self.index.capacity() {
|
||||
let uid = self.index.uid(ix);
|
||||
if UID_UNLOCKED != uid {
|
||||
if let Some(uid) = uid {
|
||||
let elem: &IndexEntry = self.index.get(ix);
|
||||
let new_ix = Self::bucket_create_key(&index, &elem.key, uid, random);
|
||||
let new_ix =
|
||||
Self::bucket_create_key(&mut index, &elem.key, uid, random, true);
|
||||
if new_ix.is_err() {
|
||||
valid = false;
|
||||
break;
|
||||
@@ -391,6 +400,7 @@ impl<T: Clone + Copy> Bucket<T> {
|
||||
Self::elem_size(),
|
||||
self.index.max_search,
|
||||
Arc::clone(&self.stats.data),
|
||||
Arc::default(),
|
||||
))
|
||||
}
|
||||
self.data.push(bucket);
|
||||
|
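A recurring pattern in Bucket<T> above is a bounded linear probe: start at a key-derived position and examine at most max_search consecutive slots, wrapping at capacity, looking for a free one. A hedged, self-contained sketch of that search loop (illustrative names, not the crate's API):

// Sketch: bounded linear probing over a wrapped index space.
fn find_free_slot(
    is_free: impl Fn(u64) -> bool,
    start: u64,
    max_search: u64,
    capacity: u64,
) -> Option<u64> {
    (start..start + max_search)
        .map(|i| i % capacity) // wrap at capacity
        .find(|&ii| is_free(ii)) // first vacant slot wins
}

fn main() {
    // Slot 3 is "taken"; probing from 3 with a budget of 2 finds slot 4.
    assert_eq!(find_free_slot(|ii| ii != 3, 3, 2, 8), Some(4));
}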
@@ -30,14 +30,13 @@
drives: Arc<Vec<PathBuf>>,
max_search: MaxSearch,
stats: Arc<BucketMapStats>,
count: Arc<AtomicU64>,
) -> Self {
Self {
drives,
max_search,
stats,
bucket: RwLock::default(),
count,
count: Arc::default(),
}
}

@@ -73,12 +72,7 @@ impl<T: Clone + Copy> BucketApi<T> {
}

pub fn bucket_len(&self) -> u64 {
self.bucket
.read()
.unwrap()
.as_ref()
.map(|bucket| bucket.bucket_len())
.unwrap_or_default()
self.count.load(Ordering::Relaxed)
}

pub fn delete_key(&self, key: &Pubkey) {
@@ -95,11 +89,11 @@ impl<T: Clone + Copy> BucketApi<T> {
Arc::clone(&self.drives),
self.max_search,
Arc::clone(&self.stats),
Arc::clone(&self.count),
));
} else {
let write = bucket.as_mut().unwrap();
write.handle_delayed_grows();
self.count.store(write.bucket_len(), Ordering::Relaxed);
}
bucket
}

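The bucket_len() change above replaces a read-lock traversal with a shared counter: BucketApi and the underlying storage hold clones of one Arc<AtomicU64>, so the length can be read without touching the RwLock. A minimal sketch of that sharing, assuming only standard-library types:

// Sketch: two owners of one atomic counter, lock-free reads.
use std::sync::{
    atomic::{AtomicU64, Ordering},
    Arc,
};

fn main() {
    let count = Arc::new(AtomicU64::new(0));
    let storage_count = Arc::clone(&count); // handed to the storage layer
    storage_count.fetch_add(1, Ordering::Relaxed); // an allocation happens
    assert_eq!(count.load(Ordering::Relaxed), 1); // visible on the API side, no lock
}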
@@ -79,21 +79,14 @@ impl<T: Clone + Copy + Debug> BucketMap<T> {
});
let drives = Arc::new(drives);

let mut per_bucket_count = Vec::with_capacity(config.max_buckets);
per_bucket_count.resize_with(config.max_buckets, Arc::default);
let stats = Arc::new(BucketMapStats {
per_bucket_count,
..BucketMapStats::default()
});
let buckets = stats
.per_bucket_count
.iter()
.map(|per_bucket_count| {
let stats = Arc::default();
let buckets = (0..config.max_buckets)
.into_iter()
.map(|_| {
Arc::new(BucketApi::new(
Arc::clone(&drives),
max_search,
Arc::clone(&stats),
Arc::clone(per_bucket_count),
))
})
.collect();

@@ -14,5 +14,4 @@ pub struct BucketStats {
pub struct BucketMapStats {
pub index: Arc<BucketStats>,
pub data: Arc<BucketStats>,
pub per_bucket_count: Vec<Arc<AtomicU64>>,
}

@@ -35,27 +35,42 @@ use {
pub const DEFAULT_CAPACITY_POW2: u8 = 5;

/// A Header UID of 0 indicates that the header is unlocked
pub(crate) const UID_UNLOCKED: Uid = 0;
const UID_UNLOCKED: Uid = 0;

pub(crate) type Uid = u64;

#[repr(C)]
struct Header {
lock: AtomicU64,
lock: u64,
}

impl Header {
fn try_lock(&self, uid: Uid) -> bool {
Ok(UID_UNLOCKED)
== self
.lock
.compare_exchange(UID_UNLOCKED, uid, Ordering::AcqRel, Ordering::Relaxed)
/// try to lock this entry with 'uid'
/// return true if it could be locked
fn try_lock(&mut self, uid: Uid) -> bool {
if self.lock == UID_UNLOCKED {
self.lock = uid;
true
} else {
false
}
}
fn unlock(&self) -> Uid {
self.lock.swap(UID_UNLOCKED, Ordering::Release)
/// mark this entry as unlocked
fn unlock(&mut self, expected: Uid) {
assert_eq!(expected, self.lock);
self.lock = UID_UNLOCKED;
}
fn uid(&self) -> Uid {
self.lock.load(Ordering::Acquire)
/// uid that has locked this entry or None if unlocked
fn uid(&self) -> Option<Uid> {
if self.lock == UID_UNLOCKED {
None
} else {
Some(self.lock)
}
}
/// true if this entry is unlocked
fn is_unlocked(&self) -> bool {
self.lock == UID_UNLOCKED
}
}
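The rewritten Header drops the AtomicU64: once the methods take &mut self, Rust's borrow rules already guarantee exclusive access, so a plain u64 compare-and-set suffices. A standalone sketch of the same state machine, mirroring the diff:

// Sketch: non-atomic lock word, safe because callers hold &mut.
type Uid = u64;
const UID_UNLOCKED: Uid = 0;

struct Header {
    lock: u64,
}

impl Header {
    fn try_lock(&mut self, uid: Uid) -> bool {
        if self.lock == UID_UNLOCKED {
            self.lock = uid;
            true
        } else {
            false
        }
    }
}

fn main() {
    let mut h = Header { lock: UID_UNLOCKED };
    assert!(h.try_lock(7)); // first locker wins
    assert!(!h.try_lock(9)); // already held by uid 7
}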

@@ -64,7 +79,7 @@ pub struct BucketStorage {
mmap: MmapMut,
pub cell_size: u64,
pub capacity_pow2: u8,
pub used: AtomicU64,
pub count: Arc<AtomicU64>,
pub stats: Arc<BucketStats>,
pub max_search: MaxSearch,
}
@@ -88,6 +103,7 @@ impl BucketStorage {
capacity_pow2: u8,
max_search: MaxSearch,
stats: Arc<BucketStats>,
count: Arc<AtomicU64>,
) -> Self {
let cell_size = elem_size * num_elems + std::mem::size_of::<Header>() as u64;
let (mmap, path) = Self::new_map(&drives, cell_size as usize, capacity_pow2, &stats);
@@ -95,7 +111,7 @@ impl BucketStorage {
path,
mmap,
cell_size,
used: AtomicU64::new(0),
count,
capacity_pow2,
stats,
max_search,
@@ -112,6 +128,7 @@ impl BucketStorage {
elem_size: u64,
max_search: MaxSearch,
stats: Arc<BucketStats>,
count: Arc<AtomicU64>,
) -> Self {
Self::new_with_capacity(
drives,
@@ -120,53 +137,74 @@ impl BucketStorage {
DEFAULT_CAPACITY_POW2,
max_search,
stats,
count,
)
}

pub fn uid(&self, ix: u64) -> Uid {
assert!(ix < self.capacity(), "bad index size");
/// return ref to header of item 'ix' in mmapped file
fn header_ptr(&self, ix: u64) -> &Header {
let ix = (ix * self.cell_size) as usize;
let hdr_slice: &[u8] = &self.mmap[ix..ix + std::mem::size_of::<Header>()];
unsafe {
let hdr = hdr_slice.as_ptr() as *const Header;
return hdr.as_ref().unwrap().uid();
hdr.as_ref().unwrap()
}
}

pub fn allocate(&self, ix: u64, uid: Uid) -> Result<(), BucketStorageError> {
/// return ref to header of item 'ix' in mmapped file
fn header_mut_ptr(&mut self, ix: u64) -> &mut Header {
let ix = (ix * self.cell_size) as usize;
let hdr_slice: &mut [u8] = &mut self.mmap[ix..ix + std::mem::size_of::<Header>()];
unsafe {
let hdr = hdr_slice.as_mut_ptr() as *mut Header;
hdr.as_mut().unwrap()
}
}

/// return uid allocated at index 'ix' or None if vacant
pub fn uid(&self, ix: u64) -> Option<Uid> {
assert!(ix < self.capacity(), "bad index size");
self.header_ptr(ix).uid()
}

/// true if the entry at index 'ix' is free (as opposed to being allocated)
pub fn is_free(&self, ix: u64) -> bool {
// note that the terminology in the implementation is locked or unlocked.
// but our api is allocate/free
self.header_ptr(ix).is_unlocked()
}

/// caller knows id is not empty
pub fn uid_unchecked(&self, ix: u64) -> Uid {
self.uid(ix).unwrap()
}

/// 'is_resizing' true if caller is resizing the index (so don't increment count)
/// 'is_resizing' false if caller is adding an item to the index (so increment count)
pub fn allocate(
&mut self,
ix: u64,
uid: Uid,
is_resizing: bool,
) -> Result<(), BucketStorageError> {
assert!(ix < self.capacity(), "allocate: bad index size");
assert!(UID_UNLOCKED != uid, "allocate: bad uid");
let mut e = Err(BucketStorageError::AlreadyAllocated);
let ix = (ix * self.cell_size) as usize;
//debug!("ALLOC {} {}", ix, uid);
let hdr_slice: &[u8] = &self.mmap[ix..ix + std::mem::size_of::<Header>()];
unsafe {
let hdr = hdr_slice.as_ptr() as *const Header;
if hdr.as_ref().unwrap().try_lock(uid) {
e = Ok(());
self.used.fetch_add(1, Ordering::Relaxed);
if self.header_mut_ptr(ix).try_lock(uid) {
e = Ok(());
if !is_resizing {
self.count.fetch_add(1, Ordering::Relaxed);
}
};
}
e
}

pub fn free(&self, ix: u64, uid: Uid) {
pub fn free(&mut self, ix: u64, uid: Uid) {
assert!(ix < self.capacity(), "bad index size");
assert!(UID_UNLOCKED != uid, "free: bad uid");
let ix = (ix * self.cell_size) as usize;
//debug!("FREE {} {}", ix, uid);
let hdr_slice: &[u8] = &self.mmap[ix..ix + std::mem::size_of::<Header>()];
unsafe {
let hdr = hdr_slice.as_ptr() as *const Header;
//debug!("FREE uid: {}", hdr.as_ref().unwrap().uid());
let previous_uid = hdr.as_ref().unwrap().unlock();
assert_eq!(
previous_uid, uid,
"free: unlocked a header with a differet uid: {}",
|
||||
previous_uid
);
self.used.fetch_sub(1, Ordering::Relaxed);
}
self.header_mut_ptr(ix).unlock(uid);
self.count.fetch_sub(1, Ordering::Relaxed);
}

pub fn get<T: Sized>(&self, ix: u64) -> &T {
@@ -324,6 +362,9 @@ impl BucketStorage {
capacity_pow_2,
max_search,
Arc::clone(stats),
bucket
.map(|bucket| Arc::clone(&bucket.count))
.unwrap_or_default(),
);
if let Some(bucket) = bucket {
new_bucket.copy_contents(bucket);
@@ -341,3 +382,43 @@ impl BucketStorage {
1 << self.capacity_pow2
}
}

#[cfg(test)]
mod test {
use super::*;

#[test]
fn test_bucket_storage() {
let tmpdir1 = std::env::temp_dir().join("bucket_map_test_mt");
let paths: Vec<PathBuf> = [tmpdir1]
.iter()
.filter(|x| std::fs::create_dir_all(x).is_ok())
.cloned()
.collect();
assert!(!paths.is_empty());

let mut storage =
BucketStorage::new(Arc::new(paths), 1, 1, 1, Arc::default(), Arc::default());
let ix = 0;
let uid = Uid::MAX;
assert!(storage.is_free(ix));
assert!(storage.allocate(ix, uid, false).is_ok());
assert!(storage.allocate(ix, uid, false).is_err());
assert!(!storage.is_free(ix));
assert_eq!(storage.uid(ix), Some(uid));
assert_eq!(storage.uid_unchecked(ix), uid);
storage.free(ix, uid);
assert!(storage.is_free(ix));
assert_eq!(storage.uid(ix), None);
let uid = 1;
assert!(storage.is_free(ix));
assert!(storage.allocate(ix, uid, false).is_ok());
assert!(storage.allocate(ix, uid, false).is_err());
assert!(!storage.is_free(ix));
assert_eq!(storage.uid(ix), Some(uid));
assert_eq!(storage.uid_unchecked(ix), uid);
storage.free(ix, uid);
assert!(storage.is_free(ix));
assert_eq!(storage.uid(ix), None);
}
}

@@ -1,9 +1,12 @@
#![allow(dead_code)]

use {
crate::{
bucket::Bucket,
bucket_storage::{BucketStorage, Uid},
RefCount,
},
modular_bitfield::prelude::*,
solana_sdk::{clock::Slot, pubkey::Pubkey},
std::{
collections::hash_map::DefaultHasher,
@@ -19,15 +22,54 @@ use {
pub struct IndexEntry {
pub key: Pubkey, // can this be smaller if we have reduced the keys into buckets already?
pub ref_count: RefCount, // can this be smaller? Do we ever need more than 4B refcounts?
pub storage_offset: u64, // smaller? since these are variably sized, this could get tricky. well, actually accountinfo is not variable sized...
storage_cap_and_offset: PackedStorage,
// if the bucket doubled, the index can be recomputed using create_bucket_capacity_pow2
pub storage_capacity_when_created_pow2: u8, // see data_location
pub num_slots: Slot, // can this be smaller? epoch size should ~ be the max len. this is the num elements in the slot list
}

/// Pack the storage offset and capacity-when-created-pow2 fields into a single u64
#[bitfield(bits = 64)]
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, Eq, PartialEq)]
struct PackedStorage {
capacity_when_created_pow2: B8,
offset: B56,
}
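Spelled out by hand, PackedStorage shares one u64 between 8 bits of capacity_when_created_pow2 and 56 bits of offset. A manual packing sketch for intuition only; the field placement here is an assumption, and the real layout is whatever the modular_bitfield macro generates:

// Sketch: manual 8-bit + 56-bit packing into a u64 (assumed LSB-first layout).
fn pack(capacity_pow2: u8, offset: u64) -> u64 {
    assert!(offset < (1u64 << 56), "offset must fit into 7 bytes");
    (capacity_pow2 as u64) | (offset << 8)
}

fn unpack(packed: u64) -> (u8, u64) {
    ((packed & 0xff) as u8, packed >> 8)
}

fn main() {
    assert_eq!(unpack(pack(5, 1234)), (5, 1234)); // round-trips losslessly
}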

impl IndexEntry {
pub fn init(&mut self, pubkey: &Pubkey) {
self.key = *pubkey;
self.ref_count = 0;
self.storage_cap_and_offset = PackedStorage::default();
self.num_slots = 0;
}

pub fn set_storage_capacity_when_created_pow2(
&mut self,
storage_capacity_when_created_pow2: u8,
) {
self.storage_cap_and_offset
.set_capacity_when_created_pow2(storage_capacity_when_created_pow2)
}

pub fn set_storage_offset(&mut self, storage_offset: u64) {
self.storage_cap_and_offset
.set_offset_checked(storage_offset)
.expect("New storage offset must fit into 7 bytes!")
}

/// return closest bucket index fit for the slot slice.
/// Since bucket size is 2^index, the return value is
/// min index, such that 2^index >= num_slots
/// index = ceiling(log2(num_slots))
/// special case, when slot slice empty, return 0th index.
pub fn data_bucket_from_num_slots(num_slots: Slot) -> u64 {
(num_slots as f64).log2().ceil() as u64 // use int log here?
// Compute the ceiling of log2 for integer
if num_slots == 0 {
0
} else {
(Slot::BITS - (num_slots - 1).leading_zeros()) as u64
}
}
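A quick check that the branchless integer form above really is ceil(log2(num_slots)): for num_slots = 5, (5 - 1).leading_zeros() on a u64 is 61, and 64 - 61 == 3, i.e. a bucket of 2^3 = 8 slots. Self-contained verification (Slot is a u64 alias, so Slot::BITS is 64):

// Sketch: integer ceiling-of-log2, same formula as the diff.
fn ceil_log2(num_slots: u64) -> u64 {
    if num_slots == 0 {
        0
    } else {
        (u64::BITS - (num_slots - 1).leading_zeros()) as u64
    }
}

fn main() {
    assert_eq!(ceil_log2(5), 3); // 2^3 = 8 >= 5
    assert_eq!(ceil_log2(8), 3); // exact power of two stays at 3
    assert_eq!(ceil_log2(9), 4); // 2^4 = 16 >= 9
}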

pub fn data_bucket_ix(&self) -> u64 {
@@ -38,10 +80,18 @@ impl IndexEntry {
self.ref_count
}

fn storage_capacity_when_created_pow2(&self) -> u8 {
self.storage_cap_and_offset.capacity_when_created_pow2()
}

fn storage_offset(&self) -> u64 {
self.storage_cap_and_offset.offset()
}

// This function maps the original data location into an index in the current bucket storage.
// This is coupled with how we resize bucket storages.
pub fn data_loc(&self, storage: &BucketStorage) -> u64 {
self.storage_offset << (storage.capacity_pow2 - self.storage_capacity_when_created_pow2)
self.storage_offset() << (storage.capacity_pow2 - self.storage_capacity_when_created_pow2())
}
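A worked example of data_loc's rescaling: an entry recorded at offset 5 when the storage held 2^3 cells maps to offset 5 << (6 - 3) == 40 once the storage has grown to 2^6 cells; each doubling of capacity doubles every stored offset. A sketch of just that arithmetic:

// Sketch: remap an offset recorded at creation-time capacity to the current capacity.
fn data_loc(storage_offset: u64, created_pow2: u8, current_pow2: u8) -> u64 {
    storage_offset << (current_pow2 - created_pow2)
}

fn main() {
    assert_eq!(data_loc(5, 3, 6), 40); // grew 8 -> 64 cells
    assert_eq!(data_loc(5, 3, 3), 5); // no growth, no change
}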

pub fn read_value<'a, T>(&self, bucket: &'a Bucket<T>) -> Option<(&'a [T], RefCount)> {
@@ -50,7 +100,7 @@ impl IndexEntry {
let slice = if self.num_slots > 0 {
let loc = self.data_loc(data_bucket);
let uid = Self::key_uid(&self.key);
assert_eq!(uid, bucket.data[data_bucket_ix as usize].uid(loc));
assert_eq!(Some(uid), bucket.data[data_bucket_ix as usize].uid(loc));
bucket.data[data_bucket_ix as usize].get_cell_slice(loc, self.num_slots)
} else {
// num_slots is 0. This means we don't have an actual allocation.
@@ -59,9 +109,78 @@ impl IndexEntry {
};
Some((slice, self.ref_count))
}

pub fn key_uid(key: &Pubkey) -> Uid {
let mut s = DefaultHasher::new();
key.hash(&mut s);
s.finish().max(1u64)
}
}
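The .max(1) at the end of key_uid() is load-bearing: a Uid of 0 is UID_UNLOCKED, the vacant sentinel in the storage header, so a key whose hash happens to be 0 must be nudged to 1 before it can act as a lock owner. A self-contained sketch of the same guard:

// Sketch: hash a key to a non-zero uid, since 0 means "unlocked/vacant".
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

fn key_uid<K: Hash>(key: &K) -> u64 {
    let mut s = DefaultHasher::new();
    key.hash(&mut s);
    s.finish().max(1u64) // never collide with the UID_UNLOCKED sentinel
}

fn main() {
    assert_ne!(key_uid(&"any key"), 0);
}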

#[cfg(test)]
mod tests {
use super::*;

impl IndexEntry {
pub fn new(key: Pubkey) -> Self {
IndexEntry {
key,
ref_count: 0,
storage_cap_and_offset: PackedStorage::default(),
num_slots: 0,
}
}
}

/// verify that accessors for storage_offset and capacity_when_created are
/// correct and independent
#[test]
fn test_api() {
for offset in [0, 1, u32::MAX as u64] {
let mut index = IndexEntry::new(solana_sdk::pubkey::new_rand());
if offset != 0 {
index.set_storage_offset(offset);
}
assert_eq!(index.storage_offset(), offset);
assert_eq!(index.storage_capacity_when_created_pow2(), 0);
for pow in [1, 255, 0] {
index.set_storage_capacity_when_created_pow2(pow);
assert_eq!(index.storage_offset(), offset);
assert_eq!(index.storage_capacity_when_created_pow2(), pow);
}
}
}

#[test]
fn test_size() {
assert_eq!(std::mem::size_of::<PackedStorage>(), 1 + 7);
assert_eq!(std::mem::size_of::<IndexEntry>(), 32 + 8 + 8 + 8);
}

#[test]
#[should_panic(expected = "New storage offset must fit into 7 bytes!")]
fn test_set_storage_offset_value_too_large() {
let too_big = 1 << 56;
let mut index = IndexEntry::new(Pubkey::new_unique());
index.set_storage_offset(too_big);
}

#[test]
fn test_data_bucket_from_num_slots() {
for n in 0..512 {
assert_eq!(
IndexEntry::data_bucket_from_num_slots(n),
(n as f64).log2().ceil() as u64
);
}
assert_eq!(IndexEntry::data_bucket_from_num_slots(u32::MAX as u64), 32);
assert_eq!(
IndexEntry::data_bucket_from_num_slots(u32::MAX as u64 + 1),
32
);
assert_eq!(
IndexEntry::data_bucket_from_num_slots(u32::MAX as u64 + 2),
33
);
}
}

ci/buildkite-pipeline-in-disk.sh (Normal file, 365 lines)
@@ -0,0 +1,365 @@
#!/usr/bin/env bash
#
# Builds a buildkite pipeline based on the environment variables
#

set -e
cd "$(dirname "$0")"/..

output_file=${1:-/dev/stderr}

if [[ -n $CI_PULL_REQUEST ]]; then
IFS=':' read -ra affected_files <<< "$(buildkite-agent meta-data get affected_files)"
if [[ ${#affected_files[*]} -eq 0 ]]; then
echo "Unable to determine the files affected by this PR"
exit 1
fi
else
affected_files=()
fi

annotate() {
if [[ -n $BUILDKITE ]]; then
buildkite-agent annotate "$@"
fi
}

# Checks if a CI pull request affects one or more path patterns. Each
# pattern argument is checked in series. If one of them is found to be affected,
# return immediately as such.
#
# Bash regular expressions are permitted in the pattern:
# affects .rs$ -- any file or directory ending in .rs
# affects .rs -- also matches foo.rs.bar
# affects ^snap/ -- anything under the snap/ subdirectory
# affects snap/ -- also matches foo/snap/
# Any pattern starting with the ! character will be negated:
# affects !^docs/ -- anything *not* under the docs/ subdirectory
#
affects() {
if [[ -z $CI_PULL_REQUEST ]]; then
# affected_files metadata is not currently available for non-PR builds so assume
# the worst (affected)
return 0
fi
# Assume everything needs to be tested when any Dockerfile changes
for pattern in ^ci/docker-rust/Dockerfile ^ci/docker-rust-nightly/Dockerfile "$@"; do
if [[ ${pattern:0:1} = "!" ]]; then
for file in "${affected_files[@]}"; do
if [[ ! $file =~ ${pattern:1} ]]; then
return 0 # affected
fi
done
else
for file in "${affected_files[@]}"; do
if [[ $file =~ $pattern ]]; then
return 0 # affected
fi
done
fi
done

return 1 # not affected
}


# Checks if a CI pull request affects anything other than the provided path patterns
#
# Syntax is the same as `affects()` except that the negation prefix is not
# supported
#
affects_other_than() {
if [[ -z $CI_PULL_REQUEST ]]; then
# affected_files metadata is not currently available for non-PR builds so assume
# the worst (affected)
return 0
fi

for file in "${affected_files[@]}"; do
declare matched=false
for pattern in "$@"; do
if [[ $file =~ $pattern ]]; then
matched=true
fi
done
if ! $matched; then
return 0 # affected
fi
done

return 1 # not affected
}


start_pipeline() {
echo "# $*" > "$output_file"
echo "steps:" >> "$output_file"
}

command_step() {
cat >> "$output_file" <<EOF
- name: "$1"
command: "$2"
timeout_in_minutes: $3
artifact_paths: "log-*.txt"
EOF
}


trigger_secondary_step() {
cat >> "$output_file" <<"EOF"
- trigger: "solana-secondary"
branches: "!pull/*"
async: true
build:
message: "${BUILDKITE_MESSAGE}"
commit: "${BUILDKITE_COMMIT}"
branch: "${BUILDKITE_BRANCH}"
env:
TRIGGERED_BUILDKITE_TAG: "${BUILDKITE_TAG}"
EOF
}

wait_step() {
echo " - wait" >> "$output_file"
}

all_test_steps() {
command_step checks ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_nightly_docker_image ci/test-checks.sh" 20
wait_step

# Coverage...
if affects \
.rs$ \
Cargo.lock$ \
Cargo.toml$ \
^ci/rust-version.sh \
^ci/test-coverage.sh \
^scripts/coverage.sh \
; then
command_step coverage ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_nightly_docker_image ci/test-coverage.sh" 50
wait_step
else
annotate --style info --context test-coverage \
"Coverage skipped as no .rs files were modified"
fi
# Coverage in disk...
if affects \
.rs$ \
Cargo.lock$ \
Cargo.toml$ \
^ci/rust-version.sh \
^ci/test-coverage.sh \
^scripts/coverage-in-disk.sh \
; then
command_step coverage-in-disk ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_nightly_docker_image ci/test-coverage.sh" 50
wait_step
else
annotate --style info --context test-coverage \
"Coverage skipped as no .rs files were modified"
fi
# Full test suite
command_step stable ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-stable.sh" 70
wait_step

# BPF test suite
if affects \
.rs$ \
Cargo.lock$ \
Cargo.toml$ \
^ci/rust-version.sh \
^ci/test-stable-bpf.sh \
^ci/test-stable.sh \
^ci/test-local-cluster.sh \
^core/build.rs \
^fetch-perf-libs.sh \
^programs/ \
^sdk/ \
; then
cat >> "$output_file" <<"EOF"
- command: "ci/test-stable-bpf.sh"
name: "stable-bpf"
timeout_in_minutes: 20
artifact_paths: "bpf-dumps.tar.bz2"
agents:
- "queue=default"
EOF
else
annotate --style info \
"Stable-BPF skipped as no relevant files were modified"
fi

# Perf test suite
if affects \
.rs$ \
Cargo.lock$ \
Cargo.toml$ \
^ci/rust-version.sh \
^ci/test-stable-perf.sh \
^ci/test-stable.sh \
^ci/test-local-cluster.sh \
^core/build.rs \
^fetch-perf-libs.sh \
^programs/ \
^sdk/ \
; then
cat >> "$output_file" <<"EOF"
- command: "ci/test-stable-perf.sh"
name: "stable-perf"
timeout_in_minutes: 20
artifact_paths: "log-*.txt"
agents:
- "queue=cuda"
EOF
else
annotate --style info \
"Stable-perf skipped as no relevant files were modified"
fi

# Downstream backwards compatibility
if affects \
.rs$ \
Cargo.lock$ \
Cargo.toml$ \
^ci/rust-version.sh \
^ci/test-stable-perf.sh \
^ci/test-stable.sh \
^ci/test-local-cluster.sh \
^core/build.rs \
^fetch-perf-libs.sh \
^programs/ \
^sdk/ \
^scripts/build-downstream-projects.sh \
; then
cat >> "$output_file" <<"EOF"
- command: "scripts/build-downstream-projects.sh"
name: "downstream-projects"
timeout_in_minutes: 30
EOF
else
annotate --style info \
"downstream-projects skipped as no relevant files were modified"
fi

# Downstream Anchor projects backwards compatibility
if affects \
.rs$ \
Cargo.lock$ \
Cargo.toml$ \
^ci/rust-version.sh \
^ci/test-stable-perf.sh \
^ci/test-stable.sh \
^ci/test-local-cluster.sh \
^core/build.rs \
^fetch-perf-libs.sh \
^programs/ \
^sdk/ \
^scripts/build-downstream-anchor-projects.sh \
; then
cat >> "$output_file" <<"EOF"
- command: "scripts/build-downstream-anchor-projects.sh"
name: "downstream-anchor-projects"
timeout_in_minutes: 10
EOF
else
annotate --style info \
"downstream-anchor-projects skipped as no relevant files were modified"
fi

# Wasm support
if affects \
^ci/test-wasm.sh \
^ci/test-stable.sh \
^sdk/ \
; then
command_step wasm ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-wasm.sh" 20
else
annotate --style info \
"wasm skipped as no relevant files were modified"
fi

# Benches...
if affects \
.rs$ \
Cargo.lock$ \
Cargo.toml$ \
^ci/rust-version.sh \
^ci/test-coverage.sh \
^ci/test-bench.sh \
; then
command_step bench "ci/test-bench.sh" 30
else
annotate --style info --context test-bench \
"Bench skipped as no .rs files were modified"
fi

command_step "local-cluster" \
". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-local-cluster.sh" \
40

command_step "local-cluster-flakey" \
". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-local-cluster-flakey.sh" \
10

command_step "local-cluster-slow" \
". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-local-cluster-slow.sh" \
40
}

pull_or_push_steps() {
command_step sanity "ci/test-sanity.sh" 5
wait_step

# Check for any .sh file changes
if affects .sh$; then
command_step shellcheck "ci/shellcheck.sh" 5
wait_step
fi

# Run the full test suite by default, skipping only if modifications are local
# to some particular areas of the tree
if affects_other_than ^.buildkite ^.mergify .md$ ^docs/ ^web3.js/ ^explorer/ ^.gitbook; then
all_test_steps
fi

# web3.js, explorer and docs changes run on Travis or Github actions...
}


if [[ -n $BUILDKITE_TAG ]]; then
start_pipeline "Tag pipeline for $BUILDKITE_TAG"

annotate --style info --context release-tag \
"https://github.com/solana-labs/solana/releases/$BUILDKITE_TAG"

# Jump directly to the secondary build to publish release artifacts quickly
trigger_secondary_step
exit 0
fi


if [[ $BUILDKITE_BRANCH =~ ^pull ]]; then
echo "+++ Affected files in this PR"
for file in "${affected_files[@]}"; do
echo "- $file"
done

start_pipeline "Pull request pipeline for $BUILDKITE_BRANCH"

# Add helpful link back to the corresponding Github Pull Request
annotate --style info --context pr-backlink \
"Github Pull Request: https://github.com/solana-labs/solana/$BUILDKITE_BRANCH"

if [[ $GITHUB_USER = "dependabot[bot]" ]]; then
command_step dependabot "ci/dependabot-pr.sh" 5
wait_step
fi
pull_or_push_steps
exit 0
fi

start_pipeline "Push pipeline for ${BUILDKITE_BRANCH:-?unknown branch?}"
pull_or_push_steps
wait_step
trigger_secondary_step
exit 0
@@ -102,6 +102,8 @@ command_step() {
command: "$2"
timeout_in_minutes: $3
artifact_paths: "log-*.txt"
agents:
- "queue=solana"
EOF
}

@@ -137,7 +139,7 @@ all_test_steps() {
^ci/test-coverage.sh \
^scripts/coverage.sh \
; then
command_step coverage ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_nightly_docker_image ci/test-coverage.sh" 40
command_step coverage ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_nightly_docker_image ci/test-coverage.sh" 50
wait_step
else
annotate --style info --context test-coverage \
@@ -145,7 +147,7 @@ all_test_steps() {
fi

# Full test suite
command_step stable ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-stable.sh" 60
command_step stable ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-stable.sh" 70
wait_step

# BPF test suite
@@ -168,7 +170,7 @@ all_test_steps() {
timeout_in_minutes: 20
artifact_paths: "bpf-dumps.tar.bz2"
agents:
- "queue=default"
- "queue=solana"
EOF
else
annotate --style info \
@@ -221,12 +223,41 @@ EOF
- command: "scripts/build-downstream-projects.sh"
name: "downstream-projects"
timeout_in_minutes: 30
agents:
- "queue=solana"
EOF
else
annotate --style info \
"downstream-projects skipped as no relevant files were modified"
fi

# Downstream Anchor projects backwards compatibility
if affects \
.rs$ \
Cargo.lock$ \
Cargo.toml$ \
^ci/rust-version.sh \
^ci/test-stable-perf.sh \
^ci/test-stable.sh \
^ci/test-local-cluster.sh \
^core/build.rs \
^fetch-perf-libs.sh \
^programs/ \
^sdk/ \
^scripts/build-downstream-anchor-projects.sh \
; then
cat >> "$output_file" <<"EOF"
- command: "scripts/build-downstream-anchor-projects.sh"
name: "downstream-anchor-projects"
timeout_in_minutes: 10
agents:
- "queue=solana"
EOF
else
annotate --style info \
"downstream-anchor-projects skipped as no relevant files were modified"
fi

# Wasm support
if affects \
^ci/test-wasm.sh \
@@ -264,7 +295,7 @@ EOF

command_step "local-cluster-slow" \
". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-local-cluster-slow.sh" \
30
40
}

pull_or_push_steps() {

@@ -8,11 +8,6 @@ src_root="$(readlink -f "${here}/..")"
cd "${src_root}"

cargo_audit_ignores=(
# failure is officially deprecated/unmaintained
#
# Blocked on multiple upstream crates removing their `failure` dependency.
--ignore RUSTSEC-2020-0036

# `net2` crate has been deprecated; use `socket2` instead
#
# Blocked on https://github.com/paritytech/jsonrpc/issues/575
@@ -30,22 +25,10 @@ cargo_audit_ignores=(

# generic-array: arr! macro erases lifetimes
#
# Blocked on libsecp256k1 releasing with upgraded dependencies
# https://github.com/paritytech/libsecp256k1/issues/66
# Blocked on new spl dependencies on solana-program v1.9
# due to curve25519-dalek dependency
--ignore RUSTSEC-2020-0146

# hyper: Lenient `hyper` header parsing of `Content-Length` could allow request smuggling
#
# Blocked on jsonrpc removing dependency on unmaintained `websocket`
# https://github.com/paritytech/jsonrpc/issues/605
--ignore RUSTSEC-2021-0078

# hyper: Integer overflow in `hyper`'s parsing of the `Transfer-Encoding` header leads to data loss
#
# Blocked on jsonrpc removing dependency on unmaintained `websocket`
# https://github.com/paritytech/jsonrpc/issues/605
--ignore RUSTSEC-2021-0079

# chrono: Potential segfault in `localtime_r` invocations
#
# Blocked due to no safe upgrade

@@ -1,4 +1,4 @@
FROM solanalabs/rust:1.57.0
FROM solanalabs/rust:1.60.0
ARG date

RUN set -x \
@@ -3,8 +3,14 @@ set -ex

cd "$(dirname "$0")"

platform=()
if [[ $(uname -m) = arm64 ]]; then
# Ref: https://blog.jaimyn.dev/how-to-build-multi-architecture-docker-images-on-an-m1-mac/#tldr
platform+=(--platform linux/amd64)
fi

nightlyDate=${1:-$(date +%Y-%m-%d)}
docker build -t solanalabs/rust-nightly:"$nightlyDate" --build-arg date="$nightlyDate" .
docker build "${platform[@]}" -t solanalabs/rust-nightly:"$nightlyDate" --build-arg date="$nightlyDate" .

maybeEcho=
if [[ -z $CI ]]; then
@@ -1,6 +1,6 @@
# Note: when the rust version is changed also modify
# ci/rust-version.sh to pick up the new image tag
FROM rust:1.57.0
FROM rust:1.60.0

# Add Google Protocol Buffers for Libra's metrics library.
ENV PROTOC_VERSION 3.8.0
@@ -3,7 +3,14 @@ set -ex

cd "$(dirname "$0")"

docker build -t solanalabs/rust .

platform=()
if [[ $(uname -m) = arm64 ]]; then
# Ref: https://blog.jaimyn.dev/how-to-build-multi-architecture-docker-images-on-an-m1-mac/#tldr
platform+=(--platform linux/amd64)
fi

docker build "${platform[@]}" -t solanalabs/rust .

read -r rustc version _ < <(docker run solanalabs/rust rustc --version)
[[ $rustc = rustc ]]
@@ -70,7 +70,7 @@ for Cargo_toml in $Cargo_tomls; do
rm -rf crate-test
"$cargo" stable init crate-test
cd crate-test/
echo "${crate_name} = \"${expectedCrateVersion}\"" >> Cargo.toml
echo "${crate_name} = \"=${expectedCrateVersion}\"" >> Cargo.toml
echo "[workspace]" >> Cargo.toml
"$cargo" stable check
) && really_uploaded=1
@@ -150,7 +150,7 @@ elif [[ -n $BUILDKITE ]]; then
cat > release.solana.com-install <<EOF
SOLANA_RELEASE=$CHANNEL_OR_TAG
SOLANA_INSTALL_INIT_ARGS=$CHANNEL_OR_TAG
SOLANA_DOWNLOAD_ROOT=http://release.solana.com
SOLANA_DOWNLOAD_ROOT=https://release.solana.com
EOF
cat install/solana-install-init.sh >> release.solana.com-install


@@ -7,7 +7,7 @@ source multinode-demo/common.sh

 rm -rf config/run/init-completed config/ledger config/snapshot-ledger

-SOLANA_RUN_SH_VALIDATOR_ARGS="--snapshot-interval-slots 200" timeout 120 ./scripts/run.sh &
+SOLANA_RUN_SH_VALIDATOR_ARGS="--full-snapshot-interval-slots 200" timeout 120 ./scripts/run.sh &
 pid=$!

 attempts=20

@@ -18,13 +18,13 @@
 if [[ -n $RUST_STABLE_VERSION ]]; then
   stable_version="$RUST_STABLE_VERSION"
 else
-  stable_version=1.57.0
+  stable_version=1.60.0
 fi

 if [[ -n $RUST_NIGHTLY_VERSION ]]; then
   nightly_version="$RUST_NIGHTLY_VERSION"
 else
-  nightly_version=2021-12-03
+  nightly_version=2022-04-01
 fi

ci/sbf-tools-info.sh (new executable file, 24 lines)
@@ -0,0 +1,24 @@
#!/usr/bin/env bash
#
# Finds the version of sbf-tools used by this source tree.
#
# stdout of this script may be eval-ed.
#

here="$(dirname "$0")"

SBF_TOOLS_VERSION=unknown

cargo_build_bpf_main="${here}/../sdk/cargo-build-bpf/src/main.rs"
if [[ -f "${cargo_build_bpf_main}" ]]; then
  version=$(sed -e 's/^.*bpf_tools_version\s*=\s*"\(v[0-9.]\+\)".*/\1/;t;d' "${cargo_build_bpf_main}")
  if [[ ${version} != '' ]]; then
    SBF_TOOLS_VERSION="${version}"
  else
    echo '--- unable to parse SBF_TOOLS_VERSION'
  fi
else
  echo "--- '${cargo_build_bpf_main}' not present"
fi

echo SBF_TOOLS_VERSION="${SBF_TOOLS_VERSION}"
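
The sed expression above pulls a `v`-prefixed tag out of a `bpf_tools_version = "vX.Y"` assignment in cargo-build-bpf's main.rs. A rough Rust equivalent using the `regex` crate may make the capture clearer; the constant name comes from the sed pattern, but the input line and version value below are hypothetical:

```rust
use regex::Regex;

// Mirrors the sed capture: find `bpf_tools_version = "v<digits and dots>"`
// and return the quoted tag, or None if no such line exists.
fn parse_sbf_tools_version(source: &str) -> Option<String> {
    let re = Regex::new(r#"bpf_tools_version\s*=\s*"(v[0-9.]+)""#).unwrap();
    re.captures(source).map(|caps| caps[1].to_string())
}

fn main() {
    let line = r#"let bpf_tools_version = "v1.23";"#; // hypothetical value
    assert_eq!(parse_sbf_tools_version(line).as_deref(), Some("v1.23"));
}
```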
@@ -57,32 +57,20 @@ if [[ $CI_BASE_BRANCH = "$EDGE_CHANNEL" ]]; then
     exit "$check_status"
   fi

   # Ensure nightly and --benches
   _ scripts/cargo-for-all-lock-files.sh nightly check --locked --all-targets
 else
   echo "Note: cargo-for-all-lock-files.sh skipped because $CI_BASE_BRANCH != $EDGE_CHANNEL"
 fi

 _ ci/order-crates-for-publishing.py

 # -Z... is needed because of clippy bug: https://github.com/rust-lang/rust-clippy/issues/4612
 # run nightly clippy for `sdk/` as there's a moderate amount of nightly-only code there
-_ "$cargo" nightly clippy -Zunstable-options --workspace --all-targets -- --deny=warnings --deny=clippy::integer_arithmetic
+_ scripts/cargo-for-all-lock-files.sh -- nightly clippy -Zunstable-options --all-targets -- --deny=warnings --deny=clippy::integer_arithmetic

-_ "$cargo" stable fmt --all -- --check
+_ scripts/cargo-for-all-lock-files.sh -- nightly fmt --all -- --check

-_ ci/do-audit.sh
-
-{
-  cd programs/bpf
-  for project in rust/*/ ; do
-    echo "+++ do_bpf_checks $project"
-    (
-      cd "$project"
-      _ "$cargo" nightly clippy -- --deny=warnings --allow=clippy::missing_safety_doc
-      _ "$cargo" stable fmt -- --check
-    )
-  done
-}
+_ ci/do-audit.sh

 echo --- ok
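
Both the old and the new clippy invocations keep `--deny=clippy::integer_arithmetic`, the workspace-wide ban on bare integer operators. A small illustration of what the lint steers code toward (names here are illustrative, not from the repository):

```rust
// Denied by clippy::integer_arithmetic: `a + b` may overflow, and in
// release builds the wraparound is silent.
// fn total(a: u64, b: u64) -> u64 { a + b }

// Lint-clean alternative: surface the overflow case explicitly.
fn total(a: u64, b: u64) -> Option<u64> {
    a.checked_add(b)
}

fn main() {
    assert_eq!(total(2, 3), Some(5));
    assert_eq!(total(u64::MAX, 1), None); // overflow becomes None, not a wrap
}
```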

@@ -21,15 +21,16 @@ export RUST_BACKTRACE=1
 export RUSTFLAGS="-D warnings"
 source scripts/ulimit-n.sh

-# Limit compiler jobs to reduce memory usage
-# on machines with 2gb/thread of memory
+# limit jobs to 4gb/thread
+JOBS=$(grep MemTotal /proc/meminfo | awk '{printf "%.0f", ($2 / (4 * 1024 * 1024))}')
 NPROC=$(nproc)
 NPROC=$((NPROC>14 ? 14 : NPROC))
+JOBS=$((JOBS>NPROC ? NPROC : JOBS))


 echo "Executing $testName"
 case $testName in
 test-stable)
-  _ "$cargo" stable test --jobs "$NPROC" --all --exclude solana-local-cluster ${V:+--verbose} -- --nocapture
+  _ "$cargo" stable test --jobs "$JOBS" --all --exclude solana-local-cluster ${V:+--verbose} -- --nocapture
   ;;
 test-stable-bpf)
   # Clear the C dependency files, if dependency moves these files are not regenerated
@@ -65,6 +66,9 @@ test-stable-bpf)
     fi
   done

+  # bpf-tools version
+  "$cargo_build_bpf" -V
+
   # BPF program instruction count assertion
   bpf_target_path=programs/bpf/target
   _ "$cargo" stable test \
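
The new `JOBS` line budgets roughly one compiler job per 4 GiB of RAM (MemTotal in /proc/meminfo is reported in kB), then caps the result at min(nproc, 14). The same arithmetic restated in Rust for clarity, with two worked examples (awk's `%.0f` rounding can differ from `round` at exact .5 boundaries, which does not matter here):

```rust
// jobs = min(round(mem_kb / 4 GiB-in-kB), min(nproc, 14))
fn compute_jobs(mem_total_kb: u64, nproc: u64) -> u64 {
    let by_memory = (mem_total_kb as f64 / (4.0 * 1024.0 * 1024.0)).round() as u64;
    by_memory.min(nproc.min(14))
}

fn main() {
    // 64 GiB, 32 threads: memory allows 16 jobs; the nproc cap lowers it to 14.
    assert_eq!(compute_jobs(64 * 1024 * 1024, 32), 14);
    // 8 GiB, 8 threads: memory is the binding constraint, so 2 jobs.
    assert_eq!(compute_jobs(8 * 1024 * 1024, 8), 2);
}
```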

@@ -1,6 +1,6 @@
 [package]
 name = "solana-clap-utils"
-version = "1.9.4"
+version = "1.11.0"
 description = "Solana utilities for the clap"
 authors = ["Solana Maintainers <maintainers@solana.foundation>"]
 repository = "https://github.com/solana-labs/solana"
@@ -10,19 +10,19 @@ documentation = "https://docs.rs/solana-clap-utils"
 edition = "2021"

 [dependencies]
+chrono = "0.4"
 clap = "2.33.0"
-rpassword = "5.0"
-solana-perf = { path = "../perf", version = "=1.9.4" }
-solana-remote-wallet = { path = "../remote-wallet", version = "=1.9.4" }
-solana-sdk = { path = "../sdk", version = "=1.9.4" }
+rpassword = "6.0"
+solana-perf = { path = "../perf", version = "=1.11.0" }
+solana-remote-wallet = { path = "../remote-wallet", version = "=1.11.0", default-features = false }
+solana-sdk = { path = "../sdk", version = "=1.11.0" }
 thiserror = "1.0.30"
 tiny-bip39 = "0.8.2"
-uriparse = "0.6.3"
+uriparse = "0.6.4"
 url = "2.2.2"
-chrono = "0.4"

 [dev-dependencies]
-tempfile = "3.2.0"
+tempfile = "3.3.0"

 [lib]
 name = "solana_clap_utils"

@@ -328,7 +328,7 @@ pub fn is_derivation<T>(value: T) -> Result<(), String>
 where
     T: AsRef<str> + Display,
 {
-    let value = value.as_ref().replace("'", "");
+    let value = value.as_ref().replace('\'', "");
     let mut parts = value.split('/');
     let account = parts.next().unwrap();
     account
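
The `is_derivation` change swaps a one-character string pattern for a `char` pattern. The result is identical; clippy's `single_char_pattern` lint prefers the `char` form because it lets the standard library take a cheaper matching path. A quick check:

```rust
fn main() {
    let derivation = "42'/0'";
    // Old form: &str pattern. New form: char pattern. Same output.
    assert_eq!(derivation.replace("'", ""), "42/0");
    assert_eq!(derivation.replace('\'', ""), "42/0");
}
```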

@@ -17,7 +17,7 @@ use {
     },
     bip39::{Language, Mnemonic, Seed},
     clap::ArgMatches,
-    rpassword::prompt_password_stderr,
+    rpassword::prompt_password,
     solana_remote_wallet::{
         locator::{Locator as RemoteWalletLocator, LocatorError as RemoteWalletLocatorError},
         remote_keypair::generate_remote_keypair,
@@ -945,9 +945,9 @@ pub const SKIP_SEED_PHRASE_VALIDATION_ARG: ArgConstant<'static> = ArgConstant {

 /// Prompts user for a passphrase and then asks for confirmation to check for mistakes
 pub fn prompt_passphrase(prompt: &str) -> Result<String, Box<dyn error::Error>> {
-    let passphrase = prompt_password_stderr(prompt)?;
+    let passphrase = prompt_password(prompt)?;
     if !passphrase.is_empty() {
-        let confirmed = rpassword::prompt_password_stderr("Enter same passphrase again: ")?;
+        let confirmed = rpassword::prompt_password("Enter same passphrase again: ")?;
         if confirmed != passphrase {
             return Err("Passphrases did not match".into());
         }
@@ -1055,7 +1055,7 @@ pub fn keypair_from_seed_phrase(
     derivation_path: Option<DerivationPath>,
     legacy: bool,
 ) -> Result<Keypair, Box<dyn error::Error>> {
-    let seed_phrase = prompt_password_stderr(&format!("[{}] seed phrase: ", keypair_name))?;
+    let seed_phrase = prompt_password(&format!("[{}] seed phrase: ", keypair_name))?;
     let seed_phrase = seed_phrase.trim();
     let passphrase_prompt = format!(
         "[{}] If this seed phrase has an associated passphrase, enter it now. Otherwise, press ENTER to continue: ",
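
These renames track the `rpassword = "6.0"` bump in the Cargo.toml diff above: rpassword 6 dropped `prompt_password_stderr` in favor of a single `prompt_password` entry point. A minimal sketch of the new call, assuming the rpassword 6 signature (it returns `io::Result<String>`):

```rust
use std::error::Error;

fn read_passphrase() -> Result<String, Box<dyn Error>> {
    // Reads from the terminal with echo disabled; `?` converts the
    // io::Error into the Box<dyn Error> used by the functions above.
    let passphrase = rpassword::prompt_password("Passphrase: ")?;
    Ok(passphrase)
}
```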

@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
 edition = "2021"
 name = "solana-cli-config"
 description = "Blockchain, Rebuilt for Scale"
-version = "1.9.4"
+version = "1.11.0"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -12,13 +12,15 @@ documentation = "https://docs.rs/solana-cli-config"
 [dependencies]
 dirs-next = "2.0.0"
 lazy_static = "1.4.0"
-serde = "1.0.130"
+serde = "1.0.136"
 serde_derive = "1.0.103"
-serde_yaml = "0.8.21"
+serde_yaml = "0.8.23"
+solana-clap-utils = { path = "../clap-utils", version = "=1.11.0" }
+solana-sdk = { path = "../sdk", version = "=1.11.0" }
 url = "2.2.2"

 [dev-dependencies]
-anyhow = "1.0.51"
+anyhow = "1.0.56"

 [package.metadata.docs.rs]
 targets = ["x86_64-unknown-linux-gnu"]

cli-config/src/config_input.rs (new file, 126 lines)
@@ -0,0 +1,126 @@
use {
    crate::Config, solana_clap_utils::input_validators::normalize_to_url_if_moniker,
    solana_sdk::commitment_config::CommitmentConfig, std::str::FromStr,
};

pub enum SettingType {
    Explicit,
    Computed,
    SystemDefault,
}

pub struct ConfigInput {
    pub json_rpc_url: String,
    pub websocket_url: String,
    pub keypair_path: String,
    pub commitment: CommitmentConfig,
}

impl ConfigInput {
    fn default_keypair_path() -> String {
        Config::default().keypair_path
    }

    fn default_json_rpc_url() -> String {
        Config::default().json_rpc_url
    }

    fn default_websocket_url() -> String {
        Config::default().websocket_url
    }

    fn default_commitment() -> CommitmentConfig {
        CommitmentConfig::confirmed()
    }

    fn first_nonempty_setting(
        settings: std::vec::Vec<(SettingType, String)>,
    ) -> (SettingType, String) {
        settings
            .into_iter()
            .find(|(_, value)| !value.is_empty())
            .expect("no nonempty setting")
    }

    fn first_setting_is_some<T>(
        settings: std::vec::Vec<(SettingType, Option<T>)>,
    ) -> (SettingType, T) {
        let (setting_type, setting_option) = settings
            .into_iter()
            .find(|(_, value)| value.is_some())
            .expect("all settings none");
        (setting_type, setting_option.unwrap())
    }

    pub fn compute_websocket_url_setting(
        websocket_cmd_url: &str,
        websocket_cfg_url: &str,
        json_rpc_cmd_url: &str,
        json_rpc_cfg_url: &str,
    ) -> (SettingType, String) {
        Self::first_nonempty_setting(vec![
            (SettingType::Explicit, websocket_cmd_url.to_string()),
            (SettingType::Explicit, websocket_cfg_url.to_string()),
            (
                SettingType::Computed,
                Config::compute_websocket_url(&normalize_to_url_if_moniker(json_rpc_cmd_url)),
            ),
            (
                SettingType::Computed,
                Config::compute_websocket_url(&normalize_to_url_if_moniker(json_rpc_cfg_url)),
            ),
            (SettingType::SystemDefault, Self::default_websocket_url()),
        ])
    }

    pub fn compute_json_rpc_url_setting(
        json_rpc_cmd_url: &str,
        json_rpc_cfg_url: &str,
    ) -> (SettingType, String) {
        let (setting_type, url_or_moniker) = Self::first_nonempty_setting(vec![
            (SettingType::Explicit, json_rpc_cmd_url.to_string()),
            (SettingType::Explicit, json_rpc_cfg_url.to_string()),
            (SettingType::SystemDefault, Self::default_json_rpc_url()),
        ]);
        (setting_type, normalize_to_url_if_moniker(&url_or_moniker))
    }

    pub fn compute_keypair_path_setting(
        keypair_cmd_path: &str,
        keypair_cfg_path: &str,
    ) -> (SettingType, String) {
        Self::first_nonempty_setting(vec![
            (SettingType::Explicit, keypair_cmd_path.to_string()),
            (SettingType::Explicit, keypair_cfg_path.to_string()),
            (SettingType::SystemDefault, Self::default_keypair_path()),
        ])
    }

    pub fn compute_commitment_config(
        commitment_cmd: &str,
        commitment_cfg: &str,
    ) -> (SettingType, CommitmentConfig) {
        Self::first_setting_is_some(vec![
            (
                SettingType::Explicit,
                CommitmentConfig::from_str(commitment_cmd).ok(),
            ),
            (
                SettingType::Explicit,
                CommitmentConfig::from_str(commitment_cfg).ok(),
            ),
            (SettingType::SystemDefault, Some(Self::default_commitment())),
        ])
    }
}

impl Default for ConfigInput {
    fn default() -> ConfigInput {
        ConfigInput {
            json_rpc_url: Self::default_json_rpc_url(),
            websocket_url: Self::default_websocket_url(),
            keypair_path: Self::default_keypair_path(),
            commitment: CommitmentConfig::confirmed(),
        }
    }
}

@@ -55,12 +55,16 @@
 extern crate lazy_static;

 mod config;
-pub use config::{Config, CONFIG_FILE};
+mod config_input;
 use std::{
     fs::{create_dir_all, File},
     io::{self, Write},
     path::Path,
 };
+pub use {
+    config::{Config, CONFIG_FILE},
+    config_input::{ConfigInput, SettingType},
+};

 /// Load a value from a file in YAML format.
 ///
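
With this re-export in place, callers can go through `ConfigInput` to resolve a setting using the precedence encoded in `config_input.rs`: explicit CLI value, then config-file value, then (for the websocket URL) a value computed from the JSON RPC URL, then the system default. A usage sketch; the URL is illustrative, and `SettingType` reports which rule fired:

```rust
use solana_cli_config::{ConfigInput, SettingType};

fn main() {
    // No websocket URL on the CLI or in the config file, so the value is
    // computed from the command-line JSON RPC URL.
    let (source, ws_url) = ConfigInput::compute_websocket_url_setting(
        "",                                    // --ws from the CLI
        "",                                    // websocket_url from the config file
        "https://api.mainnet-beta.solana.com", // --url from the CLI
        "",                                    // json_rpc_url from the config file
    );
    assert!(matches!(source, SettingType::Computed));
    println!("{ws_url}"); // a wss:// URL derived from the JSON RPC URL
}
```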
Some files were not shown because too many files have changed in this diff.