Compare commits
4111 Commits
v1.6.0
...
dependabot
Author | SHA1 | Date | |
---|---|---|---|
ffb9efa89d | |||
aa49802119 | |||
bee586c6fd | |||
85e8bece2e | |||
db481e1799 | |||
115b488807 | |||
3993cd765c | |||
d9c259a231 | |||
071e97053f | |||
aea8f0df16 | |||
66b44b48a4 | |||
8b1cde83c1 | |||
1cf6c97779 | |||
1192e760a4 | |||
8b90084ebc | |||
f366e0f890 | |||
52045c761c | |||
fc21af4e6e | |||
0562426661 | |||
1c10677f82 | |||
2e56c59bcb | |||
7569f282c6 | |||
9666f4a8be | |||
fd0f5e4d12 | |||
714a344937 | |||
2b111cd631 | |||
1240217a73 | |||
31ed4c18f9 | |||
92e43df266 | |||
dec21524e1 | |||
8cb0b1ce9a | |||
b0486f0aaa | |||
ba5faa2582 | |||
e42316ba5a | |||
a300e2d2dc | |||
1ee6707bdd | |||
091929da12 | |||
90689585ef | |||
477cb539d0 | |||
88bf3c7063 | |||
d6011ba14d | |||
6d5bbca630 | |||
ce4f7601af | |||
d8cbb2a952 | |||
8dd62854fa | |||
a7f2fff219 | |||
0cf886302d | |||
fc8130955a | |||
4d00995d2d | |||
ae95540387 | |||
0e587488b5 | |||
aca1d577a4 | |||
7803e1be3d | |||
d137acf74d | |||
7be533a770 | |||
95bbb70c91 | |||
252b0c42d5 | |||
e3915e4b7a | |||
4abf7a7f88 | |||
38b02bbcc0 | |||
9977396d8f | |||
7d34a7acac | |||
373f200ab8 | |||
0eb488580d | |||
41fb98c771 | |||
a2d251ce1e | |||
e7777281d6 | |||
cca3dbc76d | |||
7ba57e7a7c | |||
f8db314134 | |||
fe7543c31a | |||
7f20c6149e | |||
d343713f61 | |||
b448472037 | |||
60850d71ce | |||
dcf44d2523 | |||
650882217c | |||
e616a7ebfc | |||
8dc6f9f589 | |||
49443406fd | |||
2d94e6e5d3 | |||
cc76a73c49 | |||
5e5cdf397c | |||
2c38a9213f | |||
901b2881fb | |||
b27976626a | |||
c17e54e3f6 | |||
65f1e0fcc2 | |||
3bd5a89d6f | |||
1a855aa6f7 | |||
eb461a5442 | |||
6edeed888d | |||
f34ade7610 | |||
7171b3a3ac | |||
70633b5c2f | |||
a724fa2347 | |||
2aa113fd8c | |||
1309a9cea0 | |||
e14ae33e86 | |||
871fd291f3 | |||
3b9654771d | |||
7d7228c356 | |||
56ac26f90e | |||
9c9f2dd5bd | |||
cddab635ff | |||
ae6c511f13 | |||
93a7b94507 | |||
f12a8fcd73 | |||
f804ccdece | |||
4c577d7f8c | |||
4ab7d6c23e | |||
eca8d21249 | |||
d3b95b9226 | |||
7711cd74c3 | |||
66a97bdde0 | |||
b635073829 | |||
e291342c4a | |||
b82d71d22a | |||
1632ee03da | |||
2756abce39 | |||
9c3144e286 | |||
ba046bd4dd | |||
2370e61431 | |||
f2908ed475 | |||
6614727be8 | |||
b211f839cb | |||
eaae2f3538 | |||
7171c95bdd | |||
b27333e52d | |||
4854fadc44 | |||
3e8ee27d9d | |||
8161cee70f | |||
de2a7fdc6c | |||
3ca16de851 | |||
c45dde6164 | |||
35a5dd9c45 | |||
157f165a3d | |||
637e366b18 | |||
a49ef49f87 | |||
8b66625c95 | |||
3c44d405c7 | |||
d064c40617 | |||
4214c69694 | |||
ec364cc737 | |||
49da347d84 | |||
2cc6f863bf | |||
0869f644fc | |||
04a2c19c87 | |||
5f1f4dcbdd | |||
aadf4b9b63 | |||
9bc2592da1 | |||
eeec1ce2ad | |||
39327ec753 | |||
28275a33d6 | |||
dd6b4ac221 | |||
428575f9ae | |||
3b4aad9df1 | |||
eaa52bc935 | |||
d8ec8ca2e6 | |||
0f94e1d3a2 | |||
d90d5ee9b6 | |||
2f29ff1a3f | |||
4a9f4e2505 | |||
ad3cb0bc93 | |||
4ce48307bb | |||
2759cb860b | |||
813006b33b | |||
81a10e649f | |||
6d76db1de5 | |||
5771c36d3f | |||
b11d3b5abf | |||
c2389fc209 | |||
e2aa932e97 | |||
0eb0d62e95 | |||
9f1f64e384 | |||
207825d30b | |||
52d12cc802 | |||
08e64c88ed | |||
b2687b7e70 | |||
9cb27613c3 | |||
5f611723a5 | |||
0459f0a4c0 | |||
f1e2598baa | |||
e0c091a9f4 | |||
100293c4b5 | |||
705084a25b | |||
f81f926a0c | |||
bc654bf865 | |||
390ef0fbcd | |||
72fc6096a0 | |||
deb9344e49 | |||
848b6dfbdd | |||
b25e4a200b | |||
7d32909e17 | |||
47b74e28ec | |||
d9220652ad | |||
0673dc5375 | |||
12e160269e | |||
37ebd9bd9e | |||
005ca7759e | |||
cd24ec2ef6 | |||
cd54b90ae6 | |||
ab13e39518 | |||
f2ed6f09ee | |||
959ea26816 | |||
bb3a1b6b31 | |||
98e7fada15 | |||
69e632a337 | |||
e1848ecbcc | |||
a9b0309c1f | |||
a08e3760d8 | |||
f2e7f0c32b | |||
9c88f8205b | |||
d5196046dd | |||
39e0c19b4b | |||
11cac460aa | |||
62b6eafd7c | |||
af80203522 | |||
6cdb94ccc5 | |||
c38309791b | |||
f1345c7bb3 | |||
d5472faa96 | |||
b902ee6ad5 | |||
b1be9a7907 | |||
b96e07d161 | |||
0e8aa331ad | |||
dc52817408 | |||
6ba3f4dc6d | |||
8b15d7bc28 | |||
00fa5a93b8 | |||
1a254ec098 | |||
5b41d62f8a | |||
0944abc0e2 | |||
1cbcda71cb | |||
7439d2424b | |||
a211fe1cf4 | |||
e1d3883893 | |||
826c3bee4a | |||
e561fbc25a | |||
bc7ac42f2a | |||
601247d958 | |||
beb95c4884 | |||
08ef612361 | |||
584c63bcc4 | |||
c26fa1d0e9 | |||
208621e3cf | |||
c6cd0a5591 | |||
e011502875 | |||
8ee07cd5c6 | |||
31737406da | |||
93860e88d2 | |||
9a43fbe3b2 | |||
7a568482de | |||
30871784e4 | |||
c7bf9958e7 | |||
725781eaa7 | |||
dcc961ae00 | |||
ccdbe65c87 | |||
30e12aef9a | |||
6c329e2431 | |||
f0db6020eb | |||
aba8c2f4af | |||
c61775664e | |||
69fab16e83 | |||
88ce934bd7 | |||
2c51288afd | |||
8d731f1a70 | |||
c59e8f7c8d | |||
973287ad66 | |||
15aea0fe47 | |||
c1db2b4866 | |||
425a4a4082 | |||
fdb658fff4 | |||
c155519ae1 | |||
221f499041 | |||
89ddae29ef | |||
defdf8da72 | |||
3721eda23e | |||
c7fc430adb | |||
77e79221a0 | |||
b0e492bc06 | |||
173d88d514 | |||
22114c523f | |||
88f952075d | |||
c51a51d0ad | |||
2359150b9c | |||
6749c45c63 | |||
57103c515b | |||
2d225de48c | |||
beba0eac55 | |||
e0c168ef3f | |||
72ade5473a | |||
abe6b27b34 | |||
0ac6427abc | |||
17f5dd734c | |||
a707e85c10 | |||
ecbdb6ba68 | |||
2eb326b0da | |||
f350fa7147 | |||
0cc717340c | |||
a368adcd30 | |||
500423626d | |||
aea95e8ff3 | |||
0bd28f9620 | |||
65cf599786 | |||
9fdadb503d | |||
ee6a13ef6f | |||
30702dcdee | |||
43e368faf6 | |||
7aef523a41 | |||
4b61e27d12 | |||
a8ab615c89 | |||
93eb49a3e3 | |||
c33e24de57 | |||
f272c025bd | |||
6b59beda7b | |||
1daf676b37 | |||
2c1aa715b0 | |||
2d62e4e6bd | |||
09b8baa4b1 | |||
db69128825 | |||
a5d1efc207 | |||
25216705b3 | |||
1af1106b87 | |||
73c06d9e33 | |||
20c6001836 | |||
c150b4b197 | |||
a40e7fc59b | |||
17cda46531 | |||
42f7c0c7f6 | |||
20bce10204 | |||
9b73e351aa | |||
d6a808f41a | |||
93f2323e52 | |||
75896958b6 | |||
a622ee4b8d | |||
94a96670e8 | |||
8bb6f0dc6f | |||
5445e13828 | |||
23d3b540a1 | |||
1ef3a621a8 | |||
d20d03cd7f | |||
409b55ad81 | |||
667e72144e | |||
2f138ecb96 | |||
48047b55ba | |||
f227504ea7 | |||
78799640ea | |||
f3e7e62813 | |||
d01d425e4b | |||
7da620f0b4 | |||
88b71c0732 | |||
df521bbfc8 | |||
03a3a501f3 | |||
ae5d254e73 | |||
0e1afcbb26 | |||
44d61465f1 | |||
9f63493789 | |||
c1995c647b | |||
5bb376f304 | |||
45458e7139 | |||
e201b41341 | |||
af7a2e3daa | |||
9725f2e319 | |||
212e6ea4a4 | |||
379feecae5 | |||
4b24499916 | |||
6c65734330 | |||
1460f00e0f | |||
01a096adc8 | |||
73a7741c49 | |||
aa9f7ed7e8 | |||
2486e21ffe | |||
ca5591bfa0 | |||
9665da9d0b | |||
ca8fef5855 | |||
2b5e00d36d | |||
8b6310b179 | |||
e8b7f96a89 | |||
33ad74fbcd | |||
6895eb7ef6 | |||
25cb859ed0 | |||
5b6027bef0 | |||
ed0b47c6f8 | |||
20b61e28b6 | |||
73e6038986 | |||
672fed04cb | |||
005592998d | |||
69d71f8f86 | |||
b18462737e | |||
53777f2fbf | |||
bbe5b66324 | |||
0e4ede46d1 | |||
9029b46570 | |||
ecbfc70bfa | |||
56fd32bda2 | |||
04b76eb066 | |||
fb62407232 | |||
2a00382d71 | |||
86acd8f6f9 | |||
64b153fae0 | |||
ce6c76e45f | |||
d6ec103bee | |||
a2a7e91ad6 | |||
d743c2917c | |||
0b1b36f088 | |||
8a43e2d889 | |||
e529d03c11 | |||
557d35ec79 | |||
94a9b712b6 | |||
f479ab7af2 | |||
d06e6c7425 | |||
95dfcc546a | |||
4e4577afbe | |||
736f974082 | |||
d421ccb330 | |||
c0c6038654 | |||
edb20d6909 | |||
3f88994e0f | |||
29edb130cc | |||
3c1416091e | |||
a1912f8400 | |||
70f6aff07f | |||
33d0b5e011 | |||
135af08b8b | |||
972730924b | |||
f14928a970 | |||
c9c78622a8 | |||
b1d9a2e60e | |||
d20a3774db | |||
bac6821e19 | |||
57986f982a | |||
eaa8c67bde | |||
f061059e45 | |||
f8c97a3d1f | |||
3d6ab96587 | |||
c7b0917e1a | |||
422a095647 | |||
e61a736d44 | |||
f2c51653e4 | |||
e3b20c443a | |||
800472ddf5 | |||
e1a0660948 | |||
a06646631c | |||
bb97c8fdcd | |||
f643a8b425 | |||
0a0fc85282 | |||
77b26b62e0 | |||
cc947cad03 | |||
60ddd93d09 | |||
b89cd8cd1a | |||
2ab4f34c02 | |||
214b561a28 | |||
ec7536faf6 | |||
b93ab5d295 | |||
d06c04d02c | |||
52c1eb0160 | |||
93c776ce19 | |||
f10407dbc3 | |||
298c2d0f62 | |||
67c8034fe5 | |||
dd80a525ef | |||
2be139ca61 | |||
37f6777ceb | |||
f67ecd5c18 | |||
61cc7b10a9 | |||
4d62f03297 | |||
d6de4a2f4e | |||
bf8fbf8383 | |||
3155a04189 | |||
ddde356b33 | |||
7cc6262b5a | |||
bdae2993e0 | |||
5b464a32f5 | |||
9c5d82557a | |||
1c0cd2cbb4 | |||
b36f7151fc | |||
711856cad3 | |||
84eaaae062 | |||
d896ff74ec | |||
ba8e15848e | |||
41ec7c8be9 | |||
252b0554ca | |||
69d0b08dd8 | |||
3356c6afb2 | |||
2347f65133 | |||
755e816521 | |||
f5d1115468 | |||
c0c3d7c1f2 | |||
e810400716 | |||
116517fb6d | |||
b8eff3456c | |||
eeb063b957 | |||
e92a81b741 | |||
65d59f4ef0 | |||
df6a4930b9 | |||
301d585d47 | |||
7476dfeec0 | |||
3fe942ab30 | |||
8f547a6c98 | |||
7f6fb6937a | |||
97a1fa10a6 | |||
76098dd42a | |||
5f054cd51b | |||
68a2570ebd | |||
94aa9e568a | |||
0f6e8d3385 | |||
70f96bda25 | |||
8ed7ad5fa7 | |||
056f2e9e67 | |||
729698e815 | |||
af53d2f692 | |||
c04737ae69 | |||
89d66c3210 | |||
7ec39f5a1e | |||
66fa8f9667 | |||
56ec5245cc | |||
c4389a6675 | |||
5d40da5688 | |||
6374995522 | |||
ba777f4f56 | |||
385efae4b3 | |||
e11a1911ad | |||
6ff0be6a82 | |||
a5769c029f | |||
347323cbb2 | |||
3398f5a2f5 | |||
efd64a3862 | |||
e83ca4bb28 | |||
e97da0ea15 | |||
18417e410e | |||
8183f28636 | |||
49cb161203 | |||
82672b40fd | |||
1e0d3f13e6 | |||
635337d2ff | |||
46e5350d8c | |||
e5be96d8bf | |||
882f886450 | |||
e374fb1d60 | |||
5fb7da12f2 | |||
ed924e3bc4 | |||
9b06d64eb8 | |||
0e9e67b65d | |||
9797af6f85 | |||
41dd31e5f4 | |||
02fa135815 | |||
71b12b1f56 | |||
e476e17abf | |||
e124659aca | |||
c2a94a8fb0 | |||
8d22ca5076 | |||
dcd2854829 | |||
7ba27e5cae | |||
2a6dcb2ffd | |||
dcb5849484 | |||
e4f7af0b48 | |||
9da826421a | |||
003ba1f092 | |||
f4308bdb64 | |||
cb395abff7 | |||
e694acaf5f | |||
8d980f07ba | |||
d13a5056f1 | |||
4ceb2689f5 | |||
426f2507d5 | |||
ec583bd12d | |||
b610e5503e | |||
e19c7923c3 | |||
509bcd2e74 | |||
a86fe899ac | |||
4adc8b133f | |||
c92c09a8be | |||
e5476913fe | |||
746869fdac | |||
033106ed81 | |||
8a63812c4e | |||
4a9d7318d1 | |||
018b54dbd7 | |||
3202cc7eef | |||
6fc6673ead | |||
f402cbe64e | |||
98e5ea9dce | |||
17cd14ad88 | |||
ed4897d715 | |||
fd212fd2a4 | |||
eebaf89874 | |||
bed1b143a5 | |||
a8ba979360 | |||
3bc3af2332 | |||
50f26ea9c0 | |||
b81124deec | |||
5a28f61f49 | |||
6155ef6377 | |||
8aa3d690b5 | |||
d5879bafe9 | |||
9e9a1a4876 | |||
c9e0bde407 | |||
4bc5bfb2df | |||
1149c1880d | |||
90f41fd9b7 | |||
025a5a3b9c | |||
825f8bcea4 | |||
c5b6ea74ef | |||
4c0373b72a | |||
a400b5e63d | |||
ef7f49a21d | |||
c5c699a918 | |||
eeb97fe7ce | |||
e08139f949 | |||
254ef3e7b6 | |||
379e3ec848 | |||
2bbe1d875a | |||
9b41ddd9ba | |||
a5a0dabe7b | |||
49ba09b333 | |||
ec7e17787e | |||
15a9fa6f53 | |||
80eae20610 | |||
350845c513 | |||
65194c7ae8 | |||
6c108c8fc3 | |||
fd175c1ea9 | |||
622fd7c7ec | |||
08940fff10 | |||
75385f657a | |||
b07fd1fead | |||
b3c5a299be | |||
d870f566ef | |||
f56eb7f882 | |||
c1386d66e6 | |||
805d53fc10 | |||
16a6dceb6b | |||
f32216588d | |||
f4babb7566 | |||
a35df1cb02 | |||
03a956e8d9 | |||
488dc37fec | |||
6919c4863b | |||
f865392eee | |||
1528f85112 | |||
c965517a6b | |||
6d18b6bab5 | |||
6fc329180b | |||
a2df1eb502 | |||
8063273d09 | |||
58a7d3fc0e | |||
0090916735 | |||
7f48f67948 | |||
0a11be496a | |||
7b722abb63 | |||
c665fc7a9b | |||
84ef6462c2 | |||
b2ed9daada | |||
b0be0881a7 | |||
91efd6e102 | |||
ae024f666a | |||
0fd8e3c5d7 | |||
e020960f49 | |||
54862eba0d | |||
b61b7189a5 | |||
181c0092d6 | |||
824994db69 | |||
8d1e5ac294 | |||
923720f529 | |||
7c9abaff2c | |||
40a04490ad | |||
99da25dc9d | |||
7cd5c0cd20 | |||
ea1b59f684 | |||
f98e198dcf | |||
483f5cce46 | |||
0224a8b127 | |||
4da435f2a0 | |||
f0acf7681e | |||
1df88837c8 | |||
94b1cf47ca | |||
31b8fd3109 | |||
45e56c599d | |||
a2477c1f32 | |||
b57097ef18 | |||
388e34f8e9 | |||
293629bdb5 | |||
c5811e5a14 | |||
cf5a1448c5 | |||
aff8e82705 | |||
205fd95722 | |||
f6801a4af4 | |||
92a1fc076c | |||
da4015a959 | |||
a1adcb23b6 | |||
873fe81bc0 | |||
f493a88258 | |||
e123883b26 | |||
008655e3a4 | |||
6f3f6eddb2 | |||
3dab1e711d | |||
df2b448993 | |||
d1c101cde2 | |||
b5353e2130 | |||
cd9dda4701 | |||
b8b721b7c2 | |||
a75db69105 | |||
f4e92f9b54 | |||
72669cad8c | |||
f3c2803af9 | |||
cd17f63d81 | |||
3e5a5a834f | |||
f4ca87205f | |||
d6f22433d0 | |||
9c6b95e1e1 | |||
4c4bc873a0 | |||
7261268572 | |||
a86e81fee9 | |||
446089edb4 | |||
c4a9c8b5e9 | |||
e130b2cffc | |||
a433bb310d | |||
4dcf594856 | |||
c9bfc99c72 | |||
689b8d720e | |||
46fe56171b | |||
1430b58a6d | |||
377efefead | |||
b8837c04ec | |||
0ef1b25e4b | |||
1a4a039913 | |||
015250f96c | |||
a9d5ef2055 | |||
dab0e8fdc7 | |||
70de8313df | |||
877a05ec19 | |||
18a16ad956 | |||
1ae9cdcb43 | |||
2b269dbe0e | |||
bdc33ba0a1 | |||
bf5b7f5d7f | |||
f0b32b75ab | |||
314605e149 | |||
9886366977 | |||
976eb81d4f | |||
bfdb775ffc | |||
8dfa83c579 | |||
6bb884836c | |||
adbf31b98c | |||
d1d7182365 | |||
3c8b33eaef | |||
c825b0a84d | |||
8ba676e4c7 | |||
a696afe08c | |||
6330cbcf33 | |||
9079de825b | |||
60fe1d00e8 | |||
04c0f124c4 | |||
0fc1c2e1fb | |||
c8a52337c8 | |||
1fae3d24de | |||
91c6a10cda | |||
308d7d40d0 | |||
1eefdeba85 | |||
f598ee696c | |||
f5b0764795 | |||
3d21b062cc | |||
0212a71d3e | |||
4a1ef12bd9 | |||
b108d7ddaa | |||
cb368e6554 | |||
3da914330d | |||
1f13fd64bd | |||
b2d9bff18c | |||
4e66a1f407 | |||
b78f5b6032 | |||
e922c2da9d | |||
0937e407fa | |||
a0d490d98e | |||
0406d524e6 | |||
dd12d90eac | |||
098dba607a | |||
8cf36e5cb0 | |||
8d0357794e | |||
393c7653c7 | |||
de8edad30b | |||
a125e7b8c2 | |||
d8fb7ce511 | |||
7ec88226ee | |||
38fcfb7542 | |||
22ada18957 | |||
c9aa7ed5ca | |||
e960634909 | |||
0573a25cc5 | |||
2aa7d73df9 | |||
63991cb2f0 | |||
f6366ef9db | |||
e16bea4136 | |||
db19008888 | |||
739251aecb | |||
e31b469f56 | |||
d637371882 | |||
2be859d304 | |||
cdfa7cdc85 | |||
6f6a42f174 | |||
676a535097 | |||
afb2078529 | |||
f25a31fccf | |||
dd55a2706b | |||
12dbcae75a | |||
2c9bc2ab99 | |||
804461c1c0 | |||
ba9dfa0d22 | |||
09799590ac | |||
84809f56bd | |||
1a23a55811 | |||
a275ce5f4d | |||
d36ff8d978 | |||
88ef24cecb | |||
5ce246e737 | |||
1cd1d3ff72 | |||
198c275cde | |||
1200959630 | |||
3b3148cc5a | |||
ab62d370db | |||
375a4d44ff | |||
235e33b640 | |||
3726976006 | |||
647440d34c | |||
4c4f64561b | |||
ca716c400a | |||
cd7331c6be | |||
7d6f80f70e | |||
91d28c5770 | |||
ff139f9a17 | |||
ffb5518cbe | |||
160fb69d7d | |||
04551183a5 | |||
7aad6fa6a6 | |||
4b67a6900d | |||
46aed1b7f9 | |||
8c5401e18e | |||
3f6b644d9f | |||
0208954a9b | |||
3e91a7f9b8 | |||
07a2a13c9f | |||
2f3a49fab4 | |||
54c3b9d35c | |||
02c07d0c93 | |||
23263aa7fb | |||
60e7330b1f | |||
b8af357b59 | |||
4d758c0339 | |||
88f8c89f18 | |||
41cc7af7cd | |||
e558ad4c71 | |||
dafdc15dfe | |||
efedb55705 | |||
69e1b68fbb | |||
755221bac4 | |||
238d65c0b8 | |||
e7fa412465 | |||
81c497d263 | |||
1aebe655b7 | |||
c68a11f715 | |||
3c2276fd09 | |||
fc01120fa6 | |||
2e1e4fb8a4 | |||
9e043d2055 | |||
9edfc5936d | |||
9fc3bf189d | |||
c29838fce1 | |||
2602e7c3bc | |||
dd338b6c9f | |||
22a2537aac | |||
cd5a39ee43 | |||
191cb62c37 | |||
f3f8d2e4f3 | |||
b87ebf9e58 | |||
f603378f8b | |||
a762066310 | |||
87831e7f8d | |||
c4d68063c7 | |||
f31ca8ba8c | |||
cd6f931223 | |||
02bc4e3fc1 | |||
2ed7e3af89 | |||
ebea3297f9 | |||
0bb059185c | |||
4530a5980e | |||
7da2df7d8c | |||
8a50b6302f | |||
0bda0c3e0c | |||
b1b334a17f | |||
79d21d6805 | |||
48dfdfb4d5 | |||
57057f8d39 | |||
bae5dae61d | |||
c24e30f689 | |||
f7152c889c | |||
5788973153 | |||
03c36d240a | |||
1a7cefded7 | |||
b30c94ce55 | |||
e9c00d1e69 | |||
0ca255220e | |||
a272e19f8d | |||
d12dfc1918 | |||
915bf8576f | |||
89102540b1 | |||
89c45a57f8 | |||
9eb0c018dc | |||
9cb74e8421 | |||
83de2f7376 | |||
91fe1278f1 | |||
66fa062f13 | |||
0f69a14247 | |||
e540b1cf3c | |||
c3e5927d16 | |||
6546c3cbae | |||
c6baff6698 | |||
cb0bb5bd1e | |||
8e0068ca6a | |||
3865d9e80d | |||
3aa49e2c69 | |||
d5de0c8e12 | |||
db6d291127 | |||
0661aa67ed | |||
ae497715cc | |||
7e600bd451 | |||
72d7e426a6 | |||
2300a8c5dd | |||
bea61e9543 | |||
398af132a5 | |||
33d9491f41 | |||
55ea672041 | |||
77a59ca3e3 | |||
f2bd9947cc | |||
d8a392c20b | |||
f331ae95ad | |||
119549403d | |||
9b1bf98aa2 | |||
a043b19988 | |||
f707dac358 | |||
47e6848fbf | |||
647aa92667 | |||
39340ed25b | |||
3f4f05865d | |||
3fc858eb60 | |||
d25d9be555 | |||
7e98b77b33 | |||
156b8ead42 | |||
ae3777cadb | |||
f8dcb2f38b | |||
778b5d736c | |||
b0ca335463 | |||
1509513339 | |||
92d215bcee | |||
11153e1f87 | |||
ef29d2d172 | |||
3c7cb2522c | |||
131c0d78c4 | |||
4071761daf | |||
c0f9e351db | |||
13735a77b8 | |||
5fb0ab9d00 | |||
fa7b5ef750 | |||
f4caea306e | |||
8b2133b078 | |||
3c5f505d3e | |||
067e29ae0b | |||
b4d09388aa | |||
55c97275bc | |||
48265ada39 | |||
ccf2a05c8d | |||
6f3c27f1da | |||
5827cf4c51 | |||
c2754b8a51 | |||
a4a9b8f6a4 | |||
c56fb0f014 | |||
2df4e7eea3 | |||
7659a2edc2 | |||
706b60b5c8 | |||
1b46d1d54d | |||
2031522809 | |||
6704fa011f | |||
ed5614cbde | |||
15925c2aa2 | |||
a32413f715 | |||
0e751ef7df | |||
c78f474373 | |||
18b1baa3c1 | |||
7200c5106e | |||
0597594943 | |||
4e527b84b0 | |||
3a577b4625 | |||
7495cbd1c8 | |||
171243a370 | |||
e765542cd8 | |||
11a53de0e3 | |||
76e533be46 | |||
aaccba8377 | |||
d2974fcefc | |||
b1e3a82c11 | |||
29ad081555 | |||
692cd5a2f8 | |||
75a835114b | |||
437f66be0f | |||
c326f41dfe | |||
7508c36209 | |||
b4ff488a0d | |||
ae207b7dbb | |||
221b0f6841 | |||
55d19e61da | |||
e22c97d541 | |||
20b3df02d4 | |||
b8ac6c1889 | |||
c59c56c0f8 | |||
c0952831be | |||
140a5f633d | |||
e6280fc1fa | |||
346cb9a275 | |||
1adf255e3a | |||
b9eee66f94 | |||
9ff92cb0dc | |||
1e5212e60d | |||
9ff561134d | |||
96e5342292 | |||
02826a989e | |||
1b47f7f2cd | |||
fc3b1bc62f | |||
59a1d46c38 | |||
ec86ff8fb3 | |||
ca51ea3e04 | |||
02f60b0581 | |||
ef6f40eae0 | |||
a478f9d09b | |||
3b706cfc42 | |||
e88a244333 | |||
b2f8738653 | |||
e1ee6009cc | |||
9131f52c96 | |||
ffca62a940 | |||
6d437ccd32 | |||
9fabff5129 | |||
484ead01ed | |||
696501500f | |||
7409d9d268 | |||
855b3ccdc0 | |||
9d07746642 | |||
32a242e777 | |||
bced07a099 | |||
aea3c66fa8 | |||
62c8fb4792 | |||
4663b86b8b | |||
1453fce6c6 | |||
91c49d7023 | |||
a0f9e0e8ee | |||
78d99b89c0 | |||
a8d78e89d3 | |||
e16c060abf | |||
3140d7741c | |||
0b8fcf0808 | |||
976298b292 | |||
d5104d95d1 | |||
eea3fb327f | |||
4aa12a52b6 | |||
53358ab1aa | |||
7a41b2c5d1 | |||
e9ab214237 | |||
7bb347faac | |||
4642a2c856 | |||
f0de3e9bf0 | |||
6a1ff60c61 | |||
700e42d556 | |||
036d7fcc81 | |||
9d330fc638 | |||
bbd72a87c2 | |||
f3e49cdf90 | |||
57af5064c0 | |||
6cdade2d36 | |||
852f0ca3f1 | |||
5309a5149d | |||
3a0041f4a6 | |||
031594c200 | |||
cbfdeb6134 | |||
76bc4d4cce | |||
874db73fae | |||
04a337f134 | |||
c9daa5af30 | |||
84c4c68218 | |||
de1c2718ff | |||
f385fa6d1d | |||
ced1505b75 | |||
1e2bef76e3 | |||
feca3fd78b | |||
ebbb237844 | |||
8efc577374 | |||
920159fc63 | |||
4287bf6b8b | |||
37ef1acada | |||
b2f6cfb9ff | |||
35d71d94c1 | |||
5f7b60576f | |||
4d6190a1ba | |||
4fe3354c8f | |||
261dd96ae3 | |||
521b7b79cc | |||
dec104c580 | |||
2515f6a04f | |||
43168e6365 | |||
1297a13586 | |||
cd5e690427 | |||
6aa8d0a885 | |||
19edc05bad | |||
19b8b89138 | |||
cb1947bea2 | |||
fb36f0085b | |||
6470560dd1 | |||
58aa2b964b | |||
337b94b3bc | |||
bbe3ce3db5 | |||
cf0fd5b2ca | |||
8efba88dde | |||
9e85499d4c | |||
f4f281e7c7 | |||
af405f0ed7 | |||
91de3013dd | |||
b825390752 | |||
43ea579f63 | |||
e03dc9e8e3 | |||
d9d4149a0c | |||
24a64615c4 | |||
b4d528119d | |||
ed1cded2ab | |||
e3c68c2b2a | |||
1ed26022e8 | |||
968aafdf8e | |||
a78a675584 | |||
45449b1d01 | |||
c2bfce90b3 | |||
aa13c90dd7 | |||
f14365f4b4 | |||
5e1cf39c74 | |||
edf5bc242c | |||
89caf65966 | |||
63f94a4db3 | |||
4188554e77 | |||
791a2c8d1c | |||
8905244a39 | |||
62e9d6f700 | |||
25235de6f3 | |||
bfe3ef9011 | |||
a695e2a9d5 | |||
81cc3eaabb | |||
9136222925 | |||
c1b577df54 | |||
350bb561eb | |||
293a0d8797 | |||
e97ef6629a | |||
f83817ce5d | |||
2da5238dcc | |||
9dd87bcdb5 | |||
d98c8b861c | |||
30d277b9fd | |||
152da44b62 | |||
b9eb6242f5 | |||
bfbbc53dac | |||
613c7b8444 | |||
f006925b6e | |||
8324482ffb | |||
900d82b859 | |||
b53352516b | |||
b20ab95a87 | |||
22d1210a80 | |||
7ba731b192 | |||
2ba794e2f7 | |||
bf2773c7d2 | |||
f01127b4c8 | |||
06d7a6fef3 | |||
a005773d10 | |||
86bf071d77 | |||
4fbf44dc75 | |||
2889c5ce58 | |||
6196c80a3d | |||
735016661b | |||
1d9c1ccfd2 | |||
b18515ad55 | |||
d96191ad8c | |||
b3d2ce0135 | |||
de96d93986 | |||
4d9237fddf | |||
197932d5c7 | |||
082ae1f6cb | |||
396262866f | |||
e277673d9c | |||
f729dec321 | |||
e7c9b9329d | |||
0d4dbd5588 | |||
1f57d6dc82 | |||
73cdb32fe5 | |||
b555ab0571 | |||
388d1dec31 | |||
50571685d4 | |||
a9177a5447 | |||
fac2e83928 | |||
64e4bbf829 | |||
8a3805be88 | |||
4f01b3fd31 | |||
8959d5e21c | |||
9b1199cdb1 | |||
97c2732d02 | |||
0ac89841bf | |||
73a7abdadf | |||
842ff774e6 | |||
4821b0a1d9 | |||
05344ad991 | |||
76c74b5d7c | |||
813f0fd7f9 | |||
87b3c141c9 | |||
71d0bd4605 | |||
fbc0919a1f | |||
d1d4d687af | |||
aead16496d | |||
f662b4ad7e | |||
2744a2128c | |||
fe098b5ddc | |||
95e91a4863 | |||
451aac7f7f | |||
443611d050 | |||
6d4c5008c9 | |||
691b4654e5 | |||
11f26a688f | |||
ef13c97867 | |||
c3b7f49e03 | |||
8305923527 | |||
66e431dad5 | |||
a8098f37d0 | |||
58164517e4 | |||
b5f21d5e34 | |||
0c7bade0b2 | |||
cb2bd65858 | |||
d9b0fc0e3e | |||
72e1efb847 | |||
2c2bcd20e6 | |||
47a58a38c2 | |||
0acbfdfcb9 | |||
cc4bb5a451 | |||
7496b5784b | |||
27d1850c3d | |||
8eced85881 | |||
7a36bf5095 | |||
dc1b8ddea1 | |||
96c6ba6eb2 | |||
2b76ea51b4 | |||
3f559cc2c9 | |||
5794bba65c | |||
7baeb04f26 | |||
46bf7d4a4a | |||
33f4e79589 | |||
751b45df64 | |||
14af1957d6 | |||
84660bbf3d | |||
c9b367c350 | |||
469cb8af00 | |||
6ec159a1ae | |||
0d934d311e | |||
d12b724e04 | |||
a33ec51b89 | |||
97db78a355 | |||
7d2f7c6942 | |||
416421eb8d | |||
e98cfbb570 | |||
8955c76d24 | |||
2c60c48eb2 | |||
231b58b5f1 | |||
5a02a92013 | |||
a674fe9888 | |||
d363ec98a9 | |||
05569caade | |||
70b2e5fef2 | |||
4cac66244d | |||
47de4f31b6 | |||
254ad45878 | |||
44ff30b65b | |||
6d7da6dbee | |||
9543fd9cdd | |||
5a6d732d48 | |||
9d62963240 | |||
bea181eba9 | |||
1e064c32e2 | |||
00b9056004 | |||
b80fd7566a | |||
e225ed7ab0 | |||
0f03971c3c | |||
0419e6c22e | |||
4ec65b6531 | |||
b66e6cc6f7 | |||
ad0a88f1f2 | |||
e9a427b9c8 | |||
4beabb3a43 | |||
87d5274ce4 | |||
0c0384ec32 | |||
6649dfa899 | |||
0c66099cec | |||
7eead4e25f | |||
c43b6e0248 | |||
9b042fed30 | |||
e11f1b3e9e | |||
02c76bfdb1 | |||
c901f47228 | |||
47ef12f19b | |||
5483d6684e | |||
92f2314b72 | |||
416f859517 | |||
91bba5b333 | |||
f071b991b7 | |||
ea847d870e | |||
683f038408 | |||
717b28cb32 | |||
177671590b | |||
bf52cef77c | |||
82fe12fb5d | |||
26666edebc | |||
49af5342f6 | |||
aa36b997aa | |||
220a8bd51c | |||
209d3cb629 | |||
bbc0bb4f02 | |||
588168b99d | |||
e806fa6904 | |||
13462d63a2 | |||
da45be366a | |||
149d224557 | |||
220fd41bbc | |||
08e40bf32e | |||
da0e570c0d | |||
cd87525f54 | |||
aec9d8bf2f | |||
60fe29b63e | |||
3b40fa3f44 | |||
b4f146f692 | |||
a4f9ea7c26 | |||
5a7c76b4f9 | |||
f469af7626 | |||
2b03ac1284 | |||
7bba6420ec | |||
3245042c2a | |||
09e7782d76 | |||
1d813ea078 | |||
c231cfe235 | |||
e6776effb9 | |||
7631011d8c | |||
06cad19c91 | |||
005d6863fd | |||
a723de0e25 | |||
f77c2880f3 | |||
0f6a35c4fb | |||
133cce45f2 | |||
46230185c2 | |||
94ca5063de | |||
b0ee5d6cfa | |||
a4c9b0f358 | |||
03d3e0098e | |||
0da677e213 | |||
1417c1456d | |||
9d94e43839 | |||
a8e000a2a6 | |||
61ba8d1ecf | |||
e9f459ef21 | |||
185c9f9e8f | |||
9847566654 | |||
40824bcc95 | |||
76bfb0d609 | |||
f3f28f9611 | |||
f30f3bddbb | |||
c16510152e | |||
8cf222b8bd | |||
33d8c07364 | |||
bdf8b1da6b | |||
e03022101b | |||
838ff3b871 | |||
db9336c99e | |||
d621994fee | |||
78f9e65f30 | |||
0545306f0c | |||
a03aba15b0 | |||
5440c1d2e1 | |||
675fa6993b | |||
17314f4a95 | |||
8422a12ac6 | |||
e2e0958c3b | |||
4f6a0b2650 | |||
2c3d52b4cc | |||
00c6536528 | |||
4c3707f618 | |||
5b4e0988c3 | |||
1fcfbfccbb | |||
4e65487d2f | |||
a6a4cfda89 | |||
57592e463e | |||
069586c171 | |||
5e431fbee2 | |||
6cd1a03bb5 | |||
0da661de62 | |||
d56ad8ff4f | |||
839edd35a4 | |||
0ebd8c53ee | |||
1c36af158f | |||
9c8760d37a | |||
72848be36d | |||
d81792d0ef | |||
177a375479 | |||
2a91b34656 | |||
256be7ed55 | |||
60610f1821 | |||
ac6b91c2c0 | |||
ba95863b49 | |||
49f6b7e22a | |||
f966859829 | |||
df39b37cb8 | |||
767f740305 | |||
5ccb039254 | |||
3b70cdb8d8 | |||
00929f8363 | |||
5ceb5ef328 | |||
e13ed8a627 | |||
785fcb63f5 | |||
a98aefa14e | |||
7027d56064 | |||
eb4ce3dfed | |||
b178f3f2d3 | |||
895b46e0c3 | |||
7149c2da0d | |||
92b68352d1 | |||
9d0a1707f7 | |||
01fb2df9c1 | |||
d1a26cd305 | |||
03913f6661 | |||
129716f3f0 | |||
48d3627c8b | |||
df73d8e8a1 | |||
250a8503fe | |||
18e47ab9f9 | |||
6ea96b3274 | |||
d03bf2bbfe | |||
4267419349 | |||
0d98a91511 | |||
71ec05e2f6 | |||
7783030437 | |||
dc47a56c22 | |||
4e3818e5c1 | |||
6ff508c643 | |||
2400e86d13 | |||
db4252c688 | |||
c220a595a5 | |||
6df0ce5457 | |||
258c3bca65 | |||
5d141fe01d | |||
221343e849 | |||
64cf354651 | |||
2d78f8ad2a | |||
2d5b471c09 | |||
0ddb34a0b4 | |||
2b967202f3 | |||
b6a6d6986d | |||
196cc8cfdf | |||
cbf427228c | |||
8da2eb980a | |||
fb8a7cfa92 | |||
d8e4840b49 | |||
d6fdc4b63c | |||
b7f16cdf3b | |||
e7e9bb753c | |||
b4f999d3d7 | |||
d2cfe4998a | |||
852b25e167 | |||
4b2fe9b20d | |||
d7d9e30c27 | |||
87c0d8d9e7 | |||
4cd50f5d45 | |||
fc5dd7f3bc | |||
41c160d582 | |||
dfb5b21c04 | |||
4fd0a42798 | |||
de8331eeaf | |||
fc6b617477 | |||
e31c065544 | |||
acfe76b622 | |||
fe97cb2ddf | |||
9ef1f06b3f | |||
bcf92b63bf | |||
8ae88632cb | |||
a7f2d9f55f | |||
2d8a2be795 | |||
d5dae4fc0d | |||
66e6110f5a | |||
65de0f40b8 | |||
c9628c9170 | |||
3854cfaa00 | |||
94668c95c2 | |||
5952b65932 | |||
8db9586599 | |||
8188c1dd59 | |||
5e05f12c48 | |||
59112adac1 | |||
94a1a57106 | |||
2ec74474e9 | |||
4bf6d0c4d7 | |||
037c9f571e | |||
58f8713292 | |||
5c35ab619d | |||
0bd3ae063e | |||
7df84463df | |||
4de5fff3ca | |||
57c8abf499 | |||
53a810dbad | |||
c02ef395ed | |||
32ece63338 | |||
062d8e6c5a | |||
bd9b2f6f39 | |||
8fee9a2e1a | |||
ee8621a8bd | |||
491877de3d | |||
fa5b091b4c | |||
271381628b | |||
5810568c65 | |||
320beb76f6 | |||
2cd9dc99b6 | |||
30bce9ddbc | |||
4e27543415 | |||
578efdd59f | |||
187e94c507 | |||
1e52346e5e | |||
9393654af0 | |||
15aea3acdf | |||
e50a26e6cf | |||
e7bd1e9065 | |||
3ea6a01254 | |||
af309c126c | |||
38844a7010 | |||
34406d4815 | |||
f24fff8495 | |||
43ed727ba7 | |||
f6812523cd | |||
bbd2f9672d | |||
4f83818fdb | |||
e4795ae7ed | |||
082d5dc5b2 | |||
9d91a6f1b7 | |||
8266864adc | |||
e94b7984a1 | |||
95fbc29500 | |||
ac79ae6848 | |||
49e85afdcd | |||
46263d405e | |||
bc50fd4ad6 | |||
025261c05e | |||
833f7761d2 | |||
f107aa296b | |||
af57bd3d48 | |||
d8f98b3dcb | |||
12a52b3c8c | |||
429bf73bcf | |||
df6905c3a6 | |||
9542bae56e | |||
6f08f9decd | |||
46757238de | |||
198929fe8b | |||
f220386917 | |||
071cfd7484 | |||
aeb1be748e | |||
b95653331c | |||
f58501b4b9 | |||
9e888dc4a3 | |||
9bac1ef7c5 | |||
6e4f5ee457 | |||
91ce4d7306 | |||
77020798c3 | |||
10ef436285 | |||
e9c839a9e7 | |||
cb4121da19 | |||
c137c50d15 | |||
20fbf09072 | |||
0c62a6fe3f | |||
c6d189dac2 | |||
3dfbd95ddc | |||
f48616657f | |||
122206dbb1 | |||
88fd9670df | |||
9653f6b28d | |||
2cf081d863 | |||
3aba89d440 | |||
f8827e2f08 | |||
355d45ca46 | |||
72f5bfbae2 | |||
1d13594c1c | |||
b3bb079d9f | |||
5402a77c63 | |||
1347b507da | |||
2ae1e80013 | |||
a70fd8e606 | |||
334d38d200 | |||
98af19bde3 | |||
254d9c8903 | |||
093dd68214 | |||
fe28f17718 | |||
5dae615208 | |||
fb55e9a9b6 | |||
038d77347a | |||
5bbb0da7b8 | |||
cb9d93525b | |||
4d3e32803f | |||
61a3df6173 | |||
873bd33591 | |||
879253ea05 | |||
7b365c564f | |||
476124de50 | |||
f7eebadbed | |||
567f30aa1a | |||
0eb0d7f73b | |||
4fb77183ef | |||
681e8728a1 | |||
bbd7bc2985 | |||
1eeddf2e23 | |||
681d6d121e | |||
8d9c339e0e | |||
deab944b30 | |||
804ed825f7 | |||
a6aef76f52 | |||
991c068f72 | |||
c944bfb1d9 | |||
9e9417b474 | |||
b918c2f4cd | |||
6c518102dd | |||
e68eee830b | |||
b16f2da444 | |||
92e72d99bd | |||
193d1926e8 | |||
eba43acefb | |||
1d2a6a5dd9 | |||
d714cf659c | |||
1fa8b6b966 | |||
cf4358715b | |||
1b58166e9d | |||
3d0db28d12 | |||
1daebeb82b | |||
52de97e280 | |||
0b6e9d861e | |||
8dbed193c2 | |||
6c60d48322 | |||
e6795e6fdd | |||
640e93187c | |||
7f3d445af5 | |||
013e1d9d49 | |||
795dde109c | |||
1c04b42839 | |||
8652d2d1b3 | |||
c88fb6952c | |||
bbe151e594 | |||
38ea3aa31a | |||
fb71e80962 | |||
c3679ab9bd | |||
b507715d44 | |||
f128cf69a5 | |||
0057652535 | |||
9cde14d3d8 | |||
091d421994 | |||
8e28162aaf | |||
9d37e520cc | |||
830ca369f1 | |||
34f5020457 | |||
cff3254ca5 | |||
ea5fed937f | |||
afa04f7abd | |||
4895c69fea | |||
94330de843 | |||
e12e8bc431 | |||
bb8a1041b9 | |||
22f45dca80 | |||
6399a87485 | |||
e9ee90a121 | |||
3b92210f5c | |||
219a3bfab1 | |||
4716cd518d | |||
6926d59d39 | |||
d842705622 | |||
d6d2840c94 | |||
e6934e7247 | |||
55d9ff9899 | |||
bc7983b13e | |||
2a42f8a06e | |||
7a785e067a | |||
9c6b916c30 | |||
ac6f87ea21 | |||
a1d124a86b | |||
af5b0d42a8 | |||
c1d181add5 | |||
ea34eb8a4b | |||
f579f73700 | |||
742155c214 | |||
db40d06a39 | |||
bed0049a51 | |||
8df8f4396d | |||
4089f8b06b | |||
4e038e94fd | |||
f500c99a6d | |||
bd86f41e18 | |||
e9b066d497 | |||
36f46e1c31 | |||
efd024510a | |||
e6b4dd3866 | |||
4dc2f08198 | |||
24b136a993 | |||
073c5359b0 | |||
c4fe352965 | |||
b2152be3b2 | |||
cddb9da4f0 | |||
65227f44dc | |||
fc2bf2d3b6 | |||
99f5684dc4 | |||
0404e75e96 | |||
9998e16df3 | |||
7b0bf64404 | |||
37d6f5013f | |||
1ec22572f2 | |||
b8106e2450 | |||
376b20c3d2 | |||
9eb98adf97 | |||
40cc243017 | |||
a4a8560311 | |||
d184eab019 | |||
4d8b3aa578 | |||
53fbbfe56c | |||
c71fab6cb3 | |||
92510a2831 | |||
c8f76b8bd0 | |||
58f25a8752 | |||
9f66965eac | |||
66e0fafc21 | |||
49d3d79459 | |||
1a91621c29 | |||
68da1909de | |||
f1bbf1d8b0 | |||
43d4aa7900 | |||
4c69229a47 | |||
ea15c0b40b | |||
3b06214590 | |||
dc69cc1ae4 | |||
2762f6f96f | |||
4675fa6c46 | |||
c02ecadbf2 | |||
bd434d92e1 | |||
ca3f147670 | |||
fd60ef78eb | |||
15144fc923 | |||
3a4ea28d2d | |||
eddd583cd5 | |||
99f2c746d1 | |||
b467e7fb3b | |||
fb0c590090 | |||
67fa9945e1 | |||
34c1a9ac85 | |||
8e3c420414 | |||
28673d5cb5 | |||
923b4a9e91 | |||
ba2b431462 | |||
70b931dcc8 | |||
ee0b948903 | |||
c91519961c | |||
d3c80be7d5 | |||
691bea8776 | |||
4ff50519ff | |||
229b378ed1 | |||
fc647eed19 | |||
21bc7e94f5 | |||
60276da1fd | |||
2a795614d3 | |||
46d1b61902 | |||
00d7981f64 | |||
910f241c3f | |||
4fdd70c931 | |||
44c8b1bca2 | |||
79ade5ec68 | |||
b57e86abf2 | |||
dca49a614f | |||
87a7f00926 | |||
97fc64dd0c | |||
3071c4caad | |||
f3d3774ff8 | |||
4cc17ec6a7 | |||
1942d294e9 | |||
e1a038e791 | |||
ef749a2506 | |||
d39e90d3d7 | |||
ba0ad16816 | |||
36f870a7e9 | |||
a5e0099e87 | |||
00de20d26f | |||
2a29072019 | |||
4e7a100705 | |||
8ac12c29ed | |||
62189c9f3c | |||
c05226620f | |||
3f6eb96d6e | |||
d388c5c257 | |||
7ed71fa84d | |||
3cea535fdb | |||
b992c02708 | |||
0263ffb2ed | |||
af005d79ad | |||
3bf7c3d53e | |||
3617d43e76 | |||
ab152f1319 | |||
d9674f7ff0 | |||
62c8bcf565 | |||
c9a3b8941f | |||
a295febf04 | |||
361101bd31 | |||
d2731a2d93 | |||
c8b285bce0 | |||
f8870ff1e3 | |||
857fce13f9 | |||
0f76077969 | |||
b9972deefe | |||
c9261e034e | |||
c3a610374a | |||
33de7b856f | |||
92e343da26 | |||
91c3b18e1e | |||
7de2236284 | |||
999ba9e026 | |||
ed146408c5 | |||
d66c0a3dfb | |||
ed7a6c6732 | |||
b2bdd3e346 | |||
d1a77a1568 | |||
e515d2ca8d | |||
b247a55dbb | |||
16c8b65f39 | |||
5a53e6a7a3 | |||
97bab5b3a9 | |||
cba834808a | |||
595bba95b4 | |||
9899cd359a | |||
11b10439b4 | |||
7aa5f6b833 | |||
2de2c6ddd7 | |||
098585234d | |||
2dee098b91 | |||
311b5789af | |||
88c1b8f047 | |||
4386e09710 | |||
21f4606212 | |||
a50576ebe6 | |||
0b64bf5585 | |||
df5befdaca | |||
46fc92f392 | |||
eade497e15 | |||
b779456850 | |||
8489ee7df9 | |||
97418bede3 | |||
1cf7ae51f8 | |||
4afd6dddc8 | |||
001ed66b4d | |||
b83284c9f6 | |||
38bbb77989 | |||
32054e7fad | |||
7a226dc03a | |||
dd35c673f8 | |||
3a8c678f62 | |||
007fb3abe5 | |||
d3aa9bce26 | |||
e78821f774 | |||
64a2d7081b | |||
778b2adbea | |||
456bf15012 | |||
590e113f16 | |||
403c948721 | |||
d3f938f0cf | |||
16ab273bf6 | |||
0e72633707 | |||
dbc2d6a38b | |||
f8837c0310 | |||
4a5f83d3a7 | |||
85571c93a4 | |||
4c6f9bac32 | |||
529fefc7cc | |||
a0552e5b46 | |||
982454a455 | |||
cea783e5fd | |||
24fd47a32d | |||
1560d5e3c3 | |||
01a7ec8198 | |||
fe8ba81ce6 | |||
b14aa4e14d | |||
6c72481ff0 | |||
2efb000f9a | |||
ffdeed1442 | |||
6be8aefe02 | |||
3e1b4aec2e | |||
fdbde2df52 | |||
9d9482b9d8 | |||
37fe5139cc | |||
46db029a00 | |||
76176a4620 | |||
8b9e472a6c | |||
d461a9ac10 | |||
f0be3e4ea9 | |||
266e7ce8cb | |||
b44ff347e2 | |||
787c62fb15 | |||
747a3caf9d | |||
f68ca3895b | |||
497b6c152f | |||
333e5a9446 | |||
8352bc48db | |||
bd4d563db1 | |||
decec3cd8b | |||
7578db7ee3 | |||
b3fae0a01a | |||
f1ba759940 | |||
fa61b90796 | |||
e288459cf2 | |||
e6055010eb | |||
ae43ca3bfb | |||
f7a5c0301a | |||
1828579580 | |||
2c3bdedea3 | |||
5e25ee5ebe | |||
bbc4fdb767 | |||
2efc519368 | |||
fb1b853b14 | |||
7ab0aec61f | |||
df040c05da | |||
205f070a99 | |||
5fa3e5744c | |||
170927b7c4 | |||
38e1f7c4ba | |||
3936c10aa0 | |||
9cc57fb076 | |||
e9374d32a3 | |||
d45ced0a5d | |||
57f51352f6 | |||
682daf1117 | |||
78585a992b | |||
2874f6b0a2 | |||
0571962776 | |||
6ab8286e8d | |||
7a8eba10b2 | |||
8ac94b2cf4 | |||
f4f14c42bb | |||
f5388cfe71 | |||
770bdec924 | |||
7eb793a55e | |||
622a6fba7f | |||
ed2c071fe1 | |||
ed1d66153e | |||
5d61e439f2 | |||
4288564f59 | |||
8378e8790f | |||
27c2180db9 | |||
a5f74d86c8 | |||
dd9481c403 | |||
dd9fdd7b68 | |||
1d2f0f6641 | |||
b078edffe1 | |||
6d9818b8e4 | |||
82a6bbe068 | |||
12dc8749cf | |||
c550b32a44 | |||
d60194f6ab | |||
0088aefa24 | |||
1d5a8ebc6a | |||
2df96cd81e | |||
ac9a2522bd | |||
fe9ee9134a | |||
718ab7c12e | |||
5cf8d8795b | |||
e1939688c2 | |||
0e94b8fd4e | |||
9d3afba045 | |||
d7051b0d21 | |||
b742af4034 | |||
09458cc802 | |||
c21bf29ce7 | |||
8e4360cf9c | |||
f4ae450f34 | |||
dc3d7ad2bd | |||
a655b01700 | |||
6cfd44b811 | |||
4ad7069827 | |||
d11b0abf11 | |||
a3bef2e537 | |||
6a53ec28e0 | |||
fbffdf9f3d | |||
8ad52fa095 | |||
f19ff84593 | |||
632079641a | |||
ac729fc75d | |||
364dda4d8f | |||
01f442c81c | |||
f81bfc8462 | |||
84db04ce6c | |||
6909a79b6f | |||
4305d4b7b1 | |||
5cf8730b87 | |||
db2be00289 | |||
355a23c8f6 | |||
7d2031d23b | |||
4513b2216f | |||
aab2df208a | |||
73c1ea7de6 | |||
2d7f036afd | |||
02b050e0f5 | |||
4aaca73e42 | |||
98bc694606 | |||
7c7640b462 | |||
535de3b302 | |||
e492638b7d | |||
f1d9c014f8 | |||
af33012dc8 | |||
9b422c2b64 | |||
57bbbb83a4 | |||
481ee48c35 | |||
c52d3736e8 | |||
f4d21408aa | |||
38f79a1eac | |||
f3b3887ea8 | |||
7482861f4b | |||
d559426373 | |||
46890ac197 | |||
a89f180145 | |||
27a41c5954 | |||
ceb3eae5fa | |||
6751a68e7d | |||
cea8b330c3 | |||
628ad58912 | |||
82ea4891fd | |||
cf3fc61821 | |||
0b3fad19f4 | |||
c721300ab5 | |||
f3ce5fa5e8 | |||
96360f3139 | |||
c0c95e88d8 | |||
aea9960c75 | |||
1a4bede918 | |||
86ef147ec2 | |||
034b68f52f | |||
db4692abb4 | |||
6548494261 | |||
8a88c0fabb | |||
154e01fb33 | |||
2ccbe471ae | |||
6d939811e9 | |||
9483866e0b | |||
ac8bd587ec | |||
fe6398c6e1 | |||
234461f779 | |||
05e7d1027b | |||
17978c2ff4 | |||
967746abbf | |||
4d361af976 | |||
3e5ba8dcaa | |||
3be5715f45 | |||
1deb4add81 | |||
f8fefc9c5a | |||
7954c9ed57 | |||
73aa004c59 | |||
2a877ae06e | |||
a587eec20b | |||
eced50d103 | |||
40613161a0 | |||
e0bc5fa690 | |||
169ded9a70 | |||
3ec33e7d02 | |||
f59a55be17 | |||
4982dc20f9 | |||
e1fb45bd3f | |||
5bb43280eb | |||
89ed8984ee | |||
287c418a19 | |||
7ec492e087 | |||
b59b557163 | |||
7d135f185c | |||
5d380b24a8 | |||
1d375ff2da | |||
89a31ff473 | |||
d7ba15cde8 | |||
39459085ef | |||
6327e006df | |||
c053df143f | |||
c167211611 | |||
c50b01cb60 | |||
098e2b2de3 | |||
7c70f2158b | |||
2c648cc6b6 | |||
f9986c66b8 | |||
7481d9b66b | |||
7a8807b8bb | |||
3c71670bd9 | |||
3efccbffab | |||
6e413331b5 | |||
8198a7eae1 | |||
bf437b0336 | |||
563aec0b4d | |||
0e5ea36cc6 | |||
e6d3c4aba4 | |||
f3c6872c45 | |||
a076b29da2 | |||
a9f7149c92 | |||
fd33f523ab | |||
9be988db41 | |||
f33b7abffb | |||
692aa99147 | |||
39a3d5b8a9 | |||
10d471dcca | |||
95c998a19e | |||
57bfffedf6 | |||
084236006d | |||
3e5ba594e0 | |||
d10e37a829 | |||
930465e67c | |||
a0e8b860b1 | |||
b15fa9fbd2 | |||
140abec6ef | |||
22674000bd | |||
176036aa58 | |||
32501866b7 | |||
1a3f29e22d | |||
74cc2e5ffa | |||
b64eeb7729 | |||
d57398a959 | |||
01b00bc593 | |||
04c0c608a3 | |||
5b9671d01a | |||
0b50bb2b20 | |||
26e963f436 | |||
18463aa846 | |||
bd8f793809 | |||
7a789e0763 | |||
0e0022d9ff | |||
e8b972d73a | |||
bacc5ae5e9 | |||
94e5d945e1 | |||
d192d78ac2 | |||
f8f8c198c0 | |||
772341dccb | |||
5fb6b341c4 | |||
5085f10b4c | |||
5fde4ee3ae | |||
5233338c94 | |||
224adb7645 | |||
4a6f63c750 | |||
c5eb3f1394 | |||
414d904959 | |||
9d8594a046 | |||
d0ee5cdc8f | |||
736da0a80e | |||
8b56fe662a | |||
73c6bf6d15 | |||
81812fd476 | |||
5a13ca2d60 | |||
8229a4fbf6 | |||
9dfeee2993 | |||
446816de52 | |||
e91988c977 | |||
3bdadb3930 | |||
a661198e7a | |||
4cc1b1504f | |||
20b73ec167 | |||
bfeff2c83b | |||
47f38bba8c | |||
330f562d44 | |||
6634bc8696 | |||
7ddda30126 | |||
e9722474eb | |||
d7ab510229 | |||
5970083b4d | |||
c4f2e5f88c | |||
1a372a792e | |||
e7190cc727 | |||
00e5e12906 | |||
1403eaeefc | |||
3b506b568a | |||
83f0915e15 | |||
85befbc8cd | |||
8e1341c7ea | |||
22f8da29e0 | |||
faf99f4760 | |||
ccfa82461b | |||
536b763751 | |||
aeb4240f25 | |||
651343688d | |||
8557dac51d | |||
336b43ba39 | |||
05d92402f1 | |||
f302774cf7 | |||
c18bd08021 | |||
47e0d9aa95 | |||
ef72a02da7 | |||
b84cdacb20 | |||
4ae34c8137 | |||
2883dc7265 | |||
14e5617475 | |||
bf94321430 | |||
d6a2b70438 | |||
4cbb5da0b6 | |||
8817f59b6e | |||
f506da5e19 | |||
2d9385a65c | |||
5874f75a26 | |||
c56bccdf32 | |||
db40cb41b2 | |||
e4b66a5913 | |||
f5ce2c2d63 | |||
c7b986220e | |||
1951fe4e80 | |||
db14bcc43b | |||
9616ae0f2c | |||
94c40ad3d2 | |||
fd937548a0 | |||
aa688e4549 | |||
00890957ee | |||
7923c26939 | |||
f771063275 | |||
216a1b3d74 | |||
c12289fd1b | |||
41f4973f0d | |||
397801a2d8 | |||
ca37873e16 | |||
8878f526ce | |||
fcb4ccd413 | |||
592013eaf4 | |||
0028442e14 | |||
e4be00fece | |||
eaf927cf49 | |||
fb69f45f14 | |||
50d0e830c9 | |||
aa32738dd5 | |||
30bec3921e | |||
ecc1c7957f | |||
44b11154ca | |||
b67ffab370 | |||
e368f10973 | |||
a9014ceceb | |||
24207a09ac | |||
5cf28689e6 | |||
40914de811 | |||
2fc112edcf | |||
67788ad206 | |||
bf16b0517c | |||
087db70df6 | |||
14361906ca | |||
367d5f62ce | |||
6e285c42ea | |||
80e770dfa0 | |||
dfe99efa7c | |||
5a4979f25f | |||
bde9b4de94 | |||
3280ae3e9f | |||
68cc71409e | |||
a1112254a5 | |||
0b8d14b0fc | |||
1ed12a07ab | |||
ca14475085 | |||
cc27b8a5a7 | |||
d60ccf64e1 | |||
6a995f5dfd | |||
6d95d679c4 | |||
999fb4bda9 | |||
2b33c0c165 | |||
31a620c42b | |||
06e08c4840 | |||
f7be47b8b9 | |||
0a63f65c03 | |||
9912b73f9f | |||
38fb6dcf86 | |||
06a7a9e544 | |||
79b4b83d3c | |||
0b7ed18cfa | |||
d8984cb0f4 | |||
13110c16de | |||
b8bb5b0c06 | |||
1b594a07e5 | |||
7cd970e089 | |||
a66091d7fa | |||
5ad4008e4b | |||
aa0ea0b0c1 | |||
cde1461555 | |||
e4f3b84b93 | |||
03353d500f | |||
c16bf02448 | |||
5b2eada24b | |||
95c2873b3c | |||
bb275350c7 | |||
5e277897b0 | |||
77861e2d40 | |||
9d2f0e237b | |||
0247c280ff | |||
049fb0417f | |||
21bc43ed58 | |||
f2cf647c9f | |||
58f395257b | |||
fe1ee49807 | |||
9a8e19c8d8 | |||
1d7e568ad2 | |||
8daae8af89 | |||
5d297ccf96 | |||
ef17cf3bdb | |||
b05fb87f22 | |||
46fdf8a4d2 | |||
81026f9ea5 | |||
1cef6fd4b4 | |||
dfbb0c559b | |||
da480bdb5f | |||
0cf52e2060 | |||
d06dc6c8a6 | |||
1f788781dd | |||
0a2fac2b92 | |||
5f23d8530d | |||
9529284194 | |||
c0d2e81e89 | |||
662ccfa558 | |||
71f6d839f9 | |||
8ed0cd0fff | |||
ed8285c096 | |||
c435f7b3e3 | |||
e641f257ef | |||
ee65ffb3c8 | |||
59641623d1 | |||
9255ae334d | |||
84e78316b1 | |||
4d0cd9b283 | |||
718939b6a0 | |||
578f2aa22b | |||
0167daa116 | |||
65ccfed868 | |||
9d0a937a05 | |||
aef6f235bb | |||
2dad2058ab | |||
e3401c4f61 | |||
9340708dfb | |||
c04618e2b3 | |||
467c18e0d1 | |||
14f0ce850d | |||
72e374d0f3 | |||
f1b9f97aef | |||
86bda02fc4 | |||
1b4014106f | |||
26daf4a7a4 | |||
cec0cfa412 | |||
6980d42cd7 | |||
d4203071f0 | |||
46afcd41ca | |||
6420b02f9c | |||
f99f9eccab | |||
6dadc75a6f | |||
3cf844a67b | |||
4e41c05acd | |||
dc6ec02021 | |||
53d8cad206 | |||
eb6656a553 | |||
df1bec4f5f | |||
c0704d4ec9 | |||
eaeeffa5a3 | |||
a4024168de | |||
9364e7425a | |||
4454439285 | |||
93c20d0a47 | |||
8596db8f53 | |||
b97113408b | |||
f1198fc6d5 | |||
f51d648681 | |||
42b81b309f | |||
1ddb3239ea | |||
b710fd083b | |||
f264511585 | |||
1ee64afb12 | |||
f4aa5c5d8d | |||
846787e7f4 | |||
d2d5f36a3c | |||
63aec9728f | |||
bd67632b41 | |||
3bbad1d278 | |||
e994c53328 | |||
3eecb6f4ae | |||
611af87fdb | |||
5cabb5bb11 | |||
0682d2de31 | |||
e7a687d7ee | |||
d1debcd971 | |||
7fc4cfebc8 | |||
cbe2ed47e2 | |||
2f3df8cce0 | |||
7d56fa8363 | |||
d6f5945653 | |||
fed0e387d1 | |||
61865c0ee0 | |||
71bd434297 | |||
dfde1dec1e | |||
29ec6f1a5a | |||
c8ef041951 | |||
548cf6a437 | |||
c70f8d26af | |||
186a1c743d | |||
588c0464b8 | |||
d751d5b6e8 | |||
b867fa154e | |||
5f968928ec | |||
cf456d0453 | |||
51f3b9aa7c | |||
bbd22f06f4 | |||
65152373de | |||
935c8c5b9f | |||
2bc7edfd01 | |||
de35451e0d | |||
07a4dd0d99 | |||
a4c3db51fc | |||
6a49788942 | |||
c77de29f67 | |||
a2f46a0521 | |||
395ee4ec68 | |||
69f02af98e | |||
5dcfd7ce74 | |||
8549c19f1a | |||
c8442fd476 | |||
207c90bd8b | |||
dff9c88193 | |||
ce7166780f | |||
cd1af06245 | |||
222516192f | |||
b13caa14c9 | |||
23d9fe5fb0 | |||
dda8335f26 | |||
7ed8792647 | |||
d9970c0a7d | |||
6abafac479 | |||
4015e638e4 | |||
f9321064e6 | |||
85d3fb31f1 | |||
d4093dc540 | |||
cd1259cf35 | |||
8848b73bc8 | |||
ce467bea20 | |||
b9dc85a934 | |||
8da261cf5c | |||
269028360c | |||
1507477306 | |||
59cd0556ef | |||
96a7cedaca | |||
7c259d09f4 | |||
e316586516 | |||
919c3ae6ec | |||
181f21529d | |||
74539020b4 | |||
ae5ad5cf9b | |||
2ec81f627d | |||
ca71ca3d6d | |||
090fbeca24 | |||
5f9f3724d0 | |||
535c7ff26c | |||
35c45792cb | |||
6cf3c1ab8f | |||
d166b9856a | |||
c03490b24a | |||
5fe0350c2e | |||
8a846b048e | |||
37ee0b5599 | |||
aeb30fa873 | |||
abe5a0a349 | |||
3a85b77bb5 | |||
568660b402 | |||
c19fce1b19 | |||
f3127c3b47 | |||
958d27bb0e | |||
da9fdd785c | |||
0f8bcf65af | |||
e6f32f6ae0 | |||
027bbcd3d8 | |||
74e7f6bd95 | |||
c418e8f370 | |||
21868439d5 | |||
d117be6792 | |||
6f72f8b1fc | |||
ad3f18f031 | |||
cf31afdd6a | |||
9ed1f24188 | |||
74f1f7f990 | |||
8bb9ddf298 | |||
33718e5fb4 | |||
88433a2f35 | |||
5d21f63e8a | |||
b30b32300d | |||
62d864559f | |||
c82bf39459 | |||
7f2254225e | |||
a4a24b6531 | |||
bf90ea282a | |||
8eda152d33 | |||
7960e65d1b | |||
74b29c0b6f | |||
278a6917f9 | |||
c90af3cd63 | |||
350baece21 | |||
bb41cf3461 | |||
0bd8710d34 | |||
d092fa1f03 | |||
3e11468a04 | |||
5cea25ac3e | |||
6829d210d4 | |||
33066d254e | |||
90f8cf0920 | |||
12a93a9951 | |||
7b3fecc4a6 | |||
3776d93ff9 | |||
af5aff4c66 | |||
a1c0f144f4 | |||
cfece66403 | |||
8ad4ffdee5 | |||
0d3e8ada94 | |||
4098af3b5b | |||
e322bc90cc | |||
157b1c7d2d | |||
bc47ed6c80 | |||
35123b71c2 | |||
4a5ec51007 | |||
072a7761e1 | |||
00f7e514b8 | |||
f5ff4b2058 | |||
e806d31224 | |||
12aa238eb1 | |||
c976c2a9b9 | |||
f6da66fe7e | |||
47dad2390a | |||
ab45532b52 | |||
e7a1f2c9b0 | |||
d30a36641e | |||
175083c4c1 | |||
899b09872b | |||
cf9a4575ae | |||
918b5c28b2 | |||
c810908e62 | |||
b6b3a93821 | |||
c4827732c5 | |||
f1996ca0f3 | |||
1eab0773af | |||
fd9c10c2e2 | |||
335ac82a1a | |||
5bd50aa97c | |||
c2e7d39154 | |||
e9ace3a0d5 | |||
761de8b1a3 | |||
31443c5c61 | |||
f96de208e2 | |||
b0734fabf7 | |||
bd3c93f086 | |||
30a8fb86d0 | |||
b158353ca9 | |||
0f1c0c2796 | |||
4fa902c84f | |||
3a7e5e65e4 | |||
f029af0fca | |||
fd574dcb3b | |||
4e1333fbe6 | |||
a5b91ef4c3 | |||
84a643d73f | |||
87461d09d2 | |||
75192a70c5 | |||
fba8fa146f | |||
ba46c1be38 | |||
0af9fabf83 | |||
07dd00ca26 | |||
ca1d8f2adc | |||
2ef2b6daa0 | |||
f6371cce17 | |||
4d3e301ee4 | |||
27cc7577a1 | |||
aeae14eff0 | |||
4fb1c9da26 | |||
6188283ba6 | |||
334e1112a7 | |||
5008c24722 | |||
f39ffa69f6 | |||
2867584985 | |||
c853da7424 | |||
79c811ef17 | |||
8fcda6a2da | |||
5a285e4307 | |||
6ed35ea61d | |||
0b694ce08b | |||
fbddee815a | |||
b6dff12923 | |||
d4ec0a6da5 | |||
b8be09c09c | |||
69ad13b077 | |||
1e0942e900 | |||
f04340b125 | |||
eca0ceb04c | |||
49c4e54b28 | |||
ccdf93e2b8 | |||
1f288ce527 | |||
92c5cdab62 | |||
ebc7df62f3 | |||
44289e6728 | |||
d69f469b83 | |||
a86ced0bac | |||
c039ec084b | |||
b587298963 | |||
3bccb0cc4a | |||
dba42c57b4 | |||
72da25e9d2 | |||
a0551b4054 | |||
e752299ac5 | |||
3e16df8d28 | |||
6fb194fa06 | |||
86e40f66d8 | |||
524f2b863e | |||
7e6273b667 | |||
ce989c0c6b | |||
5f7d011cd7 | |||
0be46a92e2 | |||
8d5c04e257 | |||
04787be8b1 | |||
77f61a5e2e | |||
74e89a3e3e | |||
b8ca2250fd | |||
39bac256ab | |||
9b302ac0b5 | |||
544f62c92f | |||
ee219ffa47 | |||
100fabf469 | |||
5dd399dafa | |||
777840088b | |||
6319e8811a | |||
d451363dc9 | |||
0e039b4094 | |||
660788d227 | |||
070870fc57 | |||
2136666939 | |||
f00f49bd93 | |||
98ec8a8381 | |||
883045f8d8 | |||
b887e89972 | |||
e7b36c8484 | |||
70234dfdf4 | |||
c40e6f1c43 | |||
b89b2881e6 | |||
af18ca4e2c | |||
7462c27d07 | |||
ffb1f3932a | |||
02555d47f8 | |||
93cb212f9c | |||
1dd730d685 | |||
2af5ec4f57 | |||
d5c2c72360 | |||
a5ee16bbe5 | |||
216983c50e | |||
3f35b32fba | |||
4faa93418f | |||
e36247d187 | |||
40d696fcbc | |||
d2b07dca9d | |||
7cd6224caf | |||
55dd6d948f | |||
c571fa3d0e | |||
5dbbbd1c30 | |||
debef9de34 | |||
6ee846a918 | |||
94ab0eb49f | |||
25ac664106 | |||
0eca92de18 | |||
d951916d40 | |||
c17451ca73 | |||
db3a9ae7fb | |||
71efac46cb | |||
b6792a3328 | |||
bf157506e8 | |||
0426c2d28e | |||
f4fb5de545 | |||
d5961e9d9f | |||
45d54b1fc6 | |||
5e424826ba | |||
05924423c2 | |||
644632fc8a | |||
9a97b707f8 | |||
d245818e42 | |||
5ca975383c | |||
89a3e4f91e | |||
cf25729eaa | |||
f228d113c1 | |||
3e9323b7b9 | |||
c05a92b2e9 | |||
8c87f63f81 | |||
99c05f369d | |||
cc80197349 | |||
8e7d393b70 | |||
ad53d2ef77 | |||
d3a7e22112 | |||
f871afd548 | |||
531f1bce78 | |||
52290dbd35 | |||
dfb6296499 | |||
a67d26a1e8 | |||
b08f8bd1b0 | |||
78968d132f | |||
bb18ec8893 | |||
8d9a6deda4 | |||
00d1125e98 | |||
e9165232ef | |||
7a73314b24 | |||
df552d4270 | |||
b062170197 | |||
02b14caa5f | |||
9d4428d3d8 | |||
ce53b84cdc | |||
52fd10ce03 | |||
c1b9d40a64 | |||
9d983a34a0 | |||
9bd02c9570 | |||
20de467758 | |||
3b2e34a336 | |||
772f9d1e14 | |||
a124019e78 | |||
7212819ad3 | |||
624ced8a7d | |||
68c87469c3 | |||
9d6f1ebef4 | |||
47cafb70da | |||
7babf28ef7 | |||
70d75ce4e8 | |||
0972d12c1c | |||
f2b0d562b0 | |||
639a61d639 | |||
afd64d27c9 | |||
7782a0855d | |||
1f1e54b9d8 | |||
933fd28908 | |||
ff8aef9d27 | |||
2f9804573d | |||
719f123d77 | |||
8d48765109 | |||
277157bed3 | |||
b8067d61c3 | |||
8167827b6e | |||
c2124154a5 | |||
1793a6eb5d | |||
bf2864f204 | |||
d4188db32e | |||
1ae3f2677d | |||
580d5b7f00 | |||
e0cfbff327 | |||
b47942cc78 | |||
aaa4855305 | |||
2fbedd834f | |||
b44af11511 | |||
8fc4c2f375 | |||
5d08bf9aa3 | |||
e98a7504f2 | |||
c045f1dfb5 | |||
e06376664b | |||
91576fdd9b | |||
9b30f094f1 | |||
767c6eb57d | |||
f2a2581259 | |||
9429d0463c | |||
08eafe1e8a | |||
353bf6253b | |||
0bc38153ca | |||
761e324982 | |||
278a241db3 | |||
7ee39fcb0f | |||
f213e48067 | |||
554002b73c | |||
d269975784 | |||
f9fccdee85 | |||
31ec986ea1 | |||
50deba1f10 | |||
4b314be5bd | |||
db3475bcdf | |||
3362ac06b5 | |||
6b62ba045d | |||
b679737542 | |||
94905dae52 | |||
522062350b | |||
fc3b2f0f8d | |||
483a1477f3 | |||
77c3ffe4cf | |||
d7b8329b45 | |||
9b5d609830 | |||
d923ff9cfb | |||
69a5f0e6cd | |||
3b1738c000 | |||
e6c662cdb0 | |||
bc0ddbc9c0 | |||
7882fb5026 | |||
64cff8c5a1 | |||
755b7c7aee | |||
314102cb54 | |||
ccf6b21bf8 | |||
d3ee73e151 | |||
fbc94d84c8 | |||
6fc73470df | |||
2156712768 | |||
8a136736ad | |||
e13c733f40 | |||
d5f9f3b7ce | |||
e808f34b0b | |||
3a4176aa3e | |||
d9def8e4f6 | |||
d43c6eafaf | |||
3b1517237c | |||
84b9de8c18 | |||
2435ea3ad8 | |||
553fc210f5 | |||
93dd965947 | |||
cf187dcb0b | |||
28b2ff3733 | |||
7cefb70fcf | |||
67795f5baa | |||
51a0007001 | |||
0ab361b828 | |||
1c88189a1c | |||
699ca6e71a | |||
19fe1dd463 | |||
2087f5da94 | |||
bf97627021 | |||
58e115275a | |||
598093b5db | |||
46c805fb90 | |||
4fb1bfb1fa | |||
97e51dfb35 | |||
faecc41603 | |||
1b4528b9ca | |||
4d46cb686a | |||
6972fdc7e1 | |||
d458fac2ff | |||
ae82e4e25b | |||
e7a7d0d55f | |||
ec2f930475 | |||
0eb5b0ce42 | |||
1c1f15fd27 | |||
60277fe598 | |||
bbbde81a75 | |||
889d35f350 | |||
d6bad60c1b | |||
1b7f6d2bc0 | |||
4a12c715a3 | |||
2b39eb6412 | |||
83a6c669a5 | |||
d18e02ef44 | |||
491e4450cc | |||
c1e03f3410 | |||
789f33e8db | |||
6514096a67 | |||
3570b00560 | |||
5a99fa3790 | |||
abc9839d86 | |||
d0e134998a | |||
877b94fbd5 | |||
a2507ae196 | |||
810fce1f3d | |||
d0511de9a6 | |||
423e0d90ee | |||
0a81c37fce | |||
bab96e878e | |||
1feaaf009d | |||
20e714b3d0 | |||
071b1ee3e5 | |||
76d49523e9 | |||
cca0f744ab | |||
f2a7152c0c | |||
9b182f4087 | |||
5efc48fc69 | |||
db37680f6f | |||
fa04531c7a | |||
f859a39b86 | |||
f1ebbbab8f | |||
ed18add7d3 | |||
5bc6c89adc | |||
43775111fb | |||
5cc073420a | |||
6f5b248746 | |||
d99b8f10d0 | |||
71796f4951 | |||
7ca04d6a86 | |||
bb7413c9f9 | |||
be51ad8264 | |||
eee5414c64 | |||
adc683956f | |||
55ee3b5f2f | |||
7de79425ce | |||
4f8528cd59 | |||
c57d1b44ef | |||
dbd4dc04b0 | |||
0323ea9f7e | |||
f558b9b6bf | |||
1b1d34da59 | |||
36b09db2d1 | |||
61a8badb31 | |||
5ecb30ff58 | |||
4d8ffdcc11 | |||
161838655c | |||
1f38bf2592 | |||
c6e5fc39ce | |||
b7453641d5 | |||
ab45e70b26 | |||
af8cf94b18 | |||
b7de369992 | |||
34856cef05 | |||
ccc013e134 | |||
471b34132e | |||
7fde9b6ff0 | |||
62e1b20e56 | |||
e6bbd4b3f0 | |||
a0872232d3 | |||
d52632f895 | |||
a248770a6b | |||
3657469826 | |||
820806c6dc | |||
c2191d885d | |||
eeee75c5be | |||
d4cc975fe9 | |||
2dc6969858 | |||
32742df1b4 | |||
76bb403318 | |||
54155f875a | |||
cca46308bc | |||
985280ec0b | |||
361c1bdd57 | |||
1aa5a58f00 | |||
cce7565d3c | |||
689ee31bd6 | |||
e440d00ba0 | |||
b8c251c6c0 | |||
4018ae14be | |||
1cd22a41ca | |||
500fdef370 | |||
96c64086bc | |||
8e83b315e4 | |||
ddd9ec4c40 | |||
a501707058 | |||
3026954d71 | |||
22c356d24c | |||
0feac57cb0 | |||
db25d18120 | |||
2e75023840 | |||
2aa7df23b5 | |||
c8535be0e1 | |||
da9ae5f836 | |||
afafa624a3 | |||
191519188d | |||
c684e2bdc0 | |||
fa6bdd2d12 | |||
d272468a14 | |||
027ac3c8f2 | |||
6a2aec1047 | |||
f08ed1eb2d | |||
4a146b6a19 | |||
527b07966f | |||
2ff21a1bc9 | |||
0e162baeb8 | |||
97ef9b2bc3 | |||
e0d679b319 | |||
2fe211c5e0 | |||
269d995832 | |||
a1fab0c5ca | |||
ae27fcbcda | |||
050bb5446d | |||
84f121881e | |||
2c591ebaab | |||
fcad558ae3 | |||
9a2ca8dd2f | |||
a8d4b6d363 | |||
d7a0fd8afa | |||
576e3d95f7 | |||
2a9b127029 | |||
01ef2a5c4a | |||
7ee5b02bc5 | |||
8a9b7f5ef2 | |||
193813d49a | |||
e5e7390d44 | |||
5c72fef85a | |||
28fdfed1ba | |||
7b58dcac14 | |||
2dfb5b7579 | |||
03f4eb3ed1 | |||
58fe1d0764 | |||
6f193bf357 | |||
86850ceac4 | |||
6964bf0a8c | |||
e0ab5ee4f8 | |||
de9012ce36 | |||
a66566e75b | |||
e76c275867 | |||
18434f17fe | |||
f1ba5c509c | |||
7f8252fa07 | |||
165a99f48f | |||
8d978a35d2 | |||
d5613899d9 | |||
cb086ae492 | |||
cab30e2356 | |||
2e998ed11d | |||
817d48be21 | |||
910ac94a8c | |||
0a24cff30e | |||
43b8d98055 | |||
b5bb91b50f | |||
60024d13c9 | |||
d94942a31f | |||
18ec6756e4 | |||
9ed1626e00 | |||
71ee59df3d | |||
70a81d9109 | |||
2aaf55795f | |||
8f5e773caf | |||
ae0bea1ad3 | |||
7633d0fb8e | |||
4677e6c132 | |||
00ee84af37 | |||
7bb5f24274 | |||
f6fb8906c7 | |||
4bba92d3da | |||
8a77b21be1 | |||
5197454fea | |||
544b3c0d17 | |||
f97ce2cd7e | |||
60b0a13444 | |||
be957f25c9 | |||
bea7ce717c | |||
7f695a97be | |||
847e074943 | |||
81bafd9daf | |||
3a647c4bea | |||
3dcc8e0046 | |||
47861fc373 | |||
4971bc45f2 | |||
39654d3fa5 | |||
83b9a046d1 | |||
708bbcb001 | |||
88a1db17c7 | |||
3670435db4 | |||
7f0349b295 | |||
0b0b9d91a8 | |||
57ce1898ba | |||
543817d697 | |||
3d7d3eca82 | |||
7599cb2de9 | |||
698af2b054 | |||
96ba2edfeb | |||
9388aaca15 | |||
4bd32d891f | |||
83ceedc091 | |||
db8811eacd | |||
8e1e57a181 | |||
3e13cde988 | |||
19c49e2e25 | |||
d802eb303c | |||
3a12f92c34 | |||
d99c888cc2 | |||
07bac27ac7 | |||
eec996ba41 | |||
107af52deb | |||
d47990e753 | |||
bbcdf073ba | |||
80e5b24b38 | |||
d269ca510c | |||
14c52ab018 | |||
10c6e771b5 | |||
4e0db50e4e | |||
b7f98ea18a | |||
738cc9549f | |||
1b7f8777d6 | |||
2b50529265 | |||
38d71c37da | |||
96cdbfdcc0 | |||
c8fdf0762c | |||
ef5169ff24 | |||
72bb271a94 | |||
a43c29e858 | |||
a0d721c6bd | |||
b000d490ce | |||
31ce6faf83 | |||
d0051bd21d | |||
79b7912435 | |||
886898eabf | |||
ab0f4ff835 | |||
ac452f9bb8 | |||
41975016b9 | |||
88a22d7b0f | |||
b9db347601 | |||
1fa69c4ced | |||
59e0747b83 | |||
2c957480bf | |||
d09cabac33 | |||
2896fc3987 | |||
d01b4f80f9 | |||
06a926f2f4 | |||
ca8c1c6c42 | |||
831e87c65d | |||
89ee8778bc | |||
cf5ad7cb7d | |||
e3f5c0005b | |||
a35024123c | |||
654918ab27 | |||
86c07bf785 | |||
a3240aebde | |||
8924fbf6a0 | |||
974a96738a | |||
1d02dba06f | |||
55c22d3b76 | |||
7cf6e66ddd | |||
b8897df515 | |||
2316ddb90a | |||
2422a53df6 | |||
ce63866697 | |||
c1db32be79 | |||
1f97b2365f | |||
5787ac813f | |||
53c91ee89e | |||
e833d2ba5b | |||
94fffee158 | |||
ec1a307a7c | |||
cb1fb28247 | |||
4f74c77146 | |||
6e2ae68643 | |||
1953543274 | |||
983828a2a9 | |||
8eab0e8602 | |||
c81c7fe37c | |||
49402b7d82 | |||
3973bf08f1 | |||
dd3cf27ab0 | |||
02a433307c | |||
44f7fd5eef | |||
a47d55c75c | |||
c4c2beca19 | |||
ab581dafc2 | |||
9541411c15 | |||
7dfc1d9790 | |||
52dccc656a | |||
cbce440af4 | |||
54f0fc9f0f | |||
6abe089740 | |||
3f9e3c7375 | |||
6b9d8d41a3 | |||
bb72ab7f1b | |||
179856c13a | |||
9a5330b7eb | |||
752cd7d15e | |||
302b83a198 | |||
0bb8c4f3ea | |||
37c2a2810f | |||
aed8e4c242 | |||
2a6c5ed0ac | |||
97eab7edf9 | |||
e9bc1c6b07 | |||
7ce910f459 | |||
3d40ec3c88 | |||
6d40d0d141 | |||
660d37aadf | |||
e14f3eb529 | |||
96cef5260c | |||
4eb6deee2d | |||
64bfc14a75 | |||
3dfe87973b | |||
7a1446a3ed | |||
7e433ddf83 | |||
a5c2067273 | |||
0dbe926efe | |||
3ae4806dae | |||
a03230338a | |||
4c17243157 | |||
98f1b11edd | |||
d39a327138 | |||
2019558f03 | |||
40bb6577fc | |||
e39cb49f13 | |||
85b647d80f | |||
1108cb32b5 | |||
a7ba6c84d0 | |||
ddff4fdbf3 | |||
1f204660be | |||
744f4dd834 | |||
dbd7be5ff1 | |||
fda8cb176a | |||
fd43446bfa | |||
eeaa032791 | |||
26fe60a0cd | |||
bc03102efd | |||
f193ea96f7 | |||
70d9347514 | |||
f4bdb11949 | |||
3a0c6950cb | |||
d53dba546a | |||
4541c6e596 | |||
91416b2075 | |||
5e04d6438f | |||
27b84dda4e | |||
06a0788385 | |||
a0c57de387 | |||
efd85b8e28 | |||
7157233275 | |||
6b13373e4b | |||
2aac716626 | |||
99313a4ba2 | |||
d4fc9e7ed4 | |||
dd22ae047b | |||
d8bc56fa51 | |||
41ec1c8d50 | |||
30b60a976b | |||
ad1f24d487 | |||
e867d7f3b8 | |||
a8dca3976b | |||
9d112cf41f | |||
060332c704 | |||
8aec50a275 | |||
b58ebb7bcd | |||
702d0a4d10 | |||
65f6949376 | |||
e13cd33290 | |||
c8169a2f1b | |||
2d63e15023 | |||
db38ba81ee | |||
5567305a5f | |||
8664b2cc39 | |||
cf1acfb021 | |||
d6496376ce | |||
a7870cda8d | |||
d41266e4e9 | |||
51178ccb33 | |||
855ae79598 | |||
9471ba61c5 | |||
9339a6f8f3 | |||
8758e9ed82 | |||
3f3324231d | |||
96cde36784 | |||
ff0e623d30 | |||
71de021177 | |||
019bccab51 | |||
74ac6ab80f | |||
8c073b2c94 | |||
2adce67260 | |||
44831c18d2 | |||
8143ee5502 | |||
2f55547d37 | |||
5e6b00fe98 | |||
e8b35a4f7b | |||
662c2aaeec | |||
e320af99a0 | |||
91f2b6185e | |||
2c99b23ad7 | |||
45552d271a | |||
a1a0d6f84b | |||
741f99ebb6 | |||
0486df02ba | |||
dd13a31a5a | |||
33ab9c4e8d | |||
9b74988fc6 | |||
a9aa533684 | |||
ddfc15b9f2 | |||
2ae57c172a | |||
a544010b03 | |||
32ec8341f9 | |||
13b032b2d4 | |||
477898f682 | |||
e7073ecab1 | |||
b5302e7619 | |||
f1b4a0a2e0 | |||
c20b27bc8f | |||
ed9cbd50f0 | |||
0781fe1b4f | |||
4788976517 | |||
7fe24c455c | |||
305d9dd3f4 | |||
827355a6b1 | |||
a3c0833a1c | |||
63b97729e6 | |||
cfcae50022 | |||
00e198d169 | |||
75335b4f58 | |||
6cba53421e | |||
f7b0184f81 | |||
2175fc0625 | |||
ab871ed4b7 | |||
c65c4475f6 | |||
3a56a56d69 | |||
f15dd1b4ef | |||
7ea1131090 | |||
bcbe155575 | |||
d461aba6a4 | |||
8a574baae2 | |||
9d6837c904 | |||
d1db5448b9 | |||
3805874c86 | |||
de76adbdf3 | |||
02157f4753 | |||
1f322b8a9c | |||
611628a402 | |||
d6ab4196ea | |||
ed51cde37b | |||
2fe864aa48 | |||
031a6373ae | |||
e02b4e1192 | |||
7be3171f4a | |||
ec621e71dc | |||
ae5a10dffd | |||
0f3045fb68 | |||
515f8a21d5 | |||
0c07f1ffe4 | |||
3745e0babc | |||
fd88db7339 | |||
3f0480d060 | |||
2c8dde7224 | |||
5e642a174c | |||
c6f7867aa1 | |||
76dfacca61 | |||
754c708473 | |||
67e6a3106f | |||
26afc7620b | |||
b074e86ad2 | |||
27004f1b76 | |||
3dbc7744ab | |||
3e0c0abb53 | |||
7868df3211 | |||
3214105a21 | |||
e08687acfd | |||
38d7e9a4c4 | |||
5b13d4057b | |||
056c1a7b50 | |||
768a2ebe9d | |||
0e646d10bb | |||
0aa7824884 | |||
8b9c9aa790 | |||
e8090f45f2 | |||
8e9c730f40 | |||
0a4cdc8a85 | |||
6bc27e8864 | |||
d9f96f71d4 | |||
248f2309ef | |||
a821c4d57f | |||
239ab8799c | |||
597373f5fa | |||
b437b0a49d | |||
9c42a89a43 | |||
e3d722bb42 | |||
64a200b1c1 | |||
7d96f78821 | |||
4ed828e4ee | |||
88626b2945 | |||
dbac38702a | |||
967840aed6 | |||
b71e4bdc61 | |||
12bf6c06c3 | |||
0eba6eb401 | |||
af6f3d776e | |||
d4ffd909a4 | |||
77272a17b3 | |||
e21ad02288 | |||
4625231e30 | |||
a5ec3a0547 | |||
8eb05d6ed4 | |||
e511c442e6 | |||
82fb6712e7 | |||
4b60b2863e | |||
6e9deaf1bd | |||
f39dda00e0 | |||
a6a1355b80 | |||
c616c34825 | |||
81ad795d46 | |||
ad92414be3 | |||
a74cbb2f93 | |||
442ce816ac | |||
6ee90759c8 | |||
9983d6af77 | |||
22c02b917e | |||
dfa3e7a61c | |||
ff95e2aaa6 | |||
a1df57a4ea | |||
6927d0c77e | |||
0a6fa1999a | |||
35f20b8b6a | |||
d6c076f1b6 | |||
ec2b06d81d | |||
640883a9a9 | |||
97429744af | |||
f43f0afa55 | |||
0b5167bf51 | |||
225ec00ec4 | |||
ddfbae260f | |||
dc0429f5e6 | |||
edc2ab3e48 | |||
fa86a335b0 | |||
d19526e6c2 | |||
2541809c45 | |||
9cd55e0d98 | |||
b8fc69a45b | |||
5103bc11b4 | |||
66d0348c72 | |||
cb5e000615 | |||
d4aa7d512c | |||
5d655eb516 | |||
9c92531ea0 | |||
12c2cd4e86 | |||
7d1637d89a | |||
9bca18603b | |||
ffbe8906ed | |||
e6f49a3e79 | |||
9ba2c53b85 | |||
769136f586 | |||
ab7c96aa81 | |||
3e0fed48e7 | |||
5786be13a4 | |||
a125388f6e | |||
33443170de | |||
4b7083c38a | |||
a8f84a5622 | |||
34599ad57d | |||
e619b101c9 | |||
7e774ee90f | |||
8f0b410714 | |||
6d11d5dd9f | |||
4ede5117f9 | |||
9abfa65920 | |||
46d2755205 | |||
f5862c03ec | |||
efbb4a5a00 | |||
f17b80236f | |||
bc7e741514 | |||
9ff17a1c18 | |||
0ebfa08860 | |||
694c674aa6 | |||
6318705607 | |||
b948a18841 | |||
b2778f34f5 | |||
7cea2c4466 | |||
2231017b35 | |||
d94e4ef7c9 | |||
92b8b20ca5 | |||
a698e34744 | |||
541aa5ad85 | |||
744ac1535f | |||
40c31f87e0 | |||
8fe2668e99 | |||
f9af7cde7e | |||
43ccaf14b0 | |||
d7166c5778 | |||
c003f8e93c | |||
da81ad0c41 | |||
8e561354d5 | |||
643133b2c1 | |||
18a04b0825 | |||
555ed44e13 | |||
1fe131a5bb | |||
1a9954f85b | |||
01308cd890 | |||
763c04adf3 | |||
5e74cede4e | |||
17e6bd579f | |||
1594a7f11a | |||
6f88aeac56 | |||
0af84bb91e | |||
ac87bc40ca | |||
3f982fcf65 | |||
475b00c42f | |||
c4943f3cb0 | |||
e0ffcfd53c | |||
42202b6aef | |||
38302432e5 | |||
c1c115e5ad | |||
9ca19a2928 | |||
cf3b479bce | |||
dfb3db23f0 | |||
545e037e38 | |||
3d98321b38 | |||
7e5964e339 | |||
0ef5a9b1d7 | |||
5981399612 | |||
94edd6140c | |||
63c56b57a9 | |||
8c651d2530 | |||
c9c94a7ef2 | |||
ca7b36ad8f | |||
59e19828ea | |||
23d67e4ac7 | |||
a2fbb9cfef | |||
90641ad28b | |||
e03baeed2b | |||
17f45fb75c | |||
87c153bd37 | |||
07a4e8494f | |||
9c71a6c0c8 | |||
da020552fd | |||
31abedcbf7 | |||
2625dd733d | |||
97183774e8 | |||
d6f30b7537 | |||
d640ac143b | |||
542d88929f | |||
cfc1cb1aee | |||
a7070a5ca7 | |||
8dc15f727c | |||
21c75d9d29 | |||
f533d3be77 | |||
783bd79e9d | |||
82e11588a5 | |||
b5d30846d6 | |||
9070191b8b | |||
9218b51de7 | |||
77f1ffd84b | |||
0aecc6033a | |||
f35453f838 | |||
7f1c1fe6a4 | |||
a7a671b3aa | |||
2021255f91 | |||
30e83a4a3c | |||
da3342759b | |||
6381ee38eb | |||
1864bc2080 | |||
25054bfd35 | |||
1eaff394da | |||
1ac2a8cfa5 | |||
1c95e2bbee | |||
aee30e304d | |||
b17d5eeaee | |||
36574c30ef | |||
b468ead1b1 | |||
cac666d035 | |||
4ce4f04c58 | |||
3d12be29ec | |||
64fcb792c2 | |||
722de942ca | |||
9b7120bf73 | |||
bc31378797 | |||
283f587afe | |||
9b3a59f030 | |||
1bd623cd15 | |||
2f5102587c | |||
8f56c116d7 | |||
d533f77301 | |||
69bfbf9e98 | |||
3887169db0 | |||
47ca7063f2 | |||
3fdbaefaa6 | |||
998cba74b5 | |||
4e7e675c07 | |||
81402ee7f5 | |||
3b8d6b59fb | |||
0f3ac51cf1 | |||
68d5aec55b | |||
9606126543 | |||
e5d1037fda | |||
ef9a931567 | |||
374fee2953 | |||
f93565da6f | |||
59fc33635a | |||
b66a68975b | |||
3af8cb0150 | |||
c2becbc0a8 | |||
5eb5d9b2f5 | |||
603872685d | |||
c54daa8009 | |||
cf779c63c5 | |||
9706512115 | |||
f2ab0384e4 | |||
aeff911c93 | |||
c44812fa71 | |||
6d160768d7 | |||
97812570e7 | |||
4733baf511 | |||
aac37e5eee | |||
e842a63dc5 | |||
c608efc823 | |||
559d3d8f49 | |||
8ce4a8d6ab | |||
ec37a843a4 | |||
c1829dd00b | |||
2420b280a9 | |||
d9dcd28d82 | |||
ca14c18998 | |||
1a4a7059af | |||
1500011fc6 | |||
48c07d32f0 | |||
be29568318 | |||
dcf2d84b24 | |||
04e6aebf35 | |||
fb0b76c1f3 | |||
63436cc2bf | |||
da58f20a99 | |||
7602911f9a | |||
1cc9a1c0eb | |||
2c82f2154d | |||
4beb2f8ddb | |||
96d21335e1 | |||
5591cd6ef3 | |||
6c5b7ecf90 | |||
87e2e07d34 | |||
91be2903da | |||
03194145c0 | |||
f59fe41abf | |||
e7fcb77b49 | |||
e9a616cfc2 | |||
084997c4f0 | |||
c217ee3a00 | |||
75b8434b76 | |||
fc12841d95 | |||
636b5987af | |||
556997666c | |||
739f0698ab | |||
333998d008 | |||
8a6b80095e | |||
8d9d6b62d9 | |||
be4df39a4c | |||
639650ed2c | |||
13dfc15e84 | |||
f4214637a9 | |||
517a30e83d | |||
26fa71a7c1 | |||
b22c13dcd7 | |||
b8b54567b1 | |||
123e0bdba7 | |||
6004c0abf5 | |||
3f92abedd5 | |||
1a658c7f31 | |||
91b6888e15 | |||
4c94f8933f | |||
63957f0677 | |||
ba9a502e7e | |||
a1ef2bd74d | |||
69cbad0869 | |||
189d2121e6 | |||
37b8587d4e | |||
ba2be0ca34 | |||
1e6fc5b3d1 | |||
29852de4f7 | |||
053120e04c | |||
51b748408c | |||
0924c2d070 | |||
9852572eb9 | |||
bb2b4c7e0b | |||
05fa7250b0 | |||
34addee882 | |||
03f7b251b8 | |||
08d5253651 | |||
8e69dd42c1 | |||
36e11998c7 | |||
bc90e04e64 | |||
f91de6a84d | |||
97f6b090b9 | |||
2c2e7e4cf9 | |||
b32ad29071 | |||
12678a819d | |||
d575450ef0 | |||
a7e65c0034 | |||
c8b474cd0b | |||
a19c3ba5b0 | |||
b20314738f | |||
01786f684e | |||
30c95d38a5 | |||
a99ee15a85 | |||
568438aa6f | |||
f14cf3ed1a | |||
1824b5a2ce | |||
818c3198c1 | |||
b66faf7e80 | |||
185bbf2db5 | |||
2409bb18f3 | |||
1b63bdaf44 | |||
0b84440e0f | |||
f5f06904c3 | |||
4aa753ff01 | |||
9dfcb921cf | |||
015bc034a5 | |||
1e638c1371 | |||
376d0bf063 | |||
b06e93fe5b | |||
a911ae00ba | |||
3b79b21e9d | |||
cfe7a4340b | |||
e405747409 | |||
6907a2366e | |||
5399faaf53 | |||
7e01adc050 | |||
4eac6cf366 | |||
7426861817 | |||
aa64f13172 | |||
a6b197adfe | |||
87715b59a1 | |||
48d836b8e2 | |||
cd5398a889 | |||
c94a5a170b | |||
52f4b96a80 | |||
6444f0e57b | |||
5d1ef5d01d | |||
c5ab3ba6f1 | |||
09dcc9ea04 | |||
bb24318ef0 | |||
285f3c9d56 | |||
7e3db1dedb | |||
c63a208488 | |||
a4474f1d94 | |||
fe4c39a26a | |||
f37c05adeb | |||
4ac17b1ee3 | |||
d747614b27 | |||
6dab20812e | |||
013875e787 | |||
59268b8629 | |||
974e6dd2c1 | |||
ba77e48c12 | |||
a535c0e129 | |||
c8ed14c647 | |||
c826cddbb5 | |||
bcb1b67500 | |||
492b3a91bd | |||
9dfe545820 | |||
7dfb51c0b4 | |||
76ce28c723 | |||
1f29031b9d | |||
4905bb552b | |||
e61b4b7d70 | |||
143496ccba | |||
39eddfd161 | |||
d92721aab9 | |||
f35a6a8be0 | |||
ad71e27a0d | |||
328e7690f3 | |||
f0c150cfb9 | |||
eddfe06a00 | |||
d684ec00aa | |||
f804ce63c2 | |||
bd968f7229 | |||
6382323159 | |||
a593999f1e | |||
3bfae8e829 | |||
74f58376f8 | |||
f7eadd9d70 | |||
ccb11a939f | |||
17aa45fad1 | |||
5c6b38a83c | |||
f641429056 | |||
37afa00ffb | |||
db3bca7edd | |||
85eb37fab0 | |||
70f3f7e679 | |||
05ad979a2d | |||
fa83f3bd73 | |||
2229b70c4e | |||
17a173ebb5 | |||
54ef065cc8 | |||
8c498dbf75 | |||
fffff2cd75 | |||
09752a6827 | |||
a7f8239b46 | |||
105a6bfb46 | |||
78d1d59889 | |||
7ba3e710d3 | |||
6930a77a0f | |||
2bc19eb51e | |||
a7c7287cc3 | |||
dc7030ffaa | |||
ef30943c5c | |||
448d5be79f | |||
1d2cae433c | |||
278c125d99 | |||
99b3aab703 | |||
91d5f6ab30 | |||
c4646b2bc3 | |||
ac5462e7a2 | |||
54a04bac3d | |||
8bc0bdd40b | |||
22a18a68e3 | |||
8ec7e2e14f | |||
8862dcb4a0 | |||
4704b813ff | |||
b08cff9e77 | |||
bb9d2fd07a | |||
0e262aab3d | |||
3645092a52 | |||
ce56f1f6f4 | |||
8a6a380fe1 | |||
e69f37a464 | |||
878e52f0b9 | |||
388ce12207 | |||
42ce2ba069 | |||
c5c3ae0203 | |||
117860218f | |||
476fd40948 | |||
6cd4bc5e60 | |||
02197b1215 | |||
eb2b68b7f2 | |||
81323cbccf | |||
0e42a35e4f | |||
d5b3bcc0b2 | |||
e19b728fa5 | |||
5b89055e00 | |||
d26707dd28 | |||
e501fa5f0b | |||
92f4018b07 | |||
03d3ae1cb9 | |||
1219842a96 | |||
ae7bc8299d | |||
9360e57816 | |||
b71875df61 | |||
b6b08706b9 | |||
a4f0d8636a | |||
7a2a39093d | |||
364af3a3e0 | |||
43feef7362 | |||
95dc7b5449 | |||
7a997759fa | |||
b242f82696 | |||
701fc93343 | |||
b521f50278 | |||
d9d6d65959 | |||
8169c8d320 | |||
b8b6777262 | |||
a98467b563 | |||
a6f4a333e9 | |||
1a13d22984 | |||
a679aebc82 | |||
0596cf5405 | |||
1359bceb5d | |||
4b7b402e74 | |||
3429785d9b | |||
a44c32694f | |||
ddc0a16cec | |||
a1d9b53cd7 | |||
c8c89dd5f7 | |||
4e5ef6bce2 | |||
e4889220c4 | |||
dee655df35 | |||
9784bbb802 | |||
a86d74f214 | |||
a902505810 | |||
482b8c6be9 | |||
3996b699dc | |||
badf224460 | |||
a8a4027ab8 | |||
4db301f749 | |||
8dac9c3634 | |||
aa45e81b3e | |||
da27acabcc | |||
18bd47dbe1 | |||
3f63ed9a72 | |||
ad7f8e7f23 | |||
416312b30b | |||
c723251575 | |||
f374b35944 | |||
6f3926b643 | |||
d0793aa1f0 | |||
a5dcee254d | |||
e57975de26 | |||
c6a97e8eff | |||
e8620dea54 | |||
e27cfa0781 | |||
b0f4e2b738 | |||
d0e3aae39f | |||
19f21b595a | |||
3e22f61ecf | |||
663abbdd16 | |||
d474eadf7b | |||
493d461c89 | |||
bcd89dd34c | |||
2c94c6f8e8 | |||
c344702fa0 | |||
67b747938f | |||
700ebde474 | |||
e9e46ff521 | |||
1ecde67078 | |||
aac18d7564 | |||
2a1639836a | |||
c70674a616 | |||
7db3af647b | |||
2bcfbad653 | |||
ce7f7c2b6c | |||
f84e88f0a2 | |||
54c68ea83f | |||
527adbed34 | |||
414c7070cb | |||
f4f2e781ff | |||
28a6424f38 | |||
031bacbc48 | |||
f2133af9ac | |||
e5300c1c05 | |||
8d8ed5e9fe | |||
fc4ca34238 | |||
a8a817c713 | |||
03498f2288 | |||
c959c5daf7 | |||
54cb16d99b | |||
ba377b7364 | |||
ca222167ef | |||
cd6df35e1a | |||
5eff23db0c | |||
9ba9d2a8ae | |||
1d145e1fc2 | |||
3977ed5c82 | |||
60ed8e2892 | |||
4e7bd45d4c | |||
a6b7dcb3c6 | |||
5f46ef7adc | |||
de9ba0f306 | |||
02c0664487 | |||
dfbca76a76 | |||
a04c09c030 | |||
44e3445a4d | |||
69a4059da0 | |||
d78f2e55d7 | |||
5834346aa2 | |||
a622198235 | |||
abada56ba1 | |||
5e5b63712b | |||
27ab415ecc | |||
e50f598449 | |||
aabe186e3f | |||
16e4ccca13 | |||
5791b95b17 | |||
60b4771fc6 | |||
433f1ead1c | |||
cffa851e0f | |||
658ddd1c9c | |||
4f4cffbd03 | |||
a1f1f573d5 | |||
d2e4503be2 | |||
06ac0fe9a3 | |||
b99ae8f334 | |||
b041b55028 | |||
2a1ac97391 | |||
bf6b098c75 | |||
9b94741290 | |||
d3d6d8fb78 | |||
e817a6db00 | |||
07273bfa9e | |||
a15790c9aa | |||
2aea35281e | |||
66c42f62d8 | |||
43a116a84e | |||
ed828dae88 | |||
a7b024639e | |||
602d0ca52d | |||
cc980cb638 | |||
06886f4e4c | |||
52703badfa | |||
6d5c6c17c5 | |||
7f0ac6a67c | |||
8b3de72e2a | |||
6bc858a888 | |||
9153cb9237 | |||
cc6dcb48d4 | |||
b7aa366758 | |||
a8ef29df27 | |||
26af2d4c26 | |||
ad47c63f27 | |||
96ccc40f0a | |||
db3eecf1e9 | |||
f68860a643 | |||
a6c23648cb | |||
7aa65831bc | |||
570fd3f810 | |||
664ed76523 | |||
17dca6da6a | |||
560baa3296 | |||
665af3d26b | |||
6f831edfb4 | |||
812a86f217 | |||
8c732268f5 | |||
c38936011e | |||
d6ef694139 | |||
afbd09062d | |||
2d24d13046 | |||
6f5d8d18e9 | |||
e7fd7d46cf | |||
4f82b897bc | |||
57ba86c821 | |||
6271665ba6 | |||
3dff5c9dee | |||
d76ad33597 | |||
fde43a906d | |||
2fc609a294 | |||
ff2242d0e3 | |||
482c027d3b | |||
834fae684b | |||
63d0c78b20 | |||
a2dae8e8d4 | |||
2c4ff000b9 | |||
2ec24d438f | |||
64429104b1 | |||
038f80d7fd | |||
939c654752 | |||
ac75e386dd | |||
b503215122 | |||
ddc758439e | |||
3da9ab5d83 | |||
a8b6dd7d91 | |||
89ccf2b65f | |||
6cc22e62d4 | |||
2cd875f8e0 | |||
b996252b88 | |||
413a2b71b5 | |||
75cf1cdd56 | |||
33a49b487c | |||
869c5ba7e6 | |||
4e10bcc306 | |||
7ef0ef5d05 | |||
14e45155e4 | |||
3d8c4f8389 | |||
54f1b2ea0e | |||
aef3215722 | |||
5b08621c3f | |||
0a726f1706 | |||
38ff6a4747 | |||
e9b08b5e7f | |||
f46f346710 | |||
06b97554e1 | |||
55c6fef962 | |||
2d3464a548 | |||
07dc522981 | |||
ac95fa058c | |||
a0f24f23b5 | |||
4a0d956a6a | |||
4c5660ba7a | |||
f4db9e4275 | |||
02b81dd05d | |||
067b390194 | |||
aa54c468ea | |||
9760fded2d | |||
4beb39f7a1 | |||
0988c2f1d6 | |||
689e03d341 | |||
561d55feef | |||
8670bd4589 | |||
7deddaa42d | |||
ba33c9e18e | |||
2dabcac0da | |||
4ab98fff02 | |||
7f500d610c | |||
12399157f5 | |||
854e6766ce | |||
98d7673a03 | |||
5b2b824a53 | |||
04c99cf7ea | |||
c06ff47a90 | |||
f548a04fae | |||
03180b502d | |||
82269f1351 | |||
d30c9ef351 | |||
59c19d9fbf | |||
8a9b51952e | |||
a5d144b00f | |||
20b53eb4b4 | |||
0b42379ed7 | |||
a43b3674c7 | |||
cfb01e26dd | |||
0209d334bd | |||
58b980f9cd | |||
3160549e85 | |||
9b26c45be6 | |||
40997d0aef | |||
5460fb10a2 | |||
d370b5a4d2 | |||
f1c4c3b719 | |||
4e99f1e634 | |||
bf33ce8906 | |||
d9176c1903 | |||
51bc18f8c4 | |||
48b3b27985 | |||
8ada44456d | |||
f912c63b22 | |||
3eb9f7b3eb | |||
3ea23fe736 | |||
a5c840e672 | |||
d40dc06d6a | |||
672e9c640f | |||
98ea058ebe | |||
806bfdd67b | |||
654449ce91 | |||
0c4c89ff4a | |||
61112d4826 | |||
1c261d293f | |||
3b85cbc504 | |||
97e5591a63 | |||
a8a3f4d362 | |||
3726358f51 | |||
eb19e11688 | |||
4be9d7fd29 | |||
a2eb655322 | |||
5760cf0f41 | |||
74aa32175b | |||
ad9901d7c6 | |||
8567b41d5f | |||
430ed6d774 | |||
efee8b62d7 | |||
e50f284658 | |||
e35d738906 | |||
c09ea2c314 | |||
2bf46b789f | |||
510760d81b | |||
853e735edf | |||
c1ba265dd9 | |||
60e5fd11c9 | |||
699888dfb3 | |||
c40bd5f394 | |||
85b4578b14 | |||
cb9fded508 | |||
134eccc273 | |||
821be88d13 | |||
a36d2f2d3e | |||
420174d3dd | |||
485a943958 | |||
3722f46089 | |||
eab182188a | |||
c4f98f9c73 | |||
0c9ca5522c |
2
.buildkite/env/secrets.ejson
vendored
2
.buildkite/env/secrets.ejson
vendored
@ -2,6 +2,6 @@
|
||||
"_public_key": "ae29f4f7ad2fc92de70d470e411c8426d5d48db8817c9e3dae574b122192335f",
|
||||
"_comment": "These credentials are encrypted and pose no risk",
|
||||
"environment": {
|
||||
"CODECOV_TOKEN": "EJ[1:Z7OneT3RdJJ0DipCHQ7rC84snQ+FPbgHwZADQiz54wk=:3K68mE38LJ2RB98VWmjuNLFBNn1XTGR4:cR4r05/TOZQKmEZp1v4CSgUJtC6QJiOaL85QjXW0qZ061fMnsBA8AtAPMDoDq4WCGOZM1A==]"
|
||||
"CODECOV_TOKEN": "EJ[1:KToenD1Sr3w82lHGxz1n+j3hwNlLk/1pYrjZHlvY6kE=:hN1Q25omtJ+4yYVn+qzIsPLKT3O6J9XN:DMLNLXi/pkWgvwF6gNIcNF222sgsRR9LnwLZYj0P0wGj7q6w8YQnd1Rskj+sRroI/z5pQg==]"
|
||||
}
|
||||
}
|
||||
|
@ -36,4 +36,7 @@ export CARGO_TARGET_CACHE=$HOME/cargo-target-cache/"$CHANNEL"-"$BUILDKITE_LABEL"
|
||||
# `std:
|
||||
# "found possibly newer version of crate `std` which `xyz` depends on
|
||||
rm -rf target/bpfel-unknown-unknown
|
||||
if [[ $BUILDKITE_LABEL = "stable-perf" ]]; then
|
||||
rm -rf target/release
|
||||
fi
|
||||
)
|
||||
|
28
.github/workflows/explorer.yml
vendored
Normal file
28
.github/workflows/explorer.yml
vendored
Normal file
@ -0,0 +1,28 @@
|
||||
name: Explorer_build&test_on_PR
|
||||
on:
|
||||
pull_request:
|
||||
branches:
|
||||
- master
|
||||
paths:
|
||||
- 'explorer/**'
|
||||
jobs:
|
||||
check-explorer:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
defaults:
|
||||
run:
|
||||
working-directory: explorer
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
ref: ${{ github.event.pull_request.head.sha }}
|
||||
- uses: actions/setup-node@v2
|
||||
with:
|
||||
node-version: '14'
|
||||
cache: 'npm'
|
||||
cache-dependency-path: explorer/package-lock.json
|
||||
- run: npm i -g npm@7
|
||||
- run: npm ci
|
||||
- run: npm run format
|
||||
- run: npm run build
|
||||
- run: npm run test
|
51
.github/workflows/explorer_preview.yml
vendored
Normal file
51
.github/workflows/explorer_preview.yml
vendored
Normal file
@ -0,0 +1,51 @@
|
||||
name : explorer_preview
|
||||
on:
|
||||
workflow_run:
|
||||
workflows: ["Explorer_build&test_on_PR"]
|
||||
types:
|
||||
- completed
|
||||
jobs:
|
||||
explorer_preview:
|
||||
runs-on: ubuntu-latest
|
||||
if: ${{ github.event.workflow_run.conclusion == 'success' }}
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
ref: ${{ github.event.pull_request.head.sha }}
|
||||
- uses: amondnet/vercel-action@v20
|
||||
with:
|
||||
vercel-token: ${{ secrets.VERCEL_TOKEN }} # Required
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }} #Optional
|
||||
vercel-org-id: ${{ secrets.ORG_ID}} #Required
|
||||
vercel-project-id: ${{ secrets.PROJECT_ID}} #Required
|
||||
working-directory: ./explorer
|
||||
scope: ${{ secrets.TEAM_ID }}
|
||||
|
||||
- name: vercel url
|
||||
run : |
|
||||
touch vercelfile.txt
|
||||
vercel --token ${{secrets.VERCEL_TOKEN}} ls solana > vercelfile.txt
|
||||
echo "first line"
|
||||
touch vercelfile1.txt
|
||||
head -n 2 vercelfile.txt > vercelfile1.txt
|
||||
touch vercelfile2.txt
|
||||
tail -n 1 vercelfile1.txt > vercelfile2.txt
|
||||
filtered_url=$(cut -f8 -d" " vercelfile2.txt)
|
||||
touch .env.preview1
|
||||
echo "$filtered_url" > .env.preview1
|
||||
echo "filtered_url is: $filtered_url"
|
||||
|
||||
|
||||
- name: Run tests
|
||||
uses: mathiasvr/command-output@v1
|
||||
id: tests2
|
||||
with:
|
||||
run: |
|
||||
echo "$(cat .env.preview1)"
|
||||
|
||||
- name: Slack Notification1
|
||||
uses: rtCamp/action-slack-notify@master
|
||||
env:
|
||||
SLACK_MESSAGE: ${{ steps.tests2.outputs.stdout }}
|
||||
SLACK_TITLE: Vercel "Explorer" Preview Deployment Link
|
||||
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
|
46
.github/workflows/explorer_production.yml
vendored
Normal file
46
.github/workflows/explorer_production.yml
vendored
Normal file
@ -0,0 +1,46 @@
|
||||
name: Explorer_production_build&test
|
||||
on:
|
||||
push:
|
||||
branches: [master]
|
||||
paths:
|
||||
- 'explorer/**'
|
||||
jobs:
|
||||
Explorer_production_build_test:
|
||||
runs-on: ubuntu-latest
|
||||
defaults:
|
||||
run:
|
||||
working-directory: explorer
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
ref: ${{ github.event.pull_request.head.sha }}
|
||||
- uses: actions/setup-node@v2
|
||||
with:
|
||||
node-version: '14'
|
||||
cache: 'npm'
|
||||
cache-dependency-path: explorer/package-lock.json
|
||||
- run: npm i -g npm@7
|
||||
- run: npm ci
|
||||
- run: npm run format
|
||||
- run: npm run build
|
||||
- run: npm run test
|
||||
|
||||
Explorer_production_deploy:
|
||||
needs: Explorer_production_build_test
|
||||
runs-on: ubuntu-latest
|
||||
defaults:
|
||||
run:
|
||||
working-directory: explorer
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
ref: ${{ github.event.pull_request.head.sha }}
|
||||
- uses: amondnet/vercel-action@v20
|
||||
with:
|
||||
vercel-token: ${{ secrets.VERCEL_TOKEN }} # Required
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }} #Optional
|
||||
vercel-args: '--prod' #for production
|
||||
vercel-org-id: ${{ secrets.ORG_ID}} #Required
|
||||
vercel-project-id: ${{ secrets.PROJECT_ID}} #Required
|
||||
working-directory: ./explorer
|
||||
scope: ${{ secrets.TEAM_ID }}
|
208
.github/workflows/solana-action.yml.txt
vendored
Normal file
208
.github/workflows/solana-action.yml.txt
vendored
Normal file
@ -0,0 +1,208 @@
|
||||
name : minimal
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [master]
|
||||
pull_request:
|
||||
branches: [master]
|
||||
|
||||
jobs:
|
||||
Export_Github_Repositories:
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
VERCEL_TOKEN: ${{secrets.VERCEL_TOKEN}}
|
||||
GITHUB_TOKEN: ${{secrets.PAT_ANM}}
|
||||
COMMIT_RANGE: ${{ github.event.before}}...${{ github.event.after}}
|
||||
steps:
|
||||
- name: Checkout repo
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
fetch-depth: 2
|
||||
|
||||
- run: echo "COMMIT_DIFF_RANGE=$(echo $COMMIT_RANGE)" >> $GITHUB_ENV
|
||||
# - run: echo "$COMMIT_DIFF_RANGE"
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v2
|
||||
with:
|
||||
GITHUB_TOKEN: ${{secrets.PAT_ANM}}
|
||||
if: ${{ github.event_name == 'push' && 'cron'&& github.ref == 'refs/heads/master'}}
|
||||
|
||||
- name: cmd
|
||||
run : |
|
||||
.travis/export-github-repo.sh web3.js/ solana-web3.js
|
||||
|
||||
macos-artifacts:
|
||||
needs: [Export_Github_Repositories]
|
||||
strategy:
|
||||
fail-fast: false
|
||||
runs-on: macos-latest
|
||||
if : ${{ github.event_name == 'api' && 'cron' || 'push' || startsWith(github.ref, 'refs/tags/v')}}
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v2
|
||||
- name: Setup | Rust
|
||||
uses: ATiltedTree/setup-rust@v1
|
||||
with:
|
||||
rust-version: stable
|
||||
- name: release artifact
|
||||
run: |
|
||||
source ci/rust-version.sh
|
||||
brew install coreutils
|
||||
export PATH="/usr/local/opt/coreutils/libexec/gnubin:$PATH"
|
||||
greadlink -f .
|
||||
source ci/env.sh
|
||||
rustup set profile default
|
||||
ci/publish-tarball.sh
|
||||
shell: bash
|
||||
|
||||
- name: Cache modules
|
||||
uses: actions/cache@master
|
||||
id: yarn-cache
|
||||
with:
|
||||
path: node_modules
|
||||
key: ${{ runner.os }}-yarn-${{ hashFiles('**/yarn.lock') }}
|
||||
restore-keys: ${{ runner.os }}-yarn-
|
||||
|
||||
|
||||
# - To stop from uploading on the production
|
||||
# - uses: ochanje210/simple-s3-upload-action@master
|
||||
# with:
|
||||
# AWS_ACCESS_KEY_ID: ${{ secrets.AWS_KEY_ID }}
|
||||
# AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY}}
|
||||
# AWS_S3_BUCKET: ${{ secrets.AWS_S3_BUCKET }}
|
||||
# SOURCE_DIR: 'travis-s3-upload1'
|
||||
# DEST_DIR: 'giitsol'
|
||||
|
||||
# - uses: ochanje210/simple-s3-upload-action@master
|
||||
# with:
|
||||
# AWS_ACCESS_KEY_ID: ${{ secrets.AWS_KEY_ID }}
|
||||
# AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY}}
|
||||
# AWS_S3_BUCKET: ${{ secrets.AWS_S3_BUCKET }}
|
||||
# SOURCE_DIR: './docs/'
|
||||
# DEST_DIR: 'giitsol'
|
||||
|
||||
|
||||
windows-artifact:
|
||||
needs: [Export_Github_Repositories]
|
||||
strategy:
|
||||
fail-fast: false
|
||||
runs-on: windows-latest
|
||||
if : ${{ github.event_name == 'api' && 'cron' || 'push' || startsWith(github.ref, 'refs/tags/v')}}
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v2
|
||||
- name: Setup | Rust
|
||||
uses: ATiltedTree/setup-rust@v1
|
||||
with:
|
||||
rust-version: stable
|
||||
release-artifact:
|
||||
needs: windows-artifact
|
||||
runs-on: windows-latest
|
||||
if : ${{ github.event_name == 'api' && 'cron' || github.ref == 'refs/heads/master'}}
|
||||
steps:
|
||||
- name: release artifact
|
||||
run: |
|
||||
git clone git://git.openssl.org/openssl.git
|
||||
cd openssl
|
||||
make
|
||||
make test
|
||||
make install
|
||||
openssl version
|
||||
# choco install openssl
|
||||
# vcpkg integrate install
|
||||
# refreshenv
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v2
|
||||
- uses: actions/checkout@v2
|
||||
- run: choco install msys2
|
||||
- uses: actions/checkout@v2
|
||||
- run: |
|
||||
openssl version
|
||||
bash ci/rust-version.sh
|
||||
readlink -f .
|
||||
bash ci/env.sh
|
||||
rustup set profile default
|
||||
bash ci/publish-tarball.sh
|
||||
shell: bash
|
||||
|
||||
- name: Cache modules
|
||||
uses: actions/cache@v1
|
||||
id: yarn-cache
|
||||
with:
|
||||
path: node_modules
|
||||
key: ${{ runner.os }}-yarn-${{ hashFiles('**/yarn.lock') }}
|
||||
restore-keys: ${{ runner.os }}-yarn-
|
||||
|
||||
# - To stop from uploading on the production
|
||||
# - name: Config. aws cred
|
||||
# uses: aws-actions/configure-aws-credentials@v1
|
||||
# with:
|
||||
# aws_access_key_id: ${{ secrets.AWS_ACCESS_KEY_ID }}
|
||||
# aws_secret_access_key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
|
||||
# aws-region: us-east-2
|
||||
# - name: Deploy
|
||||
# uses: shallwefootball/s3-upload-action@master
|
||||
# with:
|
||||
# folder: build
|
||||
# aws_bucket: ${{ secrets.AWS_S3_BUCKET }}
|
||||
# aws_key_id: ${{ secrets.AWS_ACCESS_KEY_ID }}
|
||||
# aws_secret_access_key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
|
||||
# destination_dir: /
|
||||
# bucket-region: us-east-2
|
||||
# delete-removed: true
|
||||
# no-cache: true
|
||||
# private: true
|
||||
|
||||
# Docs:
|
||||
# needs: [windows-artifact,release-artifact]
|
||||
# runs-on: ubuntu-latest
|
||||
# env:
|
||||
# GITHUB_TOKEN: ${{secrets.PAT_NEW}}
|
||||
# GITHUB_EVENT_BEFORE: ${{ github.event.before }}
|
||||
# GITHUB_EVENT_AFTER: ${{ github.event.after }}
|
||||
# COMMIT_RANGE: ${{ github.event.before}}...${{ github.event.after}}
|
||||
# steps:
|
||||
# - name: Checkout repo
|
||||
# uses: actions/checkout@v2
|
||||
# with:
|
||||
# fetch-depth: 2
|
||||
# - name: docs
|
||||
# if: ${{github.event_name == 'pull_request' || startsWith(github.ref, 'refs/tags/v')}}
|
||||
# run: |
|
||||
# touch .env
|
||||
# echo "COMMIT_RANGE=($COMMIT_RANGE)" > .env
|
||||
# source ci/env.sh
|
||||
# .travis/channel_restriction.sh edge beta || exit 0
|
||||
# .travis/affects.sh docs/ .travis || exit 0
|
||||
# cd docs/
|
||||
# source .travis/before_install.sh
|
||||
# source .travis/script.sh
|
||||
# - name: setup-node
|
||||
# uses: actions/checkout@v2
|
||||
# - name: setup-node
|
||||
# uses: actions/setup-node@v2
|
||||
# with:
|
||||
# node-version: 'lts/*'
|
||||
# - name: Cache
|
||||
# uses: actions/cache@v1
|
||||
# with:
|
||||
# path: ~/.npm
|
||||
# key: ${{ runner.OS }}-npm-cache-${{ hashFiles('**/package-lock.json') }}
|
||||
# restore-keys: |
|
||||
# ${{ runner.OS }}-npm-cache-2
|
||||
|
||||
# auto_bump:
|
||||
# needs: [windows-artifact,release-artifact,Docs]
|
||||
# runs-on: ubuntu-latest
|
||||
# steps:
|
||||
# - name : checkout repo
|
||||
# uses: actions/checkout@v2
|
||||
# with:
|
||||
# fetch-depth: '0'
|
||||
# - name: Bump version and push tag
|
||||
# uses: anothrNick/github-tag-action@1.26.0
|
||||
# env:
|
||||
# GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
# WITH_V: true
|
||||
# DEFAULT_BUMP: patch
|
71
.github/workflows/web3.yml
vendored
Normal file
71
.github/workflows/web3.yml
vendored
Normal file
@ -0,0 +1,71 @@
|
||||
name: Web3
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ master ]
|
||||
paths:
|
||||
- "web3.js/**"
|
||||
pull_request:
|
||||
branches: [ master ]
|
||||
paths:
|
||||
- "web3.js/**"
|
||||
|
||||
jobs:
|
||||
# needed for grouping check-web3 strategies into one check for mergify
|
||||
all-web3-checks:
|
||||
runs-on: ubuntu-latest
|
||||
needs:
|
||||
- check-web3
|
||||
steps:
|
||||
- run: echo "Done"
|
||||
|
||||
web3-commit-lint:
|
||||
runs-on: ubuntu-latest
|
||||
# Set to true in order to avoid cancelling other workflow jobs.
|
||||
# Mergify will still require web3-commit-lint for automerge
|
||||
continue-on-error: true
|
||||
|
||||
defaults:
|
||||
run:
|
||||
working-directory: web3.js
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
# maybe needed for base sha below
|
||||
fetch-depth: 0
|
||||
- uses: actions/setup-node@v2
|
||||
with:
|
||||
node-version: '16'
|
||||
cache: 'npm'
|
||||
cache-dependency-path: web3.js/package-lock.json
|
||||
- run: npm ci
|
||||
- name: commit-lint
|
||||
if: ${{ github.event_name == 'pull_request' }}
|
||||
run: bash commitlint.sh
|
||||
env:
|
||||
COMMIT_RANGE: ${{ github.event.pull_request.base.sha }}..${{ github.event.pull_request.head.sha }}
|
||||
|
||||
check-web3:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
defaults:
|
||||
run:
|
||||
working-directory: web3.js
|
||||
|
||||
strategy:
|
||||
matrix:
|
||||
node: [ '12', '14', '16' ]
|
||||
|
||||
name: Node ${{ matrix.node }}
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/setup-node@v2
|
||||
with:
|
||||
node-version: ${{ matrix.node }}
|
||||
cache: 'npm'
|
||||
cache-dependency-path: web3.js/package-lock.json
|
||||
- run: |
|
||||
source .travis/before_install.sh
|
||||
npm install
|
||||
source .travis/script.sh
|
3
.gitignore
vendored
3
.gitignore
vendored
@ -4,6 +4,7 @@
|
||||
/solana-metrics/
|
||||
/solana-metrics.tar.bz2
|
||||
/target/
|
||||
/test-ledger/
|
||||
|
||||
**/*.rs.bk
|
||||
.cargo
|
||||
@ -27,3 +28,5 @@ log-*/
|
||||
/spl_*.so
|
||||
|
||||
.DS_Store
|
||||
# scripts that may be generated by cargo *-bpf commands
|
||||
**/cargo-*-bpf-child-script-*.sh
|
||||
|
95
.mergify.yml
95
.mergify.yml
@ -4,24 +4,71 @@
|
||||
#
|
||||
# https://doc.mergify.io/
|
||||
pull_request_rules:
|
||||
- name: label changes from community
|
||||
conditions:
|
||||
- author≠@core-contributors
|
||||
- author≠mergify[bot]
|
||||
- author≠dependabot[bot]
|
||||
actions:
|
||||
label:
|
||||
add:
|
||||
- community
|
||||
- name: request review for community changes
|
||||
conditions:
|
||||
- author≠@core-contributors
|
||||
- author≠mergify[bot]
|
||||
- author≠dependabot[bot]
|
||||
# Only request reviews from the pr subscribers group if no one
|
||||
# has reviewed the community PR yet. These checks only match
|
||||
# reviewers with admin, write or maintain permission on the repository.
|
||||
- "#approved-reviews-by=0"
|
||||
- "#commented-reviews-by=0"
|
||||
- "#changes-requested-reviews-by=0"
|
||||
actions:
|
||||
request_reviews:
|
||||
teams:
|
||||
- "@solana-labs/community-pr-subscribers"
|
||||
- name: automatic merge (squash) on CI success
|
||||
conditions:
|
||||
- status-success=buildkite/solana
|
||||
- status-success=Travis CI - Pull Request
|
||||
- status-success=ci-gate
|
||||
- label=automerge
|
||||
- author≠@dont-squash-my-commits
|
||||
- and:
|
||||
- status-success=buildkite/solana
|
||||
- status-success=ci-gate
|
||||
- label=automerge
|
||||
- author≠@dont-squash-my-commits
|
||||
- or:
|
||||
# only require travis success if docs files changed
|
||||
- status-success=Travis CI - Pull Request
|
||||
- -files~=^docs/
|
||||
- or:
|
||||
# only require explorer checks if explorer files changed
|
||||
- status-success=check-explorer
|
||||
- -files~=^explorer/
|
||||
- or:
|
||||
- and:
|
||||
- status-success=all-web3-checks
|
||||
- status-success=web3-commit-lint
|
||||
# only require web3 checks if web3.js files changed
|
||||
- -files~=^web3.js/
|
||||
actions:
|
||||
merge:
|
||||
method: squash
|
||||
# Join the dont-squash-my-commits group if you won't like your commits squashed
|
||||
- name: automatic merge (rebase) on CI success
|
||||
conditions:
|
||||
- status-success=buildkite/solana
|
||||
- status-success=Travis CI - Pull Request
|
||||
- status-success=ci-gate
|
||||
- label=automerge
|
||||
- author=@dont-squash-my-commits
|
||||
- and:
|
||||
- status-success=buildkite/solana
|
||||
- status-success=Travis CI - Pull Request
|
||||
- status-success=ci-gate
|
||||
- label=automerge
|
||||
- author=@dont-squash-my-commits
|
||||
- or:
|
||||
# only require explorer checks if explorer files changed
|
||||
- status-success=check-explorer
|
||||
- -files~=^explorer/
|
||||
- or:
|
||||
# only require web3 checks if web3.js files changed
|
||||
- status-success=all-web3-checks
|
||||
- -files~=^web3.js/
|
||||
actions:
|
||||
merge:
|
||||
method: rebase
|
||||
@ -50,35 +97,19 @@ pull_request_rules:
|
||||
label:
|
||||
add:
|
||||
- automerge
|
||||
- name: v1.4 backport
|
||||
- name: v1.8 backport
|
||||
conditions:
|
||||
- label=v1.4
|
||||
- label=v1.8
|
||||
actions:
|
||||
backport:
|
||||
ignore_conflicts: true
|
||||
branches:
|
||||
- v1.4
|
||||
- name: v1.5 backport
|
||||
- v1.8
|
||||
- name: v1.9 backport
|
||||
conditions:
|
||||
- label=v1.5
|
||||
- label=v1.9
|
||||
actions:
|
||||
backport:
|
||||
ignore_conflicts: true
|
||||
branches:
|
||||
- v1.5
|
||||
- name: v1.6 backport
|
||||
conditions:
|
||||
- label=v1.6
|
||||
actions:
|
||||
backport:
|
||||
ignore_conflicts: true
|
||||
branches:
|
||||
- v1.6
|
||||
- name: v1.7 backport
|
||||
conditions:
|
||||
- label=v1.7
|
||||
actions:
|
||||
backport:
|
||||
ignore_conflicts: true
|
||||
branches:
|
||||
- v1.7
|
||||
- v1.9
|
||||
|
59
.travis.yml
59
.travis.yml
@ -23,12 +23,12 @@ jobs:
|
||||
depth: false
|
||||
script:
|
||||
- .travis/export-github-repo.sh web3.js/ solana-web3.js
|
||||
- .travis/export-github-repo.sh explorer/ explorer
|
||||
|
||||
- &release-artifacts
|
||||
if: type IN (api, cron) OR tag IS present
|
||||
name: "macOS release artifacts"
|
||||
os: osx
|
||||
osx_image: xcode12
|
||||
language: rust
|
||||
rust:
|
||||
- stable
|
||||
@ -36,8 +36,12 @@ jobs:
|
||||
- source ci/rust-version.sh
|
||||
- PATH="/usr/local/opt/coreutils/libexec/gnubin:$PATH"
|
||||
- readlink -f .
|
||||
- brew install gnu-tar
|
||||
- PATH="/usr/local/opt/gnu-tar/libexec/gnubin:$PATH"
|
||||
- tar --version
|
||||
script:
|
||||
- source ci/env.sh
|
||||
- rustup set profile default
|
||||
- ci/publish-tarball.sh
|
||||
deploy:
|
||||
- provider: s3
|
||||
@ -60,6 +64,12 @@ jobs:
|
||||
- <<: *release-artifacts
|
||||
name: "Windows release artifacts"
|
||||
os: windows
|
||||
install:
|
||||
- choco install openssl
|
||||
- export OPENSSL_DIR="C:\Program Files\OpenSSL-Win64"
|
||||
- source ci/rust-version.sh
|
||||
- PATH="/usr/local/opt/coreutils/libexec/gnubin:$PATH"
|
||||
- readlink -f .
|
||||
# Linux release artifacts are still built by ci/buildkite-secondary.yml
|
||||
#- <<: *release-artifacts
|
||||
# name: "Linux release artifacts"
|
||||
@ -67,56 +77,13 @@ jobs:
|
||||
# before_install:
|
||||
# - sudo apt-get install libssl-dev libudev-dev
|
||||
|
||||
# explorer pull request
|
||||
- name: "explorer"
|
||||
if: type = pull_request AND branch = master
|
||||
|
||||
language: node_js
|
||||
node_js:
|
||||
- "node"
|
||||
|
||||
cache:
|
||||
directories:
|
||||
- ~/.npm
|
||||
|
||||
before_install:
|
||||
- .travis/affects.sh explorer/ .travis || travis_terminate 0
|
||||
- cd explorer
|
||||
|
||||
script:
|
||||
- npm run build
|
||||
- npm run format
|
||||
|
||||
# web3.js pull request
|
||||
- name: "web3.js"
|
||||
if: type = pull_request AND branch = master
|
||||
|
||||
language: node_js
|
||||
node_js:
|
||||
- "lts/*"
|
||||
|
||||
services:
|
||||
- docker
|
||||
|
||||
cache:
|
||||
directories:
|
||||
- ~/.npm
|
||||
|
||||
before_install:
|
||||
- .travis/affects.sh web3.js/ .travis || travis_terminate 0
|
||||
- cd web3.js/
|
||||
- source .travis/before_install.sh
|
||||
|
||||
script:
|
||||
- ../.travis/commitlint.sh
|
||||
- source .travis/script.sh
|
||||
|
||||
# docs pull request
|
||||
|
||||
- name: "docs"
|
||||
if: type IN (push, pull_request) OR tag IS present
|
||||
language: node_js
|
||||
node_js:
|
||||
- "node"
|
||||
- "lts/*"
|
||||
|
||||
services:
|
||||
- docker
|
||||
|
@ -74,7 +74,7 @@ minutes to execute. Use that time to write a detailed problem description. Once
|
||||
the description is written and CI succeeds, click the "Ready to Review" button
|
||||
and add reviewers. Adding reviewers before CI succeeds is a fast path to losing
|
||||
reviewer engagement. Not only will they be notified and see the PR is not yet
|
||||
ready for them, they will also be bombarded them with additional notifications
|
||||
ready for them, they will also be bombarded with additional notifications
|
||||
each time you push a commit to get past CI or until they "mute" the PR. Once
|
||||
muted, you'll need to reach out over some other medium, such as Discord, to
|
||||
request they have another look. When you use draft PRs, no notifications are
|
||||
|
5623
Cargo.lock
generated
5623
Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
34
Cargo.toml
34
Cargo.toml
@ -1,7 +1,8 @@
|
||||
[workspace]
|
||||
members = [
|
||||
"accountsdb-plugin-interface",
|
||||
"accountsdb-plugin-manager",
|
||||
"accounts-cluster-bench",
|
||||
"bench-exchange",
|
||||
"bench-streamer",
|
||||
"bench-tps",
|
||||
"accounts-bench",
|
||||
@ -9,6 +10,8 @@ members = [
|
||||
"banks-client",
|
||||
"banks-interface",
|
||||
"banks-server",
|
||||
"bucket_map",
|
||||
"bloom",
|
||||
"clap-utils",
|
||||
"cli-config",
|
||||
"cli-output",
|
||||
@ -16,11 +19,13 @@ members = [
|
||||
"core",
|
||||
"dos",
|
||||
"download-utils",
|
||||
"entry",
|
||||
"faucet",
|
||||
"frozen-abi",
|
||||
"perf",
|
||||
"validator",
|
||||
"genesis",
|
||||
"genesis-utils",
|
||||
"gossip",
|
||||
"install",
|
||||
"keygen",
|
||||
@ -31,7 +36,6 @@ members = [
|
||||
"log-analyzer",
|
||||
"merkle-root-bench",
|
||||
"merkle-tree",
|
||||
"stake-o-matic",
|
||||
"storage-bigtable",
|
||||
"storage-proto",
|
||||
"streamer",
|
||||
@ -39,31 +43,30 @@ members = [
|
||||
"metrics",
|
||||
"net-shaper",
|
||||
"notifier",
|
||||
"poh",
|
||||
"poh-bench",
|
||||
"program-test",
|
||||
"programs/secp256k1",
|
||||
"programs/address-lookup-table",
|
||||
"programs/address-lookup-table-tests",
|
||||
"programs/bpf_loader",
|
||||
"programs/budget",
|
||||
"programs/bpf_loader/gen-syscall-list",
|
||||
"programs/compute-budget",
|
||||
"programs/config",
|
||||
"programs/exchange",
|
||||
"programs/failure",
|
||||
"programs/noop",
|
||||
"programs/ownable",
|
||||
"programs/stake",
|
||||
"programs/vest",
|
||||
"programs/vote",
|
||||
"rbpf-cli",
|
||||
"remote-wallet",
|
||||
"ramp-tps",
|
||||
"rpc",
|
||||
"runtime",
|
||||
"runtime/store-tool",
|
||||
"sdk",
|
||||
"sdk/cargo-build-bpf",
|
||||
"sdk/cargo-test-bpf",
|
||||
"scripts",
|
||||
"send-transaction-service",
|
||||
"stake-accounts",
|
||||
"stake-monitor",
|
||||
"sys-tuner",
|
||||
"tokens",
|
||||
"transaction-dos",
|
||||
"transaction-status",
|
||||
"account-decoder",
|
||||
"upload-perf",
|
||||
@ -72,6 +75,13 @@ members = [
|
||||
"cli",
|
||||
"rayon-threadlimit",
|
||||
"watchtower",
|
||||
"replica-node",
|
||||
"replica-lib",
|
||||
"test-validator",
|
||||
"rpc-test",
|
||||
"client-test",
|
||||
"zk-token-sdk",
|
||||
"programs/zk-token-proof",
|
||||
]
|
||||
|
||||
exclude = [
|
||||
|
29
README.md
29
README.md
@ -1,6 +1,6 @@
|
||||
<p align="center">
|
||||
<a href="https://solana.com">
|
||||
<img alt="Solana" src="https://i.imgur.com/OMnvVEz.png" width="250" />
|
||||
<img alt="Solana" src="https://i.imgur.com/IKyzQ6T.png" width="250" />
|
||||
</a>
|
||||
</p>
|
||||
|
||||
@ -19,12 +19,18 @@ $ source $HOME/.cargo/env
|
||||
$ rustup component add rustfmt
|
||||
```
|
||||
|
||||
Please sure you are always using the latest stable rust version by running:
|
||||
When building the master branch, please make sure you are using the latest stable rust version by running:
|
||||
|
||||
```bash
|
||||
$ rustup update
|
||||
```
|
||||
|
||||
When building a specific release branch, you should check the rust version in `ci/rust-version.sh` and if necessary, install that version by running:
|
||||
```bash
|
||||
$ rustup install VERSION
|
||||
```
|
||||
Note that if this is not the latest rust version on your machine, cargo commands may require an [override](https://rust-lang.github.io/rustup/overrides.html) in order to use the correct version.
|
||||
|
||||
On Linux systems you may need to install libssl-dev, pkg-config, zlib1g-dev, etc. On Ubuntu:
|
||||
|
||||
```bash
|
||||
@ -45,11 +51,6 @@ $ cd solana
|
||||
$ cargo build
|
||||
```
|
||||
|
||||
## **4. Run a minimal local cluster.**
|
||||
```bash
|
||||
$ ./run.sh
|
||||
```
|
||||
|
||||
# Testing
|
||||
|
||||
**Run the test suite:**
|
||||
@ -67,7 +68,7 @@ devnet.solana.com. Runs 24/7. Learn more about the [public clusters](https://doc
|
||||
|
||||
# Benchmarking
|
||||
|
||||
First install the nightly build of rustc. `cargo bench` requires use of the
|
||||
First, install the nightly build of rustc. `cargo bench` requires the use of the
|
||||
unstable features only available in the nightly build.
|
||||
|
||||
```bash
|
||||
@ -109,17 +110,17 @@ send us that patch!
|
||||
|
||||
All claims, content, designs, algorithms, estimates, roadmaps,
|
||||
specifications, and performance measurements described in this project
|
||||
are done with the Solana Foundation's ("SF") best efforts. It is up to
|
||||
are done with the Solana Foundation's ("SF") good faith efforts. It is up to
|
||||
the reader to check and validate their accuracy and truthfulness.
|
||||
Furthermore nothing in this project constitutes a solicitation for
|
||||
Furthermore, nothing in this project constitutes a solicitation for
|
||||
investment.
|
||||
|
||||
Any content produced by SF or developer resources that SF provides, are
|
||||
for educational and inspiration purposes only. SF does not encourage,
|
||||
Any content produced by SF or developer resources that SF provides are
|
||||
for educational and inspirational purposes only. SF does not encourage,
|
||||
induce or sanction the deployment, integration or use of any such
|
||||
applications (including the code comprising the Solana blockchain
|
||||
protocol) in violation of applicable laws or regulations and hereby
|
||||
prohibits any such deployment, integration or use. This includes use of
|
||||
prohibits any such deployment, integration or use. This includes the use of
|
||||
any such applications by the reader (a) in violation of export control
|
||||
or sanctions laws of the United States or any other applicable
|
||||
jurisdiction, (b) if the reader is located in or ordinarily resident in
|
||||
@ -132,7 +133,7 @@ prohibitions.
|
||||
The reader should be aware that U.S. export control and sanctions laws
|
||||
prohibit U.S. persons (and other persons that are subject to such laws)
|
||||
from transacting with persons in certain countries and territories or
|
||||
that are on the SDN list. As a project based primarily on open-source
|
||||
that are on the SDN list. As a project-based primarily on open-source
|
||||
software, it is possible that such sanctioned persons may nevertheless
|
||||
bypass prohibitions, obtain the code comprising the Solana blockchain
|
||||
protocol (or other project code or applications) and deploy, integrate,
|
||||
|
@ -94,7 +94,7 @@ Alternatively use the Github UI.
|
||||
```
|
||||
1. Confirm that your freshly cut release branch is shown as `BETA_CHANNEL` and the previous release branch as `STABLE_CHANNEL`:
|
||||
```
|
||||
ci/channel_info.sh
|
||||
ci/channel-info.sh
|
||||
```
|
||||
|
||||
## Steps to Create a Release
|
||||
@ -152,5 +152,5 @@ appearing. To check for progress:
|
||||
[Crates.io](https://crates.io/crates/solana) should have an updated Solana version. This can take 2-3 hours, and sometimes fails in the `solana-secondary` job.
|
||||
If this happens and the error is non-fatal, click "Retry" on the "publish crate" job
|
||||
|
||||
### Update software on devnet.solana.com/testnet.solana.com/mainnet-beta.solana.com
|
||||
See the documentation at https://github.com/solana-labs/cluster-ops/
|
||||
### Update software on testnet.solana.com
|
||||
See the documentation at https://github.com/solana-labs/cluster-ops/. devnet.solana.com and mainnet-beta.solana.com run stable releases that have been tested on testnet. Do not update devnet or mainnet-beta with a beta release.
|
||||
|
36
SECURITY.md
36
SECURITY.md
@ -18,38 +18,60 @@ Expect a response as fast as possible, within one business day at the latest.
|
||||
We offer bounties for critical security issues. Please see below for more details.
|
||||
|
||||
Loss of Funds:
|
||||
$500,000 USD in locked SOL tokens (locked for 12 months)
|
||||
$2,000,000 USD in locked SOL tokens (locked for 12 months)
|
||||
* Theft of funds without users signature from any account
|
||||
* Theft of funds without users interaction in system, token, stake, vote programs
|
||||
* Theft of funds that requires users signature - creating a vote program that drains the delegated stakes.
|
||||
|
||||
Consensus/Safety Violations:
|
||||
$250,000 USD in locked SOL tokens (locked for 12 months)
|
||||
$1,000,000 USD in locked SOL tokens (locked for 12 months)
|
||||
* Consensus safety violation
|
||||
* Tricking a validator to accept an optimistic confirmation or rooted slot without a double vote, etc..
|
||||
|
||||
Other Attacks:
|
||||
$100,000 USD in locked SOL tokens (locked for 12 months)
|
||||
$400,000 USD in locked SOL tokens (locked for 12 months)
|
||||
* Protocol liveness attacks,
|
||||
* Eclipse attacks,
|
||||
* Remote attacks that partition the network,
|
||||
|
||||
DoS Attacks:
|
||||
$25,000 USD in locked SOL tokens (locked for 12 months)
|
||||
$100,000 USD in locked SOL tokens (locked for 12 months)
|
||||
* Remote resource exaustion via Non-RPC protocols
|
||||
|
||||
RPC DoS/Crashes:
|
||||
$5,000 USD in locked SOL tokens (locked for 12 months)
|
||||
* RPC attacks
|
||||
|
||||
Out of Scope:
|
||||
The following components are out of scope for the bounty program
|
||||
* Metrics: `/metrics` in the monorepo as well as https://metrics.solana.com
|
||||
* Explorer: `/explorer` in the monorepo as well as https://explorer.solana.com
|
||||
* Any encrypted credentials, auth tokens, etc. checked into the repo
|
||||
* Bugs in dependencies. Please take them upstream!
|
||||
* Attacks that require social engineering
|
||||
|
||||
Eligibility:
|
||||
* The participant submitting the bug bounty shall follow the process outlined within this document
|
||||
* The participant submitting the bug report shall follow the process outlined within this document
|
||||
* Valid exploits can be eligible even if they are not successfully executed on the cluster
|
||||
* Multiple submissions for the same class of exploit are still eligible for compensation, though may be compensated at a lower rate, however these will be assessed on a case-by-case basis
|
||||
* Participants must complete KYC and sign the participation agreement here when the registrations are open https://solana.com/validator-registration. Security exploits will still be assessed and open for submission at all times. This needs only be done prior to distribution of tokens.
|
||||
|
||||
Notes:
|
||||
* All locked tokens can be staked during the lockup period
|
||||
Payment of Bug Bounties:
|
||||
* Payments for eligible bug reports are distributed monthly.
|
||||
* Bounties for all bug reports submitted in a given month are paid out in the middle of the
|
||||
following month.
|
||||
* The SOL/USD conversion rate used for payments is the market price at the end of
|
||||
the last day of the month for the month in which the bug was submitted.
|
||||
* The reference for this price is the Closing Price given by Coingecko.com on
|
||||
that date given here:
|
||||
https://www.coingecko.com/en/coins/solana/historical_data/usd#panel
|
||||
* For example, for all bugs submitted in March 2021, the SOL/USD price for bug
|
||||
payouts is the Close price on 2021-03-31 of $19.49. This applies to all bugs
|
||||
submitted in March 2021, to be paid in mid-April 2021.
|
||||
* Bug bounties are paid out in
|
||||
[stake accounts](https://solana.com/staking) with a
|
||||
[lockup](https://docs.solana.com/staking/stake-accounts#lockups)
|
||||
expiring 12 months from the last day of the month in which the bug was submitted.
|
||||
|
||||
<a name="process"></a>
|
||||
## Incident Response Process
|
||||
|
@ -1,31 +1,30 @@
|
||||
[package]
|
||||
name = "solana-account-decoder"
|
||||
version = "1.6.0"
|
||||
version = "1.10.0"
|
||||
description = "Solana account decoder"
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
homepage = "https://solana.com/"
|
||||
documentation = "https://docs.rs/solana-account-decoder"
|
||||
license = "Apache-2.0"
|
||||
edition = "2018"
|
||||
edition = "2021"
|
||||
|
||||
[dependencies]
|
||||
base64 = "0.12.3"
|
||||
bincode = "1.3.1"
|
||||
bs58 = "0.3.1"
|
||||
bincode = "1.3.3"
|
||||
bs58 = "0.4.0"
|
||||
bv = "0.11.1"
|
||||
Inflector = "0.11.4"
|
||||
lazy_static = "1.4.0"
|
||||
serde = "1.0.122"
|
||||
serde = "1.0.134"
|
||||
serde_derive = "1.0.103"
|
||||
serde_json = "1.0.56"
|
||||
solana-config-program = { path = "../programs/config", version = "1.6.0" }
|
||||
solana-sdk = { path = "../sdk", version = "1.6.0" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "1.6.0" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "1.6.0" }
|
||||
spl-token-v2-0 = { package = "spl-token", version = "=3.1.0", features = ["no-entrypoint"] }
|
||||
serde_json = "1.0.78"
|
||||
solana-config-program = { path = "../programs/config", version = "=1.10.0" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.10.0" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "=1.10.0" }
|
||||
spl-token = { version = "=3.2.0", features = ["no-entrypoint"] }
|
||||
thiserror = "1.0"
|
||||
zstd = "0.5.1"
|
||||
zstd = "0.9.2"
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
||||
|
@ -17,8 +17,10 @@ pub mod validator_info;
|
||||
use {
|
||||
crate::parse_account_data::{parse_account_data, AccountAdditionalData, ParsedAccount},
|
||||
solana_sdk::{
|
||||
account::ReadableAccount, account::WritableAccount, clock::Epoch,
|
||||
fee_calculator::FeeCalculator, pubkey::Pubkey,
|
||||
account::{ReadableAccount, WritableAccount},
|
||||
clock::Epoch,
|
||||
fee_calculator::FeeCalculator,
|
||||
pubkey::Pubkey,
|
||||
},
|
||||
std::{
|
||||
io::{Read, Write},
|
||||
@ -28,6 +30,7 @@ use {
|
||||
|
||||
pub type StringAmount = String;
|
||||
pub type StringDecimals = String;
|
||||
pub const MAX_BASE58_BYTES: usize = 128;
|
||||
|
||||
/// A duplicate representation of an Account for pretty JSON serialization
|
||||
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||
@ -48,7 +51,7 @@ pub enum UiAccountData {
|
||||
Binary(String, UiAccountEncoding),
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
|
||||
#[derive(Serialize, Deserialize, Clone, Copy, Debug, PartialEq, Eq, Hash)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub enum UiAccountEncoding {
|
||||
Binary, // Legacy. Retained for RPC backwards compatibility
|
||||
@ -60,41 +63,53 @@ pub enum UiAccountEncoding {
|
||||
}
|
||||
|
||||
impl UiAccount {
|
||||
fn encode_bs58<T: ReadableAccount>(
|
||||
account: &T,
|
||||
data_slice_config: Option<UiDataSliceConfig>,
|
||||
) -> String {
|
||||
if account.data().len() <= MAX_BASE58_BYTES {
|
||||
bs58::encode(slice_data(account.data(), data_slice_config)).into_string()
|
||||
} else {
|
||||
"error: data too large for bs58 encoding".to_string()
|
||||
}
|
||||
}
|
||||
|
||||
pub fn encode<T: ReadableAccount>(
|
||||
pubkey: &Pubkey,
|
||||
account: T,
|
||||
account: &T,
|
||||
encoding: UiAccountEncoding,
|
||||
additional_data: Option<AccountAdditionalData>,
|
||||
data_slice_config: Option<UiDataSliceConfig>,
|
||||
) -> Self {
|
||||
let data = match encoding {
|
||||
UiAccountEncoding::Binary => UiAccountData::LegacyBinary(
|
||||
bs58::encode(slice_data(&account.data(), data_slice_config)).into_string(),
|
||||
),
|
||||
UiAccountEncoding::Base58 => UiAccountData::Binary(
|
||||
bs58::encode(slice_data(&account.data(), data_slice_config)).into_string(),
|
||||
encoding,
|
||||
),
|
||||
UiAccountEncoding::Binary => {
|
||||
let data = Self::encode_bs58(account, data_slice_config);
|
||||
UiAccountData::LegacyBinary(data)
|
||||
}
|
||||
UiAccountEncoding::Base58 => {
|
||||
let data = Self::encode_bs58(account, data_slice_config);
|
||||
UiAccountData::Binary(data, encoding)
|
||||
}
|
||||
UiAccountEncoding::Base64 => UiAccountData::Binary(
|
||||
base64::encode(slice_data(&account.data(), data_slice_config)),
|
||||
base64::encode(slice_data(account.data(), data_slice_config)),
|
||||
encoding,
|
||||
),
|
||||
UiAccountEncoding::Base64Zstd => {
|
||||
let mut encoder = zstd::stream::write::Encoder::new(Vec::new(), 0).unwrap();
|
||||
match encoder
|
||||
.write_all(slice_data(&account.data(), data_slice_config))
|
||||
.write_all(slice_data(account.data(), data_slice_config))
|
||||
.and_then(|()| encoder.finish())
|
||||
{
|
||||
Ok(zstd_data) => UiAccountData::Binary(base64::encode(zstd_data), encoding),
|
||||
Err(_) => UiAccountData::Binary(
|
||||
base64::encode(slice_data(&account.data(), data_slice_config)),
|
||||
base64::encode(slice_data(account.data(), data_slice_config)),
|
||||
UiAccountEncoding::Base64,
|
||||
),
|
||||
}
|
||||
}
|
||||
UiAccountEncoding::JsonParsed => {
|
||||
if let Ok(parsed_data) =
|
||||
parse_account_data(pubkey, &account.owner(), &account.data(), additional_data)
|
||||
parse_account_data(pubkey, account.owner(), account.data(), additional_data)
|
||||
{
|
||||
UiAccountData::Json(parsed_data)
|
||||
} else {
|
||||
@ -121,16 +136,13 @@ impl UiAccount {
|
||||
UiAccountData::Binary(blob, encoding) => match encoding {
|
||||
UiAccountEncoding::Base58 => bs58::decode(blob).into_vec().ok(),
|
||||
UiAccountEncoding::Base64 => base64::decode(blob).ok(),
|
||||
UiAccountEncoding::Base64Zstd => base64::decode(blob)
|
||||
.ok()
|
||||
.map(|zstd_data| {
|
||||
let mut data = vec![];
|
||||
zstd::stream::read::Decoder::new(zstd_data.as_slice())
|
||||
.and_then(|mut reader| reader.read_to_end(&mut data))
|
||||
.map(|_| data)
|
||||
.ok()
|
||||
})
|
||||
.flatten(),
|
||||
UiAccountEncoding::Base64Zstd => base64::decode(blob).ok().and_then(|zstd_data| {
|
||||
let mut data = vec![];
|
||||
zstd::stream::read::Decoder::new(zstd_data.as_slice())
|
||||
.and_then(|mut reader| reader.read_to_end(&mut data))
|
||||
.map(|_| data)
|
||||
.ok()
|
||||
}),
|
||||
UiAccountEncoding::Binary | UiAccountEncoding::JsonParsed => None,
|
||||
},
|
||||
}?;
|
||||
@ -166,7 +178,7 @@ impl Default for UiFeeCalculator {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)]
|
||||
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct UiDataSliceConfig {
|
||||
pub offset: usize,
|
||||
@ -189,8 +201,10 @@ fn slice_data(data: &[u8], data_slice_config: Option<UiDataSliceConfig>) -> &[u8
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use solana_sdk::account::{Account, AccountSharedData};
|
||||
use {
|
||||
super::*,
|
||||
solana_sdk::account::{Account, AccountSharedData},
|
||||
};
|
||||
|
||||
#[test]
|
||||
fn test_slice_data() {
|
||||
@ -224,7 +238,7 @@ mod test {
|
||||
fn test_base64_zstd() {
|
||||
let encoded_account = UiAccount::encode(
|
||||
&Pubkey::default(),
|
||||
AccountSharedData::from(Account {
|
||||
&AccountSharedData::from(Account {
|
||||
data: vec![0; 1024],
|
||||
..Account::default()
|
||||
}),
|
||||
|
@ -1,25 +1,27 @@
|
||||
use crate::{
|
||||
parse_bpf_loader::parse_bpf_upgradeable_loader,
|
||||
parse_config::parse_config,
|
||||
parse_nonce::parse_nonce,
|
||||
parse_stake::parse_stake,
|
||||
parse_sysvar::parse_sysvar,
|
||||
parse_token::{parse_token, spl_token_id_v2_0},
|
||||
parse_vote::parse_vote,
|
||||
use {
|
||||
crate::{
|
||||
parse_bpf_loader::parse_bpf_upgradeable_loader,
|
||||
parse_config::parse_config,
|
||||
parse_nonce::parse_nonce,
|
||||
parse_stake::parse_stake,
|
||||
parse_sysvar::parse_sysvar,
|
||||
parse_token::{parse_token, spl_token_id},
|
||||
parse_vote::parse_vote,
|
||||
},
|
||||
inflector::Inflector,
|
||||
serde_json::Value,
|
||||
solana_sdk::{instruction::InstructionError, pubkey::Pubkey, stake, system_program, sysvar},
|
||||
std::collections::HashMap,
|
||||
thiserror::Error,
|
||||
};
|
||||
use inflector::Inflector;
|
||||
use serde_json::Value;
|
||||
use solana_sdk::{instruction::InstructionError, pubkey::Pubkey, system_program, sysvar};
|
||||
use std::collections::HashMap;
|
||||
use thiserror::Error;
|
||||
|
||||
lazy_static! {
|
||||
static ref BPF_UPGRADEABLE_LOADER_PROGRAM_ID: Pubkey = solana_sdk::bpf_loader_upgradeable::id();
|
||||
static ref CONFIG_PROGRAM_ID: Pubkey = solana_config_program::id();
|
||||
static ref STAKE_PROGRAM_ID: Pubkey = solana_stake_program::id();
|
||||
static ref STAKE_PROGRAM_ID: Pubkey = stake::program::id();
|
||||
static ref SYSTEM_PROGRAM_ID: Pubkey = system_program::id();
|
||||
static ref SYSVAR_PROGRAM_ID: Pubkey = sysvar::id();
|
||||
static ref TOKEN_PROGRAM_ID: Pubkey = spl_token_id_v2_0();
|
||||
static ref TOKEN_PROGRAM_ID: Pubkey = spl_token_id();
|
||||
static ref VOTE_PROGRAM_ID: Pubkey = solana_vote_program::id();
|
||||
pub static ref PARSABLE_PROGRAM_IDS: HashMap<Pubkey, ParsableAccount> = {
|
||||
let mut m = HashMap::new();
|
||||
@ -112,12 +114,14 @@ pub fn parse_account_data(
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use solana_sdk::nonce::{
|
||||
state::{Data, Versions},
|
||||
State,
|
||||
use {
|
||||
super::*,
|
||||
solana_sdk::nonce::{
|
||||
state::{Data, Versions},
|
||||
State,
|
||||
},
|
||||
solana_vote_program::vote_state::{VoteState, VoteStateVersions},
|
||||
};
|
||||
use solana_vote_program::vote_state::{VoteState, VoteStateVersions};
|
||||
|
||||
#[test]
|
||||
fn test_parse_account_data() {
|
||||
|
@ -1,9 +1,11 @@
|
||||
use crate::{
|
||||
parse_account_data::{ParsableAccount, ParseAccountError},
|
||||
UiAccountData, UiAccountEncoding,
|
||||
use {
|
||||
crate::{
|
||||
parse_account_data::{ParsableAccount, ParseAccountError},
|
||||
UiAccountData, UiAccountEncoding,
|
||||
},
|
||||
bincode::{deserialize, serialized_size},
|
||||
solana_sdk::{bpf_loader_upgradeable::UpgradeableLoaderState, pubkey::Pubkey},
|
||||
};
|
||||
use bincode::{deserialize, serialized_size};
|
||||
use solana_sdk::{bpf_loader_upgradeable::UpgradeableLoaderState, pubkey::Pubkey};
|
||||
|
||||
pub fn parse_bpf_upgradeable_loader(
|
||||
data: &[u8],
|
||||
@ -90,9 +92,7 @@ pub struct UiProgramData {
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use bincode::serialize;
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
use {super::*, bincode::serialize, solana_sdk::pubkey::Pubkey};
|
||||
|
||||
#[test]
|
||||
fn test_parse_bpf_upgradeable_loader_accounts() {
|
||||
|
@ -1,15 +1,19 @@
|
||||
use crate::{
|
||||
parse_account_data::{ParsableAccount, ParseAccountError},
|
||||
validator_info,
|
||||
use {
|
||||
crate::{
|
||||
parse_account_data::{ParsableAccount, ParseAccountError},
|
||||
validator_info,
|
||||
},
|
||||
bincode::deserialize,
|
||||
serde_json::Value,
|
||||
solana_config_program::{get_config_data, ConfigKeys},
|
||||
solana_sdk::{
|
||||
pubkey::Pubkey,
|
||||
stake::config::{self as stake_config, Config as StakeConfig},
|
||||
},
|
||||
};
|
||||
use bincode::deserialize;
|
||||
use serde_json::Value;
|
||||
use solana_config_program::{get_config_data, ConfigKeys};
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
use solana_stake_program::config::Config as StakeConfig;
|
||||
|
||||
pub fn parse_config(data: &[u8], pubkey: &Pubkey) -> Result<ConfigAccountType, ParseAccountError> {
|
||||
let parsed_account = if pubkey == &solana_stake_program::config::id() {
|
||||
let parsed_account = if pubkey == &stake_config::id() {
|
||||
get_config_data(data)
|
||||
.ok()
|
||||
.and_then(|data| deserialize::<StakeConfig>(data).ok())
|
||||
@ -37,7 +41,7 @@ fn parse_config_data<T>(data: &[u8], keys: Vec<(Pubkey, bool)>) -> Option<UiConf
|
||||
where
|
||||
T: serde::de::DeserializeOwned,
|
||||
{
|
||||
let config_data: T = deserialize(&get_config_data(data).ok()?).ok()?;
|
||||
let config_data: T = deserialize(get_config_data(data).ok()?).ok()?;
|
||||
let keys = keys
|
||||
.iter()
|
||||
.map(|key| UiConfigKey {
|
||||
@ -87,11 +91,10 @@ pub struct UiConfig<T> {
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use crate::validator_info::ValidatorInfo;
|
||||
use serde_json::json;
|
||||
use solana_config_program::create_config_account;
|
||||
use solana_sdk::account::ReadableAccount;
|
||||
use {
|
||||
super::*, crate::validator_info::ValidatorInfo, serde_json::json,
|
||||
solana_config_program::create_config_account, solana_sdk::account::ReadableAccount,
|
||||
};
|
||||
|
||||
#[test]
|
||||
fn test_parse_config() {
|
||||
@ -101,11 +104,7 @@ mod test {
|
||||
};
|
||||
let stake_config_account = create_config_account(vec![], &stake_config, 10);
|
||||
assert_eq!(
|
||||
parse_config(
|
||||
&stake_config_account.data(),
|
||||
&solana_stake_program::config::id()
|
||||
)
|
||||
.unwrap(),
|
||||
parse_config(stake_config_account.data(), &stake_config::id()).unwrap(),
|
||||
ConfigAccountType::StakeConfig(UiStakeConfig {
|
||||
warmup_cooldown_rate: 0.25,
|
||||
slash_penalty: 50,
|
||||
@ -125,7 +124,7 @@ mod test {
|
||||
10,
|
||||
);
|
||||
assert_eq!(
|
||||
parse_config(&validator_info_config_account.data(), &info_pubkey).unwrap(),
|
||||
parse_config(validator_info_config_account.data(), &info_pubkey).unwrap(),
|
||||
ConfigAccountType::ValidatorInfo(UiConfig {
|
||||
keys: vec![
|
||||
UiConfigKey {
|
||||
|
@ -1,7 +1,9 @@
|
||||
use crate::{parse_account_data::ParseAccountError, UiFeeCalculator};
|
||||
use solana_sdk::{
|
||||
instruction::InstructionError,
|
||||
nonce::{state::Versions, State},
|
||||
use {
|
||||
crate::{parse_account_data::ParseAccountError, UiFeeCalculator},
|
||||
solana_sdk::{
|
||||
instruction::InstructionError,
|
||||
nonce::{state::Versions, State},
|
||||
},
|
||||
};
|
||||
|
||||
pub fn parse_nonce(data: &[u8]) -> Result<UiNonceState, ParseAccountError> {
|
||||
@ -9,7 +11,13 @@ pub fn parse_nonce(data: &[u8]) -> Result<UiNonceState, ParseAccountError> {
|
||||
.map_err(|_| ParseAccountError::from(InstructionError::InvalidAccountData))?;
|
||||
let nonce_state = nonce_state.convert_to_current();
|
||||
match nonce_state {
|
||||
State::Uninitialized => Ok(UiNonceState::Uninitialized),
|
||||
// This prevents parsing an allocated System-owned account with empty data of any non-zero
|
||||
// length as `uninitialized` nonce. An empty account of the wrong length can never be
|
||||
// initialized as a nonce account, and an empty account of the correct length may not be an
|
||||
// uninitialized nonce account, since it can be assigned to another program.
|
||||
State::Uninitialized => Err(ParseAccountError::from(
|
||||
InstructionError::InvalidAccountData,
|
||||
)),
|
||||
State::Initialized(data) => Ok(UiNonceState::Initialized(UiNonceData {
|
||||
authority: data.authority.to_string(),
|
||||
blockhash: data.blockhash.to_string(),
|
||||
@ -36,14 +44,16 @@ pub struct UiNonceData {
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use solana_sdk::{
|
||||
hash::Hash,
|
||||
nonce::{
|
||||
state::{Data, Versions},
|
||||
State,
|
||||
use {
|
||||
super::*,
|
||||
solana_sdk::{
|
||||
hash::Hash,
|
||||
nonce::{
|
||||
state::{Data, Versions},
|
||||
State,
|
||||
},
|
||||
pubkey::Pubkey,
|
||||
},
|
||||
pubkey::Pubkey,
|
||||
};
|
||||
|
||||
#[test]
|
||||
|
@ -1,10 +1,14 @@
|
||||
use crate::{
|
||||
parse_account_data::{ParsableAccount, ParseAccountError},
|
||||
StringAmount,
|
||||
use {
|
||||
crate::{
|
||||
parse_account_data::{ParsableAccount, ParseAccountError},
|
||||
StringAmount,
|
||||
},
|
||||
bincode::deserialize,
|
||||
solana_sdk::{
|
||||
clock::{Epoch, UnixTimestamp},
|
||||
stake::state::{Authorized, Delegation, Lockup, Meta, Stake, StakeState},
|
||||
},
|
||||
};
|
||||
use bincode::deserialize;
|
||||
use solana_sdk::clock::{Epoch, UnixTimestamp};
|
||||
use solana_stake_program::stake_state::{Authorized, Delegation, Lockup, Meta, Stake, StakeState};
|
||||
|
||||
pub fn parse_stake(data: &[u8]) -> Result<StakeAccountType, ParseAccountError> {
|
||||
let stake_state: StakeState = deserialize(data)
|
||||
@ -132,8 +136,7 @@ impl From<Delegation> for UiDelegation {
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use bincode::serialize;
|
||||
use {super::*, bincode::serialize};
|
||||
|
||||
#[test]
|
||||
fn test_parse_stake() {
|
||||
|
@ -1,21 +1,26 @@
|
||||
use crate::{
|
||||
parse_account_data::{ParsableAccount, ParseAccountError},
|
||||
StringAmount, UiFeeCalculator,
|
||||
};
|
||||
use bincode::deserialize;
|
||||
use bv::BitVec;
|
||||
use solana_sdk::{
|
||||
clock::{Clock, Epoch, Slot, UnixTimestamp},
|
||||
epoch_schedule::EpochSchedule,
|
||||
pubkey::Pubkey,
|
||||
rent::Rent,
|
||||
slot_hashes::SlotHashes,
|
||||
slot_history::{self, SlotHistory},
|
||||
stake_history::{StakeHistory, StakeHistoryEntry},
|
||||
sysvar::{self, fees::Fees, recent_blockhashes::RecentBlockhashes, rewards::Rewards},
|
||||
#[allow(deprecated)]
|
||||
use solana_sdk::sysvar::{fees::Fees, recent_blockhashes::RecentBlockhashes};
|
||||
use {
|
||||
crate::{
|
||||
parse_account_data::{ParsableAccount, ParseAccountError},
|
||||
StringAmount, UiFeeCalculator,
|
||||
},
|
||||
bincode::deserialize,
|
||||
bv::BitVec,
|
||||
solana_sdk::{
|
||||
clock::{Clock, Epoch, Slot, UnixTimestamp},
|
||||
epoch_schedule::EpochSchedule,
|
||||
pubkey::Pubkey,
|
||||
rent::Rent,
|
||||
slot_hashes::SlotHashes,
|
||||
slot_history::{self, SlotHistory},
|
||||
stake_history::{StakeHistory, StakeHistoryEntry},
|
||||
sysvar::{self, rewards::Rewards},
|
||||
},
|
||||
};
|
||||
|
||||
pub fn parse_sysvar(data: &[u8], pubkey: &Pubkey) -> Result<SysvarAccountType, ParseAccountError> {
|
||||
#[allow(deprecated)]
|
||||
let parsed_account = {
|
||||
if pubkey == &sysvar::clock::id() {
|
||||
deserialize::<Clock>(data)
|
||||
@ -91,7 +96,9 @@ pub fn parse_sysvar(data: &[u8], pubkey: &Pubkey) -> Result<SysvarAccountType, P
|
||||
pub enum SysvarAccountType {
|
||||
Clock(UiClock),
|
||||
EpochSchedule(EpochSchedule),
|
||||
#[allow(deprecated)]
|
||||
Fees(UiFees),
|
||||
#[allow(deprecated)]
|
||||
RecentBlockhashes(Vec<UiRecentBlockhashesEntry>),
|
||||
Rent(UiRent),
|
||||
Rewards(UiRewards),
|
||||
@ -127,6 +134,7 @@ impl From<Clock> for UiClock {
|
||||
pub struct UiFees {
|
||||
pub fee_calculator: UiFeeCalculator,
|
||||
}
|
||||
#[allow(deprecated)]
|
||||
impl From<Fees> for UiFees {
|
||||
fn from(fees: Fees) -> Self {
|
||||
Self {
|
||||
@ -212,15 +220,18 @@ pub struct UiStakeHistoryEntry {
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use solana_sdk::{
|
||||
account::create_account, fee_calculator::FeeCalculator, hash::Hash,
|
||||
sysvar::recent_blockhashes::IterItem,
|
||||
#[allow(deprecated)]
|
||||
use solana_sdk::sysvar::recent_blockhashes::IterItem;
|
||||
use {
|
||||
super::*,
|
||||
solana_sdk::{account::create_account_for_test, fee_calculator::FeeCalculator, hash::Hash},
|
||||
};
|
||||
|
||||
#[test]
|
||||
fn test_parse_sysvars() {
|
||||
let clock_sysvar = create_account(&Clock::default(), 1);
|
||||
let hash = Hash::new(&[1; 32]);
|
||||
|
||||
let clock_sysvar = create_account_for_test(&Clock::default());
|
||||
assert_eq!(
|
||||
parse_sysvar(&clock_sysvar.data, &sysvar::clock::id()).unwrap(),
|
||||
SysvarAccountType::Clock(UiClock::default()),
|
||||
@ -233,50 +244,48 @@ mod test {
|
||||
first_normal_epoch: 1,
|
||||
first_normal_slot: 12,
|
||||
};
|
||||
let epoch_schedule_sysvar = create_account(&epoch_schedule, 1);
|
||||
let epoch_schedule_sysvar = create_account_for_test(&epoch_schedule);
|
||||
assert_eq!(
|
||||
parse_sysvar(&epoch_schedule_sysvar.data, &sysvar::epoch_schedule::id()).unwrap(),
|
||||
SysvarAccountType::EpochSchedule(epoch_schedule),
|
||||
);
|
||||
|
||||
let fees_sysvar = create_account(&Fees::default(), 1);
|
||||
assert_eq!(
|
||||
parse_sysvar(&fees_sysvar.data, &sysvar::fees::id()).unwrap(),
|
||||
SysvarAccountType::Fees(UiFees::default()),
|
||||
);
|
||||
#[allow(deprecated)]
|
||||
{
|
||||
let fees_sysvar = create_account_for_test(&Fees::default());
|
||||
assert_eq!(
|
||||
parse_sysvar(&fees_sysvar.data, &sysvar::fees::id()).unwrap(),
|
||||
SysvarAccountType::Fees(UiFees::default()),
|
||||
);
|
||||
|
||||
let hash = Hash::new(&[1; 32]);
|
||||
let fee_calculator = FeeCalculator {
|
||||
lamports_per_signature: 10,
|
||||
};
|
||||
let recent_blockhashes: RecentBlockhashes = vec![IterItem(0, &hash, &fee_calculator)]
|
||||
.into_iter()
|
||||
.collect();
|
||||
let recent_blockhashes_sysvar = create_account(&recent_blockhashes, 1);
|
||||
assert_eq!(
|
||||
parse_sysvar(
|
||||
&recent_blockhashes_sysvar.data,
|
||||
&sysvar::recent_blockhashes::id()
|
||||
)
|
||||
.unwrap(),
|
||||
SysvarAccountType::RecentBlockhashes(vec![UiRecentBlockhashesEntry {
|
||||
blockhash: hash.to_string(),
|
||||
fee_calculator: fee_calculator.into(),
|
||||
}]),
|
||||
);
|
||||
let recent_blockhashes: RecentBlockhashes =
|
||||
vec![IterItem(0, &hash, 10)].into_iter().collect();
|
||||
let recent_blockhashes_sysvar = create_account_for_test(&recent_blockhashes);
|
||||
assert_eq!(
|
||||
parse_sysvar(
|
||||
&recent_blockhashes_sysvar.data,
|
||||
&sysvar::recent_blockhashes::id()
|
||||
)
|
||||
.unwrap(),
|
||||
SysvarAccountType::RecentBlockhashes(vec![UiRecentBlockhashesEntry {
|
||||
blockhash: hash.to_string(),
|
||||
fee_calculator: FeeCalculator::new(10).into(),
|
||||
}]),
|
||||
);
|
||||
}
|
||||
|
||||
let rent = Rent {
|
||||
lamports_per_byte_year: 10,
|
||||
exemption_threshold: 2.0,
|
||||
burn_percent: 5,
|
||||
};
|
||||
let rent_sysvar = create_account(&rent, 1);
|
||||
let rent_sysvar = create_account_for_test(&rent);
|
||||
assert_eq!(
|
||||
parse_sysvar(&rent_sysvar.data, &sysvar::rent::id()).unwrap(),
|
||||
SysvarAccountType::Rent(rent.into()),
|
||||
);
|
||||
|
||||
let rewards_sysvar = create_account(&Rewards::default(), 1);
|
||||
let rewards_sysvar = create_account_for_test(&Rewards::default());
|
||||
assert_eq!(
|
||||
parse_sysvar(&rewards_sysvar.data, &sysvar::rewards::id()).unwrap(),
|
||||
SysvarAccountType::Rewards(UiRewards::default()),
|
||||
@ -284,7 +293,7 @@ mod test {
|
||||
|
||||
let mut slot_hashes = SlotHashes::default();
|
||||
slot_hashes.add(1, hash);
|
||||
let slot_hashes_sysvar = create_account(&slot_hashes, 1);
|
||||
let slot_hashes_sysvar = create_account_for_test(&slot_hashes);
|
||||
assert_eq!(
|
||||
parse_sysvar(&slot_hashes_sysvar.data, &sysvar::slot_hashes::id()).unwrap(),
|
||||
SysvarAccountType::SlotHashes(vec![UiSlotHashEntry {
|
||||
@ -295,7 +304,7 @@ mod test {
|
||||
|
||||
let mut slot_history = SlotHistory::default();
|
||||
slot_history.add(42);
|
||||
let slot_history_sysvar = create_account(&slot_history, 1);
|
||||
let slot_history_sysvar = create_account_for_test(&slot_history);
|
||||
assert_eq!(
|
||||
parse_sysvar(&slot_history_sysvar.data, &sysvar::slot_history::id()).unwrap(),
|
||||
SysvarAccountType::SlotHistory(UiSlotHistory {
|
||||
@ -311,7 +320,7 @@ mod test {
|
||||
deactivating: 3,
|
||||
};
|
||||
stake_history.add(1, stake_history_entry.clone());
|
||||
let stake_history_sysvar = create_account(&stake_history, 1);
|
||||
let stake_history_sysvar = create_account_for_test(&stake_history);
|
||||
assert_eq!(
|
||||
parse_sysvar(&stake_history_sysvar.data, &sysvar::stake_history::id()).unwrap(),
|
||||
SysvarAccountType::StakeHistory(vec![UiStakeHistoryEntry {
|
||||
|
@ -1,36 +1,38 @@
|
||||
use crate::{
|
||||
parse_account_data::{ParsableAccount, ParseAccountError},
|
||||
StringAmount, StringDecimals,
|
||||
};
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
use spl_token_v2_0::{
|
||||
solana_program::{
|
||||
program_option::COption, program_pack::Pack, pubkey::Pubkey as SplTokenPubkey,
|
||||
use {
|
||||
crate::{
|
||||
parse_account_data::{ParsableAccount, ParseAccountError},
|
||||
StringAmount, StringDecimals,
|
||||
},
|
||||
state::{Account, AccountState, Mint, Multisig},
|
||||
solana_sdk::pubkey::Pubkey,
|
||||
spl_token::{
|
||||
solana_program::{
|
||||
program_option::COption, program_pack::Pack, pubkey::Pubkey as SplTokenPubkey,
|
||||
},
|
||||
state::{Account, AccountState, Mint, Multisig},
|
||||
},
|
||||
std::str::FromStr,
|
||||
};
|
||||
use std::str::FromStr;
|
||||
|
||||
// A helper function to convert spl_token_v2_0::id() as spl_sdk::pubkey::Pubkey to
|
||||
// A helper function to convert spl_token::id() as spl_sdk::pubkey::Pubkey to
|
||||
// solana_sdk::pubkey::Pubkey
|
||||
pub fn spl_token_id_v2_0() -> Pubkey {
|
||||
Pubkey::from_str(&spl_token_v2_0::id().to_string()).unwrap()
|
||||
pub fn spl_token_id() -> Pubkey {
|
||||
Pubkey::new_from_array(spl_token::id().to_bytes())
|
||||
}
|
||||
|
||||
// A helper function to convert spl_token_v2_0::native_mint::id() as spl_sdk::pubkey::Pubkey to
|
||||
// A helper function to convert spl_token::native_mint::id() as spl_sdk::pubkey::Pubkey to
|
||||
// solana_sdk::pubkey::Pubkey
|
||||
pub fn spl_token_v2_0_native_mint() -> Pubkey {
|
||||
Pubkey::from_str(&spl_token_v2_0::native_mint::id().to_string()).unwrap()
|
||||
pub fn spl_token_native_mint() -> Pubkey {
|
||||
Pubkey::new_from_array(spl_token::native_mint::id().to_bytes())
|
||||
}
|
||||
|
||||
// A helper function to convert a solana_sdk::pubkey::Pubkey to spl_sdk::pubkey::Pubkey
|
||||
pub fn spl_token_v2_0_pubkey(pubkey: &Pubkey) -> SplTokenPubkey {
|
||||
SplTokenPubkey::from_str(&pubkey.to_string()).unwrap()
|
||||
pub fn spl_token_pubkey(pubkey: &Pubkey) -> SplTokenPubkey {
|
||||
SplTokenPubkey::new_from_array(pubkey.to_bytes())
|
||||
}
|
||||
|
||||
// A helper function to convert a spl_sdk::pubkey::Pubkey to solana_sdk::pubkey::Pubkey
|
||||
pub fn pubkey_from_spl_token_v2_0(pubkey: &SplTokenPubkey) -> Pubkey {
|
||||
Pubkey::from_str(&pubkey.to_string()).unwrap()
|
||||
pub fn pubkey_from_spl_token(pubkey: &SplTokenPubkey) -> Pubkey {
|
||||
Pubkey::new_from_array(pubkey.to_bytes())
|
||||
}
|
||||
|
||||
pub fn parse_token(
|
||||
@ -116,6 +118,7 @@ pub fn parse_token(
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq)]
|
||||
#[serde(rename_all = "camelCase", tag = "type", content = "info")]
|
||||
#[allow(clippy::large_enum_variant)]
|
||||
pub enum TokenAccountType {
|
||||
Account(UiTokenAccount),
|
||||
Mint(UiMint),
|
||||
@ -172,10 +175,12 @@ pub fn real_number_string(amount: u64, decimals: u8) -> StringDecimals {
|
||||
}
|
||||
|
||||
pub fn real_number_string_trimmed(amount: u64, decimals: u8) -> StringDecimals {
|
||||
let s = real_number_string(amount, decimals);
|
||||
let zeros_trimmed = s.trim_end_matches('0');
|
||||
let decimal_trimmed = zeros_trimmed.trim_end_matches('.');
|
||||
decimal_trimmed.to_string()
|
||||
let mut s = real_number_string(amount, decimals);
|
||||
if decimals > 0 {
|
||||
let zeros_trimmed = s.trim_end_matches('0');
|
||||
s = zeros_trimmed.trim_end_matches('.').to_string();
|
||||
}
|
||||
s
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
|
||||
@ -363,6 +368,14 @@ mod test {
|
||||
real_number_string_trimmed(1, 0)
|
||||
);
|
||||
assert_eq!(token_amount.ui_amount, Some(1.0));
|
||||
assert_eq!(&real_number_string(10, 0), "10");
|
||||
assert_eq!(&real_number_string_trimmed(10, 0), "10");
|
||||
let token_amount = token_amount_to_ui_amount(10, 0);
|
||||
assert_eq!(
|
||||
token_amount.ui_amount_string,
|
||||
real_number_string_trimmed(10, 0)
|
||||
);
|
||||
assert_eq!(token_amount.ui_amount, Some(10.0));
|
||||
assert_eq!(&real_number_string(1, 9), "0.000000001");
|
||||
assert_eq!(&real_number_string_trimmed(1, 9), "0.000000001");
|
||||
let token_amount = token_amount_to_ui_amount(1, 9);
|
||||
@ -402,4 +415,32 @@ mod test {
|
||||
);
|
||||
assert_eq!(token_amount.ui_amount, None);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_ui_token_amount_real_string_zero() {
|
||||
assert_eq!(&real_number_string(0, 0), "0");
|
||||
assert_eq!(&real_number_string_trimmed(0, 0), "0");
|
||||
let token_amount = token_amount_to_ui_amount(0, 0);
|
||||
assert_eq!(
|
||||
token_amount.ui_amount_string,
|
||||
real_number_string_trimmed(0, 0)
|
||||
);
|
||||
assert_eq!(token_amount.ui_amount, Some(0.0));
|
||||
assert_eq!(&real_number_string(0, 9), "0.000000000");
|
||||
assert_eq!(&real_number_string_trimmed(0, 9), "0");
|
||||
let token_amount = token_amount_to_ui_amount(0, 9);
|
||||
assert_eq!(
|
||||
token_amount.ui_amount_string,
|
||||
real_number_string_trimmed(0, 9)
|
||||
);
|
||||
assert_eq!(token_amount.ui_amount, Some(0.0));
|
||||
assert_eq!(&real_number_string(0, 25), "0.0000000000000000000000000");
|
||||
assert_eq!(&real_number_string_trimmed(0, 25), "0");
|
||||
let token_amount = token_amount_to_ui_amount(0, 20);
|
||||
assert_eq!(
|
||||
token_amount.ui_amount_string,
|
||||
real_number_string_trimmed(0, 20)
|
||||
);
|
||||
assert_eq!(token_amount.ui_amount, None);
|
||||
}
|
||||
}
|
||||
|
@ -1,9 +1,11 @@
|
||||
use crate::{parse_account_data::ParseAccountError, StringAmount};
|
||||
use solana_sdk::{
|
||||
clock::{Epoch, Slot},
|
||||
pubkey::Pubkey,
|
||||
use {
|
||||
crate::{parse_account_data::ParseAccountError, StringAmount},
|
||||
solana_sdk::{
|
||||
clock::{Epoch, Slot},
|
||||
pubkey::Pubkey,
|
||||
},
|
||||
solana_vote_program::vote_state::{BlockTimestamp, Lockout, VoteState},
|
||||
};
|
||||
use solana_vote_program::vote_state::{BlockTimestamp, Lockout, VoteState};
|
||||
|
||||
pub fn parse_vote(data: &[u8]) -> Result<VoteAccountType, ParseAccountError> {
|
||||
let mut vote_state = VoteState::deserialize(data).map_err(ParseAccountError::from)?;
|
||||
@ -121,8 +123,7 @@ struct UiEpochCredits {
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use solana_vote_program::vote_state::VoteStateVersions;
|
||||
use {super::*, solana_vote_program::vote_state::VoteStateVersions};
|
||||
|
||||
#[test]
|
||||
fn test_parse_vote() {
|
||||
|
@ -1,24 +1,22 @@
|
||||
[package]
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
edition = "2018"
|
||||
edition = "2021"
|
||||
name = "solana-accounts-bench"
|
||||
version = "1.6.0"
|
||||
version = "1.10.0"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
publish = false
|
||||
|
||||
[dependencies]
|
||||
log = "0.4.11"
|
||||
rayon = "1.5.0"
|
||||
solana-logger = { path = "../logger", version = "1.6.0" }
|
||||
solana-runtime = { path = "../runtime", version = "1.6.0" }
|
||||
solana-measure = { path = "../measure", version = "1.6.0" }
|
||||
solana-sdk = { path = "../sdk", version = "1.6.0" }
|
||||
solana-version = { path = "../version", version = "1.6.0" }
|
||||
rand = "0.7.0"
|
||||
log = "0.4.14"
|
||||
rayon = "1.5.1"
|
||||
solana-logger = { path = "../logger", version = "=1.10.0" }
|
||||
solana-runtime = { path = "../runtime", version = "=1.10.0" }
|
||||
solana-measure = { path = "../measure", version = "=1.10.0" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.10.0" }
|
||||
solana-version = { path = "../version", version = "=1.10.0" }
|
||||
clap = "2.33.1"
|
||||
crossbeam-channel = "0.4"
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
||||
|
@ -1,15 +1,19 @@
|
||||
#![allow(clippy::integer_arithmetic)]
|
||||
#[macro_use]
|
||||
extern crate log;
|
||||
use clap::{crate_description, crate_name, value_t, App, Arg};
|
||||
use rayon::prelude::*;
|
||||
use solana_measure::measure::Measure;
|
||||
use solana_runtime::{
|
||||
accounts::{create_test_accounts, update_accounts_bench, Accounts},
|
||||
accounts_index::Ancestors,
|
||||
use {
|
||||
clap::{crate_description, crate_name, value_t, App, Arg},
|
||||
rayon::prelude::*,
|
||||
solana_measure::measure::Measure,
|
||||
solana_runtime::{
|
||||
accounts::{create_test_accounts, update_accounts_bench, Accounts},
|
||||
accounts_db::AccountShrinkThreshold,
|
||||
accounts_index::AccountSecondaryIndexes,
|
||||
ancestors::Ancestors,
|
||||
},
|
||||
solana_sdk::{genesis_config::ClusterType, pubkey::Pubkey},
|
||||
std::{env, fs, path::PathBuf},
|
||||
};
|
||||
use solana_sdk::{genesis_config::ClusterType, pubkey::Pubkey};
|
||||
use std::{collections::HashSet, env, fs, path::PathBuf};
|
||||
|
||||
fn main() {
|
||||
solana_logger::setup();
|
||||
@ -58,8 +62,13 @@ fn main() {
|
||||
if fs::remove_dir_all(path.clone()).is_err() {
|
||||
println!("Warning: Couldn't remove {:?}", path);
|
||||
}
|
||||
let accounts =
|
||||
Accounts::new_with_config(vec![path], &ClusterType::Testnet, HashSet::new(), false);
|
||||
let accounts = Accounts::new_with_config_for_benches(
|
||||
vec![path],
|
||||
&ClusterType::Testnet,
|
||||
AccountSecondaryIndexes::default(),
|
||||
false,
|
||||
AccountShrinkThreshold::default(),
|
||||
);
|
||||
println!("Creating {} accounts", num_accounts);
|
||||
let mut create_time = Measure::start("create accounts");
|
||||
let pubkeys: Vec<_> = (0..num_slots)
|
||||
@ -83,17 +92,19 @@ fn main() {
|
||||
num_slots,
|
||||
create_time
|
||||
);
|
||||
let mut ancestors: Ancestors = vec![(0, 0)].into_iter().collect();
|
||||
let mut ancestors = Vec::with_capacity(num_slots);
|
||||
ancestors.push(0);
|
||||
for i in 1..num_slots {
|
||||
ancestors.insert(i as u64, i - 1);
|
||||
ancestors.push(i as u64);
|
||||
accounts.add_root(i as u64);
|
||||
}
|
||||
let ancestors = Ancestors::from(ancestors);
|
||||
let mut elapsed = vec![0; iterations];
|
||||
let mut elapsed_store = vec![0; iterations];
|
||||
for x in 0..iterations {
|
||||
if clean {
|
||||
let mut time = Measure::start("clean");
|
||||
accounts.accounts_db.clean_accounts(None);
|
||||
accounts.accounts_db.clean_accounts(None, false, None);
|
||||
time.stop();
|
||||
println!("{}", time);
|
||||
for slot in 0..num_slots {
|
||||
@ -112,6 +123,9 @@ fn main() {
|
||||
solana_sdk::clock::Slot::default(),
|
||||
&ancestors,
|
||||
None,
|
||||
false,
|
||||
None,
|
||||
false,
|
||||
);
|
||||
time_store.stop();
|
||||
if results != results_store {
|
||||
|
1
accounts-cluster-bench/.gitignore
vendored
Normal file
1
accounts-cluster-bench/.gitignore
vendored
Normal file
@ -0,0 +1 @@
|
||||
/farf/
|
@ -1,8 +1,8 @@
|
||||
[package]
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
edition = "2018"
|
||||
edition = "2021"
|
||||
name = "solana-accounts-cluster-bench"
|
||||
version = "1.6.0"
|
||||
version = "1.10.0"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@ -10,25 +10,28 @@ publish = false
|
||||
|
||||
[dependencies]
|
||||
clap = "2.33.1"
|
||||
log = "0.4.11"
|
||||
log = "0.4.14"
|
||||
rand = "0.7.0"
|
||||
rayon = "1.4.1"
|
||||
solana-account-decoder = { path = "../account-decoder", version = "1.6.0" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.6.0" }
|
||||
solana-client = { path = "../client", version = "1.6.0" }
|
||||
solana-core = { path = "../core", version = "1.6.0" }
|
||||
solana-measure = { path = "../measure", version = "1.6.0" }
|
||||
solana-logger = { path = "../logger", version = "1.6.0" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.6.0" }
|
||||
solana-faucet = { path = "../faucet", version = "1.6.0" }
|
||||
solana-runtime = { path = "../runtime", version = "1.6.0" }
|
||||
solana-sdk = { path = "../sdk", version = "1.6.0" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "1.6.0" }
|
||||
solana-version = { path = "../version", version = "1.6.0" }
|
||||
spl-token-v2-0 = { package = "spl-token", version = "=3.1.0", features = ["no-entrypoint"] }
|
||||
rayon = "1.5.1"
|
||||
solana-account-decoder = { path = "../account-decoder", version = "=1.10.0" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "=1.10.0" }
|
||||
solana-client = { path = "../client", version = "=1.10.0" }
|
||||
solana-faucet = { path = "../faucet", version = "=1.10.0" }
|
||||
solana-gossip = { path = "../gossip", version = "=1.10.0" }
|
||||
solana-logger = { path = "../logger", version = "=1.10.0" }
|
||||
solana-measure = { path = "../measure", version = "=1.10.0" }
|
||||
solana-net-utils = { path = "../net-utils", version = "=1.10.0" }
|
||||
solana-runtime = { path = "../runtime", version = "=1.10.0" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.10.0" }
|
||||
solana-streamer = { path = "../streamer", version = "=1.10.0" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "=1.10.0" }
|
||||
solana-version = { path = "../version", version = "=1.10.0" }
|
||||
spl-token = { version = "=3.2.0", features = ["no-entrypoint"] }
|
||||
|
||||
[dev-dependencies]
|
||||
solana-local-cluster = { path = "../local-cluster", version = "1.6.0" }
|
||||
solana-core = { path = "../core", version = "=1.10.0" }
|
||||
solana-local-cluster = { path = "../local-cluster", version = "=1.10.0" }
|
||||
solana-test-validator = { path = "../test-validator", version = "=1.10.0" }
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
||||
|
@ -1,36 +1,38 @@
|
||||
#![allow(clippy::integer_arithmetic)]
|
||||
use clap::{crate_description, crate_name, value_t, value_t_or_exit, App, Arg};
|
||||
use log::*;
|
||||
use rand::{thread_rng, Rng};
|
||||
use rayon::prelude::*;
|
||||
use solana_account_decoder::parse_token::spl_token_v2_0_pubkey;
|
||||
use solana_clap_utils::input_parsers::pubkey_of;
|
||||
use solana_client::rpc_client::RpcClient;
|
||||
use solana_core::gossip_service::discover;
|
||||
use solana_faucet::faucet::{request_airdrop_transaction, FAUCET_PORT};
|
||||
use solana_measure::measure::Measure;
|
||||
use solana_runtime::inline_spl_token_v2_0;
|
||||
use solana_sdk::{
|
||||
commitment_config::CommitmentConfig,
|
||||
message::Message,
|
||||
pubkey::Pubkey,
|
||||
rpc_port::DEFAULT_RPC_PORT,
|
||||
signature::{read_keypair_file, Keypair, Signature, Signer},
|
||||
system_instruction, system_program,
|
||||
timing::timestamp,
|
||||
transaction::Transaction,
|
||||
};
|
||||
use solana_transaction_status::parse_token::spl_token_v2_0_instruction;
|
||||
use spl_token_v2_0::solana_program::pubkey::Pubkey as SplPubkey;
|
||||
use std::{
|
||||
net::SocketAddr,
|
||||
process::exit,
|
||||
sync::{
|
||||
atomic::{AtomicBool, AtomicU64, Ordering},
|
||||
Arc, RwLock,
|
||||
use {
|
||||
clap::{crate_description, crate_name, value_t, values_t_or_exit, App, Arg},
|
||||
log::*,
|
||||
rand::{thread_rng, Rng},
|
||||
rayon::prelude::*,
|
||||
solana_account_decoder::parse_token::spl_token_pubkey,
|
||||
solana_clap_utils::input_parsers::pubkey_of,
|
||||
solana_client::{rpc_client::RpcClient, transaction_executor::TransactionExecutor},
|
||||
solana_faucet::faucet::{request_airdrop_transaction, FAUCET_PORT},
|
||||
solana_gossip::gossip_service::discover,
|
||||
solana_runtime::inline_spl_token,
|
||||
solana_sdk::{
|
||||
commitment_config::CommitmentConfig,
|
||||
instruction::{AccountMeta, Instruction},
|
||||
message::Message,
|
||||
pubkey::Pubkey,
|
||||
rpc_port::DEFAULT_RPC_PORT,
|
||||
signature::{read_keypair_file, Keypair, Signer},
|
||||
system_instruction, system_program,
|
||||
transaction::Transaction,
|
||||
},
|
||||
solana_streamer::socket::SocketAddrSpace,
|
||||
solana_transaction_status::parse_token::spl_token_instruction,
|
||||
std::{
|
||||
cmp::min,
|
||||
net::SocketAddr,
|
||||
process::exit,
|
||||
sync::{
|
||||
atomic::{AtomicU64, Ordering},
|
||||
Arc,
|
||||
},
|
||||
thread::sleep,
|
||||
time::{Duration, Instant},
|
||||
},
|
||||
thread::{sleep, Builder, JoinHandle},
|
||||
time::{Duration, Instant},
|
||||
};
|
||||
|
||||
pub fn airdrop_lamports(
|
||||
@ -51,8 +53,8 @@ pub fn airdrop_lamports(
|
||||
id.pubkey(),
|
||||
);
|
||||
|
||||
let (blockhash, _fee_calculator) = client.get_recent_blockhash().unwrap();
|
||||
match request_airdrop_transaction(&faucet_addr, &id.pubkey(), airdrop_amount, blockhash) {
|
||||
let blockhash = client.get_latest_blockhash().unwrap();
|
||||
match request_airdrop_transaction(faucet_addr, &id.pubkey(), airdrop_amount, blockhash) {
|
||||
Ok(transaction) => {
|
||||
let mut tries = 0;
|
||||
loop {
|
||||
@ -96,234 +98,144 @@ pub fn airdrop_lamports(
|
||||
true
|
||||
}
|
||||
|
||||
// signature, timestamp, id
|
||||
type PendingQueue = Vec<(Signature, u64, u64)>;
|
||||
|
||||
struct TransactionExecutor {
|
||||
sig_clear_t: JoinHandle<()>,
|
||||
sigs: Arc<RwLock<PendingQueue>>,
|
||||
cleared: Arc<RwLock<Vec<u64>>>,
|
||||
exit: Arc<AtomicBool>,
|
||||
counter: AtomicU64,
|
||||
client: RpcClient,
|
||||
struct SeedTracker {
|
||||
max_created: Arc<AtomicU64>,
|
||||
max_closed: Arc<AtomicU64>,
|
||||
}
|
||||
|
||||
impl TransactionExecutor {
|
||||
fn new(entrypoint_addr: SocketAddr) -> Self {
|
||||
let sigs = Arc::new(RwLock::new(Vec::new()));
|
||||
let cleared = Arc::new(RwLock::new(Vec::new()));
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let sig_clear_t = Self::start_sig_clear_thread(&exit, &sigs, &cleared, entrypoint_addr);
|
||||
let client =
|
||||
RpcClient::new_socket_with_commitment(entrypoint_addr, CommitmentConfig::confirmed());
|
||||
Self {
|
||||
sigs,
|
||||
cleared,
|
||||
sig_clear_t,
|
||||
exit,
|
||||
counter: AtomicU64::new(0),
|
||||
client,
|
||||
}
|
||||
}
|
||||
|
||||
fn num_outstanding(&self) -> usize {
|
||||
self.sigs.read().unwrap().len()
|
||||
}
|
||||
|
||||
fn push_transactions(&self, txs: Vec<Transaction>) -> Vec<u64> {
|
||||
let mut ids = vec![];
|
||||
let new_sigs = txs.into_iter().filter_map(|tx| {
|
||||
let id = self.counter.fetch_add(1, Ordering::Relaxed);
|
||||
ids.push(id);
|
||||
match self.client.send_transaction(&tx) {
|
||||
Ok(sig) => {
|
||||
return Some((sig, timestamp(), id));
|
||||
}
|
||||
Err(e) => {
|
||||
info!("error: {:#?}", e);
|
||||
}
|
||||
}
|
||||
None
|
||||
});
|
||||
let mut sigs_w = self.sigs.write().unwrap();
|
||||
sigs_w.extend(new_sigs);
|
||||
ids
|
||||
}
|
||||
|
||||
fn drain_cleared(&self) -> Vec<u64> {
|
||||
std::mem::take(&mut *self.cleared.write().unwrap())
|
||||
}
|
||||
|
||||
fn close(self) {
|
||||
self.exit.store(true, Ordering::Relaxed);
|
||||
self.sig_clear_t.join().unwrap();
|
||||
}
|
||||
|
||||
fn start_sig_clear_thread(
|
||||
exit: &Arc<AtomicBool>,
|
||||
sigs: &Arc<RwLock<PendingQueue>>,
|
||||
cleared: &Arc<RwLock<Vec<u64>>>,
|
||||
entrypoint_addr: SocketAddr,
|
||||
) -> JoinHandle<()> {
|
||||
let sigs = sigs.clone();
|
||||
let exit = exit.clone();
|
||||
let cleared = cleared.clone();
|
||||
Builder::new()
|
||||
.name("sig_clear".to_string())
|
||||
.spawn(move || {
|
||||
let client = RpcClient::new_socket_with_commitment(
|
||||
entrypoint_addr,
|
||||
CommitmentConfig::confirmed(),
|
||||
);
|
||||
let mut success = 0;
|
||||
let mut error_count = 0;
|
||||
let mut timed_out = 0;
|
||||
let mut last_log = Instant::now();
|
||||
while !exit.load(Ordering::Relaxed) {
|
||||
let sigs_len = sigs.read().unwrap().len();
|
||||
if sigs_len > 0 {
|
||||
let mut sigs_w = sigs.write().unwrap();
|
||||
let mut start = Measure::start("sig_status");
|
||||
let statuses: Vec<_> = sigs_w
|
||||
.chunks(200)
|
||||
.map(|sig_chunk| {
|
||||
let only_sigs: Vec<_> = sig_chunk.iter().map(|s| s.0).collect();
|
||||
client
|
||||
.get_signature_statuses(&only_sigs)
|
||||
.expect("status fail")
|
||||
.value
|
||||
})
|
||||
.flatten()
|
||||
.collect();
|
||||
let mut num_cleared = 0;
|
||||
let start_len = sigs_w.len();
|
||||
let now = timestamp();
|
||||
let mut new_ids = vec![];
|
||||
let mut i = 0;
|
||||
let mut j = 0;
|
||||
while i != sigs_w.len() {
|
||||
let mut retain = true;
|
||||
let sent_ts = sigs_w[i].1;
|
||||
if let Some(e) = &statuses[j] {
|
||||
debug!("error: {:?}", e);
|
||||
if e.status.is_ok() {
|
||||
success += 1;
|
||||
} else {
|
||||
error_count += 1;
|
||||
}
|
||||
num_cleared += 1;
|
||||
retain = false;
|
||||
} else if now - sent_ts > 30_000 {
|
||||
retain = false;
|
||||
timed_out += 1;
|
||||
}
|
||||
if !retain {
|
||||
new_ids.push(sigs_w.remove(i).2);
|
||||
} else {
|
||||
i += 1;
|
||||
}
|
||||
j += 1;
|
||||
}
|
||||
let final_sigs_len = sigs_w.len();
|
||||
drop(sigs_w);
|
||||
cleared.write().unwrap().extend(new_ids);
|
||||
start.stop();
|
||||
debug!(
|
||||
"sigs len: {:?} success: {} took: {}ms cleared: {}/{}",
|
||||
final_sigs_len,
|
||||
success,
|
||||
start.as_ms(),
|
||||
num_cleared,
|
||||
start_len,
|
||||
);
|
||||
if last_log.elapsed().as_millis() > 5000 {
|
||||
info!(
|
||||
"success: {} error: {} timed_out: {}",
|
||||
success, error_count, timed_out,
|
||||
);
|
||||
last_log = Instant::now();
|
||||
}
|
||||
}
|
||||
sleep(Duration::from_millis(200));
|
||||
}
|
||||
})
|
||||
.unwrap()
|
||||
}
|
||||
}
|
||||
|
||||
fn make_message(
|
||||
fn make_create_message(
|
||||
keypair: &Keypair,
|
||||
base_keypair: &Keypair,
|
||||
max_created_seed: Arc<AtomicU64>,
|
||||
num_instructions: usize,
|
||||
balance: u64,
|
||||
maybe_space: Option<u64>,
|
||||
mint: Option<Pubkey>,
|
||||
) -> (Message, Vec<Keypair>) {
|
||||
) -> Message {
|
||||
let space = maybe_space.unwrap_or_else(|| thread_rng().gen_range(0, 1000));
|
||||
|
||||
let (instructions, new_keypairs): (Vec<_>, Vec<_>) = (0..num_instructions)
|
||||
let instructions: Vec<_> = (0..num_instructions)
|
||||
.into_iter()
|
||||
.map(|_| {
|
||||
let new_keypair = Keypair::new();
|
||||
|
||||
.flat_map(|_| {
|
||||
let program_id = if mint.is_some() {
|
||||
inline_spl_token_v2_0::id()
|
||||
inline_spl_token::id()
|
||||
} else {
|
||||
system_program::id()
|
||||
};
|
||||
let mut instructions = vec![system_instruction::create_account(
|
||||
let seed = max_created_seed.fetch_add(1, Ordering::Relaxed).to_string();
|
||||
let to_pubkey =
|
||||
Pubkey::create_with_seed(&base_keypair.pubkey(), &seed, &program_id).unwrap();
|
||||
let mut instructions = vec![system_instruction::create_account_with_seed(
|
||||
&keypair.pubkey(),
|
||||
&new_keypair.pubkey(),
|
||||
&to_pubkey,
|
||||
&base_keypair.pubkey(),
|
||||
&seed,
|
||||
balance,
|
||||
space,
|
||||
&program_id,
|
||||
)];
|
||||
if let Some(mint_address) = mint {
|
||||
instructions.push(spl_token_v2_0_instruction(
|
||||
spl_token_v2_0::instruction::initialize_account(
|
||||
&spl_token_v2_0::id(),
|
||||
&spl_token_v2_0_pubkey(&new_keypair.pubkey()),
|
||||
&spl_token_v2_0_pubkey(&mint_address),
|
||||
&SplPubkey::new_unique(),
|
||||
instructions.push(spl_token_instruction(
|
||||
spl_token::instruction::initialize_account(
|
||||
&spl_token::id(),
|
||||
&spl_token_pubkey(&to_pubkey),
|
||||
&spl_token_pubkey(&mint_address),
|
||||
&spl_token_pubkey(&base_keypair.pubkey()),
|
||||
)
|
||||
.unwrap(),
|
||||
));
|
||||
}
|
||||
|
||||
(instructions, new_keypair)
|
||||
instructions
|
||||
})
|
||||
.unzip();
|
||||
let instructions: Vec<_> = instructions.into_iter().flatten().collect();
|
||||
.collect();
|
||||
|
||||
(
|
||||
Message::new(&instructions, Some(&keypair.pubkey())),
|
||||
new_keypairs,
|
||||
)
|
||||
Message::new(&instructions, Some(&keypair.pubkey()))
|
||||
}
|
||||
|
||||
fn make_close_message(
|
||||
keypair: &Keypair,
|
||||
base_keypair: &Keypair,
|
||||
max_created: Arc<AtomicU64>,
|
||||
max_closed: Arc<AtomicU64>,
|
||||
num_instructions: usize,
|
||||
balance: u64,
|
||||
spl_token: bool,
|
||||
) -> Message {
|
||||
let instructions: Vec<_> = (0..num_instructions)
|
||||
.into_iter()
|
||||
.filter_map(|_| {
|
||||
let program_id = if spl_token {
|
||||
inline_spl_token::id()
|
||||
} else {
|
||||
system_program::id()
|
||||
};
|
||||
let max_created_seed = max_created.load(Ordering::Relaxed);
|
||||
let max_closed_seed = max_closed.load(Ordering::Relaxed);
|
||||
if max_closed_seed >= max_created_seed {
|
||||
return None;
|
||||
}
|
||||
let seed = max_closed.fetch_add(1, Ordering::Relaxed).to_string();
|
||||
let address =
|
||||
Pubkey::create_with_seed(&base_keypair.pubkey(), &seed, &program_id).unwrap();
|
||||
if spl_token {
|
||||
Some(spl_token_instruction(
|
||||
spl_token::instruction::close_account(
|
||||
&spl_token::id(),
|
||||
&spl_token_pubkey(&address),
|
||||
&spl_token_pubkey(&keypair.pubkey()),
|
||||
&spl_token_pubkey(&base_keypair.pubkey()),
|
||||
&[],
|
||||
)
|
||||
.unwrap(),
|
||||
))
|
||||
} else {
|
||||
Some(system_instruction::transfer_with_seed(
|
||||
&address,
|
||||
&base_keypair.pubkey(),
|
||||
seed,
|
||||
&program_id,
|
||||
&keypair.pubkey(),
|
||||
balance,
|
||||
))
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
|
||||
Message::new(&instructions, Some(&keypair.pubkey()))
|
||||
}
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
fn run_accounts_bench(
|
||||
entrypoint_addr: SocketAddr,
|
||||
faucet_addr: SocketAddr,
|
||||
keypair: &Keypair,
|
||||
payer_keypairs: &[&Keypair],
|
||||
iterations: usize,
|
||||
maybe_space: Option<u64>,
|
||||
batch_size: usize,
|
||||
close_nth_batch: u64,
|
||||
maybe_lamports: Option<u64>,
|
||||
num_instructions: usize,
|
||||
mint: Option<Pubkey>,
|
||||
reclaim_accounts: bool,
|
||||
) {
|
||||
assert!(num_instructions > 0);
|
||||
let client =
|
||||
RpcClient::new_socket_with_commitment(entrypoint_addr, CommitmentConfig::confirmed());
|
||||
|
||||
info!("Targetting {}", entrypoint_addr);
|
||||
info!("Targeting {}", entrypoint_addr);
|
||||
|
||||
let mut last_blockhash = Instant::now();
|
||||
let mut latest_blockhash = Instant::now();
|
||||
let mut last_log = Instant::now();
|
||||
let mut count = 0;
|
||||
let mut recent_blockhash = client.get_recent_blockhash().expect("blockhash");
|
||||
let mut blockhash = client.get_latest_blockhash().expect("blockhash");
|
||||
let mut tx_sent_count = 0;
|
||||
let mut total_account_count = 0;
|
||||
let mut balance = client.get_balance(&keypair.pubkey()).unwrap_or(0);
|
||||
let mut total_accounts_created = 0;
|
||||
let mut total_accounts_closed = 0;
|
||||
let mut balances: Vec<_> = payer_keypairs
|
||||
.iter()
|
||||
.map(|keypair| client.get_balance(&keypair.pubkey()).unwrap_or(0))
|
||||
.collect();
|
||||
let mut last_balance = Instant::now();
|
||||
|
||||
let default_max_lamports = 1000;
|
||||
@ -334,72 +246,144 @@ fn run_accounts_bench(
|
||||
.expect("min balance")
|
||||
});
|
||||
|
||||
info!("Starting balance: {}", balance);
|
||||
let base_keypair = Keypair::new();
|
||||
let seed_tracker = SeedTracker {
|
||||
max_created: Arc::new(AtomicU64::default()),
|
||||
max_closed: Arc::new(AtomicU64::default()),
|
||||
};
|
||||
|
||||
info!("Starting balance(s): {:?}", balances);
|
||||
|
||||
let executor = TransactionExecutor::new(entrypoint_addr);
|
||||
|
||||
// Create and close messages both require 2 signatures, fake a 2 signature message to calculate fees
|
||||
let mut message = Message::new(
|
||||
&[
|
||||
Instruction::new_with_bytes(
|
||||
Pubkey::new_unique(),
|
||||
&[],
|
||||
vec![AccountMeta::new(Pubkey::new_unique(), true)],
|
||||
),
|
||||
Instruction::new_with_bytes(
|
||||
Pubkey::new_unique(),
|
||||
&[],
|
||||
vec![AccountMeta::new(Pubkey::new_unique(), true)],
|
||||
),
|
||||
],
|
||||
None,
|
||||
);
|
||||
|
||||
loop {
|
||||
if last_blockhash.elapsed().as_millis() > 10_000 {
|
||||
recent_blockhash = client.get_recent_blockhash().expect("blockhash");
|
||||
last_blockhash = Instant::now();
|
||||
if latest_blockhash.elapsed().as_millis() > 10_000 {
|
||||
blockhash = client.get_latest_blockhash().expect("blockhash");
|
||||
latest_blockhash = Instant::now();
|
||||
}
|
||||
|
||||
let (message, _keypairs) =
|
||||
make_message(keypair, num_instructions, min_balance, maybe_space, mint);
|
||||
let fee = recent_blockhash.1.calculate_fee(&message);
|
||||
message.recent_blockhash = blockhash;
|
||||
let fee = client
|
||||
.get_fee_for_message(&message)
|
||||
.expect("get_fee_for_message");
|
||||
let lamports = min_balance + fee;
|
||||
|
||||
if balance < lamports || last_balance.elapsed().as_millis() > 2000 {
|
||||
if let Ok(b) = client.get_balance(&keypair.pubkey()) {
|
||||
balance = b;
|
||||
}
|
||||
last_balance = Instant::now();
|
||||
if balance < lamports {
|
||||
info!(
|
||||
"Balance {} is less than needed: {}, doing aidrop...",
|
||||
balance, lamports
|
||||
);
|
||||
if !airdrop_lamports(&client, &faucet_addr, keypair, lamports * 100_000) {
|
||||
warn!("failed airdrop, exiting");
|
||||
return;
|
||||
for (i, balance) in balances.iter_mut().enumerate() {
|
||||
if *balance < lamports || last_balance.elapsed().as_millis() > 2000 {
|
||||
if let Ok(b) = client.get_balance(&payer_keypairs[i].pubkey()) {
|
||||
*balance = b;
|
||||
}
|
||||
last_balance = Instant::now();
|
||||
if *balance < lamports * 2 {
|
||||
info!(
|
||||
"Balance {} is less than needed: {}, doing aidrop...",
|
||||
balance, lamports
|
||||
);
|
||||
if !airdrop_lamports(
|
||||
&client,
|
||||
&faucet_addr,
|
||||
payer_keypairs[i],
|
||||
lamports * 100_000,
|
||||
) {
|
||||
warn!("failed airdrop, exiting");
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Create accounts
|
||||
let sigs_len = executor.num_outstanding();
|
||||
if sigs_len < batch_size {
|
||||
let num_to_create = batch_size - sigs_len;
|
||||
info!("creating {} new", num_to_create);
|
||||
let (txs, _new_keypairs): (Vec<_>, Vec<_>) = (0..num_to_create)
|
||||
.into_par_iter()
|
||||
.map(|_| {
|
||||
let (message, new_keypairs) =
|
||||
make_message(keypair, num_instructions, min_balance, maybe_space, mint);
|
||||
let signers: Vec<&Keypair> = new_keypairs
|
||||
.iter()
|
||||
.chain(std::iter::once(keypair))
|
||||
if num_to_create >= payer_keypairs.len() {
|
||||
info!("creating {} new", num_to_create);
|
||||
let chunk_size = num_to_create / payer_keypairs.len();
|
||||
if chunk_size > 0 {
|
||||
for (i, keypair) in payer_keypairs.iter().enumerate() {
|
||||
let txs: Vec<_> = (0..chunk_size)
|
||||
.into_par_iter()
|
||||
.map(|_| {
|
||||
let message = make_create_message(
|
||||
keypair,
|
||||
&base_keypair,
|
||||
seed_tracker.max_created.clone(),
|
||||
num_instructions,
|
||||
min_balance,
|
||||
maybe_space,
|
||||
mint,
|
||||
);
|
||||
let signers: Vec<&Keypair> = vec![keypair, &base_keypair];
|
||||
Transaction::new(&signers, message, blockhash)
|
||||
})
|
||||
.collect();
|
||||
balances[i] = balances[i].saturating_sub(lamports * txs.len() as u64);
|
||||
info!("txs: {}", txs.len());
|
||||
let new_ids = executor.push_transactions(txs);
|
||||
info!("ids: {}", new_ids.len());
|
||||
tx_sent_count += new_ids.len();
|
||||
total_accounts_created += num_instructions * new_ids.len();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if close_nth_batch > 0 {
|
||||
let num_batches_to_close =
|
||||
total_accounts_created as u64 / (close_nth_batch * batch_size as u64);
|
||||
let expected_closed = num_batches_to_close * batch_size as u64;
|
||||
let max_closed_seed = seed_tracker.max_closed.load(Ordering::Relaxed);
|
||||
// Close every account we've created with seed between max_closed_seed..expected_closed
|
||||
if max_closed_seed < expected_closed {
|
||||
let txs: Vec<_> = (0..expected_closed - max_closed_seed)
|
||||
.into_par_iter()
|
||||
.map(|_| {
|
||||
let message = make_close_message(
|
||||
payer_keypairs[0],
|
||||
&base_keypair,
|
||||
seed_tracker.max_created.clone(),
|
||||
seed_tracker.max_closed.clone(),
|
||||
1,
|
||||
min_balance,
|
||||
mint.is_some(),
|
||||
);
|
||||
let signers: Vec<&Keypair> = vec![payer_keypairs[0], &base_keypair];
|
||||
Transaction::new(&signers, message, blockhash)
|
||||
})
|
||||
.collect();
|
||||
(
|
||||
Transaction::new(&signers, message, recent_blockhash.0),
|
||||
new_keypairs,
|
||||
)
|
||||
})
|
||||
.unzip();
|
||||
balance = balance.saturating_sub(lamports * txs.len() as u64);
|
||||
info!("txs: {}", txs.len());
|
||||
let new_ids = executor.push_transactions(txs);
|
||||
info!("ids: {}", new_ids.len());
|
||||
tx_sent_count += new_ids.len();
|
||||
total_account_count += num_instructions * new_ids.len();
|
||||
balances[0] = balances[0].saturating_sub(fee * txs.len() as u64);
|
||||
info!("close txs: {}", txs.len());
|
||||
let new_ids = executor.push_transactions(txs);
|
||||
info!("close ids: {}", new_ids.len());
|
||||
tx_sent_count += new_ids.len();
|
||||
total_accounts_closed += new_ids.len() as u64;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
let _ = executor.drain_cleared();
|
||||
}
|
||||
|
||||
count += 1;
|
||||
if last_log.elapsed().as_millis() > 3000 {
|
||||
if last_log.elapsed().as_millis() > 3000 || count >= iterations {
|
||||
info!(
|
||||
"total_accounts: {} tx_sent_count: {} loop_count: {} balance: {}",
|
||||
total_account_count, tx_sent_count, count, balance
|
||||
"total_accounts_created: {} total_accounts_closed: {} tx_sent_count: {} loop_count: {} balance(s): {:?}",
|
||||
total_accounts_created, total_accounts_closed, tx_sent_count, count, balances
|
||||
);
|
||||
last_log = Instant::now();
|
||||
}
|
||||
@ -411,6 +395,83 @@ fn run_accounts_bench(
|
||||
}
|
||||
}
|
||||
executor.close();
|
||||
|
||||
if reclaim_accounts {
|
||||
let executor = TransactionExecutor::new(entrypoint_addr);
|
||||
loop {
|
||||
let max_closed_seed = seed_tracker.max_closed.load(Ordering::Relaxed);
|
||||
let max_created_seed = seed_tracker.max_created.load(Ordering::Relaxed);
|
||||
|
||||
if latest_blockhash.elapsed().as_millis() > 10_000 {
|
||||
blockhash = client.get_latest_blockhash().expect("blockhash");
|
||||
latest_blockhash = Instant::now();
|
||||
}
|
||||
message.recent_blockhash = blockhash;
|
||||
let fee = client
|
||||
.get_fee_for_message(&message)
|
||||
.expect("get_fee_for_message");
|
||||
|
||||
let sigs_len = executor.num_outstanding();
|
||||
if sigs_len < batch_size && max_closed_seed < max_created_seed {
|
||||
let num_to_close = min(
|
||||
batch_size - sigs_len,
|
||||
(max_created_seed - max_closed_seed) as usize,
|
||||
);
|
||||
if num_to_close >= payer_keypairs.len() {
|
||||
info!("closing {} accounts", num_to_close);
|
||||
let chunk_size = num_to_close / payer_keypairs.len();
|
||||
info!("{:?} chunk_size", chunk_size);
|
||||
if chunk_size > 0 {
|
||||
for (i, keypair) in payer_keypairs.iter().enumerate() {
|
||||
let txs: Vec<_> = (0..chunk_size)
|
||||
.into_par_iter()
|
||||
.filter_map(|_| {
|
||||
let message = make_close_message(
|
||||
keypair,
|
||||
&base_keypair,
|
||||
seed_tracker.max_created.clone(),
|
||||
seed_tracker.max_closed.clone(),
|
||||
num_instructions,
|
||||
min_balance,
|
||||
mint.is_some(),
|
||||
);
|
||||
if message.instructions.is_empty() {
|
||||
return None;
|
||||
}
|
||||
let signers: Vec<&Keypair> = vec![keypair, &base_keypair];
|
||||
Some(Transaction::new(&signers, message, blockhash))
|
||||
})
|
||||
.collect();
|
||||
balances[i] = balances[i].saturating_sub(fee * txs.len() as u64);
|
||||
info!("close txs: {}", txs.len());
|
||||
let new_ids = executor.push_transactions(txs);
|
||||
info!("close ids: {}", new_ids.len());
|
||||
tx_sent_count += new_ids.len();
|
||||
total_accounts_closed += (num_instructions * new_ids.len()) as u64;
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
let _ = executor.drain_cleared();
|
||||
}
|
||||
count += 1;
|
||||
if last_log.elapsed().as_millis() > 3000 || max_closed_seed >= max_created_seed {
|
||||
info!(
|
||||
"total_accounts_closed: {} tx_sent_count: {} loop_count: {} balance(s): {:?}",
|
||||
total_accounts_closed, tx_sent_count, count, balances
|
||||
);
|
||||
last_log = Instant::now();
|
||||
}
|
||||
|
||||
if max_closed_seed >= max_created_seed {
|
||||
break;
|
||||
}
|
||||
if executor.num_outstanding() >= batch_size {
|
||||
sleep(Duration::from_millis(500));
|
||||
}
|
||||
}
|
||||
executor.close();
|
||||
}
|
||||
}
|
||||
|
||||
fn main() {
|
||||
@ -450,19 +511,33 @@ fn main() {
|
||||
Arg::with_name("identity")
|
||||
.long("identity")
|
||||
.takes_value(true)
|
||||
.multiple(true)
|
||||
.value_name("FILE")
|
||||
.help("keypair file"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("batch_size")
|
||||
.long("batch_size")
|
||||
.long("batch-size")
|
||||
.takes_value(true)
|
||||
.value_name("BYTES")
|
||||
.help("Size of accounts to create"),
|
||||
.help("Number of transactions to send per batch"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("close_nth_batch")
|
||||
.long("close-frequency")
|
||||
.takes_value(true)
|
||||
.value_name("BYTES")
|
||||
.help(
|
||||
"Every `n` batches, create a batch of close transactions for
|
||||
the earliest remaining batch of accounts created.
|
||||
Note: Should be > 1 to avoid situations where the close \
|
||||
transactions will be submitted before the corresponding \
|
||||
create transactions have been confirmed",
|
||||
),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("num_instructions")
|
||||
.long("num_instructions")
|
||||
.long("num-instructions")
|
||||
.takes_value(true)
|
||||
.value_name("NUM")
|
||||
.help("Number of accounts to create on each transaction"),
|
||||
@ -472,7 +547,7 @@ fn main() {
|
||||
.long("iterations")
|
||||
.takes_value(true)
|
||||
.value_name("NUM")
|
||||
.help("Number of iterations to make"),
|
||||
.help("Number of iterations to make. 0 = unlimited iterations."),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("check_gossip")
|
||||
@ -485,6 +560,12 @@ fn main() {
|
||||
.takes_value(true)
|
||||
.help("Mint address to initialize account"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("reclaim_accounts")
|
||||
.long("reclaim-accounts")
|
||||
.takes_value(false)
|
||||
.help("Reclaim accounts after session ends; incompatible with --iterations 0"),
|
||||
)
|
||||
.get_matches();
|
||||
|
||||
let skip_gossip = !matches.is_present("check_gossip");
|
||||
@ -508,6 +589,7 @@ fn main() {
|
||||
let space = value_t!(matches, "space", u64).ok();
|
||||
let lamports = value_t!(matches, "lamports", u64).ok();
|
||||
let batch_size = value_t!(matches, "batch_size", usize).unwrap_or(4);
|
||||
let close_nth_batch = value_t!(matches, "close_nth_batch", u64).unwrap_or(0);
|
||||
let iterations = value_t!(matches, "iterations", usize).unwrap_or(10);
|
||||
let num_instructions = value_t!(matches, "num_instructions", usize).unwrap_or(1);
|
||||
if num_instructions == 0 || num_instructions > 500 {
|
||||
@ -517,20 +599,30 @@ fn main() {
|
||||
|
||||
let mint = pubkey_of(&matches, "mint");
|
||||
|
||||
let keypair =
|
||||
read_keypair_file(&value_t_or_exit!(matches, "identity", String)).expect("bad keypair");
|
||||
let payer_keypairs: Vec<_> = values_t_or_exit!(matches, "identity", String)
|
||||
.iter()
|
||||
.map(|keypair_string| {
|
||||
read_keypair_file(keypair_string)
|
||||
.unwrap_or_else(|_| panic!("bad keypair {:?}", keypair_string))
|
||||
})
|
||||
.collect();
|
||||
let mut payer_keypair_refs: Vec<&Keypair> = vec![];
|
||||
for keypair in payer_keypairs.iter() {
|
||||
payer_keypair_refs.push(keypair);
|
||||
}
|
||||
|
||||
let rpc_addr = if !skip_gossip {
|
||||
info!("Finding cluster entry: {:?}", entrypoint_addr);
|
||||
let (gossip_nodes, _validators) = discover(
|
||||
None,
|
||||
None, // keypair
|
||||
Some(&entrypoint_addr),
|
||||
None,
|
||||
Some(60),
|
||||
None,
|
||||
Some(&entrypoint_addr),
|
||||
None,
|
||||
0,
|
||||
None, // num_nodes
|
||||
Duration::from_secs(60), // timeout
|
||||
None, // find_node_by_pubkey
|
||||
Some(&entrypoint_addr), // find_node_by_gossip_addr
|
||||
None, // my_gossip_addr
|
||||
0, // my_shred_version
|
||||
SocketAddrSpace::Unspecified,
|
||||
)
|
||||
.unwrap_or_else(|err| {
|
||||
eprintln!("Failed to discover {} node: {:?}", entrypoint_addr, err);
|
||||
@ -547,25 +639,36 @@ fn main() {
|
||||
run_accounts_bench(
|
||||
rpc_addr,
|
||||
faucet_addr,
|
||||
&keypair,
|
||||
&payer_keypair_refs,
|
||||
iterations,
|
||||
space,
|
||||
batch_size,
|
||||
close_nth_batch,
|
||||
lamports,
|
||||
num_instructions,
|
||||
mint,
|
||||
matches.is_present("reclaim_accounts"),
|
||||
);
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub mod test {
|
||||
use super::*;
|
||||
use solana_core::validator::ValidatorConfig;
|
||||
use solana_local_cluster::{
|
||||
local_cluster::{ClusterConfig, LocalCluster},
|
||||
validator_configs::make_identical_validator_configs,
|
||||
use {
|
||||
super::*,
|
||||
solana_core::validator::ValidatorConfig,
|
||||
solana_faucet::faucet::run_local_faucet,
|
||||
solana_local_cluster::{
|
||||
local_cluster::{ClusterConfig, LocalCluster},
|
||||
validator_configs::make_identical_validator_configs,
|
||||
},
|
||||
solana_measure::measure::Measure,
|
||||
solana_sdk::{native_token::sol_to_lamports, poh_config::PohConfig},
|
||||
solana_test_validator::TestValidator,
|
||||
spl_token::{
|
||||
solana_program::program_pack::Pack,
|
||||
state::{Account, Mint},
|
||||
},
|
||||
};
|
||||
use solana_sdk::poh_config::PohConfig;
|
||||
|
||||
#[test]
|
||||
fn test_accounts_cluster_bench() {
|
||||
@ -581,23 +684,127 @@ pub mod test {
|
||||
};
|
||||
|
||||
let faucet_addr = SocketAddr::from(([127, 0, 0, 1], 9900));
|
||||
let cluster = LocalCluster::new(&mut config);
|
||||
let cluster = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified);
|
||||
let iterations = 10;
|
||||
let maybe_space = None;
|
||||
let batch_size = 100;
|
||||
let close_nth_batch = 100;
|
||||
let maybe_lamports = None;
|
||||
let num_instructions = 2;
|
||||
let mut start = Measure::start("total accounts run");
|
||||
run_accounts_bench(
|
||||
cluster.entry_point_info.rpc,
|
||||
faucet_addr,
|
||||
&cluster.funding_keypair,
|
||||
&[&cluster.funding_keypair],
|
||||
iterations,
|
||||
maybe_space,
|
||||
batch_size,
|
||||
close_nth_batch,
|
||||
maybe_lamports,
|
||||
num_instructions,
|
||||
None,
|
||||
false,
|
||||
);
|
||||
start.stop();
|
||||
info!("{}", start);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_create_then_reclaim_spl_token_accounts() {
|
||||
solana_logger::setup();
|
||||
let mint_keypair = Keypair::new();
|
||||
let mint_pubkey = mint_keypair.pubkey();
|
||||
let faucet_addr = run_local_faucet(mint_keypair, None);
|
||||
let test_validator = TestValidator::with_custom_fees(
|
||||
mint_pubkey,
|
||||
1,
|
||||
Some(faucet_addr),
|
||||
SocketAddrSpace::Unspecified,
|
||||
);
|
||||
let rpc_client =
|
||||
RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed());
|
||||
|
||||
// Created funder
|
||||
let funder = Keypair::new();
|
||||
let latest_blockhash = rpc_client.get_latest_blockhash().unwrap();
|
||||
let signature = rpc_client
|
||||
.request_airdrop_with_blockhash(
|
||||
&funder.pubkey(),
|
||||
sol_to_lamports(1.0),
|
||||
&latest_blockhash,
|
||||
)
|
||||
.unwrap();
|
||||
rpc_client
|
||||
.confirm_transaction_with_spinner(
|
||||
&signature,
|
||||
&latest_blockhash,
|
||||
CommitmentConfig::confirmed(),
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
// Create Mint
|
||||
let spl_mint_keypair = Keypair::new();
|
||||
let spl_mint_len = Mint::get_packed_len();
|
||||
let spl_mint_rent = rpc_client
|
||||
.get_minimum_balance_for_rent_exemption(spl_mint_len)
|
||||
.unwrap();
|
||||
let transaction = Transaction::new_signed_with_payer(
|
||||
&[
|
||||
system_instruction::create_account(
|
||||
&funder.pubkey(),
|
||||
&spl_mint_keypair.pubkey(),
|
||||
spl_mint_rent,
|
||||
spl_mint_len as u64,
|
||||
&inline_spl_token::id(),
|
||||
),
|
||||
spl_token_instruction(
|
||||
spl_token::instruction::initialize_mint(
|
||||
&spl_token::id(),
|
||||
&spl_token_pubkey(&spl_mint_keypair.pubkey()),
|
||||
&spl_token_pubkey(&spl_mint_keypair.pubkey()),
|
||||
None,
|
||||
2,
|
||||
)
|
||||
.unwrap(),
|
||||
),
|
||||
],
|
||||
Some(&funder.pubkey()),
|
||||
&[&funder, &spl_mint_keypair],
|
||||
latest_blockhash,
|
||||
);
|
||||
let _sig = rpc_client
|
||||
.send_and_confirm_transaction(&transaction)
|
||||
.unwrap();
|
||||
|
||||
let account_len = Account::get_packed_len();
|
||||
let minimum_balance = rpc_client
|
||||
.get_minimum_balance_for_rent_exemption(account_len)
|
||||
.unwrap();
|
||||
|
||||
let iterations = 5;
|
||||
let batch_size = 100;
|
||||
let close_nth_batch = 0;
|
||||
let num_instructions = 4;
|
||||
let mut start = Measure::start("total accounts run");
|
||||
let keypair0 = Keypair::new();
|
||||
let keypair1 = Keypair::new();
|
||||
let keypair2 = Keypair::new();
|
||||
run_accounts_bench(
|
||||
test_validator
|
||||
.rpc_url()
|
||||
.replace("http://", "")
|
||||
.parse()
|
||||
.unwrap(),
|
||||
faucet_addr,
|
||||
&[&keypair0, &keypair1, &keypair2],
|
||||
iterations,
|
||||
Some(account_len as u64),
|
||||
batch_size,
|
||||
close_nth_batch,
|
||||
Some(minimum_balance),
|
||||
num_instructions,
|
||||
Some(spl_mint_keypair.pubkey()),
|
||||
true,
|
||||
);
|
||||
start.stop();
|
||||
info!("{}", start);
|
||||
|
19
accountsdb-plugin-interface/Cargo.toml
Normal file
19
accountsdb-plugin-interface/Cargo.toml
Normal file
@ -0,0 +1,19 @@
|
||||
[package]
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
edition = "2021"
|
||||
name = "solana-accountsdb-plugin-interface"
|
||||
description = "The Solana AccountsDb plugin interface."
|
||||
version = "1.10.0"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
documentation = "https://docs.rs/solana-accountsdb-plugin-interface"
|
||||
|
||||
[dependencies]
|
||||
log = "0.4.11"
|
||||
thiserror = "1.0.30"
|
||||
solana-sdk = { path = "../sdk", version = "=1.10.0" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "=1.10.0" }
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
20
accountsdb-plugin-interface/README.md
Normal file
20
accountsdb-plugin-interface/README.md
Normal file
@ -0,0 +1,20 @@
|
||||
<p align="center">
|
||||
<a href="https://solana.com">
|
||||
<img alt="Solana" src="https://i.imgur.com/IKyzQ6T.png" width="250" />
|
||||
</a>
|
||||
</p>
|
||||
|
||||
# Solana AccountsDb Plugin Interface
|
||||
|
||||
This crate enables an AccountsDb plugin to be plugged into the Solana Validator runtime to take actions
|
||||
at the time of each account update; for example, saving the account state to an external database. The plugin must implement the `AccountsDbPlugin` trait. Please see the detail of the `accountsdb_plugin_interface.rs` for the interface definition.
|
||||
|
||||
The plugin should produce a `cdylib` dynamic library, which must expose a `C` function `_create_plugin()` that
|
||||
instantiates the implementation of the interface.
|
||||
|
||||
The `solana-accountsdb-plugin-postgres` crate provides an example of how to create a plugin which saves the accounts data into an
|
||||
external PostgreSQL databases.
|
||||
|
||||
More information about Solana is available in the [Solana documentation](https://docs.solana.com/).
|
||||
|
||||
Still have questions? Ask us on [Discord](https://discordapp.com/invite/pquxPsq)
|
220
accountsdb-plugin-interface/src/accountsdb_plugin_interface.rs
Normal file
220
accountsdb-plugin-interface/src/accountsdb_plugin_interface.rs
Normal file
@ -0,0 +1,220 @@
|
||||
/// The interface for AccountsDb plugins. A plugin must implement
|
||||
/// the AccountsDbPlugin trait to work with the runtime.
|
||||
/// In addition, the dynamic library must export a "C" function _create_plugin which
|
||||
/// creates the implementation of the plugin.
|
||||
use {
|
||||
solana_sdk::{clock::UnixTimestamp, signature::Signature, transaction::SanitizedTransaction},
|
||||
solana_transaction_status::{Reward, TransactionStatusMeta},
|
||||
std::{any::Any, error, io},
|
||||
thiserror::Error,
|
||||
};
|
||||
|
||||
impl Eq for ReplicaAccountInfo<'_> {}
|
||||
|
||||
#[derive(Clone, PartialEq, Debug)]
|
||||
/// Information about an account being updated
|
||||
pub struct ReplicaAccountInfo<'a> {
|
||||
/// The Pubkey for the account
|
||||
pub pubkey: &'a [u8],
|
||||
|
||||
/// The lamports for the account
|
||||
pub lamports: u64,
|
||||
|
||||
/// The Pubkey of the owner program account
|
||||
pub owner: &'a [u8],
|
||||
|
||||
/// This account's data contains a loaded program (and is now read-only)
|
||||
pub executable: bool,
|
||||
|
||||
/// The epoch at which this account will next owe rent
|
||||
pub rent_epoch: u64,
|
||||
|
||||
/// The data held in this account.
|
||||
pub data: &'a [u8],
|
||||
|
||||
/// A global monotonically increasing atomic number, which can be used
|
||||
/// to tell the order of the account update. For example, when an
|
||||
/// account is updated in the same slot multiple times, the update
|
||||
/// with higher write_version should supersede the one with lower
|
||||
/// write_version.
|
||||
pub write_version: u64,
|
||||
}
|
||||
|
||||
/// A wrapper to future-proof ReplicaAccountInfo handling.
|
||||
/// If there were a change to the structure of ReplicaAccountInfo,
|
||||
/// there would be new enum entry for the newer version, forcing
|
||||
/// plugin implementations to handle the change.
|
||||
pub enum ReplicaAccountInfoVersions<'a> {
|
||||
V0_0_1(&'a ReplicaAccountInfo<'a>),
|
||||
}
|
||||
|
||||
/// Information about a transaction
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct ReplicaTransactionInfo<'a> {
|
||||
/// The first signature of the transaction, used for identifying the transaction.
|
||||
pub signature: &'a Signature,
|
||||
|
||||
/// Indicates if the transaction is a simple vote transaction.
|
||||
pub is_vote: bool,
|
||||
|
||||
/// The sanitized transaction.
|
||||
pub transaction: &'a SanitizedTransaction,
|
||||
|
||||
/// Metadata of the transaction status.
|
||||
pub transaction_status_meta: &'a TransactionStatusMeta,
|
||||
}
|
||||
|
||||
/// A wrapper to future-proof ReplicaTransactionInfo handling.
|
||||
/// If there were a change to the structure of ReplicaTransactionInfo,
|
||||
/// there would be new enum entry for the newer version, forcing
|
||||
/// plugin implementations to handle the change.
|
||||
pub enum ReplicaTransactionInfoVersions<'a> {
|
||||
V0_0_1(&'a ReplicaTransactionInfo<'a>),
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct ReplicaBlockInfo<'a> {
|
||||
pub slot: u64,
|
||||
pub blockhash: &'a str,
|
||||
pub rewards: &'a [Reward],
|
||||
pub block_time: Option<UnixTimestamp>,
|
||||
pub block_height: Option<u64>,
|
||||
}
|
||||
|
||||
pub enum ReplicaBlockInfoVersions<'a> {
|
||||
V0_0_1(&'a ReplicaBlockInfo<'a>),
|
||||
}
|
||||
|
||||
/// Errors returned by plugin calls
|
||||
#[derive(Error, Debug)]
|
||||
pub enum AccountsDbPluginError {
|
||||
/// Error opening the configuration file; for example, when the file
|
||||
/// is not found or when the validator process has no permission to read it.
|
||||
#[error("Error opening config file. Error detail: ({0}).")]
|
||||
ConfigFileOpenError(#[from] io::Error),
|
||||
|
||||
/// Error in reading the content of the config file or the content
|
||||
/// is not in the expected format.
|
||||
#[error("Error reading config file. Error message: ({msg})")]
|
||||
ConfigFileReadError { msg: String },
|
||||
|
||||
/// Error when updating the account.
|
||||
#[error("Error updating account. Error message: ({msg})")]
|
||||
AccountsUpdateError { msg: String },
|
||||
|
||||
/// Error when updating the slot status
|
||||
#[error("Error updating slot status. Error message: ({msg})")]
|
||||
SlotStatusUpdateError { msg: String },
|
||||
|
||||
/// Any custom error defined by the plugin.
|
||||
#[error("Plugin-defined custom error. Error message: ({0})")]
|
||||
Custom(Box<dyn error::Error + Send + Sync>),
|
||||
}
|
||||
|
||||
/// The current status of a slot
|
||||
#[derive(Debug, Clone)]
|
||||
pub enum SlotStatus {
|
||||
/// The highest slot of the heaviest fork processed by the node. Ledger state at this slot is
|
||||
/// not derived from a confirmed or finalized block, but if multiple forks are present, is from
|
||||
/// the fork the validator believes is most likely to finalize.
|
||||
Processed,
|
||||
|
||||
/// The highest slot having reached max vote lockout.
|
||||
Rooted,
|
||||
|
||||
/// The highest slot that has been voted on by supermajority of the cluster, ie. is confirmed.
|
||||
Confirmed,
|
||||
}
|
||||
|
||||
impl SlotStatus {
|
||||
pub fn as_str(&self) -> &'static str {
|
||||
match self {
|
||||
SlotStatus::Confirmed => "confirmed",
|
||||
SlotStatus::Processed => "processed",
|
||||
SlotStatus::Rooted => "rooted",
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub type Result<T> = std::result::Result<T, AccountsDbPluginError>;
|
||||
|
||||
/// Defines an AccountsDb plugin, to stream data from the runtime.
|
||||
/// AccountsDb plugins must describe desired behavior for load and unload,
|
||||
/// as well as how they will handle streamed data.
|
||||
pub trait AccountsDbPlugin: Any + Send + Sync + std::fmt::Debug {
|
||||
fn name(&self) -> &'static str;
|
||||
|
||||
/// The callback called when a plugin is loaded by the system,
|
||||
/// used for doing whatever initialization is required by the plugin.
|
||||
/// The _config_file contains the name of the
|
||||
/// of the config file. The config must be in JSON format and
|
||||
/// include a field "libpath" indicating the full path
|
||||
/// name of the shared library implementing this interface.
|
||||
fn on_load(&mut self, _config_file: &str) -> Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// The callback called right before a plugin is unloaded by the system
|
||||
/// Used for doing cleanup before unload.
|
||||
fn on_unload(&mut self) {}
|
||||
|
||||
/// Called when an account is updated at a slot.
|
||||
/// When `is_startup` is true, it indicates the account is loaded from
|
||||
/// snapshots when the validator starts up. When `is_startup` is false,
|
||||
/// the account is updated during transaction processing.
|
||||
#[allow(unused_variables)]
|
||||
fn update_account(
|
||||
&mut self,
|
||||
account: ReplicaAccountInfoVersions,
|
||||
slot: u64,
|
||||
is_startup: bool,
|
||||
) -> Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Called when all accounts are notified of during startup.
|
||||
fn notify_end_of_startup(&mut self) -> Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Called when a slot status is updated
|
||||
#[allow(unused_variables)]
|
||||
fn update_slot_status(
|
||||
&mut self,
|
||||
slot: u64,
|
||||
parent: Option<u64>,
|
||||
status: SlotStatus,
|
||||
) -> Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Called when a transaction is updated at a slot.
|
||||
#[allow(unused_variables)]
|
||||
fn notify_transaction(
|
||||
&mut self,
|
||||
transaction: ReplicaTransactionInfoVersions,
|
||||
slot: u64,
|
||||
) -> Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Called when block's metadata is updated.
|
||||
#[allow(unused_variables)]
|
||||
fn notify_block_metadata(&mut self, blockinfo: ReplicaBlockInfoVersions) -> Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Check if the plugin is interested in account data
|
||||
/// Default is true -- if the plugin is not interested in
|
||||
/// account data, please return false.
|
||||
fn account_data_notifications_enabled(&self) -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
/// Check if the plugin is interested in transaction data
|
||||
/// Default is false -- if the plugin is not interested in
|
||||
/// transaction data, please return false.
|
||||
fn transaction_notifications_enabled(&self) -> bool {
|
||||
false
|
||||
}
|
||||
}
|
1
accountsdb-plugin-interface/src/lib.rs
Normal file
1
accountsdb-plugin-interface/src/lib.rs
Normal file
@ -0,0 +1 @@
|
||||
pub mod accountsdb_plugin_interface;
|
29
accountsdb-plugin-manager/Cargo.toml
Normal file
29
accountsdb-plugin-manager/Cargo.toml
Normal file
@ -0,0 +1,29 @@
|
||||
[package]
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
edition = "2021"
|
||||
name = "solana-accountsdb-plugin-manager"
|
||||
description = "The Solana AccountsDb plugin manager."
|
||||
version = "1.10.0"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
documentation = "https://docs.rs/solana-validator"
|
||||
|
||||
[dependencies]
|
||||
bs58 = "0.4.0"
|
||||
crossbeam-channel = "0.5"
|
||||
json5 = "0.4.1"
|
||||
libloading = "0.7.3"
|
||||
log = "0.4.11"
|
||||
serde_json = "1.0.78"
|
||||
solana-accountsdb-plugin-interface = { path = "../accountsdb-plugin-interface", version = "=1.10.0" }
|
||||
solana-measure = { path = "../measure", version = "=1.10.0" }
|
||||
solana-metrics = { path = "../metrics", version = "=1.10.0" }
|
||||
solana-rpc = { path = "../rpc", version = "=1.10.0" }
|
||||
solana-runtime = { path = "../runtime", version = "=1.10.0" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.10.0" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "=1.10.0" }
|
||||
thiserror = "1.0.30"
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
180
accountsdb-plugin-manager/src/accounts_update_notifier.rs
Normal file
180
accountsdb-plugin-manager/src/accounts_update_notifier.rs
Normal file
@ -0,0 +1,180 @@
|
||||
/// Module responsible for notifying plugins of account updates
|
||||
use {
|
||||
crate::accountsdb_plugin_manager::AccountsDbPluginManager,
|
||||
log::*,
|
||||
solana_accountsdb_plugin_interface::accountsdb_plugin_interface::{
|
||||
ReplicaAccountInfo, ReplicaAccountInfoVersions,
|
||||
},
|
||||
solana_measure::measure::Measure,
|
||||
solana_metrics::*,
|
||||
solana_runtime::{
|
||||
accounts_update_notifier_interface::AccountsUpdateNotifierInterface,
|
||||
append_vec::{StoredAccountMeta, StoredMeta},
|
||||
},
|
||||
solana_sdk::{
|
||||
account::{AccountSharedData, ReadableAccount},
|
||||
clock::Slot,
|
||||
},
|
||||
std::sync::{Arc, RwLock},
|
||||
};
|
||||
#[derive(Debug)]
|
||||
pub(crate) struct AccountsUpdateNotifierImpl {
|
||||
plugin_manager: Arc<RwLock<AccountsDbPluginManager>>,
|
||||
}
|
||||
|
||||
impl AccountsUpdateNotifierInterface for AccountsUpdateNotifierImpl {
|
||||
fn notify_account_update(&self, slot: Slot, meta: &StoredMeta, account: &AccountSharedData) {
|
||||
if let Some(account_info) = self.accountinfo_from_shared_account_data(meta, account) {
|
||||
self.notify_plugins_of_account_update(account_info, slot, false);
|
||||
}
|
||||
}
|
||||
|
||||
fn notify_account_restore_from_snapshot(&self, slot: Slot, account: &StoredAccountMeta) {
|
||||
let mut measure_all = Measure::start("accountsdb-plugin-notify-account-restore-all");
|
||||
let mut measure_copy = Measure::start("accountsdb-plugin-copy-stored-account-info");
|
||||
|
||||
let account = self.accountinfo_from_stored_account_meta(account);
|
||||
measure_copy.stop();
|
||||
|
||||
inc_new_counter_debug!(
|
||||
"accountsdb-plugin-copy-stored-account-info-us",
|
||||
measure_copy.as_us() as usize,
|
||||
100000,
|
||||
100000
|
||||
);
|
||||
|
||||
if let Some(account_info) = account {
|
||||
self.notify_plugins_of_account_update(account_info, slot, true);
|
||||
}
|
||||
measure_all.stop();
|
||||
|
||||
inc_new_counter_debug!(
|
||||
"accountsdb-plugin-notify-account-restore-all-us",
|
||||
measure_all.as_us() as usize,
|
||||
100000,
|
||||
100000
|
||||
);
|
||||
}
|
||||
|
||||
fn notify_end_of_restore_from_snapshot(&self) {
|
||||
let mut plugin_manager = self.plugin_manager.write().unwrap();
|
||||
if plugin_manager.plugins.is_empty() {
|
||||
return;
|
||||
}
|
||||
|
||||
for plugin in plugin_manager.plugins.iter_mut() {
|
||||
let mut measure = Measure::start("accountsdb-plugin-end-of-restore-from-snapshot");
|
||||
match plugin.notify_end_of_startup() {
|
||||
Err(err) => {
|
||||
error!(
|
||||
"Failed to notify the end of restore from snapshot, error: {} to plugin {}",
|
||||
err,
|
||||
plugin.name()
|
||||
)
|
||||
}
|
||||
Ok(_) => {
|
||||
trace!(
|
||||
"Successfully notified the end of restore from snapshot to plugin {}",
|
||||
plugin.name()
|
||||
);
|
||||
}
|
||||
}
|
||||
measure.stop();
|
||||
inc_new_counter_debug!(
|
||||
"accountsdb-plugin-end-of-restore-from-snapshot",
|
||||
measure.as_us() as usize
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl AccountsUpdateNotifierImpl {
|
||||
pub fn new(plugin_manager: Arc<RwLock<AccountsDbPluginManager>>) -> Self {
|
||||
AccountsUpdateNotifierImpl { plugin_manager }
|
||||
}
|
||||
|
||||
fn accountinfo_from_shared_account_data<'a>(
|
||||
&self,
|
||||
meta: &'a StoredMeta,
|
||||
account: &'a AccountSharedData,
|
||||
) -> Option<ReplicaAccountInfo<'a>> {
|
||||
Some(ReplicaAccountInfo {
|
||||
pubkey: meta.pubkey.as_ref(),
|
||||
lamports: account.lamports(),
|
||||
owner: account.owner().as_ref(),
|
||||
executable: account.executable(),
|
||||
rent_epoch: account.rent_epoch(),
|
||||
data: account.data(),
|
||||
write_version: meta.write_version,
|
||||
})
|
||||
}
|
||||
|
||||
fn accountinfo_from_stored_account_meta<'a>(
|
||||
&self,
|
||||
stored_account_meta: &'a StoredAccountMeta,
|
||||
) -> Option<ReplicaAccountInfo<'a>> {
|
||||
Some(ReplicaAccountInfo {
|
||||
pubkey: stored_account_meta.meta.pubkey.as_ref(),
|
||||
lamports: stored_account_meta.account_meta.lamports,
|
||||
owner: stored_account_meta.account_meta.owner.as_ref(),
|
||||
executable: stored_account_meta.account_meta.executable,
|
||||
rent_epoch: stored_account_meta.account_meta.rent_epoch,
|
||||
data: stored_account_meta.data,
|
||||
write_version: stored_account_meta.meta.write_version,
|
||||
})
|
||||
}
|
||||
|
||||
fn notify_plugins_of_account_update(
|
||||
&self,
|
||||
account: ReplicaAccountInfo,
|
||||
slot: Slot,
|
||||
is_startup: bool,
|
||||
) {
|
||||
let mut measure2 = Measure::start("accountsdb-plugin-notify_plugins_of_account_update");
|
||||
let mut plugin_manager = self.plugin_manager.write().unwrap();
|
||||
|
||||
if plugin_manager.plugins.is_empty() {
|
||||
return;
|
||||
}
|
||||
for plugin in plugin_manager.plugins.iter_mut() {
|
||||
let mut measure = Measure::start("accountsdb-plugin-update-account");
|
||||
match plugin.update_account(
|
||||
ReplicaAccountInfoVersions::V0_0_1(&account),
|
||||
slot,
|
||||
is_startup,
|
||||
) {
|
||||
Err(err) => {
|
||||
error!(
|
||||
"Failed to update account {} at slot {}, error: {} to plugin {}",
|
||||
bs58::encode(account.pubkey).into_string(),
|
||||
slot,
|
||||
err,
|
||||
plugin.name()
|
||||
)
|
||||
}
|
||||
Ok(_) => {
|
||||
trace!(
|
||||
"Successfully updated account {} at slot {} to plugin {}",
|
||||
bs58::encode(account.pubkey).into_string(),
|
||||
slot,
|
||||
plugin.name()
|
||||
);
|
||||
}
|
||||
}
|
||||
measure.stop();
|
||||
inc_new_counter_debug!(
|
||||
"accountsdb-plugin-update-account-us",
|
||||
measure.as_us() as usize,
|
||||
100000,
|
||||
100000
|
||||
);
|
||||
}
|
||||
measure2.stop();
|
||||
inc_new_counter_debug!(
|
||||
"accountsdb-plugin-notify_plugins_of_account_update-us",
|
||||
measure2.as_us() as usize,
|
||||
100000,
|
||||
100000
|
||||
);
|
||||
}
|
||||
}
|
75
accountsdb-plugin-manager/src/accountsdb_plugin_manager.rs
Normal file
75
accountsdb-plugin-manager/src/accountsdb_plugin_manager.rs
Normal file
@ -0,0 +1,75 @@
|
||||
/// Managing the AccountsDb plugins
|
||||
use {
|
||||
libloading::{Library, Symbol},
|
||||
log::*,
|
||||
solana_accountsdb_plugin_interface::accountsdb_plugin_interface::AccountsDbPlugin,
|
||||
std::error::Error,
|
||||
};
|
||||
|
||||
#[derive(Default, Debug)]
|
||||
pub struct AccountsDbPluginManager {
|
||||
pub plugins: Vec<Box<dyn AccountsDbPlugin>>,
|
||||
libs: Vec<Library>,
|
||||
}
|
||||
|
||||
impl AccountsDbPluginManager {
|
||||
pub fn new() -> Self {
|
||||
AccountsDbPluginManager {
|
||||
plugins: Vec::default(),
|
||||
libs: Vec::default(),
|
||||
}
|
||||
}
|
||||
|
||||
/// # Safety
|
||||
///
|
||||
/// This function loads the dynamically linked library specified in the path. The library
|
||||
/// must do necessary initializations.
|
||||
pub unsafe fn load_plugin(
|
||||
&mut self,
|
||||
libpath: &str,
|
||||
config_file: &str,
|
||||
) -> Result<(), Box<dyn Error>> {
|
||||
type PluginConstructor = unsafe fn() -> *mut dyn AccountsDbPlugin;
|
||||
let lib = Library::new(libpath)?;
|
||||
let constructor: Symbol<PluginConstructor> = lib.get(b"_create_plugin")?;
|
||||
let plugin_raw = constructor();
|
||||
let mut plugin = Box::from_raw(plugin_raw);
|
||||
plugin.on_load(config_file)?;
|
||||
self.plugins.push(plugin);
|
||||
self.libs.push(lib);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Unload all plugins and loaded plugin libraries, making sure to fire
|
||||
/// their `on_plugin_unload()` methods so they can do any necessary cleanup.
|
||||
pub fn unload(&mut self) {
|
||||
for mut plugin in self.plugins.drain(..) {
|
||||
info!("Unloading plugin for {:?}", plugin.name());
|
||||
plugin.on_unload();
|
||||
}
|
||||
|
||||
for lib in self.libs.drain(..) {
|
||||
drop(lib);
|
||||
}
|
||||
}
|
||||
|
||||
/// Check if there is any plugin interested in account data
|
||||
pub fn account_data_notifications_enabled(&self) -> bool {
|
||||
for plugin in &self.plugins {
|
||||
if plugin.account_data_notifications_enabled() {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
false
|
||||
}
|
||||
|
||||
/// Check if there is any plugin interested in transaction data
|
||||
pub fn transaction_notifications_enabled(&self) -> bool {
|
||||
for plugin in &self.plugins {
|
||||
if plugin.transaction_notifications_enabled() {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
false
|
||||
}
|
||||
}
|
210
accountsdb-plugin-manager/src/accountsdb_plugin_service.rs
Normal file
210
accountsdb-plugin-manager/src/accountsdb_plugin_service.rs
Normal file
@ -0,0 +1,210 @@
|
||||
use {
|
||||
crate::{
|
||||
accounts_update_notifier::AccountsUpdateNotifierImpl,
|
||||
accountsdb_plugin_manager::AccountsDbPluginManager,
|
||||
block_metadata_notifier::BlockMetadataNotifierImpl,
|
||||
block_metadata_notifier_interface::BlockMetadataNotifierLock,
|
||||
slot_status_notifier::SlotStatusNotifierImpl, slot_status_observer::SlotStatusObserver,
|
||||
transaction_notifier::TransactionNotifierImpl,
|
||||
},
|
||||
crossbeam_channel::Receiver,
|
||||
log::*,
|
||||
solana_rpc::{
|
||||
optimistically_confirmed_bank_tracker::BankNotification,
|
||||
transaction_notifier_interface::TransactionNotifierLock,
|
||||
},
|
||||
solana_runtime::accounts_update_notifier_interface::AccountsUpdateNotifier,
|
||||
std::{
|
||||
fs::File,
|
||||
io::Read,
|
||||
path::{Path, PathBuf},
|
||||
sync::{Arc, RwLock},
|
||||
thread,
|
||||
},
|
||||
thiserror::Error,
|
||||
};
|
||||
|
||||
#[derive(Error, Debug)]
|
||||
pub enum AccountsdbPluginServiceError {
|
||||
#[error("Cannot open the the plugin config file")]
|
||||
CannotOpenConfigFile(String),
|
||||
|
||||
#[error("Cannot read the the plugin config file")]
|
||||
CannotReadConfigFile(String),
|
||||
|
||||
#[error("The config file is not in a valid Json format")]
|
||||
InvalidConfigFileFormat(String),
|
||||
|
||||
#[error("Plugin library path is not specified in the config file")]
|
||||
LibPathNotSet,
|
||||
|
||||
#[error("Invalid plugin path")]
|
||||
InvalidPluginPath,
|
||||
|
||||
#[error("Cannot load plugin shared library")]
|
||||
PluginLoadError(String),
|
||||
}
|
||||
|
||||
/// The service managing the AccountsDb plugin workflow.
|
||||
pub struct AccountsDbPluginService {
|
||||
slot_status_observer: Option<SlotStatusObserver>,
|
||||
plugin_manager: Arc<RwLock<AccountsDbPluginManager>>,
|
||||
accounts_update_notifier: Option<AccountsUpdateNotifier>,
|
||||
transaction_notifier: Option<TransactionNotifierLock>,
|
||||
block_metadata_notifier: Option<BlockMetadataNotifierLock>,
|
||||
}
|
||||
|
||||
impl AccountsDbPluginService {
|
||||
/// Creates and returns the AccountsDbPluginService.
|
||||
/// # Arguments
|
||||
/// * `confirmed_bank_receiver` - The receiver for confirmed bank notification
|
||||
/// * `accountsdb_plugin_config_file` - The config file path for the plugin. The
|
||||
/// config file controls the plugin responsible
|
||||
/// for transporting the data to external data stores. It is defined in JSON format.
|
||||
/// The `libpath` field should be pointed to the full path of the dynamic shared library
|
||||
/// (.so file) to be loaded. The shared library must implement the `AccountsDbPlugin`
|
||||
/// trait. And the shared library shall export a `C` function `_create_plugin` which
|
||||
/// shall create the implementation of `AccountsDbPlugin` and returns to the caller.
|
||||
/// The rest of the JSON fields' definition is up to to the concrete plugin implementation
|
||||
/// It is usually used to configure the connection information for the external data store.
|
||||
|
||||
pub fn new(
|
||||
confirmed_bank_receiver: Receiver<BankNotification>,
|
||||
accountsdb_plugin_config_files: &[PathBuf],
|
||||
) -> Result<Self, AccountsdbPluginServiceError> {
|
||||
info!(
|
||||
"Starting AccountsDbPluginService from config files: {:?}",
|
||||
accountsdb_plugin_config_files
|
||||
);
|
||||
let mut plugin_manager = AccountsDbPluginManager::new();
|
||||
|
||||
for accountsdb_plugin_config_file in accountsdb_plugin_config_files {
|
||||
Self::load_plugin(&mut plugin_manager, accountsdb_plugin_config_file)?;
|
||||
}
|
||||
let account_data_notifications_enabled =
|
||||
plugin_manager.account_data_notifications_enabled();
|
||||
let transaction_notifications_enabled = plugin_manager.transaction_notifications_enabled();
|
||||
|
||||
let plugin_manager = Arc::new(RwLock::new(plugin_manager));
|
||||
|
||||
let accounts_update_notifier: Option<AccountsUpdateNotifier> =
|
||||
if account_data_notifications_enabled {
|
||||
let accounts_update_notifier =
|
||||
AccountsUpdateNotifierImpl::new(plugin_manager.clone());
|
||||
Some(Arc::new(RwLock::new(accounts_update_notifier)))
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let transaction_notifier: Option<TransactionNotifierLock> =
|
||||
if transaction_notifications_enabled {
|
||||
let transaction_notifier = TransactionNotifierImpl::new(plugin_manager.clone());
|
||||
Some(Arc::new(RwLock::new(transaction_notifier)))
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let (slot_status_observer, block_metadata_notifier): (
|
||||
Option<SlotStatusObserver>,
|
||||
Option<BlockMetadataNotifierLock>,
|
||||
) = if account_data_notifications_enabled || transaction_notifications_enabled {
|
||||
let slot_status_notifier = SlotStatusNotifierImpl::new(plugin_manager.clone());
|
||||
let slot_status_notifier = Arc::new(RwLock::new(slot_status_notifier));
|
||||
(
|
||||
Some(SlotStatusObserver::new(
|
||||
confirmed_bank_receiver,
|
||||
slot_status_notifier,
|
||||
)),
|
||||
Some(Arc::new(RwLock::new(BlockMetadataNotifierImpl::new(
|
||||
plugin_manager.clone(),
|
||||
)))),
|
||||
)
|
||||
} else {
|
||||
(None, None)
|
||||
};
|
||||
|
||||
info!("Started AccountsDbPluginService");
|
||||
Ok(AccountsDbPluginService {
|
||||
slot_status_observer,
|
||||
plugin_manager,
|
||||
accounts_update_notifier,
|
||||
transaction_notifier,
|
||||
block_metadata_notifier,
|
||||
})
|
||||
}
|
||||
|
||||
fn load_plugin(
|
||||
plugin_manager: &mut AccountsDbPluginManager,
|
||||
accountsdb_plugin_config_file: &Path,
|
||||
) -> Result<(), AccountsdbPluginServiceError> {
|
||||
let mut file = match File::open(accountsdb_plugin_config_file) {
|
||||
Ok(file) => file,
|
||||
Err(err) => {
|
||||
return Err(AccountsdbPluginServiceError::CannotOpenConfigFile(format!(
|
||||
"Failed to open the plugin config file {:?}, error: {:?}",
|
||||
accountsdb_plugin_config_file, err
|
||||
)));
|
||||
}
|
||||
};
|
||||
|
||||
let mut contents = String::new();
|
||||
if let Err(err) = file.read_to_string(&mut contents) {
|
||||
return Err(AccountsdbPluginServiceError::CannotReadConfigFile(format!(
|
||||
"Failed to read the plugin config file {:?}, error: {:?}",
|
||||
accountsdb_plugin_config_file, err
|
||||
)));
|
||||
}
|
||||
|
||||
let result: serde_json::Value = match json5::from_str(&contents) {
|
||||
Ok(value) => value,
|
||||
Err(err) => {
|
||||
return Err(AccountsdbPluginServiceError::InvalidConfigFileFormat(
|
||||
format!(
|
||||
"The config file {:?} is not in a valid Json5 format, error: {:?}",
|
||||
accountsdb_plugin_config_file, err
|
||||
),
|
||||
));
|
||||
}
|
||||
};
|
||||
|
||||
let libpath = result["libpath"]
|
||||
.as_str()
|
||||
.ok_or(AccountsdbPluginServiceError::LibPathNotSet)?;
|
||||
let config_file = accountsdb_plugin_config_file
|
||||
.as_os_str()
|
||||
.to_str()
|
||||
.ok_or(AccountsdbPluginServiceError::InvalidPluginPath)?;
|
||||
|
||||
unsafe {
|
||||
let result = plugin_manager.load_plugin(libpath, config_file);
|
||||
if let Err(err) = result {
|
||||
let msg = format!(
|
||||
"Failed to load the plugin library: {:?}, error: {:?}",
|
||||
libpath, err
|
||||
);
|
||||
return Err(AccountsdbPluginServiceError::PluginLoadError(msg));
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn get_accounts_update_notifier(&self) -> Option<AccountsUpdateNotifier> {
|
||||
self.accounts_update_notifier.clone()
|
||||
}
|
||||
|
||||
pub fn get_transaction_notifier(&self) -> Option<TransactionNotifierLock> {
|
||||
self.transaction_notifier.clone()
|
||||
}
|
||||
|
||||
pub fn get_block_metadata_notifier(&self) -> Option<BlockMetadataNotifierLock> {
|
||||
self.block_metadata_notifier.clone()
|
||||
}
|
||||
|
||||
pub fn join(self) -> thread::Result<()> {
|
||||
if let Some(mut slot_status_observer) = self.slot_status_observer {
|
||||
slot_status_observer.join()?;
|
||||
}
|
||||
self.plugin_manager.write().unwrap().unload();
|
||||
Ok(())
|
||||
}
|
||||
}
|
105
accountsdb-plugin-manager/src/block_metadata_notifier.rs
Normal file
105
accountsdb-plugin-manager/src/block_metadata_notifier.rs
Normal file
@ -0,0 +1,105 @@
|
||||
use {
|
||||
crate::{
|
||||
accountsdb_plugin_manager::AccountsDbPluginManager,
|
||||
block_metadata_notifier_interface::BlockMetadataNotifier,
|
||||
},
|
||||
log::*,
|
||||
solana_accountsdb_plugin_interface::accountsdb_plugin_interface::{
|
||||
ReplicaBlockInfo, ReplicaBlockInfoVersions,
|
||||
},
|
||||
solana_measure::measure::Measure,
|
||||
solana_metrics::*,
|
||||
solana_runtime::bank::RewardInfo,
|
||||
solana_sdk::{clock::UnixTimestamp, pubkey::Pubkey},
|
||||
solana_transaction_status::{Reward, Rewards},
|
||||
std::sync::{Arc, RwLock},
|
||||
};
|
||||
|
||||
pub(crate) struct BlockMetadataNotifierImpl {
|
||||
plugin_manager: Arc<RwLock<AccountsDbPluginManager>>,
|
||||
}
|
||||
|
||||
impl BlockMetadataNotifier for BlockMetadataNotifierImpl {
|
||||
/// Notify the block metadata
|
||||
fn notify_block_metadata(
|
||||
&self,
|
||||
slot: u64,
|
||||
blockhash: &str,
|
||||
rewards: &RwLock<Vec<(Pubkey, RewardInfo)>>,
|
||||
block_time: Option<UnixTimestamp>,
|
||||
block_height: Option<u64>,
|
||||
) {
|
||||
let mut plugin_manager = self.plugin_manager.write().unwrap();
|
||||
if plugin_manager.plugins.is_empty() {
|
||||
return;
|
||||
}
|
||||
let rewards = Self::build_rewards(rewards);
|
||||
|
||||
for plugin in plugin_manager.plugins.iter_mut() {
|
||||
let mut measure = Measure::start("accountsdb-plugin-update-slot");
|
||||
let block_info =
|
||||
Self::build_replica_block_info(slot, blockhash, &rewards, block_time, block_height);
|
||||
let block_info = ReplicaBlockInfoVersions::V0_0_1(&block_info);
|
||||
match plugin.notify_block_metadata(block_info) {
|
||||
Err(err) => {
|
||||
error!(
|
||||
"Failed to update block metadata at slot {}, error: {} to plugin {}",
|
||||
slot,
|
||||
err,
|
||||
plugin.name()
|
||||
)
|
||||
}
|
||||
Ok(_) => {
|
||||
trace!(
|
||||
"Successfully updated block metadata at slot {} to plugin {}",
|
||||
slot,
|
||||
plugin.name()
|
||||
);
|
||||
}
|
||||
}
|
||||
measure.stop();
|
||||
inc_new_counter_debug!(
|
||||
"accountsdb-plugin-update-block-metadata-us",
|
||||
measure.as_us() as usize,
|
||||
1000,
|
||||
1000
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl BlockMetadataNotifierImpl {
|
||||
fn build_rewards(rewards: &RwLock<Vec<(Pubkey, RewardInfo)>>) -> Rewards {
|
||||
let rewards = rewards.read().unwrap();
|
||||
rewards
|
||||
.iter()
|
||||
.map(|(pubkey, reward)| Reward {
|
||||
pubkey: pubkey.to_string(),
|
||||
lamports: reward.lamports,
|
||||
post_balance: reward.post_balance,
|
||||
reward_type: Some(reward.reward_type),
|
||||
commission: reward.commission,
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn build_replica_block_info<'a>(
|
||||
slot: u64,
|
||||
blockhash: &'a str,
|
||||
rewards: &'a [Reward],
|
||||
block_time: Option<UnixTimestamp>,
|
||||
block_height: Option<u64>,
|
||||
) -> ReplicaBlockInfo<'a> {
|
||||
ReplicaBlockInfo {
|
||||
slot,
|
||||
blockhash,
|
||||
rewards,
|
||||
block_time,
|
||||
block_height,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn new(plugin_manager: Arc<RwLock<AccountsDbPluginManager>>) -> Self {
|
||||
Self { plugin_manager }
|
||||
}
|
||||
}
|
@ -0,0 +1,20 @@
|
||||
use {
|
||||
solana_runtime::bank::RewardInfo,
|
||||
solana_sdk::{clock::UnixTimestamp, pubkey::Pubkey},
|
||||
std::sync::{Arc, RwLock},
|
||||
};
|
||||
|
||||
/// Interface for notifying block metadata changes
|
||||
pub trait BlockMetadataNotifier {
|
||||
/// Notify the block metadata
|
||||
fn notify_block_metadata(
|
||||
&self,
|
||||
slot: u64,
|
||||
blockhash: &str,
|
||||
rewards: &RwLock<Vec<(Pubkey, RewardInfo)>>,
|
||||
block_time: Option<UnixTimestamp>,
|
||||
block_height: Option<u64>,
|
||||
);
|
||||
}
|
||||
|
||||
pub type BlockMetadataNotifierLock = Arc<RwLock<dyn BlockMetadataNotifier + Sync + Send>>;
|
8
accountsdb-plugin-manager/src/lib.rs
Normal file
8
accountsdb-plugin-manager/src/lib.rs
Normal file
@ -0,0 +1,8 @@
|
||||
pub mod accounts_update_notifier;
|
||||
pub mod accountsdb_plugin_manager;
|
||||
pub mod accountsdb_plugin_service;
|
||||
pub mod block_metadata_notifier;
|
||||
pub mod block_metadata_notifier_interface;
|
||||
pub mod slot_status_notifier;
|
||||
pub mod slot_status_observer;
|
||||
pub mod transaction_notifier;
|
81
accountsdb-plugin-manager/src/slot_status_notifier.rs
Normal file
81
accountsdb-plugin-manager/src/slot_status_notifier.rs
Normal file
@ -0,0 +1,81 @@
|
||||
use {
|
||||
crate::accountsdb_plugin_manager::AccountsDbPluginManager,
|
||||
log::*,
|
||||
solana_accountsdb_plugin_interface::accountsdb_plugin_interface::SlotStatus,
|
||||
solana_measure::measure::Measure,
|
||||
solana_metrics::*,
|
||||
solana_sdk::clock::Slot,
|
||||
std::sync::{Arc, RwLock},
|
||||
};
|
||||
|
||||
pub trait SlotStatusNotifierInterface {
|
||||
/// Notified when a slot is optimistically confirmed
|
||||
fn notify_slot_confirmed(&self, slot: Slot, parent: Option<Slot>);
|
||||
|
||||
/// Notified when a slot is marked frozen.
|
||||
fn notify_slot_processed(&self, slot: Slot, parent: Option<Slot>);
|
||||
|
||||
/// Notified when a slot is rooted.
|
||||
fn notify_slot_rooted(&self, slot: Slot, parent: Option<Slot>);
|
||||
}
|
||||
|
||||
pub type SlotStatusNotifier = Arc<RwLock<dyn SlotStatusNotifierInterface + Sync + Send>>;
|
||||
|
||||
pub struct SlotStatusNotifierImpl {
|
||||
plugin_manager: Arc<RwLock<AccountsDbPluginManager>>,
|
||||
}
|
||||
|
||||
impl SlotStatusNotifierInterface for SlotStatusNotifierImpl {
|
||||
fn notify_slot_confirmed(&self, slot: Slot, parent: Option<Slot>) {
|
||||
self.notify_slot_status(slot, parent, SlotStatus::Confirmed);
|
||||
}
|
||||
|
||||
fn notify_slot_processed(&self, slot: Slot, parent: Option<Slot>) {
|
||||
self.notify_slot_status(slot, parent, SlotStatus::Processed);
|
||||
}
|
||||
|
||||
fn notify_slot_rooted(&self, slot: Slot, parent: Option<Slot>) {
|
||||
self.notify_slot_status(slot, parent, SlotStatus::Rooted);
|
||||
}
|
||||
}
|
||||
|
||||
impl SlotStatusNotifierImpl {
|
||||
pub fn new(plugin_manager: Arc<RwLock<AccountsDbPluginManager>>) -> Self {
|
||||
Self { plugin_manager }
|
||||
}
|
||||
|
||||
pub fn notify_slot_status(&self, slot: Slot, parent: Option<Slot>, slot_status: SlotStatus) {
|
||||
let mut plugin_manager = self.plugin_manager.write().unwrap();
|
||||
if plugin_manager.plugins.is_empty() {
|
||||
return;
|
||||
}
|
||||
|
||||
for plugin in plugin_manager.plugins.iter_mut() {
|
||||
let mut measure = Measure::start("accountsdb-plugin-update-slot");
|
||||
match plugin.update_slot_status(slot, parent, slot_status.clone()) {
|
||||
Err(err) => {
|
||||
error!(
|
||||
"Failed to update slot status at slot {}, error: {} to plugin {}",
|
||||
slot,
|
||||
err,
|
||||
plugin.name()
|
||||
)
|
||||
}
|
||||
Ok(_) => {
|
||||
trace!(
|
||||
"Successfully updated slot status at slot {} to plugin {}",
|
||||
slot,
|
||||
plugin.name()
|
||||
);
|
||||
}
|
||||
}
|
||||
measure.stop();
|
||||
inc_new_counter_debug!(
|
||||
"accountsdb-plugin-update-slot-us",
|
||||
measure.as_us() as usize,
|
||||
1000,
|
||||
1000
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
80
accountsdb-plugin-manager/src/slot_status_observer.rs
Normal file
80
accountsdb-plugin-manager/src/slot_status_observer.rs
Normal file
@ -0,0 +1,80 @@
|
||||
use {
|
||||
crate::slot_status_notifier::SlotStatusNotifier,
|
||||
crossbeam_channel::Receiver,
|
||||
solana_rpc::optimistically_confirmed_bank_tracker::BankNotification,
|
||||
std::{
|
||||
sync::{
|
||||
atomic::{AtomicBool, Ordering},
|
||||
Arc,
|
||||
},
|
||||
thread::{self, Builder, JoinHandle},
|
||||
},
|
||||
};
|
||||
|
||||
#[derive(Debug)]
|
||||
pub(crate) struct SlotStatusObserver {
|
||||
bank_notification_receiver_service: Option<JoinHandle<()>>,
|
||||
exit_updated_slot_server: Arc<AtomicBool>,
|
||||
}
|
||||
|
||||
impl SlotStatusObserver {
|
||||
pub fn new(
|
||||
bank_notification_receiver: Receiver<BankNotification>,
|
||||
slot_status_notifier: SlotStatusNotifier,
|
||||
) -> Self {
|
||||
let exit_updated_slot_server = Arc::new(AtomicBool::new(false));
|
||||
|
||||
Self {
|
||||
bank_notification_receiver_service: Some(Self::run_bank_notification_receiver(
|
||||
bank_notification_receiver,
|
||||
exit_updated_slot_server.clone(),
|
||||
slot_status_notifier,
|
||||
)),
|
||||
exit_updated_slot_server,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn join(&mut self) -> thread::Result<()> {
|
||||
self.exit_updated_slot_server.store(true, Ordering::Relaxed);
|
||||
self.bank_notification_receiver_service
|
||||
.take()
|
||||
.map(JoinHandle::join)
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
fn run_bank_notification_receiver(
|
||||
bank_notification_receiver: Receiver<BankNotification>,
|
||||
exit: Arc<AtomicBool>,
|
||||
slot_status_notifier: SlotStatusNotifier,
|
||||
) -> JoinHandle<()> {
|
||||
Builder::new()
|
||||
.name("bank_notification_receiver".to_string())
|
||||
.spawn(move || {
|
||||
while !exit.load(Ordering::Relaxed) {
|
||||
if let Ok(slot) = bank_notification_receiver.recv() {
|
||||
match slot {
|
||||
BankNotification::OptimisticallyConfirmed(slot) => {
|
||||
slot_status_notifier
|
||||
.read()
|
||||
.unwrap()
|
||||
.notify_slot_confirmed(slot, None);
|
||||
}
|
||||
BankNotification::Frozen(bank) => {
|
||||
slot_status_notifier
|
||||
.read()
|
||||
.unwrap()
|
||||
.notify_slot_processed(bank.slot(), Some(bank.parent_slot()));
|
||||
}
|
||||
BankNotification::Root(bank) => {
|
||||
slot_status_notifier
|
||||
.read()
|
||||
.unwrap()
|
||||
.notify_slot_rooted(bank.slot(), Some(bank.parent_slot()));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
.unwrap()
|
||||
}
|
||||
}
|
92
accountsdb-plugin-manager/src/transaction_notifier.rs
Normal file
92
accountsdb-plugin-manager/src/transaction_notifier.rs
Normal file
@ -0,0 +1,92 @@
|
||||
/// Module responsible for notifying plugins of transactions
|
||||
use {
|
||||
crate::accountsdb_plugin_manager::AccountsDbPluginManager,
|
||||
log::*,
|
||||
solana_accountsdb_plugin_interface::accountsdb_plugin_interface::{
|
||||
ReplicaTransactionInfo, ReplicaTransactionInfoVersions,
|
||||
},
|
||||
solana_measure::measure::Measure,
|
||||
solana_metrics::*,
|
||||
solana_rpc::transaction_notifier_interface::TransactionNotifier,
|
||||
solana_sdk::{clock::Slot, signature::Signature, transaction::SanitizedTransaction},
|
||||
solana_transaction_status::TransactionStatusMeta,
|
||||
std::sync::{Arc, RwLock},
|
||||
};
|
||||
|
||||
/// This implementation of TransactionNotifier is passed to the rpc's TransactionStatusService
|
||||
/// at the validator startup. TransactionStatusService invokes the notify_transaction method
|
||||
/// for new transactions. The implementation in turn invokes the notify_transaction of each
|
||||
/// plugin enabled with transaction notification managed by the AccountsDbPluginManager.
|
||||
pub(crate) struct TransactionNotifierImpl {
|
||||
plugin_manager: Arc<RwLock<AccountsDbPluginManager>>,
|
||||
}
|
||||
|
||||
impl TransactionNotifier for TransactionNotifierImpl {
|
||||
fn notify_transaction(
|
||||
&self,
|
||||
slot: Slot,
|
||||
signature: &Signature,
|
||||
transaction_status_meta: &TransactionStatusMeta,
|
||||
transaction: &SanitizedTransaction,
|
||||
) {
|
||||
let mut measure = Measure::start("accountsdb-plugin-notify_plugins_of_transaction_info");
|
||||
let transaction_log_info =
|
||||
Self::build_replica_transaction_info(signature, transaction_status_meta, transaction);
|
||||
|
||||
let mut plugin_manager = self.plugin_manager.write().unwrap();
|
||||
|
||||
if plugin_manager.plugins.is_empty() {
|
||||
return;
|
||||
}
|
||||
|
||||
for plugin in plugin_manager.plugins.iter_mut() {
|
||||
if !plugin.transaction_notifications_enabled() {
|
||||
continue;
|
||||
}
|
||||
match plugin.notify_transaction(
|
||||
ReplicaTransactionInfoVersions::V0_0_1(&transaction_log_info),
|
||||
slot,
|
||||
) {
|
||||
Err(err) => {
|
||||
error!(
|
||||
"Failed to notify transaction, error: ({}) to plugin {}",
|
||||
err,
|
||||
plugin.name()
|
||||
)
|
||||
}
|
||||
Ok(_) => {
|
||||
trace!(
|
||||
"Successfully notified transaction to plugin {}",
|
||||
plugin.name()
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
measure.stop();
|
||||
inc_new_counter_debug!(
|
||||
"accountsdb-plugin-notify_plugins_of_transaction_info-us",
|
||||
measure.as_us() as usize,
|
||||
10000,
|
||||
10000
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
impl TransactionNotifierImpl {
|
||||
pub fn new(plugin_manager: Arc<RwLock<AccountsDbPluginManager>>) -> Self {
|
||||
Self { plugin_manager }
|
||||
}
|
||||
|
||||
fn build_replica_transaction_info<'a>(
|
||||
signature: &'a Signature,
|
||||
transaction_status_meta: &'a TransactionStatusMeta,
|
||||
transaction: &'a SanitizedTransaction,
|
||||
) -> ReplicaTransactionInfo<'a> {
|
||||
ReplicaTransactionInfo {
|
||||
signature,
|
||||
is_vote: transaction.is_simple_vote_transaction(),
|
||||
transaction,
|
||||
transaction_status_meta,
|
||||
}
|
||||
}
|
||||
}
|
@ -1,8 +1,8 @@
|
||||
[package]
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
edition = "2018"
|
||||
edition = "2021"
|
||||
name = "solana-banking-bench"
|
||||
version = "1.6.0"
|
||||
version = "1.10.0"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@ -10,20 +10,21 @@ publish = false
|
||||
|
||||
[dependencies]
|
||||
clap = "2.33.1"
|
||||
crossbeam-channel = "0.4"
|
||||
log = "0.4.11"
|
||||
crossbeam-channel = "0.5"
|
||||
log = "0.4.14"
|
||||
rand = "0.7.0"
|
||||
rayon = "1.5.0"
|
||||
solana-core = { path = "../core", version = "1.6.0" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.6.0" }
|
||||
solana-streamer = { path = "../streamer", version = "1.6.0" }
|
||||
solana-perf = { path = "../perf", version = "1.6.0" }
|
||||
solana-ledger = { path = "../ledger", version = "1.6.0" }
|
||||
solana-logger = { path = "../logger", version = "1.6.0" }
|
||||
solana-runtime = { path = "../runtime", version = "1.6.0" }
|
||||
solana-measure = { path = "../measure", version = "1.6.0" }
|
||||
solana-sdk = { path = "../sdk", version = "1.6.0" }
|
||||
solana-version = { path = "../version", version = "1.6.0" }
|
||||
rayon = "1.5.1"
|
||||
solana-core = { path = "../core", version = "=1.10.0" }
|
||||
solana-gossip = { path = "../gossip", version = "=1.10.0" }
|
||||
solana-ledger = { path = "../ledger", version = "=1.10.0" }
|
||||
solana-logger = { path = "../logger", version = "=1.10.0" }
|
||||
solana-measure = { path = "../measure", version = "=1.10.0" }
|
||||
solana-perf = { path = "../perf", version = "=1.10.0" }
|
||||
solana-poh = { path = "../poh", version = "=1.10.0" }
|
||||
solana-runtime = { path = "../runtime", version = "=1.10.0" }
|
||||
solana-streamer = { path = "../streamer", version = "=1.10.0" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.10.0" }
|
||||
solana-version = { path = "../version", version = "=1.10.0" }
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
||||
|
@ -1,38 +1,37 @@
|
||||
#![allow(clippy::integer_arithmetic)]
|
||||
use clap::{crate_description, crate_name, value_t, App, Arg};
|
||||
use crossbeam_channel::unbounded;
|
||||
use log::*;
|
||||
use rand::{thread_rng, Rng};
|
||||
use rayon::prelude::*;
|
||||
use solana_core::{
|
||||
banking_stage::{create_test_recorder, BankingStage},
|
||||
cluster_info::ClusterInfo,
|
||||
cluster_info::Node,
|
||||
poh_recorder::PohRecorder,
|
||||
poh_recorder::WorkingBankEntry,
|
||||
};
|
||||
use solana_ledger::{
|
||||
blockstore::Blockstore,
|
||||
genesis_utils::{create_genesis_config, GenesisConfigInfo},
|
||||
get_tmp_ledger_path,
|
||||
};
|
||||
use solana_measure::measure::Measure;
|
||||
use solana_perf::packet::to_packets_chunked;
|
||||
use solana_runtime::{
|
||||
accounts_background_service::AbsRequestSender, bank::Bank, bank_forks::BankForks,
|
||||
};
|
||||
use solana_sdk::{
|
||||
hash::Hash,
|
||||
signature::Keypair,
|
||||
signature::Signature,
|
||||
system_transaction,
|
||||
timing::{duration_as_us, timestamp},
|
||||
transaction::Transaction,
|
||||
};
|
||||
use std::{
|
||||
sync::{atomic::Ordering, mpsc::Receiver, Arc, Mutex},
|
||||
thread::sleep,
|
||||
time::{Duration, Instant},
|
||||
use {
|
||||
clap::{crate_description, crate_name, value_t, App, Arg},
|
||||
crossbeam_channel::{unbounded, Receiver},
|
||||
log::*,
|
||||
rand::{thread_rng, Rng},
|
||||
rayon::prelude::*,
|
||||
solana_core::banking_stage::BankingStage,
|
||||
solana_gossip::cluster_info::{ClusterInfo, Node},
|
||||
solana_ledger::{
|
||||
blockstore::Blockstore,
|
||||
genesis_utils::{create_genesis_config, GenesisConfigInfo},
|
||||
get_tmp_ledger_path,
|
||||
},
|
||||
solana_measure::measure::Measure,
|
||||
solana_perf::packet::to_packet_batches,
|
||||
solana_poh::poh_recorder::{create_test_recorder, PohRecorder, WorkingBankEntry},
|
||||
solana_runtime::{
|
||||
accounts_background_service::AbsRequestSender, bank::Bank, bank_forks::BankForks,
|
||||
cost_model::CostModel,
|
||||
},
|
||||
solana_sdk::{
|
||||
hash::Hash,
|
||||
signature::{Keypair, Signature},
|
||||
system_transaction,
|
||||
timing::{duration_as_us, timestamp},
|
||||
transaction::Transaction,
|
||||
},
|
||||
solana_streamer::socket::SocketAddrSpace,
|
||||
std::{
|
||||
sync::{atomic::Ordering, Arc, Mutex, RwLock},
|
||||
thread::sleep,
|
||||
time::{Duration, Instant},
|
||||
},
|
||||
};
|
||||
|
||||
fn check_txs(
|
||||
@ -78,7 +77,7 @@ fn make_accounts_txs(
|
||||
.into_par_iter()
|
||||
.map(|_| {
|
||||
let mut new = dummy.clone();
|
||||
let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
|
||||
let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen::<u8>()).collect();
|
||||
if !same_payer {
|
||||
new.message.account_keys[0] = solana_sdk::pubkey::new_rand();
|
||||
}
|
||||
@ -169,8 +168,9 @@ fn main() {
|
||||
|
||||
let (verified_sender, verified_receiver) = unbounded();
|
||||
let (vote_sender, vote_receiver) = unbounded();
|
||||
let (tpu_vote_sender, tpu_vote_receiver) = unbounded();
|
||||
let (replay_vote_sender, _replay_vote_receiver) = unbounded();
|
||||
let bank0 = Bank::new(&genesis_config);
|
||||
let bank0 = Bank::new_for_benches(&genesis_config);
|
||||
let mut bank_forks = BankForks::new(bank0);
|
||||
let mut bank = bank_forks.working_bank();
|
||||
|
||||
@ -189,7 +189,7 @@ fn main() {
|
||||
genesis_config.hash(),
|
||||
);
|
||||
// Ignore any pesky duplicate signature errors in the case we are using single-payer
|
||||
let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
|
||||
let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen::<u8>()).collect();
|
||||
fund.signatures = vec![Signature::new(&sig[0..64])];
|
||||
let x = bank.process_transaction(&fund);
|
||||
x.unwrap();
|
||||
@ -199,19 +199,20 @@ fn main() {
|
||||
if !skip_sanity {
|
||||
//sanity check, make sure all the transactions can execute sequentially
|
||||
transactions.iter().for_each(|tx| {
|
||||
let res = bank.process_transaction(&tx);
|
||||
let res = bank.process_transaction(tx);
|
||||
assert!(res.is_ok(), "sanity test transactions error: {:?}", res);
|
||||
});
|
||||
bank.clear_signatures();
|
||||
//sanity check, make sure all the transactions can execute in parallel
|
||||
let res = bank.process_transactions(&transactions);
|
||||
|
||||
let res = bank.process_transactions(transactions.iter());
|
||||
for r in res {
|
||||
assert!(r.is_ok(), "sanity parallel execution error: {:?}", r);
|
||||
}
|
||||
bank.clear_signatures();
|
||||
}
|
||||
|
||||
let mut verified: Vec<_> = to_packets_chunked(&transactions, packets_per_chunk);
|
||||
let mut verified: Vec<_> = to_packet_batches(&transactions, packets_per_chunk);
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
{
|
||||
let blockstore = Arc::new(
|
||||
@ -219,15 +220,21 @@ fn main() {
|
||||
);
|
||||
let (exit, poh_recorder, poh_service, signal_receiver) =
|
||||
create_test_recorder(&bank, &blockstore, None);
|
||||
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
|
||||
let cluster_info = ClusterInfo::new(
|
||||
Node::new_localhost().info,
|
||||
Arc::new(Keypair::new()),
|
||||
SocketAddrSpace::Unspecified,
|
||||
);
|
||||
let cluster_info = Arc::new(cluster_info);
|
||||
let banking_stage = BankingStage::new(
|
||||
&cluster_info,
|
||||
&poh_recorder,
|
||||
verified_receiver,
|
||||
tpu_vote_receiver,
|
||||
vote_receiver,
|
||||
None,
|
||||
replay_vote_sender,
|
||||
Arc::new(RwLock::new(CostModel::default())),
|
||||
);
|
||||
poh_recorder.lock().unwrap().set_bank(&bank);
|
||||
|
||||
@ -307,11 +314,10 @@ fn main() {
|
||||
tx_total_us += duration_as_us(&now.elapsed());
|
||||
|
||||
let mut poh_time = Measure::start("poh_time");
|
||||
poh_recorder.lock().unwrap().reset(
|
||||
bank.last_blockhash(),
|
||||
bank.slot(),
|
||||
Some((bank.slot(), bank.slot() + 1)),
|
||||
);
|
||||
poh_recorder
|
||||
.lock()
|
||||
.unwrap()
|
||||
.reset(bank.clone(), Some((bank.slot(), bank.slot() + 1)));
|
||||
poh_time.stop();
|
||||
|
||||
let mut new_bank_time = Measure::start("new_bank");
|
||||
@ -355,10 +361,10 @@ fn main() {
|
||||
if bank.slot() > 0 && bank.slot() % 16 == 0 {
|
||||
for tx in transactions.iter_mut() {
|
||||
tx.message.recent_blockhash = bank.last_blockhash();
|
||||
let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
|
||||
let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen::<u8>()).collect();
|
||||
tx.signatures[0] = Signature::new(&sig[0..64]);
|
||||
}
|
||||
verified = to_packets_chunked(&transactions.clone(), packets_per_chunk);
|
||||
verified = to_packet_batches(&transactions.clone(), packets_per_chunk);
|
||||
}
|
||||
|
||||
start += chunk_len;
|
||||
@ -380,6 +386,7 @@ fn main() {
|
||||
);
|
||||
|
||||
drop(verified_sender);
|
||||
drop(tpu_vote_sender);
|
||||
drop(vote_sender);
|
||||
exit.store(true, Ordering::Relaxed);
|
||||
banking_stage.join().unwrap();
|
||||
|
@ -1,30 +1,28 @@
|
||||
[package]
|
||||
name = "solana-banks-client"
|
||||
version = "1.6.0"
|
||||
version = "1.10.0"
|
||||
description = "Solana banks client"
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
documentation = "https://docs.rs/solana-banks-client"
|
||||
edition = "2018"
|
||||
edition = "2021"
|
||||
|
||||
[dependencies]
|
||||
bincode = "1.3.1"
|
||||
borsh = "0.8.1"
|
||||
borsh-derive = "0.8.1"
|
||||
borsh = "0.9.1"
|
||||
futures = "0.3"
|
||||
mio = "0.7.6"
|
||||
solana-banks-interface = { path = "../banks-interface", version = "1.6.0" }
|
||||
solana-program = { path = "../sdk/program", version = "1.6.0" }
|
||||
solana-sdk = { path = "../sdk", version = "1.6.0" }
|
||||
tarpc = { version = "0.24.1", features = ["full"] }
|
||||
tokio = { version = "1.1", features = ["full"] }
|
||||
solana-banks-interface = { path = "../banks-interface", version = "=1.10.0" }
|
||||
solana-program = { path = "../sdk/program", version = "=1.10.0" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.10.0" }
|
||||
tarpc = { version = "0.27.2", features = ["full"] }
|
||||
thiserror = "1.0"
|
||||
tokio = { version = "1", features = ["full"] }
|
||||
tokio-serde = { version = "0.8", features = ["bincode"] }
|
||||
|
||||
[dev-dependencies]
|
||||
solana-runtime = { path = "../runtime", version = "1.6.0" }
|
||||
solana-banks-server = { path = "../banks-server", version = "1.6.0" }
|
||||
solana-runtime = { path = "../runtime", version = "=1.10.0" }
|
||||
solana-banks-server = { path = "../banks-server", version = "=1.10.0" }
|
||||
|
||||
[lib]
|
||||
crate-type = ["lib"]
|
||||
|
73
banks-client/src/error.rs
Normal file
73
banks-client/src/error.rs
Normal file
@ -0,0 +1,73 @@
|
||||
use {
|
||||
solana_sdk::{transaction::TransactionError, transport::TransportError},
|
||||
std::io,
|
||||
tarpc::client::RpcError,
|
||||
thiserror::Error,
|
||||
};
|
||||
|
||||
/// Errors from BanksClient
|
||||
#[derive(Error, Debug)]
|
||||
pub enum BanksClientError {
|
||||
#[error("client error: {0}")]
|
||||
ClientError(&'static str),
|
||||
|
||||
#[error(transparent)]
|
||||
Io(#[from] io::Error),
|
||||
|
||||
#[error(transparent)]
|
||||
RpcError(#[from] RpcError),
|
||||
|
||||
#[error("transport transaction error: {0}")]
|
||||
TransactionError(#[from] TransactionError),
|
||||
|
||||
#[error("simulation error: {err:?}, logs: {logs:?}, units_consumed: {units_consumed:?}")]
|
||||
SimulationError {
|
||||
err: TransactionError,
|
||||
logs: Vec<String>,
|
||||
units_consumed: u64,
|
||||
},
|
||||
}
|
||||
|
||||
impl BanksClientError {
|
||||
pub fn unwrap(&self) -> TransactionError {
|
||||
match self {
|
||||
BanksClientError::TransactionError(err)
|
||||
| BanksClientError::SimulationError { err, .. } => err.clone(),
|
||||
_ => panic!("unexpected transport error"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<BanksClientError> for io::Error {
|
||||
fn from(err: BanksClientError) -> Self {
|
||||
match err {
|
||||
BanksClientError::ClientError(err) => Self::new(io::ErrorKind::Other, err.to_string()),
|
||||
BanksClientError::Io(err) => err,
|
||||
BanksClientError::RpcError(err) => Self::new(io::ErrorKind::Other, err.to_string()),
|
||||
BanksClientError::TransactionError(err) => {
|
||||
Self::new(io::ErrorKind::Other, err.to_string())
|
||||
}
|
||||
BanksClientError::SimulationError { err, .. } => {
|
||||
Self::new(io::ErrorKind::Other, err.to_string())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<BanksClientError> for TransportError {
|
||||
fn from(err: BanksClientError) -> Self {
|
||||
match err {
|
||||
BanksClientError::ClientError(err) => {
|
||||
Self::IoError(io::Error::new(io::ErrorKind::Other, err.to_string()))
|
||||
}
|
||||
BanksClientError::Io(err) => {
|
||||
Self::IoError(io::Error::new(io::ErrorKind::Other, err.to_string()))
|
||||
}
|
||||
BanksClientError::RpcError(err) => {
|
||||
Self::IoError(io::Error::new(io::ErrorKind::Other, err.to_string()))
|
||||
}
|
||||
BanksClientError::TransactionError(err) => Self::TransactionError(err),
|
||||
BanksClientError::SimulationError { err, .. } => Self::TransactionError(err),
|
||||
}
|
||||
}
|
||||
}
|
@ -5,31 +5,34 @@
|
||||
//! but they are undocumented, may change over time, and are generally more
|
||||
//! cumbersome to use.
|
||||
|
||||
use borsh::BorshDeserialize;
|
||||
use futures::{future::join_all, Future, FutureExt};
|
||||
pub use crate::error::BanksClientError;
|
||||
pub use solana_banks_interface::{BanksClient as TarpcClient, TransactionStatus};
|
||||
use solana_banks_interface::{BanksRequest, BanksResponse};
|
||||
use solana_program::{
|
||||
clock::Slot, fee_calculator::FeeCalculator, hash::Hash, program_pack::Pack, pubkey::Pubkey,
|
||||
rent::Rent, sysvar,
|
||||
use {
|
||||
borsh::BorshDeserialize,
|
||||
futures::{future::join_all, Future, FutureExt, TryFutureExt},
|
||||
solana_banks_interface::{BanksRequest, BanksResponse, BanksTransactionResultWithSimulation},
|
||||
solana_program::{
|
||||
clock::Slot, fee_calculator::FeeCalculator, hash::Hash, program_pack::Pack, pubkey::Pubkey,
|
||||
rent::Rent, sysvar::Sysvar,
|
||||
},
|
||||
solana_sdk::{
|
||||
account::{from_account, Account},
|
||||
commitment_config::CommitmentLevel,
|
||||
message::Message,
|
||||
signature::Signature,
|
||||
transaction::{self, Transaction},
|
||||
},
|
||||
tarpc::{
|
||||
client::{self, NewClient, RequestDispatch},
|
||||
context::{self, Context},
|
||||
serde_transport::tcp,
|
||||
ClientMessage, Response, Transport,
|
||||
},
|
||||
tokio::{net::ToSocketAddrs, time::Duration},
|
||||
tokio_serde::formats::Bincode,
|
||||
};
|
||||
use solana_sdk::{
|
||||
account::{from_account, Account},
|
||||
commitment_config::CommitmentLevel,
|
||||
signature::Signature,
|
||||
transaction::{self, Transaction},
|
||||
transport,
|
||||
};
|
||||
use std::io::{self, Error, ErrorKind};
|
||||
use tarpc::{
|
||||
client::{self, channel::RequestDispatch, NewClient},
|
||||
context::{self, Context},
|
||||
rpc::{ClientMessage, Response},
|
||||
serde_transport::tcp,
|
||||
Transport,
|
||||
};
|
||||
use tokio::{net::ToSocketAddrs, time::Duration};
|
||||
use tokio_serde::formats::Bincode;
|
||||
|
||||
mod error;
|
||||
|
||||
// This exists only for backward compatibility
|
||||
pub trait BanksClientExt {}
|
||||
@ -55,34 +58,55 @@ impl BanksClient {
|
||||
&mut self,
|
||||
ctx: Context,
|
||||
transaction: Transaction,
|
||||
) -> impl Future<Output = io::Result<()>> + '_ {
|
||||
self.inner.send_transaction_with_context(ctx, transaction)
|
||||
) -> impl Future<Output = Result<(), BanksClientError>> + '_ {
|
||||
self.inner
|
||||
.send_transaction_with_context(ctx, transaction)
|
||||
.map_err(Into::into)
|
||||
}
|
||||
|
||||
#[deprecated(
|
||||
since = "1.9.0",
|
||||
note = "Please use `get_fee_for_message` or `is_blockhash_valid` instead"
|
||||
)]
|
||||
pub fn get_fees_with_commitment_and_context(
|
||||
&mut self,
|
||||
ctx: Context,
|
||||
commitment: CommitmentLevel,
|
||||
) -> impl Future<Output = io::Result<(FeeCalculator, Hash, Slot)>> + '_ {
|
||||
) -> impl Future<Output = Result<(FeeCalculator, Hash, u64), BanksClientError>> + '_ {
|
||||
#[allow(deprecated)]
|
||||
self.inner
|
||||
.get_fees_with_commitment_and_context(ctx, commitment)
|
||||
.map_err(Into::into)
|
||||
}
|
||||
|
||||
pub fn get_transaction_status_with_context(
|
||||
&mut self,
|
||||
ctx: Context,
|
||||
signature: Signature,
|
||||
) -> impl Future<Output = io::Result<Option<TransactionStatus>>> + '_ {
|
||||
) -> impl Future<Output = Result<Option<TransactionStatus>, BanksClientError>> + '_ {
|
||||
self.inner
|
||||
.get_transaction_status_with_context(ctx, signature)
|
||||
.map_err(Into::into)
|
||||
}
|
||||
|
||||
pub fn get_slot_with_context(
|
||||
&mut self,
|
||||
ctx: Context,
|
||||
commitment: CommitmentLevel,
|
||||
) -> impl Future<Output = io::Result<Slot>> + '_ {
|
||||
self.inner.get_slot_with_context(ctx, commitment)
|
||||
) -> impl Future<Output = Result<Slot, BanksClientError>> + '_ {
|
||||
self.inner
|
||||
.get_slot_with_context(ctx, commitment)
|
||||
.map_err(Into::into)
|
||||
}
|
||||
|
||||
pub fn get_block_height_with_context(
|
||||
&mut self,
|
||||
ctx: Context,
|
||||
commitment: CommitmentLevel,
|
||||
) -> impl Future<Output = Result<Slot, BanksClientError>> + '_ {
|
||||
self.inner
|
||||
.get_block_height_with_context(ctx, commitment)
|
||||
.map_err(Into::into)
|
||||
}
|
||||
|
||||
pub fn process_transaction_with_commitment_and_context(
|
||||
@ -90,9 +114,26 @@ impl BanksClient {
|
||||
ctx: Context,
|
||||
transaction: Transaction,
|
||||
commitment: CommitmentLevel,
|
||||
) -> impl Future<Output = io::Result<Option<transaction::Result<()>>>> + '_ {
|
||||
) -> impl Future<Output = Result<Option<transaction::Result<()>>, BanksClientError>> + '_ {
|
||||
self.inner
|
||||
.process_transaction_with_commitment_and_context(ctx, transaction, commitment)
|
||||
.map_err(Into::into)
|
||||
}
|
||||
|
||||
pub fn process_transaction_with_preflight_and_commitment_and_context(
|
||||
&mut self,
|
||||
ctx: Context,
|
||||
transaction: Transaction,
|
||||
commitment: CommitmentLevel,
|
||||
) -> impl Future<Output = Result<BanksTransactionResultWithSimulation, BanksClientError>> + '_
|
||||
{
|
||||
self.inner
|
||||
.process_transaction_with_preflight_and_commitment_and_context(
|
||||
ctx,
|
||||
transaction,
|
||||
commitment,
|
||||
)
|
||||
.map_err(Into::into)
|
||||
}
|
||||
|
||||
pub fn get_account_with_commitment_and_context(
|
||||
@ -100,9 +141,10 @@ impl BanksClient {
|
||||
ctx: Context,
|
||||
address: Pubkey,
|
||||
commitment: CommitmentLevel,
|
||||
) -> impl Future<Output = io::Result<Option<Account>>> + '_ {
|
||||
) -> impl Future<Output = Result<Option<Account>, BanksClientError>> + '_ {
|
||||
self.inner
|
||||
.get_account_with_commitment_and_context(ctx, address, commitment)
|
||||
.map_err(Into::into)
|
||||
}
|
||||
|
||||
/// Send a transaction and return immediately. The server will resend the
|
||||
@ -111,34 +153,49 @@ impl BanksClient {
|
||||
pub fn send_transaction(
|
||||
&mut self,
|
||||
transaction: Transaction,
|
||||
) -> impl Future<Output = io::Result<()>> + '_ {
|
||||
) -> impl Future<Output = Result<(), BanksClientError>> + '_ {
|
||||
self.send_transaction_with_context(context::current(), transaction)
|
||||
}
|
||||
|
||||
/// Return the fee parameters associated with a recent, rooted blockhash. The cluster
|
||||
/// will use the transaction's blockhash to look up these same fee parameters and
|
||||
/// use them to calculate the transaction fee.
|
||||
#[deprecated(
|
||||
since = "1.9.0",
|
||||
note = "Please use `get_fee_for_message` or `is_blockhash_valid` instead"
|
||||
)]
|
||||
pub fn get_fees(
|
||||
&mut self,
|
||||
) -> impl Future<Output = io::Result<(FeeCalculator, Hash, Slot)>> + '_ {
|
||||
) -> impl Future<Output = Result<(FeeCalculator, Hash, u64), BanksClientError>> + '_ {
|
||||
#[allow(deprecated)]
|
||||
self.get_fees_with_commitment_and_context(context::current(), CommitmentLevel::default())
|
||||
}
|
||||
|
||||
/// Return the cluster rent
|
||||
pub fn get_rent(&mut self) -> impl Future<Output = io::Result<Rent>> + '_ {
|
||||
self.get_account(sysvar::rent::id()).map(|result| {
|
||||
let rent_sysvar = result?
|
||||
.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "Rent sysvar not present"))?;
|
||||
from_account::<Rent, _>(&rent_sysvar).ok_or_else(|| {
|
||||
io::Error::new(io::ErrorKind::Other, "Failed to deserialize Rent sysvar")
|
||||
})
|
||||
/// Return the cluster Sysvar
|
||||
pub fn get_sysvar<T: Sysvar>(
|
||||
&mut self,
|
||||
) -> impl Future<Output = Result<T, BanksClientError>> + '_ {
|
||||
self.get_account(T::id()).map(|result| {
|
||||
let sysvar = result?.ok_or(BanksClientError::ClientError("Sysvar not present"))?;
|
||||
from_account::<T, _>(&sysvar).ok_or(BanksClientError::ClientError(
|
||||
"Failed to deserialize sysvar",
|
||||
))
|
||||
})
|
||||
}
|
||||
|
||||
/// Return the cluster rent
|
||||
pub fn get_rent(&mut self) -> impl Future<Output = Result<Rent, BanksClientError>> + '_ {
|
||||
self.get_sysvar::<Rent>()
|
||||
}
|
||||
|
||||
/// Return a recent, rooted blockhash from the server. The cluster will only accept
|
||||
/// transactions with a blockhash that has not yet expired. Use the `get_fees`
|
||||
/// method to get both a blockhash and the blockhash's last valid slot.
|
||||
pub fn get_recent_blockhash(&mut self) -> impl Future<Output = io::Result<Hash>> + '_ {
|
||||
#[deprecated(since = "1.9.0", note = "Please use `get_latest_blockhash` instead")]
|
||||
pub fn get_recent_blockhash(
|
||||
&mut self,
|
||||
) -> impl Future<Output = Result<Hash, BanksClientError>> + '_ {
|
||||
#[allow(deprecated)]
|
||||
self.get_fees().map(|result| Ok(result?.1))
|
||||
}
|
||||
|
||||
@ -148,23 +205,71 @@ impl BanksClient {
|
||||
&mut self,
|
||||
transaction: Transaction,
|
||||
commitment: CommitmentLevel,
|
||||
) -> impl Future<Output = transport::Result<()>> + '_ {
|
||||
) -> impl Future<Output = Result<(), BanksClientError>> + '_ {
|
||||
let mut ctx = context::current();
|
||||
ctx.deadline += Duration::from_secs(50);
|
||||
self.process_transaction_with_commitment_and_context(ctx, transaction, commitment)
|
||||
.map(|result| match result? {
|
||||
None => {
|
||||
Err(Error::new(ErrorKind::TimedOut, "invalid blockhash or fee-payer").into())
|
||||
}
|
||||
None => Err(BanksClientError::ClientError(
|
||||
"invalid blockhash or fee-payer",
|
||||
)),
|
||||
Some(transaction_result) => Ok(transaction_result?),
|
||||
})
|
||||
}
|
||||
|
||||
/// Send a transaction and return any preflight (sanitization or simulation) errors, or return
|
||||
/// after the transaction has been rejected or reached the given level of commitment.
|
||||
pub fn process_transaction_with_preflight_and_commitment(
|
||||
&mut self,
|
||||
transaction: Transaction,
|
||||
commitment: CommitmentLevel,
|
||||
) -> impl Future<Output = Result<(), BanksClientError>> + '_ {
|
||||
let mut ctx = context::current();
|
||||
ctx.deadline += Duration::from_secs(50);
|
||||
self.process_transaction_with_preflight_and_commitment_and_context(
|
||||
ctx,
|
||||
transaction,
|
||||
commitment,
|
||||
)
|
||||
.map(|result| match result? {
|
||||
BanksTransactionResultWithSimulation {
|
||||
result: None,
|
||||
simulation_details: _,
|
||||
} => Err(BanksClientError::ClientError(
|
||||
"invalid blockhash or fee-payer",
|
||||
)),
|
||||
BanksTransactionResultWithSimulation {
|
||||
result: Some(Err(err)),
|
||||
simulation_details: Some(simulation_details),
|
||||
} => Err(BanksClientError::SimulationError {
|
||||
err,
|
||||
logs: simulation_details.logs,
|
||||
units_consumed: simulation_details.units_consumed,
|
||||
}),
|
||||
BanksTransactionResultWithSimulation {
|
||||
result: Some(result),
|
||||
simulation_details: _,
|
||||
} => result.map_err(Into::into),
|
||||
})
|
||||
}
|
||||
|
||||
/// Send a transaction and return any preflight (sanitization or simulation) errors, or return
|
||||
/// after the transaction has been finalized or rejected.
|
||||
pub fn process_transaction_with_preflight(
|
||||
&mut self,
|
||||
transaction: Transaction,
|
||||
) -> impl Future<Output = Result<(), BanksClientError>> + '_ {
|
||||
self.process_transaction_with_preflight_and_commitment(
|
||||
transaction,
|
||||
CommitmentLevel::default(),
|
||||
)
|
||||
}
|
||||
|
||||
/// Send a transaction and return until the transaction has been finalized or rejected.
|
||||
pub fn process_transaction(
|
||||
&mut self,
|
||||
transaction: Transaction,
|
||||
) -> impl Future<Output = transport::Result<()>> + '_ {
|
||||
) -> impl Future<Output = Result<(), BanksClientError>> + '_ {
|
||||
self.process_transaction_with_commitment(transaction, CommitmentLevel::default())
|
||||
}
|
||||
|
||||
@ -172,7 +277,7 @@ impl BanksClient {
|
||||
&mut self,
|
||||
transactions: Vec<Transaction>,
|
||||
commitment: CommitmentLevel,
|
||||
) -> transport::Result<()> {
|
||||
) -> Result<(), BanksClientError> {
|
||||
let mut clients: Vec<_> = transactions.iter().map(|_| self.clone()).collect();
|
||||
let futures = clients
|
||||
.iter_mut()
|
||||
@ -188,23 +293,31 @@ impl BanksClient {
|
||||
pub fn process_transactions(
|
||||
&mut self,
|
||||
transactions: Vec<Transaction>,
|
||||
) -> impl Future<Output = transport::Result<()>> + '_ {
|
||||
) -> impl Future<Output = Result<(), BanksClientError>> + '_ {
|
||||
self.process_transactions_with_commitment(transactions, CommitmentLevel::default())
|
||||
}
|
||||
|
||||
/// Return the most recent rooted slot height. All transactions at or below this height
|
||||
/// are said to be finalized. The cluster will not fork to a higher slot height.
|
||||
pub fn get_root_slot(&mut self) -> impl Future<Output = io::Result<Slot>> + '_ {
|
||||
/// Return the most recent rooted slot. All transactions at or below this slot
|
||||
/// are said to be finalized. The cluster will not fork to a higher slot.
|
||||
pub fn get_root_slot(&mut self) -> impl Future<Output = Result<Slot, BanksClientError>> + '_ {
|
||||
self.get_slot_with_context(context::current(), CommitmentLevel::default())
|
||||
}
|
||||
|
||||
/// Return the most recent rooted block height. All transactions at or below this height
|
||||
/// are said to be finalized. The cluster will not fork to a higher block height.
|
||||
pub fn get_root_block_height(
|
||||
&mut self,
|
||||
) -> impl Future<Output = Result<Slot, BanksClientError>> + '_ {
|
||||
self.get_block_height_with_context(context::current(), CommitmentLevel::default())
|
||||
}
|
||||
|
||||
/// Return the account at the given address at the slot corresponding to the given
|
||||
/// commitment level. If the account is not found, None is returned.
|
||||
pub fn get_account_with_commitment(
|
||||
&mut self,
|
||||
address: Pubkey,
|
||||
commitment: CommitmentLevel,
|
||||
) -> impl Future<Output = io::Result<Option<Account>>> + '_ {
|
||||
) -> impl Future<Output = Result<Option<Account>, BanksClientError>> + '_ {
|
||||
self.get_account_with_commitment_and_context(context::current(), address, commitment)
|
||||
}
|
||||
|
||||
@ -213,7 +326,7 @@ impl BanksClient {
|
||||
pub fn get_account(
|
||||
&mut self,
|
||||
address: Pubkey,
|
||||
) -> impl Future<Output = io::Result<Option<Account>>> + '_ {
|
||||
) -> impl Future<Output = Result<Option<Account>, BanksClientError>> + '_ {
|
||||
self.get_account_with_commitment(address, CommitmentLevel::default())
|
||||
}
|
||||
|
||||
@ -222,12 +335,11 @@ impl BanksClient {
|
||||
pub fn get_packed_account_data<T: Pack>(
|
||||
&mut self,
|
||||
address: Pubkey,
|
||||
) -> impl Future<Output = io::Result<T>> + '_ {
|
||||
) -> impl Future<Output = Result<T, BanksClientError>> + '_ {
|
||||
self.get_account(address).map(|result| {
|
||||
let account =
|
||||
result?.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "Account not found"))?;
|
||||
let account = result?.ok_or(BanksClientError::ClientError("Account not found"))?;
|
||||
T::unpack_from_slice(&account.data)
|
||||
.map_err(|_| io::Error::new(io::ErrorKind::Other, "Failed to deserialize account"))
|
||||
.map_err(|_| BanksClientError::ClientError("Failed to deserialize account"))
|
||||
})
|
||||
}
|
||||
|
||||
@ -236,11 +348,10 @@ impl BanksClient {
|
||||
pub fn get_account_data_with_borsh<T: BorshDeserialize>(
|
||||
&mut self,
|
||||
address: Pubkey,
|
||||
) -> impl Future<Output = io::Result<T>> + '_ {
|
||||
) -> impl Future<Output = Result<T, BanksClientError>> + '_ {
|
||||
self.get_account(address).map(|result| {
|
||||
let account =
|
||||
result?.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "account not found"))?;
|
||||
T::try_from_slice(&account.data)
|
||||
let account = result?.ok_or(BanksClientError::ClientError("Account not found"))?;
|
||||
T::try_from_slice(&account.data).map_err(Into::into)
|
||||
})
|
||||
}
|
||||
|
||||
@ -250,14 +361,17 @@ impl BanksClient {
|
||||
&mut self,
|
||||
address: Pubkey,
|
||||
commitment: CommitmentLevel,
|
||||
) -> impl Future<Output = io::Result<u64>> + '_ {
|
||||
) -> impl Future<Output = Result<u64, BanksClientError>> + '_ {
|
||||
self.get_account_with_commitment_and_context(context::current(), address, commitment)
|
||||
.map(|result| Ok(result?.map(|x| x.lamports).unwrap_or(0)))
|
||||
}
|
||||
|
||||
/// Return the balance in lamports of an account at the given address at the time
|
||||
/// of the most recent root slot.
|
||||
pub fn get_balance(&mut self, address: Pubkey) -> impl Future<Output = io::Result<u64>> + '_ {
|
||||
pub fn get_balance(
|
||||
&mut self,
|
||||
address: Pubkey,
|
||||
) -> impl Future<Output = Result<u64, BanksClientError>> + '_ {
|
||||
self.get_balance_with_commitment(address, CommitmentLevel::default())
|
||||
}
|
||||
|
||||
@ -269,7 +383,7 @@ impl BanksClient {
|
||||
pub fn get_transaction_status(
|
||||
&mut self,
|
||||
signature: Signature,
|
||||
) -> impl Future<Output = io::Result<Option<TransactionStatus>>> + '_ {
|
||||
) -> impl Future<Output = Result<Option<TransactionStatus>, BanksClientError>> + '_ {
|
||||
self.get_transaction_status_with_context(context::current(), signature)
|
||||
}
|
||||
|
||||
@ -277,7 +391,7 @@ impl BanksClient {
|
||||
pub async fn get_transaction_statuses(
|
||||
&mut self,
|
||||
signatures: Vec<Signature>,
|
||||
) -> io::Result<Vec<Option<TransactionStatus>>> {
|
||||
) -> Result<Vec<Option<TransactionStatus>>, BanksClientError> {
|
||||
// tarpc futures oddly hold a mutable reference back to the client so clone the client upfront
|
||||
let mut clients_and_signatures: Vec<_> = signatures
|
||||
.into_iter()
|
||||
@ -293,36 +407,78 @@ impl BanksClient {
|
||||
// Convert Vec<Result<_, _>> to Result<Vec<_>>
|
||||
statuses.into_iter().collect()
|
||||
}
|
||||
|
||||
pub fn get_latest_blockhash(
|
||||
&mut self,
|
||||
) -> impl Future<Output = Result<Hash, BanksClientError>> + '_ {
|
||||
self.get_latest_blockhash_with_commitment(CommitmentLevel::default())
|
||||
.map(|result| {
|
||||
result?
|
||||
.map(|x| x.0)
|
||||
.ok_or(BanksClientError::ClientError("valid blockhash not found"))
|
||||
.map_err(Into::into)
|
||||
})
|
||||
}
|
||||
|
||||
pub fn get_latest_blockhash_with_commitment(
|
||||
&mut self,
|
||||
commitment: CommitmentLevel,
|
||||
) -> impl Future<Output = Result<Option<(Hash, u64)>, BanksClientError>> + '_ {
|
||||
self.get_latest_blockhash_with_commitment_and_context(context::current(), commitment)
|
||||
}
|
||||
|
||||
pub fn get_latest_blockhash_with_commitment_and_context(
|
||||
&mut self,
|
||||
ctx: Context,
|
||||
commitment: CommitmentLevel,
|
||||
) -> impl Future<Output = Result<Option<(Hash, u64)>, BanksClientError>> + '_ {
|
||||
self.inner
|
||||
.get_latest_blockhash_with_commitment_and_context(ctx, commitment)
|
||||
.map_err(Into::into)
|
||||
}
|
||||
|
||||
pub fn get_fee_for_message_with_commitment_and_context(
|
||||
&mut self,
|
||||
ctx: Context,
|
||||
commitment: CommitmentLevel,
|
||||
message: Message,
|
||||
) -> impl Future<Output = Result<Option<u64>, BanksClientError>> + '_ {
|
||||
self.inner
|
||||
.get_fee_for_message_with_commitment_and_context(ctx, commitment, message)
|
||||
.map_err(Into::into)
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn start_client<C>(transport: C) -> io::Result<BanksClient>
|
||||
pub async fn start_client<C>(transport: C) -> Result<BanksClient, BanksClientError>
|
||||
where
|
||||
C: Transport<ClientMessage<BanksRequest>, Response<BanksResponse>> + Send + 'static,
|
||||
{
|
||||
Ok(BanksClient {
|
||||
inner: TarpcClient::new(client::Config::default(), transport).spawn()?,
|
||||
inner: TarpcClient::new(client::Config::default(), transport).spawn(),
|
||||
})
|
||||
}
|
||||
|
||||
pub async fn start_tcp_client<T: ToSocketAddrs>(addr: T) -> io::Result<BanksClient> {
|
||||
pub async fn start_tcp_client<T: ToSocketAddrs>(addr: T) -> Result<BanksClient, BanksClientError> {
|
||||
let transport = tcp::connect(addr, Bincode::default).await?;
|
||||
Ok(BanksClient {
|
||||
inner: TarpcClient::new(client::Config::default(), transport).spawn()?,
|
||||
inner: TarpcClient::new(client::Config::default(), transport).spawn(),
|
||||
})
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use solana_banks_server::banks_server::start_local_server;
|
||||
use solana_runtime::{
|
||||
bank::Bank, bank_forks::BankForks, commitment::BlockCommitmentCache,
|
||||
genesis_utils::create_genesis_config,
|
||||
use {
|
||||
super::*,
|
||||
solana_banks_server::banks_server::start_local_server,
|
||||
solana_runtime::{
|
||||
bank::Bank, bank_forks::BankForks, commitment::BlockCommitmentCache,
|
||||
genesis_utils::create_genesis_config,
|
||||
},
|
||||
solana_sdk::{message::Message, signature::Signer, system_instruction},
|
||||
std::sync::{Arc, RwLock},
|
||||
tarpc::transport,
|
||||
tokio::{runtime::Runtime, time::sleep},
|
||||
};
|
||||
use solana_sdk::{message::Message, signature::Signer, system_instruction};
|
||||
use std::sync::{Arc, RwLock};
|
||||
use tarpc::transport;
|
||||
use tokio::{runtime::Runtime, time::sleep};
|
||||
|
||||
#[test]
|
||||
fn test_banks_client_new() {
|
||||
@ -331,13 +487,13 @@ mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_banks_server_transfer_via_server() -> io::Result<()> {
|
||||
fn test_banks_server_transfer_via_server() -> Result<(), BanksClientError> {
|
||||
// This test shows the preferred way to interact with BanksServer.
|
||||
// It creates a runtime explicitly (no globals via tokio macros) and calls
|
||||
// `runtime.block_on()` just once, to run all the async code.
|
||||
|
||||
let genesis = create_genesis_config(10);
|
||||
let bank = Bank::new(&genesis.genesis_config);
|
||||
let bank = Bank::new_for_tests(&genesis.genesis_config);
|
||||
let slot = bank.slot();
|
||||
let block_commitment_cache = Arc::new(RwLock::new(
|
||||
BlockCommitmentCache::new_for_tests_with_slots(slot, slot),
|
||||
@ -350,10 +506,12 @@ mod tests {
|
||||
let message = Message::new(&[instruction], Some(&mint_pubkey));
|
||||
|
||||
Runtime::new()?.block_on(async {
|
||||
let client_transport = start_local_server(bank_forks, block_commitment_cache).await;
|
||||
let client_transport =
|
||||
start_local_server(bank_forks, block_commitment_cache, Duration::from_millis(1))
|
||||
.await;
|
||||
let mut banks_client = start_client(client_transport).await?;
|
||||
|
||||
let recent_blockhash = banks_client.get_recent_blockhash().await?;
|
||||
let recent_blockhash = banks_client.get_latest_blockhash().await?;
|
||||
let transaction = Transaction::new(&[&genesis.mint_keypair], message, recent_blockhash);
|
||||
banks_client.process_transaction(transaction).await.unwrap();
|
||||
assert_eq!(banks_client.get_balance(bob_pubkey).await?, 1);
|
||||
@ -362,13 +520,13 @@ mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_banks_server_transfer_via_client() -> io::Result<()> {
|
||||
fn test_banks_server_transfer_via_client() -> Result<(), BanksClientError> {
|
||||
// The caller may not want to hold the connection open until the transaction
|
||||
// is processed (or blockhash expires). In this test, we verify the
|
||||
// server-side functionality is available to the client.
|
||||
|
||||
let genesis = create_genesis_config(10);
|
||||
let bank = Bank::new(&genesis.genesis_config);
|
||||
let bank = Bank::new_for_tests(&genesis.genesis_config);
|
||||
let slot = bank.slot();
|
||||
let block_commitment_cache = Arc::new(RwLock::new(
|
||||
BlockCommitmentCache::new_for_tests_with_slots(slot, slot),
|
||||
@ -377,13 +535,18 @@ mod tests {
|
||||
|
||||
let mint_pubkey = &genesis.mint_keypair.pubkey();
|
||||
let bob_pubkey = solana_sdk::pubkey::new_rand();
|
||||
let instruction = system_instruction::transfer(&mint_pubkey, &bob_pubkey, 1);
|
||||
let message = Message::new(&[instruction], Some(&mint_pubkey));
|
||||
let instruction = system_instruction::transfer(mint_pubkey, &bob_pubkey, 1);
|
||||
let message = Message::new(&[instruction], Some(mint_pubkey));
|
||||
|
||||
Runtime::new()?.block_on(async {
|
||||
let client_transport = start_local_server(bank_forks, block_commitment_cache).await;
|
||||
let client_transport =
|
||||
start_local_server(bank_forks, block_commitment_cache, Duration::from_millis(1))
|
||||
.await;
|
||||
let mut banks_client = start_client(client_transport).await?;
|
||||
let (_, recent_blockhash, last_valid_slot) = banks_client.get_fees().await?;
|
||||
let (recent_blockhash, last_valid_block_height) = banks_client
|
||||
.get_latest_blockhash_with_commitment(CommitmentLevel::default())
|
||||
.await?
|
||||
.unwrap();
|
||||
let transaction = Transaction::new(&[&genesis.mint_keypair], message, recent_blockhash);
|
||||
let signature = transaction.signatures[0];
|
||||
banks_client.send_transaction(transaction).await?;
|
||||
@ -391,8 +554,8 @@ mod tests {
|
||||
let mut status = banks_client.get_transaction_status(signature).await?;
|
||||
|
||||
while status.is_none() {
|
||||
let root_slot = banks_client.get_root_slot().await?;
|
||||
if root_slot > last_valid_slot {
|
||||
let root_block_height = banks_client.get_root_block_height().await?;
|
||||
if root_block_height > last_valid_block_height {
|
||||
break;
|
||||
}
|
||||
sleep(Duration::from_millis(100)).await;
|
||||
|
@ -1,22 +1,18 @@
|
||||
[package]
|
||||
name = "solana-banks-interface"
|
||||
version = "1.6.0"
|
||||
version = "1.10.0"
|
||||
description = "Solana banks RPC interface"
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
documentation = "https://docs.rs/solana-banks-interface"
|
||||
edition = "2018"
|
||||
edition = "2021"
|
||||
|
||||
[dependencies]
|
||||
mio = "0.7.6"
|
||||
serde = { version = "1.0.122", features = ["derive"] }
|
||||
solana-sdk = { path = "../sdk", version = "1.6.0" }
|
||||
tarpc = { version = "0.24.1", features = ["full"] }
|
||||
|
||||
[dev-dependencies]
|
||||
tokio = { version = "1.1", features = ["full"] }
|
||||
serde = { version = "1.0.134", features = ["derive"] }
|
||||
solana-sdk = { path = "../sdk", version = "=1.10.0" }
|
||||
tarpc = { version = "0.27.2", features = ["full"] }
|
||||
|
||||
[lib]
|
||||
crate-type = ["lib"]
|
||||
|
@ -1,13 +1,18 @@
|
||||
use serde::{Deserialize, Serialize};
|
||||
use solana_sdk::{
|
||||
account::Account,
|
||||
clock::Slot,
|
||||
commitment_config::CommitmentLevel,
|
||||
fee_calculator::FeeCalculator,
|
||||
hash::Hash,
|
||||
pubkey::Pubkey,
|
||||
signature::Signature,
|
||||
transaction::{self, Transaction, TransactionError},
|
||||
#![allow(deprecated)]
|
||||
|
||||
use {
|
||||
serde::{Deserialize, Serialize},
|
||||
solana_sdk::{
|
||||
account::Account,
|
||||
clock::Slot,
|
||||
commitment_config::CommitmentLevel,
|
||||
fee_calculator::FeeCalculator,
|
||||
hash::Hash,
|
||||
message::Message,
|
||||
pubkey::Pubkey,
|
||||
signature::Signature,
|
||||
transaction::{self, Transaction, TransactionError},
|
||||
},
|
||||
};
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
|
||||
@ -25,15 +30,37 @@ pub struct TransactionStatus {
|
||||
pub confirmation_status: Option<TransactionConfirmationStatus>,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct TransactionSimulationDetails {
|
||||
pub logs: Vec<String>,
|
||||
pub units_consumed: u64,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
|
||||
pub struct BanksTransactionResultWithSimulation {
|
||||
pub result: Option<transaction::Result<()>>,
|
||||
pub simulation_details: Option<TransactionSimulationDetails>,
|
||||
}
|
||||
|
||||
#[tarpc::service]
|
||||
pub trait Banks {
|
||||
async fn send_transaction_with_context(transaction: Transaction);
|
||||
#[deprecated(
|
||||
since = "1.9.0",
|
||||
note = "Please use `get_fee_for_message_with_commitment_and_context` instead"
|
||||
)]
|
||||
async fn get_fees_with_commitment_and_context(
|
||||
commitment: CommitmentLevel,
|
||||
) -> (FeeCalculator, Hash, Slot);
|
||||
async fn get_transaction_status_with_context(signature: Signature)
|
||||
-> Option<TransactionStatus>;
|
||||
async fn get_slot_with_context(commitment: CommitmentLevel) -> Slot;
|
||||
async fn get_block_height_with_context(commitment: CommitmentLevel) -> u64;
|
||||
async fn process_transaction_with_preflight_and_commitment_and_context(
|
||||
transaction: Transaction,
|
||||
commitment: CommitmentLevel,
|
||||
) -> BanksTransactionResultWithSimulation;
|
||||
async fn process_transaction_with_commitment_and_context(
|
||||
transaction: Transaction,
|
||||
commitment: CommitmentLevel,
|
||||
@ -42,12 +69,22 @@ pub trait Banks {
|
||||
address: Pubkey,
|
||||
commitment: CommitmentLevel,
|
||||
) -> Option<Account>;
|
||||
async fn get_latest_blockhash_with_context() -> Hash;
|
||||
async fn get_latest_blockhash_with_commitment_and_context(
|
||||
commitment: CommitmentLevel,
|
||||
) -> Option<(Hash, u64)>;
|
||||
async fn get_fee_for_message_with_commitment_and_context(
|
||||
commitment: CommitmentLevel,
|
||||
message: Message,
|
||||
) -> Option<u64>;
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use tarpc::{client, transport};
|
||||
use {
|
||||
super::*,
|
||||
tarpc::{client, transport},
|
||||
};
|
||||
|
||||
#[test]
|
||||
fn test_banks_client_new() {
|
||||
|
@ -1,25 +1,24 @@
|
||||
[package]
|
||||
name = "solana-banks-server"
|
||||
version = "1.6.0"
|
||||
version = "1.10.0"
|
||||
description = "Solana banks server"
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
documentation = "https://docs.rs/solana-banks-server"
|
||||
edition = "2018"
|
||||
edition = "2021"
|
||||
|
||||
[dependencies]
|
||||
bincode = "1.3.1"
|
||||
bincode = "1.3.3"
|
||||
crossbeam-channel = "0.5"
|
||||
futures = "0.3"
|
||||
log = "0.4.11"
|
||||
mio = "0.7.6"
|
||||
solana-banks-interface = { path = "../banks-interface", version = "1.6.0" }
|
||||
solana-runtime = { path = "../runtime", version = "1.6.0" }
|
||||
solana-sdk = { path = "../sdk", version = "1.6.0" }
|
||||
solana-metrics = { path = "../metrics", version = "1.6.0" }
|
||||
tarpc = { version = "0.24.1", features = ["full"] }
|
||||
tokio = { version = "1.1", features = ["full"] }
|
||||
solana-banks-interface = { path = "../banks-interface", version = "=1.10.0" }
|
||||
solana-runtime = { path = "../runtime", version = "=1.10.0" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.10.0" }
|
||||
solana-send-transaction-service = { path = "../send-transaction-service", version = "=1.10.0" }
|
||||
tarpc = { version = "0.27.2", features = ["full"] }
|
||||
tokio = { version = "1", features = ["full"] }
|
||||
tokio-serde = { version = "0.8", features = ["bincode"] }
|
||||
tokio-stream = "0.1"
|
||||
|
||||
|
@ -1,48 +1,57 @@
|
||||
use crate::send_transaction_service::{SendTransactionService, TransactionInfo};
|
||||
use bincode::{deserialize, serialize};
|
||||
use futures::{
|
||||
future,
|
||||
prelude::stream::{self, StreamExt},
|
||||
};
|
||||
use solana_banks_interface::{
|
||||
Banks, BanksRequest, BanksResponse, TransactionConfirmationStatus, TransactionStatus,
|
||||
};
|
||||
use solana_runtime::{bank::Bank, bank_forks::BankForks, commitment::BlockCommitmentCache};
|
||||
use solana_sdk::{
|
||||
account::Account,
|
||||
clock::Slot,
|
||||
commitment_config::CommitmentLevel,
|
||||
fee_calculator::FeeCalculator,
|
||||
hash::Hash,
|
||||
pubkey::Pubkey,
|
||||
signature::Signature,
|
||||
transaction::{self, Transaction},
|
||||
};
|
||||
use std::{
|
||||
io,
|
||||
net::{Ipv4Addr, SocketAddr},
|
||||
sync::{
|
||||
mpsc::{channel, Receiver, Sender},
|
||||
Arc, RwLock,
|
||||
use {
|
||||
bincode::{deserialize, serialize},
|
||||
crossbeam_channel::{unbounded, Receiver, Sender},
|
||||
futures::{future, prelude::stream::StreamExt},
|
||||
solana_banks_interface::{
|
||||
Banks, BanksRequest, BanksResponse, BanksTransactionResultWithSimulation,
|
||||
TransactionConfirmationStatus, TransactionSimulationDetails, TransactionStatus,
|
||||
},
|
||||
thread::Builder,
|
||||
time::Duration,
|
||||
solana_runtime::{
|
||||
bank::{Bank, TransactionSimulationResult},
|
||||
bank_forks::BankForks,
|
||||
commitment::BlockCommitmentCache,
|
||||
},
|
||||
solana_sdk::{
|
||||
account::Account,
|
||||
clock::Slot,
|
||||
commitment_config::CommitmentLevel,
|
||||
feature_set::FeatureSet,
|
||||
fee_calculator::FeeCalculator,
|
||||
hash::Hash,
|
||||
message::{Message, SanitizedMessage},
|
||||
pubkey::Pubkey,
|
||||
signature::Signature,
|
||||
transaction::{self, SanitizedTransaction, Transaction},
|
||||
},
|
||||
solana_send_transaction_service::{
|
||||
send_transaction_service::{SendTransactionService, TransactionInfo},
|
||||
tpu_info::NullTpuInfo,
|
||||
},
|
||||
std::{
|
||||
convert::TryFrom,
|
||||
io,
|
||||
net::{Ipv4Addr, SocketAddr},
|
||||
sync::{Arc, RwLock},
|
||||
thread::Builder,
|
||||
time::Duration,
|
||||
},
|
||||
tarpc::{
|
||||
context::Context,
|
||||
serde_transport::tcp,
|
||||
server::{self, incoming::Incoming, Channel},
|
||||
transport::{self, channel::UnboundedChannel},
|
||||
ClientMessage, Response,
|
||||
},
|
||||
tokio::time::sleep,
|
||||
tokio_serde::formats::Bincode,
|
||||
};
|
||||
use tarpc::{
|
||||
context::Context,
|
||||
rpc::{transport::channel::UnboundedChannel, ClientMessage, Response},
|
||||
serde_transport::tcp,
|
||||
server::{self, Channel, Handler},
|
||||
transport,
|
||||
};
|
||||
use tokio::time::sleep;
|
||||
use tokio_serde::formats::Bincode;
|
||||
|
||||
#[derive(Clone)]
|
||||
struct BanksServer {
|
||||
bank_forks: Arc<RwLock<BankForks>>,
|
||||
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
|
||||
transaction_sender: Sender<TransactionInfo>,
|
||||
poll_signature_status_sleep_duration: Duration,
|
||||
}
|
||||
|
||||
impl BanksServer {
|
||||
@ -54,11 +63,13 @@ impl BanksServer {
|
||||
bank_forks: Arc<RwLock<BankForks>>,
|
||||
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
|
||||
transaction_sender: Sender<TransactionInfo>,
|
||||
poll_signature_status_sleep_duration: Duration,
|
||||
) -> Self {
|
||||
Self {
|
||||
bank_forks,
|
||||
block_commitment_cache,
|
||||
transaction_sender,
|
||||
poll_signature_status_sleep_duration,
|
||||
}
|
||||
}
|
||||
|
||||
@ -73,7 +84,7 @@ impl BanksServer {
|
||||
.map(|info| deserialize(&info.wire_transaction).unwrap())
|
||||
.collect();
|
||||
let bank = bank_forks.read().unwrap().working_bank();
|
||||
let _ = bank.process_transactions(&transactions);
|
||||
let _ = bank.try_process_transactions(transactions.iter());
|
||||
}
|
||||
}
|
||||
|
||||
@ -81,8 +92,9 @@ impl BanksServer {
|
||||
fn new_loopback(
|
||||
bank_forks: Arc<RwLock<BankForks>>,
|
||||
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
|
||||
poll_signature_status_sleep_duration: Duration,
|
||||
) -> Self {
|
||||
let (transaction_sender, transaction_receiver) = channel();
|
||||
let (transaction_sender, transaction_receiver) = unbounded();
|
||||
let bank = bank_forks.read().unwrap().working_bank();
|
||||
let slot = bank.slot();
|
||||
{
|
||||
@ -95,7 +107,12 @@ impl BanksServer {
|
||||
.name("solana-bank-forks-client".to_string())
|
||||
.spawn(move || Self::run(server_bank_forks, transaction_receiver))
|
||||
.unwrap();
|
||||
Self::new(bank_forks, block_commitment_cache, transaction_sender)
|
||||
Self::new(
|
||||
bank_forks,
|
||||
block_commitment_cache,
|
||||
transaction_sender,
|
||||
poll_signature_status_sleep_duration,
|
||||
)
|
||||
}
|
||||
|
||||
fn slot(&self, commitment: CommitmentLevel) -> Slot {
|
||||
@ -113,16 +130,16 @@ impl BanksServer {
|
||||
self,
|
||||
signature: &Signature,
|
||||
blockhash: &Hash,
|
||||
last_valid_slot: Slot,
|
||||
last_valid_block_height: u64,
|
||||
commitment: CommitmentLevel,
|
||||
) -> Option<transaction::Result<()>> {
|
||||
let mut status = self
|
||||
.bank(commitment)
|
||||
.get_signature_status_with_blockhash(signature, blockhash);
|
||||
while status.is_none() {
|
||||
sleep(Duration::from_millis(200)).await;
|
||||
sleep(self.poll_signature_status_sleep_duration).await;
|
||||
let bank = self.bank(commitment);
|
||||
if bank.slot() > last_valid_slot {
|
||||
if bank.block_height() > last_valid_block_height {
|
||||
break;
|
||||
}
|
||||
status = bank.get_signature_status_with_blockhash(signature, blockhash);
|
||||
@ -131,10 +148,13 @@ impl BanksServer {
|
||||
}
|
||||
}
|
||||
|
||||
fn verify_transaction(transaction: &Transaction) -> transaction::Result<()> {
|
||||
fn verify_transaction(
|
||||
transaction: &Transaction,
|
||||
feature_set: &Arc<FeatureSet>,
|
||||
) -> transaction::Result<()> {
|
||||
if let Err(err) = transaction.verify() {
|
||||
Err(err)
|
||||
} else if let Err(err) = transaction.verify_precompiles() {
|
||||
} else if let Err(err) = transaction.verify_precompiles(feature_set) {
|
||||
Err(err)
|
||||
} else {
|
||||
Ok(())
|
||||
@ -145,16 +165,21 @@ fn verify_transaction(transaction: &Transaction) -> transaction::Result<()> {
|
||||
impl Banks for BanksServer {
|
||||
async fn send_transaction_with_context(self, _: Context, transaction: Transaction) {
|
||||
let blockhash = &transaction.message.recent_blockhash;
|
||||
let last_valid_slot = self
|
||||
let last_valid_block_height = self
|
||||
.bank_forks
|
||||
.read()
|
||||
.unwrap()
|
||||
.root_bank()
|
||||
.get_blockhash_last_valid_slot(&blockhash)
|
||||
.get_blockhash_last_valid_block_height(blockhash)
|
||||
.unwrap();
|
||||
let signature = transaction.signatures.get(0).cloned().unwrap_or_default();
|
||||
let info =
|
||||
TransactionInfo::new(signature, serialize(&transaction).unwrap(), last_valid_slot);
|
||||
let info = TransactionInfo::new(
|
||||
signature,
|
||||
serialize(&transaction).unwrap(),
|
||||
last_valid_block_height,
|
||||
None,
|
||||
None,
|
||||
);
|
||||
self.transaction_sender.send(info).unwrap();
|
||||
}
|
||||
|
||||
@ -162,11 +187,18 @@ impl Banks for BanksServer {
|
||||
self,
|
||||
_: Context,
|
||||
commitment: CommitmentLevel,
|
||||
) -> (FeeCalculator, Hash, Slot) {
|
||||
) -> (FeeCalculator, Hash, u64) {
|
||||
let bank = self.bank(commitment);
|
||||
let (blockhash, fee_calculator) = bank.last_blockhash_with_fee_calculator();
|
||||
let last_valid_slot = bank.get_blockhash_last_valid_slot(&blockhash).unwrap();
|
||||
(fee_calculator, blockhash, last_valid_slot)
|
||||
let blockhash = bank.last_blockhash();
|
||||
let lamports_per_signature = bank.get_lamports_per_signature();
|
||||
let last_valid_block_height = bank
|
||||
.get_blockhash_last_valid_block_height(&blockhash)
|
||||
.unwrap();
|
||||
(
|
||||
FeeCalculator::new(lamports_per_signature),
|
||||
blockhash,
|
||||
last_valid_block_height,
|
||||
)
|
||||
}
|
||||
|
||||
async fn get_transaction_status_with_context(
|
||||
@ -209,29 +241,76 @@ impl Banks for BanksServer {
|
||||
self.slot(commitment)
|
||||
}
|
||||
|
||||
async fn get_block_height_with_context(self, _: Context, commitment: CommitmentLevel) -> u64 {
|
||||
self.bank(commitment).block_height()
|
||||
}
|
||||
|
||||
async fn process_transaction_with_preflight_and_commitment_and_context(
|
||||
self,
|
||||
ctx: Context,
|
||||
transaction: Transaction,
|
||||
commitment: CommitmentLevel,
|
||||
) -> BanksTransactionResultWithSimulation {
|
||||
let sanitized_transaction =
|
||||
match SanitizedTransaction::try_from_legacy_transaction(transaction.clone()) {
|
||||
Err(err) => {
|
||||
return BanksTransactionResultWithSimulation {
|
||||
result: Some(Err(err)),
|
||||
simulation_details: None,
|
||||
};
|
||||
}
|
||||
Ok(tx) => tx,
|
||||
};
|
||||
if let TransactionSimulationResult {
|
||||
result: Err(err),
|
||||
logs,
|
||||
post_simulation_accounts: _,
|
||||
units_consumed,
|
||||
} = self
|
||||
.bank(commitment)
|
||||
.simulate_transaction_unchecked(sanitized_transaction)
|
||||
{
|
||||
return BanksTransactionResultWithSimulation {
|
||||
result: Some(Err(err)),
|
||||
simulation_details: Some(TransactionSimulationDetails {
|
||||
logs,
|
||||
units_consumed,
|
||||
}),
|
||||
};
|
||||
}
|
||||
BanksTransactionResultWithSimulation {
|
||||
result: self
|
||||
.process_transaction_with_commitment_and_context(ctx, transaction, commitment)
|
||||
.await,
|
||||
simulation_details: None,
|
||||
}
|
||||
}
|
||||
|
||||
async fn process_transaction_with_commitment_and_context(
|
||||
self,
|
||||
_: Context,
|
||||
transaction: Transaction,
|
||||
commitment: CommitmentLevel,
|
||||
) -> Option<transaction::Result<()>> {
|
||||
if let Err(err) = verify_transaction(&transaction) {
|
||||
if let Err(err) = verify_transaction(&transaction, &self.bank(commitment).feature_set) {
|
||||
return Some(Err(err));
|
||||
}
|
||||
|
||||
let blockhash = &transaction.message.recent_blockhash;
|
||||
let last_valid_slot = self
|
||||
.bank_forks
|
||||
.read()
|
||||
.unwrap()
|
||||
.root_bank()
|
||||
.get_blockhash_last_valid_slot(blockhash)
|
||||
let last_valid_block_height = self
|
||||
.bank(commitment)
|
||||
.get_blockhash_last_valid_block_height(blockhash)
|
||||
.unwrap();
|
||||
let signature = transaction.signatures.get(0).cloned().unwrap_or_default();
|
||||
let info =
|
||||
TransactionInfo::new(signature, serialize(&transaction).unwrap(), last_valid_slot);
|
||||
let info = TransactionInfo::new(
|
||||
signature,
|
||||
serialize(&transaction).unwrap(),
|
||||
last_valid_block_height,
|
||||
None,
|
||||
None,
|
||||
);
|
||||
self.transaction_sender.send(info).unwrap();
|
||||
self.poll_signature_status(&signature, blockhash, last_valid_slot, commitment)
|
||||
self.poll_signature_status(&signature, blockhash, last_valid_block_height, commitment)
|
||||
.await
|
||||
}
|
||||
|
||||
@ -244,17 +323,47 @@ impl Banks for BanksServer {
|
||||
let bank = self.bank(commitment);
|
||||
bank.get_account(&address).map(Account::from)
|
||||
}
|
||||
|
||||
async fn get_latest_blockhash_with_context(self, _: Context) -> Hash {
|
||||
let bank = self.bank(CommitmentLevel::default());
|
||||
bank.last_blockhash()
|
||||
}
|
||||
|
||||
async fn get_latest_blockhash_with_commitment_and_context(
|
||||
self,
|
||||
_: Context,
|
||||
commitment: CommitmentLevel,
|
||||
) -> Option<(Hash, u64)> {
|
||||
let bank = self.bank(commitment);
|
||||
let blockhash = bank.last_blockhash();
|
||||
let last_valid_block_height = bank.get_blockhash_last_valid_block_height(&blockhash)?;
|
||||
Some((blockhash, last_valid_block_height))
|
||||
}
|
||||
|
||||
async fn get_fee_for_message_with_commitment_and_context(
|
||||
self,
|
||||
_: Context,
|
||||
commitment: CommitmentLevel,
|
||||
message: Message,
|
||||
) -> Option<u64> {
|
||||
let bank = self.bank(commitment);
|
||||
let sanitized_message = SanitizedMessage::try_from(message).ok()?;
|
||||
bank.get_fee_for_message(&sanitized_message)
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn start_local_server(
|
||||
bank_forks: Arc<RwLock<BankForks>>,
|
||||
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
|
||||
poll_signature_status_sleep_duration: Duration,
|
||||
) -> UnboundedChannel<Response<BanksResponse>, ClientMessage<BanksRequest>> {
|
||||
let banks_server = BanksServer::new_loopback(bank_forks, block_commitment_cache);
|
||||
let banks_server = BanksServer::new_loopback(
|
||||
bank_forks,
|
||||
block_commitment_cache,
|
||||
poll_signature_status_sleep_duration,
|
||||
);
|
||||
let (client_transport, server_transport) = transport::channel::unbounded();
|
||||
let server = server::new(server::Config::default())
|
||||
.incoming(stream::once(future::ready(server_transport)))
|
||||
.respond_with(banks_server.serve());
|
||||
let server = server::BaseChannel::with_defaults(server_transport).execute(banks_server.serve());
|
||||
tokio::spawn(server);
|
||||
client_transport
|
||||
}
|
||||
@ -281,13 +390,24 @@ pub async fn start_tcp_server(
|
||||
// serve is generated by the service attribute. It takes as input any type implementing
|
||||
// the generated Banks trait.
|
||||
.map(move |chan| {
|
||||
let (sender, receiver) = channel();
|
||||
let (sender, receiver) = unbounded();
|
||||
|
||||
SendTransactionService::new(tpu_addr, &bank_forks, receiver);
|
||||
SendTransactionService::new::<NullTpuInfo>(
|
||||
tpu_addr,
|
||||
&bank_forks,
|
||||
None,
|
||||
receiver,
|
||||
5_000,
|
||||
0,
|
||||
);
|
||||
|
||||
let server =
|
||||
BanksServer::new(bank_forks.clone(), block_commitment_cache.clone(), sender);
|
||||
chan.respond_with(server.serve()).execute()
|
||||
let server = BanksServer::new(
|
||||
bank_forks.clone(),
|
||||
block_commitment_cache.clone(),
|
||||
sender,
|
||||
Duration::from_millis(200),
|
||||
);
|
||||
chan.execute(server.serve())
|
||||
})
|
||||
// Max 10 channels.
|
||||
.buffer_unordered(10)
|
||||
|
@ -1,7 +1,3 @@
|
||||
#![allow(clippy::integer_arithmetic)]
|
||||
pub mod banks_server;
|
||||
pub mod rpc_banks_service;
|
||||
pub mod send_transaction_service;
|
||||
|
||||
#[macro_use]
|
||||
extern crate solana_metrics;
|
||||
|
@ -1,21 +1,23 @@
|
||||
//! The `rpc_banks_service` module implements the Solana Banks RPC API.
|
||||
|
||||
use crate::banks_server::start_tcp_server;
|
||||
use futures::{future::FutureExt, pin_mut, prelude::stream::StreamExt, select};
|
||||
use solana_runtime::{bank_forks::BankForks, commitment::BlockCommitmentCache};
|
||||
use std::{
|
||||
net::SocketAddr,
|
||||
sync::{
|
||||
atomic::{AtomicBool, Ordering},
|
||||
Arc, RwLock,
|
||||
use {
|
||||
crate::banks_server::start_tcp_server,
|
||||
futures::{future::FutureExt, pin_mut, prelude::stream::StreamExt, select},
|
||||
solana_runtime::{bank_forks::BankForks, commitment::BlockCommitmentCache},
|
||||
std::{
|
||||
net::SocketAddr,
|
||||
sync::{
|
||||
atomic::{AtomicBool, Ordering},
|
||||
Arc, RwLock,
|
||||
},
|
||||
thread::{self, Builder, JoinHandle},
|
||||
},
|
||||
thread::{self, Builder, JoinHandle},
|
||||
tokio::{
|
||||
runtime::Runtime,
|
||||
time::{self, Duration},
|
||||
},
|
||||
tokio_stream::wrappers::IntervalStream,
|
||||
};
|
||||
use tokio::{
|
||||
runtime::Runtime,
|
||||
time::{self, Duration},
|
||||
};
|
||||
use tokio_stream::wrappers::IntervalStream;
|
||||
|
||||
pub struct RpcBanksService {
|
||||
thread_hdl: JoinHandle<()>,
|
||||
@ -101,12 +103,11 @@ impl RpcBanksService {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use solana_runtime::bank::Bank;
|
||||
use {super::*, solana_runtime::bank::Bank};
|
||||
|
||||
#[test]
|
||||
fn test_rpc_banks_server_exit() {
|
||||
let bank_forks = Arc::new(RwLock::new(BankForks::new(Bank::default())));
|
||||
let bank_forks = Arc::new(RwLock::new(BankForks::new(Bank::default_for_tests())));
|
||||
let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default()));
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let addr = "127.0.0.1:0".parse().unwrap();
|
||||
|
@ -1,343 +0,0 @@
|
||||
// TODO: Merge this implementation with the one at `core/src/send_transaction_service.rs`
|
||||
use log::*;
|
||||
use solana_metrics::{datapoint_warn, inc_new_counter_info};
|
||||
use solana_runtime::{bank::Bank, bank_forks::BankForks};
|
||||
use solana_sdk::{clock::Slot, signature::Signature};
|
||||
use std::{
|
||||
collections::HashMap,
|
||||
net::{SocketAddr, UdpSocket},
|
||||
sync::{
|
||||
mpsc::{Receiver, RecvTimeoutError},
|
||||
Arc, RwLock,
|
||||
},
|
||||
thread::{self, Builder, JoinHandle},
|
||||
time::{Duration, Instant},
|
||||
};
|
||||
|
||||
/// Maximum size of the transaction queue
|
||||
const MAX_TRANSACTION_QUEUE_SIZE: usize = 10_000; // This seems like a lot but maybe it needs to be bigger one day
|
||||
|
||||
pub struct SendTransactionService {
|
||||
thread: JoinHandle<()>,
|
||||
}
|
||||
|
||||
pub struct TransactionInfo {
|
||||
pub signature: Signature,
|
||||
pub wire_transaction: Vec<u8>,
|
||||
pub last_valid_slot: Slot,
|
||||
}
|
||||
|
||||
impl TransactionInfo {
|
||||
pub fn new(signature: Signature, wire_transaction: Vec<u8>, last_valid_slot: Slot) -> Self {
|
||||
Self {
|
||||
signature,
|
||||
wire_transaction,
|
||||
last_valid_slot,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Default, Debug, PartialEq)]
|
||||
struct ProcessTransactionsResult {
|
||||
rooted: u64,
|
||||
expired: u64,
|
||||
retried: u64,
|
||||
failed: u64,
|
||||
retained: u64,
|
||||
}
|
||||
|
||||
impl SendTransactionService {
|
||||
pub fn new(
|
||||
tpu_address: SocketAddr,
|
||||
bank_forks: &Arc<RwLock<BankForks>>,
|
||||
receiver: Receiver<TransactionInfo>,
|
||||
) -> Self {
|
||||
let thread = Self::retry_thread(receiver, bank_forks.clone(), tpu_address);
|
||||
Self { thread }
|
||||
}
|
||||
|
||||
fn retry_thread(
|
||||
receiver: Receiver<TransactionInfo>,
|
||||
bank_forks: Arc<RwLock<BankForks>>,
|
||||
tpu_address: SocketAddr,
|
||||
) -> JoinHandle<()> {
|
||||
let mut last_status_check = Instant::now();
|
||||
let mut transactions = HashMap::new();
|
||||
let send_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
|
||||
|
||||
Builder::new()
|
||||
.name("send-tx-svc".to_string())
|
||||
.spawn(move || loop {
|
||||
match receiver.recv_timeout(Duration::from_secs(1)) {
|
||||
Err(RecvTimeoutError::Disconnected) => break,
|
||||
Err(RecvTimeoutError::Timeout) => {}
|
||||
Ok(transaction_info) => {
|
||||
Self::send_transaction(
|
||||
&send_socket,
|
||||
&tpu_address,
|
||||
&transaction_info.wire_transaction,
|
||||
);
|
||||
if transactions.len() < MAX_TRANSACTION_QUEUE_SIZE {
|
||||
transactions.insert(transaction_info.signature, transaction_info);
|
||||
} else {
|
||||
datapoint_warn!("send_transaction_service-queue-overflow");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if Instant::now().duration_since(last_status_check).as_secs() >= 5 {
|
||||
if !transactions.is_empty() {
|
||||
datapoint_info!(
|
||||
"send_transaction_service-queue-size",
|
||||
("len", transactions.len(), i64)
|
||||
);
|
||||
let bank_forks = bank_forks.read().unwrap();
|
||||
let root_bank = bank_forks.root_bank();
|
||||
let working_bank = bank_forks.working_bank();
|
||||
|
||||
let _result = Self::process_transactions(
|
||||
&working_bank,
|
||||
&root_bank,
|
||||
&send_socket,
|
||||
&tpu_address,
|
||||
&mut transactions,
|
||||
);
|
||||
}
|
||||
last_status_check = Instant::now();
|
||||
}
|
||||
})
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
fn process_transactions(
|
||||
working_bank: &Arc<Bank>,
|
||||
root_bank: &Arc<Bank>,
|
||||
send_socket: &UdpSocket,
|
||||
tpu_address: &SocketAddr,
|
||||
transactions: &mut HashMap<Signature, TransactionInfo>,
|
||||
) -> ProcessTransactionsResult {
|
||||
let mut result = ProcessTransactionsResult::default();
|
||||
|
||||
transactions.retain(|signature, transaction_info| {
|
||||
if root_bank.has_signature(signature) {
|
||||
info!("Transaction is rooted: {}", signature);
|
||||
result.rooted += 1;
|
||||
inc_new_counter_info!("send_transaction_service-rooted", 1);
|
||||
false
|
||||
} else if transaction_info.last_valid_slot < root_bank.slot() {
|
||||
info!("Dropping expired transaction: {}", signature);
|
||||
result.expired += 1;
|
||||
inc_new_counter_info!("send_transaction_service-expired", 1);
|
||||
false
|
||||
} else {
|
||||
match working_bank.get_signature_status_slot(signature) {
|
||||
None => {
|
||||
// Transaction is unknown to the working bank, it might have been
|
||||
// dropped or landed in another fork. Re-send it
|
||||
info!("Retrying transaction: {}", signature);
|
||||
result.retried += 1;
|
||||
inc_new_counter_info!("send_transaction_service-retry", 1);
|
||||
Self::send_transaction(
|
||||
&send_socket,
|
||||
&tpu_address,
|
||||
&transaction_info.wire_transaction,
|
||||
);
|
||||
true
|
||||
}
|
||||
Some((_slot, status)) => {
|
||||
if status.is_err() {
|
||||
info!("Dropping failed transaction: {}", signature);
|
||||
result.failed += 1;
|
||||
inc_new_counter_info!("send_transaction_service-failed", 1);
|
||||
false
|
||||
} else {
|
||||
result.retained += 1;
|
||||
true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
result
|
||||
}
|
||||
|
||||
fn send_transaction(
|
||||
send_socket: &UdpSocket,
|
||||
tpu_address: &SocketAddr,
|
||||
wire_transaction: &[u8],
|
||||
) {
|
||||
if let Err(err) = send_socket.send_to(wire_transaction, tpu_address) {
|
||||
warn!("Failed to send transaction to {}: {:?}", tpu_address, err);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn join(self) -> thread::Result<()> {
|
||||
self.thread.join()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use solana_sdk::{
|
||||
genesis_config::create_genesis_config, pubkey::Pubkey, signature::Signer,
|
||||
system_transaction,
|
||||
};
|
||||
use std::sync::mpsc::channel;
|
||||
|
||||
#[test]
|
||||
fn service_exit() {
|
||||
let tpu_address = "127.0.0.1:0".parse().unwrap();
|
||||
let bank = Bank::default();
|
||||
let bank_forks = Arc::new(RwLock::new(BankForks::new(bank)));
|
||||
let (sender, receiver) = channel();
|
||||
|
||||
let send_tranaction_service =
|
||||
SendTransactionService::new(tpu_address, &bank_forks, receiver);
|
||||
|
||||
drop(sender);
|
||||
send_tranaction_service.join().unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn process_transactions() {
|
||||
let (genesis_config, mint_keypair) = create_genesis_config(4);
|
||||
let bank = Bank::new(&genesis_config);
|
||||
let bank_forks = Arc::new(RwLock::new(BankForks::new(bank)));
|
||||
let send_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
|
||||
let tpu_address = "127.0.0.1:0".parse().unwrap();
|
||||
|
||||
let root_bank = Arc::new(Bank::new_from_parent(
|
||||
&bank_forks.read().unwrap().working_bank(),
|
||||
&Pubkey::default(),
|
||||
1,
|
||||
));
|
||||
let rooted_signature = root_bank
|
||||
.transfer(1, &mint_keypair, &mint_keypair.pubkey())
|
||||
.unwrap();
|
||||
|
||||
let working_bank = Arc::new(Bank::new_from_parent(&root_bank, &Pubkey::default(), 2));
|
||||
|
||||
let non_rooted_signature = working_bank
|
||||
.transfer(2, &mint_keypair, &mint_keypair.pubkey())
|
||||
.unwrap();
|
||||
|
||||
let failed_signature = {
|
||||
let blockhash = working_bank.last_blockhash();
|
||||
let transaction =
|
||||
system_transaction::transfer(&mint_keypair, &Pubkey::default(), 1, blockhash);
|
||||
let signature = transaction.signatures[0];
|
||||
working_bank.process_transaction(&transaction).unwrap_err();
|
||||
signature
|
||||
};
|
||||
|
||||
let mut transactions = HashMap::new();
|
||||
|
||||
info!("Expired transactions are dropped..");
|
||||
transactions.insert(
|
||||
Signature::default(),
|
||||
TransactionInfo::new(Signature::default(), vec![], root_bank.slot() - 1),
|
||||
);
|
||||
let result = SendTransactionService::process_transactions(
|
||||
&working_bank,
|
||||
&root_bank,
|
||||
&send_socket,
|
||||
&tpu_address,
|
||||
&mut transactions,
|
||||
);
|
||||
assert!(transactions.is_empty());
|
||||
assert_eq!(
|
||||
result,
|
||||
ProcessTransactionsResult {
|
||||
expired: 1,
|
||||
..ProcessTransactionsResult::default()
|
||||
}
|
||||
);
|
||||
|
||||
info!("Rooted transactions are dropped...");
|
||||
transactions.insert(
|
||||
rooted_signature,
|
||||
TransactionInfo::new(rooted_signature, vec![], working_bank.slot()),
|
||||
);
|
||||
let result = SendTransactionService::process_transactions(
|
||||
&working_bank,
|
||||
&root_bank,
|
||||
&send_socket,
|
||||
&tpu_address,
|
||||
&mut transactions,
|
||||
);
|
||||
assert!(transactions.is_empty());
|
||||
assert_eq!(
|
||||
result,
|
||||
ProcessTransactionsResult {
|
||||
rooted: 1,
|
||||
..ProcessTransactionsResult::default()
|
||||
}
|
||||
);
|
||||
|
||||
info!("Failed transactions are dropped...");
|
||||
transactions.insert(
|
||||
failed_signature,
|
||||
TransactionInfo::new(failed_signature, vec![], working_bank.slot()),
|
||||
);
|
||||
let result = SendTransactionService::process_transactions(
|
||||
&working_bank,
|
||||
&root_bank,
|
||||
&send_socket,
|
||||
&tpu_address,
|
||||
&mut transactions,
|
||||
);
|
||||
assert!(transactions.is_empty());
|
||||
assert_eq!(
|
||||
result,
|
||||
ProcessTransactionsResult {
|
||||
failed: 1,
|
||||
..ProcessTransactionsResult::default()
|
||||
}
|
||||
);
|
||||
|
||||
info!("Non-rooted transactions are kept...");
|
||||
transactions.insert(
|
||||
non_rooted_signature,
|
||||
TransactionInfo::new(non_rooted_signature, vec![], working_bank.slot()),
|
||||
);
|
||||
let result = SendTransactionService::process_transactions(
|
||||
&working_bank,
|
||||
&root_bank,
|
||||
&send_socket,
|
||||
&tpu_address,
|
||||
&mut transactions,
|
||||
);
|
||||
assert_eq!(transactions.len(), 1);
|
||||
assert_eq!(
|
||||
result,
|
||||
ProcessTransactionsResult {
|
||||
retained: 1,
|
||||
..ProcessTransactionsResult::default()
|
||||
}
|
||||
);
|
||||
transactions.clear();
|
||||
|
||||
info!("Unknown transactions are retried...");
|
||||
transactions.insert(
|
||||
Signature::default(),
|
||||
TransactionInfo::new(Signature::default(), vec![], working_bank.slot()),
|
||||
);
|
||||
let result = SendTransactionService::process_transactions(
|
||||
&working_bank,
|
||||
&root_bank,
|
||||
&send_socket,
|
||||
&tpu_address,
|
||||
&mut transactions,
|
||||
);
|
||||
assert_eq!(transactions.len(), 1);
|
||||
assert_eq!(
|
||||
result,
|
||||
ProcessTransactionsResult {
|
||||
retried: 1,
|
||||
..ProcessTransactionsResult::default()
|
||||
}
|
||||
);
|
||||
}
|
||||
}
|
4
bench-exchange/.gitignore
vendored
4
bench-exchange/.gitignore
vendored
@ -1,4 +0,0 @@
|
||||
/target/
|
||||
/config/
|
||||
/config-local/
|
||||
/farf/
|
@ -1,38 +0,0 @@
|
||||
[package]
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
edition = "2018"
|
||||
name = "solana-bench-exchange"
|
||||
version = "1.6.0"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
publish = false
|
||||
|
||||
[dependencies]
|
||||
clap = "2.33.1"
|
||||
itertools = "0.9.0"
|
||||
log = "0.4.11"
|
||||
num-derive = "0.3"
|
||||
num-traits = "0.2"
|
||||
rand = "0.7.0"
|
||||
rayon = "1.5.0"
|
||||
serde_json = "1.0.56"
|
||||
serde_yaml = "0.8.13"
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.6.0" }
|
||||
solana-core = { path = "../core", version = "1.6.0" }
|
||||
solana-genesis = { path = "../genesis", version = "1.6.0" }
|
||||
solana-client = { path = "../client", version = "1.6.0" }
|
||||
solana-faucet = { path = "../faucet", version = "1.6.0" }
|
||||
solana-exchange-program = { path = "../programs/exchange", version = "1.6.0" }
|
||||
solana-logger = { path = "../logger", version = "1.6.0" }
|
||||
solana-metrics = { path = "../metrics", version = "1.6.0" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.6.0" }
|
||||
solana-runtime = { path = "../runtime", version = "1.6.0" }
|
||||
solana-sdk = { path = "../sdk", version = "1.6.0" }
|
||||
solana-version = { path = "../version", version = "1.6.0" }
|
||||
|
||||
[dev-dependencies]
|
||||
solana-local-cluster = { path = "../local-cluster", version = "1.6.0" }
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
@ -1,479 +0,0 @@
|
||||
# token-exchange
|
||||
Solana Token Exchange Bench
|
||||
|
||||
If you can't wait; jump to [Running the exchange](#Running-the-exchange) to
|
||||
learn how to start and interact with the exchange.
|
||||
|
||||
### Table of Contents
|
||||
[Overview](#Overview)<br>
|
||||
[Premise](#Premise)<br>
|
||||
[Exchange startup](#Exchange-startup)<br>
|
||||
[Order Requests](#Trade-requests)<br>
|
||||
[Order Cancellations](#Trade-cancellations)<br>
|
||||
[Trade swap](#Trade-swap)<br>
|
||||
[Exchange program operations](#Exchange-program-operations)<br>
|
||||
[Quotes and OHLCV](#Quotes-and-OHLCV)<br>
|
||||
[Investor strategies](#Investor-strategies)<br>
|
||||
[Running the exchange](#Running-the-exchange)<br>
|
||||
|
||||
## Overview
|
||||
|
||||
An exchange is a marketplace where one asset can be traded for another. This
|
||||
demo demonstrates one way to host an exchange on the Solana blockchain by
|
||||
emulating a currency exchange.
|
||||
|
||||
The assets are virtual tokens held by investors who may post order requests to
|
||||
the exchange. A Matcher monitors the exchange and posts swap requests for
|
||||
matching orders. All the transactions can execute concurrently.
|
||||
|
||||
## Premise
|
||||
|
||||
- Exchange
|
||||
- An exchange is a marketplace where one asset can be traded for another.
|
||||
The exchange in this demo is the on-chain program that implements the
|
||||
tokens and the policies for trading those tokens.
|
||||
- Token
|
||||
- A virtual asset that can be owned, traded, and holds virtual intrinsic value
|
||||
compared to other assets. There are four types of tokens in this demo, A,
|
||||
B, C, D. Each one may be traded for another.
|
||||
- Token account
|
||||
- An account owned by the exchange that holds a quantity of one type of token.
|
||||
- Account request
|
||||
- A request to create a token account
|
||||
- Token request
|
||||
- A request to deposit tokens of a particular type into a token account.
|
||||
- Asset pair
|
||||
- A struct with fields Base and Quote, representing the two assets which make up a
|
||||
trading pair, which themselves are Tokens. The Base or 'primary' asset is the
|
||||
numerator and the Quote is the denominator for pricing purposes.
|
||||
- Order side
|
||||
- Describes which side of the market an investor wants to place a trade on. Options
|
||||
are "Bid" or "Ask", where a bid represents an offer to purchase the Base asset of
|
||||
the AssetPair for a sum of the Quote Asset and an Ask is an offer to sell Base asset
|
||||
for the Quote asset.
|
||||
- Price ratio
|
||||
- An expression of the relative prices of two tokens. Calculated with the Base
|
||||
Asset as the numerator and the Quote Asset as the denominator. Ratios are
|
||||
represented as fixed point numbers. The fixed point scaler is defined in
|
||||
[exchange_state.rs](https://github.com/solana-labs/solana/blob/c2fdd1362a029dcf89c8907c562d2079d977df11/programs/exchange_api/src/exchange_state.rs#L7)
|
||||
- Order request
|
||||
- A Solana transaction sent by a trader to the exchange to submit an order.
|
||||
Order requests are made up of the token pair, the order side (bid or ask),
|
||||
quantity of the primary token, the price ratio, and the two token accounts
|
||||
to be credited/deducted. An example trade request looks like "T AB 5 2"
|
||||
which reads "Exchange 5 A tokens to B tokens at a price ratio of 1:2" A fulfilled trade would result in 5 A tokens
|
||||
deducted and 10 B tokens credited to the trade initiator's token accounts.
|
||||
Successful order requests result in an order.
|
||||
- Order
|
||||
- The result of a successful order request. orders are stored in
|
||||
accounts owned by the submitter of the order request. They can only be
|
||||
canceled by their owner but can be used by anyone in a trade swap. They
|
||||
contain the same information as the order request.
|
||||
- Price spread
|
||||
- The difference between the two matching orders. The spread is the
|
||||
profit of the Matcher initiating the swap request.
|
||||
- Match requirements
|
||||
- Policies that result in a successful trade swap.
|
||||
- Match request
|
||||
- A request to fill two complementary orders (bid/ask), resulting if successful,
|
||||
in a trade being created.
|
||||
- Trade
|
||||
- A successful trade is created from two matching orders that meet
|
||||
swap requirements which are submitted in a Match Request by a Matcher and
|
||||
executed by the exchange. A trade may not wholly satisfy one or both of the
|
||||
orders in which case the orders are adjusted appropriately. Upon execution,
|
||||
tokens are distributed to the traders' accounts and any overlap or
|
||||
"negative spread" between orders is deposited into the Matcher's profit
|
||||
account. All successful trades are recorded in the data of a new solana
|
||||
account for posterity.
|
||||
- Investor
|
||||
- Individual investors who hold a number of tokens and wish to trade them on
|
||||
the exchange. Investors operate as Solana thin clients who own a set of
|
||||
accounts containing tokens and/or order requests. Investors post
|
||||
transactions to the exchange in order to request tokens and post or cancel
|
||||
order requests.
|
||||
- Matcher
|
||||
- An agent who facilitates trading between investors. Matchers operate as
|
||||
Solana thin clients who monitor all the orders looking for a trade
|
||||
match. Once found, the Matcher issues a swap request to the exchange.
|
||||
Matchers are the engine of the exchange and are rewarded for their efforts by
|
||||
accumulating the price spreads of the swaps they initiate. Matchers also
|
||||
provide current bid/ask price and OHLCV (Open, High, Low, Close, Volume)
|
||||
information on demand via a public network port.
|
||||
- Transaction fees
|
||||
- Solana transaction fees are paid for by the transaction submitters who are
|
||||
the Investors and Matchers.
|
||||
|
||||
## Exchange startup
|
||||
|
||||
The exchange is up and running when it reaches a state where it can take
|
||||
investors' trades and Matchers' match requests. To achieve this state the
|
||||
following must occur in order:
|
||||
|
||||
- Start the Solana blockchain
|
||||
- Start the thin-client
|
||||
- The Matcher subscribes to change notifications for all the accounts owned by
|
||||
the exchange program id. The subscription is managed via Solana's JSON RPC
|
||||
interface.
|
||||
- The Matcher starts responding to queries for bid/ask price and OHLCV
|
||||
|
||||
The Matcher responding successfully to price and OHLCV requests is the signal to
|
||||
the investors that trades submitted after that point will be analyzed. <!--This
|
||||
is not ideal, and instead investors should be able to submit trades at any time,
|
||||
and the Matcher could come and go without missing a trade. One way to achieve
|
||||
this is for the Matcher to read the current state of all accounts looking for all
|
||||
open orders.-->
|
||||
|
||||
Investors will initially query the exchange to discover their current balance
|
||||
for each type of token. If the investor does not already have an account for
|
||||
each type of token, they will submit account requests. Matcher as well will
|
||||
request accounts to hold the tokens they earn by initiating trade swaps.
|
||||
|
||||
```rust
|
||||
/// Supported token types
|
||||
pub enum Token {
|
||||
A,
|
||||
B,
|
||||
C,
|
||||
D,
|
||||
}
|
||||
|
||||
/// Supported token pairs
|
||||
pub enum TokenPair {
|
||||
AB,
|
||||
AC,
|
||||
AD,
|
||||
BC,
|
||||
BD,
|
||||
CD,
|
||||
}
|
||||
|
||||
pub enum ExchangeInstruction {
|
||||
/// New token account
|
||||
/// key 0 - Signer
|
||||
/// key 1 - New token account
|
||||
AccountRequest,
|
||||
}
|
||||
|
||||
/// Token accounts are populated with this structure
|
||||
pub struct TokenAccountInfo {
|
||||
/// Investor who owns this account
|
||||
pub owner: Pubkey,
|
||||
/// Current number of tokens this account holds
|
||||
pub tokens: Tokens,
|
||||
}
|
||||
```
|
||||
|
||||
For this demo investors or Matcher can request more tokens from the exchange at
|
||||
any time by submitting token requests. In non-demos, an exchange of this type
|
||||
would provide another way to exchange a 3rd party asset into tokens.
|
||||
|
||||
To request tokens, investors submit transfer requests:
|
||||
|
||||
```rust
|
||||
pub enum ExchangeInstruction {
|
||||
/// Transfer tokens between two accounts
|
||||
/// key 0 - Account to transfer tokens to
|
||||
/// key 1 - Account to transfer tokens from. This can be the exchange program itself,
|
||||
/// the exchange has a limitless number of tokens it can transfer.
|
||||
TransferRequest(Token, u64),
|
||||
}
|
||||
```
|
||||
|
||||
## Order Requests
|
||||
|
||||
When an investor decides to exchange a token of one type for another, they
|
||||
submit a transaction to the Solana Blockchain containing an order request, which,
|
||||
if successful, is turned into an order. orders do not expire but are
|
||||
cancellable. <!-- orders should have a timestamp to enable trade
|
||||
expiration --> When an order is created, tokens are deducted from a token
|
||||
account and the order acts as an escrow. The tokens are held until the
|
||||
order is fulfilled or canceled. If the direction is `To`, then the number
|
||||
of `tokens` are deducted from the primary account, if `From` then `tokens`
|
||||
multiplied by `price` are deducted from the secondary account. orders are
|
||||
no longer valid when the number of `tokens` goes to zero, at which point they
|
||||
can no longer be used. <!-- Could support refilling orders, so order
|
||||
accounts are refilled rather than accumulating -->
|
||||
|
||||
```rust
|
||||
/// Direction of the exchange between two tokens in a pair
|
||||
pub enum Direction {
|
||||
/// Trade first token type (primary) in the pair 'To' the second
|
||||
To,
|
||||
/// Trade first token type in the pair 'From' the second (secondary)
|
||||
From,
|
||||
}
|
||||
|
||||
pub struct OrderRequestInfo {
|
||||
/// Direction of trade
|
||||
pub direction: Direction,
|
||||
|
||||
/// Token pair to trade
|
||||
pub pair: TokenPair,
|
||||
|
||||
/// Number of tokens to exchange; refers to the primary or the secondary depending on the direction
|
||||
pub tokens: u64,
|
||||
|
||||
/// The price ratio the primary price over the secondary price. The primary price is fixed
|
||||
/// and equal to the variable `SCALER`.
|
||||
pub price: u64,
|
||||
|
||||
/// Token account to deposit tokens on successful swap
|
||||
pub dst_account: Pubkey,
|
||||
}
|
||||
|
||||
pub enum ExchangeInstruction {
|
||||
/// order request
|
||||
/// key 0 - Signer
|
||||
/// key 1 - Account in which to record the swap
|
||||
/// key 2 - Token account associated with this trade
|
||||
TradeRequest(TradeRequestInfo),
|
||||
}
|
||||
|
||||
/// Trade accounts are populated with this structure
|
||||
pub struct TradeOrderInfo {
|
||||
/// Owner of the order
|
||||
pub owner: Pubkey,
|
||||
/// Direction of the exchange
|
||||
pub direction: Direction,
|
||||
/// Token pair indicating two tokens to exchange, first is primary
|
||||
pub pair: TokenPair,
|
||||
/// Number of tokens to exchange; primary or secondary depending on direction
|
||||
pub tokens: u64,
|
||||
/// Scaled price of the secondary token given the primary is equal to the scale value
|
||||
/// If scale is 1 and price is 2 then ratio is 1:2 or 1 primary token for 2 secondary tokens
|
||||
pub price: u64,
|
||||
/// account which the tokens were source from. The trade account holds the tokens in escrow
|
||||
/// until either one or more part of a swap or the trade is canceled.
|
||||
pub src_account: Pubkey,
|
||||
/// account which the tokens the tokens will be deposited into on a successful trade
|
||||
pub dst_account: Pubkey,
|
||||
}
|
||||
```
|
||||
|
||||
## Order cancellations
|
||||
|
||||
An investor may cancel a trade at anytime, but only trades they own. If the
|
||||
cancellation is successful, any tokens held in escrow are returned to the
|
||||
account from which they came.
|
||||
|
||||
```rust
|
||||
pub enum ExchangeInstruction {
|
||||
/// order cancellation
|
||||
/// key 0 - Signer
|
||||
/// key 1 -order to cancel
|
||||
TradeCancellation,
|
||||
}
|
||||
```
|
||||
|
||||
## Trade swaps
|
||||
|
||||
The Matcher is monitoring the accounts assigned to the exchange program and
|
||||
building a trade-order table. The order table is used to identify
|
||||
matching orders which could be fulfilled. When a match is found the
|
||||
Matcher should issue a swap request. Swap requests may not satisfy the entirety
|
||||
of either order, but the exchange will greedily fulfill it. Any leftover tokens
|
||||
in either account will keep the order valid for further swap requests in
|
||||
the future.
|
||||
|
||||
Matching orders are defined by the following swap requirements:
|
||||
|
||||
- Opposite polarity (one `To` and one `From`)
|
||||
- Operate on the same token pair
|
||||
- The price ratio of the `From` order is greater than or equal to the `To` order
|
||||
- There are sufficient tokens to perform the trade
|
||||
|
||||
Orders can be written in the following format:
|
||||
|
||||
`investor direction pair quantity price-ratio`
|
||||
|
||||
For example:
|
||||
|
||||
- `1 T AB 2 1`
|
||||
- Investor 1 wishes to exchange 2 A tokens to B tokens at a ratio of 1 A to 1
|
||||
B
|
||||
- `2 F AC 6 1.2`
|
||||
- Investor 2 wishes to exchange A tokens from 6 B tokens at a ratio of 1 A
|
||||
from 1.2 B
|
||||
|
||||
An order table could look something like the following. Notice how the columns
|
||||
are sorted low to high and high to low, respectively. Prices are dramatic and
|
||||
whole for clarity.
|
||||
|
||||
|Row| To | From |
|
||||
|---|-------------|------------|
|
||||
| 1 | 1 T AB 2 4 | 2 F AB 2 8 |
|
||||
| 2 | 1 T AB 1 4 | 2 F AB 2 8 |
|
||||
| 3 | 1 T AB 6 6 | 2 F AB 2 7 |
|
||||
| 4 | 1 T AB 2 8 | 2 F AB 3 6 |
|
||||
| 5 | 1 T AB 2 10 | 2 F AB 1 5 |
|
||||
|
||||
As part of a successful swap request, the exchange will credit tokens to the
|
||||
Matcher's account equal to the difference in the price ratios or the two orders.
|
||||
These tokens are considered the Matcher's profit for initiating the trade.
|
||||
|
||||
The Matcher would initiate the following swap on the order table above:
|
||||
|
||||
- Row 1, To: Investor 1 trades 2 A tokens to 8 B tokens
|
||||
- Row 1, From: Investor 2 trades 2 A tokens from 8 B tokens
|
||||
- Matcher takes 8 B tokens as profit
|
||||
|
||||
Both row 1 trades are fully realized, table becomes:
|
||||
|
||||
|Row| To | From |
|
||||
|---|-------------|------------|
|
||||
| 1 | 1 T AB 1 4 | 2 F AB 2 8 |
|
||||
| 2 | 1 T AB 6 6 | 2 F AB 2 7 |
|
||||
| 3 | 1 T AB 2 8 | 2 F AB 3 6 |
|
||||
| 4 | 1 T AB 2 10 | 2 F AB 1 5 |
|
||||
|
||||
The Matcher would initiate the following swap:
|
||||
|
||||
- Row 1, To: Investor 1 trades 1 A token to 4 B tokens
|
||||
- Row 1, From: Investor 2 trades 1 A token from 4 B tokens
|
||||
- Matcher takes 4 B tokens as profit
|
||||
|
||||
Row 1 From is not fully realized, table becomes:
|
||||
|
||||
|Row| To | From |
|
||||
|---|-------------|------------|
|
||||
| 1 | 1 T AB 6 6 | 2 F AB 1 8 |
|
||||
| 2 | 1 T AB 2 8 | 2 F AB 2 7 |
|
||||
| 3 | 1 T AB 2 10 | 2 F AB 3 6 |
|
||||
| 4 | | 2 F AB 1 5 |
|
||||
|
||||
The Matcher would initiate the following swap:
|
||||
|
||||
- Row 1, To: Investor 1 trades 1 A token to 6 B tokens
|
||||
- Row 1, From: Investor 2 trades 1 A token from 6 B tokens
|
||||
- Matcher takes 2 B tokens as profit
|
||||
|
||||
Row 1 To is now fully realized, table becomes:
|
||||
|
||||
|Row| To | From |
|
||||
|---|-------------|------------|
|
||||
| 1 | 1 T AB 5 6 | 2 F AB 2 7 |
|
||||
| 2 | 1 T AB 2 8 | 2 F AB 3 5 |
|
||||
| 3 | 1 T AB 2 10 | 2 F AB 1 5 |
|
||||
|
||||
The Matcher would initiate the following last swap:
|
||||
|
||||
- Row 1, To: Investor 1 trades 2 A token to 12 B tokens
|
||||
- Row 1, From: Investor 2 trades 2 A token from 12 B tokens
|
||||
- Matcher takes 2 B tokens as profit
|
||||
|
||||
Table becomes:
|
||||
|
||||
|Row| To | From |
|
||||
|---|-------------|------------|
|
||||
| 1 | 1 T AB 3 6 | 2 F AB 3 5 |
|
||||
| 2 | 1 T AB 2 8 | 2 F AB 1 5 |
|
||||
| 3 | 1 T AB 2 10 | |
|
||||
|
||||
At this point the lowest To's price is larger than the largest From's price so
|
||||
no more swaps would be initiated until new orders came in.
|
||||
|
||||
```rust
|
||||
pub enum ExchangeInstruction {
|
||||
/// Trade swap request
|
||||
/// key 0 - Signer
|
||||
/// key 1 - Account in which to record the swap
|
||||
/// key 2 - 'To' order
|
||||
/// key 3 - `From` order
|
||||
/// key 4 - Token account associated with the To Trade
|
||||
/// key 5 - Token account associated with From trade
|
||||
/// key 6 - Token account in which to deposit the Matcher profit from the swap.
|
||||
SwapRequest,
|
||||
}
|
||||
|
||||
/// Swap accounts are populated with this structure
|
||||
pub struct TradeSwapInfo {
|
||||
/// Pair swapped
|
||||
pub pair: TokenPair,
|
||||
/// `To` order
|
||||
pub to_trade_order: Pubkey,
|
||||
/// `From` order
|
||||
pub from_trade_order: Pubkey,
|
||||
/// Number of primary tokens exchanged
|
||||
pub primary_tokens: u64,
|
||||
/// Price the primary tokens were exchanged for
|
||||
pub primary_price: u64,
|
||||
/// Number of secondary tokens exchanged
|
||||
pub secondary_tokens: u64,
|
||||
/// Price the secondary tokens were exchanged for
|
||||
pub secondary_price: u64,
|
||||
}
|
||||
```
|
||||
|
||||
## Exchange program operations
|
||||
|
||||
Putting all the commands together from above, the following operations will be
|
||||
supported by the on-chain exchange program:
|
||||
|
||||
```rust
|
||||
pub enum ExchangeInstruction {
|
||||
/// New token account
|
||||
/// key 0 - Signer
|
||||
/// key 1 - New token account
|
||||
AccountRequest,
|
||||
|
||||
/// Transfer tokens between two accounts
|
||||
/// key 0 - Account to transfer tokens to
|
||||
/// key 1 - Account to transfer tokens from. This can be the exchange program itself,
|
||||
/// the exchange has a limitless number of tokens it can transfer.
|
||||
TransferRequest(Token, u64),
|
||||
|
||||
/// order request
|
||||
/// key 0 - Signer
|
||||
/// key 1 - Account in which to record the swap
|
||||
/// key 2 - Token account associated with this trade
|
||||
TradeRequest(TradeRequestInfo),
|
||||
|
||||
/// order cancellation
|
||||
/// key 0 - Signer
|
||||
/// key 1 -order to cancel
|
||||
TradeCancellation,
|
||||
|
||||
/// Trade swap request
|
||||
/// key 0 - Signer
|
||||
/// key 1 - Account in which to record the swap
|
||||
/// key 2 - 'To' order
|
||||
/// key 3 - `From` order
|
||||
/// key 4 - Token account associated with the To Trade
|
||||
/// key 5 - Token account associated with From trade
|
||||
/// key 6 - Token account in which to deposit the Matcher profit from the swap.
|
||||
SwapRequest,
|
||||
}
|
||||
```
|
||||
|
||||
## Quotes and OHLCV
|
||||
|
||||
The Matcher will provide current bid/ask price quotes based on trade actively and
|
||||
also provide OHLCV based on some time window. The details of how the bid/ask
|
||||
price quotes are calculated are yet to be decided.
|
||||
|
||||
## Investor strategies
|
||||
|
||||
To make a compelling demo, the investors needs to provide interesting trade
|
||||
behavior. Something as simple as a randomly twiddled baseline would be a
|
||||
minimum starting point.
|
||||
|
||||
## Running the exchange
|
||||
|
||||
The exchange bench posts trades and swaps matches as fast as it can.
|
||||
|
||||
You might want to bump the duration up
|
||||
to 60 seconds and the batch size to 1000 for better numbers. You can modify those
|
||||
in client_demo/src/demo.rs::test_exchange_local_cluster.
|
||||
|
||||
The following command runs the bench:
|
||||
|
||||
```bash
|
||||
$ RUST_LOG=solana_bench_exchange=info cargo test --release -- --nocapture test_exchange_local_cluster
|
||||
```
|
||||
|
||||
To also see the cluster messages:
|
||||
|
||||
```bash
|
||||
$ RUST_LOG=solana_bench_exchange=info,solana=info cargo test --release -- --nocapture test_exchange_local_cluster
|
||||
```
|
File diff suppressed because it is too large
Load Diff
@ -1,221 +0,0 @@
|
||||
use clap::{crate_description, crate_name, value_t, App, Arg, ArgMatches};
|
||||
use solana_core::gen_keys::GenKeys;
|
||||
use solana_faucet::faucet::FAUCET_PORT;
|
||||
use solana_sdk::signature::{read_keypair_file, Keypair};
|
||||
use std::net::SocketAddr;
|
||||
use std::process::exit;
|
||||
use std::time::Duration;
|
||||
|
||||
pub struct Config {
|
||||
pub entrypoint_addr: SocketAddr,
|
||||
pub faucet_addr: SocketAddr,
|
||||
pub identity: Keypair,
|
||||
pub threads: usize,
|
||||
pub num_nodes: usize,
|
||||
pub duration: Duration,
|
||||
pub transfer_delay: u64,
|
||||
pub fund_amount: u64,
|
||||
pub batch_size: usize,
|
||||
pub chunk_size: usize,
|
||||
pub account_groups: usize,
|
||||
pub client_ids_and_stake_file: String,
|
||||
pub write_to_client_file: bool,
|
||||
pub read_from_client_file: bool,
|
||||
}
|
||||
|
||||
impl Default for Config {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
entrypoint_addr: SocketAddr::from(([127, 0, 0, 1], 8001)),
|
||||
faucet_addr: SocketAddr::from(([127, 0, 0, 1], FAUCET_PORT)),
|
||||
identity: Keypair::new(),
|
||||
num_nodes: 1,
|
||||
threads: 4,
|
||||
duration: Duration::new(u64::max_value(), 0),
|
||||
transfer_delay: 0,
|
||||
fund_amount: 100_000,
|
||||
batch_size: 100,
|
||||
chunk_size: 100,
|
||||
account_groups: 100,
|
||||
client_ids_and_stake_file: String::new(),
|
||||
write_to_client_file: false,
|
||||
read_from_client_file: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn build_args<'a, 'b>(version: &'b str) -> App<'a, 'b> {
|
||||
App::new(crate_name!())
|
||||
.about(crate_description!())
|
||||
.version(version)
|
||||
.arg(
|
||||
Arg::with_name("entrypoint")
|
||||
.short("n")
|
||||
.long("entrypoint")
|
||||
.value_name("HOST:PORT")
|
||||
.takes_value(true)
|
||||
.required(false)
|
||||
.default_value("127.0.0.1:8001")
|
||||
.help("Cluster entry point; defaults to 127.0.0.1:8001"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("faucet")
|
||||
.short("d")
|
||||
.long("faucet")
|
||||
.value_name("HOST:PORT")
|
||||
.takes_value(true)
|
||||
.required(false)
|
||||
.default_value("127.0.0.1:9900")
|
||||
.help("Location of the faucet; defaults to 127.0.0.1:9900"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("identity")
|
||||
.short("i")
|
||||
.long("identity")
|
||||
.value_name("PATH")
|
||||
.takes_value(true)
|
||||
.help("File containing a client identity (keypair)"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("threads")
|
||||
.long("threads")
|
||||
.value_name("<threads>")
|
||||
.takes_value(true)
|
||||
.required(false)
|
||||
.default_value("1")
|
||||
.help("Number of threads submitting transactions"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("num-nodes")
|
||||
.long("num-nodes")
|
||||
.value_name("NUM")
|
||||
.takes_value(true)
|
||||
.required(false)
|
||||
.default_value("1")
|
||||
.help("Wait for NUM nodes to converge"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("duration")
|
||||
.long("duration")
|
||||
.value_name("SECS")
|
||||
.takes_value(true)
|
||||
.default_value("60")
|
||||
.help("Seconds to run benchmark, then exit; default is forever"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("transfer-delay")
|
||||
.long("transfer-delay")
|
||||
.value_name("<delay>")
|
||||
.takes_value(true)
|
||||
.required(false)
|
||||
.default_value("0")
|
||||
.help("Delay between each chunk"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("fund-amount")
|
||||
.long("fund-amount")
|
||||
.value_name("<fund>")
|
||||
.takes_value(true)
|
||||
.required(false)
|
||||
.default_value("100000")
|
||||
.help("Number of lamports to fund to each signer"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("batch-size")
|
||||
.long("batch-size")
|
||||
.value_name("<batch>")
|
||||
.takes_value(true)
|
||||
.required(false)
|
||||
.default_value("1000")
|
||||
.help("Number of transactions before the signer rolls over"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("chunk-size")
|
||||
.long("chunk-size")
|
||||
.value_name("<cunk>")
|
||||
.takes_value(true)
|
||||
.required(false)
|
||||
.default_value("500")
|
||||
.help("Number of transactions to generate and send at a time"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("account-groups")
|
||||
.long("account-groups")
|
||||
.value_name("<groups>")
|
||||
.takes_value(true)
|
||||
.required(false)
|
||||
.default_value("10")
|
||||
.help("Number of account groups to cycle for each batch"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("write-client-keys")
|
||||
.long("write-client-keys")
|
||||
.value_name("FILENAME")
|
||||
.takes_value(true)
|
||||
.help("Generate client keys and stakes and write the list to YAML file"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("read-client-keys")
|
||||
.long("read-client-keys")
|
||||
.value_name("FILENAME")
|
||||
.takes_value(true)
|
||||
.help("Read client keys and stakes from the YAML file"),
|
||||
)
|
||||
}
|
||||
|
||||
#[allow(clippy::field_reassign_with_default)]
|
||||
pub fn extract_args(matches: &ArgMatches) -> Config {
|
||||
let mut args = Config::default();
|
||||
|
||||
args.entrypoint_addr = solana_net_utils::parse_host_port(
|
||||
matches.value_of("entrypoint").unwrap(),
|
||||
)
|
||||
.unwrap_or_else(|e| {
|
||||
eprintln!("failed to parse entrypoint address: {}", e);
|
||||
exit(1)
|
||||
});
|
||||
|
||||
args.faucet_addr = solana_net_utils::parse_host_port(matches.value_of("faucet").unwrap())
|
||||
.unwrap_or_else(|e| {
|
||||
eprintln!("failed to parse faucet address: {}", e);
|
||||
exit(1)
|
||||
});
|
||||
|
||||
if matches.is_present("identity") {
|
||||
args.identity = read_keypair_file(matches.value_of("identity").unwrap())
|
||||
.expect("can't read client identity");
|
||||
} else {
|
||||
args.identity = {
|
||||
let seed = [42_u8; 32];
|
||||
let mut rnd = GenKeys::new(seed);
|
||||
rnd.gen_keypair()
|
||||
};
|
||||
}
|
||||
args.threads = value_t!(matches.value_of("threads"), usize).expect("Failed to parse threads");
|
||||
args.num_nodes =
|
||||
value_t!(matches.value_of("num-nodes"), usize).expect("Failed to parse num-nodes");
|
||||
let duration = value_t!(matches.value_of("duration"), u64).expect("Failed to parse duration");
|
||||
args.duration = Duration::from_secs(duration);
|
||||
args.transfer_delay =
|
||||
value_t!(matches.value_of("transfer-delay"), u64).expect("Failed to parse transfer-delay");
|
||||
args.fund_amount =
|
||||
value_t!(matches.value_of("fund-amount"), u64).expect("Failed to parse fund-amount");
|
||||
args.batch_size =
|
||||
value_t!(matches.value_of("batch-size"), usize).expect("Failed to parse batch-size");
|
||||
args.chunk_size =
|
||||
value_t!(matches.value_of("chunk-size"), usize).expect("Failed to parse chunk-size");
|
||||
args.account_groups = value_t!(matches.value_of("account-groups"), usize)
|
||||
.expect("Failed to parse account-groups");
|
||||
|
||||
if let Some(s) = matches.value_of("write-client-keys") {
|
||||
args.write_to_client_file = true;
|
||||
args.client_ids_and_stake_file = s.to_string();
|
||||
}
|
||||
|
||||
if let Some(s) = matches.value_of("read-client-keys") {
|
||||
assert!(!args.write_to_client_file);
|
||||
args.read_from_client_file = true;
|
||||
args.client_ids_and_stake_file = s.to_string();
|
||||
}
|
||||
args
|
||||
}
|
@ -1,3 +0,0 @@
|
||||
pub mod bench;
|
||||
pub mod cli;
|
||||
mod order_book;
|
@ -1,83 +0,0 @@
|
||||
#![allow(clippy::integer_arithmetic)]
|
||||
pub mod bench;
|
||||
mod cli;
|
||||
pub mod order_book;
|
||||
|
||||
use crate::bench::{airdrop_lamports, create_client_accounts_file, do_bench_exchange, Config};
|
||||
use log::*;
|
||||
use solana_core::gossip_service::{discover_cluster, get_multi_client};
|
||||
use solana_sdk::signature::Signer;
|
||||
|
||||
fn main() {
|
||||
solana_logger::setup();
|
||||
solana_metrics::set_panic_hook("bench-exchange");
|
||||
|
||||
let matches = cli::build_args(solana_version::version!()).get_matches();
|
||||
let cli_config = cli::extract_args(&matches);
|
||||
|
||||
let cli::Config {
|
||||
entrypoint_addr,
|
||||
faucet_addr,
|
||||
identity,
|
||||
threads,
|
||||
num_nodes,
|
||||
duration,
|
||||
transfer_delay,
|
||||
fund_amount,
|
||||
batch_size,
|
||||
chunk_size,
|
||||
account_groups,
|
||||
client_ids_and_stake_file,
|
||||
write_to_client_file,
|
||||
read_from_client_file,
|
||||
..
|
||||
} = cli_config;
|
||||
|
||||
let config = Config {
|
||||
identity,
|
||||
threads,
|
||||
duration,
|
||||
transfer_delay,
|
||||
fund_amount,
|
||||
batch_size,
|
||||
chunk_size,
|
||||
account_groups,
|
||||
client_ids_and_stake_file,
|
||||
read_from_client_file,
|
||||
};
|
||||
|
||||
if write_to_client_file {
|
||||
create_client_accounts_file(
|
||||
&config.client_ids_and_stake_file,
|
||||
config.batch_size,
|
||||
config.account_groups,
|
||||
config.fund_amount,
|
||||
);
|
||||
} else {
|
||||
info!("Connecting to the cluster");
|
||||
let nodes = discover_cluster(&entrypoint_addr, num_nodes).unwrap_or_else(|_| {
|
||||
panic!("Failed to discover nodes");
|
||||
});
|
||||
|
||||
let (client, num_clients) = get_multi_client(&nodes);
|
||||
|
||||
info!("{} nodes found", num_clients);
|
||||
if num_clients < num_nodes {
|
||||
panic!("Error: Insufficient nodes discovered");
|
||||
}
|
||||
|
||||
if !read_from_client_file {
|
||||
info!("Funding keypair: {}", config.identity.pubkey());
|
||||
|
||||
let accounts_in_groups = batch_size * account_groups;
|
||||
const NUM_SIGNERS: u64 = 2;
|
||||
airdrop_lamports(
|
||||
&client,
|
||||
&faucet_addr,
|
||||
&config.identity,
|
||||
fund_amount * (accounts_in_groups + 1) as u64 * NUM_SIGNERS,
|
||||
);
|
||||
}
|
||||
do_bench_exchange(vec![client], config);
|
||||
}
|
||||
}
|
@ -1,134 +0,0 @@
|
||||
use itertools::EitherOrBoth::{Both, Left, Right};
|
||||
use itertools::Itertools;
|
||||
use log::*;
|
||||
use solana_exchange_program::exchange_state::*;
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
use std::cmp::Ordering;
|
||||
use std::collections::BinaryHeap;
|
||||
use std::{error, fmt};
|
||||
|
||||
#[derive(Clone, Debug, Eq, PartialEq)]
|
||||
pub struct ToOrder {
|
||||
pub pubkey: Pubkey,
|
||||
pub info: OrderInfo,
|
||||
}
|
||||
|
||||
impl Ord for ToOrder {
|
||||
fn cmp(&self, other: &Self) -> Ordering {
|
||||
other.info.price.cmp(&self.info.price)
|
||||
}
|
||||
}
|
||||
impl PartialOrd for ToOrder {
|
||||
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
|
||||
Some(self.cmp(other))
|
||||
}
|
||||
}
|
||||
#[derive(Clone, Debug, Eq, PartialEq)]
|
||||
pub struct FromOrder {
|
||||
pub pubkey: Pubkey,
|
||||
pub info: OrderInfo,
|
||||
}
|
||||
|
||||
impl Ord for FromOrder {
|
||||
fn cmp(&self, other: &Self) -> Ordering {
|
||||
self.info.price.cmp(&other.info.price)
|
||||
}
|
||||
}
|
||||
impl PartialOrd for FromOrder {
|
||||
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
|
||||
Some(self.cmp(other))
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct OrderBook {
|
||||
// TODO scale to x token types
|
||||
to_ab: BinaryHeap<ToOrder>,
|
||||
from_ab: BinaryHeap<FromOrder>,
|
||||
}
|
||||
impl fmt::Display for OrderBook {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
writeln!(
|
||||
f,
|
||||
"+-Order Book--------------------------+-------------------------------------+"
|
||||
)?;
|
||||
for (i, it) in self
|
||||
.to_ab
|
||||
.iter()
|
||||
.zip_longest(self.from_ab.iter())
|
||||
.enumerate()
|
||||
{
|
||||
match it {
|
||||
Both(to, from) => writeln!(
|
||||
f,
|
||||
"| T AB {:8} for {:8}/{:8} | F AB {:8} for {:8}/{:8} |{}",
|
||||
to.info.tokens,
|
||||
SCALER,
|
||||
to.info.price,
|
||||
from.info.tokens,
|
||||
SCALER,
|
||||
from.info.price,
|
||||
i
|
||||
)?,
|
||||
Left(to) => writeln!(
|
||||
f,
|
||||
"| T AB {:8} for {:8}/{:8} | |{}",
|
||||
to.info.tokens, SCALER, to.info.price, i
|
||||
)?,
|
||||
Right(from) => writeln!(
|
||||
f,
|
||||
"| | F AB {:8} for {:8}/{:8} |{}",
|
||||
from.info.tokens, SCALER, from.info.price, i
|
||||
)?,
|
||||
}
|
||||
}
|
||||
write!(
|
||||
f,
|
||||
"+-------------------------------------+-------------------------------------+"
|
||||
)?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl OrderBook {
|
||||
// TODO
|
||||
// pub fn cancel(&mut self, pubkey: Pubkey) -> Result<(), Box<dyn error::Error>> {
|
||||
// Ok(())
|
||||
// }
|
||||
pub fn push(&mut self, pubkey: Pubkey, info: OrderInfo) -> Result<(), Box<dyn error::Error>> {
|
||||
check_trade(info.side, info.tokens, info.price)?;
|
||||
match info.side {
|
||||
OrderSide::Ask => {
|
||||
self.to_ab.push(ToOrder { pubkey, info });
|
||||
}
|
||||
OrderSide::Bid => {
|
||||
self.from_ab.push(FromOrder { pubkey, info });
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
pub fn pop(&mut self) -> Option<(ToOrder, FromOrder)> {
|
||||
if let Some(pair) = Self::pop_pair(&mut self.to_ab, &mut self.from_ab) {
|
||||
return Some(pair);
|
||||
}
|
||||
None
|
||||
}
|
||||
pub fn get_num_outstanding(&self) -> (usize, usize) {
|
||||
(self.to_ab.len(), self.from_ab.len())
|
||||
}
|
||||
|
||||
fn pop_pair(
|
||||
to_ab: &mut BinaryHeap<ToOrder>,
|
||||
from_ab: &mut BinaryHeap<FromOrder>,
|
||||
) -> Option<(ToOrder, FromOrder)> {
|
||||
let to = to_ab.peek()?;
|
||||
let from = from_ab.peek()?;
|
||||
if from.info.price < to.info.price {
|
||||
debug!("Trade not viable");
|
||||
return None;
|
||||
}
|
||||
let to = to_ab.pop()?;
|
||||
let from = from_ab.pop()?;
|
||||
Some((to, from))
|
||||
}
|
||||
}
|
@ -1,115 +0,0 @@
|
||||
use log::*;
|
||||
use solana_bench_exchange::bench::{airdrop_lamports, do_bench_exchange, Config};
|
||||
use solana_core::{
|
||||
gossip_service::{discover_cluster, get_multi_client},
|
||||
validator::ValidatorConfig,
|
||||
};
|
||||
use solana_exchange_program::{
|
||||
exchange_processor::process_instruction, id, solana_exchange_program,
|
||||
};
|
||||
use solana_faucet::faucet::run_local_faucet_with_port;
|
||||
use solana_local_cluster::{
|
||||
local_cluster::{ClusterConfig, LocalCluster},
|
||||
validator_configs::make_identical_validator_configs,
|
||||
};
|
||||
use solana_runtime::{bank::Bank, bank_client::BankClient};
|
||||
use solana_sdk::{
|
||||
genesis_config::create_genesis_config,
|
||||
signature::{Keypair, Signer},
|
||||
};
|
||||
use std::{process::exit, sync::mpsc::channel, time::Duration};
|
||||
|
||||
#[test]
|
||||
#[ignore]
|
||||
fn test_exchange_local_cluster() {
|
||||
solana_logger::setup();
|
||||
|
||||
const NUM_NODES: usize = 1;
|
||||
|
||||
let config = Config {
|
||||
identity: Keypair::new(),
|
||||
duration: Duration::from_secs(1),
|
||||
fund_amount: 100_000,
|
||||
threads: 1,
|
||||
transfer_delay: 20, // 15
|
||||
batch_size: 100, // 1000
|
||||
chunk_size: 10, // 200
|
||||
account_groups: 1, // 10
|
||||
..Config::default()
|
||||
};
|
||||
let Config {
|
||||
fund_amount,
|
||||
batch_size,
|
||||
account_groups,
|
||||
..
|
||||
} = config;
|
||||
let accounts_in_groups = batch_size * account_groups;
|
||||
|
||||
let cluster = LocalCluster::new(&mut ClusterConfig {
|
||||
node_stakes: vec![100_000; NUM_NODES],
|
||||
cluster_lamports: 100_000_000_000_000,
|
||||
validator_configs: make_identical_validator_configs(&ValidatorConfig::default(), NUM_NODES),
|
||||
native_instruction_processors: [solana_exchange_program!()].to_vec(),
|
||||
..ClusterConfig::default()
|
||||
});
|
||||
|
||||
let faucet_keypair = Keypair::new();
|
||||
cluster.transfer(
|
||||
&cluster.funding_keypair,
|
||||
&faucet_keypair.pubkey(),
|
||||
2_000_000_000_000,
|
||||
);
|
||||
|
||||
let (addr_sender, addr_receiver) = channel();
|
||||
run_local_faucet_with_port(faucet_keypair, addr_sender, Some(1_000_000_000_000), 0);
|
||||
let faucet_addr = addr_receiver
|
||||
.recv_timeout(Duration::from_secs(2))
|
||||
.expect("run_local_faucet")
|
||||
.expect("faucet_addr");
|
||||
|
||||
info!("Connecting to the cluster");
|
||||
let nodes =
|
||||
discover_cluster(&cluster.entry_point_info.gossip, NUM_NODES).unwrap_or_else(|err| {
|
||||
error!("Failed to discover {} nodes: {:?}", NUM_NODES, err);
|
||||
exit(1);
|
||||
});
|
||||
|
||||
let (client, num_clients) = get_multi_client(&nodes);
|
||||
|
||||
info!("clients: {}", num_clients);
|
||||
assert!(num_clients >= NUM_NODES);
|
||||
|
||||
const NUM_SIGNERS: u64 = 2;
|
||||
airdrop_lamports(
|
||||
&client,
|
||||
&faucet_addr,
|
||||
&config.identity,
|
||||
fund_amount * (accounts_in_groups + 1) as u64 * NUM_SIGNERS,
|
||||
);
|
||||
|
||||
do_bench_exchange(vec![client], config);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_exchange_bank_client() {
|
||||
solana_logger::setup();
|
||||
let (genesis_config, identity) = create_genesis_config(100_000_000_000_000);
|
||||
let mut bank = Bank::new(&genesis_config);
|
||||
bank.add_builtin("exchange_program", id(), process_instruction);
|
||||
let clients = vec![BankClient::new(bank)];
|
||||
|
||||
do_bench_exchange(
|
||||
clients,
|
||||
Config {
|
||||
identity,
|
||||
duration: Duration::from_secs(1),
|
||||
fund_amount: 100_000,
|
||||
threads: 1,
|
||||
transfer_delay: 20, // 0;
|
||||
batch_size: 100, // 1500;
|
||||
chunk_size: 10, // 1500;
|
||||
account_groups: 1, // 50;
|
||||
..Config::default()
|
||||
},
|
||||
);
|
||||
}
|
@ -1,20 +1,19 @@
|
||||
[package]
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
edition = "2018"
|
||||
edition = "2021"
|
||||
name = "solana-bench-streamer"
|
||||
version = "1.6.0"
|
||||
version = "1.10.0"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
publish = false
|
||||
|
||||
[dependencies]
|
||||
crossbeam-channel = "0.5"
|
||||
clap = "2.33.1"
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.6.0" }
|
||||
solana-streamer = { path = "../streamer", version = "1.6.0" }
|
||||
solana-logger = { path = "../logger", version = "1.6.0" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.6.0" }
|
||||
solana-version = { path = "../version", version = "1.6.0" }
|
||||
solana-streamer = { path = "../streamer", version = "=1.10.0" }
|
||||
solana-net-utils = { path = "../net-utils", version = "=1.10.0" }
|
||||
solana-version = { path = "../version", version = "=1.10.0" }
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
||||
|
@ -1,32 +1,38 @@
|
||||
#![allow(clippy::integer_arithmetic)]
|
||||
use clap::{crate_description, crate_name, App, Arg};
|
||||
use solana_streamer::packet::{Packet, Packets, PacketsRecycler, PACKET_DATA_SIZE};
|
||||
use solana_streamer::streamer::{receiver, PacketReceiver};
|
||||
use std::cmp::max;
|
||||
use std::net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket};
|
||||
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
|
||||
use std::sync::mpsc::channel;
|
||||
use std::sync::Arc;
|
||||
use std::thread::sleep;
|
||||
use std::thread::{spawn, JoinHandle, Result};
|
||||
use std::time::Duration;
|
||||
use std::time::SystemTime;
|
||||
use {
|
||||
clap::{crate_description, crate_name, App, Arg},
|
||||
crossbeam_channel::unbounded,
|
||||
solana_streamer::{
|
||||
packet::{Packet, PacketBatch, PacketBatchRecycler, PACKET_DATA_SIZE},
|
||||
streamer::{receiver, PacketBatchReceiver},
|
||||
},
|
||||
std::{
|
||||
cmp::max,
|
||||
net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket},
|
||||
sync::{
|
||||
atomic::{AtomicBool, AtomicUsize, Ordering},
|
||||
Arc,
|
||||
},
|
||||
thread::{sleep, spawn, JoinHandle, Result},
|
||||
time::{Duration, SystemTime},
|
||||
},
|
||||
};
|
||||
|
||||
fn producer(addr: &SocketAddr, exit: Arc<AtomicBool>) -> JoinHandle<()> {
|
||||
let send = UdpSocket::bind("0.0.0.0:0").unwrap();
|
||||
let mut msgs = Packets::default();
|
||||
msgs.packets.resize(10, Packet::default());
|
||||
for w in msgs.packets.iter_mut() {
|
||||
let mut packet_batch = PacketBatch::default();
|
||||
packet_batch.packets.resize(10, Packet::default());
|
||||
for w in packet_batch.packets.iter_mut() {
|
||||
w.meta.size = PACKET_DATA_SIZE;
|
||||
w.meta.set_addr(&addr);
|
||||
w.meta.set_addr(addr);
|
||||
}
|
||||
let msgs = Arc::new(msgs);
|
||||
let packet_batch = Arc::new(packet_batch);
|
||||
spawn(move || loop {
|
||||
if exit.load(Ordering::Relaxed) {
|
||||
return;
|
||||
}
|
||||
let mut num = 0;
|
||||
for p in &msgs.packets {
|
||||
for p in &packet_batch.packets {
|
||||
let a = p.meta.addr();
|
||||
assert!(p.meta.size <= PACKET_DATA_SIZE);
|
||||
send.send_to(&p.data[..p.meta.size], &a).unwrap();
|
||||
@ -36,14 +42,14 @@ fn producer(addr: &SocketAddr, exit: Arc<AtomicBool>) -> JoinHandle<()> {
|
||||
})
|
||||
}
|
||||
|
||||
fn sink(exit: Arc<AtomicBool>, rvs: Arc<AtomicUsize>, r: PacketReceiver) -> JoinHandle<()> {
|
||||
fn sink(exit: Arc<AtomicBool>, rvs: Arc<AtomicUsize>, r: PacketBatchReceiver) -> JoinHandle<()> {
|
||||
spawn(move || loop {
|
||||
if exit.load(Ordering::Relaxed) {
|
||||
return;
|
||||
}
|
||||
let timer = Duration::new(1, 0);
|
||||
if let Ok(msgs) = r.recv_timeout(timer) {
|
||||
rvs.fetch_add(msgs.packets.len(), Ordering::Relaxed);
|
||||
if let Ok(packet_batch) = r.recv_timeout(timer) {
|
||||
rvs.fetch_add(packet_batch.packets.len(), Ordering::Relaxed);
|
||||
}
|
||||
})
|
||||
}
|
||||
@ -75,7 +81,7 @@ fn main() -> Result<()> {
|
||||
|
||||
let mut read_channels = Vec::new();
|
||||
let mut read_threads = Vec::new();
|
||||
let recycler = PacketsRecycler::new_without_limit("bench-streamer-recycler-shrink-stats");
|
||||
let recycler = PacketBatchRecycler::default();
|
||||
for _ in 0..num_sockets {
|
||||
let read = solana_net_utils::bind_to(ip_addr, port, false).unwrap();
|
||||
read.set_read_timeout(Some(Duration::new(1, 0))).unwrap();
|
||||
@ -83,7 +89,7 @@ fn main() -> Result<()> {
|
||||
addr = read.local_addr().unwrap();
|
||||
port = addr.port();
|
||||
|
||||
let (s_reader, r_reader) = channel();
|
||||
let (s_reader, r_reader) = unbounded();
|
||||
read_channels.push(r_reader);
|
||||
read_threads.push(receiver(
|
||||
Arc::new(read),
|
||||
@ -92,6 +98,7 @@ fn main() -> Result<()> {
|
||||
recycler.clone(),
|
||||
"bench-streamer-test",
|
||||
1,
|
||||
true,
|
||||
));
|
||||
}
|
||||
|
||||
|
@ -1,36 +1,37 @@
|
||||
[package]
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
edition = "2018"
|
||||
edition = "2021"
|
||||
name = "solana-bench-tps"
|
||||
version = "1.6.0"
|
||||
version = "1.10.0"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
publish = false
|
||||
|
||||
[dependencies]
|
||||
bincode = "1.3.1"
|
||||
clap = "2.33.1"
|
||||
log = "0.4.11"
|
||||
rayon = "1.5.0"
|
||||
serde_json = "1.0.56"
|
||||
serde_yaml = "0.8.13"
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.6.0" }
|
||||
solana-core = { path = "../core", version = "1.6.0" }
|
||||
solana-genesis = { path = "../genesis", version = "1.6.0" }
|
||||
solana-client = { path = "../client", version = "1.6.0" }
|
||||
solana-faucet = { path = "../faucet", version = "1.6.0" }
|
||||
solana-logger = { path = "../logger", version = "1.6.0" }
|
||||
solana-metrics = { path = "../metrics", version = "1.6.0" }
|
||||
solana-measure = { path = "../measure", version = "1.6.0" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.6.0" }
|
||||
solana-runtime = { path = "../runtime", version = "1.6.0" }
|
||||
solana-sdk = { path = "../sdk", version = "1.6.0" }
|
||||
solana-version = { path = "../version", version = "1.6.0" }
|
||||
crossbeam-channel = "0.5"
|
||||
log = "0.4.14"
|
||||
rayon = "1.5.1"
|
||||
serde_json = "1.0.78"
|
||||
serde_yaml = "0.8.23"
|
||||
solana-core = { path = "../core", version = "=1.10.0" }
|
||||
solana-genesis = { path = "../genesis", version = "=1.10.0" }
|
||||
solana-client = { path = "../client", version = "=1.10.0" }
|
||||
solana-faucet = { path = "../faucet", version = "=1.10.0" }
|
||||
solana-gossip = { path = "../gossip", version = "=1.10.0" }
|
||||
solana-logger = { path = "../logger", version = "=1.10.0" }
|
||||
solana-metrics = { path = "../metrics", version = "=1.10.0" }
|
||||
solana-measure = { path = "../measure", version = "=1.10.0" }
|
||||
solana-net-utils = { path = "../net-utils", version = "=1.10.0" }
|
||||
solana-runtime = { path = "../runtime", version = "=1.10.0" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.10.0" }
|
||||
solana-streamer = { path = "../streamer", version = "=1.10.0" }
|
||||
solana-version = { path = "../version", version = "=1.10.0" }
|
||||
|
||||
[dev-dependencies]
|
||||
serial_test = "0.4.0"
|
||||
solana-local-cluster = { path = "../local-cluster", version = "1.6.0" }
|
||||
serial_test = "0.5.1"
|
||||
solana-local-cluster = { path = "../local-cluster", version = "=1.10.0" }
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
||||
|
@ -1,34 +1,36 @@
|
||||
use crate::cli::Config;
|
||||
use log::*;
|
||||
use rayon::prelude::*;
|
||||
use solana_client::perf_utils::{sample_txs, SampleStats};
|
||||
use solana_core::gen_keys::GenKeys;
|
||||
use solana_faucet::faucet::request_airdrop_transaction;
|
||||
use solana_measure::measure::Measure;
|
||||
use solana_metrics::{self, datapoint_info};
|
||||
use solana_sdk::{
|
||||
client::Client,
|
||||
clock::{DEFAULT_S_PER_SLOT, MAX_PROCESSING_AGE},
|
||||
commitment_config::CommitmentConfig,
|
||||
fee_calculator::FeeCalculator,
|
||||
hash::Hash,
|
||||
message::Message,
|
||||
pubkey::Pubkey,
|
||||
signature::{Keypair, Signer},
|
||||
system_instruction, system_transaction,
|
||||
timing::{duration_as_ms, duration_as_s, duration_as_us, timestamp},
|
||||
transaction::Transaction,
|
||||
};
|
||||
use std::{
|
||||
collections::{HashSet, VecDeque},
|
||||
net::SocketAddr,
|
||||
process::exit,
|
||||
sync::{
|
||||
atomic::{AtomicBool, AtomicIsize, AtomicUsize, Ordering},
|
||||
Arc, Mutex, RwLock,
|
||||
use {
|
||||
crate::cli::Config,
|
||||
log::*,
|
||||
rayon::prelude::*,
|
||||
solana_client::perf_utils::{sample_txs, SampleStats},
|
||||
solana_core::gen_keys::GenKeys,
|
||||
solana_faucet::faucet::request_airdrop_transaction,
|
||||
solana_measure::measure::Measure,
|
||||
solana_metrics::{self, datapoint_info},
|
||||
solana_sdk::{
|
||||
client::Client,
|
||||
clock::{DEFAULT_MS_PER_SLOT, DEFAULT_S_PER_SLOT, MAX_PROCESSING_AGE},
|
||||
commitment_config::CommitmentConfig,
|
||||
hash::Hash,
|
||||
instruction::{AccountMeta, Instruction},
|
||||
message::Message,
|
||||
pubkey::Pubkey,
|
||||
signature::{Keypair, Signer},
|
||||
system_instruction, system_transaction,
|
||||
timing::{duration_as_ms, duration_as_s, duration_as_us, timestamp},
|
||||
transaction::Transaction,
|
||||
},
|
||||
std::{
|
||||
collections::{HashSet, VecDeque},
|
||||
net::SocketAddr,
|
||||
process::exit,
|
||||
sync::{
|
||||
atomic::{AtomicBool, AtomicIsize, AtomicUsize, Ordering},
|
||||
Arc, Mutex, RwLock,
|
||||
},
|
||||
thread::{sleep, Builder, JoinHandle},
|
||||
time::{Duration, Instant},
|
||||
},
|
||||
thread::{sleep, Builder, JoinHandle},
|
||||
time::{Duration, Instant},
|
||||
};
|
||||
|
||||
// The point at which transactions become "too old", in seconds.
|
||||
@ -45,14 +47,12 @@ pub type Result<T> = std::result::Result<T, BenchTpsError>;
|
||||
|
||||
pub type SharedTransactions = Arc<RwLock<VecDeque<Vec<(Transaction, u64)>>>>;
|
||||
|
||||
fn get_recent_blockhash<T: Client>(client: &T) -> (Hash, FeeCalculator) {
|
||||
fn get_latest_blockhash<T: Client>(client: &T) -> Hash {
|
||||
loop {
|
||||
match client.get_recent_blockhash_with_commitment(CommitmentConfig::processed()) {
|
||||
Ok((blockhash, fee_calculator, _last_valid_slot)) => {
|
||||
return (blockhash, fee_calculator)
|
||||
}
|
||||
match client.get_latest_blockhash_with_commitment(CommitmentConfig::processed()) {
|
||||
Ok((blockhash, _)) => return blockhash,
|
||||
Err(err) => {
|
||||
info!("Couldn't get recent blockhash: {:?}", err);
|
||||
info!("Couldn't get last blockhash: {:?}", err);
|
||||
sleep(Duration::from_secs(1));
|
||||
}
|
||||
};
|
||||
@ -239,19 +239,19 @@ where
|
||||
|
||||
let shared_txs: SharedTransactions = Arc::new(RwLock::new(VecDeque::new()));
|
||||
|
||||
let recent_blockhash = Arc::new(RwLock::new(get_recent_blockhash(client.as_ref()).0));
|
||||
let blockhash = Arc::new(RwLock::new(get_latest_blockhash(client.as_ref())));
|
||||
let shared_tx_active_thread_count = Arc::new(AtomicIsize::new(0));
|
||||
let total_tx_sent_count = Arc::new(AtomicUsize::new(0));
|
||||
|
||||
let blockhash_thread = {
|
||||
let exit_signal = exit_signal.clone();
|
||||
let recent_blockhash = recent_blockhash.clone();
|
||||
let blockhash = blockhash.clone();
|
||||
let client = client.clone();
|
||||
let id = id.pubkey();
|
||||
Builder::new()
|
||||
.name("solana-blockhash-poller".to_string())
|
||||
.spawn(move || {
|
||||
poll_blockhash(&exit_signal, &recent_blockhash, &client, &id);
|
||||
poll_blockhash(&exit_signal, &blockhash, &client, &id);
|
||||
})
|
||||
.unwrap()
|
||||
};
|
||||
@ -271,7 +271,7 @@ where
|
||||
let start = Instant::now();
|
||||
|
||||
generate_chunked_transfers(
|
||||
recent_blockhash,
|
||||
blockhash,
|
||||
&shared_txs,
|
||||
shared_tx_active_thread_count,
|
||||
source_keypair_chunks,
|
||||
@ -391,6 +391,22 @@ fn generate_txs(
|
||||
}
|
||||
}
|
||||
|
||||
fn get_new_latest_blockhash<T: Client>(client: &Arc<T>, blockhash: &Hash) -> Option<Hash> {
|
||||
let start = Instant::now();
|
||||
while start.elapsed().as_secs() < 5 {
|
||||
if let Ok(new_blockhash) = client.get_latest_blockhash() {
|
||||
if new_blockhash != *blockhash {
|
||||
return Some(new_blockhash);
|
||||
}
|
||||
}
|
||||
debug!("Got same blockhash ({:?}), will retry...", blockhash);
|
||||
|
||||
// Retry ~twice during a slot
|
||||
sleep(Duration::from_millis(DEFAULT_MS_PER_SLOT / 2));
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
fn poll_blockhash<T: Client>(
|
||||
exit_signal: &Arc<AtomicBool>,
|
||||
blockhash: &Arc<RwLock<Hash>>,
|
||||
@ -402,7 +418,7 @@ fn poll_blockhash<T: Client>(
|
||||
loop {
|
||||
let blockhash_updated = {
|
||||
let old_blockhash = *blockhash.read().unwrap();
|
||||
if let Ok((new_blockhash, _fee)) = client.get_new_blockhash(&old_blockhash) {
|
||||
if let Some(new_blockhash) = get_new_latest_blockhash(client, &old_blockhash) {
|
||||
*blockhash.write().unwrap() = new_blockhash;
|
||||
blockhash_last_updated = Instant::now();
|
||||
true
|
||||
@ -540,16 +556,16 @@ impl<'a> FundingTransactions<'a> for Vec<(&'a Keypair, Transaction)> {
|
||||
self.len(),
|
||||
);
|
||||
|
||||
let (blockhash, _fee_calculator) = get_recent_blockhash(client.as_ref());
|
||||
let blockhash = get_latest_blockhash(client.as_ref());
|
||||
|
||||
// re-sign retained to_fund_txes with updated blockhash
|
||||
self.sign(blockhash);
|
||||
self.send(&client);
|
||||
self.send(client);
|
||||
|
||||
// Sleep a few slots to allow transactions to process
|
||||
sleep(Duration::from_secs(1));
|
||||
|
||||
self.verify(&client, to_lamports);
|
||||
self.verify(client, to_lamports);
|
||||
|
||||
// retry anything that seems to have dropped through cracks
|
||||
// again since these txs are all or nothing, they're fine to
|
||||
@ -564,7 +580,7 @@ impl<'a> FundingTransactions<'a> for Vec<(&'a Keypair, Transaction)> {
|
||||
let to_fund_txs: Vec<(&Keypair, Transaction)> = to_fund
|
||||
.par_iter()
|
||||
.map(|(k, t)| {
|
||||
let instructions = system_instruction::transfer_many(&k.pubkey(), &t);
|
||||
let instructions = system_instruction::transfer_many(&k.pubkey(), t);
|
||||
let message = Message::new(&instructions, Some(&k.pubkey()));
|
||||
(*k, Transaction::new_unsigned(message))
|
||||
})
|
||||
@ -617,7 +633,7 @@ impl<'a> FundingTransactions<'a> for Vec<(&'a Keypair, Transaction)> {
|
||||
return None;
|
||||
}
|
||||
|
||||
let verified = if verify_funding_transfer(&client, &tx, to_lamports) {
|
||||
let verified = if verify_funding_transfer(&client, tx, to_lamports) {
|
||||
verified_txs.fetch_add(1, Ordering::Relaxed);
|
||||
Some(k.pubkey())
|
||||
} else {
|
||||
@ -732,8 +748,8 @@ pub fn airdrop_lamports<T: Client>(
|
||||
id.pubkey(),
|
||||
);
|
||||
|
||||
let (blockhash, _fee_calculator) = get_recent_blockhash(client);
|
||||
match request_airdrop_transaction(&faucet_addr, &id.pubkey(), airdrop_amount, blockhash) {
|
||||
let blockhash = get_latest_blockhash(client);
|
||||
match request_airdrop_transaction(faucet_addr, &id.pubkey(), airdrop_amount, blockhash) {
|
||||
Ok(transaction) => {
|
||||
let mut tries = 0;
|
||||
loop {
|
||||
@ -890,8 +906,16 @@ pub fn generate_and_fund_keypairs<T: 'static + Client + Send + Sync>(
|
||||
// pay for the transaction fees in a new run.
|
||||
let enough_lamports = 8 * lamports_per_account / 10;
|
||||
if first_keypair_balance < enough_lamports || last_keypair_balance < enough_lamports {
|
||||
let fee_rate_governor = client.get_fee_rate_governor().unwrap();
|
||||
let max_fee = fee_rate_governor.max_lamports_per_signature;
|
||||
let single_sig_message = Message::new_with_blockhash(
|
||||
&[Instruction::new_with_bytes(
|
||||
Pubkey::new_unique(),
|
||||
&[],
|
||||
vec![AccountMeta::new(Pubkey::new_unique(), true)],
|
||||
)],
|
||||
None,
|
||||
&client.get_latest_blockhash().unwrap(),
|
||||
);
|
||||
let max_fee = client.get_fee_for_message(&single_sig_message).unwrap();
|
||||
let extra_fees = extra * max_fee;
|
||||
let total_keypairs = keypairs.len() as u64 + 1; // Add one for funding keypair
|
||||
let total = lamports_per_account * total_keypairs + extra_fees;
|
||||
@ -924,17 +948,19 @@ pub fn generate_and_fund_keypairs<T: 'static + Client + Send + Sync>(
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use solana_runtime::bank::Bank;
|
||||
use solana_runtime::bank_client::BankClient;
|
||||
use solana_sdk::client::SyncClient;
|
||||
use solana_sdk::fee_calculator::FeeRateGovernor;
|
||||
use solana_sdk::genesis_config::create_genesis_config;
|
||||
use {
|
||||
super::*,
|
||||
solana_runtime::{bank::Bank, bank_client::BankClient},
|
||||
solana_sdk::{
|
||||
client::SyncClient, fee_calculator::FeeRateGovernor,
|
||||
genesis_config::create_genesis_config,
|
||||
},
|
||||
};
|
||||
|
||||
#[test]
|
||||
fn test_bench_tps_bank_client() {
|
||||
let (genesis_config, id) = create_genesis_config(10_000);
|
||||
let bank = Bank::new(&genesis_config);
|
||||
let bank = Bank::new_for_tests(&genesis_config);
|
||||
let client = Arc::new(BankClient::new(bank));
|
||||
|
||||
let config = Config {
|
||||
@ -955,7 +981,7 @@ mod tests {
|
||||
#[test]
|
||||
fn test_bench_tps_fund_keys() {
|
||||
let (genesis_config, id) = create_genesis_config(10_000);
|
||||
let bank = Bank::new(&genesis_config);
|
||||
let bank = Bank::new_for_tests(&genesis_config);
|
||||
let client = Arc::new(BankClient::new(bank));
|
||||
let keypair_count = 20;
|
||||
let lamports = 20;
|
||||
@ -978,7 +1004,7 @@ mod tests {
|
||||
let (mut genesis_config, id) = create_genesis_config(10_000);
|
||||
let fee_rate_governor = FeeRateGovernor::new(11, 0);
|
||||
genesis_config.fee_rate_governor = fee_rate_governor;
|
||||
let bank = Bank::new(&genesis_config);
|
||||
let bank = Bank::new_for_tests(&genesis_config);
|
||||
let client = Arc::new(BankClient::new(bank));
|
||||
let keypair_count = 20;
|
||||
let lamports = 20;
|
||||
|
@ -1,11 +1,13 @@
|
||||
use clap::{crate_description, crate_name, App, Arg, ArgMatches};
|
||||
use solana_faucet::faucet::FAUCET_PORT;
|
||||
use solana_sdk::fee_calculator::FeeRateGovernor;
|
||||
use solana_sdk::{
|
||||
pubkey::Pubkey,
|
||||
signature::{read_keypair_file, Keypair},
|
||||
use {
|
||||
clap::{crate_description, crate_name, App, Arg, ArgMatches},
|
||||
solana_faucet::faucet::FAUCET_PORT,
|
||||
solana_sdk::{
|
||||
fee_calculator::FeeRateGovernor,
|
||||
pubkey::Pubkey,
|
||||
signature::{read_keypair_file, Keypair},
|
||||
},
|
||||
std::{net::SocketAddr, process::exit, time::Duration},
|
||||
};
|
||||
use std::{net::SocketAddr, process::exit, time::Duration};
|
||||
|
||||
const NUM_LAMPORTS_PER_ACCOUNT_DEFAULT: u64 = solana_sdk::native_token::LAMPORTS_PER_SOL;
|
||||
|
||||
|
@ -1,13 +1,20 @@
|
||||
#![allow(clippy::integer_arithmetic)]
|
||||
use log::*;
|
||||
use solana_bench_tps::bench::{do_bench_tps, generate_and_fund_keypairs, generate_keypairs};
|
||||
use solana_bench_tps::cli;
|
||||
use solana_core::gossip_service::{discover_cluster, get_client, get_multi_client};
|
||||
use solana_genesis::Base64Account;
|
||||
use solana_sdk::fee_calculator::FeeRateGovernor;
|
||||
use solana_sdk::signature::{Keypair, Signer};
|
||||
use solana_sdk::system_program;
|
||||
use std::{collections::HashMap, fs::File, io::prelude::*, path::Path, process::exit, sync::Arc};
|
||||
use {
|
||||
log::*,
|
||||
solana_bench_tps::{
|
||||
bench::{do_bench_tps, generate_and_fund_keypairs, generate_keypairs},
|
||||
cli,
|
||||
},
|
||||
solana_genesis::Base64Account,
|
||||
solana_gossip::gossip_service::{discover_cluster, get_client, get_multi_client},
|
||||
solana_sdk::{
|
||||
fee_calculator::FeeRateGovernor,
|
||||
signature::{Keypair, Signer},
|
||||
system_program,
|
||||
},
|
||||
solana_streamer::socket::SocketAddrSpace,
|
||||
std::{collections::HashMap, fs::File, io::prelude::*, path::Path, process::exit, sync::Arc},
|
||||
};
|
||||
|
||||
/// Number of signatures for all transactions in ~1 week at ~100K TPS
|
||||
pub const NUM_SIGNATURES_FOR_TXS: u64 = 100_000 * 60 * 60 * 24 * 7;
|
||||
@ -39,7 +46,7 @@ fn main() {
|
||||
let keypair_count = *tx_count * keypair_multiplier;
|
||||
if *write_to_client_file {
|
||||
info!("Generating {} keypairs", keypair_count);
|
||||
let (keypairs, _) = generate_keypairs(&id, keypair_count as u64);
|
||||
let (keypairs, _) = generate_keypairs(id, keypair_count as u64);
|
||||
let num_accounts = keypairs.len() as u64;
|
||||
let max_fee =
|
||||
FeeRateGovernor::new(*target_lamports_per_signature, 0).max_lamports_per_signature;
|
||||
@ -68,13 +75,14 @@ fn main() {
|
||||
}
|
||||
|
||||
info!("Connecting to the cluster");
|
||||
let nodes = discover_cluster(&entrypoint_addr, *num_nodes).unwrap_or_else(|err| {
|
||||
eprintln!("Failed to discover {} nodes: {:?}", num_nodes, err);
|
||||
exit(1);
|
||||
});
|
||||
let nodes = discover_cluster(entrypoint_addr, *num_nodes, SocketAddrSpace::Unspecified)
|
||||
.unwrap_or_else(|err| {
|
||||
eprintln!("Failed to discover {} nodes: {:?}", num_nodes, err);
|
||||
exit(1);
|
||||
});
|
||||
|
||||
let client = if *multi_client {
|
||||
let (client, num_clients) = get_multi_client(&nodes);
|
||||
let (client, num_clients) = get_multi_client(&nodes, &SocketAddrSpace::Unspecified);
|
||||
if nodes.len() < num_clients {
|
||||
eprintln!(
|
||||
"Error: Insufficient nodes discovered. Expecting {} or more",
|
||||
@ -88,7 +96,7 @@ fn main() {
|
||||
let mut target_client = None;
|
||||
for node in nodes {
|
||||
if node.id == *target_node {
|
||||
target_client = Some(Arc::new(get_client(&[node])));
|
||||
target_client = Some(Arc::new(get_client(&[node], &SocketAddrSpace::Unspecified)));
|
||||
break;
|
||||
}
|
||||
}
|
||||
@ -97,7 +105,7 @@ fn main() {
|
||||
exit(1);
|
||||
})
|
||||
} else {
|
||||
Arc::new(get_client(&nodes))
|
||||
Arc::new(get_client(&nodes, &SocketAddrSpace::Unspecified))
|
||||
};
|
||||
|
||||
let keypairs = if *read_from_client_file {
|
||||
@ -135,7 +143,7 @@ fn main() {
|
||||
generate_and_fund_keypairs(
|
||||
client.clone(),
|
||||
Some(*faucet_addr),
|
||||
&id,
|
||||
id,
|
||||
keypair_count,
|
||||
*num_lamports_per_account,
|
||||
)
|
||||
|
@ -1,20 +1,22 @@
|
||||
#![allow(clippy::integer_arithmetic)]
|
||||
use serial_test::serial;
|
||||
use solana_bench_tps::{
|
||||
bench::{do_bench_tps, generate_and_fund_keypairs},
|
||||
cli::Config,
|
||||
};
|
||||
use solana_client::thin_client::create_client;
|
||||
use solana_core::{cluster_info::VALIDATOR_PORT_RANGE, validator::ValidatorConfig};
|
||||
use solana_faucet::faucet::run_local_faucet_with_port;
|
||||
use solana_local_cluster::{
|
||||
local_cluster::{ClusterConfig, LocalCluster},
|
||||
validator_configs::make_identical_validator_configs,
|
||||
};
|
||||
use solana_sdk::signature::{Keypair, Signer};
|
||||
use std::{
|
||||
sync::{mpsc::channel, Arc},
|
||||
time::Duration,
|
||||
use {
|
||||
crossbeam_channel::unbounded,
|
||||
serial_test::serial,
|
||||
solana_bench_tps::{
|
||||
bench::{do_bench_tps, generate_and_fund_keypairs},
|
||||
cli::Config,
|
||||
},
|
||||
solana_client::thin_client::create_client,
|
||||
solana_core::validator::ValidatorConfig,
|
||||
solana_faucet::faucet::run_local_faucet_with_port,
|
||||
solana_gossip::cluster_info::VALIDATOR_PORT_RANGE,
|
||||
solana_local_cluster::{
|
||||
local_cluster::{ClusterConfig, LocalCluster},
|
||||
validator_configs::make_identical_validator_configs,
|
||||
},
|
||||
solana_sdk::signature::{Keypair, Signer},
|
||||
solana_streamer::socket::SocketAddrSpace,
|
||||
std::{sync::Arc, time::Duration},
|
||||
};
|
||||
|
||||
fn test_bench_tps_local_cluster(config: Config) {
|
||||
@ -22,13 +24,19 @@ fn test_bench_tps_local_cluster(config: Config) {
|
||||
|
||||
solana_logger::setup();
|
||||
const NUM_NODES: usize = 1;
|
||||
let cluster = LocalCluster::new(&mut ClusterConfig {
|
||||
node_stakes: vec![999_990; NUM_NODES],
|
||||
cluster_lamports: 200_000_000,
|
||||
validator_configs: make_identical_validator_configs(&ValidatorConfig::default(), NUM_NODES),
|
||||
native_instruction_processors,
|
||||
..ClusterConfig::default()
|
||||
});
|
||||
let cluster = LocalCluster::new(
|
||||
&mut ClusterConfig {
|
||||
node_stakes: vec![999_990; NUM_NODES],
|
||||
cluster_lamports: 200_000_000,
|
||||
validator_configs: make_identical_validator_configs(
|
||||
&ValidatorConfig::default(),
|
||||
NUM_NODES,
|
||||
),
|
||||
native_instruction_processors,
|
||||
..ClusterConfig::default()
|
||||
},
|
||||
SocketAddrSpace::Unspecified,
|
||||
);
|
||||
|
||||
let faucet_keypair = Keypair::new();
|
||||
cluster.transfer(
|
||||
@ -42,7 +50,7 @@ fn test_bench_tps_local_cluster(config: Config) {
|
||||
VALIDATOR_PORT_RANGE,
|
||||
));
|
||||
|
||||
let (addr_sender, addr_receiver) = channel();
|
||||
let (addr_sender, addr_receiver) = unbounded();
|
||||
run_local_faucet_with_port(faucet_keypair, addr_sender, None, 0);
|
||||
let faucet_addr = addr_receiver
|
||||
.recv_timeout(Duration::from_secs(2))
|
||||
|
32
bloom/Cargo.toml
Normal file
32
bloom/Cargo.toml
Normal file
@ -0,0 +1,32 @@
|
||||
[package]
|
||||
name = "solana-bloom"
|
||||
version = "1.10.0"
|
||||
description = "Solana bloom filter"
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
documentation = "https://docs.rs/solana-bloom"
|
||||
edition = "2021"
|
||||
|
||||
[dependencies]
|
||||
bv = { version = "0.11.1", features = ["serde"] }
|
||||
fnv = "1.0.7"
|
||||
rand = "0.7.0"
|
||||
serde = { version = "1.0.134", features = ["rc"] }
|
||||
rayon = "1.5.1"
|
||||
serde_derive = "1.0.103"
|
||||
solana-frozen-abi = { path = "../frozen-abi", version = "=1.10.0" }
|
||||
solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.10.0" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.10.0" }
|
||||
log = "0.4.14"
|
||||
|
||||
[lib]
|
||||
crate-type = ["lib"]
|
||||
name = "solana_bloom"
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
||||
|
||||
[build-dependencies]
|
||||
rustc_version = "0.4"
|
@ -1,17 +1,18 @@
|
||||
#![feature(test)]
|
||||
|
||||
extern crate test;
|
||||
use bv::BitVec;
|
||||
use fnv::FnvHasher;
|
||||
use rand::Rng;
|
||||
use solana_runtime::bloom::{AtomicBloom, Bloom, BloomHashIndex};
|
||||
use solana_sdk::{
|
||||
hash::{hash, Hash},
|
||||
signature::Signature,
|
||||
use {
|
||||
bv::BitVec,
|
||||
fnv::FnvHasher,
|
||||
rand::Rng,
|
||||
solana_bloom::bloom::{AtomicBloom, Bloom, BloomHashIndex},
|
||||
solana_sdk::{
|
||||
hash::{hash, Hash},
|
||||
signature::Signature,
|
||||
},
|
||||
std::{collections::HashSet, hash::Hasher},
|
||||
test::Bencher,
|
||||
};
|
||||
use std::collections::HashSet;
|
||||
use std::hash::Hasher;
|
||||
use test::Bencher;
|
||||
|
||||
#[bench]
|
||||
#[ignore]
|
1
bloom/build.rs
Symbolic link
1
bloom/build.rs
Symbolic link
@ -0,0 +1 @@
|
||||
../frozen-abi/build.rs
|
@ -1,12 +1,17 @@
|
||||
//! Simple Bloom Filter
|
||||
use bv::BitVec;
|
||||
use fnv::FnvHasher;
|
||||
use rand::{self, Rng};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use solana_sdk::sanitize::{Sanitize, SanitizeError};
|
||||
use std::fmt;
|
||||
use std::sync::atomic::{AtomicU64, Ordering};
|
||||
use std::{cmp, hash::Hasher, marker::PhantomData};
|
||||
use {
|
||||
bv::BitVec,
|
||||
fnv::FnvHasher,
|
||||
rand::{self, Rng},
|
||||
serde::{Deserialize, Serialize},
|
||||
solana_sdk::sanitize::{Sanitize, SanitizeError},
|
||||
std::{
|
||||
cmp, fmt,
|
||||
hash::Hasher,
|
||||
marker::PhantomData,
|
||||
sync::atomic::{AtomicU64, Ordering},
|
||||
},
|
||||
};
|
||||
|
||||
/// Generate a stable hash of `self` for each `hash_index`
|
||||
/// Best effort can be made for uniqueness of each hash.
|
||||
@ -67,10 +72,12 @@ impl<T: BloomHashIndex> Bloom<T> {
|
||||
_phantom: PhantomData::default(),
|
||||
}
|
||||
}
|
||||
/// create filter optimal for num size given the `FALSE_RATE`
|
||||
/// the keys are randomized for picking data out of a collision resistant hash of size
|
||||
/// `keysize` bytes
|
||||
/// https://hur.st/bloomfilter/
|
||||
/// Create filter optimal for num size given the `FALSE_RATE`.
|
||||
///
|
||||
/// The keys are randomized for picking data out of a collision resistant hash of size
|
||||
/// `keysize` bytes.
|
||||
///
|
||||
/// See <https://hur.st/bloomfilter/>.
|
||||
pub fn random(num_items: usize, false_rate: f64, max_bits: usize) -> Self {
|
||||
let m = Self::num_bits(num_items as f64, false_rate);
|
||||
let num_bits = cmp::max(1, cmp::min(m as usize, max_bits));
|
||||
@ -94,7 +101,7 @@ impl<T: BloomHashIndex> Bloom<T> {
|
||||
}
|
||||
}
|
||||
fn pos(&self, key: &T, k: u64) -> u64 {
|
||||
key.hash_at_index(k) % self.bits.len()
|
||||
key.hash_at_index(k).wrapping_rem(self.bits.len())
|
||||
}
|
||||
pub fn clear(&mut self) {
|
||||
self.bits = BitVec::new_fill(false, self.bits.len());
|
||||
@ -104,7 +111,7 @@ impl<T: BloomHashIndex> Bloom<T> {
|
||||
for k in &self.keys {
|
||||
let pos = self.pos(key, *k);
|
||||
if !self.bits.get(pos) {
|
||||
self.num_bits_set += 1;
|
||||
self.num_bits_set = self.num_bits_set.saturating_add(1);
|
||||
self.bits.set(pos, true);
|
||||
}
|
||||
}
|
||||
@ -157,21 +164,26 @@ impl<T: BloomHashIndex> From<Bloom<T>> for AtomicBloom<T> {
|
||||
|
||||
impl<T: BloomHashIndex> AtomicBloom<T> {
|
||||
fn pos(&self, key: &T, hash_index: u64) -> (usize, u64) {
|
||||
let pos = key.hash_at_index(hash_index) % self.num_bits;
|
||||
let pos = key.hash_at_index(hash_index).wrapping_rem(self.num_bits);
|
||||
// Divide by 64 to figure out which of the
|
||||
// AtomicU64 bit chunks we need to modify.
|
||||
let index = pos >> 6;
|
||||
let index = pos.wrapping_shr(6);
|
||||
// (pos & 63) is equivalent to mod 64 so that we can find
|
||||
// the index of the bit within the AtomicU64 to modify.
|
||||
let mask = 1u64 << (pos & 63);
|
||||
let mask = 1u64.wrapping_shl(u32::try_from(pos & 63).unwrap());
|
||||
(index as usize, mask)
|
||||
}
|
||||
|
||||
pub fn add(&self, key: &T) {
|
||||
/// Adds an item to the bloom filter and returns true if the item
|
||||
/// was not in the filter before.
|
||||
pub fn add(&self, key: &T) -> bool {
|
||||
let mut added = false;
|
||||
for k in &self.keys {
|
||||
let (index, mask) = self.pos(key, *k);
|
||||
self.bits[index].fetch_or(mask, Ordering::Relaxed);
|
||||
let prev_val = self.bits[index].fetch_or(mask, Ordering::Relaxed);
|
||||
added = added || prev_val & mask == 0u64;
|
||||
}
|
||||
added
|
||||
}
|
||||
|
||||
pub fn contains(&self, key: &T) -> bool {
|
||||
@ -182,6 +194,12 @@ impl<T: BloomHashIndex> AtomicBloom<T> {
|
||||
})
|
||||
}
|
||||
|
||||
pub fn clear_for_tests(&mut self) {
|
||||
self.bits.iter().for_each(|bit| {
|
||||
bit.store(0u64, Ordering::Relaxed);
|
||||
});
|
||||
}
|
||||
|
||||
// Only for tests and simulations.
|
||||
pub fn mock_clone(&self) -> Self {
|
||||
Self {
|
||||
@ -217,9 +235,11 @@ impl<T: BloomHashIndex> From<AtomicBloom<T>> for Bloom<T> {
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use rayon::prelude::*;
|
||||
use solana_sdk::hash::{hash, Hash};
|
||||
use {
|
||||
super::*,
|
||||
rayon::prelude::*,
|
||||
solana_sdk::hash::{hash, Hash},
|
||||
};
|
||||
|
||||
#[test]
|
||||
fn test_bloom_filter() {
|
||||
@ -311,7 +331,9 @@ mod test {
|
||||
assert_eq!(bloom.keys.len(), 3);
|
||||
assert_eq!(bloom.num_bits, 6168);
|
||||
assert_eq!(bloom.bits.len(), 97);
|
||||
hash_values.par_iter().for_each(|v| bloom.add(v));
|
||||
hash_values.par_iter().for_each(|v| {
|
||||
bloom.add(v);
|
||||
});
|
||||
let bloom: Bloom<Hash> = bloom.into();
|
||||
assert_eq!(bloom.keys.len(), 3);
|
||||
assert_eq!(bloom.bits.len(), 6168);
|
||||
@ -353,7 +375,9 @@ mod test {
|
||||
}
|
||||
// Round trip, re-inserting the same hash values.
|
||||
let bloom: AtomicBloom<_> = bloom.into();
|
||||
hash_values.par_iter().for_each(|v| bloom.add(v));
|
||||
hash_values.par_iter().for_each(|v| {
|
||||
bloom.add(v);
|
||||
});
|
||||
for hash_value in &hash_values {
|
||||
assert!(bloom.contains(hash_value));
|
||||
}
|
||||
@ -371,7 +395,9 @@ mod test {
|
||||
let bloom: AtomicBloom<_> = bloom.into();
|
||||
assert_eq!(bloom.num_bits, 9731);
|
||||
assert_eq!(bloom.bits.len(), (9731 + 63) / 64);
|
||||
more_hash_values.par_iter().for_each(|v| bloom.add(v));
|
||||
more_hash_values.par_iter().for_each(|v| {
|
||||
bloom.add(v);
|
||||
});
|
||||
for hash_value in &hash_values {
|
||||
assert!(bloom.contains(hash_value));
|
||||
}
|
5
bloom/src/lib.rs
Normal file
5
bloom/src/lib.rs
Normal file
@ -0,0 +1,5 @@
|
||||
#![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(min_specialization))]
|
||||
pub mod bloom;
|
||||
|
||||
#[macro_use]
|
||||
extern crate solana_frozen_abi_macro;
|
88
book/src/proposals/handle-duplicate-block.md
Normal file
88
book/src/proposals/handle-duplicate-block.md
Normal file
@ -0,0 +1,88 @@
|
||||
# Leader Duplicate Block Slashing
|
||||
|
||||
This design describes how the cluster slashes leaders that produce duplicate
|
||||
blocks.
|
||||
|
||||
Leaders that produce multiple blocks for the same slot increase the number of
|
||||
potential forks that the cluster has to resolve.
|
||||
|
||||
## Primitives
|
||||
1. gossip_root: Nodes now gossip their current root
|
||||
2. gossip_duplicate_slots: Nodes can gossip up to `N` duplicate slot proofs.
|
||||
3. `DUPLICATE_THRESHOLD`: The minimum percentage of stake that needs to vote on a fork with version `X` of a duplicate slot, in order for that fork to become votable.
|
||||
|
||||
## Protocol
|
||||
1. When WindowStage detects a duplicate slot proof `P`, it checks the new `gossip_root` to see if `<= 1/3` of the nodes have rooted a slot `S >= P`. If so, it pushes a proof to `gossip_duplicate_slots` to gossip. WindowStage then signals ReplayStage about this duplicate slot `S`. These proofs can be purged from gossip once the validator sees > 2/3 of people gossiping roots `R > S`.
|
||||
|
||||
2. When ReplayStage receives the signal for a duplicate slot `S` from `1)` above, the validator monitors gossip and replay waiting for`>= DUPLICATE_THRESHOLD` votes for the same hash which implies the same version of the slot. If this conditon is met for some version with hash `H` of slot `S`, this is then known as the `duplicate_confirmed` version of the slot.
|
||||
|
||||
Before a duplicate slot `S` is `duplicate_confirmed`, it's first excluded from the vote candidate set in the fork choice rules. In addition, ReplayStage also resets PoH to the *latest* ancestor of the *earliest* `non-duplicate/confirmed_duplicate_slot`, so that block generation can start happening on the earliest known *safe* block.
|
||||
|
||||
Some notes about the `DUPLICATE_THRESHOLD`. In the cases below, assume `DUPLICATE_THRESHOLD = 52`:
|
||||
|
||||
a) If less than `2 * DUPLICATE_THRESHOLD - 1` percentage of the network is malicious, then there can only be one such `duplicate_confirmed` version of the slot. With `DUPLICATE_THRESHOLD = 52`, this is
|
||||
a malcious tolerance of `4%`
|
||||
|
||||
b) The liveness of the network is at most `1 - DUPLICATE_THRESHOLD - SWITCH_THRESHOLD`. This is because if you need at least `SWITCH_THRESHOLD` percentage of the stake voting on a different fork in order to switch off of a duplicate fork that has `< DUPLICATE_THRESHOLD` stake voting on it, and is *not* `duplicate_confirmed`. For `DUPLICATE_THRESHOLD = 52` and `DUPLICATE_THRESHOLD = 38`, this implies a liveness tolerance of `10%`.
|
||||
|
||||
For example in the situation below, validators that voted on `2` can't vote any further on fork `2` because it's been removed from fork choice. Now slot 6 better have enough stake for a switching proof, or the network halts.
|
||||
|
||||
```text
|
||||
|-------- 2 (51% voted, then detected this slot was a duplicate and removed this slot from fork choice)
|
||||
0---|
|
||||
|---------- 6 (39%)
|
||||
|
||||
```
|
||||
|
||||
3. Switching proofs need to be extended to allow including vote hashes from different versions of the same same slot (detected through 1). Right now this is not supported since switching proofs can
|
||||
only be built using votes from banks in BankForks, and two different versions of the same slot cannot
|
||||
simultaneously exist in BankForks. For instance:
|
||||
|
||||
```text
|
||||
|-------- 2
|
||||
|
|
||||
0------------- 1 ------ 2'
|
||||
|
|
||||
|---------- 6
|
||||
|
||||
```
|
||||
|
||||
Imagine each version of slot 2 and 2' have `DUPLICATE_THRESHOLD / 2` of the votes on them, so neither duplicate can be confirmed. At most slot 6 has `1 - DUPLICATE_THRESHOLD / 2` of the votes
|
||||
on it, which is less than the switching threshold. Thus, in order for validators voting on `2` or `2'` to switch to slot 6, and make progress, they need to incorporate votes from the other version of the slot into their switching proofs.
|
||||
|
||||
|
||||
### The repair problem.
|
||||
Now what happens if one of the following occurs:
|
||||
|
||||
1) Due to network blips/latencies, some validators fail to observe the gossip votes before they are overwritten by newer votes? Then some validators may conclude a slot `S` is `duplicate_confirmed` while others don't.
|
||||
|
||||
2) Due to lockouts, no version of duplicate slot `S` reaches `duplicate_confirmed` status, but one of its descendants may reach `duplicate_confirmed` after those lockouts expire, which by definition, means `S` is also `duplicate_confirmed`.
|
||||
|
||||
3) People who are catching up and don't see the votes in gossip encounter a dup block and can't make progress.
|
||||
|
||||
We assume that given a network is eventually stable, if at least one correct validator observed `S` is `duplicate_confirmed`, then if `S` is part of the heaviest fork, then eventually all validators will observe some descendant of `S` is duplicate confirmed.
|
||||
|
||||
This problem we need to solve is modeled simply by the below scenario:
|
||||
|
||||
```text
|
||||
1 -> 2 (duplicate) -> 3 -> 4 (duplicate)
|
||||
```
|
||||
Assume the following:
|
||||
|
||||
1. Due to gossiping duplciate proofs, we assume everyone will eventually see duplicate proofs for 2 and 4, so everyone agrees to remove them from fork choice until they are `duplicate_confirmed`.
|
||||
|
||||
2. Due to lockouts, `> DUPLICATE_THRESHOLD` of the stake votes on 4, but not 2. This means at least `DUPLICATE_THRESHOLD` of people have the "correct" version of both slots 2 and 4.
|
||||
|
||||
3. However, the remaining `1-DUPLICATE_THRESHOLD` of people have wrong version of 2. This means in replay, their slot 3 will be marked dead, *even though the faulty slot is 2*. The goal is to get these people on the right fork again.
|
||||
|
||||
Possible solution:
|
||||
|
||||
1. Change `EpochSlots` to signal when a bank is frozen, not when a slot is complete. If we see > `DUPLICATE_THRESHOLD` have frozen the dead slot 3, then we attempt recovery. Note this does not mean that all `DUPLICATE_THRESHOLD` have frozen the same version of the bank, it's just a signal to us that something may be wrong with our version of the bank.
|
||||
|
||||
2. Recovery takes the form of a special repair request, `RepairDuplicateConfirmed(dead_slot, Vec<(Slot, Hash)>)`, which specifies a dead slot, and then a vector of `(slot, hash)` of `N` of its latest parents.
|
||||
|
||||
3. The repairer sees this request and responds with the correct hash only if any element of the `(slot, hash)` vector is both `duplicate_confirmed` and the hash doesn't match the requester's hash in the vector.
|
||||
|
||||
4. Once the requester sees the "correct" hash is different than their frozen hash, they dump the block so that they can accept a new block, and ask the network for the block with the correct hash.
|
||||
|
||||
Of course the repairer might lie to you, and you'll get the wrong version of the block, in which case you'll end up with another dead block and repeat the procedure.
|
32
bucket_map/Cargo.toml
Normal file
32
bucket_map/Cargo.toml
Normal file
@ -0,0 +1,32 @@
|
||||
[package]
|
||||
name = "solana-bucket-map"
|
||||
version = "1.10.0"
|
||||
description = "solana-bucket-map"
|
||||
homepage = "https://solana.com/"
|
||||
documentation = "https://docs.rs/solana-bucket-map"
|
||||
readme = "../README.md"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
license = "Apache-2.0"
|
||||
edition = "2021"
|
||||
|
||||
[dependencies]
|
||||
solana-sdk = { path = "../sdk", version = "=1.10.0" }
|
||||
memmap2 = "0.5.2"
|
||||
log = { version = "0.4.11" }
|
||||
solana-measure = { path = "../measure", version = "=1.10.0" }
|
||||
rand = "0.7.0"
|
||||
tempfile = "3.3.0"
|
||||
modular-bitfield = "0.11.2"
|
||||
|
||||
[dev-dependencies]
|
||||
fs_extra = "1.2.0"
|
||||
rayon = "1.5.0"
|
||||
solana-logger = { path = "../logger", version = "=1.10.0" }
|
||||
|
||||
[lib]
|
||||
crate-type = ["lib"]
|
||||
name = "solana_bucket_map"
|
||||
|
||||
[[bench]]
|
||||
name = "bucket_map"
|
77
bucket_map/benches/bucket_map.rs
Normal file
77
bucket_map/benches/bucket_map.rs
Normal file
@ -0,0 +1,77 @@
|
||||
#![feature(test)]
|
||||
|
||||
macro_rules! DEFINE_NxM_BENCH {
|
||||
($i:ident, $n:literal, $m:literal) => {
|
||||
mod $i {
|
||||
use super::*;
|
||||
|
||||
#[bench]
|
||||
fn bench_insert_baseline_hashmap(bencher: &mut Bencher) {
|
||||
do_bench_insert_baseline_hashmap(bencher, $n, $m);
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_insert_bucket_map(bencher: &mut Bencher) {
|
||||
do_bench_insert_bucket_map(bencher, $n, $m);
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
extern crate test;
|
||||
use {
|
||||
rayon::prelude::*,
|
||||
solana_bucket_map::bucket_map::{BucketMap, BucketMapConfig},
|
||||
solana_sdk::pubkey::Pubkey,
|
||||
std::{collections::hash_map::HashMap, sync::RwLock},
|
||||
test::Bencher,
|
||||
};
|
||||
|
||||
type IndexValue = u64;
|
||||
|
||||
DEFINE_NxM_BENCH!(dim_01x02, 1, 2);
|
||||
DEFINE_NxM_BENCH!(dim_02x04, 2, 4);
|
||||
DEFINE_NxM_BENCH!(dim_04x08, 4, 8);
|
||||
DEFINE_NxM_BENCH!(dim_08x16, 8, 16);
|
||||
DEFINE_NxM_BENCH!(dim_16x32, 16, 32);
|
||||
DEFINE_NxM_BENCH!(dim_32x64, 32, 64);
|
||||
|
||||
/// Benchmark insert with Hashmap as baseline for N threads inserting M keys each
|
||||
fn do_bench_insert_baseline_hashmap(bencher: &mut Bencher, n: usize, m: usize) {
|
||||
let index = RwLock::new(HashMap::new());
|
||||
(0..n).into_iter().into_par_iter().for_each(|i| {
|
||||
let key = Pubkey::new_unique();
|
||||
index
|
||||
.write()
|
||||
.unwrap()
|
||||
.insert(key, vec![(i, IndexValue::default())]);
|
||||
});
|
||||
bencher.iter(|| {
|
||||
(0..n).into_iter().into_par_iter().for_each(|_| {
|
||||
for j in 0..m {
|
||||
let key = Pubkey::new_unique();
|
||||
index
|
||||
.write()
|
||||
.unwrap()
|
||||
.insert(key, vec![(j, IndexValue::default())]);
|
||||
}
|
||||
})
|
||||
});
|
||||
}
|
||||
|
||||
/// Benchmark insert with BucketMap with N buckets for N threads inserting M keys each
|
||||
fn do_bench_insert_bucket_map(bencher: &mut Bencher, n: usize, m: usize) {
|
||||
let index = BucketMap::new(BucketMapConfig::new(n));
|
||||
(0..n).into_iter().into_par_iter().for_each(|i| {
|
||||
let key = Pubkey::new_unique();
|
||||
index.update(&key, |_| Some((vec![(i, IndexValue::default())], 0)));
|
||||
});
|
||||
bencher.iter(|| {
|
||||
(0..n).into_iter().into_par_iter().for_each(|_| {
|
||||
for j in 0..m {
|
||||
let key = Pubkey::new_unique();
|
||||
index.update(&key, |_| Some((vec![(j, IndexValue::default())], 0)));
|
||||
}
|
||||
})
|
||||
});
|
||||
}
|
501
bucket_map/src/bucket.rs
Normal file
501
bucket_map/src/bucket.rs
Normal file
@ -0,0 +1,501 @@
|
||||
use {
|
||||
crate::{
|
||||
bucket_item::BucketItem,
|
||||
bucket_map::BucketMapError,
|
||||
bucket_stats::BucketMapStats,
|
||||
bucket_storage::{BucketStorage, Uid, DEFAULT_CAPACITY_POW2},
|
||||
index_entry::IndexEntry,
|
||||
MaxSearch, RefCount,
|
||||
},
|
||||
rand::{thread_rng, Rng},
|
||||
solana_measure::measure::Measure,
|
||||
solana_sdk::pubkey::Pubkey,
|
||||
std::{
|
||||
collections::hash_map::DefaultHasher,
|
||||
hash::{Hash, Hasher},
|
||||
marker::PhantomData,
|
||||
ops::RangeBounds,
|
||||
path::PathBuf,
|
||||
sync::{
|
||||
atomic::{AtomicU64, AtomicUsize, Ordering},
|
||||
Arc, Mutex,
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct ReallocatedItems {
|
||||
// Some if the index was reallocated
|
||||
// u64 is random associated with the new index
|
||||
pub index: Option<(u64, BucketStorage)>,
|
||||
// Some for a data bucket reallocation
|
||||
// u64 is data bucket index
|
||||
pub data: Option<(u64, BucketStorage)>,
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct Reallocated {
|
||||
/// > 0 if reallocations are encoded
|
||||
pub active_reallocations: AtomicUsize,
|
||||
|
||||
/// actual reallocated bucket
|
||||
/// mutex because bucket grow code runs with a read lock
|
||||
pub items: Mutex<ReallocatedItems>,
|
||||
}
|
||||
|
||||
impl Reallocated {
|
||||
/// specify that a reallocation has occurred
|
||||
pub fn add_reallocation(&self) {
|
||||
assert_eq!(
|
||||
0,
|
||||
self.active_reallocations.fetch_add(1, Ordering::Relaxed),
|
||||
"Only 1 reallocation can occur at a time"
|
||||
);
|
||||
}
|
||||
/// Return true IFF a reallocation has occurred.
|
||||
/// Calling this takes conceptual ownership of the reallocation encoded in the struct.
|
||||
pub fn get_reallocated(&self) -> bool {
|
||||
self.active_reallocations
|
||||
.compare_exchange(1, 0, Ordering::Acquire, Ordering::Relaxed)
|
||||
.is_ok()
|
||||
}
|
||||
}
|
||||
|
||||
// >= 2 instances of BucketStorage per 'bucket' in the bucket map. 1 for index, >= 1 for data
|
||||
pub struct Bucket<T> {
|
||||
drives: Arc<Vec<PathBuf>>,
|
||||
//index
|
||||
pub index: BucketStorage,
|
||||
//random offset for the index
|
||||
random: u64,
|
||||
//storage buckets to store SlotSlice up to a power of 2 in len
|
||||
pub data: Vec<BucketStorage>,
|
||||
_phantom: PhantomData<T>,
|
||||
stats: Arc<BucketMapStats>,
|
||||
|
||||
pub reallocated: Reallocated,
|
||||
}
|
||||
|
||||
impl<T: Clone + Copy> Bucket<T> {
|
||||
pub fn new(
|
||||
drives: Arc<Vec<PathBuf>>,
|
||||
max_search: MaxSearch,
|
||||
stats: Arc<BucketMapStats>,
|
||||
count: Arc<AtomicU64>,
|
||||
) -> Self {
|
||||
let index = BucketStorage::new(
|
||||
Arc::clone(&drives),
|
||||
1,
|
||||
std::mem::size_of::<IndexEntry>() as u64,
|
||||
max_search,
|
||||
Arc::clone(&stats.index),
|
||||
count,
|
||||
);
|
||||
Self {
|
||||
random: thread_rng().gen(),
|
||||
drives,
|
||||
index,
|
||||
data: vec![],
|
||||
_phantom: PhantomData::default(),
|
||||
stats,
|
||||
reallocated: Reallocated::default(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn keys(&self) -> Vec<Pubkey> {
|
||||
let mut rv = vec![];
|
||||
for i in 0..self.index.capacity() {
|
||||
if self.index.is_free(i) {
|
||||
continue;
|
||||
}
|
||||
let ix: &IndexEntry = self.index.get(i);
|
||||
rv.push(ix.key);
|
||||
}
|
||||
rv
|
||||
}
|
||||
|
||||
pub fn items_in_range<R>(&self, range: &Option<&R>) -> Vec<BucketItem<T>>
|
||||
where
|
||||
R: RangeBounds<Pubkey>,
|
||||
{
|
||||
let mut result = Vec::with_capacity(self.index.count.load(Ordering::Relaxed) as usize);
|
||||
for i in 0..self.index.capacity() {
|
||||
let ii = i % self.index.capacity();
|
||||
if self.index.is_free(ii) {
|
||||
continue;
|
||||
}
|
||||
let ix: &IndexEntry = self.index.get(ii);
|
||||
let key = ix.key;
|
||||
if range.map(|r| r.contains(&key)).unwrap_or(true) {
|
||||
let val = ix.read_value(self);
|
||||
result.push(BucketItem {
|
||||
pubkey: key,
|
||||
ref_count: ix.ref_count(),
|
||||
slot_list: val.map(|(v, _ref_count)| v.to_vec()).unwrap_or_default(),
|
||||
});
|
||||
}
|
||||
}
|
||||
result
|
||||
}
|
||||
|
||||
pub fn find_entry(&self, key: &Pubkey) -> Option<(&IndexEntry, u64)> {
|
||||
Self::bucket_find_entry(&self.index, key, self.random)
|
||||
}
|
||||
|
||||
fn find_entry_mut(&self, key: &Pubkey) -> Option<(&mut IndexEntry, u64)> {
|
||||
Self::bucket_find_entry_mut(&self.index, key, self.random)
|
||||
}
|
||||
|
||||
fn bucket_find_entry_mut<'a>(
|
||||
index: &'a BucketStorage,
|
||||
key: &Pubkey,
|
||||
random: u64,
|
||||
) -> Option<(&'a mut IndexEntry, u64)> {
|
||||
let ix = Self::bucket_index_ix(index, key, random);
|
||||
for i in ix..ix + index.max_search() {
|
||||
let ii = i % index.capacity();
|
||||
if index.is_free(ii) {
|
||||
continue;
|
||||
}
|
||||
let elem: &mut IndexEntry = index.get_mut(ii);
|
||||
if elem.key == *key {
|
||||
return Some((elem, ii));
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
fn bucket_find_entry<'a>(
|
||||
index: &'a BucketStorage,
|
||||
key: &Pubkey,
|
||||
random: u64,
|
||||
) -> Option<(&'a IndexEntry, u64)> {
|
||||
let ix = Self::bucket_index_ix(index, key, random);
|
||||
for i in ix..ix + index.max_search() {
|
||||
let ii = i % index.capacity();
|
||||
if index.is_free(ii) {
|
||||
continue;
|
||||
}
|
||||
let elem: &IndexEntry = index.get(ii);
|
||||
if elem.key == *key {
|
||||
return Some((elem, ii));
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
fn bucket_create_key(
|
||||
index: &mut BucketStorage,
|
||||
key: &Pubkey,
|
||||
elem_uid: Uid,
|
||||
random: u64,
|
||||
is_resizing: bool,
|
||||
) -> Result<u64, BucketMapError> {
|
||||
let ix = Self::bucket_index_ix(index, key, random);
|
||||
for i in ix..ix + index.max_search() {
|
||||
let ii = i as u64 % index.capacity();
|
||||
if !index.is_free(ii) {
|
||||
continue;
|
||||
}
|
||||
index.allocate(ii, elem_uid, is_resizing).unwrap();
|
||||
let elem: &mut IndexEntry = index.get_mut(ii);
|
||||
// These fields will be overwritten after allocation by callers.
|
||||
// Since this part of the mmapped file could have previously been used by someone else, there can be garbage here.
|
||||
elem.init(key);
|
||||
//debug!( "INDEX ALLOC {:?} {} {} {}", key, ii, index.capacity, elem_uid );
|
||||
return Ok(ii);
|
||||
}
|
||||
Err(BucketMapError::IndexNoSpace(index.capacity_pow2))
|
||||
}
|
||||
|
||||
pub fn addref(&mut self, key: &Pubkey) -> Option<RefCount> {
|
||||
let (elem, _) = self.find_entry_mut(key)?;
|
||||
elem.ref_count += 1;
|
||||
Some(elem.ref_count)
|
||||
}
|
||||
|
||||
pub fn unref(&mut self, key: &Pubkey) -> Option<RefCount> {
|
||||
let (elem, _) = self.find_entry_mut(key)?;
|
||||
elem.ref_count -= 1;
|
||||
Some(elem.ref_count)
|
||||
}
|
||||
|
||||
fn create_key(&mut self, key: &Pubkey) -> Result<u64, BucketMapError> {
|
||||
Self::bucket_create_key(
|
||||
&mut self.index,
|
||||
key,
|
||||
IndexEntry::key_uid(key),
|
||||
self.random,
|
||||
false,
|
||||
)
|
||||
}
|
||||
|
||||
pub fn read_value(&self, key: &Pubkey) -> Option<(&[T], RefCount)> {
|
||||
//debug!("READ_VALUE: {:?}", key);
|
||||
let (elem, _) = self.find_entry(key)?;
|
||||
elem.read_value(self)
|
||||
}
|
||||
|
||||
pub fn try_write(
|
||||
&mut self,
|
||||
key: &Pubkey,
|
||||
data: &[T],
|
||||
ref_count: u64,
|
||||
) -> Result<(), BucketMapError> {
|
||||
let best_fit_bucket = IndexEntry::data_bucket_from_num_slots(data.len() as u64);
|
||||
if self.data.get(best_fit_bucket as usize).is_none() {
|
||||
// fail early if the data bucket we need doesn't exist - we don't want the index entry partially allocated
|
||||
return Err(BucketMapError::DataNoSpace((best_fit_bucket, 0)));
|
||||
}
|
||||
let index_entry = self.find_entry_mut(key);
|
||||
let (elem, elem_ix) = match index_entry {
|
||||
None => {
|
||||
let ii = self.create_key(key)?;
|
||||
let elem: &mut IndexEntry = self.index.get_mut(ii);
|
||||
(elem, ii)
|
||||
}
|
||||
Some(res) => res,
|
||||
};
|
||||
elem.ref_count = ref_count;
|
||||
let elem_uid = self.index.uid_unchecked(elem_ix);
|
||||
let bucket_ix = elem.data_bucket_ix();
|
||||
let current_bucket = &self.data[bucket_ix as usize];
|
||||
let num_slots = data.len() as u64;
|
||||
if best_fit_bucket == bucket_ix && elem.num_slots > 0 {
|
||||
// in place update
|
||||
let elem_loc = elem.data_loc(current_bucket);
|
||||
let slice: &mut [T] = current_bucket.get_mut_cell_slice(elem_loc, data.len() as u64);
|
||||
assert_eq!(current_bucket.uid(elem_loc), Some(elem_uid));
|
||||
elem.num_slots = num_slots;
|
||||
slice.copy_from_slice(data);
|
||||
Ok(())
|
||||
} else {
|
||||
// need to move the allocation to a best fit spot
|
||||
let best_bucket = &self.data[best_fit_bucket as usize];
|
||||
let cap_power = best_bucket.capacity_pow2;
|
||||
let cap = best_bucket.capacity();
|
||||
let pos = thread_rng().gen_range(0, cap);
|
||||
for i in pos..pos + self.index.max_search() {
|
||||
let ix = i % cap;
|
||||
if best_bucket.is_free(ix) {
|
||||
let elem_loc = elem.data_loc(current_bucket);
|
||||
let old_slots = elem.num_slots;
|
||||
elem.set_storage_offset(ix);
|
||||
elem.set_storage_capacity_when_created_pow2(best_bucket.capacity_pow2);
|
||||
elem.num_slots = num_slots;
|
||||
if old_slots > 0 {
|
||||
let current_bucket = &mut self.data[bucket_ix as usize];
|
||||
current_bucket.free(elem_loc, elem_uid);
|
||||
}
|
||||
//debug!( "DATA ALLOC {:?} {} {} {}", key, elem.data_location, best_bucket.capacity, elem_uid );
|
||||
if num_slots > 0 {
|
||||
let best_bucket = &mut self.data[best_fit_bucket as usize];
|
||||
best_bucket.allocate(ix, elem_uid, false).unwrap();
|
||||
let slice = best_bucket.get_mut_cell_slice(ix, num_slots);
|
||||
slice.copy_from_slice(data);
|
||||
}
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
Err(BucketMapError::DataNoSpace((best_fit_bucket, cap_power)))
|
||||
}
|
||||
}
|
||||
|
||||
pub fn delete_key(&mut self, key: &Pubkey) {
|
||||
if let Some((elem, elem_ix)) = self.find_entry(key) {
|
||||
let elem_uid = self.index.uid_unchecked(elem_ix);
|
||||
if elem.num_slots > 0 {
|
||||
let ix = elem.data_bucket_ix() as usize;
|
||||
let data_bucket = &self.data[ix];
|
||||
let loc = elem.data_loc(data_bucket);
|
||||
let data_bucket = &mut self.data[ix];
|
||||
//debug!( "DATA FREE {:?} {} {} {}", key, elem.data_location, data_bucket.capacity, elem_uid );
|
||||
data_bucket.free(loc, elem_uid);
|
||||
}
|
||||
//debug!("INDEX FREE {:?} {}", key, elem_uid);
|
||||
self.index.free(elem_ix, elem_uid);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn grow_index(&self, current_capacity_pow2: u8) {
|
||||
if self.index.capacity_pow2 == current_capacity_pow2 {
|
||||
let mut m = Measure::start("grow_index");
|
||||
//debug!("GROW_INDEX: {}", current_capacity_pow2);
|
||||
let increment = 1;
|
||||
for i in increment.. {
|
||||
//increasing the capacity by ^4 reduces the
|
||||
//likelyhood of a re-index collision of 2^(max_search)^2
|
||||
//1 in 2^32
|
||||
let mut index = BucketStorage::new_with_capacity(
|
||||
Arc::clone(&self.drives),
|
||||
1,
|
||||
std::mem::size_of::<IndexEntry>() as u64,
|
||||
// *2 causes rapid growth of index buckets
|
||||
self.index.capacity_pow2 + i, // * 2,
|
||||
self.index.max_search,
|
||||
Arc::clone(&self.stats.index),
|
||||
Arc::clone(&self.index.count),
|
||||
);
|
||||
let random = thread_rng().gen();
|
||||
let mut valid = true;
|
||||
for ix in 0..self.index.capacity() {
|
||||
let uid = self.index.uid(ix);
|
||||
if let Some(uid) = uid {
|
||||
let elem: &IndexEntry = self.index.get(ix);
|
||||
let new_ix =
|
||||
Self::bucket_create_key(&mut index, &elem.key, uid, random, true);
|
||||
if new_ix.is_err() {
|
||||
valid = false;
|
||||
break;
|
||||
}
|
||||
let new_ix = new_ix.unwrap();
|
||||
let new_elem: &mut IndexEntry = index.get_mut(new_ix);
|
||||
*new_elem = *elem;
|
||||
/*
|
||||
let dbg_elem: IndexEntry = *new_elem;
|
||||
assert_eq!(
|
||||
Self::bucket_find_entry(&index, &elem.key, random).unwrap(),
|
||||
(&dbg_elem, new_ix)
|
||||
);
|
||||
*/
|
||||
}
|
||||
}
|
||||
if valid {
|
||||
let sz = index.capacity();
|
||||
{
|
||||
let mut max = self.stats.index.max_size.lock().unwrap();
|
||||
*max = std::cmp::max(*max, sz);
|
||||
}
|
||||
let mut items = self.reallocated.items.lock().unwrap();
|
||||
items.index = Some((random, index));
|
||||
self.reallocated.add_reallocation();
|
||||
break;
|
||||
}
|
||||
}
|
||||
m.stop();
|
||||
self.stats.index.resizes.fetch_add(1, Ordering::Relaxed);
|
||||
self.stats
|
||||
.index
|
||||
.resize_us
|
||||
.fetch_add(m.as_us(), Ordering::Relaxed);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn apply_grow_index(&mut self, random: u64, index: BucketStorage) {
|
||||
self.random = random;
|
||||
self.index = index;
|
||||
}
|
||||
|
||||
fn elem_size() -> u64 {
|
||||
std::mem::size_of::<T>() as u64
|
||||
}
|
||||
|
||||
pub fn apply_grow_data(&mut self, ix: usize, bucket: BucketStorage) {
|
||||
if self.data.get(ix).is_none() {
|
||||
for i in self.data.len()..ix {
|
||||
// insert empty data buckets
|
||||
self.data.push(BucketStorage::new(
|
||||
Arc::clone(&self.drives),
|
||||
1 << i,
|
||||
Self::elem_size(),
|
||||
self.index.max_search,
|
||||
Arc::clone(&self.stats.data),
|
||||
Arc::default(),
|
||||
))
|
||||
}
|
||||
self.data.push(bucket);
|
||||
} else {
|
||||
self.data[ix] = bucket;
|
||||
}
|
||||
}
|
||||
|
||||
/// grow a data bucket
|
||||
/// The application of the new bucket is deferred until the next write lock.
|
||||
pub fn grow_data(&self, data_index: u64, current_capacity_pow2: u8) {
|
||||
let new_bucket = BucketStorage::new_resized(
|
||||
&self.drives,
|
||||
self.index.max_search,
|
||||
self.data.get(data_index as usize),
|
||||
std::cmp::max(current_capacity_pow2 + 1, DEFAULT_CAPACITY_POW2),
|
||||
1 << data_index,
|
||||
Self::elem_size(),
|
||||
&self.stats.data,
|
||||
);
|
||||
self.reallocated.add_reallocation();
|
||||
let mut items = self.reallocated.items.lock().unwrap();
|
||||
items.data = Some((data_index, new_bucket));
|
||||
}
|
||||
|
||||
fn bucket_index_ix(index: &BucketStorage, key: &Pubkey, random: u64) -> u64 {
|
||||
let uid = IndexEntry::key_uid(key);
|
||||
let mut s = DefaultHasher::new();
|
||||
uid.hash(&mut s);
|
||||
//the locally generated random will make it hard for an attacker
|
||||
//to deterministically cause all the pubkeys to land in the same
|
||||
//location in any bucket on all validators
|
||||
random.hash(&mut s);
|
||||
let ix = s.finish();
|
||||
ix % index.capacity()
|
||||
//debug!( "INDEX_IX: {:?} uid:{} loc: {} cap:{}", key, uid, location, index.capacity() );
|
||||
}
|
||||
|
||||
/// grow the appropriate piece. Note this takes an immutable ref.
|
||||
/// The actual grow is set into self.reallocated and applied later on a write lock
|
||||
pub fn grow(&self, err: BucketMapError) {
|
||||
match err {
|
||||
BucketMapError::DataNoSpace((data_index, current_capacity_pow2)) => {
|
||||
//debug!("GROWING SPACE {:?}", (data_index, current_capacity_pow2));
|
||||
self.grow_data(data_index, current_capacity_pow2);
|
||||
}
|
||||
BucketMapError::IndexNoSpace(current_capacity_pow2) => {
|
||||
//debug!("GROWING INDEX {}", sz);
|
||||
self.grow_index(current_capacity_pow2);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// if a bucket was resized previously with a read lock, then apply that resize now
|
||||
/// if a bucket was resized previously with a read lock, then apply that resize now
pub fn handle_delayed_grows(&mut self) {
    if self.reallocated.get_reallocated() {
        // swap out the bucket that was resized previously with a read lock
        // Take ownership of the pending items while holding the mutex only
        // for the duration of the swap; an empty ReallocatedItems is left
        // behind for the next grow.
        let mut items = ReallocatedItems::default();
        std::mem::swap(&mut items, &mut self.reallocated.items.lock().unwrap());

        if let Some((random, bucket)) = items.index.take() {
            self.apply_grow_index(random, bucket);
        } else {
            // data bucket
            // get_reallocated() returned true, so one of index/data must be
            // populated; since index was None, data must be Some here.
            let (i, new_bucket) = items.data.take().unwrap();
            self.apply_grow_data(i as usize, new_bucket);
        }
    }
}
|
||||
|
||||
/// Write `value` for `key`, growing the index/data buckets and retrying
/// until the write succeeds.
pub fn insert(&mut self, key: &Pubkey, value: (&[T], RefCount)) {
    let (data, ref_count) = value;
    loop {
        match self.try_write(key, data, ref_count) {
            Ok(_) => break,
            Err(err) => {
                // out of space somewhere: grow the failing piece, apply the
                // deferred resize, then retry the write
                self.grow(err);
                self.handle_delayed_grows();
            }
        }
    }
}
|
||||
|
||||
pub fn update<F>(&mut self, key: &Pubkey, mut updatefn: F)
|
||||
where
|
||||
F: FnMut(Option<(&[T], RefCount)>) -> Option<(Vec<T>, RefCount)>,
|
||||
{
|
||||
let current = self.read_value(key);
|
||||
let new = updatefn(current);
|
||||
if new.is_none() {
|
||||
self.delete_key(key);
|
||||
return;
|
||||
}
|
||||
let (new, refct) = new.unwrap();
|
||||
self.insert(key, (&new, refct));
|
||||
}
|
||||
}
|
142
bucket_map/src/bucket_api.rs
Normal file
142
bucket_map/src/bucket_api.rs
Normal file
@ -0,0 +1,142 @@
|
||||
use {
|
||||
crate::{
|
||||
bucket::Bucket, bucket_item::BucketItem, bucket_map::BucketMapError,
|
||||
bucket_stats::BucketMapStats, MaxSearch, RefCount,
|
||||
},
|
||||
solana_sdk::pubkey::Pubkey,
|
||||
std::{
|
||||
ops::RangeBounds,
|
||||
path::PathBuf,
|
||||
sync::{
|
||||
atomic::{AtomicU64, Ordering},
|
||||
Arc, RwLock, RwLockWriteGuard,
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
type LockedBucket<T> = RwLock<Option<Bucket<T>>>;
|
||||
|
||||
pub struct BucketApi<T: Clone + Copy> {
    // Directories the backing files may be placed in.
    drives: Arc<Vec<PathBuf>>,
    // Max number of probes when searching for a slot.
    max_search: MaxSearch,
    pub stats: Arc<BucketMapStats>,

    // Lazily created underlying bucket; None until the first write access.
    bucket: LockedBucket<T>,
    // Entry count, shared (via Arc) with the bucket created in
    // get_write_bucket so bucket_len can read it without the bucket lock.
    count: Arc<AtomicU64>,
}
|
||||
|
||||
impl<T: Clone + Copy> BucketApi<T> {
    /// Create the api wrapper; the underlying `Bucket` is created lazily on
    /// first write access (see `get_write_bucket`).
    pub fn new(
        drives: Arc<Vec<PathBuf>>,
        max_search: MaxSearch,
        stats: Arc<BucketMapStats>,
    ) -> Self {
        Self {
            drives,
            max_search,
            stats,
            bucket: RwLock::default(),
            count: Arc::default(),
        }
    }

    /// Get the items for bucket
    /// Returns an empty Vec if the bucket has never been written.
    pub fn items_in_range<R>(&self, range: &Option<&R>) -> Vec<BucketItem<T>>
    where
        R: RangeBounds<Pubkey>,
    {
        self.bucket
            .read()
            .unwrap()
            .as_ref()
            .map(|bucket| bucket.items_in_range(range))
            .unwrap_or_default()
    }

    /// Get the Pubkeys
    pub fn keys(&self) -> Vec<Pubkey> {
        self.bucket
            .read()
            .unwrap()
            .as_ref()
            .map_or_else(Vec::default, |bucket| bucket.keys())
    }

    /// Get the values for Pubkey `key`
    pub fn read_value(&self, key: &Pubkey) -> Option<(Vec<T>, RefCount)> {
        self.bucket.read().unwrap().as_ref().and_then(|bucket| {
            bucket
                .read_value(key)
                .map(|(value, ref_count)| (value.to_vec(), ref_count))
        })
    }

    /// Entry count, read from the shared counter without taking the bucket
    /// lock.
    pub fn bucket_len(&self) -> u64 {
        self.count.load(Ordering::Relaxed)
    }

    /// Delete `key` (no-op if the bucket was never created).
    pub fn delete_key(&self, key: &Pubkey) {
        let mut bucket = self.get_write_bucket();
        if let Some(bucket) = bucket.as_mut() {
            bucket.delete_key(key)
        }
    }

    /// Take the write lock; create the bucket on first use, otherwise apply
    /// any resize that was prepared earlier under a read lock (see `grow`).
    fn get_write_bucket(&self) -> RwLockWriteGuard<Option<Bucket<T>>> {
        let mut bucket = self.bucket.write().unwrap();
        if bucket.is_none() {
            *bucket = Some(Bucket::new(
                Arc::clone(&self.drives),
                self.max_search,
                Arc::clone(&self.stats),
                Arc::clone(&self.count),
            ));
        } else {
            let write = bucket.as_mut().unwrap();
            write.handle_delayed_grows();
        }
        bucket
    }

    /// Increment the refcount for `key`; returns None if the key is absent
    /// (return value presumably the updated count — confirm in Bucket::addref).
    pub fn addref(&self, key: &Pubkey) -> Option<RefCount> {
        self.get_write_bucket()
            .as_mut()
            .and_then(|bucket| bucket.addref(key))
    }

    /// Decrement the refcount for `key`; returns None if the key is absent.
    pub fn unref(&self, key: &Pubkey) -> Option<RefCount> {
        self.get_write_bucket()
            .as_mut()
            .and_then(|bucket| bucket.unref(key))
    }

    /// Insert `value` for `pubkey`, growing as needed until it succeeds.
    pub fn insert(&self, pubkey: &Pubkey, value: (&[T], RefCount)) {
        let mut bucket = self.get_write_bucket();
        bucket.as_mut().unwrap().insert(pubkey, value)
    }

    pub fn grow(&self, err: BucketMapError) {
        // grows are special - they get a read lock and modify 'reallocated'
        // the grown changes are applied the next time there is a write lock taken
        if let Some(bucket) = self.bucket.read().unwrap().as_ref() {
            bucket.grow(err)
        }
    }

    /// Apply `updatefn` to the current value of `key` (None deletes it).
    pub fn update<F>(&self, key: &Pubkey, updatefn: F)
    where
        F: FnMut(Option<(&[T], RefCount)>) -> Option<(Vec<T>, RefCount)>,
    {
        let mut bucket = self.get_write_bucket();
        bucket.as_mut().unwrap().update(key, updatefn)
    }

    /// Single write attempt; Err means a bucket is out of space and the
    /// caller should `grow` and retry.
    pub fn try_write(
        &self,
        pubkey: &Pubkey,
        value: (&[T], RefCount),
    ) -> Result<(), BucketMapError> {
        let mut bucket = self.get_write_bucket();
        bucket.as_mut().unwrap().try_write(pubkey, value.0, value.1)
    }
}
|
8
bucket_map/src/bucket_item.rs
Normal file
8
bucket_map/src/bucket_item.rs
Normal file
@ -0,0 +1,8 @@
|
||||
use {crate::RefCount, solana_sdk::pubkey::Pubkey};
|
||||
|
||||
// One entry of a bucket, as returned by range/scan queries.
#[derive(Debug, Default, Clone)]
pub struct BucketItem<T> {
    pub pubkey: Pubkey,
    // Number of outstanding references to this entry.
    pub ref_count: RefCount,
    pub slot_list: Vec<T>,
}
|
522
bucket_map/src/bucket_map.rs
Normal file
522
bucket_map/src/bucket_map.rs
Normal file
@ -0,0 +1,522 @@
|
||||
//! BucketMap is a mostly contention free concurrent map backed by MmapMut
|
||||
|
||||
use {
|
||||
crate::{bucket_api::BucketApi, bucket_stats::BucketMapStats, MaxSearch, RefCount},
|
||||
solana_sdk::pubkey::Pubkey,
|
||||
std::{convert::TryInto, fmt::Debug, fs, path::PathBuf, sync::Arc},
|
||||
tempfile::TempDir,
|
||||
};
|
||||
|
||||
#[derive(Debug, Default, Clone)]
pub struct BucketMapConfig {
    // Number of buckets; must be a power of two (asserted in BucketMap::new).
    pub max_buckets: usize,
    // Directories for the backing files; None => a TempDir is created.
    pub drives: Option<Vec<PathBuf>>,
    // Max probe count; None => BucketMap's default (32).
    pub max_search: Option<MaxSearch>,
}
|
||||
|
||||
impl BucketMapConfig {
|
||||
/// Create a new BucketMapConfig
|
||||
/// NOTE: BucketMap requires that max_buckets is a power of two
|
||||
pub fn new(max_buckets: usize) -> BucketMapConfig {
|
||||
BucketMapConfig {
|
||||
max_buckets,
|
||||
..BucketMapConfig::default()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct BucketMap<T: Clone + Copy + Debug> {
    // One lazily initialized bucket per hash prefix.
    buckets: Vec<Arc<BucketApi<T>>>,
    // Directories holding the backing files; erased on drop unless temp.
    drives: Arc<Vec<PathBuf>>,
    // log2(buckets.len()); number of high key bits used to pick a bucket.
    max_buckets_pow2: u8,
    pub stats: Arc<BucketMapStats>,
    // Some(..) when no drives were configured and a TempDir was created.
    pub temp_dir: Option<TempDir>,
}
|
||||
|
||||
impl<T: Clone + Copy + Debug> Drop for BucketMap<T> {
    /// Remove on-disk state for caller-provided drives. A TempDir cleans
    /// itself up when `temp_dir` drops, so nothing to do in that case.
    fn drop(&mut self) {
        let caller_owned_drives = self.temp_dir.is_none();
        if caller_owned_drives {
            Self::erase_previous_drives(&self.drives);
        }
    }
}
|
||||
|
||||
impl<T: Clone + Copy + Debug> std::fmt::Debug for BucketMap<T> {
    // Deliberately writes nothing — presumably Debug is only needed to
    // satisfy trait bounds elsewhere; confirm before relying on output.
    fn fmt(&self, _f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        Ok(())
    }
}
|
||||
|
||||
// Returned by try_write/try_insert when a bucket is full; carries the
// current capacity so the caller can request a grow and retry.
#[derive(Debug)]
pub enum BucketMapError {
    /// (bucket_index, current_capacity_pow2)
    DataNoSpace((u64, u8)),
    /// current_capacity_pow2
    IndexNoSpace(u8),
}
|
||||
|
||||
impl<T: Clone + Copy + Debug> BucketMap<T> {
    /// Build the map: wipe any previous files on the configured drives (or
    /// create a TempDir when none are configured) and create one lazy
    /// BucketApi per bucket.
    pub fn new(config: BucketMapConfig) -> Self {
        assert_ne!(
            config.max_buckets, 0,
            "Max number of buckets must be non-zero"
        );
        assert!(
            config.max_buckets.is_power_of_two(),
            "Max number of buckets must be a power of two"
        );
        // this should be <= 1 << DEFAULT_CAPACITY or we end up searching the same items over and over - probably not a big deal since it is so small anyway
        const MAX_SEARCH: MaxSearch = 32;
        let max_search = config.max_search.unwrap_or(MAX_SEARCH);

        if let Some(drives) = config.drives.as_ref() {
            Self::erase_previous_drives(drives);
        }
        let mut temp_dir = None;
        let drives = config.drives.unwrap_or_else(|| {
            // no drives configured: keep the TempDir alive in `temp_dir`
            // so it is removed when the map drops
            temp_dir = Some(TempDir::new().unwrap());
            vec![temp_dir.as_ref().unwrap().path().to_path_buf()]
        });
        let drives = Arc::new(drives);

        let stats = Arc::default();
        let buckets = (0..config.max_buckets)
            .into_iter()
            .map(|_| {
                Arc::new(BucketApi::new(
                    Arc::clone(&drives),
                    max_search,
                    Arc::clone(&stats),
                ))
            })
            .collect();

        // A simple log2 function that is correct if x is a power of two
        let log2 = |x: usize| usize::BITS - x.leading_zeros() - 1;

        Self {
            buckets,
            drives,
            max_buckets_pow2: log2(config.max_buckets) as u8,
            stats,
            temp_dir,
        }
    }

    // Best-effort wipe and recreate of each drive directory; errors (e.g.
    // directory missing) are deliberately ignored.
    fn erase_previous_drives(drives: &[PathBuf]) {
        drives.iter().for_each(|folder| {
            let _ = fs::remove_dir_all(&folder);
            let _ = fs::create_dir_all(&folder);
        })
    }

    pub fn num_buckets(&self) -> usize {
        self.buckets.len()
    }

    /// Get the values for Pubkey `key`
    pub fn read_value(&self, key: &Pubkey) -> Option<(Vec<T>, RefCount)> {
        self.get_bucket(key).read_value(key)
    }

    /// Delete the Pubkey `key`
    pub fn delete_key(&self, key: &Pubkey) {
        self.get_bucket(key).delete_key(key);
    }

    /// Update Pubkey `key`'s value with 'value'
    pub fn insert(&self, key: &Pubkey, value: (&[T], RefCount)) {
        self.get_bucket(key).insert(key, value)
    }

    /// Update Pubkey `key`'s value with 'value'
    /// Single attempt: Err means the bucket needs to grow first.
    pub fn try_insert(&self, key: &Pubkey, value: (&[T], RefCount)) -> Result<(), BucketMapError> {
        self.get_bucket(key).try_write(key, value)
    }

    /// Update Pubkey `key`'s value with function `updatefn`
    pub fn update<F>(&self, key: &Pubkey, updatefn: F)
    where
        F: FnMut(Option<(&[T], RefCount)>) -> Option<(Vec<T>, RefCount)>,
    {
        self.get_bucket(key).update(key, updatefn)
    }

    pub fn get_bucket(&self, key: &Pubkey) -> &Arc<BucketApi<T>> {
        self.get_bucket_from_index(self.bucket_ix(key))
    }

    pub fn get_bucket_from_index(&self, ix: usize) -> &Arc<BucketApi<T>> {
        &self.buckets[ix]
    }

    /// Get the bucket index for Pubkey `key`
    /// Uses the top `max_buckets_pow2` bits of the big-endian key bytes so
    /// keys are spread across buckets by their leading bytes.
    pub fn bucket_ix(&self, key: &Pubkey) -> usize {
        if self.max_buckets_pow2 > 0 {
            let location = read_be_u64(key.as_ref());
            (location >> (u64::BITS - self.max_buckets_pow2 as u32)) as usize
        } else {
            0
        }
    }

    /// Increment the refcount for Pubkey `key`
    pub fn addref(&self, key: &Pubkey) -> Option<RefCount> {
        let ix = self.bucket_ix(key);
        let bucket = &self.buckets[ix];
        bucket.addref(key)
    }

    /// Decrement the refcount for Pubkey `key`
    pub fn unref(&self, key: &Pubkey) -> Option<RefCount> {
        let ix = self.bucket_ix(key);
        let bucket = &self.buckets[ix];
        bucket.unref(key)
    }
}
|
||||
|
||||
/// Look at the first 8 bytes of the input and reinterpret them as a u64
fn read_be_u64(input: &[u8]) -> u64 {
    const N: usize = std::mem::size_of::<u64>();
    assert!(input.len() >= N);
    let bytes: [u8; N] = input[..N].try_into().unwrap();
    u64::from_be_bytes(bytes)
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use {
|
||||
super::*,
|
||||
rand::{thread_rng, Rng},
|
||||
std::{collections::HashMap, sync::RwLock},
|
||||
};
|
||||
|
||||
#[test]
fn bucket_map_test_insert() {
    // Round-trip a single key through `update` on a 2-bucket map.
    let key = Pubkey::new_unique();
    let index = BucketMap::new(BucketMapConfig::new(1 << 1));
    index.update(&key, |_| Some((vec![0], 0)));
    assert_eq!(index.read_value(&key), Some((vec![0], 0)));
}
|
||||
|
||||
#[test]
|
||||
fn bucket_map_test_insert2() {
|
||||
for pass in 0..3 {
|
||||
let key = Pubkey::new_unique();
|
||||
let config = BucketMapConfig::new(1 << 1);
|
||||
let index = BucketMap::new(config);
|
||||
let bucket = index.get_bucket(&key);
|
||||
if pass == 0 {
|
||||
index.insert(&key, (&[0], 0));
|
||||
} else {
|
||||
let result = index.try_insert(&key, (&[0], 0));
|
||||
assert!(result.is_err());
|
||||
assert_eq!(index.read_value(&key), None);
|
||||
if pass == 2 {
|
||||
// another call to try insert again - should still return an error
|
||||
let result = index.try_insert(&key, (&[0], 0));
|
||||
assert!(result.is_err());
|
||||
assert_eq!(index.read_value(&key), None);
|
||||
}
|
||||
bucket.grow(result.unwrap_err());
|
||||
let result = index.try_insert(&key, (&[0], 0));
|
||||
assert!(result.is_ok());
|
||||
}
|
||||
assert_eq!(index.read_value(&key), Some((vec![0], 0)));
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn bucket_map_test_update2() {
|
||||
let key = Pubkey::new_unique();
|
||||
let config = BucketMapConfig::new(1 << 1);
|
||||
let index = BucketMap::new(config);
|
||||
index.insert(&key, (&[0], 0));
|
||||
assert_eq!(index.read_value(&key), Some((vec![0], 0)));
|
||||
index.insert(&key, (&[1], 0));
|
||||
assert_eq!(index.read_value(&key), Some((vec![1], 0)));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn bucket_map_test_update() {
|
||||
let key = Pubkey::new_unique();
|
||||
let config = BucketMapConfig::new(1 << 1);
|
||||
let index = BucketMap::new(config);
|
||||
index.update(&key, |_| Some((vec![0], 0)));
|
||||
assert_eq!(index.read_value(&key), Some((vec![0], 0)));
|
||||
index.update(&key, |_| Some((vec![1], 0)));
|
||||
assert_eq!(index.read_value(&key), Some((vec![1], 0)));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn bucket_map_test_update_to_0_len() {
|
||||
solana_logger::setup();
|
||||
let key = Pubkey::new_unique();
|
||||
let config = BucketMapConfig::new(1 << 1);
|
||||
let index = BucketMap::new(config);
|
||||
index.update(&key, |_| Some((vec![0], 1)));
|
||||
assert_eq!(index.read_value(&key), Some((vec![0], 1)));
|
||||
// sets len to 0, updates in place
|
||||
index.update(&key, |_| Some((vec![], 1)));
|
||||
assert_eq!(index.read_value(&key), Some((vec![], 1)));
|
||||
// sets len to 0, doesn't update in place - finds a new place, which causes us to no longer have an allocation in data
|
||||
index.update(&key, |_| Some((vec![], 2)));
|
||||
assert_eq!(index.read_value(&key), Some((vec![], 2)));
|
||||
// sets len to 1, doesn't update in place - finds a new place
|
||||
index.update(&key, |_| Some((vec![1], 2)));
|
||||
assert_eq!(index.read_value(&key), Some((vec![1], 2)));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn bucket_map_test_delete() {
|
||||
let config = BucketMapConfig::new(1 << 1);
|
||||
let index = BucketMap::new(config);
|
||||
for i in 0..10 {
|
||||
let key = Pubkey::new_unique();
|
||||
assert_eq!(index.read_value(&key), None);
|
||||
|
||||
index.update(&key, |_| Some((vec![i], 0)));
|
||||
assert_eq!(index.read_value(&key), Some((vec![i], 0)));
|
||||
|
||||
index.delete_key(&key);
|
||||
assert_eq!(index.read_value(&key), None);
|
||||
|
||||
index.update(&key, |_| Some((vec![i], 0)));
|
||||
assert_eq!(index.read_value(&key), Some((vec![i], 0)));
|
||||
index.delete_key(&key);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn bucket_map_test_delete_2() {
|
||||
let config = BucketMapConfig::new(1 << 2);
|
||||
let index = BucketMap::new(config);
|
||||
for i in 0..100 {
|
||||
let key = Pubkey::new_unique();
|
||||
assert_eq!(index.read_value(&key), None);
|
||||
|
||||
index.update(&key, |_| Some((vec![i], 0)));
|
||||
assert_eq!(index.read_value(&key), Some((vec![i], 0)));
|
||||
|
||||
index.delete_key(&key);
|
||||
assert_eq!(index.read_value(&key), None);
|
||||
|
||||
index.update(&key, |_| Some((vec![i], 0)));
|
||||
assert_eq!(index.read_value(&key), Some((vec![i], 0)));
|
||||
index.delete_key(&key);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn bucket_map_test_n_drives() {
|
||||
let config = BucketMapConfig::new(1 << 2);
|
||||
let index = BucketMap::new(config);
|
||||
for i in 0..100 {
|
||||
let key = Pubkey::new_unique();
|
||||
index.update(&key, |_| Some((vec![i], 0)));
|
||||
assert_eq!(index.read_value(&key), Some((vec![i], 0)));
|
||||
}
|
||||
}
|
||||
#[test]
|
||||
fn bucket_map_test_grow_read() {
|
||||
let config = BucketMapConfig::new(1 << 2);
|
||||
let index = BucketMap::new(config);
|
||||
let keys: Vec<Pubkey> = (0..100).into_iter().map(|_| Pubkey::new_unique()).collect();
|
||||
for k in 0..keys.len() {
|
||||
let key = &keys[k];
|
||||
let i = read_be_u64(key.as_ref());
|
||||
index.update(key, |_| Some((vec![i], 0)));
|
||||
assert_eq!(index.read_value(key), Some((vec![i], 0)));
|
||||
for (ix, key) in keys.iter().enumerate() {
|
||||
let i = read_be_u64(key.as_ref());
|
||||
//debug!("READ: {:?} {}", key, i);
|
||||
let expected = if ix <= k { Some((vec![i], 0)) } else { None };
|
||||
assert_eq!(index.read_value(key), expected);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn bucket_map_test_n_delete() {
|
||||
let config = BucketMapConfig::new(1 << 2);
|
||||
let index = BucketMap::new(config);
|
||||
let keys: Vec<Pubkey> = (0..20).into_iter().map(|_| Pubkey::new_unique()).collect();
|
||||
for key in keys.iter() {
|
||||
let i = read_be_u64(key.as_ref());
|
||||
index.update(key, |_| Some((vec![i], 0)));
|
||||
assert_eq!(index.read_value(key), Some((vec![i], 0)));
|
||||
}
|
||||
for key in keys.iter() {
|
||||
let i = read_be_u64(key.as_ref());
|
||||
//debug!("READ: {:?} {}", key, i);
|
||||
assert_eq!(index.read_value(key), Some((vec![i], 0)));
|
||||
}
|
||||
for k in 0..keys.len() {
|
||||
let key = &keys[k];
|
||||
index.delete_key(key);
|
||||
assert_eq!(index.read_value(key), None);
|
||||
for key in keys.iter().skip(k + 1) {
|
||||
let i = read_be_u64(key.as_ref());
|
||||
assert_eq!(index.read_value(key), Some((vec![i], 0)));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn hashmap_compare() {
|
||||
use std::sync::Mutex;
|
||||
solana_logger::setup();
|
||||
let maps = (0..2)
|
||||
.into_iter()
|
||||
.map(|max_buckets_pow2| {
|
||||
let config = BucketMapConfig::new(1 << max_buckets_pow2);
|
||||
BucketMap::new(config)
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
let hash_map = RwLock::new(HashMap::<Pubkey, (Vec<(usize, usize)>, RefCount)>::new());
|
||||
let max_slot_list_len = 3;
|
||||
let all_keys = Mutex::new(vec![]);
|
||||
|
||||
let gen_rand_value = || {
|
||||
let count = thread_rng().gen_range(0, max_slot_list_len);
|
||||
let v = (0..count)
|
||||
.into_iter()
|
||||
.map(|x| (x as usize, x as usize /*thread_rng().gen::<usize>()*/))
|
||||
.collect::<Vec<_>>();
|
||||
let rc = thread_rng().gen::<RefCount>();
|
||||
(v, rc)
|
||||
};
|
||||
|
||||
let get_key = || {
|
||||
let mut keys = all_keys.lock().unwrap();
|
||||
if keys.is_empty() {
|
||||
return None;
|
||||
}
|
||||
let len = keys.len();
|
||||
Some(keys.remove(thread_rng().gen_range(0, len)))
|
||||
};
|
||||
let return_key = |key| {
|
||||
let mut keys = all_keys.lock().unwrap();
|
||||
keys.push(key);
|
||||
};
|
||||
|
||||
let verify = || {
|
||||
let mut maps = maps
|
||||
.iter()
|
||||
.map(|map| {
|
||||
let mut r = vec![];
|
||||
for bin in 0..map.num_buckets() {
|
||||
r.append(
|
||||
&mut map.buckets[bin]
|
||||
.items_in_range(&None::<&std::ops::RangeInclusive<Pubkey>>),
|
||||
);
|
||||
}
|
||||
r
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
let hm = hash_map.read().unwrap();
|
||||
for (k, v) in hm.iter() {
|
||||
for map in maps.iter_mut() {
|
||||
for i in 0..map.len() {
|
||||
if k == &map[i].pubkey {
|
||||
assert_eq!(map[i].slot_list, v.0);
|
||||
assert_eq!(map[i].ref_count, v.1);
|
||||
map.remove(i);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
for map in maps.iter() {
|
||||
assert!(map.is_empty());
|
||||
}
|
||||
};
|
||||
let mut initial = 100; // put this many items in to start
|
||||
|
||||
// do random operations: insert, update, delete, add/unref in random order
|
||||
// verify consistency between hashmap and all bucket maps
|
||||
for i in 0..10000 {
|
||||
if initial > 0 {
|
||||
initial -= 1;
|
||||
}
|
||||
if initial > 0 || thread_rng().gen_range(0, 5) == 0 {
|
||||
// insert
|
||||
let k = solana_sdk::pubkey::new_rand();
|
||||
let v = gen_rand_value();
|
||||
hash_map.write().unwrap().insert(k, v.clone());
|
||||
let insert = thread_rng().gen_range(0, 2) == 0;
|
||||
maps.iter().for_each(|map| {
|
||||
if insert {
|
||||
map.insert(&k, (&v.0, v.1))
|
||||
} else {
|
||||
map.update(&k, |current| {
|
||||
assert!(current.is_none());
|
||||
Some(v.clone())
|
||||
})
|
||||
}
|
||||
});
|
||||
return_key(k);
|
||||
}
|
||||
if thread_rng().gen_range(0, 10) == 0 {
|
||||
// update
|
||||
if let Some(k) = get_key() {
|
||||
let hm = hash_map.read().unwrap();
|
||||
let (v, rc) = gen_rand_value();
|
||||
let v_old = hm.get(&k);
|
||||
let insert = thread_rng().gen_range(0, 2) == 0;
|
||||
maps.iter().for_each(|map| {
|
||||
if insert {
|
||||
map.insert(&k, (&v, rc))
|
||||
} else {
|
||||
map.update(&k, |current| {
|
||||
assert_eq!(current, v_old.map(|(v, rc)| (&v[..], *rc)), "{}", k);
|
||||
Some((v.clone(), rc))
|
||||
})
|
||||
}
|
||||
});
|
||||
drop(hm);
|
||||
hash_map.write().unwrap().insert(k, (v, rc));
|
||||
return_key(k);
|
||||
}
|
||||
}
|
||||
if thread_rng().gen_range(0, 20) == 0 {
|
||||
// delete
|
||||
if let Some(k) = get_key() {
|
||||
let mut hm = hash_map.write().unwrap();
|
||||
hm.remove(&k);
|
||||
maps.iter().for_each(|map| {
|
||||
map.delete_key(&k);
|
||||
});
|
||||
}
|
||||
}
|
||||
if thread_rng().gen_range(0, 10) == 0 {
|
||||
// add/unref
|
||||
if let Some(k) = get_key() {
|
||||
let mut inc = thread_rng().gen_range(0, 2) == 0;
|
||||
let mut hm = hash_map.write().unwrap();
|
||||
let (v, mut rc) = hm.get(&k).map(|(v, rc)| (v.to_vec(), *rc)).unwrap();
|
||||
if !inc && rc == 0 {
|
||||
// can't decrement rc=0
|
||||
inc = true;
|
||||
}
|
||||
rc = if inc { rc + 1 } else { rc - 1 };
|
||||
hm.insert(k, (v.to_vec(), rc));
|
||||
maps.iter().for_each(|map| {
|
||||
if thread_rng().gen_range(0, 2) == 0 {
|
||||
map.update(&k, |current| Some((current.unwrap().0.to_vec(), rc)))
|
||||
} else if inc {
|
||||
map.addref(&k);
|
||||
} else {
|
||||
map.unref(&k);
|
||||
}
|
||||
});
|
||||
|
||||
return_key(k);
|
||||
}
|
||||
}
|
||||
if i % 1000 == 0 {
|
||||
verify();
|
||||
}
|
||||
}
|
||||
verify();
|
||||
}
|
||||
}
|
17
bucket_map/src/bucket_stats.rs
Normal file
17
bucket_map/src/bucket_stats.rs
Normal file
@ -0,0 +1,17 @@
|
||||
use std::sync::{atomic::AtomicU64, Arc, Mutex};
|
||||
|
||||
// Timing/size counters for one storage class (index or data buckets).
#[derive(Debug, Default)]
pub struct BucketStats {
    // number of resizes performed
    pub resizes: AtomicU64,
    // largest size observed
    pub max_size: Mutex<u64>,
    // cumulative microseconds spent in the listed operations
    pub resize_us: AtomicU64,
    pub new_file_us: AtomicU64,
    pub flush_file_us: AtomicU64,
    pub mmap_us: AtomicU64,
}
|
||||
|
||||
// Stats split by storage class: the index buckets vs. the data buckets.
#[derive(Debug, Default)]
pub struct BucketMapStats {
    pub index: Arc<BucketStats>,
    pub data: Arc<BucketStats>,
}
|
424
bucket_map/src/bucket_storage.rs
Normal file
424
bucket_map/src/bucket_storage.rs
Normal file
@ -0,0 +1,424 @@
|
||||
use {
|
||||
crate::{bucket_stats::BucketStats, MaxSearch},
|
||||
memmap2::MmapMut,
|
||||
rand::{thread_rng, Rng},
|
||||
solana_measure::measure::Measure,
|
||||
std::{
|
||||
fs::{remove_file, OpenOptions},
|
||||
io::{Seek, SeekFrom, Write},
|
||||
path::PathBuf,
|
||||
sync::{
|
||||
atomic::{AtomicU64, Ordering},
|
||||
Arc,
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
/*
|
||||
1 2
|
||||
2 4
|
||||
3 8
|
||||
4 16
|
||||
5 32
|
||||
6 64
|
||||
7 128
|
||||
8 256
|
||||
9 512
|
||||
10 1,024
|
||||
11 2,048
|
||||
12 4,096
|
||||
13 8,192
|
||||
14 16,384
|
||||
23 8,388,608
|
||||
24 16,777,216
|
||||
*/
|
||||
pub const DEFAULT_CAPACITY_POW2: u8 = 5;
|
||||
|
||||
/// A Header UID of 0 indicates that the header is unlocked
|
||||
const UID_UNLOCKED: Uid = 0;
|
||||
|
||||
pub(crate) type Uid = u64;
|
||||
|
||||
// Per-cell header, read/written in place through the mmap (hence #[repr(C)]).
#[repr(C)]
struct Header {
    // Uid of the cell's owner, or UID_UNLOCKED (0) when the cell is vacant.
    lock: u64,
}
|
||||
|
||||
impl Header {
|
||||
/// try to lock this entry with 'uid'
|
||||
/// return true if it could be locked
|
||||
fn try_lock(&mut self, uid: Uid) -> bool {
|
||||
if self.lock == UID_UNLOCKED {
|
||||
self.lock = uid;
|
||||
true
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
/// mark this entry as unlocked
|
||||
fn unlock(&mut self, expected: Uid) {
|
||||
assert_eq!(expected, self.lock);
|
||||
self.lock = UID_UNLOCKED;
|
||||
}
|
||||
/// uid that has locked this entry or None if unlocked
|
||||
fn uid(&self) -> Option<Uid> {
|
||||
if self.lock == UID_UNLOCKED {
|
||||
None
|
||||
} else {
|
||||
Some(self.lock)
|
||||
}
|
||||
}
|
||||
/// true if this entry is unlocked
|
||||
fn is_unlocked(&self) -> bool {
|
||||
self.lock == UID_UNLOCKED
|
||||
}
|
||||
}
|
||||
|
||||
// Fixed-size cell storage backed by a memory-mapped file. Each cell is a
// Header followed by the payload; capacity is always a power of two.
pub struct BucketStorage {
    // backing file; removed on drop
    path: PathBuf,
    mmap: MmapMut,
    // bytes per cell (header + payload)
    pub cell_size: u64,
    // capacity == 1 << capacity_pow2 cells
    pub capacity_pow2: u8,
    // number of allocated cells; shared with the owning bucket api
    pub count: Arc<AtomicU64>,
    pub stats: Arc<BucketStats>,
    pub max_search: MaxSearch,
}
|
||||
|
||||
#[derive(Debug)]
pub enum BucketStorageError {
    // allocate() found the target cell already owned by some uid
    AlreadyAllocated,
}
|
||||
|
||||
impl Drop for BucketStorage {
|
||||
fn drop(&mut self) {
|
||||
let _ = remove_file(&self.path);
|
||||
}
|
||||
}
|
||||
|
||||
impl BucketStorage {
|
||||
/// Create mmap-backed storage holding `2^capacity_pow2` cells, where one
/// cell is a `Header` followed by `num_elems` elements of `elem_size`
/// bytes each.
pub fn new_with_capacity(
    drives: Arc<Vec<PathBuf>>,
    num_elems: u64,
    elem_size: u64,
    capacity_pow2: u8,
    max_search: MaxSearch,
    stats: Arc<BucketStats>,
    count: Arc<AtomicU64>,
) -> Self {
    // bytes per cell: payload plus the in-file header
    let cell_size = elem_size * num_elems + std::mem::size_of::<Header>() as u64;
    let (mmap, path) = Self::new_map(&drives, cell_size as usize, capacity_pow2, &stats);
    Self {
        path,
        mmap,
        cell_size,
        count,
        capacity_pow2,
        stats,
        max_search,
    }
}
|
||||
|
||||
/// Max probe count widened to u64 for index arithmetic.
pub fn max_search(&self) -> u64 {
    self.max_search as u64
}
|
||||
|
||||
/// Create storage with the default initial capacity
/// (`DEFAULT_CAPACITY_POW2`, i.e. 2^5 cells).
pub fn new(
    drives: Arc<Vec<PathBuf>>,
    num_elems: u64,
    elem_size: u64,
    max_search: MaxSearch,
    stats: Arc<BucketStats>,
    count: Arc<AtomicU64>,
) -> Self {
    Self::new_with_capacity(
        drives,
        num_elems,
        elem_size,
        DEFAULT_CAPACITY_POW2,
        max_search,
        stats,
        count,
    )
}
|
||||
|
||||
/// return ref to header of item 'ix' in mmapped file
fn header_ptr(&self, ix: u64) -> &Header {
    let ix = (ix * self.cell_size) as usize;
    let hdr_slice: &[u8] = &self.mmap[ix..ix + std::mem::size_of::<Header>()];
    // Safety: slice is in-bounds and Header is #[repr(C)] with a single
    // u64 field. Assumes the mmap base and cell_size keep the header
    // 8-byte aligned — TODO confirm.
    unsafe {
        let hdr = hdr_slice.as_ptr() as *const Header;
        hdr.as_ref().unwrap()
    }
}
|
||||
|
||||
/// return mutable ref to header of item 'ix' in mmapped file
fn header_mut_ptr(&mut self, ix: u64) -> &mut Header {
    let ix = (ix * self.cell_size) as usize;
    let hdr_slice: &mut [u8] = &mut self.mmap[ix..ix + std::mem::size_of::<Header>()];
    // Safety: same reasoning as header_ptr; &mut self guarantees exclusive
    // access through this reference.
    unsafe {
        let hdr = hdr_slice.as_mut_ptr() as *mut Header;
        hdr.as_mut().unwrap()
    }
}
|
||||
|
||||
/// return uid allocated at index 'ix' or None if vacant
/// panics if `ix` is out of bounds
pub fn uid(&self, ix: u64) -> Option<Uid> {
    assert!(ix < self.capacity(), "bad index size");
    self.header_ptr(ix).uid()
}
|
||||
|
||||
/// true if the entry at index 'ix' is free (as opposed to being allocated)
pub fn is_free(&self, ix: u64) -> bool {
    // note that the terminology in the implementation is locked or unlocked.
    // but our api is allocate/free
    self.header_ptr(ix).is_unlocked()
}
|
||||
|
||||
/// caller knows id is not empty
/// panics (via unwrap) if the entry is actually free
pub fn uid_unchecked(&self, ix: u64) -> Uid {
    self.uid(ix).unwrap()
}
|
||||
|
||||
/// 'is_resizing' true if caller is resizing the index (so don't increment count)
/// 'is_resizing' false if caller is adding an item to the index (so increment count)
/// Err(AlreadyAllocated) if cell `ix` is already owned.
pub fn allocate(
    &mut self,
    ix: u64,
    uid: Uid,
    is_resizing: bool,
) -> Result<(), BucketStorageError> {
    assert!(ix < self.capacity(), "allocate: bad index size");
    assert!(UID_UNLOCKED != uid, "allocate: bad uid");
    let mut e = Err(BucketStorageError::AlreadyAllocated);
    //debug!("ALLOC {} {}", ix, uid);
    // try_lock succeeds only when the cell is currently vacant
    if self.header_mut_ptr(ix).try_lock(uid) {
        e = Ok(());
        if !is_resizing {
            self.count.fetch_add(1, Ordering::Relaxed);
        }
    }
    e
}
|
||||
|
||||
pub fn free(&mut self, ix: u64, uid: Uid) {
|
||||
assert!(ix < self.capacity(), "bad index size");
|
||||
assert!(UID_UNLOCKED != uid, "free: bad uid");
|
||||
self.header_mut_ptr(ix).unlock(uid);
|
||||
self.count.fetch_sub(1, Ordering::Relaxed);
|
||||
}
|
||||
|
||||
/// Reinterpret the payload of cell 'ix' as a `&T`.
pub fn get<T: Sized>(&self, ix: u64) -> &T {
    assert!(ix < self.capacity(), "bad index size");
    let start = (ix * self.cell_size) as usize + std::mem::size_of::<Header>();
    let end = start + std::mem::size_of::<T>();
    let item_slice: &[u8] = &self.mmap[start..end];
    // Safety: the slice covers size_of::<T>() in-bounds bytes. Assumes the
    // payload offset satisfies T's alignment and any bit pattern is a
    // valid T — TODO confirm for each T used.
    unsafe {
        let item = item_slice.as_ptr() as *const T;
        &*item
    }
}
|
||||
|
||||
pub fn get_empty_cell_slice<T: Sized>(&self) -> &[T] {
|
||||
let len = 0;
|
||||
let item_slice: &[u8] = &self.mmap[0..0];
|
||||
unsafe {
|
||||
let item = item_slice.as_ptr() as *const T;
|
||||
std::slice::from_raw_parts(item, len as usize)
|
||||
}
|
||||
}
|
||||
|
||||
/// Reinterpret the payload of cell 'ix' as a slice of `len` `T`s.
pub fn get_cell_slice<T: Sized>(&self, ix: u64, len: u64) -> &[T] {
    assert!(ix < self.capacity(), "bad index size");
    let ix = self.cell_size * ix;
    let start = ix as usize + std::mem::size_of::<Header>();
    let end = start + std::mem::size_of::<T>() * len as usize;
    //debug!("GET slice {} {}", start, end);
    let item_slice: &[u8] = &self.mmap[start..end];
    // Safety: in-bounds byte range of exactly len * size_of::<T>() bytes.
    // Assumes payload alignment suits T — TODO confirm.
    unsafe {
        let item = item_slice.as_ptr() as *const T;
        std::slice::from_raw_parts(item, len as usize)
    }
}
|
||||
|
||||
// NOTE(review): hands out &mut T from &self with no synchronization —
// callers are presumably responsible for exclusive access; confirm.
#[allow(clippy::mut_from_ref)]
pub fn get_mut<T: Sized>(&self, ix: u64) -> &mut T {
    assert!(ix < self.capacity(), "bad index size");
    let start = (ix * self.cell_size) as usize + std::mem::size_of::<Header>();
    let end = start + std::mem::size_of::<T>();
    let item_slice: &[u8] = &self.mmap[start..end];
    // Safety: in-bounds range; alignment/validity assumptions as in get().
    unsafe {
        let item = item_slice.as_ptr() as *mut T;
        &mut *item
    }
}
|
||||
|
||||
#[allow(clippy::mut_from_ref)]
|
||||
pub fn get_mut_cell_slice<T: Sized>(&self, ix: u64, len: u64) -> &mut [T] {
|
||||
assert!(ix < self.capacity(), "bad index size");
|
||||
let ix = self.cell_size * ix;
|
||||
let start = ix as usize + std::mem::size_of::<Header>();
|
||||
let end = start + std::mem::size_of::<T>() * len as usize;
|
||||
//debug!("GET mut slice {} {}", start, end);
|
||||
let item_slice: &[u8] = &self.mmap[start..end];
|
||||
unsafe {
|
||||
let item = item_slice.as_ptr() as *mut T;
|
||||
std::slice::from_raw_parts_mut(item, len as usize)
|
||||
}
|
||||
}
|
||||
|
||||
fn new_map(
|
||||
drives: &[PathBuf],
|
||||
cell_size: usize,
|
||||
capacity_pow2: u8,
|
||||
stats: &BucketStats,
|
||||
) -> (MmapMut, PathBuf) {
|
||||
let mut measure_new_file = Measure::start("measure_new_file");
|
||||
let capacity = 1u64 << capacity_pow2;
|
||||
let r = thread_rng().gen_range(0, drives.len());
|
||||
let drive = &drives[r];
|
||||
let pos = format!("{}", thread_rng().gen_range(0, u128::MAX),);
|
||||
let file = drive.join(pos);
|
||||
let mut data = OpenOptions::new()
|
||||
.read(true)
|
||||
.write(true)
|
||||
.create(true)
|
||||
.open(file.clone())
|
||||
.map_err(|e| {
|
||||
panic!(
|
||||
"Unable to create data file {} in current dir({:?}): {:?}",
|
||||
file.display(),
|
||||
std::env::current_dir(),
|
||||
e
|
||||
);
|
||||
})
|
||||
.unwrap();
|
||||
|
||||
// Theoretical performance optimization: write a zero to the end of
|
||||
// the file so that we won't have to resize it later, which may be
|
||||
// expensive.
|
||||
//debug!("GROWING file {}", capacity * cell_size as u64);
|
||||
data.seek(SeekFrom::Start(capacity * cell_size as u64 - 1))
|
||||
.unwrap();
|
||||
data.write_all(&[0]).unwrap();
|
||||
data.seek(SeekFrom::Start(0)).unwrap();
|
||||
measure_new_file.stop();
|
||||
let mut measure_flush = Measure::start("measure_flush");
|
||||
data.flush().unwrap(); // can we skip this?
|
||||
measure_flush.stop();
|
||||
let mut measure_mmap = Measure::start("measure_mmap");
|
||||
let res = (unsafe { MmapMut::map_mut(&data).unwrap() }, file);
|
||||
measure_mmap.stop();
|
||||
stats
|
||||
.new_file_us
|
||||
.fetch_add(measure_new_file.as_us(), Ordering::Relaxed);
|
||||
stats
|
||||
.flush_file_us
|
||||
.fetch_add(measure_flush.as_us(), Ordering::Relaxed);
|
||||
stats
|
||||
.mmap_us
|
||||
.fetch_add(measure_mmap.as_us(), Ordering::Relaxed);
|
||||
res
|
||||
}
|
||||
|
||||
/// copy contents from 'old_bucket' to 'self'
|
||||
fn copy_contents(&mut self, old_bucket: &Self) {
|
||||
let mut m = Measure::start("grow");
|
||||
let old_cap = old_bucket.capacity();
|
||||
let old_map = &old_bucket.mmap;
|
||||
|
||||
let increment = self.capacity_pow2 - old_bucket.capacity_pow2;
|
||||
let index_grow = 1 << increment;
|
||||
(0..old_cap as usize).into_iter().for_each(|i| {
|
||||
let old_ix = i * old_bucket.cell_size as usize;
|
||||
let new_ix = old_ix * index_grow;
|
||||
let dst_slice: &[u8] = &self.mmap[new_ix..new_ix + old_bucket.cell_size as usize];
|
||||
let src_slice: &[u8] = &old_map[old_ix..old_ix + old_bucket.cell_size as usize];
|
||||
|
||||
unsafe {
|
||||
let dst = dst_slice.as_ptr() as *mut u8;
|
||||
let src = src_slice.as_ptr() as *const u8;
|
||||
std::ptr::copy_nonoverlapping(src, dst, old_bucket.cell_size as usize);
|
||||
};
|
||||
});
|
||||
m.stop();
|
||||
self.stats.resizes.fetch_add(1, Ordering::Relaxed);
|
||||
self.stats.resize_us.fetch_add(m.as_us(), Ordering::Relaxed);
|
||||
}
|
||||
|
||||
/// allocate a new bucket, copying data from 'bucket'
|
||||
pub fn new_resized(
|
||||
drives: &Arc<Vec<PathBuf>>,
|
||||
max_search: MaxSearch,
|
||||
bucket: Option<&Self>,
|
||||
capacity_pow_2: u8,
|
||||
num_elems: u64,
|
||||
elem_size: u64,
|
||||
stats: &Arc<BucketStats>,
|
||||
) -> Self {
|
||||
let mut new_bucket = Self::new_with_capacity(
|
||||
Arc::clone(drives),
|
||||
num_elems,
|
||||
elem_size,
|
||||
capacity_pow_2,
|
||||
max_search,
|
||||
Arc::clone(stats),
|
||||
bucket
|
||||
.map(|bucket| Arc::clone(&bucket.count))
|
||||
.unwrap_or_default(),
|
||||
);
|
||||
if let Some(bucket) = bucket {
|
||||
new_bucket.copy_contents(bucket);
|
||||
}
|
||||
let sz = new_bucket.capacity();
|
||||
{
|
||||
let mut max = new_bucket.stats.max_size.lock().unwrap();
|
||||
*max = std::cmp::max(*max, sz);
|
||||
}
|
||||
new_bucket
|
||||
}
|
||||
|
||||
/// Return the number of cells currently allocated
|
||||
pub fn capacity(&self) -> u64 {
|
||||
1 << self.capacity_pow2
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_bucket_storage() {
|
||||
let tmpdir1 = std::env::temp_dir().join("bucket_map_test_mt");
|
||||
let paths: Vec<PathBuf> = [tmpdir1]
|
||||
.iter()
|
||||
.filter(|x| std::fs::create_dir_all(x).is_ok())
|
||||
.cloned()
|
||||
.collect();
|
||||
assert!(!paths.is_empty());
|
||||
|
||||
let mut storage =
|
||||
BucketStorage::new(Arc::new(paths), 1, 1, 1, Arc::default(), Arc::default());
|
||||
let ix = 0;
|
||||
let uid = Uid::MAX;
|
||||
assert!(storage.is_free(ix));
|
||||
assert!(storage.allocate(ix, uid, false).is_ok());
|
||||
assert!(storage.allocate(ix, uid, false).is_err());
|
||||
assert!(!storage.is_free(ix));
|
||||
assert_eq!(storage.uid(ix), Some(uid));
|
||||
assert_eq!(storage.uid_unchecked(ix), uid);
|
||||
storage.free(ix, uid);
|
||||
assert!(storage.is_free(ix));
|
||||
assert_eq!(storage.uid(ix), None);
|
||||
let uid = 1;
|
||||
assert!(storage.is_free(ix));
|
||||
assert!(storage.allocate(ix, uid, false).is_ok());
|
||||
assert!(storage.allocate(ix, uid, false).is_err());
|
||||
assert!(!storage.is_free(ix));
|
||||
assert_eq!(storage.uid(ix), Some(uid));
|
||||
assert_eq!(storage.uid_unchecked(ix), uid);
|
||||
storage.free(ix, uid);
|
||||
assert!(storage.is_free(ix));
|
||||
assert_eq!(storage.uid(ix), None);
|
||||
}
|
||||
}
|
156
bucket_map/src/index_entry.rs
Normal file
156
bucket_map/src/index_entry.rs
Normal file
@ -0,0 +1,156 @@
|
||||
#![allow(dead_code)]
|
||||
use {
|
||||
crate::{
|
||||
bucket::Bucket,
|
||||
bucket_storage::{BucketStorage, Uid},
|
||||
RefCount,
|
||||
},
|
||||
modular_bitfield::prelude::*,
|
||||
solana_sdk::{clock::Slot, pubkey::Pubkey},
|
||||
std::{
|
||||
collections::hash_map::DefaultHasher,
|
||||
fmt::Debug,
|
||||
hash::{Hash, Hasher},
|
||||
},
|
||||
};
|
||||
|
||||
#[repr(C)]
|
||||
#[derive(Debug, Copy, Clone, PartialEq)]
|
||||
// one instance of this per item in the index
|
||||
// stored in the index bucket
|
||||
pub struct IndexEntry {
|
||||
pub key: Pubkey, // can this be smaller if we have reduced the keys into buckets already?
|
||||
pub ref_count: RefCount, // can this be smaller? Do we ever need more than 4B refcounts?
|
||||
storage_cap_and_offset: PackedStorage,
|
||||
// if the bucket doubled, the index can be recomputed using create_bucket_capacity_pow2
|
||||
pub num_slots: Slot, // can this be smaller? epoch size should ~ be the max len. this is the num elements in the slot list
|
||||
}
|
||||
|
||||
/// Pack the storage offset and capacity-when-crated-pow2 fields into a single u64
|
||||
#[bitfield(bits = 64)]
|
||||
#[repr(C)]
|
||||
#[derive(Debug, Default, Copy, Clone, Eq, PartialEq)]
|
||||
struct PackedStorage {
|
||||
capacity_when_created_pow2: B8,
|
||||
offset: B56,
|
||||
}
|
||||
|
||||
impl IndexEntry {
|
||||
pub fn init(&mut self, pubkey: &Pubkey) {
|
||||
self.key = *pubkey;
|
||||
self.ref_count = 0;
|
||||
self.storage_cap_and_offset = PackedStorage::default();
|
||||
self.num_slots = 0;
|
||||
}
|
||||
|
||||
pub fn set_storage_capacity_when_created_pow2(
|
||||
&mut self,
|
||||
storage_capacity_when_created_pow2: u8,
|
||||
) {
|
||||
self.storage_cap_and_offset
|
||||
.set_capacity_when_created_pow2(storage_capacity_when_created_pow2)
|
||||
}
|
||||
|
||||
pub fn set_storage_offset(&mut self, storage_offset: u64) {
|
||||
self.storage_cap_and_offset
|
||||
.set_offset_checked(storage_offset)
|
||||
.expect("New storage offset must fit into 7 bytes!")
|
||||
}
|
||||
|
||||
pub fn data_bucket_from_num_slots(num_slots: Slot) -> u64 {
|
||||
(num_slots as f64).log2().ceil() as u64 // use int log here?
|
||||
}
|
||||
|
||||
pub fn data_bucket_ix(&self) -> u64 {
|
||||
Self::data_bucket_from_num_slots(self.num_slots)
|
||||
}
|
||||
|
||||
pub fn ref_count(&self) -> RefCount {
|
||||
self.ref_count
|
||||
}
|
||||
|
||||
fn storage_capacity_when_created_pow2(&self) -> u8 {
|
||||
self.storage_cap_and_offset.capacity_when_created_pow2()
|
||||
}
|
||||
|
||||
fn storage_offset(&self) -> u64 {
|
||||
self.storage_cap_and_offset.offset()
|
||||
}
|
||||
|
||||
// This function maps the original data location into an index in the current bucket storage.
|
||||
// This is coupled with how we resize bucket storages.
|
||||
pub fn data_loc(&self, storage: &BucketStorage) -> u64 {
|
||||
self.storage_offset() << (storage.capacity_pow2 - self.storage_capacity_when_created_pow2())
|
||||
}
|
||||
|
||||
pub fn read_value<'a, T>(&self, bucket: &'a Bucket<T>) -> Option<(&'a [T], RefCount)> {
|
||||
let data_bucket_ix = self.data_bucket_ix();
|
||||
let data_bucket = &bucket.data[data_bucket_ix as usize];
|
||||
let slice = if self.num_slots > 0 {
|
||||
let loc = self.data_loc(data_bucket);
|
||||
let uid = Self::key_uid(&self.key);
|
||||
assert_eq!(Some(uid), bucket.data[data_bucket_ix as usize].uid(loc));
|
||||
bucket.data[data_bucket_ix as usize].get_cell_slice(loc, self.num_slots)
|
||||
} else {
|
||||
// num_slots is 0. This means we don't have an actual allocation.
|
||||
// can we trust that the data_bucket is even safe?
|
||||
bucket.data[data_bucket_ix as usize].get_empty_cell_slice()
|
||||
};
|
||||
Some((slice, self.ref_count))
|
||||
}
|
||||
|
||||
pub fn key_uid(key: &Pubkey) -> Uid {
|
||||
let mut s = DefaultHasher::new();
|
||||
key.hash(&mut s);
|
||||
s.finish().max(1u64)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
impl IndexEntry {
|
||||
pub fn new(key: Pubkey) -> Self {
|
||||
IndexEntry {
|
||||
key,
|
||||
ref_count: 0,
|
||||
storage_cap_and_offset: PackedStorage::default(),
|
||||
num_slots: 0,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// verify that accessors for storage_offset and capacity_when_created are
|
||||
/// correct and independent
|
||||
#[test]
|
||||
fn test_api() {
|
||||
for offset in [0, 1, u32::MAX as u64] {
|
||||
let mut index = IndexEntry::new(solana_sdk::pubkey::new_rand());
|
||||
if offset != 0 {
|
||||
index.set_storage_offset(offset);
|
||||
}
|
||||
assert_eq!(index.storage_offset(), offset);
|
||||
assert_eq!(index.storage_capacity_when_created_pow2(), 0);
|
||||
for pow in [1, 255, 0] {
|
||||
index.set_storage_capacity_when_created_pow2(pow);
|
||||
assert_eq!(index.storage_offset(), offset);
|
||||
assert_eq!(index.storage_capacity_when_created_pow2(), pow);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_size() {
|
||||
assert_eq!(std::mem::size_of::<PackedStorage>(), 1 + 7);
|
||||
assert_eq!(std::mem::size_of::<IndexEntry>(), 32 + 8 + 8 + 8);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic(expected = "New storage offset must fit into 7 bytes!")]
|
||||
fn test_set_storage_offset_value_too_large() {
|
||||
let too_big = 1 << 56;
|
||||
let mut index = IndexEntry::new(Pubkey::new_unique());
|
||||
index.set_storage_offset(too_big);
|
||||
}
|
||||
}
|
11
bucket_map/src/lib.rs
Normal file
11
bucket_map/src/lib.rs
Normal file
@ -0,0 +1,11 @@
|
||||
#![allow(clippy::integer_arithmetic)]
|
||||
mod bucket;
|
||||
pub mod bucket_api;
|
||||
mod bucket_item;
|
||||
pub mod bucket_map;
|
||||
mod bucket_stats;
|
||||
mod bucket_storage;
|
||||
mod index_entry;
|
||||
|
||||
pub type MaxSearch = u8;
|
||||
pub type RefCount = u64;
|
48
bucket_map/tests/bucket_map.rs
Normal file
48
bucket_map/tests/bucket_map.rs
Normal file
@ -0,0 +1,48 @@
|
||||
use {
|
||||
rayon::prelude::*,
|
||||
solana_bucket_map::bucket_map::{BucketMap, BucketMapConfig},
|
||||
solana_measure::measure::Measure,
|
||||
solana_sdk::pubkey::Pubkey,
|
||||
std::path::PathBuf,
|
||||
};
|
||||
#[test]
|
||||
#[ignore]
|
||||
fn bucket_map_test_mt() {
|
||||
let threads = 4096;
|
||||
let items = 4096;
|
||||
let tmpdir1 = std::env::temp_dir().join("bucket_map_test_mt");
|
||||
let tmpdir2 = PathBuf::from("/mnt/data/0").join("bucket_map_test_mt");
|
||||
let paths: Vec<PathBuf> = [tmpdir1, tmpdir2]
|
||||
.iter()
|
||||
.filter(|x| std::fs::create_dir_all(x).is_ok())
|
||||
.cloned()
|
||||
.collect();
|
||||
assert!(!paths.is_empty());
|
||||
let index = BucketMap::new(BucketMapConfig {
|
||||
max_buckets: 1 << 12,
|
||||
drives: Some(paths.clone()),
|
||||
..BucketMapConfig::default()
|
||||
});
|
||||
(0..threads).into_iter().into_par_iter().for_each(|_| {
|
||||
let key = Pubkey::new_unique();
|
||||
index.update(&key, |_| Some((vec![0u64], 0)));
|
||||
});
|
||||
let mut timer = Measure::start("bucket_map_test_mt");
|
||||
(0..threads).into_iter().into_par_iter().for_each(|_| {
|
||||
for _ in 0..items {
|
||||
let key = Pubkey::new_unique();
|
||||
let ix: u64 = index.bucket_ix(&key) as u64;
|
||||
index.update(&key, |_| Some((vec![ix], 0)));
|
||||
assert_eq!(index.read_value(&key), Some((vec![ix], 0)));
|
||||
}
|
||||
});
|
||||
timer.stop();
|
||||
println!("time: {}ns per item", timer.as_ns() / (threads * items));
|
||||
let mut total = 0;
|
||||
for tmpdir in paths.iter() {
|
||||
let folder_size = fs_extra::dir::get_size(tmpdir).unwrap();
|
||||
total += folder_size;
|
||||
std::fs::remove_dir_all(tmpdir).unwrap();
|
||||
}
|
||||
println!("overhead: {}bytes per item", total / (threads * items));
|
||||
}
|
9
cargo
9
cargo
@ -3,25 +3,22 @@
|
||||
# shellcheck source=ci/rust-version.sh
|
||||
here=$(dirname "$0")
|
||||
|
||||
source "${here}"/ci/rust-version.sh all
|
||||
|
||||
toolchain=
|
||||
case "$1" in
|
||||
stable)
|
||||
source "${here}"/ci/rust-version.sh stable
|
||||
# shellcheck disable=SC2054 # rust_stable is sourced from rust-version.sh
|
||||
toolchain="$rust_stable"
|
||||
shift
|
||||
;;
|
||||
nightly)
|
||||
source "${here}"/ci/rust-version.sh nightly
|
||||
# shellcheck disable=SC2054 # rust_nightly is sourced from rust-version.sh
|
||||
toolchain="$rust_nightly"
|
||||
shift
|
||||
;;
|
||||
+*)
|
||||
toolchain="${1#+}"
|
||||
shift
|
||||
;;
|
||||
*)
|
||||
source "${here}"/ci/rust-version.sh stable
|
||||
# shellcheck disable=SC2054 # rust_stable is sourced from rust-version.sh
|
||||
toolchain="$rust_stable"
|
||||
;;
|
||||
|
@ -9,5 +9,8 @@ for a in "$@"; do
|
||||
fi
|
||||
done
|
||||
|
||||
set -x
|
||||
set -ex
|
||||
if [[ ! -f sdk/bpf/syscalls.txt ]]; then
|
||||
"$here"/cargo build --manifest-path "$here"/programs/bpf_loader/gen-syscall-list/Cargo.toml
|
||||
fi
|
||||
exec "$here"/cargo run --manifest-path "$here"/sdk/cargo-build-bpf/Cargo.toml -- $maybe_bpf_sdk "$@"
|
||||
|
@ -137,7 +137,7 @@ all_test_steps() {
|
||||
^ci/test-coverage.sh \
|
||||
^scripts/coverage.sh \
|
||||
; then
|
||||
command_step coverage ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_nightly_docker_image ci/test-coverage.sh" 30
|
||||
command_step coverage ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_nightly_docker_image ci/test-coverage.sh" 40
|
||||
wait_step
|
||||
else
|
||||
annotate --style info --context test-coverage \
|
||||
@ -148,6 +148,33 @@ all_test_steps() {
|
||||
command_step stable ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-stable.sh" 60
|
||||
wait_step
|
||||
|
||||
# BPF test suite
|
||||
if affects \
|
||||
.rs$ \
|
||||
Cargo.lock$ \
|
||||
Cargo.toml$ \
|
||||
^ci/rust-version.sh \
|
||||
^ci/test-stable-bpf.sh \
|
||||
^ci/test-stable.sh \
|
||||
^ci/test-local-cluster.sh \
|
||||
^core/build.rs \
|
||||
^fetch-perf-libs.sh \
|
||||
^programs/ \
|
||||
^sdk/ \
|
||||
; then
|
||||
cat >> "$output_file" <<"EOF"
|
||||
- command: "ci/test-stable-bpf.sh"
|
||||
name: "stable-bpf"
|
||||
timeout_in_minutes: 20
|
||||
artifact_paths: "bpf-dumps.tar.bz2"
|
||||
agents:
|
||||
- "queue=default"
|
||||
EOF
|
||||
else
|
||||
annotate --style info \
|
||||
"Stable-BPF skipped as no relevant files were modified"
|
||||
fi
|
||||
|
||||
# Perf test suite
|
||||
if affects \
|
||||
.rs$ \
|
||||
@ -165,7 +192,7 @@ all_test_steps() {
|
||||
cat >> "$output_file" <<"EOF"
|
||||
- command: "ci/test-stable-perf.sh"
|
||||
name: "stable-perf"
|
||||
timeout_in_minutes: 40
|
||||
timeout_in_minutes: 20
|
||||
artifact_paths: "log-*.txt"
|
||||
agents:
|
||||
- "queue=cuda"
|
||||
@ -199,6 +226,44 @@ EOF
|
||||
annotate --style info \
|
||||
"downstream-projects skipped as no relevant files were modified"
|
||||
fi
|
||||
|
||||
# Downstream Anchor projects backwards compatibility
|
||||
if affects \
|
||||
.rs$ \
|
||||
Cargo.lock$ \
|
||||
Cargo.toml$ \
|
||||
^ci/rust-version.sh \
|
||||
^ci/test-stable-perf.sh \
|
||||
^ci/test-stable.sh \
|
||||
^ci/test-local-cluster.sh \
|
||||
^core/build.rs \
|
||||
^fetch-perf-libs.sh \
|
||||
^programs/ \
|
||||
^sdk/ \
|
||||
^scripts/build-downstream-anchor-projects.sh \
|
||||
; then
|
||||
cat >> "$output_file" <<"EOF"
|
||||
- command: "scripts/build-downstream-anchor-projects.sh"
|
||||
name: "downstream-anchor-projects"
|
||||
timeout_in_minutes: 10
|
||||
EOF
|
||||
else
|
||||
annotate --style info \
|
||||
"downstream-anchor-projects skipped as no relevant files were modified"
|
||||
fi
|
||||
|
||||
# Wasm support
|
||||
if affects \
|
||||
^ci/test-wasm.sh \
|
||||
^ci/test-stable.sh \
|
||||
^sdk/ \
|
||||
; then
|
||||
command_step wasm ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-wasm.sh" 20
|
||||
else
|
||||
annotate --style info \
|
||||
"wasm skipped as no relevant files were modified"
|
||||
fi
|
||||
|
||||
# Benches...
|
||||
if affects \
|
||||
.rs$ \
|
||||
@ -216,7 +281,15 @@ EOF
|
||||
|
||||
command_step "local-cluster" \
|
||||
". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-local-cluster.sh" \
|
||||
45
|
||||
40
|
||||
|
||||
command_step "local-cluster-flakey" \
|
||||
". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-local-cluster-flakey.sh" \
|
||||
10
|
||||
|
||||
command_step "local-cluster-slow" \
|
||||
". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-local-cluster-slow.sh" \
|
||||
30
|
||||
}
|
||||
|
||||
pull_or_push_steps() {
|
||||
@ -235,7 +308,7 @@ pull_or_push_steps() {
|
||||
all_test_steps
|
||||
fi
|
||||
|
||||
# web3.js, explorer and docs changes run on Travis...
|
||||
# web3.js, explorer and docs changes run on Travis or Github actions...
|
||||
}
|
||||
|
||||
|
||||
|
@ -3,16 +3,24 @@
|
||||
# Pull requests to not run these steps.
|
||||
steps:
|
||||
- command: "ci/publish-tarball.sh"
|
||||
agents:
|
||||
- "queue=release-build"
|
||||
timeout_in_minutes: 60
|
||||
name: "publish tarball"
|
||||
- command: "ci/publish-bpf-sdk.sh"
|
||||
timeout_in_minutes: 5
|
||||
name: "publish bpf sdk"
|
||||
- wait
|
||||
- command: "sdk/docker-solana/build.sh"
|
||||
agents:
|
||||
- "queue=release-build"
|
||||
timeout_in_minutes: 60
|
||||
name: "publish docker"
|
||||
- command: "ci/publish-crate.sh"
|
||||
agents:
|
||||
- "queue=release-build"
|
||||
timeout_in_minutes: 240
|
||||
name: "publish crate"
|
||||
branches: "!master"
|
||||
- command: "ci/publish-tarball.sh"
|
||||
agents:
|
||||
- "queue=release-build-aarch64-apple-darwin"
|
||||
timeout_in_minutes: 60
|
||||
name: "publish tarball (aarch64-apple-darwin)"
|
||||
|
@ -105,11 +105,18 @@ if [[ -z "$CHANNEL" ]]; then
|
||||
fi
|
||||
fi
|
||||
|
||||
if [[ $CHANNEL = beta ]]; then
|
||||
CHANNEL_LATEST_TAG="$BETA_CHANNEL_LATEST_TAG"
|
||||
elif [[ $CHANNEL = stable ]]; then
|
||||
CHANNEL_LATEST_TAG="$STABLE_CHANNEL_LATEST_TAG"
|
||||
fi
|
||||
|
||||
echo EDGE_CHANNEL="$EDGE_CHANNEL"
|
||||
echo BETA_CHANNEL="$BETA_CHANNEL"
|
||||
echo BETA_CHANNEL_LATEST_TAG="$BETA_CHANNEL_LATEST_TAG"
|
||||
echo STABLE_CHANNEL="$STABLE_CHANNEL"
|
||||
echo STABLE_CHANNEL_LATEST_TAG="$STABLE_CHANNEL_LATEST_TAG"
|
||||
echo CHANNEL="$CHANNEL"
|
||||
echo CHANNEL_LATEST_TAG="$CHANNEL_LATEST_TAG"
|
||||
|
||||
exit 0
|
||||
|
@ -23,7 +23,7 @@ echo --- "(FAILING) Backpropagating dependabot-triggered Cargo.lock updates"
|
||||
name="dependabot-buildkite"
|
||||
api_base="https://api.github.com/repos/solana-labs/solana/pulls"
|
||||
pr_num=$(echo "$BUILDKITE_BRANCH" | grep -Eo '[0-9]+')
|
||||
branch=$(curl -s "$api_base/$pr_num" | python -c 'import json,sys;print json.load(sys.stdin)["head"]["ref"]')
|
||||
branch=$(curl -s "$api_base/$pr_num" | python3 -c 'import json,sys;print(json.load(sys.stdin)["head"]["ref"])')
|
||||
|
||||
git add :**/Cargo.lock
|
||||
EMAIL="dependabot-buildkite@noreply.solana.com" \
|
||||
|
@ -7,8 +7,6 @@ src_root="$(readlink -f "${here}/..")"
|
||||
|
||||
cd "${src_root}"
|
||||
|
||||
source ci/rust-version.sh stable
|
||||
|
||||
cargo_audit_ignores=(
|
||||
# failure is officially deprecated/unmaintained
|
||||
#
|
||||
@ -30,16 +28,29 @@ cargo_audit_ignores=(
|
||||
# Blocked on multiple crates updating `time` to >= 0.2.23
|
||||
--ignore RUSTSEC-2020-0071
|
||||
|
||||
# difference is unmaintained
|
||||
#
|
||||
# Blocked on predicates v1.0.6 removing its dependency on `difference`
|
||||
--ignore RUSTSEC-2020-0095
|
||||
|
||||
# generic-array: arr! macro erases lifetimes
|
||||
#
|
||||
# Blocked on libsecp256k1 releasing with upgraded dependencies
|
||||
# https://github.com/paritytech/libsecp256k1/issues/66
|
||||
--ignore RUSTSEC-2020-0146
|
||||
|
||||
# hyper: Lenient `hyper` header parsing of `Content-Length` could allow request smuggling
|
||||
#
|
||||
# Blocked on jsonrpc removing dependency on unmaintained `websocket`
|
||||
# https://github.com/paritytech/jsonrpc/issues/605
|
||||
--ignore RUSTSEC-2021-0078
|
||||
|
||||
# hyper: Integer overflow in `hyper`'s parsing of the `Transfer-Encoding` header leads to data loss
|
||||
#
|
||||
# Blocked on jsonrpc removing dependency on unmaintained `websocket`
|
||||
# https://github.com/paritytech/jsonrpc/issues/605
|
||||
--ignore RUSTSEC-2021-0079
|
||||
|
||||
# chrono: Potential segfault in `localtime_r` invocations
|
||||
#
|
||||
# Blocked due to no safe upgrade
|
||||
# https://github.com/chronotope/chrono/issues/499
|
||||
--ignore RUSTSEC-2020-0159
|
||||
|
||||
)
|
||||
scripts/cargo-for-all-lock-files.sh +"$rust_stable" audit "${cargo_audit_ignores[@]}"
|
||||
scripts/cargo-for-all-lock-files.sh stable audit "${cargo_audit_ignores[@]}"
|
||||
|
@ -1,4 +1,4 @@
|
||||
FROM solanalabs/rust:1.50.0
|
||||
FROM solanalabs/rust:1.58.1
|
||||
ARG date
|
||||
|
||||
RUN set -x \
|
||||
|
@ -1,6 +1,6 @@
|
||||
# Note: when the rust version is changed also modify
|
||||
# ci/rust-version.sh to pick up the new image tag
|
||||
FROM rust:1.50.0
|
||||
FROM rust:1.58.1
|
||||
|
||||
# Add Google Protocol Buffers for Libra's metrics library.
|
||||
ENV PROTOC_VERSION 3.8.0
|
||||
@ -11,28 +11,34 @@ RUN set -x \
|
||||
&& apt-get install apt-transport-https \
|
||||
&& echo deb https://apt.buildkite.com/buildkite-agent stable main > /etc/apt/sources.list.d/buildkite-agent.list \
|
||||
&& apt-key adv --no-tty --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 32A37959C2FA5C3C99EFBC32A79206696452D198 \
|
||||
&& curl -fsSL https://deb.nodesource.com/setup_current.x | bash - \
|
||||
&& apt update \
|
||||
&& apt install -y \
|
||||
buildkite-agent \
|
||||
clang-7 \
|
||||
clang \
|
||||
cmake \
|
||||
lcov \
|
||||
libudev-dev \
|
||||
libclang-common-7-dev \
|
||||
mscgen \
|
||||
nodejs \
|
||||
net-tools \
|
||||
rsync \
|
||||
sudo \
|
||||
golang \
|
||||
unzip \
|
||||
\
|
||||
&& apt remove -y libcurl4-openssl-dev \
|
||||
&& rm -rf /var/lib/apt/lists/* \
|
||||
&& node --version \
|
||||
&& npm --version \
|
||||
&& rustup component add rustfmt \
|
||||
&& rustup component add clippy \
|
||||
&& rustup target add wasm32-unknown-unknown \
|
||||
&& cargo install cargo-audit \
|
||||
&& cargo install svgbob_cli \
|
||||
&& cargo install mdbook \
|
||||
&& cargo install mdbook-linkcheck \
|
||||
&& cargo install svgbob_cli \
|
||||
&& cargo install wasm-pack \
|
||||
&& rustc --version \
|
||||
&& cargo --version \
|
||||
&& curl -OL https://github.com/google/protobuf/releases/download/v$PROTOC_VERSION/$PROTOC_ZIP \
|
||||
|
21
ci/env.sh
21
ci/env.sh
@ -23,6 +23,9 @@ if [[ -n $CI ]]; then
|
||||
elif [[ -n $BUILDKITE ]]; then
|
||||
export CI_BRANCH=$BUILDKITE_BRANCH
|
||||
export CI_BUILD_ID=$BUILDKITE_BUILD_ID
|
||||
if [[ $BUILDKITE_COMMIT = HEAD ]]; then
|
||||
BUILDKITE_COMMIT="$(git rev-parse HEAD)"
|
||||
fi
|
||||
export CI_COMMIT=$BUILDKITE_COMMIT
|
||||
export CI_JOB_ID=$BUILDKITE_JOB_ID
|
||||
# The standard BUILDKITE_PULL_REQUEST environment variable is always "false" due
|
||||
@ -35,7 +38,18 @@ if [[ -n $CI ]]; then
|
||||
export CI_BASE_BRANCH=$BUILDKITE_BRANCH
|
||||
export CI_PULL_REQUEST=
|
||||
fi
|
||||
export CI_OS_NAME=linux
|
||||
|
||||
case "$(uname -s)" in
|
||||
Linux)
|
||||
export CI_OS_NAME=linux
|
||||
;;
|
||||
Darwin)
|
||||
export CI_OS_NAME=osx
|
||||
;;
|
||||
*)
|
||||
;;
|
||||
esac
|
||||
|
||||
if [[ -n $BUILDKITE_TRIGGERED_FROM_BUILD_PIPELINE_SLUG ]]; then
|
||||
# The solana-secondary pipeline should use the slug of the pipeline that
|
||||
# triggered it
|
||||
@ -74,10 +88,13 @@ else
|
||||
export CI_BUILD_ID=
|
||||
export CI_COMMIT=
|
||||
export CI_JOB_ID=
|
||||
export CI_OS_NAME=
|
||||
export CI_PULL_REQUEST=
|
||||
export CI_REPO_SLUG=
|
||||
export CI_TAG=
|
||||
# Don't override ci/run-local.sh
|
||||
if [[ -z $CI_LOCAL_RUN ]]; then
|
||||
export CI_OS_NAME=
|
||||
fi
|
||||
fi
|
||||
|
||||
cat <<EOF
|
||||
|
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user