Compare commits
1219 Commits
v0.2.3...v0.7.0-beta
SHA1 | Author | Date
---|---|---
5d8b2f899a | |||
490205ab84 | |||
2c0e704c82 | |||
253048f72d | |||
e09b8430ce | |||
9ae283dc3a | |||
f95a79d145 | |||
0dabdfd48e | |||
d2bb4dc14a | |||
b4dc180592 | |||
263577773f | |||
7d708be121 | |||
feb1669d39 | |||
2cbfe41422 | |||
b7653865b1 | |||
c72dced8fa | |||
6feed5fd56 | |||
b8fe5ae076 | |||
7e657d65f3 | |||
a166bb816e | |||
2952027d04 | |||
430d9d9314 | |||
fa247196c0 | |||
5d17c2b58f | |||
6ee45d282e | |||
cfc3bd0696 | |||
3e0e09555a | |||
1d8bb5144e | |||
67e0100866 | |||
f2ab08c65e | |||
04a93050e7 | |||
03401041db | |||
6eac744a05 | |||
ae29e2085f | |||
7ce0b58af8 | |||
ea5663c0da | |||
a61bfae8a4 | |||
5716898216 | |||
c0f9e452f2 | |||
4e3526394e | |||
6806a14a3f | |||
ec7e50b37d | |||
e7b7dfebf5 | |||
a9e0b27772 | |||
669164bada | |||
4f3a291391 | |||
56e37ad2f4 | |||
17de79a83a | |||
09e9139855 | |||
76fc5822c9 | |||
c767a854ed | |||
b60802ddff | |||
1c35d59f26 | |||
adcaf715c6 | |||
1f9494221b | |||
466d6f76b9 | |||
b05e6ce3db | |||
1d812e78d5 | |||
fba494343f | |||
0b878eccf8 | |||
98772b16d6 | |||
bb82ff0c80 | |||
71af03dc98 | |||
5671da4a0a | |||
d63493a852 | |||
c06582ba40 | |||
450f271cf7 | |||
a31889f129 | |||
ba6a6f5227 | |||
9a38d61048 | |||
903ec27754 | |||
0b56d603c2 | |||
4ffb5d157a | |||
816246ebee | |||
a9881aee05 | |||
7b5b989cfe | |||
c4b62e19f2 | |||
79a97ada04 | |||
da215d1a21 | |||
9ffc50bead | |||
f8352bac2f | |||
27c1410fdc | |||
9a4733bde7 | |||
f3df5df52c | |||
517d08c637 | |||
90dd794ae5 | |||
e0dbbba8a3 | |||
705df55a7f | |||
d354e85a9a | |||
e4e1f8ec1e | |||
0112a24179 | |||
d680f6b3a5 | |||
47e732717f | |||
ec56abfccb | |||
e7cdb402fb | |||
a3fe1965fb | |||
5256e6833e | |||
051cd2e1ff | |||
51929e7df8 | |||
a094507bb8 | |||
8effa4e3e0 | |||
1c9e7dbc45 | |||
799b249f02 | |||
7b4a378c92 | |||
47917d00d1 | |||
a4c49af859 | |||
1c1d7d1e0e | |||
d28536d76e | |||
63cfbb9497 | |||
231040b93e | |||
7c74afc35a | |||
7878a011eb | |||
c05416e27d | |||
ee200d8fa0 | |||
2f42658cd4 | |||
d95e8030fc | |||
4aedd3f1b6 | |||
bb89d6f54d | |||
ed10841e3d | |||
6dac87f2a7 | |||
a167d0d331 | |||
eed37820b5 | |||
124e1fa350 | |||
ac40434cdf | |||
39354c06f8 | |||
faedb88de0 | |||
5cd1fb486f | |||
5b5df49e6c | |||
86f9277e2d | |||
56b09bf0ac | |||
f4c4b9df9c | |||
6e568c69a7 | |||
14d624ee40 | |||
d5c0557891 | |||
1691060a22 | |||
a5ce578c72 | |||
05edfad13a | |||
136b43f461 | |||
ac40c1818f | |||
eb63dbcd2a | |||
4e2f1a519e | |||
55ec7f9fe9 | |||
b7ddefdbf9 | |||
ce361c2cdc | |||
ed6ba55261 | |||
ec333d2bd6 | |||
551f639259 | |||
da3bb6fb93 | |||
08bcb62016 | |||
8f4ce1e8d0 | |||
4a534d6abb | |||
b48a8c0555 | |||
1919ec247b | |||
3966eb5374 | |||
c22ef50cae | |||
be5f2ef9b9 | |||
adfcb79387 | |||
73c4c0ac5f | |||
6fc601f696 | |||
07111fb7bb | |||
a06d2170b0 | |||
7f53ea3bf3 | |||
b2accd1c2a | |||
8ef8a8dea7 | |||
e929404676 | |||
c2258bedae | |||
215fdbb7ed | |||
ee998f6882 | |||
826e95afca | |||
47583d48e7 | |||
e759cdf061 | |||
88503c2a09 | |||
d5be23dffe | |||
80c01dc085 | |||
45b2549fa9 | |||
c7ce454188 | |||
7059ea42d6 | |||
8ea1c29c9b | |||
33bbfdbc9b | |||
5de54f8853 | |||
a1ac41218a | |||
55fc647568 | |||
e83e898eed | |||
eb07e4588b | |||
563f834c96 | |||
183178681d | |||
8dba53e494 | |||
e4782b19a3 | |||
ec86b1dffa | |||
6cb8266c7b | |||
9c50302a39 | |||
3313c69898 | |||
530c6ca7ec | |||
07ed2fb523 | |||
d9ec380a15 | |||
b60eb3a899 | |||
b4df69791b | |||
c21b8a22b9 | |||
475a76e656 | |||
7ba5d5ef86 | |||
737dc1ddde | |||
164bf19b36 | |||
25976771d9 | |||
f2198c2e9a | |||
eec19c6d2c | |||
30e03feb5f | |||
58cd3bde9f | |||
662bfb7b88 | |||
5f3e3a17d3 | |||
feba2d9975 | |||
e3e3a1c457 | |||
90628f3c8d | |||
f6bcadb79d | |||
d4ac16773c | |||
96f044d2bf | |||
f31868b913 | |||
73b0ff5b55 | |||
64cf69045a | |||
e57dae0f31 | |||
6386e7d5cf | |||
4bad103da9 | |||
30a26adb7c | |||
8be4adfc0a | |||
fed4cc3965 | |||
7d1e074683 | |||
00516e50a1 | |||
e83d76fbd9 | |||
304f152315 | |||
3a82ebf7fd | |||
0253d34467 | |||
9209f9acde | |||
3dbbb398df | |||
17e8ad110f | |||
5e91d31ed3 | |||
fad9d20820 | |||
fe9a1c8580 | |||
cd6d7d5198 | |||
771478bc68 | |||
c4a59896f8 | |||
3eb1608403 | |||
8fde70d4dc | |||
5a047833ed | |||
f6c28e6be1 | |||
0ebf10d19d | |||
d3005d3ef3 | |||
effcef2184 | |||
89fc0ad7a9 | |||
410272ee1d | |||
1c97bf50b6 | |||
4ecd2c9d0b | |||
e592243a09 | |||
2f4a92e352 | |||
ceafc29040 | |||
b20efabfd2 | |||
85b6e7293c | |||
6aced927ad | |||
75997e6c08 | |||
9040d00110 | |||
8ebc5c6b07 | |||
d4807790ff | |||
0de5e7a285 | |||
c40000aeda | |||
31198bc105 | |||
92599acfca | |||
f6e70779fe | |||
3017bde686 | |||
9d84ec4bb3 | |||
586141adb2 | |||
3f763f99e2 | |||
15c7f36ea3 | |||
04d1a083fa | |||
327ee1dae8 | |||
22885c3e64 | |||
94ededb54c | |||
af6a07697a | |||
5f1d8c95eb | |||
7d9e032407 | |||
bc918a5ad5 | |||
ee54ce4727 | |||
e85bf2f2d5 | |||
a7460ffbd1 | |||
7fe1fd2f95 | |||
d30670e92e | |||
9b202c6e1e | |||
87946eafd5 | |||
7575d3c726 | |||
8b9713a934 | |||
ec713c18c4 | |||
c24b0a1a3f | |||
34e0cb0092 | |||
7b7c7cba21 | |||
c45343dd30 | |||
b7f6603c1f | |||
2d3b052dea | |||
dcb6234771 | |||
e44d423e83 | |||
5435bb734c | |||
13f59adf61 | |||
0fce3368d3 | |||
1ee5c81267 | |||
3bb9d5eb50 | |||
efb23f7cf9 | |||
013f4674de | |||
6966b25d9c | |||
d513f56c8c | |||
7aa05618a3 | |||
cdfbbe5e60 | |||
fe7d1cb81c | |||
c2a9395a4b | |||
586279bcfc | |||
8bd10e7c4c | |||
928e6165bc | |||
77c9e801aa | |||
c78132417f | |||
849928887e | |||
ba1163d49f | |||
6f9c89af39 | |||
246b8b1242 | |||
f0db68cb75 | |||
f0d1fdfb46 | |||
3b8b2e030a | |||
b4fee677a5 | |||
fe706583f9 | |||
d0e0c17ece | |||
5aaa38bcaf | |||
6ff9b27f8e | |||
3f4e035506 | |||
57d9fbb927 | |||
ee44e51b30 | |||
5011f24123 | |||
d1eda334f3 | |||
2ae5ce9f2c | |||
4f5ac78b7e | |||
074c9af020 | |||
2da2d4e365 | |||
8eb76ab2a5 | |||
a710d95243 | |||
a06535d7ed | |||
f511ac9be7 | |||
e28ad2177e | |||
cb16fe84cd | |||
ec3569aa39 | |||
246edecf53 | |||
34834c5af9 | |||
b845245614 | |||
5711fb9969 | |||
d1eaecde9a | |||
00c8505d1e | |||
33f01efe69 | |||
377d312c81 | |||
badf5d5412 | |||
0339f90b40 | |||
5455e8e6a9 | |||
6843b71a0d | |||
634408b5e8 | |||
d053f78b74 | |||
93b6fceb2f | |||
ac7860c35d | |||
b0eab8729f | |||
cb81f80b31 | |||
ea97529185 | |||
f1075191fe | |||
74c479fbc9 | |||
7e788d3a17 | |||
69b3c75f0d | |||
b2c2fa40a2 | |||
50458d9524 | |||
9679e3e356 | |||
6db9f92b8a | |||
4a44498d45 | |||
216510c573 | |||
fd338c3097 | |||
b66ebf5dec | |||
5da99de579 | |||
3aa2907bd6 | |||
05d1618659 | |||
86113811f2 | |||
53ecaa03f1 | |||
205c1aa505 | |||
9b54c1542b | |||
93d5d1b2ad | |||
4c0f3ed6f3 | |||
2580155bf2 | |||
6ab0dd4df9 | |||
4b8c36b6b9 | |||
359a8397c0 | |||
c9fd5d74b5 | |||
391744af97 | |||
587ab29e09 | |||
80f07dadc5 | |||
60609a44ba | |||
30c8fa46b4 | |||
7aab7d2f82 | |||
a8e1c44663 | |||
a2b92c35e1 | |||
9f2086c772 | |||
3eb005d492 | |||
68955bfcf4 | |||
9ac7070e08 | |||
e44e81bd17 | |||
f5eedd2d19 | |||
46059a37eb | |||
adc655a3a2 | |||
3058f80489 | |||
df98cae4b6 | |||
d327e0aabd | |||
17d3a6763c | |||
02c5b0343b | |||
2888e45fea | |||
f1311075d9 | |||
6c380e04a3 | |||
cef1c208a5 | |||
ef8eac92e3 | |||
9c9c63572b | |||
6c0c6de1d0 | |||
b57aecc24c | |||
290dde60a0 | |||
38623785f9 | |||
256ecc7208 | |||
76b06b47ba | |||
cf15cf587f | |||
134c7add57 | |||
ac0791826a | |||
d2622b7798 | |||
f82cbf3a27 | |||
aa7e3df8d6 | |||
ad00d7bd9c | |||
8d1f82c34d | |||
0cb2036e3a | |||
2b1e90b0a5 | |||
f2ccc133a2 | |||
5e824b39dd | |||
41efcae64b | |||
cf5671d058 | |||
2570bba6b1 | |||
71cb7d5c97 | |||
0df6541d5e | |||
52145caf7e | |||
86a50ae9e1 | |||
c64cfb74f3 | |||
26153d9919 | |||
5af922722f | |||
b70d730b32 | |||
bf4b856e0c | |||
0cf0ae6755 | |||
29061cff39 | |||
b7eec4c89f | |||
a3854c229e | |||
dcde256433 | |||
931bdbd5cd | |||
b7bd59c344 | |||
2dbf9a6017 | |||
fe93bba457 | |||
6e35f54738 | |||
089294a85e | |||
25c0b44641 | |||
58c1589688 | |||
bb53f69016 | |||
75659ca042 | |||
fc00594ea4 | |||
8d26be8b89 | |||
af4e95ae0f | |||
ffb4a7aa78 | |||
dcaeacc507 | |||
4f377e6710 | |||
122db85727 | |||
a598e4aa74 | |||
733b31ebbd | |||
dac9775de0 | |||
46c19a5783 | |||
aaeb5ba52f | |||
9f5a3d6064 | |||
4cdf873f98 | |||
b43ae748c3 | |||
02ddd89653 | |||
bbe6eccefe | |||
6677a7b66a | |||
75c37fcc73 | |||
5be71a8a9d | |||
b9ae7d1ebb | |||
8b02e0f57c | |||
342cc7350a | |||
2335a51ced | |||
868df1824c | |||
83c11f0f9d | |||
1022f1b0c6 | |||
c2c80232e3 | |||
115f4e54b8 | |||
669b1694b8 | |||
2128c58fbe | |||
e12e154877 | |||
73d3c17507 | |||
7f647a93da | |||
ecb3dbbb60 | |||
cc907ba69d | |||
5a45eef1dc | |||
0d980e89bc | |||
ef87832bff | |||
94507d1aca | |||
89924a38ff | |||
7faa2b8698 | |||
65352ce8e7 | |||
f1988ee1e3 | |||
82ac8eb731 | |||
ae47e34fa5 | |||
28e781efc3 | |||
5c3ceb8355 | |||
c9113b381d | |||
75e69eecfa | |||
f3c4acc723 | |||
2a0095e322 | |||
9ad5f3c65b | |||
579de64d49 | |||
d4200a7b1e | |||
84477835dc | |||
504b318ef1 | |||
f154c8c490 | |||
d4959bc157 | |||
87e025fe22 | |||
8049323ca8 | |||
b38c7ea2ff | |||
239b925fb3 | |||
60da7f7aaf | |||
8646ff4927 | |||
59be94a81f | |||
437c485e5c | |||
79a58da6a9 | |||
ae29641a18 | |||
9c3f65bca9 | |||
086365b4c4 | |||
64044da49c | |||
7b5b7feb63 | |||
2e059f8504 | |||
207b6686d1 | |||
abfd7d6951 | |||
7fc166b5ba | |||
021953d59a | |||
bbe89df2ff | |||
a638ec5911 | |||
26272a3600 | |||
8454eb79d0 | |||
796f4b981b | |||
34514d65bc | |||
2786357082 | |||
4badeacd1d | |||
63a0ba6ec8 | |||
9a4ce6d70e | |||
35ee2d0ce1 | |||
b04716d40d | |||
051fa6f1f1 | |||
8dc1b07e75 | |||
bee1e7ebaf | |||
f3f0b9f0c5 | |||
a5cf745e1c | |||
273b800047 | |||
6c1f1c2a7a | |||
9c62f8d81f | |||
82aef7ebe2 | |||
57636d3d5f | |||
dc87effc0a | |||
f0c9823e9f | |||
0b91dd6163 | |||
4955c6f13a | |||
2e7beca9ba | |||
59c1b9983d | |||
f7083e0923 | |||
6d4defdf96 | |||
b826f837f8 | |||
5855e18a4e | |||
3f38c0a245 | |||
cfe8b3fc55 | |||
e9ee020b5f | |||
1bcf3891b4 | |||
5456de63e9 | |||
9026c70952 | |||
99dc4ea4a9 | |||
0aaa500f7c | |||
5f5be83a17 | |||
7e44005a0f | |||
ee3fb985ea | |||
2a268aa528 | |||
cd262cf860 | |||
a1889c32d4 | |||
d42d024d9c | |||
7b88b8d159 | |||
4131071b9a | |||
ef6bd7e3b8 | |||
374bff6550 | |||
0a46bbe4f9 | |||
f4971be236 | |||
421273f862 | |||
2c7f229883 | |||
904eabad2f | |||
8b233f6be4 | |||
08fc821ca9 | |||
81706f2d75 | |||
7b50c3910f | |||
2d635386af | |||
a604dcb4c4 | |||
7736b9cac6 | |||
d2dd005a59 | |||
6e8f99d9b2 | |||
685de30047 | |||
17cc9ab07f | |||
3f10bf44db | |||
27984e469a | |||
a2c05b112e | |||
a578c1a5e3 | |||
500aaed48e | |||
4a94da8a94 | |||
cc447c0fda | |||
0ae69bdcd9 | |||
5ba20a94e8 | |||
f168c377fd | |||
dfb754dd13 | |||
455050e19c | |||
317031f455 | |||
b132ce1944 | |||
8b226652aa | |||
2c7fe3ed8d | |||
3d5f2b3c28 | |||
7a79afe4a6 | |||
1f7387a39b | |||
0fc2bee144 | |||
791ae852a2 | |||
c2fcd876d7 | |||
d239d4a495 | |||
aec05ef602 | |||
e5d46d998b | |||
b2e3299539 | |||
c308a6459f | |||
4eb1bc08a7 | |||
ff5e1c635f | |||
6149c2fcb5 | |||
d7cd80dce5 | |||
6264508f5e | |||
a3869dd4c1 | |||
a3d2831f8c | |||
4cd1fa8c38 | |||
1511dc43d7 | |||
3d82807965 | |||
4180571660 | |||
421d9aa501 | |||
898f4971a2 | |||
7ab3331f01 | |||
b4ca414492 | |||
73abea088a | |||
2376dfc139 | |||
d2f95d5319 | |||
cd96843699 | |||
ca80bc33c6 | |||
19607886f7 | |||
3c11a91f77 | |||
b781fdbd04 | |||
765d901530 | |||
3cedbc493e | |||
0488d0a82f | |||
f0be595e4c | |||
55100854d6 | |||
600a1f8866 | |||
95bf68f3f5 | |||
bcdb058492 | |||
7f46aef624 | |||
e779496dfb | |||
3d77fa5fbc | |||
250830ade9 | |||
7b2eb7ccfc | |||
458c27c6e9 | |||
a49e664e63 | |||
f20380d6b4 | |||
05a5e551d6 | |||
d278b71cb2 | |||
a485c141d5 | |||
8a9f6b9ae3 | |||
7144090528 | |||
ee0015ac38 | |||
8b7f7f1088 | |||
c95c6a75f8 | |||
44bf79e35f | |||
bb654f286c | |||
1acd2aa8cf | |||
18d3659b91 | |||
63a4bafa72 | |||
4eb2e84c9f | |||
73c7fb87e8 | |||
c1496722aa | |||
d9f81b0c8c | |||
d69beaabe1 | |||
b7a0bd6347 | |||
882ea6b672 | |||
736d3eabae | |||
af53197c04 | |||
cf186c5762 | |||
f384a2ce85 | |||
803b76e997 | |||
230d7c3dd6 | |||
4f629dd982 | |||
4fdd891b54 | |||
64a892321a | |||
a80991f2b3 | |||
c9cd81319a | |||
521ae21632 | |||
bcd6606a16 | |||
52ebb88205 | |||
1e91d09be7 | |||
02c573986b | |||
f2de486658 | |||
900b4f2644 | |||
1cfaa9afb6 | |||
801468d70d | |||
0601e05978 | |||
7ce11b5d1c | |||
f2d4799491 | |||
ebc458cd32 | |||
43cd631579 | |||
bc824c1a6c | |||
4223aff840 | |||
f107c6c2ca | |||
7daf14caa7 | |||
ded28c705f | |||
778bec0777 | |||
6967cf7f86 | |||
0ee3ec86bd | |||
e4c47e8417 | |||
98ae80f4ed | |||
876c77d0bc | |||
d44a6f7541 | |||
9040c04d27 | |||
ebbdef0538 | |||
bfbee988d0 | |||
1d4d0272ca | |||
77a76f0783 | |||
d9079de262 | |||
b3d732a1a1 | |||
52f1a02938 | |||
fe51669e85 | |||
670a6c50c9 | |||
86c1aaf7d8 | |||
658e787b60 | |||
40c50aef50 | |||
a24c2bbe73 | |||
bdbe90b891 | |||
3236be7877 | |||
1dca17fdb4 | |||
785e971698 | |||
2bfa20ff85 | |||
474a9af78d | |||
61425eacb8 | |||
4870def1fb | |||
3e73fb9233 | |||
5ad6061c3f | |||
fae019b974 | |||
3bb06d8364 | |||
c9c9afa472 | |||
bd0671e123 | |||
6f3ec8d21f | |||
9a0bf13feb | |||
9ff1a6f0cd | |||
a59f64cae1 | |||
a4ecd09723 | |||
f159dfd15a | |||
9e8ec86fa3 | |||
62bb78f58d | |||
893011c3ba | |||
880cb8e7cc | |||
85f83f2c74 | |||
4751e459cc | |||
138efa6cec | |||
a68e50935e | |||
e8f5fb35ac | |||
6af27669b0 | |||
e162f24119 | |||
dbcc462a48 | |||
2d5313639a | |||
38af0f436d | |||
888c2ffb20 | |||
588593f619 | |||
2cdd515b12 | |||
0aad71d46e | |||
6f9285322d | |||
68c7f992fa | |||
1feff408ff | |||
f752e02487 | |||
c9c7fb0a27 | |||
de680c2a8e | |||
03695ba4c5 | |||
c2e2960bf7 | |||
385d2a580c | |||
7e02652068 | |||
ae29c9b4a0 | |||
078f917e61 | |||
b65f04d500 | |||
6acaffe581 | |||
e47ef42a33 | |||
b950e33d81 | |||
ec8cfc77ad | |||
00a16db9cd | |||
4b9f115586 | |||
c5cc91443e | |||
48d94143e7 | |||
8174a05156 | |||
63cf6363a2 | |||
cc6de605ac | |||
d0151d2b79 | |||
6b45d453b8 | |||
b992a84d67 | |||
cb362e9052 | |||
ccb478c1f6 | |||
6af3680f99 | |||
e6c3c215ab | |||
5c66bbde01 | |||
77dd1bdd4a | |||
6268d540a8 | |||
5918e38747 | |||
3cfb571356 | |||
5eb80f8027 | |||
f6e5f2439d | |||
edf6272374 | |||
7f6a4b0ce3 | |||
3be5f25f2f | |||
1b6cdd5637 | |||
f752e55929 | |||
ebb089b3f1 | |||
ad6303f031 | |||
828b9d6717 | |||
444adcd1ca | |||
69ac305883 | |||
2ff57df2a0 | |||
7077f4cbe2 | |||
266f85f607 | |||
d90ab90145 | |||
48018b3f5b | |||
15584e7062 | |||
d415b17146 | |||
9ed953e8c3 | |||
b60a98bd6e | |||
a15e30d4b3 | |||
d5d133353f | |||
6badc98510 | |||
ea8bfb46ce | |||
58860ed19f | |||
583f652197 | |||
3215dcff78 | |||
38fdd17067 | |||
807ccd15ba | |||
1c923d2f9e | |||
2676b21400 | |||
fd5ef94b5a | |||
02c7eea236 | |||
34d1805b54 | |||
753eaa8266 | |||
0b39c6f98e | |||
55b8d0db4d | |||
3d7969d8a2 | |||
041de8082a | |||
3da1fa4d88 | |||
39df21de30 | |||
8cbb7d7362 | |||
10a0c47210 | |||
89bf3765f3 | |||
8181bc591b | |||
ca877e689c | |||
c6048e2bab | |||
60015aee04 | |||
43e6741071 | |||
b91f6bcbff | |||
64e2f1b949 | |||
13a2f05776 | |||
903374ae9b | |||
d366a07403 | |||
e94921174a | |||
dea5ab2f79 | |||
5e11078f34 | |||
d7670cd4ff | |||
29f3230089 | |||
d003efb522 | |||
97e772e87a | |||
0b33615979 | |||
249cead13e | |||
7c96dea359 | |||
374c9921fd | |||
fb55ab8c33 | |||
13485074ac | |||
4944c965e4 | |||
83c5b3bc38 | |||
7fc42de758 | |||
0a30bd74c1 | |||
9b12a79c8d | |||
0dcde23b05 | |||
8dc15b88eb | |||
d20c952f92 | |||
c2eeeb27fd | |||
180d8b67e4 | |||
9c989c46ee | |||
51633f509d | |||
705228ecc2 | |||
740f6d2258 | |||
3b9ef5ccab | |||
ab74e7f24f | |||
be9a670fb7 | |||
6e43e7a146 | |||
ab2093926a | |||
916b90f415 | |||
2ef3db9fab | |||
6987b6fd58 | |||
078179e9b8 | |||
50ccecdff5 | |||
e838a8c28a | |||
e5f7eeedbf | |||
d1948b5a00 | |||
c07f700c53 | |||
c934a30f66 | |||
310d01d8a2 | |||
f330739bc7 | |||
58626721ad | |||
584c8c07b8 | |||
a93ec03d2c | |||
7bd3a8e004 | |||
912a5f951e | |||
6869089111 | |||
6fd32fe850 | |||
81e2b36d38 | |||
7d811afab1 | |||
39f5aaab8b | |||
5fc81dd6c8 | |||
491a530d90 | |||
c12da50f9b | |||
41e8500fc5 | |||
a7f59ef3c1 | |||
f4466c8c0a | |||
bc6d6b20fa | |||
01326936e6 | |||
c960e8d351 | |||
fc69d31914 | |||
8d425e127b | |||
3cfb07ea38 | |||
76679ffb92 | |||
dc2ec925d7 | |||
81d6ba3ec5 | |||
014bdaa355 | |||
0c60fdd2ce | |||
43d986d14e | |||
123d7c6a37 | |||
5ac7df17f9 | |||
bc0dde696a | |||
c323bd3c87 | |||
5c672adc21 | |||
2f80747dc7 | |||
95749ed0e3 | |||
94eea3abec | |||
fe32159673 | |||
07aa2e1260 | |||
6fec8fad57 | |||
84df487f7d | |||
49708e92d3 | |||
daadae7987 | |||
2b788d06b7 | |||
90cd9bd533 | |||
d63506f98c | |||
17de6876bb | |||
fc540395f9 | |||
da2b4962a9 | |||
3abe305a21 | |||
46e8c09bd8 | |||
e683c34a89 | |||
54e4f75081 | |||
9f256f0929 | |||
ef169a6652 | |||
eaec25f940 | |||
6a87d8975c | |||
b8cf5f9427 | |||
2f1e585446 | |||
f9309b46aa | |||
22f5985f1b | |||
c59c38e50e | |||
232e1bb8a3 | |||
1fbb34620c | |||
89f5b803c9 | |||
55179101cd | |||
132495b1fc | |||
a03d7bf5cd | |||
3bf225e85f | |||
cc2bb290c4 | |||
878ca8c5c5 | |||
4bc41d81ee | |||
f6ca176fc8 | |||
0bec360a31 | |||
04f30710c5 | |||
98c0a2af87 | |||
9db42c1769 | |||
849bced602 | |||
27f29019ef | |||
8642a41f2b | |||
bf902ef5bc | |||
7656b55c22 | |||
7d3d4b9443 | |||
15c093c5e2 | |||
116166f62d | |||
26b19dde75 | |||
c8ddc68f13 | |||
7c9681007c | |||
13206e4976 | |||
2f18302d32 | |||
ddb21d151d | |||
c64a9fb456 | |||
ee19b4f86e | |||
14239e584f | |||
112aecf6eb | |||
c1783d77d7 | |||
f089abb3c5 | |||
8e551f5e32 | |||
290960c3b5 | |||
62af09adbe | |||
e39c0b34e5 | |||
8ad90807ee | |||
533b3170a7 | |||
7732f3f5fb | |||
f52f02a434 | |||
4d7d4d673e | |||
9a437f0d38 | |||
c385f8bb6e | |||
fa44be2a9d | |||
117ab0c141 | |||
7488d19ae6 | |||
60524ad5f2 | |||
fad7ff8bf0 | |||
383d445ba1 | |||
803dcb0800 | |||
fde320e2f2 | |||
8ea97141ea | |||
9f232bac58 | |||
8295cc11c0 | |||
70f80adb9a | |||
9a7cac1e07 | |||
c584a25ec9 | |||
bff32bf7bc | |||
d0e7450389 | |||
4da89ac8a9 | |||
f7032f7d9a | |||
7c7e3931a0 | |||
6be3d62d89 | |||
6f509a8a1e | |||
4379fabf16 | |||
6b66e1a077 | |||
c11a3e0fdc | |||
3418033c55 | |||
caa9a846ed | |||
8ee76bcea0 | |||
47325cbe01 | |||
e0c8417297 | |||
9238ee9572 | |||
64af37e0cd | |||
9f9b79f30b | |||
265f41887f | |||
4f09e5d04c | |||
434f321336 | |||
f4e0d1be58 | |||
e5bae0604b | |||
e7da083c31 | |||
367c32dabe | |||
e054238af6 | |||
e8faf6d59a | |||
baa4ea3cd8 | |||
75ef0f0329 | |||
65185c0011 | |||
eb94613d7d | |||
67f4f4fb49 | |||
a7ecf4ac4c | |||
45765b625a | |||
aa0a184ebe | |||
069f9f0d5d | |||
c82b520ea8 | |||
9d6e5bde4a | |||
0eb3669fbf | |||
30449b6054 | |||
f5f71a19b8 | |||
0135971769 | |||
8579795c40 | |||
9d77fd7eec | |||
8c40d1bd72 | |||
7a0bc7d888 | |||
1e07014f86 | |||
49281b24e5 | |||
a8b1980de4 | |||
b8cd5f0482 | |||
cc9f0788aa | |||
209910299d | |||
17926ff5d9 | |||
957fb0667c | |||
8d17aed785 | |||
7ef8d5ddde | |||
9930a2e167 | |||
a86be9ebf2 | |||
ad6665c8b6 | |||
923162ae9d | |||
dd2bd67049 | |||
d500bbff04 | |||
e759bd1a99 | |||
94daf4cea4 | |||
2379792e0a | |||
dba6d7a8a6 | |||
086c206b76 | |||
5dd567deef | |||
b6d8f737ca | |||
491ba9da84 | |||
a420a9293f | |||
c1bc5f6a07 | |||
9834c251d0 | |||
54340ed4c6 | |||
96a0a9202c | |||
a4c081d3a1 | |||
d1b6206858 | |||
0eb6849fe3 | |||
b725fdb093 | |||
1436bb1ff2 | |||
5a44c36b1f | |||
5d990502cb | |||
64735da716 | |||
95b82aa6dc | |||
f09952f3d7 | |||
b98e04dc56 | |||
cb436250da | |||
4376032e3a | |||
c231331e05 | |||
624c151ca2 | |||
5d0356f74b | |||
b019416518 | |||
4fcd9e3bd6 | |||
66bf889c39 | |||
a2811842c8 | |||
1929601425 | |||
282afee47e | |||
e701ccc949 | |||
6543497c17 | |||
7d9af5a937 | |||
720c54a5bb | |||
5dca3c41f2 | |||
929546f60b | |||
cb0ce9986c | |||
064eba00fd | |||
a4336a39d6 | |||
298989c4b9 | |||
48c28c2267 | |||
d76ecbc9c9 | |||
79fb9c00aa | |||
c9e03f37ce | |||
aa5f1699a7 | |||
e1e9126d03 | |||
672a4b3723 | |||
955f76baab | |||
7da8a5e2d1 | |||
ff82fbf112 | |||
8503a0a58f | |||
b1e9512f44 | |||
608def9c78 | |||
bcb21bc1d8 | |||
f63096620a | |||
9b26892bae | |||
572475ce14 | |||
876d7995e1 | |||
b8655e30d4 | |||
7cf0d55546 | |||
ce60b960c0 | |||
cebcb5b92d | |||
11a0f96f5e | |||
74ebaf1744 | |||
f7496ea6d1 | |||
bebba7dc1f | |||
afb2bf442c | |||
c7de48c982 | |||
f906112c03 | |||
8ef864fb39 | |||
1c9b5ab53c | |||
c10faae3b5 | |||
2104dd5a0a | |||
fbe64037db | |||
d8c50b150c | |||
8871bb2d8e | |||
a148454376 | |||
be518b569b | |||
c998fbe2ae | |||
9f12cd0c09 | |||
0d0fee1ca1 | |||
a0410c4677 | |||
8fe464cfa3 | |||
3e2d6d9e8b | |||
32d677787b | |||
dfd1c4eab3 | |||
36bb1f989d | |||
684f4c59e0 | |||
1b77e8a69a | |||
662e10c3e0 | |||
c935fdb12f | |||
9e16937914 | |||
f705202381 | |||
f5532ad9f7 | |||
570e71f050 | |||
c9cc4b4369 | |||
7111aa3b18 | |||
12eba4bcc7 | |||
4610de8fdd | |||
3fcc2dd944 | |||
8299bae2d4 | |||
604ccf7552 | |||
f3dd47948a | |||
c3bb207488 | |||
9009d1bfb3 | |||
fa4d9e8bcb | |||
34b77efc87 | |||
5ca0ccbcd2 | |||
6aa4e52480 | |||
f98e9a2ad7 | |||
c6134cc25b | |||
0443b39264 | |||
8b0b8efbcb | |||
97449cee43 | |||
ab5252c750 | |||
05a27cb34d | |||
b02eab57d2 |
14  .buildkite/hooks/post-command  (Executable file)
@@ -0,0 +1,14 @@
#!/bin/bash -e

[[ -n "$CARGO_TARGET_CACHE_NAME" ]] || exit 0

#
# Save target/ for the next CI build on this machine
#
(
  d=$HOME/cargo-target-cache/"$CARGO_TARGET_CACHE_NAME"
  mkdir -p $d
  set -x
  rsync -a --delete --link-dest=$PWD target $d
  du -hs $d
)
13  .buildkite/hooks/pre-command  (Executable file)
@@ -0,0 +1,13 @@
#!/bin/bash -e

[[ -n "$CARGO_TARGET_CACHE_NAME" ]] || exit 0

#
# Restore target/ from the previous CI build on this machine
#
(
  d=$HOME/cargo-target-cache/"$CARGO_TARGET_CACHE_NAME"
  mkdir -p $d/target
  set -x
  rsync -a --delete --link-dest=$d $d/target .
)
@@ -1,2 +1,5 @@
ignore:
  - "src/bin"
coverage:
  status:
    patch: off
11  .gitignore  (vendored)
@@ -1,4 +1,11 @@
Cargo.lock
/target/
**/*.rs.bk
Cargo.lock
.cargo

# node configuration files
/config/
/config-private/
/config-drone/
/config-validator/
/config-client/
22  .travis.yml
@@ -1,22 +0,0 @@
language: rust
required: sudo
services:
  - docker
matrix:
  allow_failures:
    - rust: nightly
  include:
    - rust: stable
    - rust: nightly
      env:
        - FEATURES='asm,unstable'
before_script: |
  export PATH="$PATH:$HOME/.cargo/bin"
  rustup component add rustfmt-preview
script:
  - cargo fmt -- --write-mode=diff
  - cargo build --verbose --features "$FEATURES"
  - cargo test --verbose --features "$FEATURES"
after_success: |
  docker run -it --rm --security-opt seccomp=unconfined --volume "$PWD:/volume" elmtai/docker-rust-kcov
  bash <(curl -s https://codecov.io/bash) -s target/cov
77  Cargo.toml
@@ -1,33 +1,82 @@
[package]
name = "silk"
description = "A silky smooth implementation of the Loom architecture"
version = "0.2.3"
documentation = "https://docs.rs/silk"
homepage = "http://loomprotocol.com/"
repository = "https://github.com/loomprotocol/silk"
name = "solana"
description = "Blockchain, Rebuilt for Scale"
version = "0.7.0-beta"
documentation = "https://docs.rs/solana"
homepage = "http://solana.com/"
readme = "README.md"
repository = "https://github.com/solana-labs/solana"
authors = [
    "Anatoly Yakovenko <aeyakovenko@gmail.com>",
    "Greg Fitzgerald <garious@gmail.com>",
    "Anatoly Yakovenko <anatoly@solana.com>",
    "Greg Fitzgerald <greg@solana.com>",
    "Stephen Akridge <stephen@solana.com>",
]
license = "Apache-2.0"

[[bin]]
name = "silk-demo"
path = "src/bin/demo.rs"
name = "solana-client-demo"
path = "src/bin/client-demo.rs"

[[bin]]
name = "solana-wallet"
path = "src/bin/wallet.rs"

[[bin]]
name = "solana-fullnode"
path = "src/bin/fullnode.rs"

[[bin]]
name = "solana-fullnode-config"
path = "src/bin/fullnode-config.rs"

[[bin]]
name = "solana-genesis"
path = "src/bin/genesis.rs"

[[bin]]
name = "solana-mint"
path = "src/bin/mint.rs"

[[bin]]
name = "solana-drone"
path = "src/bin/drone.rs"

[badges]
codecov = { repository = "loomprotocol/silk", branch = "master", service = "github" }
codecov = { repository = "solana-labs/solana", branch = "master", service = "github" }

[features]
unstable = []
asm = ["sha2-asm"]
ipv6 = []
cuda = []
erasure = []

[dependencies]
rayon = "1.0.0"
sha2 = "0.7.0"
sha2-asm = {version="0.3", optional=true}
generic-array = { version = "0.9.0", default-features = false, features = ["serde"] }
generic-array = { version = "0.11.1", default-features = false, features = ["serde"] }
serde = "1.0.27"
serde_derive = "1.0.27"
serde_json = "1.0.10"
ring = "0.12.1"
untrusted = "0.5.1"
bincode = "1.0.0"
chrono = { version = "0.4.0", features = ["serde"] }
log = "0.4.2"
env_logger = "0.5.10"
matches = "0.1.6"
byteorder = "1.2.1"
libc = "0.2.1"
getopts = "0.2"
atty = "0.2"
rand = "0.5.1"
pnet_datalink = "0.21.0"
tokio = "0.1"
tokio-codec = "0.1"
tokio-core = "0.1.17"
tokio-io = "0.1"
itertools = "0.7.8"
bs58 = "0.2.0"
p2p = "0.5.2"
futures = "0.1.21"
clap = "2.31"
reqwest = "0.8.6"
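The `[[bin]]` entries above are what turn the files under `src/bin/` into installable command-line programs. As an illustrative sketch (not part of the diff itself; exact flags depend on the release you check out), any one of them can be built individually with Cargo:

```bash
# Build only the fullnode binary declared in Cargo.toml (release profile).
cargo build --release --bin solana-fullnode
# The executable lands under target/release/.
./target/release/solana-fullnode
```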
2  LICENSE
@@ -1,4 +1,4 @@
Copyright 2018 Anatoly Yakovenko <anatoly@loomprotocol.com> and Greg Fitzgerald <garious@gmail.com>
Copyright 2018 Solana Labs, Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
341  README.md
@@ -1,87 +1,225 @@
[](https://crates.io/crates/silk)
[](https://docs.rs/silk)
[](https://travis-ci.org/loomprotocol/silk)
[](https://codecov.io/gh/loomprotocol/silk)
[](https://crates.io/crates/solana)
[](https://docs.rs/solana)
[](https://solana-ci-gate.herokuapp.com/buildkite_public_log?https://buildkite.com/solana-labs/solana/builds/latest/master)
[](https://codecov.io/gh/solana-labs/solana)

# Silk, a silky smooth implementation of the Loom specification
Blockchain, Rebuilt for Scale
===

Loom is a new achitecture for a high performance blockchain. Its whitepaper boasts a theoretical
throughput of 710k transactions per second on a 1 gbps network. The specification is implemented
in two git repositories. Reserach is performed in the loom repository. That work drives the
Loom specification forward. This repository, on the other hand, aims to implement the specification
as-is. We care a great deal about quality, clarity and short learning curve. We avoid the use
of `unsafe` Rust and write tests for *everything*. Optimizations are only added when
corresponding benchmarks are also added that demonstrate real performance boosts. We expect the
feature set here will always be a ways behind the loom repo, but that this is an implementation
you can take to the bank, literally.
Solana™ is a new blockchain architecture built from the ground up for scale. The architecture supports
up to 710 thousand transactions per second on a gigabit network.

# Usage
Disclaimer
===

Add the latest [silk package](https://crates.io/crates/silk) to the `[dependencies]` section
of your Cargo.toml.
All claims, content, designs, algorithms, estimates, roadmaps, specifications, and performance measurements described in this project are done with the author's best effort. It is up to the reader to check and validate their accuracy and truthfulness. Furthermore nothing in this project constitutes a solicitation for investment.

Create a *Historian* and send it *events* to generate an *event log*, where each log *entry*
is tagged with the historian's latest *hash*. Then ensure the order of events was not tampered
with by verifying each entry's hash can be generated from the hash in the previous entry:
Introduction
===


It's possible for a centralized database to process 710,000 transactions per second on a standard gigabit network if the transactions are, on average, no more than 176 bytes. A centralized database can also replicate itself and maintain high availability without significantly compromising that transaction rate using the distributed system technique known as Optimistic Concurrency Control [H.T.Kung, J.T.Robinson (1981)]. At Solana, we're demonstrating that these same theoretical limits apply just as well to blockchain on an adversarial network. The key ingredient? Finding a way to share time when nodes can't trust one-another. Once nodes can trust time, suddenly ~40 years of distributed systems research becomes applicable to blockchain! Furthermore, and much to our surprise, it can be implemented using a mechanism that has existed in Bitcoin since day one. The Bitcoin feature is called nLocktime and it can be used to postdate transactions using block height instead of a timestamp. As a Bitcoin client, you'd use block height instead of a timestamp if you don't trust the network. Block height turns out to be an instance of what's being called a Verifiable Delay Function in cryptography circles. It's a cryptographically secure way to say time has passed. In Solana, we use a far more granular verifiable delay function, a SHA 256 hash chain, to checkpoint the ledger and coordinate consensus. With it, we implement Optimistic Concurrency Control and are now well in route towards that theoretical limit of 710,000 transactions per second.

```rust
extern crate silk;

use silk::historian::Historian;
use silk::log::{verify_slice, Entry, Event, Sha256Hash};
use std::thread::sleep;
use std::time::Duration;
use std::sync::mpsc::SendError;
Testnet Demos
===

fn create_log(hist: &Historian) -> Result<(), SendError<Event>> {
    sleep(Duration::from_millis(15));
    let data = Sha256Hash::default();
    hist.sender.send(Event::Discovery { data })?;
    sleep(Duration::from_millis(10));
    Ok(())
}
The Solana repo contains all the scripts you might need to spin up your own
local testnet. Depending on what you're looking to achieve, you may want to
run a different variation, as the full-fledged, performance-enhanced
multinode testnet is considerably more complex to set up than a Rust-only,
singlenode testnode. If you are looking to develop high-level features, such
as experimenting with smart contracts, save yourself some setup headaches and
stick to the Rust-only singlenode demo. If you're doing performance optimization
of the transaction pipeline, consider the enhanced singlenode demo. If you're
doing consensus work, you'll need at least a Rust-only multinode demo. If you want
to reproduce our TPS metrics, run the enhanced multinode demo.

fn main() {
    let seed = Sha256Hash::default();
    let hist = Historian::new(&seed, Some(10));
    create_log(&hist).expect("send error");
    drop(hist.sender);
    let entries: Vec<Entry> = hist.receiver.iter().collect();
    for entry in &entries {
        println!("{:?}", entry);
    }
For all four variations, you'd need the latest Rust toolchain and the Solana
source code:

    // Proof-of-History: Verify the historian learned about the events
    // in the same order they appear in the vector.
    assert!(verify_slice(&entries, &seed));
}
First, install Rust's package manager Cargo.

```bash
$ curl https://sh.rustup.rs -sSf | sh
$ source $HOME/.cargo/env
```

Running the program should produce a log similar to:
Now checkout the code from github:

```rust
Entry { num_hashes: 0, end_hash: [0, ...], event: Tick }
Entry { num_hashes: 2, end_hash: [67, ...], event: Discovery { data: [37, ...] } }
Entry { num_hashes: 3, end_hash: [123, ...], event: Tick }
```bash
$ git clone https://github.com/solana-labs/solana.git
$ cd solana
```

Proof-of-History
The demo code is sometimes broken between releases as we add new low-level
features, so if this is your first time running the demo, you'll improve
your odds of success if you check out the
[latest release](https://github.com/solana-labs/solana/releases)
before proceeding:

```bash
$ git checkout v0.7.0-beta
```

Configuration Setup
---

Take note of the last line:
The network is initialized with a genesis ledger and leader/validator configuration files.
These files can be generated by running the following script.

```rust
assert!(verify_slice(&entries, &seed));
```bash
$ ./multinode-demo/setup.sh
```

[It's a proof!](https://en.wikipedia.org/wiki/Curry–Howard_correspondence) For each entry returned by the
historian, we can verify that `end_hash` is the result of applying a sha256 hash to the previous `end_hash`
exactly `num_hashes` times, and then hashing the event data on top of that. Because the event data is
included in the hash, the events cannot be reordered without regenerating all the hashes.
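To make that hash-chain reasoning concrete, here is a minimal standalone sketch, not taken from the repository: it uses the `sha2` crate with the newer `Digest::digest` one-shot API (the Cargo.toml above pins the older 0.7 release), and `extend_chain` is a name invented for this example rather than the crate's own `Historian`/`verify_slice` interface shown above.

```rust
// Hypothetical sketch: extend a SHA-256 hash chain by `num_hashes` steps,
// optionally mixing in event data, then verify by recomputing the chain.
use sha2::{Digest, Sha256};

fn extend_chain(start: &[u8], num_hashes: u64, event_data: Option<&[u8]>) -> Vec<u8> {
    let mut end_hash = start.to_vec();
    for _ in 0..num_hashes {
        // Each step hashes the previous output, so computing the chain takes time.
        end_hash = Sha256::digest(&end_hash).to_vec();
    }
    if let Some(data) = event_data {
        // Mixing the event data into the chain pins its position in the sequence.
        let mut mixed = end_hash;
        mixed.extend_from_slice(data);
        end_hash = Sha256::digest(&mixed).to_vec();
    }
    end_hash
}

fn main() {
    let seed = [0u8; 32];
    let tick = extend_chain(&seed, 3, None);
    let entry = extend_chain(&tick, 2, Some(b"event"));
    // Anyone holding the seed and the event data can recompute and compare.
    assert_eq!(entry, extend_chain(&extend_chain(&seed, 3, None), 2, Some(b"event")));
    println!("chain verified");
}
```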
Singlenode Testnet
---

# Developing
Before you start a fullnode, make sure you know the IP address of the machine you
want to be the leader for the demo, and make sure that udp ports 8000-10000 are
open on all the machines you want to test with.

Now start the server:

```bash
$ ./multinode-demo/leader.sh
```

To run a performance-enhanced fullnode on Linux,
[CUDA 9.2](https://developer.nvidia.com/cuda-downloads) must be installed on
your system:
```bash
$ ./fetch-perf-libs.sh
$ SOLANA_CUDA=1 ./multinode-demo/leader.sh
```

Wait a few seconds for the server to initialize. It will print "Ready." when it's ready to
receive transactions.

Multinode Testnet
---

To run a multinode testnet, after starting a leader node, spin up some validator nodes:

```bash
$ ./multinode-demo/validator.sh ubuntu@10.0.1.51:~/solana 10.0.1.51
```

To run a performance-enhanced fullnode on Linux,
[CUDA 9.2](https://developer.nvidia.com/cuda-downloads) must be installed on
your system:
```bash
$ ./fetch-perf-libs.sh
$ SOLANA_CUDA=1 ./multinode-demo/leader.sh ubuntu@10.0.1.51:~/solana 10.0.1.51
```


Testnet Client Demo
---

Now that your singlenode or multinode testnet is up and running, in a separate shell, let's send it some transactions! Note we pass in
the JSON configuration file here, not the genesis ledger.

```bash
$ ./multinode-demo/client.sh ubuntu@10.0.1.51:~/solana 2 #The leader machine and the total number of nodes in the network
```

What just happened? The client demo spins up several threads to send 500,000 transactions
to the testnet as quickly as it can. The client then pings the testnet periodically to see
how many transactions it processed in that time. Take note that the demo intentionally
floods the network with UDP packets, such that the network will almost certainly drop a
bunch of them. This ensures the testnet has an opportunity to reach 710k TPS. The client
demo completes after it has convinced itself the testnet won't process any additional
transactions. You should see several TPS measurements printed to the screen. In the
multinode variation, you'll see TPS measurements for each validator node as well.

Linux Snap
---
A Linux [Snap](https://snapcraft.io/) is available, which can be used to
easily get Solana running on supported Linux systems without building anything
from source. The `edge` Snap channel is updated daily with the latest
development from the `master` branch. To install:
```bash
$ sudo snap install solana --edge --devmode
```
(`--devmode` flag is required only for `solana.fullnode-cuda`)

Once installed the usual Solana programs will be available as `solana.*` instead
of `solana-*`. For example, `solana.fullnode` instead of `solana-fullnode`.

Update to the latest version at any time with
```bash
$ snap info solana
$ sudo snap refresh solana --devmode
```

### Daemon support
The snap supports running a leader, validator or leader+drone node as a system
daemon.

Run `sudo snap get solana` to view the current daemon configuration, and
`sudo snap logs -f solana` to view the daemon logs.

Disable the daemon at any time by running:
```bash
$ sudo snap set solana mode=
```

Runtime configuration files for the daemon can be found in
`/var/snap/solana/current/config`.

#### Leader daemon
```bash
$ sudo snap set solana mode=leader
```

If CUDA is available:
```bash
$ sudo snap set solana mode=leader enable-cuda=1
```

`rsync` must be configured and running on the leader.

1. Ensure rsync is installed with `sudo apt-get -y install rsync`
2. Edit `/etc/rsyncd.conf` to include the following
```
[config]
path = /var/snap/solana/current/config
hosts allow = *
read only = true
```
3. Run `sudo systemctl enable rsync; sudo systemctl start rsync`
4. Test by running `rsync -Pzravv rsync://<ip-address-of-leader>/config
solana-config` from another machine. **If the leader is running on a cloud
provider it may be necessary to configure the Firewall rules to permit ingress
to port tcp:873, tcp:9900 and the port range udp:8000-udp:10000**


To run both the Leader and Drone:
```bash
$ sudo snap set solana mode=leader+drone

```

#### Validator daemon
```bash
$ sudo snap set solana mode=validator

```
If CUDA is available:
```bash
$ sudo snap set solana mode=validator enable-cuda=1
```

By default the validator will connect to **testnet.solana.com**, override
the leader IP address by running:
```bash
$ sudo snap set solana mode=validator leader-address=127.0.0.1 #<-- change IP address
```
It's assumed that the leader will be running `rsync` configured as described in
the previous **Leader daemon** section.

Developing
===

Building
---

@@ -94,11 +232,22 @@ $ source $HOME/.cargo/env
$ rustup component add rustfmt-preview
```

If your rustc version is lower than 1.26.1, please update it:

```bash
$ rustup update
```

On Linux systems you may need to install libssl-dev and pkg-config. On Ubuntu:
```bash
$ sudo apt-get install libssl-dev pkg-config
```

Download the source code:

```bash
$ git clone https://github.com/loomprotocol/silk.git
$ cd silk
$ git clone https://github.com/solana-labs/solana.git
$ cd solana
```

Testing

@@ -107,9 +256,37 @@ Testing

Run the test suite:

```bash
cargo test
$ cargo test
```

To emulate all the tests that will run on a Pull Request, run:
```bash
$ ./ci/run-local.sh
```

Debugging
---

There are some useful debug messages in the code, you can enable them on a per-module and per-level
basis with the normal RUST\_LOG environment variable. Run the fullnode with this syntax:
```bash
$ RUST_LOG=solana::streamer=debug,solana::server=info cat genesis.log | ./target/release/solana-fullnode > transactions0.log
```
to see the debug and info sections for streamer and server respectively. Generally
we are using debug for infrequent debug messages, trace for potentially frequent messages and
info for performance-related logging.

Attaching to a running process with gdb

```
$ sudo gdb
attach <PID>
set logging on
thread apply all bt
```

This will dump all the threads stack traces into gdb.txt

Benchmarking
---

@@ -122,5 +299,37 @@ $ rustup install nightly

Run the benchmarks:

```bash
$ cargo +nightly bench --features="asm,unstable"
$ cargo +nightly bench --features="unstable"
```

Code coverage
---

To generate code coverage statistics, install cargo-cov. Note: the tool currently only works
in Rust nightly.

```bash
$ cargo +nightly install cargo-cov
```

Run cargo-cov and generate a report:

```bash
$ cargo +nightly cov test
$ cargo +nightly cov report --open
```

The coverage report will be written to `./target/cov/report/index.html`


Why coverage? While most see coverage as a code quality metric, we see it primarily as a developer
productivity metric. When a developer makes a change to the codebase, presumably it's a *solution* to
some problem. Our unit-test suite is how we encode the set of *problems* the codebase solves. Running
the test suite should indicate that your change didn't *infringe* on anyone else's solutions. Adding a
test *protects* your solution from future changes. Say you don't understand why a line of code exists,
try deleting it and running the unit-tests. The nearest test failure should tell you what problem
was solved by that code. If no test fails, go ahead and submit a Pull Request that asks, "what
problem is solved by this code?" On the other hand, if a test does fail and you can think of a
better way to solve the same problem, a Pull Request with your solution would most certainly be
welcome! Likewise, if rewriting a test can better communicate what code it's protecting, please
send us that patch!
1  _config.yml  (Normal file)
@@ -0,0 +1 @@
theme: jekyll-theme-slate
16  build.rs  (Normal file)
@@ -0,0 +1,16 @@
use std::env;

fn main() {
    println!("cargo:rustc-link-search=native=.");
    if !env::var("CARGO_FEATURE_CUDA").is_err() {
        println!("cargo:rustc-link-lib=static=cuda_verify_ed25519");
        println!("cargo:rustc-link-search=native=/usr/local/cuda/lib64");
        println!("cargo:rustc-link-lib=dylib=cudart");
        println!("cargo:rustc-link-lib=dylib=cuda");
        println!("cargo:rustc-link-lib=dylib=cudadevrt");
    }
    if !env::var("CARGO_FEATURE_ERASURE").is_err() {
        println!("cargo:rustc-link-lib=dylib=Jerasure");
        println!("cargo:rustc-link-lib=dylib=gf_complete");
    }
}
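build.rs only emits these extra link directives when Cargo sets the corresponding `CARGO_FEATURE_*` environment variables, so the GPU and erasure libraries are linked only for builds that opt in. As a hedged illustration (the feature names come from the `[features]` section of Cargo.toml; the libraries themselves must already be installed on the machine):

```bash
# Plain build: no CUDA or erasure libraries are linked.
cargo build --release
# Opt-in build: cargo exports CARGO_FEATURE_CUDA / CARGO_FEATURE_ERASURE,
# which triggers the extra rustc-link-lib lines in build.rs above.
cargo build --release --features="cuda,erasure"
```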
3  ci/.gitignore  (vendored, Normal file)
@@ -0,0 +1,3 @@
/node_modules/
/package-lock.json
/snapcraft.credentials
88  ci/README.md  (Normal file)
@@ -0,0 +1,88 @@
Our CI infrastructure is built around [BuildKite](https://buildkite.com) with some
additional GitHub integration provided by https://github.com/mvines/ci-gate

## Agent Queues

We define two [Agent Queues](https://buildkite.com/docs/agent/v3/queues):
`queue=default` and `queue=cuda`. The `default` queue should be favored and
runs on lower-cost CPU instances. The `cuda` queue is only necessary for
running **tests** that depend on GPU (via CUDA) access -- CUDA builds may still
be run on the `default` queue, and the [buildkite artifact
system](https://buildkite.com/docs/builds/artifacts) used to transfer build
products over to a GPU instance for testing.

## Buildkite Agent Management

### Buildkite GCP Setup

CI runs on Google Cloud Platform via two Compute Engine Instance groups:
`ci-default` and `ci-cuda`. Autoscaling is currently disabled and the number of
VM Instances in each group is manually adjusted.

#### Updating a CI Disk Image

Each Instance group has its own disk image, `ci-default-vX` and
`ci-cuda-vY`, where *X* and *Y* are incremented each time the image is changed.

The process to update a disk image is as follows (TODO: make this less manual):

1. Create a new VM Instance using the disk image to modify.
2. Once the VM boots, ssh to it and modify the disk as desired.
3. Stop the VM Instance running the modified disk. Remember the name of the VM disk
4. From another machine, `gcloud auth login`, then create a new Disk Image based
off the modified VM Instance:
```
$ gcloud compute images create ci-default-v5 --source-disk xxx --source-disk-zone us-east1-b
```
or
```
$ gcloud compute images create ci-cuda-v5 --source-disk xxx --source-disk-zone us-east1-b
```
5. Delete the new VM instance.
6. Go to the Instance templates tab, find the existing template named
`ci-default-vX` or `ci-cuda-vY` and select it. Use the "Copy" button to create
a new Instance template called `ci-default-vX+1` or `ci-cuda-vY+1` with the
newly created Disk image.
7. Go to the Instance Groups tag and find the applicable group, `ci-default` or
`ci-cuda`. Edit the Instance Group in two steps: (a) Set the number of
instances to 0 and wait for them all to terminate, (b) Update the Instance
template and restore the number of instances to the original value.
8. Clean up the previous version by deleting it from Instance Templates and
Images.


## Reference

### Buildkite AWS CloudFormation Setup

**AWS CloudFormation is currently inactive, although it may be restored in the
future**

AWS CloudFormation can be used to scale machines up and down based on the
current CI load. If no machine is currently running it can take up to 60
seconds to spin up a new instance, please remain calm during this time.

#### AMI
We use a custom AWS AMI built via https://github.com/solana-labs/elastic-ci-stack-for-aws/tree/solana/cuda.

Use the following process to update this AMI as dependencies change:
```bash
$ export AWS_ACCESS_KEY_ID=my_access_key
$ export AWS_SECRET_ACCESS_KEY=my_secret_access_key
$ git clone https://github.com/solana-labs/elastic-ci-stack-for-aws.git -b solana/cuda
$ cd elastic-ci-stack-for-aws/
$ make build
$ make build-ami
```

Watch for the *"amazon-ebs: AMI:"* log message to extract the name of the new
AMI. For example:
```
amazon-ebs: AMI: ami-07118545e8b4ce6dc
```
The new AMI should also now be visible in your EC2 Dashboard. Go to the desired
AWS CloudFormation stack, update the **ImageId** field to the new AMI id, and
*apply* the stack changes.
4  ci/buildkite-snap.yml  (Normal file)
@@ -0,0 +1,4 @@
steps:
  - command: "ci/snap.sh"
    timeout_in_minutes: 20
    name: "snap [public]"
38  ci/buildkite.yml  (Normal file)
@@ -0,0 +1,38 @@
steps:
  - command: "ci/docker-run.sh rust ci/test-stable.sh"
    name: "stable [public]"
    env:
      CARGO_TARGET_CACHE_NAME: "stable"
    timeout_in_minutes: 20
  - command: "ci/shellcheck.sh"
    name: "shellcheck [public]"
    timeout_in_minutes: 20
  - command: "ci/docker-run.sh rustlang/rust:nightly ci/test-nightly.sh"
    name: "nightly [public]"
    env:
      CARGO_TARGET_CACHE_NAME: "nightly"
    timeout_in_minutes: 30
  - command: "ci/test-stable-perf.sh"
    name: "stable-perf [public]"
    env:
      CARGO_TARGET_CACHE_NAME: "stable-perf"
    timeout_in_minutes: 20
    retry:
      automatic:
        - exit_status: "*"
          limit: 2
    agents:
      - "queue=cuda"
  - command: "ci/pr-snap.sh"
    timeout_in_minutes: 20
    name: "snap [public]"
  - wait
  - command: "ci/publish-crate.sh"
    timeout_in_minutes: 20
    name: "publish crate [public]"
  - command: "ci/hoover.sh"
    timeout_in_minutes: 20
    name: "clean agent [public]"
  - trigger: "solana-snap"
    branches: "!pull/*"
    async: true
49  ci/docker-run.sh  (Executable file)
@@ -0,0 +1,49 @@
#!/bin/bash -e

usage() {
  echo "Usage: $0 [docker image name] [command]"
  echo
  echo Runs command in the specified docker image with
  echo a CI-appropriate environment
  echo
}

cd "$(dirname "$0")/.."

IMAGE="$1"
if [[ -z "$IMAGE" ]]; then
  echo Error: image not defined
  exit 1
fi

docker pull "$IMAGE"
shift

ARGS=(
  --workdir /solana
  --volume "$PWD:/solana"
  --volume "$HOME:/home"
  --env "CARGO_HOME=/home/.cargo"
  --rm
)

# kcov tries to set the personality of the binary which docker
# doesn't allow by default.
ARGS+=(--security-opt "seccomp=unconfined")

# Ensure files are created with the current host uid/gid
if [[ -z "$SOLANA_DOCKER_RUN_NOSETUID" ]]; then
  ARGS+=(--user "$(id -u):$(id -g)")
fi

# Environment variables to propagate into the container
ARGS+=(
  --env BUILDKITE_BRANCH
  --env BUILDKITE_TAG
  --env CODECOV_TOKEN
  --env CRATES_IO_TOKEN
  --env SNAPCRAFT_CREDENTIALS_KEY
)

set -x
exec docker run "${ARGS[@]}" "$IMAGE" "$@"
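The CI pipeline invokes this wrapper with an image name followed by the command to run inside it, as in the `ci/buildkite.yml` steps above. For example, run from the repository root:

```bash
# Run the stable test suite inside the official "rust" image,
# with the repo mounted at /solana and CI env vars passed through.
ci/docker-run.sh rust ci/test-stable.sh
```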
7  ci/docker-snapcraft/Dockerfile  (Normal file)
@@ -0,0 +1,7 @@
FROM snapcraft/xenial-amd64

# Update snapcraft to latest version
RUN apt-get update -qq \
    && apt-get install -y snapcraft \
    && rm -rf /var/lib/apt/lists/* \
    && snapcraft --version
6  ci/docker-snapcraft/build.sh  (Executable file)
@@ -0,0 +1,6 @@
#!/bin/bash -ex

cd "$(dirname "$0")"

docker build -t solanalabs/snapcraft .
docker push solanalabs/snapcraft
57  ci/hoover.sh  (Executable file)
@@ -0,0 +1,57 @@
#!/bin/bash
#
# Regular maintenance performed on a buildkite agent to control disk usage
#

echo --- Delete all exited containers first
(
  set -x
  exited=$(docker ps -aq --no-trunc --filter "status=exited")
  if [[ -n "$exited" ]]; then
    # shellcheck disable=SC2086 # Don't want to double quote "$exited"
    docker rm $exited
  fi
)

echo --- Delete untagged images
(
  set -x
  untagged=$(docker images | grep '<none>'| awk '{ print $3 }')
  if [[ -n "$untagged" ]]; then
    # shellcheck disable=SC2086 # Don't want to double quote "$untagged"
    docker rmi $untagged
  fi
)

echo --- Delete all dangling images
(
  set -x
  dangling=$(docker images --filter 'dangling=true' -q --no-trunc | sort | uniq)
  if [[ -n "$dangling" ]]; then
    # shellcheck disable=SC2086 # Don't want to double quote "$dangling"
    docker rmi $dangling
  fi
)

echo --- Remove unused docker networks
(
  set -x
  docker network prune -f
)

echo "--- Delete /tmp files older than 1 day owned by $(whoami)"
(
  set -x
  find /tmp -maxdepth 1 -user "$(whoami)" -mtime +1 -print0 | xargs -0 rm -rf
)

echo --- System Status
(
  set -x
  docker images
  docker ps
  docker network ls
  df -h
)

exit 0
8  ci/is-pr.sh  (Executable file)
@@ -0,0 +1,8 @@
#!/bin/bash -e
#
# The standard BUILDKITE_PULL_REQUEST environment variable is always "false" due
# to how solana-ci-gate is used to trigger PR builds rather than using the
# standard Buildkite PR trigger.
#

[[ $BUILDKITE_BRANCH =~ pull/* ]]
18  ci/pr-snap.sh  (Executable file)
@@ -0,0 +1,18 @@
#!/bin/bash -e
#
# Only run snap.sh for pull requests that modify files under /snap
#

cd "$(dirname "$0")"

if ./is-pr.sh; then
  affected_files="$(buildkite-agent meta-data get affected_files)"
  echo "Affected files in this PR: $affected_files"
  if [[ ! ":$affected_files:" =~ :snap/ ]]; then
    echo "Skipping snap build as no files under /snap were modified"
    exit 0
  fi
  exec ./snap.sh
else
  echo "Skipping snap build as this is not a pull request"
fi
19  ci/publish-crate.sh  (Executable file)
@@ -0,0 +1,19 @@
#!/bin/bash -e

cd "$(dirname "$0")/.."

if [[ -z "$BUILDKITE_TAG" ]]; then
  # Skip publish if this is not a tagged release
  exit 0
fi

if [[ -z "$CRATES_IO_TOKEN" ]]; then
  echo CRATES_IO_TOKEN undefined
  exit 1
fi

# TODO: Ensure the published version matches the contents of BUILDKITE_TAG
ci/docker-run.sh rust \
  bash -exc "cargo package; cargo publish --token $CRATES_IO_TOKEN"

exit 0
19  ci/run-local.sh  (Executable file)
@@ -0,0 +1,19 @@
#!/bin/bash -e
#
# Run the entire buildkite CI pipeline locally for pre-testing before sending a
# Github pull request
#

cd "$(dirname "$0")/.."
BKRUN=ci/node_modules/.bin/bkrun

if [[ ! -x $BKRUN ]]; then
  (
    set -x
    cd ci/
    npm install bkrun
  )
fi

set -x
exec ./ci/node_modules/.bin/bkrun ci/buildkite.yml
11  ci/shellcheck.sh  (Executable file)
@@ -0,0 +1,11 @@
#!/bin/bash -e
#
# Reference: https://github.com/koalaman/shellcheck/wiki/Directive

cd "$(dirname "$0")/.."

set -x
find . -name "*.sh" -not -regex ".*/.cargo/.*" -not -regex ".*/node_modules/.*" -print0 \
  | xargs -0 \
      ci/docker-run.sh koalaman/shellcheck --color=always --external-sources --shell=bash
exit 0
43  ci/snap.sh  (Executable file)
@@ -0,0 +1,43 @@
#!/bin/bash -e

cd "$(dirname "$0")/.."

DRYRUN=
if [[ -z $BUILDKITE_BRANCH ]] || ./ci/is-pr.sh; then
  DRYRUN="echo"
fi

if [[ -z "$BUILDKITE_TAG" ]]; then
  SNAP_CHANNEL=edge
else
  SNAP_CHANNEL=beta
fi

if [[ -z $DRYRUN ]]; then
  [[ -n $SNAPCRAFT_CREDENTIALS_KEY ]] || {
    echo SNAPCRAFT_CREDENTIALS_KEY not defined
    exit 1;
  }
  (
    openssl aes-256-cbc -d \
      -in ci/snapcraft.credentials.enc \
      -out ci/snapcraft.credentials \
      -k "$SNAPCRAFT_CREDENTIALS_KEY"

    snapcraft login --with ci/snapcraft.credentials
  ) || {
    rm -f ci/snapcraft.credentials;
    exit 1
  }
fi

set -x

echo --- build
snapcraft

source ci/upload_ci_artifact.sh
upload_ci_artifact solana_*.snap

echo --- publish
$DRYRUN snapcraft push solana_*.snap --release $SNAP_CHANNEL
BIN
ci/snapcraft.credentials.enc
Normal file
Binary file not shown.
34
ci/test-nightly.sh
Executable file
@ -0,0 +1,34 @@
|
||||
#!/bin/bash -e
|
||||
|
||||
cd "$(dirname "$0")/.."
|
||||
|
||||
export RUST_BACKTRACE=1
|
||||
rustc --version
|
||||
cargo --version
|
||||
|
||||
_() {
|
||||
echo "--- $*"
|
||||
"$@"
|
||||
}
|
||||
|
||||
_ cargo build --verbose --features unstable
|
||||
_ cargo test --verbose --features unstable
|
||||
_ cargo bench --verbose --features unstable
|
||||
|
||||
|
||||
exit 0
|
||||
|
||||
# Coverage disabled (see issue #433)
|
||||
_ cargo install --force cargo-cov
|
||||
_ cargo cov test
|
||||
_ cargo cov report
|
||||
|
||||
echo --- Coverage report:
|
||||
ls -l target/cov/report/index.html
|
||||
|
||||
if [[ -z "$CODECOV_TOKEN" ]]; then
|
||||
echo CODECOV_TOKEN undefined
|
||||
else
|
||||
bash <(curl -s https://codecov.io/bash) -x 'llvm-cov gcov'
|
||||
fi
|
||||
|
12
ci/test-stable-perf.sh
Executable file
@ -0,0 +1,12 @@
|
||||
#!/bin/bash -e
|
||||
|
||||
cd "$(dirname "$0")/.."
|
||||
|
||||
./fetch-perf-libs.sh
|
||||
|
||||
export LD_LIBRARY_PATH=$PWD:/usr/local/cuda/lib64
|
||||
export PATH=$PATH:/usr/local/cuda/bin
|
||||
export RUST_BACKTRACE=1
|
||||
|
||||
set -x
|
||||
exec cargo test --features=cuda,erasure
|
18
ci/test-stable.sh
Executable file
@ -0,0 +1,18 @@
|
||||
#!/bin/bash -e
|
||||
|
||||
cd "$(dirname "$0")/.."
|
||||
|
||||
export RUST_BACKTRACE=1
|
||||
rustc --version
|
||||
cargo --version
|
||||
|
||||
_() {
|
||||
echo "--- $*"
|
||||
"$@"
|
||||
}
|
||||
|
||||
_ rustup component add rustfmt-preview
|
||||
_ cargo fmt -- --write-mode=check
|
||||
_ cargo build --verbose
|
||||
_ cargo test --verbose
|
||||
_ cargo test -- --ignored
|
18
ci/upload_ci_artifact.sh
Normal file
@ -0,0 +1,18 @@
|
||||
# |source| me
|
||||
|
||||
upload_ci_artifact() {
|
||||
echo "--- artifact: $1"
|
||||
if [[ -r "$1" ]]; then
|
||||
ls -l "$1"
|
||||
if ${BUILDKITE:-false}; then
|
||||
(
|
||||
set -x
|
||||
buildkite-agent artifact upload "$1"
|
||||
)
|
||||
fi
|
||||
else
|
||||
echo ^^^ +++
|
||||
echo "$1 not found"
|
||||
fi
|
||||
}
|
||||
|
@ -1,18 +0,0 @@
|
||||
msc {
|
||||
client,historian,logger;
|
||||
|
||||
logger=>historian [ label = "e0 = Entry{hash: h0, n: 0, event: Tick}" ] ;
|
||||
logger=>logger [ label = "h1 = hash(h0)" ] ;
|
||||
logger=>logger [ label = "h2 = hash(h1)" ] ;
|
||||
client=>historian [ label = "Discovery(d0)" ] ;
|
||||
historian=>logger [ label = "Discovery(d0)" ] ;
|
||||
logger=>logger [ label = "h3 = hash(h2 + d0)" ] ;
|
||||
logger=>historian [ label = "e1 = Entry{hash: hash(h3), n: 2, event: Discovery(d0)}" ] ;
|
||||
logger=>logger [ label = "h4 = hash(h3)" ] ;
|
||||
logger=>logger [ label = "h5 = hash(h4)" ] ;
|
||||
logger=>logger [ label = "h6 = hash(h5)" ] ;
|
||||
logger=>historian [ label = "e2 = Entry{hash: h6, n: 3, event: Tick}" ] ;
|
||||
client=>historian [ label = "collect()" ] ;
|
||||
historian=>client [ label = "entries = [e0, e1, e2]" ] ;
|
||||
client=>client [ label = "verify_slice(entries, h0)" ] ;
|
||||
}
|
15
doc/consensus.msc
Normal file
@ -0,0 +1,15 @@
|
||||
msc {
|
||||
client,leader,verifier_a,verifier_b,verifier_c;
|
||||
|
||||
client=>leader [ label = "SUBMIT" ] ;
|
||||
leader=>client [ label = "CONFIRMED" ] ;
|
||||
leader=>verifier_a [ label = "CONFIRMED" ] ;
|
||||
leader=>verifier_b [ label = "CONFIRMED" ] ;
|
||||
leader=>verifier_c [ label = "CONFIRMED" ] ;
|
||||
verifier_a=>leader [ label = "VERIFIED" ] ;
|
||||
verifier_b=>leader [ label = "VERIFIED" ] ;
|
||||
leader=>client [ label = "FINALIZED" ] ;
|
||||
leader=>verifier_a [ label = "FINALIZED" ] ;
|
||||
leader=>verifier_b [ label = "FINALIZED" ] ;
|
||||
leader=>verifier_c [ label = "FINALIZED" ] ;
|
||||
}
|
37
fetch-perf-libs.sh
Executable file
@ -0,0 +1,37 @@
|
||||
#!/bin/bash -e
|
||||
|
||||
if [[ $(uname) != Linux ]]; then
|
||||
echo Performance libraries are only available for Linux
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ $(uname -m) != x86_64 ]]; then
|
||||
echo Performance libraries are only available for x86_64 architecture
|
||||
exit 1
|
||||
fi
|
||||
|
||||
(
|
||||
set -x
|
||||
curl -o solana-perf.tgz \
|
||||
https://solana-perf.s3.amazonaws.com/master/x86_64-unknown-linux-gnu/solana-perf.tgz
|
||||
tar zxvf solana-perf.tgz
|
||||
)
|
||||
|
||||
if [[ -r /usr/local/cuda/version.txt && -r cuda-version.txt ]]; then
|
||||
if ! diff /usr/local/cuda/version.txt cuda-version.txt > /dev/null; then
|
||||
echo ==============================================
|
||||
echo Warning: possible CUDA version mismatch
|
||||
echo
|
||||
echo "Expected version: $(cat cuda-version.txt)"
|
||||
echo "Detected version: $(cat /usr/local/cuda/version.txt)"
|
||||
echo ==============================================
|
||||
fi
|
||||
else
|
||||
echo ==============================================
|
||||
echo Warning: unable to validate CUDA version
|
||||
echo ==============================================
|
||||
fi
|
||||
|
||||
echo "Downloaded solana-perf version: $(cat solana-perf-HEAD.txt)"
|
||||
|
||||
exit 0
|
23
multinode-demo/client.sh
Executable file
@ -0,0 +1,23 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# usage: $0 <rsync network path to solana repo on leader machine> <number of nodes in the network>
|
||||
#
|
||||
|
||||
here=$(dirname "$0")
|
||||
# shellcheck source=multinode-demo/common.sh
|
||||
source "$here"/common.sh
|
||||
|
||||
leader=${1:-${here}/..} # Default to local solana repo
|
||||
count=${2:-1}
|
||||
|
||||
rsync_leader_url=$(rsync_url "$leader")
|
||||
|
||||
set -ex
|
||||
mkdir -p "$SOLANA_CONFIG_CLIENT_DIR"
|
||||
$rsync -vPz "$rsync_leader_url"/config/leader.json "$SOLANA_CONFIG_CLIENT_DIR"/
|
||||
$rsync -vPz "$rsync_leader_url"/config-private/mint.json "$SOLANA_CONFIG_CLIENT_DIR"/
|
||||
|
||||
# shellcheck disable=SC2086 # $solana_client_demo should not be quoted
|
||||
exec $solana_client_demo \
|
||||
-n "$count" -l "$SOLANA_CONFIG_CLIENT_DIR"/leader.json \
|
||||
< "$SOLANA_CONFIG_CLIENT_DIR"/mint.json
|
85
multinode-demo/common.sh
Normal file
@ -0,0 +1,85 @@
|
||||
# |source| this file
|
||||
#
|
||||
# Disable complaints about unused variables in this file:
|
||||
# shellcheck disable=2034
|
||||
|
||||
rsync=rsync
|
||||
if [[ -d "$SNAP" ]]; then # Running inside a Linux Snap?
|
||||
solana_program() {
|
||||
declare program="$1"
|
||||
if [[ "$program" = wallet ]]; then
|
||||
# TODO: Merge wallet.sh functionality into solana-wallet proper and
|
||||
# remove this special case
|
||||
printf "%s/bin/solana-%s" "$SNAP" "$program"
|
||||
else
|
||||
printf "%s/command-%s.wrapper" "$SNAP" "$program"
|
||||
fi
|
||||
}
|
||||
rsync="$SNAP"/bin/rsync
|
||||
SOLANA_CUDA="$(snapctl get enable-cuda)"
|
||||
|
||||
elif [[ -n "$USE_SNAP" ]]; then # Use the Linux Snap binaries
|
||||
solana_program() {
|
||||
declare program="$1"
|
||||
printf "solana.%s" "$program"
|
||||
}
|
||||
elif [[ -n "$USE_INSTALL" ]]; then # Assume |cargo install| was run
|
||||
solana_program() {
|
||||
declare program="$1"
|
||||
printf "solana-%s" "$program"
|
||||
}
|
||||
# CUDA was/wasn't selected at build time, can't affect CUDA state here
|
||||
unset SOLANA_CUDA
|
||||
else
|
||||
solana_program() {
|
||||
declare program="$1"
|
||||
declare features=""
|
||||
if [[ "$program" =~ ^(.*)-cuda$ ]]; then
|
||||
program=${BASH_REMATCH[1]}
|
||||
features="--features=cuda,erasure"
|
||||
fi
|
||||
if [[ -z "$DEBUG" ]]; then
|
||||
maybe_release=--release
|
||||
fi
|
||||
printf "cargo run $maybe_release --bin solana-%s %s -- " "$program" "$features"
|
||||
}
|
||||
fi
|
||||
|
||||
solana_client_demo=$(solana_program client-demo)
|
||||
solana_wallet=$(solana_program wallet)
|
||||
solana_drone=$(solana_program drone)
|
||||
solana_fullnode=$(solana_program fullnode)
|
||||
solana_fullnode_config=$(solana_program fullnode-config)
|
||||
solana_fullnode_cuda=$(solana_program fullnode-cuda)
|
||||
solana_genesis=$(solana_program genesis)
|
||||
solana_mint=$(solana_program mint)
|
||||
|
||||
export RUST_LOG=${RUST_LOG:-solana=info} # if RUST_LOG is unset, default to info
|
||||
export RUST_BACKTRACE=1
|
||||
|
||||
tune_networking() {
|
||||
[[ $(uname) = Linux ]] && (set -x; sudo sysctl -w net.core.rmem_max=26214400 1>/dev/null 2>/dev/null)
|
||||
}
|
||||
|
||||
SOLANA_CONFIG_DIR=${SNAP_DATA:-$PWD}/config
|
||||
SOLANA_CONFIG_PRIVATE_DIR=${SNAP_DATA:-$PWD}/config-private
|
||||
SOLANA_CONFIG_CLIENT_DIR=${SNAP_USER_DATA:-$PWD}/config-client-client
|
||||
|
||||
rsync_url() { # adds the 'rsync://` prefix to URLs that need it
|
||||
declare url="$1"
|
||||
|
||||
if [[ "$url" =~ ^.*:.*$ ]]; then
|
||||
# assume remote-shell transport when colon is present, use $url unmodified
|
||||
echo "$url"
|
||||
return
|
||||
fi
|
||||
|
||||
if [[ -d "$url" ]]; then
|
||||
# assume local directory if $url is a valid directory, use $url unmodified
|
||||
echo "$url"
|
||||
return
|
||||
fi
|
||||
|
||||
# Default to rsync:// URL
|
||||
echo "rsync://$url"
|
||||
}
|
41
multinode-demo/drone.sh
Executable file
@ -0,0 +1,41 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# usage: $0 <rsync network path to solana repo on leader machine>
|
||||
#
|
||||
|
||||
here=$(dirname "$0")
|
||||
# shellcheck source=multinode-demo/common.sh
|
||||
source "$here"/common.sh
|
||||
SOLANA_CONFIG_DIR="$SOLANA_CONFIG_DIR"-drone
|
||||
|
||||
if [[ -d "$SNAP" ]]; then
|
||||
# Exit if mode is not yet configured
|
||||
# (typically the case after the Snap is first installed)
|
||||
[[ -n "$(snapctl get mode)" ]] || exit 0
|
||||
|
||||
# Select leader from the Snap configuration
|
||||
leader_address="$(snapctl get leader-address)"
|
||||
if [[ -z "$leader_address" ]]; then
|
||||
# Assume drone is running on the same node as the leader by default
|
||||
leader_address="localhost"
|
||||
fi
|
||||
leader="$leader_address"
|
||||
else
|
||||
leader=${1:-${here}/..} # Default to local solana repo
|
||||
fi
|
||||
|
||||
[[ -f "$SOLANA_CONFIG_PRIVATE_DIR"/mint.json ]] || {
|
||||
echo "$SOLANA_CONFIG_PRIVATE_DIR/mint.json not found, create it by running:"
|
||||
echo
|
||||
echo " ${here}/setup.sh -t leader"
|
||||
exit 1
|
||||
}
|
||||
|
||||
rsync_leader_url=$(rsync_url "$leader")
|
||||
set -ex
|
||||
mkdir -p "$SOLANA_CONFIG_DIR"
|
||||
$rsync -vPz "$rsync_leader_url"/config/leader.json "$SOLANA_CONFIG_DIR"/
|
||||
|
||||
# shellcheck disable=SC2086 # $solana_drone should not be quoted
|
||||
exec $solana_drone \
|
||||
-l "$SOLANA_CONFIG_DIR"/leader.json < "$SOLANA_CONFIG_PRIVATE_DIR"/mint.json
|
33
multinode-demo/leader.sh
Executable file
@ -0,0 +1,33 @@
|
||||
#!/bin/bash
|
||||
|
||||
here=$(dirname "$0")
|
||||
# shellcheck source=multinode-demo/common.sh
|
||||
source "$here"/common.sh
|
||||
|
||||
if [[ -d "$SNAP" ]]; then
|
||||
# Exit if mode is not yet configured
|
||||
# (typically the case after the Snap is first installed)
|
||||
[[ -n "$(snapctl get mode)" ]] || exit 0
|
||||
fi
|
||||
|
||||
[[ -f "$SOLANA_CONFIG_DIR"/leader.json ]] || {
|
||||
echo "$SOLANA_CONFIG_DIR/leader.json not found, create it by running:"
|
||||
echo
|
||||
echo " ${here}/setup.sh -t leader"
|
||||
exit 1
|
||||
}
|
||||
|
||||
if [[ -n "$SOLANA_CUDA" ]]; then
|
||||
program="$solana_fullnode_cuda"
|
||||
else
|
||||
program="$solana_fullnode"
|
||||
fi
|
||||
|
||||
tune_networking
|
||||
|
||||
# shellcheck disable=SC2086 # $program should not be quoted
|
||||
exec $program \
|
||||
-l "$SOLANA_CONFIG_DIR"/leader.json \
|
||||
< <(shopt -s nullglob && cat "$SOLANA_CONFIG_DIR"/genesis.log \
|
||||
"$SOLANA_CONFIG_DIR"/tx-*.log) \
|
||||
> "$SOLANA_CONFIG_DIR"/tx-"$(date -u +%Y%m%d%H%M%S%N)".log
|
59
multinode-demo/myip.sh
Executable file
@ -0,0 +1,59 @@
|
||||
#!/bin/bash
|
||||
|
||||
function myip()
|
||||
{
|
||||
# shellcheck disable=SC2207
|
||||
declare ipaddrs=(
|
||||
# query interwebs
|
||||
$(curl -s ifconfig.co)
|
||||
# machine's interfaces
|
||||
$(ifconfig |
|
||||
awk '/inet addr:/ {gsub("addr:","",$2); print $2; next}
|
||||
/inet6 addr:/ {gsub("/.*", "", $3); print $3; next}
|
||||
/inet(6)? / {print $2}'
|
||||
)
|
||||
)
|
||||
|
||||
if (( ! ${#ipaddrs[*]} ))
|
||||
then
|
||||
echo "
|
||||
myip: error: I'm having trouble determining what our IP address is...
|
||||
Are we connected to a network?
|
||||
|
||||
"
|
||||
return 1
|
||||
fi
|
||||
|
||||
|
||||
declare prompt="
|
||||
Please choose the IP address you want to advertise to the network:
|
||||
|
||||
0) ${ipaddrs[0]} <====== this one was returned by the interwebs...
|
||||
"
|
||||
|
||||
for ((i=1; i < ${#ipaddrs[*]}; i++))
|
||||
do
|
||||
prompt+=" $i) ${ipaddrs[i]}
|
||||
"
|
||||
done
|
||||
|
||||
while read -r -p "${prompt}
|
||||
please enter a number [0 for default]: " which
|
||||
do
|
||||
[[ -z ${which} ]] && break;
|
||||
[[ ${which} =~ [0-9]+ ]] && (( which < ${#ipaddrs[*]} )) && break;
|
||||
echo "Ug. invalid entry \"${which}\"...
|
||||
"
|
||||
sleep 1
|
||||
done
|
||||
|
||||
which=${which:-0}
|
||||
|
||||
echo "${ipaddrs[which]}"
|
||||
|
||||
}
|
||||
|
||||
if [[ ${0} == "${BASH_SOURCE[0]}" ]]
|
||||
then
|
||||
myip "$@"
|
||||
fi
|
105
multinode-demo/setup.sh
Executable file
@ -0,0 +1,105 @@
|
||||
#!/bin/bash
|
||||
|
||||
here=$(dirname "$0")
|
||||
# shellcheck source=multinode-demo/common.sh
|
||||
source "$here"/common.sh
|
||||
|
||||
usage () {
|
||||
exitcode=0
|
||||
if [[ -n "$1" ]]; then
|
||||
exitcode=1
|
||||
echo "Error: $*"
|
||||
fi
|
||||
cat <<EOF
|
||||
usage: $0 [-n num_tokens] [-l] [-p] [-t node_type]
|
||||
|
||||
Creates a fullnode configuration
|
||||
|
||||
-n num_tokens - Number of tokens to create
|
||||
-l - Detect network address from local machine configuration, which
|
||||
may be a private IP address inaccessible on the Internet (default)
|
||||
-p - Detect public address using public Internet servers
|
||||
-t node_type - Create configuration files only for this kind of node. Valid
|
||||
options are validator or leader. Creates configuration files
|
||||
for both by default
|
||||
|
||||
EOF
|
||||
exit $exitcode
|
||||
}
|
||||
|
||||
ip_address_arg=-l
|
||||
num_tokens=1000000000
|
||||
node_type_leader=true
|
||||
node_type_validator=true
|
||||
while getopts "h?n:lpt:" opt; do
|
||||
case $opt in
|
||||
h|\?)
|
||||
usage
|
||||
exit 0
|
||||
;;
|
||||
l)
|
||||
ip_address_arg=-l
|
||||
;;
|
||||
p)
|
||||
ip_address_arg=-p
|
||||
;;
|
||||
n)
|
||||
num_tokens="$OPTARG"
|
||||
;;
|
||||
t)
|
||||
node_type="$OPTARG"
|
||||
case $OPTARG in
|
||||
leader)
|
||||
node_type_leader=true
|
||||
node_type_validator=false
|
||||
;;
|
||||
validator)
|
||||
node_type_leader=false
|
||||
node_type_validator=true
|
||||
;;
|
||||
*)
|
||||
usage "Error: unknown node type: $node_type"
|
||||
;;
|
||||
esac
|
||||
;;
|
||||
*)
|
||||
usage "Error: unhandled option: $opt"
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
|
||||
leader_address_args=("$ip_address_arg")
|
||||
validator_address_args=("$ip_address_arg" -b 9000)
|
||||
|
||||
set -e
|
||||
|
||||
echo "Cleaning $SOLANA_CONFIG_DIR"
|
||||
rm -rvf "$SOLANA_CONFIG_DIR"
|
||||
mkdir -p "$SOLANA_CONFIG_DIR"
|
||||
|
||||
|
||||
if $node_type_leader; then
|
||||
rm -rvf "$SOLANA_CONFIG_PRIVATE_DIR"
|
||||
mkdir -p "$SOLANA_CONFIG_PRIVATE_DIR"
|
||||
|
||||
echo "Creating $SOLANA_CONFIG_DIR/mint.json with $num_tokens tokens"
|
||||
$solana_mint <<<"$num_tokens" > "$SOLANA_CONFIG_PRIVATE_DIR"/mint.json
|
||||
|
||||
echo "Creating $SOLANA_CONFIG_DIR/genesis.log"
|
||||
$solana_genesis < "$SOLANA_CONFIG_PRIVATE_DIR"/mint.json > "$SOLANA_CONFIG_DIR"/genesis.log
|
||||
|
||||
echo "Creating $SOLANA_CONFIG_DIR/leader.json"
|
||||
$solana_fullnode_config "${leader_address_args[@]}" > "$SOLANA_CONFIG_DIR"/leader.json
|
||||
fi
|
||||
|
||||
|
||||
if $node_type_validator; then
|
||||
echo "Creating $SOLANA_CONFIG_DIR/validator.json"
|
||||
$solana_fullnode_config "${validator_address_args[@]}" > "$SOLANA_CONFIG_DIR"/validator.json
|
||||
fi
|
||||
|
||||
ls -lh "$SOLANA_CONFIG_DIR"/
|
||||
if $node_type_leader; then
|
||||
ls -lh "$SOLANA_CONFIG_PRIVATE_DIR"
|
||||
fi
|
80
multinode-demo/start_nodes.sh
Executable file
@ -0,0 +1,80 @@
|
||||
#!/bin/bash
|
||||
|
||||
ip_addr_file=$1
|
||||
remote_user=$2
|
||||
ssh_keys=$3
|
||||
|
||||
usage() {
|
||||
echo -e "\\tUsage: $0 <IP Address array> <username> [path to ssh keys]\\n"
|
||||
echo -e "\\t <IP Address array>: A bash script that exports an array of IP addresses, ip_addr_array. Elements of the array are public IP address of remote nodes."
|
||||
echo -e "\\t <username>: The username for logging into remote nodes."
|
||||
echo -e "\\t [path to ssh keys]: The public/private key pair that remote nodes can use to perform rsync and ssh among themselves. Must contain pub, priv and authorized_keys.\\n"
|
||||
}
|
||||
|
||||
# Sample IP Address array file contents
|
||||
# ip_addr_array=(192.168.1.1 192.168.1.5 192.168.2.2)
|
||||
|
||||
if [[ -z "$ip_addr_file" ]]; then
|
||||
usage
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ -z "$remote_user" ]]; then
|
||||
usage
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Build and install locally
|
||||
PATH="$HOME"/.cargo/bin:"$PATH"
|
||||
cargo install --force
|
||||
|
||||
ip_addr_array=()
|
||||
# Get IP address array
|
||||
# shellcheck source=/dev/null
|
||||
source "$ip_addr_file"
|
||||
|
||||
# shellcheck disable=SC2089,SC2016
|
||||
ssh_command_prefix='export PATH="$HOME/.cargo/bin:$PATH"; cd solana; USE_INSTALL=1 ./multinode-demo/'
|
||||
|
||||
count=0
|
||||
leader=
|
||||
for ip_addr in "${ip_addr_array[@]}"; do
|
||||
echo "$ip_addr"
|
||||
|
||||
# Deploy build and scripts to remote node
|
||||
rsync -r -av ~/.cargo/bin "$remote_user"@"$ip_addr":~/.cargo
|
||||
rsync -r -av ./multinode-demo "$remote_user"@"$ip_addr":~/solana/
|
||||
|
||||
# If provided, deploy SSH keys
|
||||
if [[ -z $ssh_keys ]]; then
|
||||
echo "skip copying the ssh keys"
|
||||
else
|
||||
rsync -r -av "$ssh_keys"/* "$remote_user"@"$ip_addr":~/.ssh/
|
||||
fi
|
||||
|
||||
# Stop current nodes
|
||||
ssh "$remote_user"@"$ip_addr" 'pkill -9 solana-fullnode'
|
||||
ssh "$remote_user"@"$ip_addr" 'pkill -9 solana-client-demo'
|
||||
|
||||
# Run setup
|
||||
ssh "$remote_user"@"$ip_addr" "$ssh_command_prefix"'setup.sh -p "$ip_addr"'
|
||||
|
||||
if (( !count )); then
|
||||
# Start the leader on the first node
|
||||
echo "Starting leader node $ip_addr"
|
||||
ssh -n -f "$remote_user"@"$ip_addr" "$ssh_command_prefix"'leader.sh > leader.log 2>&1'
|
||||
leader=${ip_addr_array[0]}
|
||||
else
|
||||
# Start validator on all other nodes
|
||||
echo "Starting validator node $ip_addr"
|
||||
ssh -n -f "$remote_user"@"$ip_addr" "$ssh_command_prefix""validator.sh $remote_user@$leader:~/solana $leader > validator.log 2>&1"
|
||||
fi
|
||||
|
||||
(( count++ ))
|
||||
|
||||
if (( count == ${#ip_addr_array[@]} )); then
|
||||
# Launch client demo on the last node
|
||||
echo "Starting client demo on $ip_addr"
|
||||
ssh -n -f "$remote_user"@"$ip_addr" "$ssh_command_prefix""client.sh $remote_user@$leader:~/solana $count > client.log 2>&1"
|
||||
fi
|
||||
done
|
41
multinode-demo/test/wallet-sanity.sh
Executable file
@ -0,0 +1,41 @@
|
||||
#!/bin/bash -e
|
||||
#
|
||||
# Wallet sanity test
|
||||
#
|
||||
|
||||
here=$(dirname "$0")
|
||||
cd "$here"
|
||||
|
||||
wallet="../wallet.sh $1"
|
||||
|
||||
# Tokens transferred to this address are lost forever...
|
||||
garbage_address=vS3ngn1TfQmpsW1Z4NkLuqNAQFF3dYQw8UZ6TCx9bmq
|
||||
|
||||
check_balance_output() {
|
||||
declare expected_output="$1"
|
||||
exec 42>&1
|
||||
output=$($wallet balance | tee >(cat - >&42))
|
||||
if [[ ! "$output" =~ $expected_output ]]; then
|
||||
echo "Balance is incorrect. Expected: $expected_output"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
pay_and_confirm() {
|
||||
exec 42>&1
|
||||
signature=$($wallet pay "$@" | tee >(cat - >&42))
|
||||
$wallet confirm "$signature"
|
||||
}
|
||||
|
||||
$wallet reset
|
||||
$wallet address
|
||||
check_balance_output "Your balance is: 0"
|
||||
$wallet airdrop --tokens 60
|
||||
check_balance_output "Your balance is: 60"
|
||||
$wallet airdrop --tokens 40
|
||||
check_balance_output "Your balance is: 100"
|
||||
pay_and_confirm --to $garbage_address --tokens 99
|
||||
check_balance_output "Your balance is: 1"
|
||||
|
||||
echo PASS
|
||||
exit 0
|
80
multinode-demo/validator.sh
Executable file
@ -0,0 +1,80 @@
|
||||
#!/bin/bash
|
||||
here=$(dirname "$0")
|
||||
# shellcheck source=multinode-demo/common.sh
|
||||
source "$here"/common.sh
|
||||
|
||||
usage() {
|
||||
if [[ -n "$1" ]]; then
|
||||
echo "$*"
|
||||
echo
|
||||
fi
|
||||
echo "usage: $0 [rsync network path to solana repo on leader machine] [network ip address of leader]"
|
||||
exit 1
|
||||
}
|
||||
|
||||
if [[ "$1" = "-h" || -n "$3" ]]; then
|
||||
usage
|
||||
fi
|
||||
|
||||
if [[ -d "$SNAP" ]]; then
|
||||
# Exit if mode is not yet configured
|
||||
# (typically the case after the Snap is first installed)
|
||||
[[ -n "$(snapctl get mode)" ]] || exit 0
|
||||
|
||||
# Select leader from the Snap configuration
|
||||
leader_address="$(snapctl get leader-address)"
|
||||
if [[ -z "$leader_address" ]]; then
|
||||
# Assume public testnet by default
|
||||
leader_address=35.230.65.68 # testnet.solana.com
|
||||
fi
|
||||
leader="$leader_address"
|
||||
else
|
||||
if [[ -n "$3" ]]; then
|
||||
usage
|
||||
fi
|
||||
|
||||
if [[ -z "$1" ]]; then
|
||||
leader=${1:-${here}/..} # Default to local solana repo
|
||||
leader_address=${2:-127.0.0.1} # Default to local leader
|
||||
elif [[ -z "$2" ]]; then
|
||||
leader="$1"
|
||||
leader_address=$(dig +short "$1" | head -n1)
|
||||
if [[ -z "$leader_address" ]]; then
|
||||
usage "Error: unable to resolve IP address for $leader"
|
||||
fi
|
||||
else
|
||||
leader="$1"
|
||||
leader_address="$2"
|
||||
fi
|
||||
fi
|
||||
leader_port=8001
|
||||
|
||||
if [[ -n "$SOLANA_CUDA" ]]; then
|
||||
program="$solana_fullnode_cuda"
|
||||
else
|
||||
program="$solana_fullnode"
|
||||
fi
|
||||
|
||||
|
||||
[[ -f "$SOLANA_CONFIG_DIR"/validator.json ]] || {
|
||||
echo "$SOLANA_CONFIG_DIR/validator.json not found, create it by running:"
|
||||
echo
|
||||
echo " ${here}/setup.sh -t validator"
|
||||
exit 1
|
||||
}
|
||||
|
||||
rsync_leader_url=$(rsync_url "$leader")
|
||||
|
||||
set -ex
|
||||
SOLANA_LEADER_CONFIG_DIR="$SOLANA_CONFIG_DIR"/leader-config
|
||||
rm -rf "$SOLANA_LEADER_CONFIG_DIR"
|
||||
$rsync -vPrz "$rsync_leader_url"/config/ "$SOLANA_LEADER_CONFIG_DIR"
|
||||
ls -lh "$SOLANA_LEADER_CONFIG_DIR"
|
||||
|
||||
tune_networking
|
||||
|
||||
# shellcheck disable=SC2086 # $program should not be quoted
|
||||
exec $program \
|
||||
-l "$SOLANA_CONFIG_DIR"/validator.json -t "$leader_address:$leader_port" \
|
||||
< <(shopt -s nullglob && cat "$SOLANA_LEADER_CONFIG_DIR"/genesis.log \
|
||||
"$SOLANA_LEADER_CONFIG_DIR"/tx-*.log)
|
47
multinode-demo/wallet.sh
Executable file
@ -0,0 +1,47 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# usage: $0 <rsync network path to solana repo on leader machine>
|
||||
#
|
||||
|
||||
here=$(dirname "$0")
|
||||
# shellcheck source=multinode-demo/common.sh
|
||||
source "$here"/common.sh
|
||||
|
||||
# if $1 isn't host:path, something.com, or a valid local path
|
||||
if [[ ${1%:} != "$1" || "$1" =~ [^.]\.[^.] || -d $1 ]]; then
|
||||
leader=$1 # interpret
|
||||
shift
|
||||
else
|
||||
if [[ -d "$SNAP" ]]; then
|
||||
leader=testnet.solana.com # Default to testnet when running as a Snap
|
||||
else
|
||||
leader=$here/.. # Default to local solana repo
|
||||
fi
|
||||
fi
|
||||
|
||||
if [[ "$1" = "reset" ]]; then
|
||||
echo Wallet resetting
|
||||
rm -rf "$SOLANA_CONFIG_CLIENT_DIR"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
rsync_leader_url=$(rsync_url "$leader")
|
||||
|
||||
set -e
|
||||
mkdir -p "$SOLANA_CONFIG_CLIENT_DIR"
|
||||
if [[ ! -r "$SOLANA_CONFIG_CLIENT_DIR"/leader.json ]]; then
|
||||
(
|
||||
set -x
|
||||
$rsync -vPz "$rsync_leader_url"/config/leader.json "$SOLANA_CONFIG_CLIENT_DIR"/
|
||||
)
|
||||
fi
|
||||
|
||||
client_json="$SOLANA_CONFIG_CLIENT_DIR"/client.json
|
||||
if [[ ! -r $client_json ]]; then
|
||||
$solana_mint <<<0 > "$client_json"
|
||||
fi
|
||||
|
||||
set -x
|
||||
# shellcheck disable=SC2086 # $solana_wallet should not be quoted
|
||||
exec $solana_wallet \
|
||||
-l "$SOLANA_CONFIG_CLIENT_DIR"/leader.json -m "$client_json" "$@"
|
190
rfcs/rfc-001-smart-contracts-engine.md
Normal file
@ -0,0 +1,190 @@
|
||||
# Smart Contracts Engine
|
||||
|
||||
The goal of this RFC is to define a set of constraints for APIs and runtime such that we can execute our smart contracts safely on massively parallel hardware such as a GPU. Our runtime is built around an OS *syscall* primitive. The difference in a blockchain is that the OS (here, the Solana kernel) performs a cryptographic check of memory region ownership before accessing the memory.
|
||||
|
||||
## Version
|
||||
|
||||
version 0.1
|
||||
|
||||
## Toolchain Stack
|
||||
|
||||
+---------------------+ +---------------------+
|
||||
| | | |
|
||||
| +------------+ | | +------------+ |
|
||||
| | | | | | | |
|
||||
| | frontend | | | | verifier | |
|
||||
| | | | | | | |
|
||||
| +-----+------+ | | +-----+------+ |
|
||||
| | | | | |
|
||||
| | | | | |
|
||||
| +-----+------+ | | +-----+------+ |
|
||||
| | | | | | | |
|
||||
| | llvm | | | | loader | |
|
||||
| | | +------>+ | | |
|
||||
| +-----+------+ | | +-----+------+ |
|
||||
| | | | | |
|
||||
| | | | | |
|
||||
| +-----+------+ | | +-----+------+ |
|
||||
| | | | | | | |
|
||||
| | ELF | | | | runtime | |
|
||||
| | | | | | | |
|
||||
| +------------+ | | +------------+ |
|
||||
| | | |
|
||||
| client | | solana |
|
||||
+---------------------+ +---------------------+
|
||||
|
||||
[Figure 1. Smart Contracts Stack]
|
||||
|
||||
In Figure 1, an untrusted client creates a program in the front-end language of her choice (like C/C++/Rust/Lua) and compiles it with LLVM to a position-independent shared object ELF targeting BPF bytecode. Solana will safely load and execute the ELF.
|
||||
|
||||
## Bytecode
|
||||
|
||||
Our bytecode is based on the Berkeley Packet Filter. The requirements for BPF overlap almost exactly with the requirements we have:
|
||||
|
||||
1. Deterministic amount of time to execute the code
|
||||
2. Bytecode that is portable between machine instruction sets
|
||||
3. Verified memory accesses
|
||||
4. Fast to load the object, verify the bytecode and JIT to local machine instruction set
|
||||
|
||||
For 1, that means that loops are unrolled, and any backward jumps can be guarded with a check against the number of instructions that have been executed up to that point. If the limit is reached, the program yields its execution. This involves saving the stack and the current instruction index.
|
||||
|
||||
For 2, the BPF bytecode already easily maps to x86–64, arm64 and other instruction sets.
|
||||
|
||||
For 3, every load and store that is relative can be checked to be within the expected memory that is passed into the ELF. Dynamic loads and stores can do a runtime check against available memory; these will be slow and should be avoided.
|
||||
|
||||
For 4, we use a fully linked PIC ELF with just a single RX segment. Effectively we are linking a shared object with `-fpic -target bpf` and with a linker script to collect everything into a single RX segment. Writable globals are not supported.
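To make requirement 1 concrete, here is a minimal, hypothetical sketch (not the actual Solana or BPF implementation; `Instruction`, `ExecState`, and the budget constant are illustrative names) of how an instruction budget can bound execution and force a program to yield:

```
/// Illustrative opcode set; the real BPF instruction set is much richer.
enum Instruction {
    Nop,
    JumpBack(usize), // jump backwards by `n` instructions
    Halt,
}

/// Result of running one bounded slice of a program.
enum ExecState {
    Finished,
    /// Budget exhausted: remember where to resume so execution can continue
    /// in a later slice (the "yield" described above).
    Yielded { resume_at: usize },
}

const MAX_INSTRUCTIONS_PER_SLICE: u64 = 10_000;

fn run_slice(program: &[Instruction], mut pc: usize) -> ExecState {
    let mut executed = 0u64;
    while pc < program.len() {
        if executed >= MAX_INSTRUCTIONS_PER_SLICE {
            return ExecState::Yielded { resume_at: pc };
        }
        executed += 1;
        match &program[pc] {
            Instruction::Nop => pc += 1,
            // Every backward jump passes through the budget check above, so
            // even an infinite loop yields after a bounded number of steps.
            Instruction::JumpBack(n) => pc -= *n,
            Instruction::Halt => return ExecState::Finished,
        }
    }
    ExecState::Finished
}
```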
|
||||
|
||||
### Address Checks
|
||||
|
||||
The interface to the module takes a `&mut Vec<Vec<u8>>` in Rust, or `int sz, void* data[sz], int szs[sz]` in C. Given the module's bytecode, for each method we need to analyze the bounds on loads and stores into each buffer the module uses. This check needs to be done `on chain`, and after those bounds are computed we can verify that the user-supplied array of buffers will not cause a memory fault. Loads and stores that we cannot analyze can be replaced with `safe_load` and `safe_store` instructions that will check the table for access.
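As a rough illustration of what a `safe_load` might check, here is a minimal sketch under assumed names and layout (not the actual on-chain verifier):

```
/// Hypothetical bounds check behind a `safe_load`: the access must be fully
/// contained in one of the caller-supplied buffers, otherwise it is rejected.
fn safe_load(buffers: &[Vec<u8>], buf_index: usize, offset: usize, len: usize) -> Option<&[u8]> {
    let buf = buffers.get(buf_index)?;
    let end = offset.checked_add(len)?;
    if end > buf.len() {
        return None;
    }
    Some(&buf[offset..end])
}
```

A `safe_store` would perform the same containment check before writing.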
|
||||
|
||||
## Loader
|
||||
The loader is our first smart contract. The job of this contract is to load the actual program with its own instance data. The loader will verify the bytecode and that the object implements the expected entry points.
|
||||
|
||||
Since there is only one RX segment, the context for the contract instance is passed into each entry point as well as the event data for that entry point.
|
||||
|
||||
A client will create a transaction to create a new loader instance:
|
||||
|
||||
`Solana_NewLoader(Loader Instance PubKey, proof of key ownership, space I need for my elf)`
|
||||
|
||||
A client will then issue a series of transactions to load its ELF into the loader instance it created:
|
||||
|
||||
`Loader_UploadElf(Loader Instance PubKey, proof of key ownership, pos start, pos end, data)`
|
||||
|
||||
At this point the client can create a new instance of the module with its own instance address:
|
||||
|
||||
`Loader_NewInstance(Loader Instance PubKey, proof of key ownership, Instance PubKey, proof of key ownership)`
|
||||
|
||||
Once the instance has been created, the client may need to upload more user data to solana to configure this instance:
|
||||
|
||||
`Instance_UploadModuleData(Instance PubKey, proof of key ownership, pos start, pos end, data)`
|
||||
|
||||
Now clients can `start` the instance:
|
||||
|
||||
`Instance_Start(Instance PubKey, proof of key ownership)`
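Taken together, these transactions could be modeled as a single message enum. The sketch below is purely illustrative: the `LoaderMessage` type, its field names, and the stand-in key/signature aliases are assumptions, not the project's actual types.

```
// Stand-ins so the sketch is self-contained; the real code has proper
// PublicKey/Signature types in its `signature` module.
type PublicKey = [u8; 32];
type Signature = [u8; 64];

/// One variant per loader/instance transaction listed above.
enum LoaderMessage {
    NewLoader { loader: PublicKey, proof: Signature, elf_size: u64 },
    UploadElf { loader: PublicKey, proof: Signature, start: u64, end: u64, data: Vec<u8> },
    NewInstance { loader: PublicKey, loader_proof: Signature, instance: PublicKey, instance_proof: Signature },
    UploadModuleData { instance: PublicKey, proof: Signature, start: u64, end: u64, data: Vec<u8> },
    Start { instance: PublicKey, proof: Signature },
}
```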
|
||||
|
||||
## Runtime
|
||||
|
||||
Our goal with the runtime is to have a general purpose execution environment that is highly parallelizable and doesn't require dynamic resource management. We want to execute as many contracts as we can in parallel, and have them pass or fail without a destructive state change.
|
||||
|
||||
### State and Entry Point
|
||||
|
||||
State is addressed by an account, which is at the moment simply the PubKey. Our goal is to eliminate dynamic memory allocation in the smart contract itself, so the contract is a function that takes a mapping of [(PubKey, State)] and returns [(PubKey, State')]. The output set of keys is a subset of the input. Three basic kinds of state exist:
|
||||
|
||||
* Instance State
|
||||
* Participant State
|
||||
* Caller State
|
||||
|
||||
There isn't any difference in how each is implemented, but conceptually Participant State is memory that is allocated for each participant in the contract. Instance State is memory that is allocated for the contract itself, and Caller State is memory that the transaction's caller has allocated.
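A minimal sketch of that shape in Rust, assuming illustrative `PublicKey`/`State` aliases and an `apply` entry point that are not the project's real names:

```
type PublicKey = [u8; 32];
type State = Vec<u8>;

/// The contract is a pure function over keyed pages: it receives the pages
/// named by the transaction plus the transaction's userdata, and returns the
/// (possibly updated) pages. The output keys are a subset of the input keys
/// and the contract allocates no memory of its own.
fn apply(pages: Vec<(PublicKey, State)>, _userdata: &[u8]) -> Vec<(PublicKey, State)> {
    // A real contract would update the page contents; this pass-through only
    // shows the shape of the interface.
    pages
}
```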
|
||||
|
||||
|
||||
### Call
|
||||
|
||||
```
|
||||
void call(
|
||||
const struct instance_data *data,
|
||||
const uint8_t kind[], //instance|participant|caller|read|write
|
||||
const uint8_t *keys[],
|
||||
uint8_t *data[],
|
||||
int num,
|
||||
uint8_t dirty[], //dirty memory bits
|
||||
uint8_t *userdata, //current transaction data
|
||||
);
|
||||
```
|
||||
|
||||
To call this operation, the transaction that is destined to the contract instance specifies what keyed state it should present to the `call` function. To allocate the state memory or a call context, the client has to first call a function on the contract with the designated address that will own the state.
|
||||
|
||||
At its core, this is a system call that requires cryptographic proof of ownership of memory regions instead of an OS that checks page tables for access rights.
|
||||
|
||||
* `Instance_AllocateContext(Instance PubKey, My PubKey, Proof of key ownership)`
|
||||
|
||||
Any transaction can then call `call` on the contract with a set of keys. It's up to the contract itself to manage ownership:
|
||||
|
||||
* `Instance_Call(Instance PubKey, [Context PubKeys], proofs of ownership, userdata...)`
|
||||
|
||||
Contracts should be able to read any state that is part of solana, but only write to state that the contract allocated.
|
||||
|
||||
#### Caller State
|
||||
|
||||
Caller `state` is memory allocated for the `call` that belongs to the public key that is issuing the `call`. This is the caller's context.
|
||||
|
||||
#### Instance State
|
||||
|
||||
Instance `state` is memory that belongs to this contract instance. We may also need module-wide `state` as well.
|
||||
|
||||
#### Participant State
|
||||
|
||||
Participant `state` is any other memory. In some cases it may make sense to have these allocated as part of the call by the caller.
|
||||
|
||||
### Reduce
|
||||
|
||||
Some operations on the contract will require iteration over all the keys. To make this parallelizable the iteration is broken up into reduce calls which are combined.
|
||||
|
||||
```
|
||||
void reduce_m(
|
||||
const struct instance_data *data,
|
||||
const uint8_t *keys[],
|
||||
const uint8_t *data[],
|
||||
int num,
|
||||
uint8_t *reduce_data,
|
||||
);
|
||||
|
||||
void reduce_r(
|
||||
const struct instance_data *data,
|
||||
const uint8_t *reduce_data[],
|
||||
int num,
|
||||
uint8_t *reduce_data,
|
||||
);
|
||||
```
|
||||
|
||||
### Execution
|
||||
|
||||
Transactions are batched and processed in parallel at each stage.
|
||||
```
|
||||
+-----------+ +--------------+ +-----------+ +---------------+
|
||||
| sigverify |-+->| debit commit |---+->| execution |-+->| memory commit |
|
||||
+-----------+ | +--------------+ | +-----------+ | +---------------+
|
||||
| | |
|
||||
| +---------------+ | | +--------------+
|
||||
|->| memory verify |->+ +->| debit undo |
|
||||
+---------------+ | +--------------+
|
||||
|
|
||||
| +---------------+
|
||||
+->| credit commit |
|
||||
+---------------+
|
||||
|
||||
|
||||
```
|
||||
The `debit verify` stage is very similar to `memory verify`. Proof of key ownership is used to check if the caller's key has some state allocated with the contract, then the memory is loaded and executed. After the execution stage, the dirty pages are written back by the contract. Because we know all the memory accesses during execution, we can batch transactions that do not interfere with each other. We can also apply the `debit undo` and `credit commit` stages of the transaction. `debit undo` is run in case of an exception during contract execution; only transfers may be reversed, fees are committed to solana.
|
||||
|
||||
### GPU execution
|
||||
|
||||
A single contract can read and write to separate key pairs without interference. These separate calls to the same contract can execute on the same GPU thread over different memory using different SIMD lanes.
|
||||
|
||||
## Notes
|
||||
|
||||
1. There is no dynamic memory allocation.
2. Persistent memory is allocated to a Key with ownership.
3. Contracts can `call` to update key-owned state.
4. Contracts can `reduce` over the memory to aggregate state.
5. `call` is just a *syscall* that does a cryptographic check of memory ownership.
|
122
rfcs/rfc-002-consensus.md
Normal file
@ -0,0 +1,122 @@
|
||||
# Consensus
|
||||
|
||||
VERY WIP
|
||||
|
||||
The goal of this RFC is to define the consensus algorithm used in solana. This proposal covers a Proof of Stake algorithm that leverages Proof of History. PoH is a permissionless clock for blockchain that is available before consensus. This PoS approach leverages PoH to make strong assumptions about time between partitions.
|
||||
|
||||
## Version
|
||||
|
||||
version 0.1
|
||||
|
||||
## Message Flow
|
||||
|
||||
1. Transactions are ingested at the leader.
|
||||
2. Leader filters for valid transactions
|
||||
3. Leader executes valid transactions on its state
|
||||
4. Leader packages transactions into blobs
|
||||
5. Leader transmits blobs to validator nodes.
|
||||
a. The set of supermajority + `M` by stake weight of nodes is rotated in round robin fashion.
|
||||
6. Validators retransmit blobs to peers in their set and to further downstream nodes.
|
||||
7. Validators validate the transactions and execute them on their state.
|
||||
8. Validators compute the hash of the state.
|
||||
9. Validators transmit votes to the leader.
|
||||
a. Votes are signatures of the hash of the computed state.
|
||||
10. Leader executes the votes as any other transaction and broadcasts them out to the network
|
||||
11. Validators observe their votes, and all the votes from the network.
|
||||
12. Validators continue voting if the supermajority of stake is observed in the vote for the same hash.
|
||||
|
||||
Supermajority is defined as `2/3rds + 1` vote of the PoS stakes.
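For example, a stake-weighted supermajority check might look like the following sketch (the exact rounding convention is an assumption here; the RFC only states `2/3rds + 1`):

```
/// True when strictly more than two thirds of the total stake voted for the
/// same state hash.
fn is_supermajority(voted_stake: u64, total_stake: u64) -> bool {
    // Multiply instead of divide to avoid integer truncation.
    3 * voted_stake > 2 * total_stake
}
```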
|
||||
|
||||
## Staking
|
||||
|
||||
Validators `stake` some of their spendable sol into a staking account. The stakes are not spendable and can only be used for voting.
|
||||
|
||||
```
|
||||
CreateStake(
|
||||
PoH count,
|
||||
PoH hash,
|
||||
source public key,
|
||||
amount,
|
||||
destination public key,
|
||||
proof of ownership of destination public key,
|
||||
signature of the message with the source keypair
|
||||
)
|
||||
```
|
||||
|
||||
Creating the stake has a warmup period of TBD. Unstaking requires the node to miss a certain number of validation votes.
|
||||
|
||||
## Validation Votes
|
||||
|
||||
```
|
||||
Validate(
|
||||
PoH count,
|
||||
PoH hash,
|
||||
stake public key,
|
||||
signature of the state,
|
||||
signature of the message with the stake keypair
|
||||
)
|
||||
```
|
||||
|
||||
## Validator Slashing
|
||||
|
||||
Validators `stake` some of their spendable sol into a staking account. The stakes are not spendable and can only be used for voting.
|
||||
|
||||
```
|
||||
Slash(Validate(
|
||||
PoH count,
|
||||
PoH hash,
|
||||
stake public key,
|
||||
...
|
||||
signature of the message with the stake keypair
|
||||
))
|
||||
```
|
||||
|
||||
When the `Slash` vote is processed, validators should lookup `PoH hash` at `PoH count` and compare it with the message. If they do not match, the stake at `stake public key` should be set to `0`.
|
||||
|
||||
## Leader Slashing
|
||||
|
||||
TBD. The goal of this is to discourage leaders from generating multiple PoH streams.
|
||||
|
||||
## Validation Vote Contract
|
||||
|
||||
The goal of this contract is to simulate the economic cost of mining on a shorter branch.
|
||||
|
||||
1. With my signature I am certifying that I computed `state hash` at `PoH count` and `PoH hash`.
|
||||
2. I will not vote on a branch that doesn't contain this message for at least `N` counts, or until `PoH count` + `N` is reached by the PoH stream (see the sketch after this list).
|
||||
3. I will not vote for any other branch below `PoH count`.
|
||||
a. if there are other votes not present in this PoH history the validator may need to `cancel` them before creating this vote.
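A minimal sketch of the lockout implied by points 2 and 3, with illustrative names (`Vote`, `lockout_n`) that are not part of the RFC:

```
/// A vote certifies a state hash at `vote_count` and promises not to vote on
/// a conflicting branch for `lockout_n` further counts.
struct Vote {
    vote_count: u64,
    lockout_n: u64,
}

/// The validator may vote on another branch only once the PoH stream has
/// advanced past the promised lockout window.
fn may_vote_on_other_branch(vote: &Vote, current_poh_count: u64) -> bool {
    current_poh_count >= vote.vote_count + vote.lockout_n
}
```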
|
||||
|
||||
## Leader Seed Generation
|
||||
|
||||
Leader selection is decided via a random seed. The process is as follows:
|
||||
|
||||
1. Periodically, at a specific `PoH count`, select the first vote signatures that create a supermajority from the previous round.
2. Append them together.
3. Hash the string for `N` counts via a similar process to PoH itself.
4. The resulting hash is the random seed for `M` counts, where `M > N` (see the sketch below).
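A rough sketch of that derivation follows; `DefaultHasher` is used only to keep the example self-contained, whereas the real design would reuse the PoH hash function:

```
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

/// Stand-in for the ledger's cryptographic hash.
fn hash_bytes(bytes: &[u8]) -> u64 {
    let mut hasher = DefaultHasher::new();
    bytes.hash(&mut hasher);
    hasher.finish()
}

/// Append the supermajority vote signatures, then hash the result `n` times,
/// PoH-style; the final value is the leader-selection seed.
fn leader_seed(vote_signatures: &[Vec<u8>], n: u64) -> u64 {
    let mut seed = hash_bytes(&vote_signatures.concat());
    for _ in 1..n {
        seed = hash_bytes(&seed.to_le_bytes());
    }
    seed
}
```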
|
||||
|
||||
## Leader Ranking and Rotation
|
||||
|
||||
Leaders transmit for a count of `T`. When `T` is reached, all the validators should switch to the next-ranked leader. To rank leaders, the supermajority + `M` nodes are shuffled using the random seed calculated above.
|
||||
|
||||
TBD: define a ranking for critical partitions without a node from supermajority + `M` set.
|
||||
|
||||
## Partition selection
|
||||
|
||||
Validators should select the first branch to reach finality, or the highest ranking leader.
|
||||
|
||||
## Examples
|
||||
|
||||
### Small Partition
|
||||
1. Network partition M occurs for 10% of the nodes
|
||||
2. The larger partition K, with 90% of the stake weight continues to operate as normal
|
||||
3. M cycles through the ranks until one of them is leader.
|
||||
4. M validators observe 10% of the vote pool, finality is not reached
|
||||
5. M and K re-connect.
|
||||
6. M validators cancel their votes on K which are below K's `PoH count`
|
||||
|
||||
### Leader Timeout
|
||||
1. Next rank node observes a timeout.
|
||||
2. Nodes receiving both PoH streams pick the higher rank node.
|
||||
3. 2, causes a partition, since nodes can only vote for 1 leader.
|
||||
4. Partition is resolved just like in the [Small Partition](#small-partition) example.
|
54
rfcs/rfc-003-storage.md
Normal file
@ -0,0 +1,54 @@
|
||||
# Storage
|
||||
|
||||
The goal of this RFC is to define a protocol for storing a very large ledger over a p2p network that is verified by solana validators. At full capacity on a 1 Gbps network, solana will generate 4 petabytes of data per year. To prevent the network from centralizing around full nodes that have to store the full data set, this protocol proposes a way for mining nodes to provide storage capacity for pieces of the network.
|
||||
|
||||
# Version
|
||||
|
||||
version 0.1
|
||||
|
||||
# Background
|
||||
|
||||
The basic idea of Proof of Replication is to encrypt a dataset with a public symmetric key using CBC encryption, then hash the encrypted dataset. The main problem with the naive approach is that a dishonest storage node can stream the encryption and delete the data as it is hashed. The simple solution is to force the hash to be computed over the reverse of the encryption, or perhaps in a random order. This ensures that all the data is present during the generation of the proof, and it also requires the validator to have the entirety of the encrypted data present for verification of every proof of every identity. So the space required to validate is `(Number of Proofs)*(data size)`.
|
||||
|
||||
# Optimization with PoH
|
||||
|
||||
Our improvement on this approach is to randomly sample the encrypted blocks faster than it takes to encrypt, and record the hash of those samples into the PoH ledger. Thus the blocks stay in the exact same order for every PoRep, and verification can stream the data and verify all the proofs in a single batch. This way we can verify multiple proofs concurrently, each one on its own CUDA core. With the current generation of graphics cards our network can support up to 14k replication identities or symmetric keys. The total space required for verification is `(2 CBC blocks) * (Number of Identities)`, with a core count equal to (Number of Identities). A CBC block is expected to be 1MB in size.
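The sampling idea can be sketched as follows; `DefaultHasher` and the tiny linear-congruential generator are only stand-ins to keep the example self-contained, whereas the proposal derives sample positions from a signature of a PoH hash:

```
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

/// Fold a seed-selected subset of CBC blocks from the encrypted segment into
/// a single proof hash. Blocks are visited in a deterministic order, so a
/// verifier replaying the same seed recomputes the same value.
fn porep_sample_hash(encrypted: &[u8], block_size: usize, seed: u64, samples: usize) -> u64 {
    let num_blocks = encrypted.len() / block_size;
    assert!(num_blocks > 0, "segment smaller than one CBC block");
    let mut hasher = DefaultHasher::new();
    let mut x = seed;
    for _ in 0..samples {
        // Cheap deterministic PRNG for the sketch only.
        x = x.wrapping_mul(6364136223846793005).wrapping_add(1442695040888963407);
        let block = (x as usize) % num_blocks;
        encrypted[block * block_size..(block + 1) * block_size].hash(&mut hasher);
    }
    hasher.finish()
}
```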
|
||||
|
||||
# Network
|
||||
|
||||
Validators for PoRep are the same validators that are verifying transactions. They have some stake that they have put up as collateral that ensures that their work is honest. If you can prove that a validator verified a fake PoRep, then the validator's stake can be slashed.
|
||||
|
||||
Replicators are specialized thin clients. They download a part of the ledger and store it, and provide PoReps of storing the ledger. For each verified PoRep replicators earn a reward of sol from the mining pool.
|
||||
|
||||
# Constraints
|
||||
|
||||
We have the following constraints:
|
||||
* At most 14k replication identities can be used, because that's how many CUDA cores we can fit in a $5k box at the moment.
|
||||
* Verification requires generating the CBC blocks. That requires space of 2 blocks per identity, and 1 CUDA core per identity for the same dataset. So as many identities as possible should be batched at once, with as many proofs for those identities as possible verified concurrently for the same dataset.
|
||||
|
||||
# Validation and Replication Protocol
|
||||
|
||||
1. Network sets the replication target number, let's say 1k. 1k PoRep identities are created from signatures of a PoH hash, so they are tied to a specific PoH hash. It doesn't matter who creates them; they may simply be the last 1k validation signatures we saw for the ledger at that count. This may be just the initial batch of identities, because we want to stagger identity rotation.
|
||||
2. Any client can use any of these identities to create PoRep proofs. Replicator identities are the CBC encryption keys.
|
||||
3. Periodically at a specific PoH count, replicators that want to create PoRep proofs sign the PoH hash at that count. That signature is the seed used to pick the block and identity to replicate. A block is 1TB of ledger.
|
||||
4. Periodically at a specific PoH count, replicator submits PoRep proofs for their selected block. A signature of the PoH hash at that count is the seed used to sample the 1TB encrypted block, and hash it. This is done faster than it takes to encrypt the 1TB block with the original identity.
|
||||
5. Replicators must submit some number of fake proofs, which they can prove to be fake by providing the seed for the hash result.
|
||||
6. Periodically at a specific PoH count, validators sign the hash and use the signature to select the 1TB block that they need to validate. They batch all the identities and proofs and submit approval for all the verified ones.
|
||||
7. After #6, replicator clients submit the proofs of fake proofs.
|
||||
|
||||
For any random seed, we force everyone to use a signature that is derived from a PoH hash. Everyone must use the same count, so the same PoH hash is signed by every participant. The signatures are then each cryptographically tied to the keypair, which prevents a leader from grinding on the resulting value for more than 1 identity.
|
||||
|
||||
We need to stagger the rotation of the identity keys. Once this gets going, the next identity could be generated by hashing itself with a PoH hash, or via some other process based on the validation signatures.
|
||||
|
||||
Since there are many more client identities than encryption identities, we need to split the reward for multiple clients, and prevent Sybil attacks from generating many clients to acquire the same block of data. To remain BFT we want to avoid a single human entity from storing all the replications of a single chunk of the ledger.
|
||||
|
||||
Our solution to this is to force the clients to continue using the same identity. If the first round is used to acquire the same block for many client identities, the second round for the same client identities will force a redistribution of the signatures, and therefore PoRep identities and blocks. Thus to get a reward for storage clients need to store the first block for free and the network can reward long lived client identities more than new ones.
|
||||
|
||||
# Notes
|
||||
|
||||
* We can reduce the costs of verification of PoRep by using PoH, and actually make it feasible to verify a large number of proofs for a global dataset.
|
||||
* We can eliminate grinding by forcing everyone to sign the same PoH hash and use the signatures as the seed
|
||||
* The game between validators and replicators is over random blocks and random encryption identities and random data samples. The goal of randomization is to prevent colluding groups from having overlap on data or validation.
|
||||
* Replicator clients fish for lazy validators by submitting fake proofs that they can prove are fake.
|
||||
* Replication identities are just symmetric encryption keys, the number of them on the network is our storage replication target. Many more client identities can exist than replicator identities, so unlimited number of clients can provide proofs of the same replicator identity.
|
||||
* To defend against Sybil client identities that try to store the same block we force the clients to store for multiple rounds before receiving a reward.
|
43
scripts/perf-plot.py
Executable file
@ -0,0 +1,43 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
import matplotlib
|
||||
matplotlib.use('Agg')
|
||||
|
||||
import matplotlib.pyplot as plt
|
||||
import json
|
||||
import sys
|
||||
|
||||
stages_to_counters = {}
|
||||
stages_to_time = {}
|
||||
|
||||
if len(sys.argv) != 2:
|
||||
print("USAGE: {} <input file>".format(sys.argv[0]))
|
||||
sys.exit(1)
|
||||
|
||||
with open(sys.argv[1]) as fh:
|
||||
for line in fh.readlines():
|
||||
if "COUNTER" in line:
|
||||
json_part = line[line.find("{"):]
|
||||
x = json.loads(json_part)
|
||||
counter = x['name']
|
||||
if not (counter in stages_to_counters):
|
||||
stages_to_counters[counter] = []
|
||||
stages_to_time[counter] = []
|
||||
stages_to_counters[counter].append(x['counts'])
|
||||
stages_to_time[counter].append(x['now'])
|
||||
|
||||
fig, ax = plt.subplots()
|
||||
|
||||
for stage in stages_to_counters.keys():
|
||||
plt.plot(stages_to_time[stage], stages_to_counters[stage], label=stage)
|
||||
|
||||
plt.xlabel('ms')
|
||||
plt.ylabel('count')
|
||||
|
||||
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
|
||||
ncol=2, mode="expand", borderaxespad=0.)
|
||||
|
||||
plt.locator_params(axis='x', nbins=10)
|
||||
plt.grid(True)
|
||||
|
||||
plt.savefig("perf.pdf")
|
17
snap/README.md
Normal file
@ -0,0 +1,17 @@
|
||||
## Development
|
||||
|
||||
If you're running Ubuntu 16.04 and already have `snapcraft` installed, simply
|
||||
run:
|
||||
```
|
||||
$ snapcraft
|
||||
```
|
||||
|
||||
For other systems we provide a docker image that can be used for snap
|
||||
development:
|
||||
```
|
||||
$ ./ci/docker-run.sh solanalabs/snapcraft snapcraft -d
|
||||
```
|
||||
|
||||
## Reference
|
||||
* https://docs.snapcraft.io/
|
||||
|
34
snap/hooks/configure
vendored
Executable file
@ -0,0 +1,34 @@
|
||||
#!/bin/bash -e
|
||||
|
||||
echo Stopping daemons
|
||||
snapctl stop --disable solana.daemon-drone
|
||||
snapctl stop --disable solana.daemon-leader
|
||||
snapctl stop --disable solana.daemon-validator
|
||||
|
||||
mode="$(snapctl get mode)"
|
||||
if [[ -z "$mode" ]]; then
|
||||
exit 0
|
||||
fi
|
||||
|
||||
ip_address_arg=-p # Use public IP address (TODO: make this configurable?)
|
||||
num_tokens="$(snapctl get num-tokens)"
|
||||
|
||||
case $mode in
|
||||
leader+drone)
|
||||
$SNAP/bin/setup.sh ${num_tokens:+-n $num_tokens} ${ip_address_arg} -t leader
|
||||
snapctl start --enable solana.daemon-leader
|
||||
snapctl start --enable solana.daemon-drone
|
||||
;;
|
||||
leader)
|
||||
$SNAP/bin/setup.sh ${num_tokens:+-n $num_tokens} ${ip_address_arg} -t leader
|
||||
snapctl start --enable solana.daemon-leader
|
||||
;;
|
||||
validator)
|
||||
$SNAP/bin/setup.sh ${ip_address_arg} -t validator
|
||||
snapctl start --enable solana.daemon-validator
|
||||
;;
|
||||
*)
|
||||
echo "Error: Unknown mode: $mode"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
92
snap/snapcraft.yaml
Normal file
@ -0,0 +1,92 @@
|
||||
name: solana
|
||||
version: git
|
||||
summary: Blockchain, Rebuilt for Scale
|
||||
description: |
|
||||
710,000 tx/s with off-the-shelf hardware and no sharding.
|
||||
Scales with Moore's Law.
|
||||
grade: devel
|
||||
|
||||
# TODO: solana-perf-fullnode does not yet run with 'strict' confinement due to the
|
||||
# CUDA dependency, so use 'devmode' confinement for now
|
||||
confinement: devmode
|
||||
|
||||
hooks:
|
||||
configure:
|
||||
plugs: [network]
|
||||
|
||||
apps:
|
||||
drone:
|
||||
command: solana-drone
|
||||
plugs:
|
||||
- network
|
||||
- network-bind
|
||||
fullnode:
|
||||
command: solana-fullnode
|
||||
plugs:
|
||||
- network
|
||||
- network-bind
|
||||
- home
|
||||
fullnode-cuda:
|
||||
command: solana-fullnode-cuda
|
||||
plugs:
|
||||
- network
|
||||
- network-bind
|
||||
- home
|
||||
fullnode-config:
|
||||
command: solana-fullnode-config
|
||||
plugs:
|
||||
- network
|
||||
- network-bind
|
||||
genesis:
|
||||
command: solana-genesis
|
||||
mint:
|
||||
command: solana-mint
|
||||
client-demo:
|
||||
command: solana-client-demo
|
||||
wallet:
|
||||
# TODO: Merge wallet.sh functionality into solana-wallet proper
|
||||
command: wallet.sh
|
||||
#command: solana-wallet
|
||||
|
||||
daemon-validator:
|
||||
daemon: simple
|
||||
command: validator.sh
|
||||
|
||||
daemon-leader:
|
||||
daemon: simple
|
||||
command: leader.sh
|
||||
|
||||
daemon-drone:
|
||||
daemon: simple
|
||||
command: drone.sh
|
||||
|
||||
parts:
|
||||
solana:
|
||||
plugin: nil
|
||||
prime:
|
||||
- bin
|
||||
- usr/lib/libgf_complete.so.1
|
||||
- usr/lib/libJerasure.so.2
|
||||
override-build: |
|
||||
# Build/install solana-fullnode-cuda
|
||||
./fetch-perf-libs.sh
|
||||
cargo install --features=cuda,erasure --root $SNAPCRAFT_PART_INSTALL --bin solana-fullnode
|
||||
mv $SNAPCRAFT_PART_INSTALL/bin/solana-fullnode $SNAPCRAFT_PART_INSTALL
|
||||
rm -rf $SNAPCRAFT_PART_INSTALL/bin/*
|
||||
mv $SNAPCRAFT_PART_INSTALL/solana-fullnode $SNAPCRAFT_PART_INSTALL/bin/solana-fullnode-cuda
|
||||
mkdir -p $SNAPCRAFT_PART_INSTALL/usr/lib/
|
||||
cp -f libJerasure.so $SNAPCRAFT_PART_INSTALL/usr/lib/libJerasure.so.2
|
||||
cp -f libgf_complete.so $SNAPCRAFT_PART_INSTALL/usr/lib/libgf_complete.so.1
|
||||
|
||||
# Build/install all other programs
|
||||
cargo install --root $SNAPCRAFT_PART_INSTALL --bins
|
||||
|
||||
# Install multinode scripts
|
||||
mkdir -p $SNAPCRAFT_PART_INSTALL/bin
|
||||
cp -av multinode-demo/* $SNAPCRAFT_PART_INSTALL/bin/
|
||||
|
||||
# TODO: build rsync from source instead of sneaking it in from the host
|
||||
# system...
|
||||
set -x
|
||||
mkdir -p $SNAPCRAFT_PART_INSTALL/bin
|
||||
cp -av /usr/bin/rsync $SNAPCRAFT_PART_INSTALL/bin/
|
885
src/bank.rs
Normal file
@ -0,0 +1,885 @@
|
||||
//! The `bank` module tracks client balances and the progress of smart
|
||||
//! contracts. It offers a high-level API that signs transactions
|
||||
//! on behalf of the caller, and a low-level API for when they have
|
||||
//! already been signed and verified.
|
||||
|
||||
extern crate libc;
|
||||
|
||||
use chrono::prelude::*;
|
||||
use entry::Entry;
|
||||
use hash::Hash;
|
||||
use itertools::Itertools;
|
||||
use ledger::Block;
|
||||
use mint::Mint;
|
||||
use payment_plan::{Payment, PaymentPlan, Witness};
|
||||
use signature::{KeyPair, PublicKey, Signature};
|
||||
use std::collections::hash_map::Entry::Occupied;
|
||||
use std::collections::{HashMap, HashSet, VecDeque};
|
||||
use std::result;
|
||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
use std::sync::RwLock;
|
||||
use std::time::Instant;
|
||||
use timing::duration_as_us;
|
||||
use transaction::{Instruction, Plan, Transaction};
|
||||
|
||||
/// The number of most recent `last_id` values that the bank will track the signatures
|
||||
/// of. Once the bank discards a `last_id`, it will reject any transactions that use
|
||||
/// that `last_id` in a transaction. Lowering this value reduces memory consumption,
|
||||
/// but requires clients to update their `last_id` more frequently. Raising the value
|
||||
/// lengthens the time a client must wait to be certain a missing transaction will
|
||||
/// not be processed by the network.
|
||||
pub const MAX_ENTRY_IDS: usize = 1024 * 16;
|
||||
|
||||
pub const VERIFY_BLOCK_SIZE: usize = 16;
|
||||
|
||||
/// Reasons a transaction might be rejected.
|
||||
#[derive(Debug, PartialEq, Eq)]
|
||||
pub enum BankError {
|
||||
/// Attempt to debit from `PublicKey`, but found no record of a prior credit.
|
||||
AccountNotFound(PublicKey),
|
||||
|
||||
/// The requested debit from `PublicKey` has the potential to draw the balance
|
||||
/// below zero. This can occur when a debit and credit are processed in parallel.
|
||||
/// The bank may reject the debit or push it to a future entry.
|
||||
InsufficientFunds(PublicKey),
|
||||
|
||||
/// The bank has seen `Signature` before. This can occur under normal operation
|
||||
/// when a UDP packet is duplicated, as a user error from a client not updating
|
||||
/// its `last_id`, or as a double-spend attack.
|
||||
DuplicateSignature(Signature),
|
||||
|
||||
/// The bank has not seen the given `last_id` or the transaction is too old and
|
||||
/// the `last_id` has been discarded.
|
||||
LastIdNotFound(Hash),
|
||||
|
||||
/// The transaction is invalid and has requested a debit or credit of negative
|
||||
/// tokens.
|
||||
NegativeTokens,
|
||||
|
||||
/// Proof of History verification failed.
|
||||
LedgerVerificationFailed,
|
||||
}
|
||||
|
||||
pub type Result<T> = result::Result<T, BankError>;
|
||||
|
||||
/// The state of all accounts and contracts after processing the ledger's entries.
|
||||
pub struct Bank {
|
||||
/// A map of account public keys to the balance in that account.
|
||||
balances: RwLock<HashMap<PublicKey, i64>>,
|
||||
|
||||
/// A map of smart contract transaction signatures to what remains of its payment
|
||||
/// plan. Each transaction that targets the plan should cause it to be reduced.
|
||||
/// Once it cannot be reduced, final payments are made and it is discarded.
|
||||
pending: RwLock<HashMap<Signature, Plan>>,
|
||||
|
||||
/// A FIFO queue of `last_id` items, where each item is a set of signatures
|
||||
/// that have been processed using that `last_id`. Rejected `last_id`
|
||||
/// values are so old that the `last_id` has been pulled out of the queue.
|
||||
last_ids: RwLock<VecDeque<Hash>>,
|
||||
|
||||
/// A mapping of hashes to signature sets. The bank uses this data to
|
||||
/// reject transactions with signatures it has seen before.
|
||||
last_ids_sigs: RwLock<HashMap<Hash, HashSet<Signature>>>,
|
||||
|
||||
/// The set of trusted timekeepers. A Timestamp transaction from a `PublicKey`
|
||||
/// outside this set will be discarded. Note that if validators do not have the
|
||||
/// same set as leaders, they may interpret the ledger differently.
|
||||
time_sources: RwLock<HashSet<PublicKey>>,
|
||||
|
||||
/// The most recent timestamp from a trusted timekeeper. This timestamp is applied
|
||||
/// to every smart contract when it enters the system. If it is waiting on a
|
||||
/// timestamp witness before that timestamp, the bank will execute it immediately.
|
||||
last_time: RwLock<DateTime<Utc>>,
|
||||
|
||||
/// The number of transactions the bank has processed without error since the
|
||||
/// start of the ledger.
|
||||
transaction_count: AtomicUsize,
|
||||
}
|
||||
|
||||
impl Default for Bank {
|
||||
fn default() -> Self {
|
||||
Bank {
|
||||
balances: RwLock::new(HashMap::new()),
|
||||
pending: RwLock::new(HashMap::new()),
|
||||
last_ids: RwLock::new(VecDeque::new()),
|
||||
last_ids_sigs: RwLock::new(HashMap::new()),
|
||||
time_sources: RwLock::new(HashSet::new()),
|
||||
last_time: RwLock::new(Utc.timestamp(0, 0)),
|
||||
transaction_count: AtomicUsize::new(0),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Bank {
|
||||
/// Create a Bank using a deposit.
|
||||
pub fn new_from_deposit(deposit: &Payment) -> Self {
|
||||
let bank = Self::default();
|
||||
bank.apply_payment(deposit);
|
||||
bank
|
||||
}
|
||||
|
||||
/// Create a Bank with only a Mint. Typically used by unit tests.
|
||||
pub fn new(mint: &Mint) -> Self {
|
||||
let deposit = Payment {
|
||||
to: mint.pubkey(),
|
||||
tokens: mint.tokens,
|
||||
};
|
||||
let bank = Self::new_from_deposit(&deposit);
|
||||
bank.register_entry_id(&mint.last_id());
|
||||
bank
|
||||
}
|
||||
|
||||
/// Commit funds to the `payment.to` party.
|
||||
fn apply_payment(&self, payment: &Payment) {
|
||||
let mut balances = self.balances.write().unwrap();
|
||||
if balances.contains_key(&payment.to) {
|
||||
*balances.get_mut(&payment.to).unwrap() += payment.tokens;
|
||||
} else {
|
||||
balances.insert(payment.to, payment.tokens);
|
||||
}
|
||||
}
|
||||
|
||||
/// Return the last entry ID registered.
|
||||
pub fn last_id(&self) -> Hash {
|
||||
let last_ids = self.last_ids.read().expect("'last_ids' read lock");
|
||||
let last_item = last_ids
|
||||
.iter()
|
||||
.last()
|
||||
.expect("get last item from 'last_ids' list");
|
||||
*last_item
|
||||
}
|
||||
|
||||
/// Store the given signature. The bank will reject any transaction with the same signature.
|
||||
fn reserve_signature(signatures: &mut HashSet<Signature>, sig: &Signature) -> Result<()> {
|
||||
if let Some(sig) = signatures.get(sig) {
|
||||
return Err(BankError::DuplicateSignature(*sig));
|
||||
}
|
||||
signatures.insert(*sig);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Forget the given `signature` because its transaction was rejected.
|
||||
fn forget_signature(signatures: &mut HashSet<Signature>, signature: &Signature) {
|
||||
signatures.remove(signature);
|
||||
}
|
||||
|
||||
/// Forget the given `signature` with `last_id` because the transaction was rejected.
|
||||
fn forget_signature_with_last_id(&self, signature: &Signature, last_id: &Hash) {
|
||||
if let Some(entry) = self.last_ids_sigs
|
||||
.write()
|
||||
.expect("'last_ids' read lock in forget_signature_with_last_id")
|
||||
.get_mut(last_id)
|
||||
{
|
||||
Self::forget_signature(entry, signature);
|
||||
}
|
||||
}
|
||||
|
||||
fn reserve_signature_with_last_id(&self, signature: &Signature, last_id: &Hash) -> Result<()> {
|
||||
if let Some(entry) = self.last_ids_sigs
|
||||
.write()
|
||||
.expect("'last_ids' read lock in reserve_signature_with_last_id")
|
||||
.get_mut(last_id)
|
||||
{
|
||||
return Self::reserve_signature(entry, signature);
|
||||
}
|
||||
Err(BankError::LastIdNotFound(*last_id))
|
||||
}
|
||||
|
||||
/// Tell the bank which Entry IDs exist on the ledger. This function
|
||||
/// assumes subsequent calls correspond to later entries, and will boot
|
||||
/// the oldest ones once its internal cache is full. Once booted, the
|
||||
/// bank will reject transactions using that `last_id`.
|
||||
pub fn register_entry_id(&self, last_id: &Hash) {
|
||||
let mut last_ids = self.last_ids
|
||||
.write()
|
||||
.expect("'last_ids' write lock in register_entry_id");
|
||||
let mut last_ids_sigs = self.last_ids_sigs
|
||||
.write()
|
||||
.expect("last_ids_sigs write lock");
|
||||
if last_ids.len() >= MAX_ENTRY_IDS {
|
||||
let id = last_ids.pop_front().unwrap();
|
||||
last_ids_sigs.remove(&id);
|
||||
}
|
||||
last_ids_sigs.insert(*last_id, HashSet::new());
|
||||
last_ids.push_back(*last_id);
|
||||
}
|
||||
|
||||
/// Deduct tokens from the 'from' address if the account has sufficient
|
||||
/// funds and the transaction isn't a duplicate.
|
||||
fn apply_debits(&self, tx: &Transaction) -> Result<()> {
|
||||
let mut bals = self.balances.write().unwrap();
|
||||
let mut purge = false;
|
||||
{
|
||||
let option = bals.get_mut(&tx.from);
|
||||
if option.is_none() {
|
||||
return Err(BankError::AccountNotFound(tx.from));
|
||||
}
|
||||
let bal = option.unwrap();
|
||||
|
||||
self.reserve_signature_with_last_id(&tx.sig, &tx.last_id)?;
|
||||
|
||||
if let Instruction::NewContract(contract) = &tx.instruction {
|
||||
if contract.tokens < 0 {
|
||||
return Err(BankError::NegativeTokens);
|
||||
}
|
||||
|
||||
if *bal < contract.tokens {
|
||||
self.forget_signature_with_last_id(&tx.sig, &tx.last_id);
|
||||
return Err(BankError::InsufficientFunds(tx.from));
|
||||
} else if *bal == contract.tokens {
|
||||
purge = true;
|
||||
} else {
|
||||
*bal -= contract.tokens;
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
if purge {
|
||||
bals.remove(&tx.from);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Apply only a transaction's credits. Credits from multiple transactions
|
||||
/// may safely be applied in parallel.
|
||||
fn apply_credits(&self, tx: &Transaction) {
|
||||
match &tx.instruction {
|
||||
Instruction::NewContract(contract) => {
|
||||
let mut plan = contract.plan.clone();
|
||||
plan.apply_witness(&Witness::Timestamp(*self.last_time
|
||||
.read()
|
||||
.expect("timestamp creation in apply_credits")));
|
||||
|
||||
if let Some(payment) = plan.final_payment() {
|
||||
self.apply_payment(&payment);
|
||||
} else {
|
||||
let mut pending = self.pending
|
||||
.write()
|
||||
.expect("'pending' write lock in apply_credits");
|
||||
pending.insert(tx.sig, plan);
|
||||
}
|
||||
}
|
||||
Instruction::ApplyTimestamp(dt) => {
|
||||
let _ = self.apply_timestamp(tx.from, *dt);
|
||||
}
|
||||
Instruction::ApplySignature(tx_sig) => {
|
||||
let _ = self.apply_signature(tx.from, *tx_sig);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Process a Transaction. If it contains a payment plan that requires a witness
|
||||
/// to progress, the payment plan will be stored in the bank.
|
||||
fn process_transaction(&self, tx: &Transaction) -> Result<()> {
|
||||
self.apply_debits(tx)?;
|
||||
self.apply_credits(tx);
|
||||
self.transaction_count.fetch_add(1, Ordering::Relaxed);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Process a batch of transactions.
|
||||
#[must_use]
|
||||
pub fn process_transactions(&self, txs: Vec<Transaction>) -> Vec<Result<Transaction>> {
|
||||
debug!("processing Transactions {}", txs.len());
|
||||
let txs_len = txs.len();
|
||||
let now = Instant::now();
|
||||
let results: Vec<_> = txs.into_iter()
|
||||
.map(|tx| self.apply_debits(&tx).map(|_| tx))
|
||||
.collect(); // Calling collect() here forces all debits to complete before moving on.
|
||||
|
||||
let debits = now.elapsed();
|
||||
let now = Instant::now();
|
||||
|
||||
let res: Vec<_> = results
|
||||
.into_iter()
|
||||
.map(|result| {
|
||||
result.map(|tx| {
|
||||
self.apply_credits(&tx);
|
||||
tx
|
||||
})
|
||||
})
|
||||
.collect();
|
||||
|
||||
debug!(
|
||||
"debits: {} us credits: {:?} us tx: {}",
|
||||
duration_as_us(&debits),
|
||||
duration_as_us(&now.elapsed()),
|
||||
txs_len
|
||||
);
|
||||
|
||||
let mut tx_count = 0;
|
||||
for r in &res {
|
||||
if r.is_ok() {
|
||||
tx_count += 1;
|
||||
} else {
|
||||
info!("tx error: {:?}", r);
|
||||
}
|
||||
}
|
||||
self.transaction_count
|
||||
.fetch_add(tx_count, Ordering::Relaxed);
|
||||
res
|
||||
}
|
||||
|
||||
/// Process an ordered list of entries.
|
||||
pub fn process_entries<I>(&self, entries: I) -> Result<u64>
|
||||
where
|
||||
I: IntoIterator<Item = Entry>,
|
||||
{
|
||||
let mut entry_count = 0;
|
||||
for entry in entries {
|
||||
entry_count += 1;
|
||||
|
||||
if !entry.transactions.is_empty() {
|
||||
for result in self.process_transactions(entry.transactions) {
|
||||
result?;
|
||||
}
|
||||
}
|
||||
// TODO: verify this is ok in cases like:
|
||||
// 1. an untrusted genesis or tx-<DATE>.log
|
||||
// 2. a crazy leader..
|
||||
if !entry.has_more {
|
||||
self.register_entry_id(&entry.id);
|
||||
}
|
||||
}
|
||||
Ok(entry_count)
|
||||
}
|
||||
|
||||
/// Append entry blocks to the ledger, verifying them along the way.
|
||||
pub fn process_blocks<I>(&self, entries: I) -> Result<u64>
|
||||
where
|
||||
I: IntoIterator<Item = Entry>,
|
||||
{
|
||||
// Ledger verification needs to be parallelized, but we can't pull the whole
|
||||
// thing into memory. We therefore chunk it.
|
||||
let mut entry_count = 0;
|
||||
for block in &entries.into_iter().chunks(VERIFY_BLOCK_SIZE) {
|
||||
let block: Vec<_> = block.collect();
|
||||
if !block.verify(&self.last_id()) {
|
||||
return Err(BankError::LedgerVerificationFailed);
|
||||
}
|
||||
entry_count += self.process_entries(block)?;
|
||||
}
|
||||
Ok(entry_count)
|
||||
}
|
||||
|
||||
/// Process a full ledger.
|
||||
pub fn process_ledger<I>(&self, entries: I) -> Result<u64>
|
||||
where
|
||||
I: IntoIterator<Item = Entry>,
|
||||
{
|
||||
let mut entries = entries.into_iter();
|
||||
|
||||
// The first item in the ledger is required to be an entry with zero num_hashes,
|
||||
// which implies its id can be used as the ledger's seed.
|
||||
let entry0 = entries.next().expect("invalid ledger: empty");
|
||||
|
||||
// The second item in the ledger is a special transaction where the to and from
|
||||
// fields are the same. That entry should be treated as a deposit, not a
|
||||
// transfer to oneself.
|
||||
let entry1 = entries
|
||||
.next()
|
||||
.expect("invalid ledger: need at least 2 entries");
|
||||
let tx = &entry1.transactions[0];
|
||||
let deposit = if let Instruction::NewContract(contract) = &tx.instruction {
|
||||
contract.plan.final_payment()
|
||||
} else {
|
||||
None
|
||||
}.expect("invalid ledger, needs to start with a contract");
|
||||
|
||||
self.apply_payment(&deposit);
|
||||
self.register_entry_id(&entry0.id);
|
||||
self.register_entry_id(&entry1.id);
|
||||
|
||||
let mut entry_count = 2;
|
||||
entry_count += self.process_blocks(entries)?;
|
||||
Ok(entry_count)
|
||||
}
|
||||
|
||||
/// Process a Witness Signature. Any payment plans waiting on this signature
|
||||
/// will progress one step.
|
||||
fn apply_signature(&self, from: PublicKey, tx_sig: Signature) -> Result<()> {
|
||||
if let Occupied(mut e) = self.pending
|
||||
.write()
|
||||
.expect("write() in apply_signature")
|
||||
.entry(tx_sig)
|
||||
{
|
||||
e.get_mut().apply_witness(&Witness::Signature(from));
|
||||
if let Some(payment) = e.get().final_payment() {
|
||||
self.apply_payment(&payment);
|
||||
e.remove_entry();
|
||||
}
|
||||
};
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Process a Witness Timestamp. Any payment plans waiting on this timestamp
|
||||
/// will progress one step.
|
||||
fn apply_timestamp(&self, from: PublicKey, dt: DateTime<Utc>) -> Result<()> {
|
||||
// If this is the first timestamp we've seen, it probably came from the genesis block,
|
||||
// so we'll trust it.
|
||||
if *self.last_time
|
||||
.read()
|
||||
.expect("'last_time' read lock on first timestamp check")
|
||||
== Utc.timestamp(0, 0)
|
||||
{
|
||||
self.time_sources
|
||||
.write()
|
||||
.expect("'time_sources' write lock on first timestamp")
|
||||
.insert(from);
|
||||
}
|
||||
|
||||
if self.time_sources
|
||||
.read()
|
||||
.expect("'time_sources' read lock")
|
||||
.contains(&from)
|
||||
{
|
||||
if dt > *self.last_time.read().expect("'last_time' read lock") {
|
||||
*self.last_time.write().expect("'last_time' write lock") = dt;
|
||||
}
|
||||
} else {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
// Check to see if any timelocked transactions can be completed.
|
||||
let mut completed = vec![];
|
||||
|
||||
// Hold 'pending' write lock until the end of this function. Otherwise another thread can
|
||||
// double-spend if it enters before the modified plan is removed from 'pending'.
|
||||
let mut pending = self.pending
|
||||
.write()
|
||||
.expect("'pending' write lock in apply_timestamp");
|
||||
for (key, plan) in pending.iter_mut() {
|
||||
plan.apply_witness(&Witness::Timestamp(*self.last_time
|
||||
.read()
|
||||
.expect("'last_time' read lock when creating timestamp")));
|
||||
if let Some(payment) = plan.final_payment() {
|
||||
self.apply_payment(&payment);
|
||||
completed.push(key.clone());
|
||||
}
|
||||
}
|
||||
|
||||
for key in completed {
|
||||
pending.remove(&key);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Create, sign, and process a Transaction from `keypair` to `to` of
|
||||
/// `n` tokens where `last_id` is the last Entry ID observed by the client.
|
||||
pub fn transfer(
|
||||
&self,
|
||||
n: i64,
|
||||
keypair: &KeyPair,
|
||||
to: PublicKey,
|
||||
last_id: Hash,
|
||||
) -> Result<Signature> {
|
||||
let tx = Transaction::new(keypair, to, n, last_id);
|
||||
let sig = tx.sig;
|
||||
self.process_transaction(&tx).map(|_| sig)
|
||||
}
|
||||
|
||||
/// Create, sign, and process a postdated Transaction from `keypair`
|
||||
/// to `to` of `n` tokens on `dt` where `last_id` is the last Entry ID
|
||||
/// observed by the client.
|
||||
pub fn transfer_on_date(
|
||||
&self,
|
||||
n: i64,
|
||||
keypair: &KeyPair,
|
||||
to: PublicKey,
|
||||
dt: DateTime<Utc>,
|
||||
last_id: Hash,
|
||||
) -> Result<Signature> {
|
||||
let tx = Transaction::new_on_date(keypair, to, dt, n, last_id);
|
||||
let sig = tx.sig;
|
||||
self.process_transaction(&tx).map(|_| sig)
|
||||
}
|
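    // A hedged usage sketch for the transfer APIs above; the bindings mirror
    // `test_transfer_on_date` below. `apply_timestamp` is called directly here
    // only because the tests do so; in normal operation the witness arrives as
    // an ApplyTimestamp transaction processed through `process_transaction`.
    //
    //     let sig = bank.transfer_on_date(1, &mint.keypair(), pubkey, dt, mint.last_id())?;
    //     // Funds stay in `pending` until a trusted timestamp at or after `dt` arrives.
    //     bank.apply_timestamp(mint.pubkey(), dt)?;
    //     assert_eq!(bank.get_balance(&pubkey), 1);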
||||
|
||||
pub fn get_balance(&self, pubkey: &PublicKey) -> i64 {
|
||||
let bals = self.balances
|
||||
.read()
|
||||
.expect("'balances' read lock in get_balance");
|
||||
bals.get(pubkey).map(|x| *x).unwrap_or(0)
|
||||
}
|
||||
|
||||
pub fn transaction_count(&self) -> usize {
|
||||
self.transaction_count.load(Ordering::Relaxed)
|
||||
}
|
||||
|
||||
pub fn has_signature(&self, signature: &Signature) -> bool {
|
||||
let last_ids_sigs = self.last_ids_sigs
|
||||
.read()
|
||||
.expect("'last_ids_sigs' read lock");
|
||||
for (_hash, signatures) in last_ids_sigs.iter() {
|
||||
if signatures.contains(signature) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use bincode::serialize;
|
||||
use entry::next_entry;
|
||||
use entry_writer::{self, EntryWriter};
|
||||
use hash::hash;
|
||||
use ledger::next_entries;
|
||||
use signature::KeyPairUtil;
|
||||
use std::io::{BufRead, BufReader, Cursor, Seek, SeekFrom};
|
||||
|
||||
#[test]
|
||||
fn test_two_payments_to_one_party() {
|
||||
let mint = Mint::new(10_000);
|
||||
let pubkey = KeyPair::new().pubkey();
|
||||
let bank = Bank::new(&mint);
|
||||
assert_eq!(bank.last_id(), mint.last_id());
|
||||
|
||||
bank.transfer(1_000, &mint.keypair(), pubkey, mint.last_id())
|
||||
.unwrap();
|
||||
assert_eq!(bank.get_balance(&pubkey), 1_000);
|
||||
|
||||
bank.transfer(500, &mint.keypair(), pubkey, mint.last_id())
|
||||
.unwrap();
|
||||
assert_eq!(bank.get_balance(&pubkey), 1_500);
|
||||
assert_eq!(bank.transaction_count(), 2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_negative_tokens() {
|
||||
let mint = Mint::new(1);
|
||||
let pubkey = KeyPair::new().pubkey();
|
||||
let bank = Bank::new(&mint);
|
||||
assert_eq!(
|
||||
bank.transfer(-1, &mint.keypair(), pubkey, mint.last_id()),
|
||||
Err(BankError::NegativeTokens)
|
||||
);
|
||||
assert_eq!(bank.transaction_count(), 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_account_not_found() {
|
||||
let mint = Mint::new(1);
|
||||
let bank = Bank::new(&mint);
|
||||
let keypair = KeyPair::new();
|
||||
assert_eq!(
|
||||
bank.transfer(1, &keypair, mint.pubkey(), mint.last_id()),
|
||||
Err(BankError::AccountNotFound(keypair.pubkey()))
|
||||
);
|
||||
assert_eq!(bank.transaction_count(), 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_insufficient_funds() {
|
||||
let mint = Mint::new(11_000);
|
||||
let bank = Bank::new(&mint);
|
||||
let pubkey = KeyPair::new().pubkey();
|
||||
bank.transfer(1_000, &mint.keypair(), pubkey, mint.last_id())
|
||||
.unwrap();
|
||||
assert_eq!(bank.transaction_count(), 1);
|
||||
assert_eq!(
|
||||
bank.transfer(10_001, &mint.keypair(), pubkey, mint.last_id()),
|
||||
Err(BankError::InsufficientFunds(mint.pubkey()))
|
||||
);
|
||||
assert_eq!(bank.transaction_count(), 1);
|
||||
|
||||
let mint_pubkey = mint.keypair().pubkey();
|
||||
assert_eq!(bank.get_balance(&mint_pubkey), 10_000);
|
||||
assert_eq!(bank.get_balance(&pubkey), 1_000);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_transfer_to_newb() {
|
||||
let mint = Mint::new(10_000);
|
||||
let bank = Bank::new(&mint);
|
||||
let pubkey = KeyPair::new().pubkey();
|
||||
bank.transfer(500, &mint.keypair(), pubkey, mint.last_id())
|
||||
.unwrap();
|
||||
assert_eq!(bank.get_balance(&pubkey), 500);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_transfer_on_date() {
|
||||
let mint = Mint::new(1);
|
||||
let bank = Bank::new(&mint);
|
||||
let pubkey = KeyPair::new().pubkey();
|
||||
let dt = Utc::now();
|
||||
bank.transfer_on_date(1, &mint.keypair(), pubkey, dt, mint.last_id())
|
||||
.unwrap();
|
||||
|
||||
// Mint's balance will be zero because all funds are locked up.
|
||||
assert_eq!(bank.get_balance(&mint.pubkey()), 0);
|
||||
|
||||
// tx count is 1, because debits were applied.
|
||||
assert_eq!(bank.transaction_count(), 1);
|
||||
|
||||
// pubkey's balance will be zero because the funds have not been
|
||||
// sent.
|
||||
assert_eq!(bank.get_balance(&pubkey), 0);
|
||||
|
||||
// Now, acknowledge that the time in the condition has occurred and check
|
||||
// that pubkey's funds are now available.
|
||||
bank.apply_timestamp(mint.pubkey(), dt).unwrap();
|
||||
assert_eq!(bank.get_balance(&pubkey), 1);
|
||||
|
||||
// tx count is still 1, because we chose not to count timestamp transactions
|
||||
// toward the tx count.
|
||||
assert_eq!(bank.transaction_count(), 1);
|
||||
|
||||
bank.apply_timestamp(mint.pubkey(), dt).unwrap(); // <-- Attack! Attempt to process completed transaction.
|
||||
assert_ne!(bank.get_balance(&pubkey), 2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_transfer_after_date() {
|
||||
let mint = Mint::new(1);
|
||||
let bank = Bank::new(&mint);
|
||||
let pubkey = KeyPair::new().pubkey();
|
||||
let dt = Utc::now();
|
||||
bank.apply_timestamp(mint.pubkey(), dt).unwrap();
|
||||
|
||||
// The deadline `dt` has already been reached, so this transfer should be processed immediately.
|
||||
bank.transfer_on_date(1, &mint.keypair(), pubkey, dt, mint.last_id())
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(bank.get_balance(&mint.pubkey()), 0);
|
||||
assert_eq!(bank.get_balance(&pubkey), 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_cancel_transfer() {
|
||||
let mint = Mint::new(1);
|
||||
let bank = Bank::new(&mint);
|
||||
let pubkey = KeyPair::new().pubkey();
|
||||
let dt = Utc::now();
|
||||
let sig = bank.transfer_on_date(1, &mint.keypair(), pubkey, dt, mint.last_id())
|
||||
.unwrap();
|
||||
|
||||
// Assert the debit counts as a transaction.
|
||||
assert_eq!(bank.transaction_count(), 1);
|
||||
|
||||
// Mint's balance will be zero because all funds are locked up.
|
||||
assert_eq!(bank.get_balance(&mint.pubkey()), 0);
|
||||
|
||||
// pubkey's balance will be zero because the funds have not been
|
||||
// sent.
|
||||
assert_eq!(bank.get_balance(&pubkey), 0);
|
||||
|
||||
// Now, cancel the transaction. Mint gets her funds back, pubkey never sees them.
|
||||
bank.apply_signature(mint.pubkey(), sig).unwrap();
|
||||
assert_eq!(bank.get_balance(&mint.pubkey()), 1);
|
||||
assert_eq!(bank.get_balance(&pubkey), 0);
|
||||
|
||||
// Assert cancel doesn't cause count to go backward.
|
||||
assert_eq!(bank.transaction_count(), 1);
|
||||
|
||||
bank.apply_signature(mint.pubkey(), sig).unwrap(); // <-- Attack! Attempt to cancel completed transaction.
|
||||
assert_ne!(bank.get_balance(&mint.pubkey()), 2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_duplicate_transaction_signature() {
|
||||
let mint = Mint::new(1);
|
||||
let bank = Bank::new(&mint);
|
||||
let sig = Signature::default();
|
||||
assert!(
|
||||
bank.reserve_signature_with_last_id(&sig, &mint.last_id())
|
||||
.is_ok()
|
||||
);
|
||||
assert_eq!(
|
||||
bank.reserve_signature_with_last_id(&sig, &mint.last_id()),
|
||||
Err(BankError::DuplicateSignature(sig))
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_forget_signature() {
|
||||
let mint = Mint::new(1);
|
||||
let bank = Bank::new(&mint);
|
||||
let sig = Signature::default();
|
||||
bank.reserve_signature_with_last_id(&sig, &mint.last_id())
|
||||
.unwrap();
|
||||
bank.forget_signature_with_last_id(&sig, &mint.last_id());
|
||||
assert!(
|
||||
bank.reserve_signature_with_last_id(&sig, &mint.last_id())
|
||||
.is_ok()
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_has_signature() {
|
||||
let mint = Mint::new(1);
|
||||
let bank = Bank::new(&mint);
|
||||
let sig = Signature::default();
|
||||
bank.reserve_signature_with_last_id(&sig, &mint.last_id())
|
||||
.expect("reserve signature");
|
||||
assert!(bank.has_signature(&sig));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_reject_old_last_id() {
|
||||
let mint = Mint::new(1);
|
||||
let bank = Bank::new(&mint);
|
||||
let sig = Signature::default();
|
||||
for i in 0..MAX_ENTRY_IDS {
|
||||
let last_id = hash(&serialize(&i).unwrap()); // Unique hash
|
||||
bank.register_entry_id(&last_id);
|
||||
}
|
||||
// Assert we're no longer able to use the oldest entry ID.
|
||||
assert_eq!(
|
||||
bank.reserve_signature_with_last_id(&sig, &mint.last_id()),
|
||||
Err(BankError::LastIdNotFound(mint.last_id()))
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_debits_before_credits() {
|
||||
let mint = Mint::new(2);
|
||||
let bank = Bank::new(&mint);
|
||||
let keypair = KeyPair::new();
|
||||
let tx0 = Transaction::new(&mint.keypair(), keypair.pubkey(), 2, mint.last_id());
|
||||
let tx1 = Transaction::new(&keypair, mint.pubkey(), 1, mint.last_id());
|
||||
let txs = vec![tx0, tx1];
|
||||
let results = bank.process_transactions(txs);
|
||||
assert!(results[1].is_err());
|
||||
|
||||
// Assert bad transactions aren't counted.
|
||||
assert_eq!(bank.transaction_count(), 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_process_empty_entry_is_registered() {
|
||||
let mint = Mint::new(1);
|
||||
let bank = Bank::new(&mint);
|
||||
let keypair = KeyPair::new();
|
||||
let entry = next_entry(&mint.last_id(), 1, vec![]);
|
||||
let tx = Transaction::new(&mint.keypair(), keypair.pubkey(), 1, entry.id);
|
||||
|
||||
// First, ensure the TX is rejected because of the unregistered last ID
|
||||
assert_eq!(
|
||||
bank.process_transaction(&tx),
|
||||
Err(BankError::LastIdNotFound(entry.id))
|
||||
);
|
||||
|
||||
// Now ensure the TX is accepted despite pointing to the ID of an empty entry.
|
||||
bank.process_entries(vec![entry]).unwrap();
|
||||
assert!(bank.process_transaction(&tx).is_ok());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_process_genesis() {
|
||||
let mint = Mint::new(1);
|
||||
let genesis = mint.create_entries();
|
||||
let bank = Bank::default();
|
||||
bank.process_ledger(genesis).unwrap();
|
||||
assert_eq!(bank.get_balance(&mint.pubkey()), 1);
|
||||
}
|
||||
|
||||
fn create_sample_block(mint: &Mint) -> impl Iterator<Item = Entry> {
|
||||
let keypair = KeyPair::new();
|
||||
let tx = Transaction::new(&mint.keypair(), keypair.pubkey(), 1, mint.last_id());
|
||||
next_entries(&mint.last_id(), 0, vec![tx]).into_iter()
|
||||
}
|
||||
|
||||
fn create_sample_ledger() -> (impl Iterator<Item = Entry>, PublicKey) {
|
||||
let mint = Mint::new(2);
|
||||
let genesis = mint.create_entries();
|
||||
let block = create_sample_block(&mint);
|
||||
(genesis.into_iter().chain(block), mint.pubkey())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_process_ledger() {
|
||||
let (ledger, pubkey) = create_sample_ledger();
|
||||
let bank = Bank::default();
|
||||
bank.process_ledger(ledger).unwrap();
|
||||
assert_eq!(bank.get_balance(&pubkey), 1);
|
||||
}
|
||||
|
||||
// Write the given entries to a file and then return a file iterator to them.
|
||||
fn to_file_iter(entries: impl Iterator<Item = Entry>) -> impl Iterator<Item = Entry> {
|
||||
let mut file = Cursor::new(vec![]);
|
||||
EntryWriter::write_entries(&mut file, entries).unwrap();
|
||||
file.seek(SeekFrom::Start(0)).unwrap();
|
||||
|
||||
let reader = BufReader::new(file);
|
||||
reader
|
||||
.lines()
|
||||
.map(|line| entry_writer::read_entry(line.unwrap()).unwrap())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_process_ledger_from_file() {
|
||||
let (ledger, pubkey) = create_sample_ledger();
|
||||
let ledger = to_file_iter(ledger);
|
||||
|
||||
let bank = Bank::default();
|
||||
bank.process_ledger(ledger).unwrap();
|
||||
assert_eq!(bank.get_balance(&pubkey), 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_process_ledger_from_files() {
|
||||
let mint = Mint::new(2);
|
||||
let genesis = to_file_iter(mint.create_entries().into_iter());
|
||||
let block = to_file_iter(create_sample_block(&mint));
|
||||
|
||||
let bank = Bank::default();
|
||||
bank.process_ledger(genesis.chain(block)).unwrap();
|
||||
assert_eq!(bank.get_balance(&mint.pubkey()), 1);
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(all(feature = "unstable", test))]
|
||||
mod bench {
|
||||
extern crate test;
|
||||
use self::test::Bencher;
|
||||
use bank::*;
|
||||
use bincode::serialize;
|
||||
use hash::hash;
|
||||
use rayon::prelude::*;
|
||||
use signature::KeyPairUtil;
|
||||
|
||||
#[bench]
|
||||
fn bench_process_transaction(bencher: &mut Bencher) {
|
||||
let mint = Mint::new(100_000_000);
|
||||
let bank = Bank::new(&mint);
|
||||
// Create transactions between unrelated parties.
|
||||
let transactions: Vec<_> = (0..4096)
|
||||
.into_par_iter()
|
||||
.map(|i| {
|
||||
// Seed the 'from' account.
|
||||
let rando0 = KeyPair::new();
|
||||
let tx = Transaction::new(&mint.keypair(), rando0.pubkey(), 1_000, mint.last_id());
|
||||
bank.process_transaction(&tx).unwrap();
|
||||
|
||||
// Seed the 'to' account and a cell for its signature.
|
||||
let last_id = hash(&serialize(&i).unwrap()); // Unique hash
|
||||
bank.register_entry_id(&last_id);
|
||||
|
||||
let rando1 = KeyPair::new();
|
||||
let tx = Transaction::new(&rando0, rando1.pubkey(), 1, last_id);
|
||||
bank.process_transaction(&tx).unwrap();
|
||||
|
||||
// Finally, return a transaction that's unique
|
||||
Transaction::new(&rando0, rando1.pubkey(), 1, last_id)
|
||||
})
|
||||
.collect();
|
||||
bencher.iter(|| {
|
||||
// Since the benchmarker runs this multiple times, we need to clear the signatures.
|
||||
for (_, sigs) in bank.last_ids_sigs.write().unwrap().iter_mut() {
|
||||
sigs.clear();
|
||||
}
|
||||
|
||||
assert!(
|
||||
bank.process_transactions(transactions.clone())
|
||||
.iter()
|
||||
.all(|x| x.is_ok())
|
||||
);
|
||||
});
|
||||
}
|
||||
}
|
428
src/banking_stage.rs
Normal file
@ -0,0 +1,428 @@
|
||||
//! The `banking_stage` processes Transaction messages. It is intended to be used
|
||||
//! to construct a software pipeline. The stage uses all available CPU cores and
|
||||
//! can do its processing in parallel with signature verification on the GPU.
|
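//!
//! A minimal wiring sketch, hedged: `mint` is assumed to exist already, and
//! everything else comes from the constructor signature below (it mirrors the
//! benches at the bottom of this file).
//!
//! ```ignore
//! let bank = Arc::new(Bank::new(&mint));
//! let exit = Arc::new(AtomicBool::new(false));
//! let (_verified_sender, verified_receiver) = channel();
//! let packet_recycler = PacketRecycler::default();
//! // `signal_receiver` yields `Signal::Transactions` batches for the next stage.
//! let (banking_stage, signal_receiver) =
//!     BankingStage::new(bank, exit, verified_receiver, packet_recycler);
//! ```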
||||
|
||||
use bank::Bank;
|
||||
use bincode::deserialize;
|
||||
use counter::Counter;
|
||||
use packet::{PacketRecycler, Packets, SharedPackets};
|
||||
use rayon::prelude::*;
|
||||
use record_stage::Signal;
|
||||
use result::Result;
|
||||
use std::net::SocketAddr;
|
||||
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
|
||||
use std::sync::mpsc::{channel, Receiver, Sender};
|
||||
use std::sync::Arc;
|
||||
use std::thread::{Builder, JoinHandle};
|
||||
use std::time::Duration;
|
||||
use std::time::Instant;
|
||||
use timing;
|
||||
use transaction::Transaction;
|
||||
|
||||
/// Stores the stage's thread handle and output receiver.
|
||||
pub struct BankingStage {
|
||||
/// Handle to the stage's thread.
|
||||
pub thread_hdl: JoinHandle<()>,
|
||||
}
|
||||
|
||||
impl BankingStage {
|
||||
/// Create the stage using `bank`. Exit when either `exit` is set or
|
||||
/// when `verified_receiver` or the stage's output receiver is dropped.
|
||||
/// Discard input packets using `packet_recycler` to minimize memory
|
||||
/// allocations in a previous stage such as the `fetch_stage`.
|
||||
pub fn new(
|
||||
bank: Arc<Bank>,
|
||||
exit: Arc<AtomicBool>,
|
||||
verified_receiver: Receiver<Vec<(SharedPackets, Vec<u8>)>>,
|
||||
packet_recycler: PacketRecycler,
|
||||
) -> (Self, Receiver<Signal>) {
|
||||
let (signal_sender, signal_receiver) = channel();
|
||||
let thread_hdl = Builder::new()
|
||||
.name("solana-banking-stage".to_string())
|
||||
.spawn(move || loop {
|
||||
let e = Self::process_packets(
|
||||
bank.clone(),
|
||||
&verified_receiver,
|
||||
&signal_sender,
|
||||
&packet_recycler,
|
||||
);
|
||||
if e.is_err() {
|
||||
if exit.load(Ordering::Relaxed) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
})
|
||||
.unwrap();
|
||||
(BankingStage { thread_hdl }, signal_receiver)
|
||||
}
|
||||
|
||||
/// Convert the transactions from a blob of binary data to a vector of transactions and
|
||||
/// an unused `SocketAddr` that could be used to send a response.
|
||||
fn deserialize_transactions(p: &Packets) -> Vec<Option<(Transaction, SocketAddr)>> {
|
||||
p.packets
|
||||
.par_iter()
|
||||
.map(|x| {
|
||||
deserialize(&x.data[0..x.meta.size])
|
||||
.map(|req| (req, x.meta.addr()))
|
||||
.ok()
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Process the incoming packets and send output `Signal` messages to `signal_sender`.
|
||||
/// Discard packets via `packet_recycler`.
|
||||
fn process_packets(
|
||||
bank: Arc<Bank>,
|
||||
verified_receiver: &Receiver<Vec<(SharedPackets, Vec<u8>)>>,
|
||||
signal_sender: &Sender<Signal>,
|
||||
packet_recycler: &PacketRecycler,
|
||||
) -> Result<()> {
|
||||
let timer = Duration::new(1, 0);
|
||||
let recv_start = Instant::now();
|
||||
let mms = verified_receiver.recv_timeout(timer)?;
|
||||
let mut reqs_len = 0;
|
||||
let mms_len = mms.len();
|
||||
info!(
|
||||
"@{:?} process start stalled for: {:?}ms batches: {}",
|
||||
timing::timestamp(),
|
||||
timing::duration_as_ms(&recv_start.elapsed()),
|
||||
mms.len(),
|
||||
);
|
||||
let count = mms.iter().map(|x| x.1.len()).sum();
|
||||
static mut COUNTER: Counter = create_counter!("banking_stage_process_packets", 1);
|
||||
let proc_start = Instant::now();
|
||||
for (msgs, vers) in mms {
|
||||
let transactions = Self::deserialize_transactions(&msgs.read().unwrap());
|
||||
reqs_len += transactions.len();
|
||||
let transactions = transactions
|
||||
.into_iter()
|
||||
.zip(vers)
|
||||
.filter_map(|(tx, ver)| match tx {
|
||||
None => None,
|
||||
Some((tx, _addr)) => if tx.verify_plan() && ver != 0 {
|
||||
Some(tx)
|
||||
} else {
|
||||
None
|
||||
},
|
||||
})
|
||||
.collect();
|
||||
|
||||
debug!("process_transactions");
|
||||
let results = bank.process_transactions(transactions);
|
||||
let transactions = results.into_iter().filter_map(|x| x.ok()).collect();
|
||||
signal_sender.send(Signal::Transactions(transactions))?;
|
||||
debug!("done process_transactions");
|
||||
|
||||
packet_recycler.recycle(msgs);
|
||||
}
|
||||
let total_time_s = timing::duration_as_s(&proc_start.elapsed());
|
||||
let total_time_ms = timing::duration_as_ms(&proc_start.elapsed());
|
||||
info!(
|
||||
"@{:?} done processing transaction batches: {} time: {:?}ms reqs: {} reqs/s: {}",
|
||||
timing::timestamp(),
|
||||
mms_len,
|
||||
total_time_ms,
|
||||
reqs_len,
|
||||
(reqs_len as f32) / (total_time_s)
|
||||
);
|
||||
inc_counter!(COUNTER, count, proc_start);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: When banking is pulled out of RequestStage, add this test back in.
|
||||
|
||||
//use bank::Bank;
|
||||
//use entry::Entry;
|
||||
//use hash::Hash;
|
||||
//use record_stage::RecordStage;
|
||||
//use record_stage::Signal;
|
||||
//use result::Result;
|
||||
//use std::sync::mpsc::{channel, Sender};
|
||||
//use std::sync::{Arc, Mutex};
|
||||
//use std::time::Duration;
|
||||
//use transaction::Transaction;
|
||||
//
|
||||
//#[cfg(test)]
|
||||
//mod tests {
|
||||
// use bank::Bank;
|
||||
// use mint::Mint;
|
||||
// use signature::{KeyPair, KeyPairUtil};
|
||||
// use transaction::Transaction;
|
||||
//
|
||||
// #[test]
|
||||
// // TODO: Move this test to banking_stage. Calling process_transactions() directly
|
||||
// // defeats the purpose of this test.
|
||||
// fn test_banking_sequential_consistency() {
|
||||
// // In this attack we'll demonstrate that a verifier can interpret the ledger
|
||||
// // differently if either the server doesn't signal the ledger to add an
|
||||
// // Entry OR if the verifier tries to parallelize across multiple Entries.
|
||||
// let mint = Mint::new(2);
|
||||
// let bank = Bank::new(&mint);
|
||||
// let banking_stage = EventProcessor::new(bank, &mint.last_id(), None);
|
||||
//
|
||||
// // Process a batch that includes a transaction that receives two tokens.
|
||||
// let alice = KeyPair::new();
|
||||
// let tx = Transaction::new(&mint.keypair(), alice.pubkey(), 2, mint.last_id());
|
||||
// let transactions = vec![tx];
|
||||
// let entry0 = banking_stage.process_transactions(transactions).unwrap();
|
||||
//
|
||||
// // Process a second batch that spends one of those tokens.
|
||||
// let tx = Transaction::new(&alice, mint.pubkey(), 1, mint.last_id());
|
||||
// let transactions = vec![tx];
|
||||
// let entry1 = banking_stage.process_transactions(transactions).unwrap();
|
||||
//
|
||||
// // Collect the ledger and feed it to a new bank.
|
||||
// let entries = vec![entry0, entry1];
|
||||
//
|
||||
// // Assert the user holds one token, not two. If the server only output one
|
||||
// // entry, then the second transaction will be rejected, because it drives
|
||||
// // the account balance below zero before the credit is added.
|
||||
// let bank = Bank::new(&mint);
|
||||
// for entry in entries {
|
||||
// assert!(
|
||||
// bank
|
||||
// .process_transactions(entry.transactions)
|
||||
// .into_iter()
|
||||
// .all(|x| x.is_ok())
|
||||
// );
|
||||
// }
|
||||
// assert_eq!(bank.get_balance(&alice.pubkey()), Some(1));
|
||||
// }
|
||||
//}
|
||||
//
|
||||
//#[cfg(all(feature = "unstable", test))]
|
||||
//mod bench {
|
||||
// extern crate test;
|
||||
// use self::test::Bencher;
|
||||
// use bank::{Bank, MAX_ENTRY_IDS};
|
||||
// use bincode::serialize;
|
||||
// use hash::hash;
|
||||
// use mint::Mint;
|
||||
// use rayon::prelude::*;
|
||||
// use signature::{KeyPair, KeyPairUtil};
|
||||
// use std::collections::HashSet;
|
||||
// use std::time::Instant;
|
||||
// use transaction::Transaction;
|
||||
//
|
||||
// #[bench]
|
||||
// fn bench_process_transactions(_bencher: &mut Bencher) {
|
||||
// let mint = Mint::new(100_000_000);
|
||||
// let bank = Bank::new(&mint);
|
||||
// // Create transactions between unrelated parties.
|
||||
// let txs = 100_000;
|
||||
// let last_ids: Mutex<HashSet<Hash>> = Mutex::new(HashSet::new());
|
||||
// let transactions: Vec<_> = (0..txs)
|
||||
// .into_par_iter()
|
||||
// .map(|i| {
|
||||
// // Seed the 'to' account and a cell for its signature.
|
||||
// let dummy_id = i % (MAX_ENTRY_IDS as i32);
|
||||
// let last_id = hash(&serialize(&dummy_id).unwrap()); // Semi-unique hash
|
||||
// {
|
||||
// let mut last_ids = last_ids.lock().unwrap();
|
||||
// if !last_ids.contains(&last_id) {
|
||||
// last_ids.insert(last_id);
|
||||
// bank.register_entry_id(&last_id);
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// // Seed the 'from' account.
|
||||
// let rando0 = KeyPair::new();
|
||||
// let tx = Transaction::new(&mint.keypair(), rando0.pubkey(), 1_000, last_id);
|
||||
// bank.process_transaction(&tx).unwrap();
|
||||
//
|
||||
// let rando1 = KeyPair::new();
|
||||
// let tx = Transaction::new(&rando0, rando1.pubkey(), 2, last_id);
|
||||
// bank.process_transaction(&tx).unwrap();
|
||||
//
|
||||
// // Finally, return a transaction that's unique
|
||||
// Transaction::new(&rando0, rando1.pubkey(), 1, last_id)
|
||||
// })
|
||||
// .collect();
|
||||
//
|
||||
// let banking_stage = EventProcessor::new(bank, &mint.last_id(), None);
|
||||
//
|
||||
// let now = Instant::now();
|
||||
// assert!(banking_stage.process_transactions(transactions).is_ok());
|
||||
// let duration = now.elapsed();
|
||||
// let sec = duration.as_secs() as f64 + duration.subsec_nanos() as f64 / 1_000_000_000.0;
|
||||
// let tps = txs as f64 / sec;
|
||||
//
|
||||
// // Ensure that all transactions were successfully logged.
|
||||
// drop(banking_stage.historian_input);
|
||||
// let entries: Vec<Entry> = banking_stage.output.lock().unwrap().iter().collect();
|
||||
// assert_eq!(entries.len(), 1);
|
||||
// assert_eq!(entries[0].transactions.len(), txs as usize);
|
||||
//
|
||||
// println!("{} tps", tps);
|
||||
// }
|
||||
//}
|
||||
|
||||
#[cfg(all(feature = "unstable", test))]
|
||||
mod bench {
|
||||
extern crate test;
|
||||
use self::test::Bencher;
|
||||
use bank::*;
|
||||
use banking_stage::BankingStage;
|
||||
use logger;
|
||||
use mint::Mint;
|
||||
use packet::{to_packets_chunked, PacketRecycler};
|
||||
use rayon::prelude::*;
|
||||
use record_stage::Signal;
|
||||
use signature::{KeyPair, KeyPairUtil};
|
||||
use std::iter;
|
||||
use std::sync::mpsc::{channel, Receiver};
|
||||
use std::sync::Arc;
|
||||
use transaction::Transaction;
|
||||
|
||||
fn check_txs(batches: usize, receiver: &Receiver<Signal>, ref_tx_count: usize) {
|
||||
let mut total = 0;
|
||||
for _ in 0..batches {
|
||||
let signal = receiver.recv().unwrap();
|
||||
if let Signal::Transactions(transactions) = signal {
|
||||
total += transactions.len();
|
||||
} else {
|
||||
assert!(false);
|
||||
}
|
||||
}
|
||||
assert_eq!(total, ref_tx_count);
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_banking_stage_multi_accounts(bencher: &mut Bencher) {
|
||||
logger::setup();
|
||||
let tx = 10_000_usize;
|
||||
let mint_total = 1_000_000_000_000;
|
||||
let mint = Mint::new(mint_total);
|
||||
let num_dst_accounts = 8 * 1024;
|
||||
let num_src_accounts = 8 * 1024;
|
||||
|
||||
let srckeys: Vec<_> = (0..num_src_accounts).map(|_| KeyPair::new()).collect();
|
||||
let dstkeys: Vec<_> = (0..num_dst_accounts)
|
||||
.map(|_| KeyPair::new().pubkey())
|
||||
.collect();
|
||||
|
||||
info!("created keys src: {} dst: {}", srckeys.len(), dstkeys.len());
|
||||
|
||||
let transactions: Vec<_> = (0..tx)
|
||||
.map(|i| {
|
||||
Transaction::new(
|
||||
&srckeys[i % num_src_accounts],
|
||||
dstkeys[i % num_dst_accounts],
|
||||
i as i64,
|
||||
mint.last_id(),
|
||||
)
|
||||
})
|
||||
.collect();
|
||||
|
||||
info!("created transactions");
|
||||
|
||||
let (verified_sender, verified_receiver) = channel();
|
||||
let (signal_sender, signal_receiver) = channel();
|
||||
let packet_recycler = PacketRecycler::default();
|
||||
|
||||
let setup_transactions: Vec<_> = (0..num_src_accounts)
|
||||
.map(|i| {
|
||||
Transaction::new(
|
||||
&mint.keypair(),
|
||||
srckeys[i].pubkey(),
|
||||
mint_total / num_src_accounts as i64,
|
||||
mint.last_id(),
|
||||
)
|
||||
})
|
||||
.collect();
|
||||
|
||||
bencher.iter(move || {
|
||||
let bank = Arc::new(Bank::new(&mint));
|
||||
|
||||
let verified_setup: Vec<_> =
|
||||
to_packets_chunked(&packet_recycler, setup_transactions.clone(), tx)
|
||||
.into_iter()
|
||||
.map(|x| {
|
||||
let len = (*x).read().unwrap().packets.len();
|
||||
(x, iter::repeat(1).take(len).collect())
|
||||
})
|
||||
.collect();
|
||||
|
||||
let verified_setup_len = verified_setup.len();
|
||||
verified_sender.send(verified_setup).unwrap();
|
||||
BankingStage::process_packets(
|
||||
bank.clone(),
|
||||
&verified_receiver,
|
||||
&signal_sender,
|
||||
&packet_recycler,
|
||||
).unwrap();
|
||||
|
||||
check_txs(verified_setup_len, &signal_receiver, num_src_accounts);
|
||||
|
||||
let verified: Vec<_> = to_packets_chunked(&packet_recycler, transactions.clone(), 192)
|
||||
.into_iter()
|
||||
.map(|x| {
|
||||
let len = (*x).read().unwrap().packets.len();
|
||||
(x, iter::repeat(1).take(len).collect())
|
||||
})
|
||||
.collect();
|
||||
|
||||
let verified_len = verified.len();
|
||||
verified_sender.send(verified).unwrap();
|
||||
BankingStage::process_packets(
|
||||
bank.clone(),
|
||||
&verified_receiver,
|
||||
&signal_sender,
|
||||
&packet_recycler,
|
||||
).unwrap();
|
||||
|
||||
check_txs(verified_len, &signal_receiver, tx);
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn bench_banking_stage_single_from(bencher: &mut Bencher) {
|
||||
logger::setup();
|
||||
let tx = 10_000_usize;
|
||||
let mint = Mint::new(1_000_000_000_000);
|
||||
let mut pubkeys = Vec::new();
|
||||
let num_keys = 8;
|
||||
for _ in 0..num_keys {
|
||||
pubkeys.push(KeyPair::new().pubkey());
|
||||
}
|
||||
|
||||
let transactions: Vec<_> = (0..tx)
|
||||
.into_par_iter()
|
||||
.map(|i| {
|
||||
Transaction::new(
|
||||
&mint.keypair(),
|
||||
pubkeys[i % num_keys],
|
||||
i as i64,
|
||||
mint.last_id(),
|
||||
)
|
||||
})
|
||||
.collect();
|
||||
|
||||
let (verified_sender, verified_receiver) = channel();
|
||||
let (signal_sender, signal_receiver) = channel();
|
||||
let packet_recycler = PacketRecycler::default();
|
||||
|
||||
bencher.iter(move || {
|
||||
let bank = Arc::new(Bank::new(&mint));
|
||||
let verified: Vec<_> = to_packets_chunked(&packet_recycler, transactions.clone(), tx)
|
||||
.into_iter()
|
||||
.map(|x| {
|
||||
let len = (*x).read().unwrap().packets.len();
|
||||
(x, iter::repeat(1).take(len).collect())
|
||||
})
|
||||
.collect();
|
||||
let verified_len = verified.len();
|
||||
verified_sender.send(verified).unwrap();
|
||||
BankingStage::process_packets(
|
||||
bank.clone(),
|
||||
&verified_receiver,
|
||||
&signal_sender,
|
||||
&packet_recycler,
|
||||
).unwrap();
|
||||
|
||||
check_txs(verified_len, &signal_receiver, tx);
|
||||
});
|
||||
}
|
||||
|
||||
}
|
383
src/bin/client-demo.rs
Normal file
@ -0,0 +1,383 @@
|
||||
extern crate atty;
|
||||
extern crate env_logger;
|
||||
extern crate getopts;
|
||||
extern crate rayon;
|
||||
extern crate serde_json;
|
||||
extern crate solana;
|
||||
|
||||
use atty::{is, Stream};
|
||||
use getopts::Options;
|
||||
use rayon::prelude::*;
|
||||
use solana::crdt::{Crdt, ReplicatedData};
|
||||
use solana::hash::Hash;
|
||||
use solana::mint::Mint;
|
||||
use solana::nat::udp_public_bind;
|
||||
use solana::ncp::Ncp;
|
||||
use solana::signature::{GenKeys, KeyPair, KeyPairUtil};
|
||||
use solana::streamer::default_window;
|
||||
use solana::thin_client::ThinClient;
|
||||
use solana::timing::{duration_as_ms, duration_as_s};
|
||||
use solana::transaction::Transaction;
|
||||
use std::env;
|
||||
use std::fs::File;
|
||||
use std::io::{stdin, Read};
|
||||
use std::net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket};
|
||||
use std::process::exit;
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::sync::{Arc, RwLock};
|
||||
use std::thread::sleep;
|
||||
use std::thread::Builder;
|
||||
use std::thread::JoinHandle;
|
||||
use std::time::Duration;
|
||||
use std::time::Instant;
|
||||
|
||||
fn print_usage(program: &str, opts: Options) {
|
||||
let mut brief = format!("Usage: cat <mint.json> | {} [options]\n\n", program);
|
||||
brief += " Solana client demo creates a number of transactions and\n";
|
||||
brief += " sends them to a target node.";
|
||||
brief += " Takes json formatted mint file to stdin.";
|
||||
|
||||
print!("{}", opts.usage(&brief));
|
||||
}
|
||||
|
||||
fn sample_tx_count(
|
||||
exit: Arc<AtomicBool>,
|
||||
maxes: Arc<RwLock<Vec<(f64, u64)>>>,
|
||||
first_count: u64,
|
||||
v: ReplicatedData,
|
||||
sample_period: u64,
|
||||
) {
|
||||
let mut client = mk_client(&v);
|
||||
let mut now = Instant::now();
|
||||
let mut initial_tx_count = client.transaction_count();
|
||||
let mut max_tps = 0.0;
|
||||
let mut total;
|
||||
loop {
|
||||
let tx_count = client.transaction_count();
|
||||
let duration = now.elapsed();
|
||||
now = Instant::now();
|
||||
let sample = tx_count - initial_tx_count;
|
||||
initial_tx_count = tx_count;
|
||||
println!("{}: Transactions processed {}", v.transactions_addr, sample);
|
||||
let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
|
||||
let tps = (sample * 1_000_000_000) as f64 / ns as f64;
|
||||
if tps > max_tps {
|
||||
max_tps = tps;
|
||||
}
|
||||
println!("{}: {:.2} tps", v.transactions_addr, tps);
|
||||
total = tx_count - first_count;
|
||||
println!(
|
||||
"{}: Total Transactions processed {}",
|
||||
v.transactions_addr, total
|
||||
);
|
||||
sleep(Duration::new(sample_period, 0));
|
||||
|
||||
if exit.load(Ordering::Relaxed) {
|
||||
println!("exiting validator thread");
|
||||
maxes.write().unwrap().push((max_tps, total));
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn generate_and_send_txs(
|
||||
client: &mut ThinClient,
|
||||
tx_clients: &Vec<ThinClient>,
|
||||
mint: &Mint,
|
||||
keypairs: &Vec<KeyPair>,
|
||||
leader: &ReplicatedData,
|
||||
txs: i64,
|
||||
last_id: &mut Hash,
|
||||
threads: usize,
|
||||
) {
|
||||
println!("Signing transactions... {}", keypairs.len(),);
|
||||
let signing_start = Instant::now();
|
||||
let transactions: Vec<_> = keypairs
|
||||
.par_iter()
|
||||
.map(|keypair| Transaction::new(&mint.keypair(), keypair.pubkey(), 1, *last_id))
|
||||
.collect();
|
||||
|
||||
let duration = signing_start.elapsed();
|
||||
let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
|
||||
let bsps = txs as f64 / ns as f64;
|
||||
let nsps = ns as f64 / txs as f64;
|
||||
println!(
|
||||
"Done. {:.2} thousand signatures per second, {:.2} us per signature, {} ms total time",
|
||||
bsps * 1_000_000_f64,
|
||||
nsps / 1_000_f64,
|
||||
duration_as_ms(&duration),
|
||||
);
|
||||
|
||||
println!("Transfering {} transactions in {} batches", txs, threads);
|
||||
let transfer_start = Instant::now();
|
||||
let sz = transactions.len() / threads;
|
||||
let chunks: Vec<_> = transactions.chunks(sz).collect();
|
||||
chunks
|
||||
.into_par_iter()
|
||||
.zip(tx_clients)
|
||||
.for_each(|(txs, client)| {
|
||||
println!(
|
||||
"Transferring 1 unit {} times... to {:?}",
|
||||
txs.len(),
|
||||
leader.transactions_addr
|
||||
);
|
||||
for tx in txs {
|
||||
client.transfer_signed(tx.clone()).unwrap();
|
||||
}
|
||||
});
|
||||
println!(
|
||||
"Transfer done. {:?} ms {} tps",
|
||||
duration_as_ms(&transfer_start.elapsed()),
|
||||
txs as f32 / (duration_as_s(&transfer_start.elapsed()))
|
||||
);
|
||||
|
||||
loop {
|
||||
let new_id = client.get_last_id();
|
||||
if *last_id != new_id {
|
||||
*last_id = new_id;
|
||||
break;
|
||||
}
|
||||
sleep(Duration::from_millis(100));
|
||||
}
|
||||
}
|
||||
|
||||
fn main() {
|
||||
env_logger::init();
|
||||
let mut threads = 4usize;
|
||||
let mut num_nodes = 1usize;
|
||||
let mut time_sec = 60;
|
||||
|
||||
let mut opts = Options::new();
|
||||
opts.optopt("l", "", "leader", "leader.json");
|
||||
opts.optopt("t", "", "number of threads", &format!("{}", threads));
|
||||
opts.optopt(
|
||||
"s",
|
||||
"",
|
||||
"send transactions for this many seconds",
|
||||
&format!("{}", time_sec),
|
||||
);
|
||||
opts.optopt(
|
||||
"n",
|
||||
"",
|
||||
"number of nodes to converge to",
|
||||
&format!("{}", num_nodes),
|
||||
);
|
||||
opts.optflag("h", "help", "print help");
|
||||
let args: Vec<String> = env::args().collect();
|
||||
let matches = match opts.parse(&args[1..]) {
|
||||
Ok(m) => m,
|
||||
Err(e) => {
|
||||
eprintln!("{}", e);
|
||||
exit(1);
|
||||
}
|
||||
};
|
||||
|
||||
if matches.opt_present("h") {
|
||||
let program = args[0].clone();
|
||||
print_usage(&program, opts);
|
||||
return;
|
||||
}
|
||||
if matches.opt_present("t") {
|
||||
threads = matches.opt_str("t").unwrap().parse().expect("integer");
|
||||
}
|
||||
if matches.opt_present("n") {
|
||||
num_nodes = matches.opt_str("n").unwrap().parse().expect("integer");
|
||||
}
|
||||
if matches.opt_present("s") {
|
||||
time_sec = matches.opt_str("s").unwrap().parse().expect("integer");
|
||||
}
|
||||
|
||||
let leader = if matches.opt_present("l") {
|
||||
read_leader(matches.opt_str("l").unwrap())
|
||||
} else {
|
||||
let server_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 8000);
|
||||
ReplicatedData::new_leader(&server_addr)
|
||||
};
|
||||
|
||||
let signal = Arc::new(AtomicBool::new(false));
|
||||
let mut c_threads = vec![];
|
||||
let validators = converge(&leader, signal.clone(), num_nodes, &mut c_threads);
|
||||
assert_eq!(validators.len(), num_nodes);
|
||||
|
||||
if is(Stream::Stdin) {
|
||||
eprintln!("nothing found on stdin, expected a json file");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
let mut buffer = String::new();
|
||||
let num_bytes = stdin().read_to_string(&mut buffer).unwrap();
|
||||
if num_bytes == 0 {
|
||||
eprintln!("empty file on stdin, expected a json file");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
println!("Parsing stdin...");
|
||||
let mint: Mint = serde_json::from_str(&buffer).unwrap_or_else(|e| {
|
||||
eprintln!("failed to parse json: {}", e);
|
||||
exit(1);
|
||||
});
|
||||
let mut client = mk_client(&leader);
|
||||
|
||||
println!("Get last ID...");
|
||||
let mut last_id = client.get_last_id();
|
||||
println!("Got last ID {:?}", last_id);
|
||||
|
||||
let mut seed = [0u8; 32];
|
||||
seed.copy_from_slice(&mint.keypair().public_key_bytes()[..32]);
|
||||
let rnd = GenKeys::new(seed);
|
||||
|
||||
println!("Creating keypairs...");
|
||||
let txs = 500_000;
|
||||
let keypairs = rnd.gen_n_keypairs(txs);
|
||||
|
||||
let first_count = client.transaction_count();
|
||||
println!("initial count {}", first_count);
|
||||
|
||||
println!("Sampling tps every second...",);
|
||||
|
||||
// Set up a thread per validator to sample every period and
|
||||
// collect the max transaction rate and total tx count seen.
|
||||
let maxes = Arc::new(RwLock::new(Vec::new()));
|
||||
let sample_period = 1; // in seconds
|
||||
let v_threads: Vec<_> = validators
|
||||
.into_iter()
|
||||
.map(|v| {
|
||||
let exit = signal.clone();
|
||||
let maxes = maxes.clone();
|
||||
Builder::new()
|
||||
.name("solana-client-sample".to_string())
|
||||
.spawn(move || {
|
||||
sample_tx_count(exit, maxes, first_count, v, sample_period);
|
||||
})
|
||||
.unwrap()
|
||||
})
|
||||
.collect();
|
||||
|
||||
let clients = (0..threads).map(|_| mk_client(&leader)).collect();
|
||||
|
||||
// generate and send transactions for the specified duration
|
||||
let time = Duration::new(time_sec, 0);
|
||||
let now = Instant::now();
|
||||
while now.elapsed() < time {
|
||||
generate_and_send_txs(
|
||||
&mut client,
|
||||
&clients,
|
||||
&mint,
|
||||
&keypairs,
|
||||
&leader,
|
||||
txs,
|
||||
&mut last_id,
|
||||
threads,
|
||||
);
|
||||
}
|
||||
|
||||
// Stop the sampling threads so they will collect the stats
|
||||
signal.store(true, Ordering::Relaxed);
|
||||
for t in v_threads {
|
||||
t.join().unwrap();
|
||||
}
|
||||
|
||||
// Compute/report stats
|
||||
let mut max_of_maxes = 0.0;
|
||||
let mut total_txs = 0;
|
||||
for (max, txs) in maxes.read().unwrap().iter() {
|
||||
if *max > max_of_maxes {
|
||||
max_of_maxes = *max;
|
||||
}
|
||||
total_txs += *txs;
|
||||
}
|
||||
println!(
|
||||
"\nHighest TPS: {:.2} sampling period {}s total transactions: {} clients: {}",
|
||||
max_of_maxes,
|
||||
sample_period,
|
||||
total_txs,
|
||||
maxes.read().unwrap().len()
|
||||
);
|
||||
|
||||
// join the crdt client threads
|
||||
for t in c_threads {
|
||||
t.join().unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
fn mk_client(r: &ReplicatedData) -> ThinClient {
|
||||
let requests_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
|
||||
let transactions_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
|
||||
|
||||
requests_socket
|
||||
.set_read_timeout(Some(Duration::new(1, 0)))
|
||||
.unwrap();
|
||||
|
||||
ThinClient::new(
|
||||
r.requests_addr,
|
||||
requests_socket,
|
||||
r.transactions_addr,
|
||||
transactions_socket,
|
||||
)
|
||||
}
|
||||
|
||||
fn spy_node() -> (ReplicatedData, UdpSocket) {
|
||||
let gossip_socket_pair = udp_public_bind("gossip");
|
||||
let pubkey = KeyPair::new().pubkey();
|
||||
let daddr = "0.0.0.0:0".parse().unwrap();
|
||||
let node = ReplicatedData::new(
|
||||
pubkey,
|
||||
//gossip.local_addr().unwrap(),
|
||||
gossip_socket_pair.addr,
|
||||
daddr,
|
||||
daddr,
|
||||
daddr,
|
||||
daddr,
|
||||
);
|
||||
(node, gossip_socket_pair.receiver)
|
||||
}
|
||||
|
||||
fn converge(
|
||||
leader: &ReplicatedData,
|
||||
exit: Arc<AtomicBool>,
|
||||
num_nodes: usize,
|
||||
threads: &mut Vec<JoinHandle<()>>,
|
||||
) -> Vec<ReplicatedData> {
|
||||
// let's spy on the network
|
||||
let daddr = "0.0.0.0:0".parse().unwrap();
|
||||
let (spy, spy_gossip) = spy_node();
|
||||
let mut spy_crdt = Crdt::new(spy);
|
||||
spy_crdt.insert(&leader);
|
||||
spy_crdt.set_leader(leader.id);
|
||||
let spy_ref = Arc::new(RwLock::new(spy_crdt));
|
||||
let window = default_window();
|
||||
let gossip_send_socket = UdpSocket::bind("0.0.0.0:0").expect("bind 0");
|
||||
let ncp = Ncp::new(
|
||||
spy_ref.clone(),
|
||||
window.clone(),
|
||||
spy_gossip,
|
||||
gossip_send_socket,
|
||||
exit.clone(),
|
||||
).expect("DataReplicator::new");
|
||||
let mut rv = vec![];
|
||||
//wait for the network to converge, 30 seconds should be plenty
|
||||
for _ in 0..30 {
|
||||
let v: Vec<ReplicatedData> = spy_ref
|
||||
.read()
|
||||
.unwrap()
|
||||
.table
|
||||
.values()
|
||||
.into_iter()
|
||||
.filter(|x| x.requests_addr != daddr)
|
||||
.cloned()
|
||||
.collect();
|
||||
if v.len() >= num_nodes {
|
||||
println!("CONVERGED!");
|
||||
rv.extend(v.into_iter());
|
||||
break;
|
||||
}
|
||||
sleep(Duration::new(1, 0));
|
||||
}
|
||||
threads.extend(ncp.thread_hdls.into_iter());
|
||||
rv
|
||||
}
|
||||
|
||||
fn read_leader(path: String) -> ReplicatedData {
|
||||
let file = File::open(path.clone()).expect(&format!("file not found: {}", path));
|
||||
serde_json::from_reader(file).expect(&format!("failed to parse {}", path))
|
||||
}
|
@ -1,27 +0,0 @@
|
||||
extern crate silk;
|
||||
|
||||
use silk::historian::Historian;
|
||||
use silk::log::{verify_slice, Entry, Event, Sha256Hash};
|
||||
use std::thread::sleep;
|
||||
use std::time::Duration;
|
||||
use std::sync::mpsc::SendError;
|
||||
|
||||
fn create_log(hist: &Historian) -> Result<(), SendError<Event>> {
|
||||
sleep(Duration::from_millis(15));
|
||||
let data = Sha256Hash::default();
|
||||
hist.sender.send(Event::Discovery { data })?;
|
||||
sleep(Duration::from_millis(10));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn main() {
|
||||
let seed = Sha256Hash::default();
|
||||
let hist = Historian::new(&seed, Some(10));
|
||||
create_log(&hist).expect("send error");
|
||||
drop(hist.sender);
|
||||
let entries: Vec<Entry> = hist.receiver.iter().collect();
|
||||
for entry in &entries {
|
||||
println!("{:?}", entry);
|
||||
}
|
||||
assert!(verify_slice(&entries, &seed));
|
||||
}
|
src/bin/drone.rs (new file, 167 lines)
@@ -0,0 +1,167 @@
|
||||
extern crate atty;
|
||||
extern crate bincode;
|
||||
extern crate env_logger;
|
||||
extern crate getopts;
|
||||
extern crate serde_json;
|
||||
extern crate solana;
|
||||
extern crate tokio;
|
||||
extern crate tokio_codec;
|
||||
extern crate tokio_io;
|
||||
|
||||
use atty::{is, Stream as atty_stream};
|
||||
use bincode::deserialize;
|
||||
use getopts::Options;
|
||||
use solana::crdt::ReplicatedData;
|
||||
use solana::drone::{Drone, DroneRequest};
|
||||
use solana::mint::Mint;
|
||||
use std::env;
|
||||
use std::fs::File;
|
||||
use std::io::{stdin, Read};
|
||||
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
|
||||
use std::process::exit;
|
||||
use std::sync::{Arc, Mutex};
|
||||
use std::thread;
|
||||
use tokio::net::TcpListener;
|
||||
use tokio::prelude::*;
|
||||
use tokio_codec::{BytesCodec, Decoder};
|
||||
|
||||
fn print_usage(program: &str, opts: Options) {
|
||||
let mut brief = format!("Usage: cat <mint.json> | {} [options]\n\n", program);
|
||||
brief += " Run a Solana Drone to act as the custodian of the mint's remaining tokens\n";
|
||||
|
||||
print!("{}", opts.usage(&brief));
|
||||
}
|
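// The drone listens for TCP connections on port 9900 (see `drone_addr` below); each
// connection is expected to deliver a bincode-serialized `DroneRequest`, the same
// message that `request_airdrop` in src/bin/wallet.rs writes on the client side.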
||||
|
||||
fn main() {
|
||||
env_logger::init();
|
||||
let mut opts = Options::new();
|
||||
opts.optopt(
|
||||
"t",
|
||||
"",
|
||||
"time",
|
||||
"time slice over which to limit token requests to drone",
|
||||
);
|
||||
opts.optopt("c", "", "cap", "request limit for time slice");
|
||||
opts.optopt("l", "", "leader", "leader.json");
|
||||
opts.optflag("h", "help", "print help");
|
||||
let args: Vec<String> = env::args().collect();
|
||||
let matches = match opts.parse(&args[1..]) {
|
||||
Ok(m) => m,
|
||||
Err(e) => {
|
||||
eprintln!("{}", e);
|
||||
exit(1);
|
||||
}
|
||||
};
|
||||
if matches.opt_present("h") {
|
||||
let program = args[0].clone();
|
||||
print_usage(&program, opts);
|
||||
return;
|
||||
}
|
||||
let time_slice: Option<u64>;
|
||||
if matches.opt_present("t") {
|
||||
time_slice = matches
|
||||
.opt_str("t")
|
||||
.expect("unexpected string from input")
|
||||
.parse()
|
||||
.ok();
|
||||
} else {
|
||||
time_slice = None;
|
||||
}
|
||||
let request_cap: Option<u64>;
|
||||
if matches.opt_present("c") {
|
||||
request_cap = matches
|
||||
.opt_str("c")
|
||||
.expect("unexpected string from input")
|
||||
.parse()
|
||||
.ok();
|
||||
} else {
|
||||
request_cap = None;
|
||||
}
|
||||
let leader = if matches.opt_present("l") {
|
||||
read_leader(matches.opt_str("l").unwrap())
|
||||
} else {
|
||||
let server_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 8000);
|
||||
ReplicatedData::new_leader(&server_addr)
|
||||
};
|
||||
|
||||
if is(atty_stream::Stdin) {
|
||||
eprintln!("nothing found on stdin, expected a json file");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
let mut buffer = String::new();
|
||||
let num_bytes = stdin().read_to_string(&mut buffer).unwrap();
|
||||
if num_bytes == 0 {
|
||||
eprintln!("empty file on stdin, expected a json file");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
let mint: Mint = serde_json::from_str(&buffer).unwrap_or_else(|e| {
|
||||
eprintln!("failed to parse json: {}", e);
|
||||
exit(1);
|
||||
});
|
||||
|
||||
let mint_keypair = mint.keypair();
|
||||
|
||||
let drone_addr: SocketAddr = "0.0.0.0:9900".parse().unwrap();
|
||||
|
||||
let drone = Arc::new(Mutex::new(Drone::new(
|
||||
mint_keypair,
|
||||
drone_addr,
|
||||
leader.transactions_addr,
|
||||
leader.requests_addr,
|
||||
time_slice,
|
||||
request_cap,
|
||||
)));
|
||||
|
||||
let drone1 = drone.clone();
|
||||
thread::spawn(move || loop {
|
||||
let time = drone1.lock().unwrap().time_slice;
|
||||
thread::sleep(time);
|
||||
drone1.lock().unwrap().clear_request_count();
|
||||
});
|
||||
|
||||
let socket = TcpListener::bind(&drone_addr).unwrap();
|
||||
println!("Drone started. Listening on: {}", drone_addr);
|
||||
let done = socket
|
||||
.incoming()
|
||||
.map_err(|e| println!("failed to accept socket; error = {:?}", e))
|
||||
.for_each(move |socket| {
|
||||
let drone2 = drone.clone();
|
||||
// let client_ip = socket.peer_addr().expect("drone peer_addr").ip();
|
||||
let framed = BytesCodec::new().framed(socket);
|
||||
let (_writer, reader) = framed.split();
|
||||
|
||||
let processor = reader
|
||||
.for_each(move |bytes| {
|
||||
let req: DroneRequest =
|
||||
deserialize(&bytes).expect("deserialize packet in drone");
|
||||
println!("Airdrop requested...");
|
||||
// let res = drone2.lock().unwrap().check_rate_limit(client_ip);
|
||||
let res1 = drone2.lock().unwrap().send_airdrop(req);
|
||||
match res1 {
|
||||
Ok(_) => println!("Airdrop sent!"),
|
||||
Err(_) => println!("Request limit reached for this time slice"),
|
||||
}
|
||||
Ok(())
|
||||
})
|
||||
.and_then(|()| {
|
||||
println!("Socket received FIN packet and closed connection");
|
||||
Ok(())
|
||||
})
|
||||
.or_else(|err| {
|
||||
println!("Socket closed with error: {:?}", err);
|
||||
Err(err)
|
||||
})
|
||||
.then(|result| {
|
||||
println!("Socket closed with result: {:?}", result);
|
||||
Ok(())
|
||||
});
|
||||
tokio::spawn(processor)
|
||||
});
|
||||
tokio::run(done);
|
||||
}
|
||||
fn read_leader(path: String) -> ReplicatedData {
|
||||
let file = File::open(path.clone()).expect(&format!("file not found: {}", path));
|
||||
serde_json::from_reader(file).expect(&format!("failed to parse {}", path))
|
||||
}
|
src/bin/fullnode-config.rs (new file, 66 lines)
@@ -0,0 +1,66 @@
|
||||
extern crate getopts;
|
||||
extern crate serde_json;
|
||||
extern crate solana;
|
||||
|
||||
use getopts::Options;
|
||||
use solana::crdt::{get_ip_addr, parse_port_or_addr, ReplicatedData};
|
||||
use solana::nat::get_public_ip_addr;
|
||||
use std::env;
|
||||
use std::io;
|
||||
use std::net::SocketAddr;
|
||||
use std::process::exit;
|
||||
|
||||
fn print_usage(program: &str, opts: Options) {
|
||||
let mut brief = format!("Usage: {} [options]\n\n", program);
|
||||
brief += " Create a solana fullnode config file\n";
|
||||
|
||||
print!("{}", opts.usage(&brief));
|
||||
}
|
||||
|
||||
fn main() {
|
||||
let mut opts = Options::new();
|
||||
opts.optopt("b", "", "bind", "bind to port or address");
|
||||
opts.optflag(
|
||||
"p",
|
||||
"",
|
||||
"detect public network address using public servers",
|
||||
);
|
||||
opts.optflag(
|
||||
"l",
|
||||
"",
|
||||
"detect network address from local machine configuration",
|
||||
);
|
||||
opts.optflag("h", "help", "print help");
|
||||
let args: Vec<String> = env::args().collect();
|
||||
let matches = match opts.parse(&args[1..]) {
|
||||
Ok(m) => m,
|
||||
Err(e) => {
|
||||
eprintln!("{}", e);
|
||||
exit(1);
|
||||
}
|
||||
};
|
||||
if matches.opt_present("h") {
|
||||
let program = args[0].clone();
|
||||
print_usage(&program, opts);
|
||||
return;
|
||||
}
|
||||
|
||||
let bind_addr: SocketAddr = {
|
||||
let mut bind_addr = parse_port_or_addr(matches.opt_str("b"));
|
||||
if matches.opt_present("l") {
|
||||
let ip = get_ip_addr().unwrap();
|
||||
bind_addr.set_ip(ip);
|
||||
}
|
||||
if matches.opt_present("p") {
|
||||
let ip = get_public_ip_addr().unwrap();
|
||||
bind_addr.set_ip(ip);
|
||||
}
|
||||
bind_addr
|
||||
};
|
||||
|
||||
// we need all the receiving sockets to be bound within the expected
|
||||
// port range that we open on AWS
|
||||
let repl_data = ReplicatedData::new_leader(&bind_addr);
|
||||
let stdout = io::stdout();
|
||||
serde_json::to_writer(stdout, &repl_data).expect("serialize");
|
||||
}
|
src/bin/fullnode.rs (new file, 96 lines)
@@ -0,0 +1,96 @@
|
||||
extern crate atty;
|
||||
extern crate env_logger;
|
||||
extern crate getopts;
|
||||
extern crate log;
|
||||
extern crate serde_json;
|
||||
extern crate solana;
|
||||
|
||||
use atty::{is, Stream};
|
||||
use getopts::Options;
|
||||
use solana::crdt::{ReplicatedData, TestNode};
|
||||
use solana::fullnode::FullNode;
|
||||
use std::env;
|
||||
use std::fs::File;
|
||||
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
|
||||
use std::process::exit;
|
||||
use std::sync::atomic::AtomicBool;
|
||||
use std::sync::Arc;
|
||||
//use std::time::Duration;
|
||||
|
||||
fn print_usage(program: &str, opts: Options) {
|
||||
let mut brief = format!("Usage: cat <transaction.log> | {} [options]\n\n", program);
|
||||
brief += " Run a Solana node to handle transactions and\n";
|
||||
brief += " write a new transaction log to stdout.\n";
|
||||
brief += " Takes existing transaction log from stdin.";
|
||||
|
||||
print!("{}", opts.usage(&brief));
|
||||
}
|
||||
|
||||
fn main() -> () {
|
||||
env_logger::init();
|
||||
let mut opts = Options::new();
|
||||
opts.optflag("h", "help", "print help");
|
||||
opts.optopt("l", "", "run with the identity found in FILE", "FILE");
|
||||
opts.optopt(
|
||||
"t",
|
||||
"",
|
||||
"testnet; connect to the network at this gossip entry point",
|
||||
"HOST:PORT",
|
||||
);
|
||||
opts.optopt(
|
||||
"o",
|
||||
"",
|
||||
"output log to FILE, defaults to stdout (ignored by validators)",
|
||||
"FILE",
|
||||
);
|
||||
|
||||
let args: Vec<String> = env::args().collect();
|
||||
let matches = match opts.parse(&args[1..]) {
|
||||
Ok(m) => m,
|
||||
Err(e) => {
|
||||
eprintln!("{}", e);
|
||||
exit(1);
|
||||
}
|
||||
};
|
||||
if matches.opt_present("h") {
|
||||
let program = args[0].clone();
|
||||
print_usage(&program, opts);
|
||||
return;
|
||||
}
|
||||
if is(Stream::Stdin) {
|
||||
eprintln!("nothing found on stdin, expected a log file");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
let bind_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 8000);
|
||||
let mut repl_data = ReplicatedData::new_leader(&bind_addr);
|
||||
if matches.opt_present("l") {
|
||||
let path = matches.opt_str("l").unwrap();
|
||||
if let Ok(file) = File::open(path.clone()) {
|
||||
if let Ok(data) = serde_json::from_reader(file) {
|
||||
repl_data = data;
|
||||
} else {
|
||||
eprintln!("failed to parse {}", path);
|
||||
exit(1);
|
||||
}
|
||||
} else {
|
||||
eprintln!("failed to read {}", path);
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
let mut node = TestNode::new_with_bind_addr(repl_data, bind_addr);
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
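// If `-t` was given, join the network at the supplied gossip entry point as a
// validator; otherwise run as the leader, set `current_leader_id` to our own id, and
// write the transaction log to the `-o` file (stdout by default; validators ignore it).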
||||
let fullnode = if matches.opt_present("t") {
|
||||
let testnet_address_string = matches.opt_str("t").unwrap();
|
||||
let testnet_addr = testnet_address_string.parse().unwrap();
|
||||
FullNode::new(node, false, None, Some(testnet_addr), None, exit)
|
||||
} else {
|
||||
node.data.current_leader_id = node.data.id.clone();
|
||||
|
||||
let outfile = matches.opt_str("o");
|
||||
FullNode::new(node, true, None, None, outfile, exit)
|
||||
};
|
||||
for t in fullnode.thread_hdls {
|
||||
t.join().expect("join");
|
||||
}
|
||||
}
|
src/bin/genesis.rs (new file, 31 lines)
@@ -0,0 +1,31 @@
|
||||
//! A command-line executable for generating the chain's genesis block.
|
||||
|
||||
extern crate atty;
|
||||
extern crate serde_json;
|
||||
extern crate solana;
|
||||
|
||||
use atty::{is, Stream};
|
||||
use solana::entry_writer::EntryWriter;
|
||||
use solana::mint::Mint;
|
||||
use std::error;
|
||||
use std::io::{stdin, stdout, Read};
|
||||
use std::process::exit;
|
||||
|
||||
fn main() -> Result<(), Box<error::Error>> {
|
||||
if is(Stream::Stdin) {
|
||||
eprintln!("nothing found on stdin, expected a json file");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
let mut buffer = String::new();
|
||||
let num_bytes = stdin().read_to_string(&mut buffer)?;
|
||||
if num_bytes == 0 {
|
||||
eprintln!("empty file on stdin, expected a json file");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
let mint: Mint = serde_json::from_str(&buffer)?;
|
||||
let mut writer = stdout();
|
||||
EntryWriter::write_entries(&mut writer, mint.create_entries())?;
|
||||
Ok(())
|
||||
}
|
src/bin/mint.rs (new file, 29 lines)
@@ -0,0 +1,29 @@
|
||||
extern crate atty;
|
||||
extern crate serde_json;
|
||||
extern crate solana;
|
||||
|
||||
use atty::{is, Stream};
|
||||
use solana::mint::Mint;
|
||||
use std::io;
|
||||
use std::process::exit;
|
||||
|
||||
fn main() {
|
||||
let mut input_text = String::new();
|
||||
if is(Stream::Stdin) {
|
||||
eprintln!("nothing found on stdin, expected a token number");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
io::stdin().read_line(&mut input_text).unwrap();
|
||||
let trimmed = input_text.trim();
|
||||
let tokens = trimmed.parse::<i64>().unwrap_or_else(|e| {
|
||||
eprintln!("{}", e);
|
||||
exit(1);
|
||||
});
|
||||
let mint = Mint::new(tokens);
|
||||
let serialized = serde_json::to_string(&mint).unwrap_or_else(|e| {
|
||||
eprintln!("failed to serialize: {}", e);
|
||||
exit(1);
|
||||
});
|
||||
println!("{}", serialized);
|
||||
}
|
src/bin/wallet.rs (new file, 337 lines)
@@ -0,0 +1,337 @@
|
||||
extern crate atty;
|
||||
extern crate bincode;
|
||||
extern crate bs58;
|
||||
extern crate clap;
|
||||
extern crate env_logger;
|
||||
extern crate getopts;
|
||||
extern crate serde_json;
|
||||
extern crate solana;
|
||||
|
||||
use bincode::serialize;
|
||||
use clap::{App, Arg, SubCommand};
|
||||
use solana::crdt::ReplicatedData;
|
||||
use solana::drone::DroneRequest;
|
||||
use solana::mint::Mint;
|
||||
use solana::signature::{PublicKey, Signature};
|
||||
use solana::thin_client::ThinClient;
|
||||
use std::error;
|
||||
use std::fmt;
|
||||
use std::fs::File;
|
||||
use std::io;
|
||||
use std::io::prelude::*;
|
||||
use std::net::{IpAddr, Ipv4Addr, SocketAddr, TcpStream, UdpSocket};
|
||||
use std::process::exit;
|
||||
use std::thread::sleep;
|
||||
use std::time::Duration;
|
||||
|
||||
enum WalletCommand {
|
||||
Address,
|
||||
Balance,
|
||||
AirDrop(i64),
|
||||
Pay(i64, PublicKey),
|
||||
Confirm(Signature),
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
enum WalletError {
|
||||
CommandNotRecognized(String),
|
||||
BadParameter(String),
|
||||
}
|
||||
|
||||
impl fmt::Display for WalletError {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(f, "invalid")
|
||||
}
|
||||
}
|
||||
|
||||
impl error::Error for WalletError {
|
||||
fn description(&self) -> &str {
|
||||
"invalid"
|
||||
}
|
||||
|
||||
fn cause(&self) -> Option<&error::Error> {
|
||||
// Generic error, underlying cause isn't tracked.
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
struct WalletConfig {
|
||||
leader: ReplicatedData,
|
||||
id: Mint,
|
||||
drone_addr: SocketAddr,
|
||||
command: WalletCommand,
|
||||
}
|
||||
|
||||
impl Default for WalletConfig {
|
||||
fn default() -> WalletConfig {
|
||||
let default_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 8000);
|
||||
WalletConfig {
|
||||
leader: ReplicatedData::new_leader(&default_addr.clone()),
|
||||
id: Mint::new(0),
|
||||
drone_addr: default_addr.clone(),
|
||||
command: WalletCommand::Balance,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_args() -> Result<WalletConfig, Box<error::Error>> {
|
||||
let matches = App::new("solana-wallet")
|
||||
.arg(
|
||||
Arg::with_name("leader")
|
||||
.short("l")
|
||||
.long("leader")
|
||||
.value_name("PATH")
|
||||
.takes_value(true)
|
||||
.help("/path/to/leader.json"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("mint")
|
||||
.short("m")
|
||||
.long("mint")
|
||||
.value_name("PATH")
|
||||
.takes_value(true)
|
||||
.help("/path/to/mint.json"),
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("airdrop")
|
||||
.about("Request a batch of tokens")
|
||||
.arg(
|
||||
Arg::with_name("tokens")
|
||||
// .index(1)
|
||||
.long("tokens")
|
||||
.value_name("NUMBER")
|
||||
.takes_value(true)
|
||||
.help("The number of tokens to request"),
|
||||
),
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("pay")
|
||||
.about("Send a payment")
|
||||
.arg(
|
||||
Arg::with_name("tokens")
|
||||
// .index(2)
|
||||
.long("tokens")
|
||||
.value_name("NUMBER")
|
||||
.takes_value(true)
|
||||
.required(true)
|
||||
.help("the number of tokens to send"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("to")
|
||||
// .index(1)
|
||||
.long("to")
|
||||
.value_name("PUBKEY")
|
||||
.takes_value(true)
|
||||
.required(true)
|
||||
.help("The pubkey of recipient"),
|
||||
),
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("confirm")
|
||||
.about("Confirm your payment by signature")
|
||||
.arg(
|
||||
Arg::with_name("signature")
|
||||
.index(1)
|
||||
.value_name("SIGNATURE")
|
||||
.required(true)
|
||||
.help("The transaction signature to confirm"),
|
||||
),
|
||||
)
|
||||
.subcommand(SubCommand::with_name("balance").about("Get your balance"))
|
||||
.subcommand(SubCommand::with_name("address").about("Get your public key"))
|
||||
.get_matches();
|
||||
|
||||
let leader: ReplicatedData;
|
||||
if let Some(l) = matches.value_of("leader") {
|
||||
leader = read_leader(l.to_string());
|
||||
} else {
|
||||
let server_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 8000);
|
||||
leader = ReplicatedData::new_leader(&server_addr);
|
||||
};
|
||||
|
||||
let id: Mint;
|
||||
if let Some(m) = matches.value_of("mint") {
|
||||
id = read_mint(m.to_string())?;
|
||||
} else {
|
||||
eprintln!("No mint found!");
|
||||
exit(1);
|
||||
};
|
||||
|
||||
let mut drone_addr = leader.transactions_addr.clone();
|
||||
drone_addr.set_port(9900);
|
||||
|
||||
let command = match matches.subcommand() {
|
||||
("airdrop", Some(airdrop_matches)) => {
|
||||
let mut tokens: i64 = id.tokens;
|
||||
if airdrop_matches.is_present("tokens") {
|
||||
tokens = airdrop_matches.value_of("tokens").unwrap().parse()?;
|
||||
}
|
||||
Ok(WalletCommand::AirDrop(tokens))
|
||||
}
|
||||
("pay", Some(pay_matches)) => {
|
||||
let to: PublicKey;
|
||||
if pay_matches.is_present("to") {
|
||||
let pubkey_vec = bs58::decode(pay_matches.value_of("to").unwrap())
|
||||
.into_vec()
|
||||
.expect("base58-encoded public key");
|
||||
|
||||
if pubkey_vec.len() != std::mem::size_of::<PublicKey>() {
|
||||
display_actions();
|
||||
Err(WalletError::BadParameter("Invalid public key".to_string()))?;
|
||||
}
|
||||
to = PublicKey::clone_from_slice(&pubkey_vec);
|
||||
} else {
|
||||
to = id.pubkey();
|
||||
}
|
||||
let mut tokens: i64 = id.tokens;
|
||||
if pay_matches.is_present("tokens") {
|
||||
tokens = pay_matches.value_of("tokens").unwrap().parse()?;
|
||||
}
|
||||
Ok(WalletCommand::Pay(tokens, to))
|
||||
}
|
||||
("confirm", Some(confirm_matches)) => {
|
||||
let sig_vec = bs58::decode(confirm_matches.value_of("signature").unwrap())
|
||||
.into_vec()
|
||||
.expect("base58-encoded signature");
|
||||
|
||||
if sig_vec.len() == std::mem::size_of::<Signature>() {
|
||||
let sig = Signature::clone_from_slice(&sig_vec);
|
||||
Ok(WalletCommand::Confirm(sig))
|
||||
} else {
|
||||
display_actions();
|
||||
Err(WalletError::BadParameter("Invalid signature".to_string()))
|
||||
}
|
||||
}
|
||||
("balance", Some(_balance_matches)) => Ok(WalletCommand::Balance),
|
||||
("address", Some(_address_matches)) => Ok(WalletCommand::Address),
|
||||
("", None) => {
|
||||
display_actions();
|
||||
Err(WalletError::CommandNotRecognized(
|
||||
"no subcommand given".to_string(),
|
||||
))
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}?;
|
||||
|
||||
Ok(WalletConfig {
|
||||
leader,
|
||||
id,
|
||||
drone_addr, // TODO: Add an option for this.
|
||||
command,
|
||||
})
|
||||
}
|
||||
|
||||
fn process_command(
|
||||
config: &WalletConfig,
|
||||
client: &mut ThinClient,
|
||||
) -> Result<(), Box<error::Error>> {
|
||||
match config.command {
|
||||
// Check client balance
|
||||
WalletCommand::Address => {
|
||||
println!("{}", bs58::encode(config.id.pubkey()).into_string());
|
||||
}
|
||||
WalletCommand::Balance => {
|
||||
println!("Balance requested...");
|
||||
let balance = client.poll_get_balance(&config.id.pubkey());
|
||||
match balance {
|
||||
Ok(balance) => {
|
||||
println!("Your balance is: {:?}", balance);
|
||||
}
|
||||
Err(ref e) if e.kind() == std::io::ErrorKind::Other => {
|
||||
println!("No account found! Request an airdrop to get started.");
|
||||
}
|
||||
Err(error) => {
|
||||
println!("An error occurred: {:?}", error);
|
||||
}
|
||||
}
|
||||
}
|
||||
// Request an airdrop from Solana Drone;
|
||||
// Request amount is set in request_airdrop function
|
||||
WalletCommand::AirDrop(tokens) => {
|
||||
println!("Airdrop requested...");
|
||||
println!("Airdropping {:?} tokens", tokens);
|
||||
let _airdrop = request_airdrop(&config.drone_addr, &config.id, tokens as u64)?;
|
||||
// TODO: return airdrop Result from Drone
|
||||
sleep(Duration::from_millis(100));
|
||||
println!(
|
||||
"Your balance is: {:?}",
|
||||
client.poll_get_balance(&config.id.pubkey()).unwrap()
|
||||
);
|
||||
}
|
||||
// Send `tokens` from the client's identity keypair to the recipient `to`
|
||||
WalletCommand::Pay(tokens, to) => {
|
||||
let last_id = client.get_last_id();
|
||||
let sig = client.transfer(tokens, &config.id.keypair(), to, &last_id)?;
|
||||
println!("{}", bs58::encode(sig).into_string());
|
||||
}
|
||||
// Confirm the last client transaction by signature
|
||||
WalletCommand::Confirm(sig) => {
|
||||
if client.check_signature(&sig) {
|
||||
println!("Confirmed");
|
||||
} else {
|
||||
println!("Not found");
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn display_actions() {
|
||||
println!("");
|
||||
println!("Commands:");
|
||||
println!(" address Get your public key");
|
||||
println!(" balance Get your account balance");
|
||||
println!(" airdrop Request a batch of tokens");
|
||||
println!(" pay Send tokens to a public key");
|
||||
println!(" confirm Confirm your last payment by signature");
|
||||
println!("");
|
||||
}
|
||||
|
||||
fn read_leader(path: String) -> ReplicatedData {
|
||||
let file = File::open(path.clone()).expect(&format!("file not found: {}", path));
|
||||
serde_json::from_reader(file).expect(&format!("failed to parse {}", path))
|
||||
}
|
||||
|
||||
fn read_mint(path: String) -> Result<Mint, Box<error::Error>> {
|
||||
let file = File::open(path.clone())?;
|
||||
let mint = serde_json::from_reader(file)?;
|
||||
Ok(mint)
|
||||
}
|
||||
|
||||
fn mk_client(r: &ReplicatedData) -> io::Result<ThinClient> {
|
||||
let requests_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
|
||||
let transactions_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
|
||||
requests_socket
|
||||
.set_read_timeout(Some(Duration::new(1, 0)))
|
||||
.unwrap();
|
||||
|
||||
Ok(ThinClient::new(
|
||||
r.requests_addr,
|
||||
requests_socket,
|
||||
r.transactions_addr,
|
||||
transactions_socket,
|
||||
))
|
||||
}
|
||||
|
||||
fn request_airdrop(
|
||||
drone_addr: &SocketAddr,
|
||||
id: &Mint,
|
||||
tokens: u64,
|
||||
) -> Result<(), Box<error::Error>> {
|
||||
let mut stream = TcpStream::connect(drone_addr)?;
|
||||
let req = DroneRequest::GetAirdrop {
|
||||
airdrop_request_amount: tokens,
|
||||
client_public_key: id.pubkey(),
|
||||
};
|
||||
let tx = serialize(&req).expect("serialize drone request");
|
||||
stream.write_all(&tx).unwrap();
|
||||
// TODO: add timeout to this function, in case of unresponsive drone
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn main() -> Result<(), Box<error::Error>> {
|
||||
env_logger::init();
|
||||
let config = parse_args()?;
|
||||
let mut client = mk_client(&config.leader)?;
|
||||
process_command(&config, &mut client)
|
||||
}
|
src/blob_fetch_stage.rs (new file, 43 lines)
@@ -0,0 +1,43 @@
|
||||
//! The `blob_fetch_stage` pulls blobs from UDP sockets and sends them to a channel.
|
||||
|
||||
use packet::BlobRecycler;
|
||||
use std::net::UdpSocket;
|
||||
use std::sync::atomic::AtomicBool;
|
||||
use std::sync::mpsc::channel;
|
||||
use std::sync::Arc;
|
||||
use std::thread::JoinHandle;
|
||||
use streamer::{self, BlobReceiver};
|
||||
|
||||
pub struct BlobFetchStage {
|
||||
pub thread_hdls: Vec<JoinHandle<()>>,
|
||||
}
|
||||
|
||||
impl BlobFetchStage {
|
||||
pub fn new(
|
||||
socket: UdpSocket,
|
||||
exit: Arc<AtomicBool>,
|
||||
blob_recycler: BlobRecycler,
|
||||
) -> (Self, BlobReceiver) {
|
||||
Self::new_multi_socket(vec![socket], exit, blob_recycler)
|
||||
}
|
||||
pub fn new_multi_socket(
|
||||
sockets: Vec<UdpSocket>,
|
||||
exit: Arc<AtomicBool>,
|
||||
blob_recycler: BlobRecycler,
|
||||
) -> (Self, BlobReceiver) {
|
||||
let (blob_sender, blob_receiver) = channel();
|
||||
let thread_hdls: Vec<_> = sockets
|
||||
.into_iter()
|
||||
.map(|socket| {
|
||||
streamer::blob_receiver(
|
||||
exit.clone(),
|
||||
blob_recycler.clone(),
|
||||
socket,
|
||||
blob_sender.clone(),
|
||||
).expect("blob receiver init")
|
||||
})
|
||||
.collect();
|
||||
|
||||
(BlobFetchStage { thread_hdls }, blob_receiver)
|
||||
}
|
||||
}
|
src/budget.rs (new file, 175 lines)
@@ -0,0 +1,175 @@
|
||||
//! The `budget` module provides a domain-specific language for payment plans. Users create Budget objects that
|
||||
//! are given to an interpreter. The interpreter listens for `Witness` transactions,
|
||||
//! which it uses to reduce the payment plan. When the budget is reduced to a
|
||||
//! `Payment`, the payment is executed.
|
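//!
//! For example, `Budget::new_cancelable_future_payment(dt, from, tokens, to)` below starts
//! out as an `Or` of two conditional payments: a `Witness::Timestamp` at or after `dt`
//! reduces it to `Pay(Payment { tokens, to })`, while a `Witness::Signature(from)` reduces
//! it to a payment that returns the tokens to `from`.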
||||
|
||||
use chrono::prelude::*;
|
||||
use payment_plan::{Payment, PaymentPlan, Witness};
|
||||
use signature::PublicKey;
|
||||
use std::mem;
|
||||
|
||||
/// A data type representing a `Witness` that the payment plan is waiting on.
|
||||
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
|
||||
pub enum Condition {
|
||||
/// Wait for a `Timestamp` `Witness` at or after the given `DateTime`.
|
||||
Timestamp(DateTime<Utc>),
|
||||
|
||||
/// Wait for a `Signature` `Witness` from `PublicKey`.
|
||||
Signature(PublicKey),
|
||||
}
|
||||
|
||||
impl Condition {
|
||||
/// Return true if the given Witness satisfies this Condition.
|
||||
pub fn is_satisfied(&self, witness: &Witness) -> bool {
|
||||
match (self, witness) {
|
||||
(Condition::Signature(pubkey), Witness::Signature(from)) => pubkey == from,
|
||||
(Condition::Timestamp(dt), Witness::Timestamp(last_time)) => dt <= last_time,
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A data type representing a payment plan.
|
||||
#[repr(C)]
|
||||
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
|
||||
pub enum Budget {
|
||||
/// Make a payment.
|
||||
Pay(Payment),
|
||||
|
||||
/// Make a payment after some condition.
|
||||
After(Condition, Payment),
|
||||
|
||||
/// Either make a payment after one condition or a different payment after another
|
||||
/// condition, whichever condition is satisfied first.
|
||||
Or((Condition, Payment), (Condition, Payment)),
|
||||
}
|
||||
|
||||
impl Budget {
|
||||
/// Create the simplest budget - one that pays `tokens` to PublicKey.
|
||||
pub fn new_payment(tokens: i64, to: PublicKey) -> Self {
|
||||
Budget::Pay(Payment { tokens, to })
|
||||
}
|
||||
|
||||
/// Create a budget that pays `tokens` to `to` after being witnessed by `from`.
|
||||
pub fn new_authorized_payment(from: PublicKey, tokens: i64, to: PublicKey) -> Self {
|
||||
Budget::After(Condition::Signature(from), Payment { tokens, to })
|
||||
}
|
||||
|
||||
/// Create a budget that pays `tokens` to `to` after the given DateTime.
|
||||
pub fn new_future_payment(dt: DateTime<Utc>, tokens: i64, to: PublicKey) -> Self {
|
||||
Budget::After(Condition::Timestamp(dt), Payment { tokens, to })
|
||||
}
|
||||
|
||||
/// Create a budget that pays `tokens` to `to` after the given DateTime
|
||||
/// unless cancelled by `from`.
|
||||
pub fn new_cancelable_future_payment(
|
||||
dt: DateTime<Utc>,
|
||||
from: PublicKey,
|
||||
tokens: i64,
|
||||
to: PublicKey,
|
||||
) -> Self {
|
||||
Budget::Or(
|
||||
(Condition::Timestamp(dt), Payment { tokens, to }),
|
||||
(Condition::Signature(from), Payment { tokens, to: from }),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl PaymentPlan for Budget {
|
||||
/// Return Payment if the budget requires no additional Witnesses.
|
||||
fn final_payment(&self) -> Option<Payment> {
|
||||
match self {
|
||||
Budget::Pay(payment) => Some(payment.clone()),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Return true if the budget spends exactly `spendable_tokens`.
|
||||
fn verify(&self, spendable_tokens: i64) -> bool {
|
||||
match self {
|
||||
Budget::Pay(payment) | Budget::After(_, payment) => payment.tokens == spendable_tokens,
|
||||
Budget::Or(a, b) => a.1.tokens == spendable_tokens && b.1.tokens == spendable_tokens,
|
||||
}
|
||||
}
|
||||
|
||||
/// Apply a witness to the budget to see if the budget can be reduced.
|
||||
/// If so, modify the budget in-place.
|
||||
fn apply_witness(&mut self, witness: &Witness) {
|
||||
let new_payment = match self {
|
||||
Budget::After(cond, payment) if cond.is_satisfied(witness) => Some(payment),
|
||||
Budget::Or((cond, payment), _) if cond.is_satisfied(witness) => Some(payment),
|
||||
Budget::Or(_, (cond, payment)) if cond.is_satisfied(witness) => Some(payment),
|
||||
_ => None,
|
||||
}.cloned();
|
||||
|
||||
if let Some(payment) = new_payment {
|
||||
mem::replace(self, Budget::Pay(payment));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_signature_satisfied() {
|
||||
let sig = PublicKey::default();
|
||||
assert!(Condition::Signature(sig).is_satisfied(&Witness::Signature(sig)));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_timestamp_satisfied() {
|
||||
let dt1 = Utc.ymd(2014, 11, 14).and_hms(8, 9, 10);
|
||||
let dt2 = Utc.ymd(2014, 11, 14).and_hms(10, 9, 8);
|
||||
assert!(Condition::Timestamp(dt1).is_satisfied(&Witness::Timestamp(dt1)));
|
||||
assert!(Condition::Timestamp(dt1).is_satisfied(&Witness::Timestamp(dt2)));
|
||||
assert!(!Condition::Timestamp(dt2).is_satisfied(&Witness::Timestamp(dt1)));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_verify() {
|
||||
let dt = Utc.ymd(2014, 11, 14).and_hms(8, 9, 10);
|
||||
let from = PublicKey::default();
|
||||
let to = PublicKey::default();
|
||||
assert!(Budget::new_payment(42, to).verify(42));
|
||||
assert!(Budget::new_authorized_payment(from, 42, to).verify(42));
|
||||
assert!(Budget::new_future_payment(dt, 42, to).verify(42));
|
||||
assert!(Budget::new_cancelable_future_payment(dt, from, 42, to).verify(42));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_authorized_payment() {
|
||||
let from = PublicKey::default();
|
||||
let to = PublicKey::default();
|
||||
|
||||
let mut budget = Budget::new_authorized_payment(from, 42, to);
|
||||
budget.apply_witness(&Witness::Signature(from));
|
||||
assert_eq!(budget, Budget::new_payment(42, to));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_future_payment() {
|
||||
let dt = Utc.ymd(2014, 11, 14).and_hms(8, 9, 10);
|
||||
let to = PublicKey::default();
|
||||
|
||||
let mut budget = Budget::new_future_payment(dt, 42, to);
|
||||
budget.apply_witness(&Witness::Timestamp(dt));
|
||||
assert_eq!(budget, Budget::new_payment(42, to));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_cancelable_future_payment() {
|
||||
let dt = Utc.ymd(2014, 11, 14).and_hms(8, 9, 10);
|
||||
let from = PublicKey::default();
|
||||
let to = PublicKey::default();
|
||||
|
||||
let mut budget = Budget::new_cancelable_future_payment(dt, from, 42, to);
|
||||
budget.apply_witness(&Witness::Timestamp(dt));
|
||||
assert_eq!(budget, Budget::new_payment(42, to));
|
||||
|
||||
let mut budget = Budget::new_cancelable_future_payment(dt, from, 42, to);
|
||||
budget.apply_witness(&Witness::Signature(from));
|
||||
assert_eq!(budget, Budget::new_payment(42, from));
|
||||
}
|
||||
}
|
src/choose_gossip_peer_strategy.rs (new file, 333 lines)
@@ -0,0 +1,333 @@
|
||||
use crdt::ReplicatedData;
|
||||
use rand::distributions::{Distribution, Weighted, WeightedChoice};
|
||||
use rand::thread_rng;
|
||||
use result::{Error, Result};
|
||||
use signature::PublicKey;
|
||||
use std;
|
||||
use std::collections::HashMap;
|
||||
|
||||
pub const DEFAULT_WEIGHT: u32 = 1;
|
||||
|
||||
pub trait ChooseGossipPeerStrategy {
|
||||
fn choose_peer<'a>(&self, options: Vec<&'a ReplicatedData>) -> Result<&'a ReplicatedData>;
|
||||
}
|
||||
|
||||
pub struct ChooseRandomPeerStrategy<'a> {
|
||||
random: &'a Fn() -> u64,
|
||||
}
|
||||
|
||||
// Given a source of randomness "random", this strategy will randomly pick a validator
|
||||
// from the input options. This strategy works in isolation, but doesn't leverage any
|
||||
// rumors from the rest of the gossip network to make more informed decisions about
|
||||
// which validators have more/less updates
|
||||
impl<'a, 'b> ChooseRandomPeerStrategy<'a> {
|
||||
pub fn new(random: &'a Fn() -> u64) -> Self {
|
||||
ChooseRandomPeerStrategy { random }
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> ChooseGossipPeerStrategy for ChooseRandomPeerStrategy<'a> {
|
||||
fn choose_peer<'b>(&self, options: Vec<&'b ReplicatedData>) -> Result<&'b ReplicatedData> {
|
||||
if options.is_empty() {
|
||||
return Err(Error::CrdtTooSmall);
|
||||
}
|
||||
|
||||
let n = ((self.random)() as usize) % options.len();
|
||||
Ok(options[n])
|
||||
}
|
||||
}
|
||||
|
||||
// This strategy uses rumors accumulated from the rest of the network to weight
|
||||
// the importance of communicating with a particular validator based on cumulative network
|
||||
// perception of the number of updates the validator has to offer. A validator is randomly
|
||||
// picked based on a weighted sample from the pool of viable choices. The "weight", w, of a
|
||||
// particular validator "v" is calculated as follows:
|
||||
//
|
||||
// w = [Sum for all i in I_v: (rumor_v(i) - observed(v)) * stake(i)] /
|
||||
// [Sum for all i in I_v: Sum(stake(i))]
|
||||
//
|
||||
// where I_v is the set of all validators that returned a rumor about the update_index of
|
||||
// validator "v", stake(i) is the size of the stake of validator "i", observed(v) is the
|
||||
// observed update_index from the last direct communication validator "v", and
|
||||
// rumor_v(i) is the rumored update_index of validator "v" propagated by fellow validator "i".
|
||||
|
||||
// This could be a problem if there are validators with large stakes lying about their
|
||||
// observed updates. There could also be a problem in network partitions, or even just
|
||||
// when certain validators are disproportionately active, where we hear more rumors about
|
||||
// certain clusters of nodes that then propagate more rumors about each other. Hopefully
|
||||
// this can be resolved with a good baseline DEFAULT_WEIGHT, or by implementing lockout
|
||||
// periods for very active validators in the future.
|
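//
// Worked example with made-up numbers: suppose validators i1 (stake 3) and i2 (stake 1)
// both carry rumors about validator v, with rumor_v(i1) = 14, rumor_v(i2) = 12, and our
// last direct observation observed(v) = 10. Then
//   w = ((14 - 10) * 3 + (12 - 10) * 1) / (3 + 1) = 3.5,
// so v is sampled with weight 3 + DEFAULT_WEIGHT, ahead of peers with no rumored updates.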
||||
|
||||
pub struct ChooseWeightedPeerStrategy<'a> {
|
||||
// The map of last directly observed update_index for each active validator.
|
||||
// This is how we get observed(v) from the formula above.
|
||||
remote: &'a HashMap<PublicKey, u64>,
|
||||
// The map of rumored update_index for each active validator. Using the formula above,
|
||||
// to find rumor_v(i), we would first look up "v" in the outer map, then look up
|
||||
// "i" in the inner map, i.e. look up external_liveness[v][i]
|
||||
external_liveness: &'a HashMap<PublicKey, HashMap<PublicKey, u64>>,
|
||||
// A function returning the size of the stake for a particular validator, corresponds
|
||||
// to stake(i) in the formula above.
|
||||
get_stake: &'a Fn(PublicKey) -> f64,
|
||||
}
|
||||
|
||||
impl<'a> ChooseWeightedPeerStrategy<'a> {
|
||||
pub fn new(
|
||||
remote: &'a HashMap<PublicKey, u64>,
|
||||
external_liveness: &'a HashMap<PublicKey, HashMap<PublicKey, u64>>,
|
||||
get_stake: &'a Fn(PublicKey) -> f64,
|
||||
) -> Self {
|
||||
ChooseWeightedPeerStrategy {
|
||||
remote,
|
||||
external_liveness,
|
||||
get_stake,
|
||||
}
|
||||
}
|
||||
|
||||
fn calculate_weighted_remote_index(&self, peer_id: PublicKey) -> u32 {
|
||||
let mut last_seen_index = 0;
|
||||
// If the peer is not in our remote table, then we leave last_seen_index as zero.
|
||||
// Only happens when a peer appears in our crdt.table but not in our crdt.remote,
|
||||
// which means a validator was directly injected into our crdt.table
|
||||
if let Some(index) = self.remote.get(&peer_id) {
|
||||
last_seen_index = *index;
|
||||
}
|
||||
|
||||
let liveness_entry = self.external_liveness.get(&peer_id);
|
||||
if liveness_entry.is_none() {
|
||||
return DEFAULT_WEIGHT;
|
||||
}
|
||||
|
||||
let votes = liveness_entry.unwrap();
|
||||
|
||||
if votes.is_empty() {
|
||||
return DEFAULT_WEIGHT;
|
||||
}
|
||||
|
||||
// Calculate the weighted average of the rumors
|
||||
let mut relevant_votes = vec![];
|
||||
|
||||
let total_stake = votes.iter().fold(0.0, |total_stake, (&id, &vote)| {
|
||||
let stake = (self.get_stake)(id);
|
||||
// If the total stake is going to overflow f64, pick
|
||||
// the larger of either the current total_stake, or the
|
||||
// new stake, this way we are guaranteed to get at least f64::MAX / 2
|
||||
// sample of stake in our weighted calculation
|
||||
if std::f64::MAX - total_stake < stake {
|
||||
if stake > total_stake {
|
||||
relevant_votes = vec![(stake, vote)];
|
||||
stake
|
||||
} else {
|
||||
total_stake
|
||||
}
|
||||
} else {
|
||||
relevant_votes.push((stake, vote));
|
||||
total_stake + stake
|
||||
}
|
||||
});
|
||||
|
||||
let weighted_vote = relevant_votes.iter().fold(0.0, |sum, &(stake, vote)| {
|
||||
if vote < last_seen_index {
|
||||
// This should never happen because we maintain the invariant that the indexes
|
||||
// in the external_liveness table are always greater than the corresponding
|
||||
// indexes in the remote table, if the index exists in the remote table at all.
|
||||
|
||||
// Case 1: Attempt to insert bigger index into the "external_liveness" table
|
||||
// happens after an insertion into the "remote" table. In this case,
|
||||
// (see the apply_updates() function), we prevent the insertion if the entry
|
||||
// in the remote table >= the attempted insertion into the "external" liveness
|
||||
// table.
|
||||
|
||||
// Case 2: Bigger index in the "external_liveness" table inserted before
|
||||
// a smaller insertion into the "remote" table. We clear the corresponding
|
||||
// "external_liveness" table entry on all insertions into the "remote" table
|
||||
// See apply_updates() function.
|
||||
|
||||
warn!("weighted peer index was smaller than local entry in remote table");
|
||||
return sum;
|
||||
}
|
||||
|
||||
let vote_difference = (vote - last_seen_index) as f64;
|
||||
let new_weight = vote_difference * (stake / total_stake);
|
||||
|
||||
if std::f64::MAX - sum < new_weight {
|
||||
return f64::max(new_weight, sum);
|
||||
}
|
||||
|
||||
sum + new_weight
|
||||
});
|
||||
|
||||
// Return u32 b/c the weighted sampling API from rand::distributions
|
||||
// only takes u32 for weights
|
||||
if weighted_vote >= std::u32::MAX as f64 {
|
||||
return std::u32::MAX;
|
||||
}
|
||||
|
||||
// If the weighted rumors we've heard about aren't any greater than
|
||||
// what we've directly learned from the last time we communicated with the
|
||||
// peer (i.e. weighted_vote == 0), then return a weight of 1.
|
||||
// Otherwise, return the calculated weight.
|
||||
weighted_vote as u32 + DEFAULT_WEIGHT
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> ChooseGossipPeerStrategy for ChooseWeightedPeerStrategy<'a> {
|
||||
fn choose_peer<'b>(&self, options: Vec<&'b ReplicatedData>) -> Result<&'b ReplicatedData> {
|
||||
if options.is_empty() {
|
||||
return Err(Error::CrdtTooSmall);
|
||||
}
|
||||
|
||||
let mut weighted_peers = vec![];
|
||||
for peer in options {
|
||||
let weight = self.calculate_weighted_remote_index(peer.id);
|
||||
weighted_peers.push(Weighted { weight, item: peer });
|
||||
}
|
||||
|
||||
let mut rng = thread_rng();
|
||||
Ok(WeightedChoice::new(&mut weighted_peers).sample(&mut rng))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use choose_gossip_peer_strategy::{ChooseWeightedPeerStrategy, DEFAULT_WEIGHT};
|
||||
use logger;
|
||||
use signature::{KeyPair, KeyPairUtil, PublicKey};
|
||||
use std;
|
||||
use std::collections::HashMap;
|
||||
|
||||
fn get_stake(_id: PublicKey) -> f64 {
|
||||
1.0
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_default() {
|
||||
logger::setup();
|
||||
|
||||
// Initialize the filler keys
|
||||
let key1 = KeyPair::new().pubkey();
|
||||
|
||||
let remote: HashMap<PublicKey, u64> = HashMap::new();
|
||||
let external_liveness: HashMap<PublicKey, HashMap<PublicKey, u64>> = HashMap::new();
|
||||
|
||||
let weighted_strategy =
|
||||
ChooseWeightedPeerStrategy::new(&remote, &external_liveness, &get_stake);
|
||||
|
||||
// If external_liveness table doesn't contain this entry,
|
||||
// return the default weight
|
||||
let result = weighted_strategy.calculate_weighted_remote_index(key1);
|
||||
assert_eq!(result, DEFAULT_WEIGHT);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_only_external_liveness() {
|
||||
logger::setup();
|
||||
|
||||
// Initialize the filler keys
|
||||
let key1 = KeyPair::new().pubkey();
|
||||
let key2 = KeyPair::new().pubkey();
|
||||
|
||||
let remote: HashMap<PublicKey, u64> = HashMap::new();
|
||||
let mut external_liveness: HashMap<PublicKey, HashMap<PublicKey, u64>> = HashMap::new();
|
||||
|
||||
// If only the liveness table contains the entry, should return the
|
||||
// weighted liveness entries
|
||||
let test_value: u32 = 5;
|
||||
let mut rumors: HashMap<PublicKey, u64> = HashMap::new();
|
||||
rumors.insert(key2, test_value as u64);
|
||||
external_liveness.insert(key1, rumors);
|
||||
|
||||
let weighted_strategy =
|
||||
ChooseWeightedPeerStrategy::new(&remote, &external_liveness, &get_stake);
|
||||
|
||||
let result = weighted_strategy.calculate_weighted_remote_index(key1);
|
||||
assert_eq!(result, test_value + DEFAULT_WEIGHT);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_overflow_votes() {
|
||||
logger::setup();
|
||||
|
||||
// Initialize the filler keys
|
||||
let key1 = KeyPair::new().pubkey();
|
||||
let key2 = KeyPair::new().pubkey();
|
||||
|
||||
let remote: HashMap<PublicKey, u64> = HashMap::new();
|
||||
let mut external_liveness: HashMap<PublicKey, HashMap<PublicKey, u64>> = HashMap::new();
|
||||
|
||||
// If the vote index is greater than u32::MAX, default to u32::MAX
|
||||
let test_value = (std::u32::MAX as u64) + 10;
|
||||
let mut rumors: HashMap<PublicKey, u64> = HashMap::new();
|
||||
rumors.insert(key2, test_value);
|
||||
external_liveness.insert(key1, rumors);
|
||||
|
||||
let weighted_strategy =
|
||||
ChooseWeightedPeerStrategy::new(&remote, &external_liveness, &get_stake);
|
||||
|
||||
let result = weighted_strategy.calculate_weighted_remote_index(key1);
|
||||
assert_eq!(result, std::u32::MAX);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_many_validators() {
|
||||
logger::setup();
|
||||
|
||||
// Initialize the filler keys
|
||||
let key1 = KeyPair::new().pubkey();
|
||||
|
||||
let mut remote: HashMap<PublicKey, u64> = HashMap::new();
|
||||
let mut external_liveness: HashMap<PublicKey, HashMap<PublicKey, u64>> = HashMap::new();
|
||||
|
||||
// Test many validators' rumors in external_liveness
|
||||
let num_peers = 10;
|
||||
let mut rumors: HashMap<PublicKey, u64> = HashMap::new();
|
||||
|
||||
remote.insert(key1, 0);
|
||||
|
||||
for i in 0..num_peers {
|
||||
let pk = KeyPair::new().pubkey();
|
||||
rumors.insert(pk, i);
|
||||
}
|
||||
|
||||
external_liveness.insert(key1, rumors);
|
||||
|
||||
let weighted_strategy =
|
||||
ChooseWeightedPeerStrategy::new(&remote, &external_liveness, &get_stake);
|
||||
|
||||
let result = weighted_strategy.calculate_weighted_remote_index(key1);
|
||||
assert_eq!(result, (num_peers / 2) as u32);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_many_validators2() {
|
||||
logger::setup();
|
||||
|
||||
// Initialize the filler keys
|
||||
let key1 = KeyPair::new().pubkey();
|
||||
|
||||
let mut remote: HashMap<PublicKey, u64> = HashMap::new();
|
||||
let mut external_liveness: HashMap<PublicKey, HashMap<PublicKey, u64>> = HashMap::new();
|
||||
|
||||
// Test many validators' rumors in external_liveness
|
||||
let num_peers = 10;
|
||||
let old_index = 20;
|
||||
let mut rumors: HashMap<PublicKey, u64> = HashMap::new();
|
||||
|
||||
remote.insert(key1, old_index);
|
||||
|
||||
for _i in 0..num_peers {
|
||||
let pk = KeyPair::new().pubkey();
|
||||
rumors.insert(pk, old_index);
|
||||
}
|
||||
|
||||
external_liveness.insert(key1, rumors);
|
||||
|
||||
let weighted_strategy =
|
||||
ChooseWeightedPeerStrategy::new(&remote, &external_liveness, &get_stake);
|
||||
|
||||
let result = weighted_strategy.calculate_weighted_remote_index(key1);
|
||||
|
||||
// If nobody has seen a newer update then revert to default
|
||||
assert_eq!(result, DEFAULT_WEIGHT);
|
||||
}
|
||||
}
|
src/counter.rs (new file, 69 lines)
@@ -0,0 +1,69 @@
|
||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
use std::time::Duration;
|
||||
use timing;
|
||||
|
||||
pub struct Counter {
|
||||
pub name: &'static str,
|
||||
pub counts: AtomicUsize,
|
||||
pub nanos: AtomicUsize,
|
||||
pub times: AtomicUsize,
|
||||
pub lograte: usize,
|
||||
}
|
||||
|
||||
macro_rules! create_counter {
|
||||
($name:expr, $lograte:expr) => {
|
||||
Counter {
|
||||
name: $name,
|
||||
counts: AtomicUsize::new(0),
|
||||
nanos: AtomicUsize::new(0),
|
||||
times: AtomicUsize::new(0),
|
||||
lograte: $lograte,
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
macro_rules! inc_counter {
|
||||
($name:expr, $count:expr, $start:expr) => {
|
||||
unsafe { $name.inc($count, $start.elapsed()) };
|
||||
};
|
||||
}
|
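// Typical usage (mirroring the test at the bottom of this file):
//   static mut COUNTER: Counter = create_counter!("my-stage", 100);
//   let start = Instant::now();
//   // ... process a batch of `count` events ...
//   inc_counter!(COUNTER, count, start);
// Roughly every `lograte` calls, `inc` logs a COUNTER:{...} JSON line with the running totals.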
||||
|
||||
impl Counter {
|
||||
pub fn inc(&mut self, events: usize, dur: Duration) {
|
||||
let total = dur.as_secs() * 1_000_000_000 + dur.subsec_nanos() as u64;
|
||||
let counts = self.counts.fetch_add(events, Ordering::Relaxed);
|
||||
let nanos = self.nanos.fetch_add(total as usize, Ordering::Relaxed);
|
||||
let times = self.times.fetch_add(1, Ordering::Relaxed);
|
||||
if times % self.lograte == 0 && times > 0 {
|
||||
info!(
|
||||
"COUNTER:{{\"name\": \"{}\", \"counts\": {}, \"nanos\": {}, \"samples\": {}, \"rate\": {}, \"now\": {}}}",
|
||||
self.name,
|
||||
counts,
|
||||
nanos,
|
||||
times,
|
||||
counts as f64 * 1e9 / nanos as f64,
|
||||
timing::timestamp(),
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use counter::Counter;
|
||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
use std::time::Instant;
|
||||
#[test]
|
||||
fn test_counter() {
|
||||
static mut COUNTER: Counter = create_counter!("test", 100);
|
||||
let start = Instant::now();
|
||||
let count = 1;
|
||||
inc_counter!(COUNTER, count, start);
|
||||
unsafe {
|
||||
assert_eq!(COUNTER.counts.load(Ordering::Relaxed), 1);
|
||||
assert_ne!(COUNTER.nanos.load(Ordering::Relaxed), 0);
|
||||
assert_eq!(COUNTER.times.load(Ordering::Relaxed), 1);
|
||||
assert_eq!(COUNTER.lograte, 100);
|
||||
assert_eq!(COUNTER.name, "test");
|
||||
}
|
||||
}
|
||||
}
|
src/crdt.rs (new file, 1349 lines)
File diff suppressed because it is too large
src/drone.rs (new file, 313 lines)
@@ -0,0 +1,313 @@
|
||||
//! The `drone` module provides an object for launching a Solana Drone,
|
||||
//! which is the custodian of any remaining tokens in a mint.
|
||||
//! The Solana Drone builds and sends airdrop transactions,
|
||||
//! checking requests against a request cap for a given time slice
|
||||
//! and (to come) an IP rate limit.
|
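//!
//! For example, with the defaults below (`TIME_SLICE` = 60 seconds, `REQUEST_CAP` =
//! 150_000 tokens), airdrop amounts are accumulated into `request_current`; once a
//! request would push the total past the cap it is rejected until
//! `clear_request_count()` resets the counter for the next time slice.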
||||
|
||||
use signature::{KeyPair, PublicKey};
|
||||
use std::io;
|
||||
use std::io::{Error, ErrorKind};
|
||||
use std::net::{IpAddr, SocketAddr, UdpSocket};
|
||||
use std::time::Duration;
|
||||
use thin_client::ThinClient;
|
||||
use transaction::Transaction;
|
||||
|
||||
pub const TIME_SLICE: u64 = 60;
|
||||
pub const REQUEST_CAP: u64 = 150_000;
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
pub enum DroneRequest {
|
||||
GetAirdrop {
|
||||
airdrop_request_amount: u64,
|
||||
client_public_key: PublicKey,
|
||||
},
|
||||
}
|
||||
|
||||
pub struct Drone {
|
||||
mint_keypair: KeyPair,
|
||||
ip_cache: Vec<IpAddr>,
|
||||
_airdrop_addr: SocketAddr,
|
||||
transactions_addr: SocketAddr,
|
||||
requests_addr: SocketAddr,
|
||||
pub time_slice: Duration,
|
||||
request_cap: u64,
|
||||
pub request_current: u64,
|
||||
}
|
||||
|
||||
impl Drone {
|
||||
pub fn new(
|
||||
mint_keypair: KeyPair,
|
||||
_airdrop_addr: SocketAddr,
|
||||
transactions_addr: SocketAddr,
|
||||
requests_addr: SocketAddr,
|
||||
time_input: Option<u64>,
|
||||
request_cap_input: Option<u64>,
|
||||
) -> Drone {
|
||||
let time_slice = match time_input {
|
||||
Some(time) => Duration::new(time, 0),
|
||||
None => Duration::new(TIME_SLICE, 0),
|
||||
};
|
||||
let request_cap = match request_cap_input {
|
||||
Some(cap) => cap,
|
||||
None => REQUEST_CAP,
|
||||
};
|
||||
Drone {
|
||||
mint_keypair,
|
||||
ip_cache: Vec::new(),
|
||||
_airdrop_addr,
|
||||
transactions_addr,
|
||||
requests_addr,
|
||||
time_slice,
|
||||
request_cap,
|
||||
request_current: 0,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn check_request_limit(&mut self, request_amount: u64) -> bool {
|
||||
(self.request_current + request_amount) <= self.request_cap
|
||||
}
|
||||
|
||||
pub fn clear_request_count(&mut self) {
|
||||
self.request_current = 0;
|
||||
}
|
||||
|
||||
pub fn add_ip_to_cache(&mut self, ip: IpAddr) {
|
||||
self.ip_cache.push(ip);
|
||||
}
|
||||
|
||||
pub fn clear_ip_cache(&mut self) {
|
||||
self.ip_cache.clear();
|
||||
}
|
||||
|
||||
pub fn check_rate_limit(&mut self, ip: IpAddr) -> Result<IpAddr, IpAddr> {
|
||||
// [WIP] This is placeholder code for a proper rate limiter.
|
||||
// Right now it will only allow one total drone request per IP
|
||||
if self.ip_cache.contains(&ip) {
|
||||
// Add proper error handling here
|
||||
Err(ip)
|
||||
} else {
|
||||
self.add_ip_to_cache(ip);
|
||||
Ok(ip)
|
||||
}
|
||||
}
|
||||
|
||||
pub fn send_airdrop(&mut self, req: DroneRequest) -> Result<usize, io::Error> {
|
||||
let tx: Transaction;
|
||||
let request_amount: u64;
|
||||
let requests_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
|
||||
let transactions_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
|
||||
|
||||
let mut client = ThinClient::new(
|
||||
self.requests_addr,
|
||||
requests_socket,
|
||||
self.transactions_addr,
|
||||
transactions_socket,
|
||||
);
|
||||
let last_id = client.get_last_id();
|
||||
|
||||
match req {
|
||||
DroneRequest::GetAirdrop {
|
||||
airdrop_request_amount,
|
||||
client_public_key,
|
||||
} => {
|
||||
request_amount = airdrop_request_amount.clone();
|
||||
tx = Transaction::new(
|
||||
&self.mint_keypair,
|
||||
client_public_key,
|
||||
airdrop_request_amount as i64,
|
||||
last_id,
|
||||
);
|
||||
}
|
||||
}
|
||||
if self.check_request_limit(request_amount) {
|
||||
self.request_current += request_amount;
|
||||
client.transfer_signed(tx)
|
||||
} else {
|
||||
Err(Error::new(ErrorKind::Other, "token limit reached"))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use bank::Bank;
|
||||
use crdt::{get_ip_addr, TestNode};
|
||||
use drone::{Drone, DroneRequest, REQUEST_CAP, TIME_SLICE};
|
||||
use fullnode::FullNode;
|
||||
use logger;
|
||||
use mint::Mint;
|
||||
use signature::{KeyPair, KeyPairUtil};
|
||||
use std::io::sink;
|
||||
use std::net::{SocketAddr, UdpSocket};
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::sync::Arc;
|
||||
use std::thread::sleep;
|
||||
use std::time::Duration;
|
||||
use thin_client::ThinClient;
|
||||
|
||||
#[test]
|
||||
fn test_check_request_limit() {
|
||||
let keypair = KeyPair::new();
|
||||
let mut addr: SocketAddr = "0.0.0.0:9900".parse().unwrap();
|
||||
addr.set_ip(get_ip_addr().unwrap());
|
||||
let transactions_addr = "0.0.0.0:0".parse().unwrap();
|
||||
let requests_addr = "0.0.0.0:0".parse().unwrap();
|
||||
let mut drone = Drone::new(
|
||||
keypair,
|
||||
addr,
|
||||
transactions_addr,
|
||||
requests_addr,
|
||||
None,
|
||||
Some(3),
|
||||
);
|
||||
assert!(drone.check_request_limit(1));
|
||||
drone.request_current = 3;
|
||||
assert!(!drone.check_request_limit(1));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_clear_request_count() {
|
||||
let keypair = KeyPair::new();
|
||||
let mut addr: SocketAddr = "0.0.0.0:9900".parse().unwrap();
|
||||
addr.set_ip(get_ip_addr().unwrap());
|
||||
let transactions_addr = "0.0.0.0:0".parse().unwrap();
|
||||
let requests_addr = "0.0.0.0:0".parse().unwrap();
|
||||
let mut drone = Drone::new(keypair, addr, transactions_addr, requests_addr, None, None);
|
||||
drone.request_current = drone.request_current + 256;
|
||||
assert_eq!(drone.request_current, 256);
|
||||
drone.clear_request_count();
|
||||
assert_eq!(drone.request_current, 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_add_ip_to_cache() {
|
||||
let keypair = KeyPair::new();
|
||||
let mut addr: SocketAddr = "0.0.0.0:9900".parse().unwrap();
|
||||
addr.set_ip(get_ip_addr().unwrap());
|
||||
let transactions_addr = "0.0.0.0:0".parse().unwrap();
|
||||
let requests_addr = "0.0.0.0:0".parse().unwrap();
|
||||
let mut drone = Drone::new(keypair, addr, transactions_addr, requests_addr, None, None);
|
||||
let ip = "127.0.0.1".parse().expect("create IpAddr from string");
|
||||
assert_eq!(drone.ip_cache.len(), 0);
|
||||
drone.add_ip_to_cache(ip);
|
||||
assert_eq!(drone.ip_cache.len(), 1);
|
||||
assert!(drone.ip_cache.contains(&ip));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_clear_ip_cache() {
|
||||
let keypair = KeyPair::new();
|
||||
let mut addr: SocketAddr = "0.0.0.0:9900".parse().unwrap();
|
||||
addr.set_ip(get_ip_addr().unwrap());
|
||||
let transactions_addr = "0.0.0.0:0".parse().unwrap();
|
||||
let requests_addr = "0.0.0.0:0".parse().unwrap();
|
||||
let mut drone = Drone::new(keypair, addr, transactions_addr, requests_addr, None, None);
|
||||
let ip = "127.0.0.1".parse().expect("create IpAddr from string");
|
||||
assert_eq!(drone.ip_cache.len(), 0);
|
||||
drone.add_ip_to_cache(ip);
|
||||
assert_eq!(drone.ip_cache.len(), 1);
|
||||
drone.clear_ip_cache();
|
||||
assert_eq!(drone.ip_cache.len(), 0);
|
||||
assert!(drone.ip_cache.is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_drone_default_init() {
|
||||
let keypair = KeyPair::new();
|
||||
let mut addr: SocketAddr = "0.0.0.0:9900".parse().unwrap();
|
||||
addr.set_ip(get_ip_addr().unwrap());
|
||||
let transactions_addr = "0.0.0.0:0".parse().unwrap();
|
||||
let requests_addr = "0.0.0.0:0".parse().unwrap();
|
||||
let time_slice: Option<u64> = None;
|
||||
let request_cap: Option<u64> = None;
|
||||
let drone = Drone::new(
|
||||
keypair,
|
||||
addr,
|
||||
transactions_addr,
|
||||
requests_addr,
|
||||
time_slice,
|
||||
request_cap,
|
||||
);
|
||||
assert_eq!(drone.time_slice, Duration::new(TIME_SLICE, 0));
|
||||
assert_eq!(drone.request_cap, REQUEST_CAP);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_send_airdrop() {
|
||||
const SMALL_BATCH: i64 = 50;
|
||||
const TPS_BATCH: i64 = 5_000_000;
|
||||
|
||||
logger::setup();
|
||||
let leader = TestNode::new();
|
||||
|
||||
let alice = Mint::new(10_000_000);
|
||||
let bank = Bank::new(&alice);
|
||||
let bob_pubkey = KeyPair::new().pubkey();
|
||||
let carlos_pubkey = KeyPair::new().pubkey();
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
|
||||
let server = FullNode::new_leader(
|
||||
bank,
|
||||
0,
|
||||
Some(Duration::from_millis(30)),
|
||||
leader.data.clone(),
|
||||
leader.sockets.requests,
|
||||
leader.sockets.transaction,
|
||||
leader.sockets.broadcast,
|
||||
leader.sockets.respond,
|
||||
leader.sockets.gossip,
|
||||
exit.clone(),
|
||||
sink(),
|
||||
);
|
||||
sleep(Duration::from_millis(900));
|
||||
|
||||
let mut addr: SocketAddr = "0.0.0.0:9900".parse().expect("bind to drone socket");
|
||||
addr.set_ip(get_ip_addr().expect("drone get_ip_addr"));
|
||||
let mut drone = Drone::new(
|
||||
alice.keypair(),
|
||||
addr,
|
||||
leader.data.transactions_addr,
|
||||
leader.data.requests_addr,
|
||||
None,
|
||||
Some(5_000_050),
|
||||
);
|
||||
|
||||
let bob_req = DroneRequest::GetAirdrop {
|
||||
airdrop_request_amount: 50,
|
||||
client_public_key: bob_pubkey,
|
||||
};
|
||||
let bob_result = drone.send_airdrop(bob_req).expect("send airdrop test");
|
||||
assert!(bob_result > 0);
|
||||
|
||||
let carlos_req = DroneRequest::GetAirdrop {
|
||||
airdrop_request_amount: 5_000_000,
|
||||
client_public_key: carlos_pubkey,
|
||||
};
|
||||
let carlos_result = drone.send_airdrop(carlos_req).expect("send airdrop test");
|
||||
assert!(carlos_result > 0);
|
||||
|
||||
let requests_socket = UdpSocket::bind("0.0.0.0:0").expect("drone bind to requests socket");
|
||||
let transactions_socket =
|
||||
UdpSocket::bind("0.0.0.0:0").expect("drone bind to transactions socket");
|
||||
|
||||
let mut client = ThinClient::new(
|
||||
leader.data.requests_addr,
|
||||
requests_socket,
|
||||
leader.data.transactions_addr,
|
||||
transactions_socket,
|
||||
);
|
||||
|
||||
let bob_balance = client.poll_get_balance(&bob_pubkey);
|
||||
info!("Small request balance: {:?}", bob_balance);
|
||||
assert_eq!(bob_balance.unwrap(), SMALL_BATCH);
|
||||
|
||||
let carlos_balance = client.poll_get_balance(&carlos_pubkey);
|
||||
info!("TPS request balance: {:?}", carlos_balance);
|
||||
assert_eq!(carlos_balance.unwrap(), TPS_BATCH);
|
||||
|
||||
exit.store(true, Ordering::Relaxed);
|
||||
for t in server.thread_hdls {
|
||||
t.join().unwrap();
|
||||
}
|
||||
}
|
||||
}
|
src/entry.rs (new file, 236 lines) @@ -0,0 +1,236 @@
|
||||
//! The `entry` module is a fundamental building block of Proof of History. It contains a
|
||||
//! unique ID that is the hash of the Entry before it, plus the hash of the
|
||||
//! transactions within it. Entries cannot be reordered, and an Entry's field `num_hashes`
|
||||
//! represents an approximate amount of time since the last Entry was created.
|
||||
use bincode::serialized_size;
|
||||
use hash::{extend_and_hash, hash, Hash};
|
||||
use packet::BLOB_DATA_SIZE;
|
||||
use rayon::prelude::*;
|
||||
use transaction::Transaction;
|
||||
|
||||
/// Each Entry contains three pieces of data. The `num_hashes` field is the number
|
||||
/// of hashes performed since the previous entry. The `id` field is the result
|
||||
/// of hashing `id` from the previous entry `num_hashes` times. The `transactions`
|
||||
/// field points to Transactions that took place shortly before `id` was generated.
|
||||
///
|
||||
/// If you divide `num_hashes` by the amount of time it takes to generate a new hash, you
|
||||
/// get a duration estimate since the last Entry. Since processing power increases
|
||||
/// over time, one should expect the duration `num_hashes` represents to decrease proportionally.
|
||||
/// An upper bound on Duration can be estimated by assuming each hash was generated by the
|
||||
/// world's fastest processor at the time the entry was recorded. Or said another way, it
|
||||
/// is physically not possible for a shorter duration to have occurred if one assumes the
|
||||
/// hash was computed by the world's fastest processor at that time. The hash chain is both
|
||||
/// a Verifiable Delay Function (VDF) and a Proof of Work (not to be confused with Proof of
|
||||
/// Work consensus!)
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
|
||||
pub struct Entry {
|
||||
/// The number of hashes since the previous Entry ID.
|
||||
pub num_hashes: u64,
|
||||
|
||||
/// The SHA-256 hash `num_hashes` after the previous Entry ID.
|
||||
pub id: Hash,
|
||||
|
||||
/// An unordered list of transactions that were observed before the Entry ID was
|
||||
/// generated. They may have been observed before a previous Entry ID but were
|
||||
/// pushed back into this list to ensure deterministic interpretation of the ledger.
|
||||
pub transactions: Vec<Transaction>,
|
||||
|
||||
/// Indication that:
|
||||
/// 1. the next Entry in the ledger has transactions that can potentially
|
||||
/// be verified in parallel with these transactions
|
||||
/// 2. this Entry can be left out of the bank's entry_id cache for
|
||||
/// purposes of duplicate rejection
|
||||
pub has_more: bool,
|
||||
|
||||
/// Erasure requires that Entry be a multiple of 4 bytes in size
|
||||
pad: [u8; 3],
|
||||
}
|
||||
|
||||
impl Entry {
|
||||
/// Creates the next Entry `num_hashes` after `start_hash`.
|
||||
pub fn new(
|
||||
start_hash: &Hash,
|
||||
cur_hashes: u64,
|
||||
transactions: Vec<Transaction>,
|
||||
has_more: bool,
|
||||
) -> Self {
|
||||
let num_hashes = cur_hashes + if transactions.is_empty() { 0 } else { 1 };
|
||||
let id = next_hash(start_hash, 0, &transactions);
|
||||
let entry = Entry {
|
||||
num_hashes,
|
||||
id,
|
||||
transactions,
|
||||
has_more,
|
||||
pad: [0, 0, 0],
|
||||
};
|
||||
assert!(serialized_size(&entry).unwrap() <= BLOB_DATA_SIZE as u64);
|
||||
entry
|
||||
}
|
||||
|
||||
pub fn will_fit(transactions: Vec<Transaction>) -> bool {
|
||||
serialized_size(&Entry {
|
||||
num_hashes: 0,
|
||||
id: Hash::default(),
|
||||
transactions,
|
||||
has_more: false,
|
||||
pad: [0, 0, 0],
|
||||
}).unwrap() <= BLOB_DATA_SIZE as u64
|
||||
}
|
||||
|
||||
/// Creates the next Tick Entry `num_hashes` after `start_hash`.
|
||||
pub fn new_mut(
|
||||
start_hash: &mut Hash,
|
||||
cur_hashes: &mut u64,
|
||||
transactions: Vec<Transaction>,
|
||||
has_more: bool,
|
||||
) -> Self {
|
||||
let entry = Self::new(start_hash, *cur_hashes, transactions, has_more);
|
||||
*start_hash = entry.id;
|
||||
*cur_hashes = 0;
|
||||
assert!(serialized_size(&entry).unwrap() <= BLOB_DATA_SIZE as u64);
|
||||
entry
|
||||
}
|
||||
|
||||
/// Creates an Entry from the number of hashes `num_hashes` since the previous transaction
|
||||
/// and that resulting `id`.
|
||||
pub fn new_tick(num_hashes: u64, id: &Hash) -> Self {
|
||||
Entry {
|
||||
num_hashes,
|
||||
id: *id,
|
||||
transactions: vec![],
|
||||
has_more: false,
|
||||
pad: [0, 0, 0],
|
||||
}
|
||||
}
|
||||
|
||||
/// Verifies self.id is the result of hashing a `start_hash` `self.num_hashes` times.
|
||||
/// If the transaction is not a Tick, then hash that as well.
|
||||
pub fn verify(&self, start_hash: &Hash) -> bool {
|
||||
self.transactions.par_iter().all(|tx| tx.verify_plan())
|
||||
&& self.id == next_hash(start_hash, self.num_hashes, &self.transactions)
|
||||
}
|
||||
}
|
||||
|
||||
fn add_transaction_data(hash_data: &mut Vec<u8>, tx: &Transaction) {
|
||||
hash_data.push(0u8);
|
||||
hash_data.extend_from_slice(&tx.sig);
|
||||
}
|
||||
|
||||
/// Creates the hash `num_hashes` after `start_hash`. If the transaction contains
|
||||
/// a signature, the final hash will be a hash of both the previous ID and
|
||||
/// the signature. If num_hashes is zero and there's no transaction data,
|
||||
/// start_hash is returned.
|
||||
fn next_hash(start_hash: &Hash, num_hashes: u64, transactions: &[Transaction]) -> Hash {
|
||||
let mut id = *start_hash;
|
||||
for _ in 1..num_hashes {
|
||||
id = hash(&id);
|
||||
}
|
||||
|
||||
// Hash all the transaction data
|
||||
let mut hash_data = vec![];
|
||||
for tx in transactions {
|
||||
add_transaction_data(&mut hash_data, tx);
|
||||
}
|
||||
|
||||
if !hash_data.is_empty() {
|
||||
extend_and_hash(&id, &hash_data)
|
||||
} else if num_hashes != 0 {
|
||||
hash(&id)
|
||||
} else {
|
||||
id
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates the next Tick or Transaction Entry `num_hashes` after `start_hash`.
|
||||
pub fn next_entry(start_hash: &Hash, num_hashes: u64, transactions: Vec<Transaction>) -> Entry {
|
||||
assert!(num_hashes > 0 || transactions.len() == 0);
|
||||
Entry {
|
||||
num_hashes,
|
||||
id: next_hash(start_hash, num_hashes, &transactions),
|
||||
transactions,
|
||||
has_more: false,
|
||||
pad: [0, 0, 0],
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use chrono::prelude::*;
|
||||
use entry::Entry;
|
||||
use hash::hash;
|
||||
use signature::{KeyPair, KeyPairUtil};
|
||||
use transaction::Transaction;
|
||||
|
||||
#[test]
|
||||
fn test_entry_verify() {
|
||||
let zero = Hash::default();
|
||||
let one = hash(&zero);
|
||||
assert!(Entry::new_tick(0, &zero).verify(&zero)); // base case
|
||||
assert!(!Entry::new_tick(0, &zero).verify(&one)); // base case, bad
|
||||
assert!(next_entry(&zero, 1, vec![]).verify(&zero)); // inductive step
|
||||
assert!(!next_entry(&zero, 1, vec![]).verify(&one)); // inductive step, bad
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_transaction_reorder_attack() {
|
||||
let zero = Hash::default();
|
||||
|
||||
// First, verify entries
|
||||
let keypair = KeyPair::new();
|
||||
let tx0 = Transaction::new(&keypair, keypair.pubkey(), 0, zero);
|
||||
let tx1 = Transaction::new(&keypair, keypair.pubkey(), 1, zero);
|
||||
let mut e0 = Entry::new(&zero, 0, vec![tx0.clone(), tx1.clone()], false);
|
||||
assert!(e0.verify(&zero));
|
||||
|
||||
// Next, swap two transactions and ensure verification fails.
|
||||
e0.transactions[0] = tx1; // <-- attack
|
||||
e0.transactions[1] = tx0;
|
||||
assert!(!e0.verify(&zero));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_witness_reorder_attack() {
|
||||
let zero = Hash::default();
|
||||
|
||||
// First, verify entries
|
||||
let keypair = KeyPair::new();
|
||||
let tx0 = Transaction::new_timestamp(&keypair, Utc::now(), zero);
|
||||
let tx1 = Transaction::new_signature(&keypair, Default::default(), zero);
|
||||
let mut e0 = Entry::new(&zero, 0, vec![tx0.clone(), tx1.clone()], false);
|
||||
assert!(e0.verify(&zero));
|
||||
|
||||
// Next, swap two witness transactions and ensure verification fails.
|
||||
e0.transactions[0] = tx1; // <-- attack
|
||||
e0.transactions[1] = tx0;
|
||||
assert!(!e0.verify(&zero));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_next_entry() {
|
||||
let zero = Hash::default();
|
||||
let tick = next_entry(&zero, 1, vec![]);
|
||||
assert_eq!(tick.num_hashes, 1);
|
||||
assert_ne!(tick.id, zero);
|
||||
|
||||
let tick = next_entry(&zero, 0, vec![]);
|
||||
assert_eq!(tick.num_hashes, 0);
|
||||
assert_eq!(tick.id, zero);
|
||||
|
||||
let keypair = KeyPair::new();
|
||||
let tx0 = Transaction::new_timestamp(&keypair, Utc::now(), zero);
|
||||
let entry0 = next_entry(&zero, 1, vec![tx0.clone()]);
|
||||
assert_eq!(entry0.num_hashes, 1);
|
||||
assert_eq!(entry0.id, next_hash(&zero, 1, &vec![tx0]));
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic]
|
||||
fn test_next_entry_panic() {
|
||||
let zero = Hash::default();
|
||||
let keypair = KeyPair::new();
|
||||
let tx = Transaction::new(&keypair, keypair.pubkey(), 0, zero);
|
||||
next_entry(&zero, 0, vec![tx]);
|
||||
}
|
||||
}
|
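A minimal usage sketch of the entry API defined above: chain two tick entries with next_entry and check them with Entry::verify. It assumes the same crate-internal imports used by the tests in this file; the function name poh_chain_demo is illustrative only.

use entry::next_entry;
use hash::Hash;

fn poh_chain_demo() {
    let genesis = Hash::default();
    // One hash after genesis, then one hash after tick1's id.
    let tick1 = next_entry(&genesis, 1, vec![]);
    let tick2 = next_entry(&tick1.id, 1, vec![]);
    // Each entry verifies against the id it was derived from...
    assert!(tick1.verify(&genesis));
    assert!(tick2.verify(&tick1.id));
    // ...but not against the wrong predecessor.
    assert!(!tick2.verify(&genesis));
}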
src/entry_writer.rs (new file, 101 lines) @@ -0,0 +1,101 @@
|
||||
//! The `entry_writer` module helps implement the TPU's write stage. It
|
||||
//! writes entries to the given writer, which is typically a file or
|
||||
//! stdout, and then sends the Entry to its output channel.
|
||||
|
||||
use bank::Bank;
|
||||
use entry::Entry;
|
||||
use serde_json;
|
||||
use std::io::{self, BufRead, Error, ErrorKind, Write};
|
||||
|
||||
pub struct EntryWriter<'a, W> {
|
||||
bank: &'a Bank,
|
||||
writer: W,
|
||||
}
|
||||
|
||||
impl<'a, W: Write> EntryWriter<'a, W> {
|
||||
/// Create a new Tpu that wraps the given Bank.
|
||||
pub fn new(bank: &'a Bank, writer: W) -> Self {
|
||||
EntryWriter { bank, writer }
|
||||
}
|
||||
|
||||
fn write_entry(writer: &mut W, entry: &Entry) -> io::Result<()> {
|
||||
let serialized = serde_json::to_string(entry).unwrap();
|
||||
writeln!(writer, "{}", serialized)
|
||||
}
|
||||
|
||||
pub fn write_entries<I>(writer: &mut W, entries: I) -> io::Result<()>
|
||||
where
|
||||
I: IntoIterator<Item = Entry>,
|
||||
{
|
||||
for entry in entries {
|
||||
Self::write_entry(writer, &entry)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn write_and_register_entry(&mut self, entry: &Entry) -> io::Result<()> {
|
||||
trace!("write_and_register_entry entry");
|
||||
if !entry.has_more {
|
||||
self.bank.register_entry_id(&entry.id);
|
||||
}
|
||||
Self::write_entry(&mut self.writer, entry)
|
||||
}
|
||||
|
||||
pub fn write_and_register_entries(&mut self, entries: &[Entry]) -> io::Result<()> {
|
||||
for entry in entries {
|
||||
self.write_and_register_entry(&entry)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
pub fn read_entry(s: String) -> io::Result<Entry> {
|
||||
serde_json::from_str(&s).map_err(|e| Error::new(ErrorKind::Other, e.to_string()))
|
||||
}
|
||||
|
||||
// TODO: How to implement this without attaching the input's lifetime to the output?
|
||||
pub fn read_entries<'a, R: BufRead>(
|
||||
reader: &'a mut R,
|
||||
) -> impl Iterator<Item = io::Result<Entry>> + 'a {
|
||||
reader.lines().map(|s| read_entry(s?))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use ledger;
|
||||
use mint::Mint;
|
||||
use packet::BLOB_DATA_SIZE;
|
||||
use signature::{KeyPair, KeyPairUtil};
|
||||
use transaction::Transaction;
|
||||
|
||||
#[test]
|
||||
fn test_dont_register_partial_entries() {
|
||||
let mint = Mint::new(1);
|
||||
let bank = Bank::new(&mint);
|
||||
|
||||
let writer = io::sink();
|
||||
let mut entry_writer = EntryWriter::new(&bank, writer);
|
||||
let keypair = KeyPair::new();
|
||||
let tx = Transaction::new(&mint.keypair(), keypair.pubkey(), 1, mint.last_id());
|
||||
|
||||
// NOTE: if Entry grows larger than a transaction, the code below falls over
|
||||
let threshold = (BLOB_DATA_SIZE / 256) - 1; // 256 is transaction size
|
||||
|
||||
// Verify large entries are split up and the first sets has_more.
|
||||
let txs = vec![tx.clone(); threshold * 2];
|
||||
let entries = ledger::next_entries(&mint.last_id(), 0, txs);
|
||||
assert_eq!(entries.len(), 2);
|
||||
assert!(entries[0].has_more);
|
||||
assert!(!entries[1].has_more);
|
||||
|
||||
// Verify that write_and_register_entry doesn't register the first entries after a split.
|
||||
assert_eq!(bank.last_id(), mint.last_id());
|
||||
entry_writer.write_and_register_entry(&entries[0]).unwrap();
|
||||
assert_eq!(bank.last_id(), mint.last_id());
|
||||
|
||||
// Verify that write_and_register_entry registers the final entry after a split.
|
||||
entry_writer.write_and_register_entry(&entries[1]).unwrap();
|
||||
assert_eq!(bank.last_id(), entries[1].id);
|
||||
}
|
||||
}
|
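A round-trip sketch for the JSON-lines format above, assuming crate-internal imports as in this file's tests: write one entry into an in-memory buffer with EntryWriter::write_entries, then parse it back with read_entries. The function name entry_round_trip_demo is illustrative only.

use entry::Entry;
use entry_writer::{self, EntryWriter};
use hash::Hash;
use std::io::BufReader;

fn entry_round_trip_demo() -> std::io::Result<()> {
    let tick = Entry::new_tick(0, &Hash::default());

    // Serialize to JSON lines in memory.
    let mut buf: Vec<u8> = vec![];
    EntryWriter::write_entries(&mut buf, vec![tick.clone()])?;

    // Parse the lines back into entries.
    let mut reader = BufReader::new(&buf[..]);
    let read_back: Vec<Entry> =
        entry_writer::read_entries(&mut reader).collect::<std::io::Result<Vec<_>>>()?;
    assert_eq!(read_back, vec![tick]);
    Ok(())
}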
src/erasure.rs (new file, 624 lines) @@ -0,0 +1,624 @@
|
||||
// Support erasure coding
|
||||
|
||||
use packet::{BlobRecycler, SharedBlob, BLOB_HEADER_SIZE};
|
||||
use std::result;
|
||||
|
||||
//TODO(sakridge) pick these values
|
||||
pub const NUM_CODED: usize = 20;
|
||||
pub const MAX_MISSING: usize = 4;
|
||||
const NUM_DATA: usize = NUM_CODED - MAX_MISSING;
|
||||
|
||||
#[derive(Debug, PartialEq, Eq)]
|
||||
pub enum ErasureError {
|
||||
NotEnoughBlocksToDecode,
|
||||
DecodeError,
|
||||
EncodeError,
|
||||
InvalidBlockSize,
|
||||
}
|
||||
|
||||
pub type Result<T> = result::Result<T, ErasureError>;
|
||||
|
||||
// k = number of data devices
|
||||
// m = number of coding devices
|
||||
// w = word size
|
||||
|
||||
extern "C" {
|
||||
fn jerasure_matrix_encode(
|
||||
k: i32,
|
||||
m: i32,
|
||||
w: i32,
|
||||
matrix: *const i32,
|
||||
data_ptrs: *const *const u8,
|
||||
coding_ptrs: *const *mut u8,
|
||||
size: i32,
|
||||
);
|
||||
fn jerasure_matrix_decode(
|
||||
k: i32,
|
||||
m: i32,
|
||||
w: i32,
|
||||
matrix: *const i32,
|
||||
row_k_ones: i32,
|
||||
erasures: *const i32,
|
||||
data_ptrs: *const *mut u8,
|
||||
coding_ptrs: *const *const u8,
|
||||
size: i32,
|
||||
) -> i32;
|
||||
fn galois_single_divide(a: i32, b: i32, w: i32) -> i32;
|
||||
}
|
||||
|
||||
fn get_matrix(m: i32, k: i32, w: i32) -> Vec<i32> {
|
||||
let mut matrix = vec![0; (m * k) as usize];
|
||||
for i in 0..m {
|
||||
for j in 0..k {
|
||||
unsafe {
|
||||
matrix[(i * k + j) as usize] = galois_single_divide(1, i ^ (m + j), w);
|
||||
}
|
||||
}
|
||||
}
|
||||
matrix
|
||||
}
|
||||
|
||||
pub const ERASURE_W: i32 = 32;
|
||||
|
||||
// Generate coding blocks into coding
|
||||
// There are some alignment restrictions, blocks should be aligned by 16 bytes
|
||||
// which means their size should be >= 16 bytes
|
||||
pub fn generate_coding_blocks(coding: &mut [&mut [u8]], data: &[&[u8]]) -> Result<()> {
|
||||
if data.len() == 0 {
|
||||
return Ok(());
|
||||
}
|
||||
let m = coding.len() as i32;
|
||||
let block_len = data[0].len();
|
||||
let matrix: Vec<i32> = get_matrix(m, data.len() as i32, ERASURE_W);
|
||||
let mut coding_arg = Vec::new();
|
||||
let mut data_arg = Vec::new();
|
||||
for block in data {
|
||||
if block_len != block.len() {
|
||||
trace!(
|
||||
"data block size incorrect {} expected {}",
|
||||
block.len(),
|
||||
block_len
|
||||
);
|
||||
return Err(ErasureError::InvalidBlockSize);
|
||||
}
|
||||
data_arg.push(block.as_ptr());
|
||||
}
|
||||
for mut block in coding {
|
||||
if block_len != block.len() {
|
||||
trace!(
|
||||
"coding block size incorrect {} expected {}",
|
||||
block.len(),
|
||||
block_len
|
||||
);
|
||||
return Err(ErasureError::InvalidBlockSize);
|
||||
}
|
||||
coding_arg.push(block.as_mut_ptr());
|
||||
}
|
||||
|
||||
unsafe {
|
||||
jerasure_matrix_encode(
|
||||
data.len() as i32,
|
||||
m,
|
||||
ERASURE_W,
|
||||
matrix.as_ptr(),
|
||||
data_arg.as_ptr(),
|
||||
coding_arg.as_ptr(),
|
||||
data[0].len() as i32,
|
||||
);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Recover data + coding blocks into data blocks
|
||||
// data: array of blocks to recover into
|
||||
// coding: array of coding blocks
|
||||
// erasures: list of indices in data where blocks should be recovered
|
||||
pub fn decode_blocks(data: &mut [&mut [u8]], coding: &[&[u8]], erasures: &[i32]) -> Result<()> {
|
||||
if data.len() == 0 {
|
||||
return Ok(());
|
||||
}
|
||||
let block_len = data[0].len();
|
||||
let matrix: Vec<i32> = get_matrix(coding.len() as i32, data.len() as i32, ERASURE_W);
|
||||
|
||||
// generate coding pointers, blocks should be the same size
|
||||
let mut coding_arg: Vec<*const u8> = Vec::new();
|
||||
for x in coding.iter() {
|
||||
if x.len() != block_len {
|
||||
return Err(ErasureError::InvalidBlockSize);
|
||||
}
|
||||
coding_arg.push(x.as_ptr());
|
||||
}
|
||||
|
||||
// generate data pointers, blocks should be the same size
|
||||
let mut data_arg: Vec<*mut u8> = Vec::new();
|
||||
for x in data.iter_mut() {
|
||||
if x.len() != block_len {
|
||||
return Err(ErasureError::InvalidBlockSize);
|
||||
}
|
||||
data_arg.push(x.as_mut_ptr());
|
||||
}
|
||||
unsafe {
|
||||
let ret = jerasure_matrix_decode(
|
||||
data.len() as i32,
|
||||
coding.len() as i32,
|
||||
ERASURE_W,
|
||||
matrix.as_ptr(),
|
||||
0,
|
||||
erasures.as_ptr(),
|
||||
data_arg.as_ptr(),
|
||||
coding_arg.as_ptr(),
|
||||
data[0].len() as i32,
|
||||
);
|
||||
trace!("jerasure_matrix_decode ret: {}", ret);
|
||||
for x in data[erasures[0] as usize][0..8].iter() {
|
||||
trace!("{} ", x)
|
||||
}
|
||||
trace!("");
|
||||
if ret < 0 {
|
||||
return Err(ErasureError::DecodeError);
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Allocate some coding blobs and insert into the blobs array
|
||||
pub fn add_coding_blobs(recycler: &BlobRecycler, blobs: &mut Vec<SharedBlob>, consumed: u64) {
|
||||
let mut added = 0;
|
||||
let blobs_len = blobs.len() as u64;
|
||||
for i in consumed..consumed + blobs_len {
|
||||
let is = i as usize;
|
||||
if is != 0 && ((is + MAX_MISSING) % NUM_CODED) == 0 {
|
||||
for _ in 0..MAX_MISSING {
|
||||
trace!("putting coding at {}", (i - consumed));
|
||||
let new_blob = recycler.allocate();
|
||||
let new_blob_clone = new_blob.clone();
|
||||
let mut new_blob_l = new_blob_clone.write().unwrap();
|
||||
new_blob_l.set_size(0);
|
||||
new_blob_l.set_coding().unwrap();
|
||||
drop(new_blob_l);
|
||||
blobs.insert((i - consumed) as usize, new_blob);
|
||||
added += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
info!(
|
||||
"add_coding consumed: {} blobs.len(): {} added: {}",
|
||||
consumed,
|
||||
blobs.len(),
|
||||
added
|
||||
);
|
||||
}
|
||||
|
||||
// Generate coding blocks in window starting from consumed
|
||||
pub fn generate_coding(
|
||||
window: &mut Vec<Option<SharedBlob>>,
|
||||
consumed: usize,
|
||||
num_blobs: usize,
|
||||
) -> Result<()> {
|
||||
let mut block_start = consumed - (consumed % NUM_CODED);
|
||||
|
||||
for i in consumed..consumed + num_blobs {
|
||||
if (i % NUM_CODED) == (NUM_CODED - 1) {
|
||||
let mut data_blobs = Vec::new();
|
||||
let mut coding_blobs = Vec::new();
|
||||
let mut data_locks = Vec::new();
|
||||
let mut data_ptrs: Vec<&[u8]> = Vec::new();
|
||||
let mut coding_locks = Vec::new();
|
||||
let mut coding_ptrs: Vec<&mut [u8]> = Vec::new();
|
||||
|
||||
info!(
|
||||
"generate_coding start: {} end: {} consumed: {} num_blobs: {}",
|
||||
block_start,
|
||||
block_start + NUM_DATA,
|
||||
consumed,
|
||||
num_blobs
|
||||
);
|
||||
for i in block_start..block_start + NUM_DATA {
|
||||
let n = i % window.len();
|
||||
trace!("window[{}] = {:?}", n, window[n]);
|
||||
if window[n].is_none() {
|
||||
trace!("data block is null @ {}", n);
|
||||
return Ok(());
|
||||
}
|
||||
data_blobs.push(
|
||||
window[n]
|
||||
.clone()
|
||||
.expect("'data_blobs' arr in pub fn generate_coding"),
|
||||
);
|
||||
}
|
||||
let mut max_data_size = 0;
|
||||
for b in &data_blobs {
|
||||
let lck = b.write().expect("'b' write lock in pub fn generate_coding");
|
||||
if lck.meta.size > max_data_size {
|
||||
max_data_size = lck.meta.size;
|
||||
}
|
||||
data_locks.push(lck);
|
||||
}
|
||||
trace!("max_data_size: {}", max_data_size);
|
||||
for (i, l) in data_locks.iter_mut().enumerate() {
|
||||
trace!("i: {} data: {}", i, l.data[0]);
|
||||
data_ptrs.push(&l.data[..max_data_size]);
|
||||
}
|
||||
|
||||
// generate coding ptr array
|
||||
let coding_start = block_start + NUM_DATA;
|
||||
let coding_end = block_start + NUM_CODED;
|
||||
for i in coding_start..coding_end {
|
||||
let n = i % window.len();
|
||||
if window[n].is_none() {
|
||||
trace!("coding block is null @ {}", n);
|
||||
return Ok(());
|
||||
}
|
||||
let w_l = window[n].clone().unwrap();
|
||||
w_l.write().unwrap().set_size(max_data_size);
|
||||
if w_l.write().unwrap().set_coding().is_err() {
|
||||
return Err(ErasureError::EncodeError);
|
||||
}
|
||||
coding_blobs.push(
|
||||
window[n]
|
||||
.clone()
|
||||
.expect("'coding_blobs' arr in pub fn generate_coding"),
|
||||
);
|
||||
}
|
||||
for b in &coding_blobs {
|
||||
coding_locks.push(
|
||||
b.write()
|
||||
.expect("'coding_locks' arr in pub fn generate_coding"),
|
||||
);
|
||||
}
|
||||
for (i, l) in coding_locks.iter_mut().enumerate() {
|
||||
trace!("i: {} coding: {} size: {}", i, l.data[0], max_data_size);
|
||||
coding_ptrs.push(&mut l.data_mut()[..max_data_size]);
|
||||
}
|
||||
|
||||
generate_coding_blocks(coding_ptrs.as_mut_slice(), &data_ptrs)?;
|
||||
debug!(
|
||||
"consumed: {} data: {}:{} coding: {}:{}",
|
||||
consumed,
|
||||
block_start,
|
||||
block_start + NUM_DATA,
|
||||
coding_start,
|
||||
coding_end
|
||||
);
|
||||
block_start += NUM_CODED;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Recover missing blocks into window
|
||||
// missing blocks should be None; will use `re` (the blob recycler)
|
||||
// to allocate new ones. Returns err if not enough
|
||||
// coding blocks are present to restore
|
||||
pub fn recover(
|
||||
re: &BlobRecycler,
|
||||
window: &mut Vec<Option<SharedBlob>>,
|
||||
consumed: usize,
|
||||
received: usize,
|
||||
) -> Result<()> {
|
||||
//recover with erasure coding
|
||||
if received <= consumed {
|
||||
return Ok(());
|
||||
}
|
||||
let num_blocks = (received - consumed) / NUM_CODED;
|
||||
let mut block_start = consumed - (consumed % NUM_CODED);
|
||||
|
||||
if num_blocks > 0 {
|
||||
debug!(
|
||||
"num_blocks: {} received: {} consumed: {}",
|
||||
num_blocks, received, consumed
|
||||
);
|
||||
}
|
||||
|
||||
for i in 0..num_blocks {
|
||||
if i > 100 {
|
||||
break;
|
||||
}
|
||||
let mut data_missing = 0;
|
||||
let mut coded_missing = 0;
|
||||
let coding_start = block_start + NUM_DATA;
|
||||
let coding_end = block_start + NUM_CODED;
|
||||
trace!(
|
||||
"recover: block_start: {} coding_start: {} coding_end: {}",
|
||||
block_start,
|
||||
coding_start,
|
||||
coding_end
|
||||
);
|
||||
for i in block_start..coding_end {
|
||||
let n = i % window.len();
|
||||
if window[n].is_none() {
|
||||
if i >= coding_start {
|
||||
coded_missing += 1;
|
||||
} else {
|
||||
data_missing += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (data_missing + coded_missing) != NUM_CODED && (data_missing + coded_missing) != 0 {
|
||||
debug!(
|
||||
"1: start: {} recovering: data: {} coding: {}",
|
||||
block_start, data_missing, coded_missing
|
||||
);
|
||||
}
|
||||
if data_missing > 0 {
|
||||
if (data_missing + coded_missing) <= MAX_MISSING {
|
||||
debug!(
|
||||
"2: recovering: data: {} coding: {}",
|
||||
data_missing, coded_missing
|
||||
);
|
||||
let mut blobs: Vec<SharedBlob> = Vec::new();
|
||||
let mut locks = Vec::new();
|
||||
let mut erasures: Vec<i32> = Vec::new();
|
||||
let mut meta = None;
|
||||
let mut size = None;
|
||||
for i in block_start..coding_end {
|
||||
let j = i % window.len();
|
||||
let mut b = &mut window[j];
|
||||
if b.is_some() {
|
||||
if i >= NUM_DATA && size.is_none() {
|
||||
let bl = b.clone().unwrap();
|
||||
size = Some(bl.read().unwrap().meta.size - BLOB_HEADER_SIZE);
|
||||
}
|
||||
if meta.is_none() {
|
||||
let bl = b.clone().unwrap();
|
||||
meta = Some(bl.read().unwrap().meta.clone());
|
||||
}
|
||||
blobs.push(b.clone().expect("'blobs' arr in pub fn recover"));
|
||||
continue;
|
||||
}
|
||||
let n = re.allocate();
|
||||
*b = Some(n.clone());
|
||||
//mark the missing memory
|
||||
blobs.push(n);
|
||||
erasures.push((i - block_start) as i32);
|
||||
}
|
||||
erasures.push(-1);
|
||||
trace!(
|
||||
"erasures: {:?} data_size: {} header_size: {}",
|
||||
erasures,
|
||||
size.unwrap(),
|
||||
BLOB_HEADER_SIZE
|
||||
);
|
||||
//lock everything
|
||||
for b in &blobs {
|
||||
locks.push(b.write().expect("'locks' arr in pub fn recover"));
|
||||
}
|
||||
{
|
||||
let mut coding_ptrs: Vec<&[u8]> = Vec::new();
|
||||
let mut data_ptrs: Vec<&mut [u8]> = Vec::new();
|
||||
for (i, l) in locks.iter_mut().enumerate() {
|
||||
if i >= NUM_DATA {
|
||||
trace!("pushing coding: {}", i);
|
||||
coding_ptrs.push(&l.data()[..size.unwrap()]);
|
||||
} else {
|
||||
trace!("pushing data: {}", i);
|
||||
data_ptrs.push(&mut l.data[..size.unwrap()]);
|
||||
}
|
||||
}
|
||||
trace!(
|
||||
"coding_ptrs.len: {} data_ptrs.len {}",
|
||||
coding_ptrs.len(),
|
||||
data_ptrs.len()
|
||||
);
|
||||
decode_blocks(data_ptrs.as_mut_slice(), &coding_ptrs, &erasures)?;
|
||||
}
|
||||
for i in &erasures[..erasures.len() - 1] {
|
||||
let idx = *i as usize;
|
||||
let data_size = locks[idx].get_data_size().unwrap() - BLOB_HEADER_SIZE as u64;
|
||||
locks[idx].meta = meta.clone().unwrap();
|
||||
locks[idx].set_size(data_size as usize);
|
||||
trace!(
|
||||
"erasures[{}] size: {} data[0]: {}",
|
||||
*i,
|
||||
data_size,
|
||||
locks[idx].data()[0]
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
block_start += NUM_CODED;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use crdt;
|
||||
use erasure;
|
||||
use logger;
|
||||
use packet::{BlobRecycler, SharedBlob, BLOB_HEADER_SIZE};
|
||||
use signature::KeyPair;
|
||||
use signature::KeyPairUtil;
|
||||
use std::sync::{Arc, RwLock};
|
||||
|
||||
#[test]
|
||||
pub fn test_coding() {
|
||||
let zero_vec = vec![0; 16];
|
||||
let mut vs: Vec<Vec<u8>> = (0..4).map(|i| (i..(16 + i)).collect()).collect();
|
||||
let v_orig: Vec<u8> = vs[0].clone();
|
||||
|
||||
let m = 2;
|
||||
let mut coding_blocks: Vec<_> = (0..m).map(|_| vec![0u8; 16]).collect();
|
||||
|
||||
{
|
||||
let mut coding_blocks_slices: Vec<_> =
|
||||
coding_blocks.iter_mut().map(|x| x.as_mut_slice()).collect();
|
||||
let v_slices: Vec<_> = vs.iter().map(|x| x.as_slice()).collect();
|
||||
|
||||
assert!(
|
||||
erasure::generate_coding_blocks(
|
||||
coding_blocks_slices.as_mut_slice(),
|
||||
v_slices.as_slice()
|
||||
).is_ok()
|
||||
);
|
||||
}
|
||||
trace!("coding blocks:");
|
||||
for b in &coding_blocks {
|
||||
trace!("{:?}", b);
|
||||
}
|
||||
let erasure: i32 = 1;
|
||||
let erasures = vec![erasure, -1];
|
||||
// clear an entry
|
||||
vs[erasure as usize].copy_from_slice(zero_vec.as_slice());
|
||||
|
||||
{
|
||||
let coding_blocks_slices: Vec<_> = coding_blocks.iter().map(|x| x.as_slice()).collect();
|
||||
let mut v_slices: Vec<_> = vs.iter_mut().map(|x| x.as_mut_slice()).collect();
|
||||
|
||||
assert!(
|
||||
erasure::decode_blocks(
|
||||
v_slices.as_mut_slice(),
|
||||
coding_blocks_slices.as_slice(),
|
||||
erasures.as_slice(),
|
||||
).is_ok()
|
||||
);
|
||||
}
|
||||
|
||||
trace!("vs:");
|
||||
for v in &vs {
|
||||
trace!("{:?}", v);
|
||||
}
|
||||
assert_eq!(v_orig, vs[0]);
|
||||
}
|
||||
|
||||
fn print_window(window: &Vec<Option<SharedBlob>>) {
|
||||
for (i, w) in window.iter().enumerate() {
|
||||
print!("window({}): ", i);
|
||||
if w.is_some() {
|
||||
let window_l1 = w.clone().unwrap();
|
||||
let window_l2 = window_l1.read().unwrap();
|
||||
print!(
|
||||
"index: {:?} meta.size: {} data: ",
|
||||
window_l2.get_index(),
|
||||
window_l2.meta.size
|
||||
);
|
||||
for i in 0..8 {
|
||||
print!("{} ", window_l2.data()[i]);
|
||||
}
|
||||
} else {
|
||||
print!("null");
|
||||
}
|
||||
println!("");
|
||||
}
|
||||
}
|
||||
|
||||
fn generate_window(
|
||||
data_len: usize,
|
||||
blob_recycler: &BlobRecycler,
|
||||
offset: usize,
|
||||
num_blobs: usize,
|
||||
) -> (Vec<Option<SharedBlob>>, usize) {
|
||||
let mut window = vec![None; 32];
|
||||
let mut blobs = Vec::new();
|
||||
for i in 0..num_blobs {
|
||||
let b = blob_recycler.allocate();
|
||||
let b_ = b.clone();
|
||||
let mut w = b.write().unwrap();
|
||||
w.set_size(data_len);
|
||||
for k in 0..data_len {
|
||||
w.data_mut()[k] = (k + i) as u8;
|
||||
}
|
||||
blobs.push(b_);
|
||||
}
|
||||
erasure::add_coding_blobs(blob_recycler, &mut blobs, offset as u64);
|
||||
let blobs_len = blobs.len();
|
||||
|
||||
let d = crdt::ReplicatedData::new(
|
||||
KeyPair::new().pubkey(),
|
||||
"127.0.0.1:1234".parse().unwrap(),
|
||||
"127.0.0.1:1235".parse().unwrap(),
|
||||
"127.0.0.1:1236".parse().unwrap(),
|
||||
"127.0.0.1:1237".parse().unwrap(),
|
||||
"127.0.0.1:1238".parse().unwrap(),
|
||||
);
|
||||
let crdt = Arc::new(RwLock::new(crdt::Crdt::new(d.clone())));
|
||||
|
||||
assert!(crdt::Crdt::index_blobs(&crdt, &blobs, &mut (offset as u64)).is_ok());
|
||||
for b in blobs {
|
||||
let idx = b.read().unwrap().get_index().unwrap() as usize;
|
||||
window[idx] = Some(b);
|
||||
}
|
||||
(window, blobs_len)
|
||||
}
|
||||
|
||||
#[test]
|
||||
pub fn test_window_recover_basic() {
|
||||
logger::setup();
|
||||
let data_len = 16;
|
||||
let blob_recycler = BlobRecycler::default();
|
||||
|
||||
// Generate a window
|
||||
let offset = 1;
|
||||
let num_blobs = erasure::NUM_DATA + 2;
|
||||
let (mut window, blobs_len) = generate_window(data_len, &blob_recycler, 0, num_blobs);
|
||||
println!("** after-gen-window:");
|
||||
print_window(&window);
|
||||
|
||||
// Generate the coding blocks
|
||||
assert!(erasure::generate_coding(&mut window, offset, blobs_len).is_ok());
|
||||
println!("** after-gen-coding:");
|
||||
print_window(&window);
|
||||
|
||||
let erase_offset = offset;
|
||||
// Create a hole in the window
|
||||
let refwindow = window[erase_offset].clone();
|
||||
window[erase_offset] = None;
|
||||
|
||||
// Recover it from coding
|
||||
assert!(erasure::recover(&blob_recycler, &mut window, offset, offset + blobs_len).is_ok());
|
||||
println!("** after-recover:");
|
||||
print_window(&window);
|
||||
|
||||
// Check the result
|
||||
let window_l = window[erase_offset].clone().unwrap();
|
||||
let window_l2 = window_l.read().unwrap();
|
||||
let ref_l = refwindow.clone().unwrap();
|
||||
let ref_l2 = ref_l.read().unwrap();
|
||||
assert_eq!(
|
||||
window_l2.data[..(data_len + BLOB_HEADER_SIZE)],
|
||||
ref_l2.data[..(data_len + BLOB_HEADER_SIZE)]
|
||||
);
|
||||
assert_eq!(window_l2.meta.size, ref_l2.meta.size);
|
||||
assert_eq!(window_l2.meta.addr, ref_l2.meta.addr);
|
||||
assert_eq!(window_l2.meta.port, ref_l2.meta.port);
|
||||
assert_eq!(window_l2.meta.v6, ref_l2.meta.v6);
|
||||
assert_eq!(window_l2.get_index().unwrap(), erase_offset as u64);
|
||||
}
|
||||
|
||||
//TODO This needs to be reworked
|
||||
#[test]
|
||||
#[ignore]
|
||||
pub fn test_window_recover() {
|
||||
logger::setup();
|
||||
let blob_recycler = BlobRecycler::default();
|
||||
let offset = 4;
|
||||
let data_len = 16;
|
||||
let num_blobs = erasure::NUM_DATA + 2;
|
||||
let (mut window, blobs_len) = generate_window(data_len, &blob_recycler, offset, num_blobs);
|
||||
println!("** after-gen:");
|
||||
print_window(&window);
|
||||
assert!(erasure::generate_coding(&mut window, offset, blobs_len).is_ok());
|
||||
println!("** after-coding:");
|
||||
print_window(&window);
|
||||
let refwindow = window[offset + 1].clone();
|
||||
window[offset + 1] = None;
|
||||
window[offset + 2] = None;
|
||||
window[offset + erasure::NUM_CODED + 3] = None;
|
||||
window[offset + (2 * erasure::NUM_CODED) + 0] = None;
|
||||
window[offset + (2 * erasure::NUM_CODED) + 1] = None;
|
||||
window[offset + (2 * erasure::NUM_CODED) + 2] = None;
|
||||
let window_l0 = &(window[offset + (3 * erasure::NUM_CODED)]).clone().unwrap();
|
||||
window_l0.write().unwrap().data[0] = 55;
|
||||
println!("** after-nulling:");
|
||||
print_window(&window);
|
||||
assert!(erasure::recover(&blob_recycler, &mut window, offset, offset + blobs_len).is_ok());
|
||||
println!("** after-restore:");
|
||||
print_window(&window);
|
||||
let window_l = window[offset + 1].clone().unwrap();
|
||||
let ref_l = refwindow.clone().unwrap();
|
||||
assert_eq!(
|
||||
window_l.read().unwrap().data()[..data_len],
|
||||
ref_l.read().unwrap().data()[..data_len]
|
||||
);
|
||||
}
|
||||
}
|
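A small arithmetic sketch of the window layout implied by the constants above: with NUM_CODED = 20 and MAX_MISSING = 4, the trigger condition in add_coding_blobs fires after every NUM_DATA = 16 data blobs, so each group of 20 slots ends with 4 coding blobs. The function name coding_layout_demo is illustrative only.

const NUM_CODED: usize = 20;
const MAX_MISSING: usize = 4;
const NUM_DATA: usize = NUM_CODED - MAX_MISSING;

fn coding_layout_demo() {
    // Indices (relative to the start of the window) at which the insertion
    // condition in add_coding_blobs triggers, for the first two groups.
    let triggers: Vec<usize> = (1..2 * NUM_CODED)
        .filter(|&i| (i + MAX_MISSING) % NUM_CODED == 0)
        .collect();
    assert_eq!(triggers, vec![NUM_DATA, NUM_DATA + NUM_CODED]); // [16, 36]
}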
src/fetch_stage.rs (new file, 43 lines) @@ -0,0 +1,43 @@
//! The `fetch_stage` batches input from a UDP socket and sends it to a channel.

use packet::PacketRecycler;
use std::net::UdpSocket;
use std::sync::atomic::AtomicBool;
use std::sync::mpsc::channel;
use std::sync::Arc;
use std::thread::JoinHandle;
use streamer::{self, PacketReceiver};

pub struct FetchStage {
    pub thread_hdls: Vec<JoinHandle<()>>,
}

impl FetchStage {
    pub fn new(
        socket: UdpSocket,
        exit: Arc<AtomicBool>,
        packet_recycler: PacketRecycler,
    ) -> (Self, PacketReceiver) {
        Self::new_multi_socket(vec![socket], exit, packet_recycler)
    }

    pub fn new_multi_socket(
        sockets: Vec<UdpSocket>,
        exit: Arc<AtomicBool>,
        packet_recycler: PacketRecycler,
    ) -> (Self, PacketReceiver) {
        let (packet_sender, packet_receiver) = channel();
        let thread_hdls: Vec<_> = sockets
            .into_iter()
            .map(|socket| {
                streamer::receiver(
                    socket,
                    exit.clone(),
                    packet_recycler.clone(),
                    packet_sender.clone(),
                )
            })
            .collect();

        (FetchStage { thread_hdls }, packet_receiver)
    }
}
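A usage sketch for FetchStage above: bind a UDP socket, start the stage, then shut it down through the shared exit flag, mirroring how other tests in this diff stop their services. It assumes crate-internal imports and that PacketRecycler, like BlobRecycler elsewhere in this diff, implements Default; the function name fetch_stage_demo is illustrative only.

use fetch_stage::FetchStage;
use packet::PacketRecycler;
use std::net::UdpSocket;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;

fn fetch_stage_demo() {
    let socket = UdpSocket::bind("0.0.0.0:0").expect("bind");
    let exit = Arc::new(AtomicBool::new(false));
    let packet_recycler = PacketRecycler::default(); // assumed to implement Default

    let (stage, _packet_receiver) = FetchStage::new(socket, exit.clone(), packet_recycler);

    // Signal shutdown and join the receiver threads.
    exit.store(true, Ordering::Relaxed);
    for t in stage.thread_hdls {
        t.join().expect("join fetch stage thread");
    }
}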
src/fullnode.rs (new file, 312 lines) @@ -0,0 +1,312 @@
|
||||
//! The `fullnode` module hosts all the fullnode microservices.
|
||||
|
||||
use bank::Bank;
|
||||
use crdt::{Crdt, ReplicatedData, TestNode};
|
||||
use entry_writer;
|
||||
use ncp::Ncp;
|
||||
use packet::BlobRecycler;
|
||||
use rpu::Rpu;
|
||||
use std::fs::File;
|
||||
use std::io::Write;
|
||||
use std::io::{stdin, stdout, BufReader};
|
||||
use std::net::SocketAddr;
|
||||
use std::net::UdpSocket;
|
||||
use std::sync::atomic::AtomicBool;
|
||||
use std::sync::{Arc, RwLock};
|
||||
use std::thread::JoinHandle;
|
||||
use std::time::Duration;
|
||||
use streamer;
|
||||
use tpu::Tpu;
|
||||
use tvu::Tvu;
|
||||
|
||||
//use std::time::Duration;
|
||||
pub struct FullNode {
|
||||
pub thread_hdls: Vec<JoinHandle<()>>,
|
||||
}
|
||||
|
||||
impl FullNode {
|
||||
pub fn new(
|
||||
mut node: TestNode,
|
||||
leader: bool,
|
||||
infile: Option<String>,
|
||||
network_entry_for_validator: Option<SocketAddr>,
|
||||
outfile_for_leader: Option<String>,
|
||||
exit: Arc<AtomicBool>,
|
||||
) -> FullNode {
|
||||
info!("creating bank...");
|
||||
let bank = Bank::default();
|
||||
let entry_height = if let Some(path) = infile {
|
||||
let f = File::open(path).unwrap();
|
||||
let mut r = BufReader::new(f);
|
||||
let entries =
|
||||
entry_writer::read_entries(&mut r).map(|e| e.expect("failed to parse entry"));
|
||||
info!("processing ledger...");
|
||||
bank.process_ledger(entries).expect("process_ledger")
|
||||
} else {
|
||||
let mut r = BufReader::new(stdin());
|
||||
let entries =
|
||||
entry_writer::read_entries(&mut r).map(|e| e.expect("failed to parse entry"));
|
||||
info!("processing ledger...");
|
||||
bank.process_ledger(entries).expect("process_ledger")
|
||||
};
|
||||
|
||||
// entry_height is the network-wide agreed height of the ledger.
|
||||
// initialize it from the input ledger
|
||||
info!("processed {} ledger...", entry_height);
|
||||
|
||||
info!("creating networking stack...");
|
||||
|
||||
let local_gossip_addr = node.sockets.gossip.local_addr().unwrap();
|
||||
let local_requests_addr = node.sockets.requests.local_addr().unwrap();
|
||||
info!(
|
||||
"starting... local gossip address: {} (advertising {})",
|
||||
local_gossip_addr, node.data.gossip_addr
|
||||
);
|
||||
if !leader {
|
||||
let testnet_addr = network_entry_for_validator.expect("validator requires entry");
|
||||
|
||||
let network_entry_point = ReplicatedData::new_entry_point(testnet_addr);
|
||||
let server = FullNode::new_validator(
|
||||
bank,
|
||||
entry_height,
|
||||
node.data.clone(),
|
||||
node.sockets.requests,
|
||||
node.sockets.respond,
|
||||
node.sockets.replicate,
|
||||
node.sockets.gossip,
|
||||
node.sockets.repair,
|
||||
network_entry_point,
|
||||
exit.clone(),
|
||||
);
|
||||
info!(
|
||||
"validator ready... local request address: {} (advertising {}) connected to: {}",
|
||||
local_requests_addr, node.data.requests_addr, testnet_addr
|
||||
);
|
||||
server
|
||||
} else {
|
||||
node.data.current_leader_id = node.data.id.clone();
|
||||
let server = if let Some(file) = outfile_for_leader {
|
||||
FullNode::new_leader(
|
||||
bank,
|
||||
entry_height,
|
||||
//Some(Duration::from_millis(1000)),
|
||||
None,
|
||||
node.data.clone(),
|
||||
node.sockets.requests,
|
||||
node.sockets.transaction,
|
||||
node.sockets.broadcast,
|
||||
node.sockets.respond,
|
||||
node.sockets.gossip,
|
||||
exit.clone(),
|
||||
File::create(file).expect("opening ledger file"),
|
||||
)
|
||||
} else {
|
||||
FullNode::new_leader(
|
||||
bank,
|
||||
entry_height,
|
||||
//Some(Duration::from_millis(1000)),
|
||||
None,
|
||||
node.data.clone(),
|
||||
node.sockets.requests,
|
||||
node.sockets.transaction,
|
||||
node.sockets.broadcast,
|
||||
node.sockets.respond,
|
||||
node.sockets.gossip,
|
||||
exit.clone(),
|
||||
stdout(),
|
||||
)
|
||||
};
|
||||
info!(
|
||||
"leader ready... local request address: {} (advertising {})",
|
||||
local_requests_addr, node.data.requests_addr
|
||||
);
|
||||
server
|
||||
}
|
||||
}
|
||||
/// Create a server instance acting as a leader.
|
||||
///
|
||||
/// ```text
|
||||
/// .---------------------.
|
||||
/// | Leader |
|
||||
/// | |
|
||||
/// .--------. | .-----. |
|
||||
/// | |---->| | |
|
||||
/// | Client | | | RPU | |
|
||||
/// | |<----| | |
|
||||
/// `----+---` | `-----` |
|
||||
/// | | ^ |
|
||||
/// | | | |
|
||||
/// | | .--+---. |
|
||||
/// | | | Bank | |
|
||||
/// | | `------` |
|
||||
/// | | ^ |
|
||||
/// | | | | .------------.
|
||||
/// | | .--+--. .-----. | | |
|
||||
/// `-------->| TPU +-->| NCP +------>| Validators |
|
||||
/// | `-----` `-----` | | |
|
||||
/// | | `------------`
|
||||
/// `---------------------`
|
||||
/// ```
|
||||
pub fn new_leader<W: Write + Send + 'static>(
|
||||
bank: Bank,
|
||||
entry_height: u64,
|
||||
tick_duration: Option<Duration>,
|
||||
me: ReplicatedData,
|
||||
requests_socket: UdpSocket,
|
||||
transactions_socket: UdpSocket,
|
||||
broadcast_socket: UdpSocket,
|
||||
respond_socket: UdpSocket,
|
||||
gossip_socket: UdpSocket,
|
||||
exit: Arc<AtomicBool>,
|
||||
writer: W,
|
||||
) -> Self {
|
||||
let bank = Arc::new(bank);
|
||||
let mut thread_hdls = vec![];
|
||||
let rpu = Rpu::new(bank.clone(), requests_socket, respond_socket, exit.clone());
|
||||
thread_hdls.extend(rpu.thread_hdls);
|
||||
|
||||
let blob_recycler = BlobRecycler::default();
|
||||
let (tpu, blob_receiver) = Tpu::new(
|
||||
bank.clone(),
|
||||
tick_duration,
|
||||
transactions_socket,
|
||||
blob_recycler.clone(),
|
||||
exit.clone(),
|
||||
writer,
|
||||
);
|
||||
thread_hdls.extend(tpu.thread_hdls);
|
||||
|
||||
let crdt = Arc::new(RwLock::new(Crdt::new(me)));
|
||||
let window = streamer::default_window();
|
||||
let gossip_send_socket = UdpSocket::bind("0.0.0.0:0").expect("bind 0");
|
||||
let ncp = Ncp::new(
|
||||
crdt.clone(),
|
||||
window.clone(),
|
||||
gossip_socket,
|
||||
gossip_send_socket,
|
||||
exit.clone(),
|
||||
).expect("Ncp::new");
|
||||
thread_hdls.extend(ncp.thread_hdls);
|
||||
|
||||
let t_broadcast = streamer::broadcaster(
|
||||
broadcast_socket,
|
||||
exit.clone(),
|
||||
crdt,
|
||||
window,
|
||||
entry_height,
|
||||
blob_recycler.clone(),
|
||||
blob_receiver,
|
||||
);
|
||||
thread_hdls.extend(vec![t_broadcast]);
|
||||
|
||||
FullNode { thread_hdls }
|
||||
}
|
||||
|
||||
/// Create a server instance acting as a validator.
|
||||
///
|
||||
/// ```text
|
||||
/// .-------------------------------.
|
||||
/// | Validator |
|
||||
/// | |
|
||||
/// .--------. | .-----. |
|
||||
/// | |-------------->| | |
|
||||
/// | Client | | | RPU | |
|
||||
/// | |<--------------| | |
|
||||
/// `--------` | `-----` |
|
||||
/// | ^ |
|
||||
/// | | |
|
||||
/// | .--+---. |
|
||||
/// | | Bank | |
|
||||
/// | `------` |
|
||||
/// | ^ |
|
||||
/// .--------. | | | .------------.
|
||||
/// | | | .--+--. | | |
|
||||
/// | Leader |<------------->| TVU +<--------------->| |
|
||||
/// | | | `-----` | | Validators |
|
||||
/// | | | ^ | | |
|
||||
/// | | | | | | |
|
||||
/// | | | .--+--. | | |
|
||||
/// | |<------------->| NCP +<--------------->| |
|
||||
/// | | | `-----` | | |
|
||||
/// `--------` | | `------------`
|
||||
/// `-------------------------------`
|
||||
/// ```
|
||||
pub fn new_validator(
|
||||
bank: Bank,
|
||||
entry_height: u64,
|
||||
me: ReplicatedData,
|
||||
requests_socket: UdpSocket,
|
||||
respond_socket: UdpSocket,
|
||||
replicate_socket: UdpSocket,
|
||||
gossip_listen_socket: UdpSocket,
|
||||
repair_socket: UdpSocket,
|
||||
entry_point: ReplicatedData,
|
||||
exit: Arc<AtomicBool>,
|
||||
) -> Self {
|
||||
let bank = Arc::new(bank);
|
||||
let mut thread_hdls = vec![];
|
||||
let rpu = Rpu::new(bank.clone(), requests_socket, respond_socket, exit.clone());
|
||||
thread_hdls.extend(rpu.thread_hdls);
|
||||
|
||||
let crdt = Arc::new(RwLock::new(Crdt::new(me)));
|
||||
crdt.write()
|
||||
.expect("'crdt' write lock before insert() in pub fn replicate")
|
||||
.insert(&entry_point);
|
||||
let window = streamer::default_window();
|
||||
let gossip_send_socket = UdpSocket::bind("0.0.0.0:0").expect("bind 0");
|
||||
let retransmit_socket = UdpSocket::bind("0.0.0.0:0").expect("bind 0");
|
||||
let ncp = Ncp::new(
|
||||
crdt.clone(),
|
||||
window.clone(),
|
||||
gossip_listen_socket,
|
||||
gossip_send_socket,
|
||||
exit.clone(),
|
||||
).expect("Ncp::new");
|
||||
|
||||
let tvu = Tvu::new(
|
||||
bank.clone(),
|
||||
entry_height,
|
||||
crdt.clone(),
|
||||
window.clone(),
|
||||
replicate_socket,
|
||||
repair_socket,
|
||||
retransmit_socket,
|
||||
exit.clone(),
|
||||
);
|
||||
thread_hdls.extend(tvu.thread_hdls);
|
||||
thread_hdls.extend(ncp.thread_hdls);
|
||||
FullNode { thread_hdls }
|
||||
}
|
||||
}
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use bank::Bank;
|
||||
use crdt::TestNode;
|
||||
use fullnode::FullNode;
|
||||
use mint::Mint;
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::sync::Arc;
|
||||
#[test]
|
||||
fn validator_exit() {
|
||||
let tn = TestNode::new();
|
||||
let alice = Mint::new(10_000);
|
||||
let bank = Bank::new(&alice);
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let v = FullNode::new_validator(
|
||||
bank,
|
||||
0,
|
||||
tn.data.clone(),
|
||||
tn.sockets.requests,
|
||||
tn.sockets.respond,
|
||||
tn.sockets.replicate,
|
||||
tn.sockets.gossip,
|
||||
tn.sockets.repair,
|
||||
tn.data,
|
||||
exit.clone(),
|
||||
);
|
||||
exit.store(true, Ordering::Relaxed);
|
||||
for t in v.thread_hdls {
|
||||
t.join().unwrap();
|
||||
}
|
||||
}
|
||||
}
|
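A sketch of starting a leader with FullNode::new as defined above. The ledger paths "ledger.json" and "ledger-out.json" are hypothetical placeholders, and crate-internal imports are assumed, mirroring the tests in this file.

use crdt::TestNode;
use fullnode::FullNode;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;

fn leader_demo() {
    let node = TestNode::new();
    let exit = Arc::new(AtomicBool::new(false));
    // leader = true, replay the ledger from "ledger.json", no validator entry
    // point, and append new entries to "ledger-out.json".
    let server = FullNode::new(
        node,
        true,
        Some("ledger.json".to_string()),
        None,
        Some("ledger-out.json".to_string()),
        exit.clone(),
    );
    exit.store(true, Ordering::Relaxed);
    for t in server.thread_hdls {
        t.join().expect("join fullnode thread");
    }
}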
src/hash.rs (new file, 24 lines) @@ -0,0 +1,24 @@
//! The `hash` module provides functions for creating SHA-256 hashes.

use generic_array::typenum::U32;
use generic_array::GenericArray;
use sha2::{Digest, Sha256};

pub type Hash = GenericArray<u8, U32>;

/// Return a Sha256 hash for the given data.
pub fn hash(val: &[u8]) -> Hash {
    let mut hasher = Sha256::default();
    hasher.input(val);

    // At the time of this writing, the sha2 library is stuck on an old version
    // of generic_array (0.9.0). Decouple ourselves with a clone to our version.
    GenericArray::clone_from_slice(hasher.result().as_slice())
}

/// Return the hash of the given hash extended with the given value.
pub fn extend_and_hash(id: &Hash, val: &[u8]) -> Hash {
    let mut hash_data = id.to_vec();
    hash_data.extend_from_slice(val);
    hash(&hash_data)
}
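A minimal sketch of the two helpers above: hash a byte slice, then fold more data into an existing hash with extend_and_hash. Crate-internal imports assumed; the function name hash_demo is illustrative only.

use hash::{extend_and_hash, hash, Hash};

fn hash_demo() {
    let h1: Hash = hash(b"hello");
    // Hash the previous hash together with additional bytes.
    let h2: Hash = extend_and_hash(&h1, b"world");
    assert_ne!(h1, h2);
    // Hashing is deterministic.
    assert_eq!(hash(b"hello"), h1);
}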
src/historian.rs (deleted, 186 lines) @@ -1,186 +0,0 @@
|
||||
//! The `historian` crate provides a microservice for generating a Proof-of-History.
|
||||
//! It logs Event items on behalf of its users. It continuously generates
|
||||
//! new hashes, only stopping to check if it has been sent an Event item. It
|
||||
//! tags each Event with an Entry and sends it back. The Entry includes the
|
||||
//! Event, the latest hash, and the number of hashes since the last event.
|
||||
//! The resulting stream of entries represents ordered events in time.
|
||||
|
||||
use std::thread::JoinHandle;
|
||||
use std::sync::mpsc::{Receiver, Sender};
|
||||
use std::time::{Duration, SystemTime};
|
||||
use log::{hash, hash_event, Entry, Event, Sha256Hash};
|
||||
|
||||
pub struct Historian {
|
||||
pub sender: Sender<Event>,
|
||||
pub receiver: Receiver<Entry>,
|
||||
pub thread_hdl: JoinHandle<(Entry, ExitReason)>,
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq, Eq)]
|
||||
pub enum ExitReason {
|
||||
RecvDisconnected,
|
||||
SendDisconnected,
|
||||
}
|
||||
fn log_event(
|
||||
sender: &Sender<Entry>,
|
||||
num_hashes: &mut u64,
|
||||
end_hash: &mut Sha256Hash,
|
||||
event: Event,
|
||||
) -> Result<(), (Entry, ExitReason)> {
|
||||
*end_hash = hash_event(end_hash, &event);
|
||||
let entry = Entry {
|
||||
end_hash: *end_hash,
|
||||
num_hashes: *num_hashes,
|
||||
event,
|
||||
};
|
||||
if let Err(_) = sender.send(entry.clone()) {
|
||||
return Err((entry, ExitReason::SendDisconnected));
|
||||
}
|
||||
*num_hashes = 0;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn log_events(
|
||||
receiver: &Receiver<Event>,
|
||||
sender: &Sender<Entry>,
|
||||
num_hashes: &mut u64,
|
||||
end_hash: &mut Sha256Hash,
|
||||
epoch: SystemTime,
|
||||
num_ticks: &mut u64,
|
||||
ms_per_tick: Option<u64>,
|
||||
) -> Result<(), (Entry, ExitReason)> {
|
||||
use std::sync::mpsc::TryRecvError;
|
||||
loop {
|
||||
if let Some(ms) = ms_per_tick {
|
||||
let now = SystemTime::now();
|
||||
if now > epoch + Duration::from_millis((*num_ticks + 1) * ms) {
|
||||
log_event(sender, num_hashes, end_hash, Event::Tick)?;
|
||||
*num_ticks += 1;
|
||||
}
|
||||
}
|
||||
match receiver.try_recv() {
|
||||
Ok(event) => {
|
||||
log_event(sender, num_hashes, end_hash, event)?;
|
||||
}
|
||||
Err(TryRecvError::Empty) => {
|
||||
return Ok(());
|
||||
}
|
||||
Err(TryRecvError::Disconnected) => {
|
||||
let entry = Entry {
|
||||
end_hash: *end_hash,
|
||||
num_hashes: *num_hashes,
|
||||
event: Event::Tick,
|
||||
};
|
||||
return Err((entry, ExitReason::RecvDisconnected));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A background thread that will continue tagging received Event messages and
|
||||
/// sending back Entry messages until either the receiver or sender channel is closed.
|
||||
pub fn create_logger(
|
||||
start_hash: Sha256Hash,
|
||||
ms_per_tick: Option<u64>,
|
||||
receiver: Receiver<Event>,
|
||||
sender: Sender<Entry>,
|
||||
) -> JoinHandle<(Entry, ExitReason)> {
|
||||
use std::thread;
|
||||
thread::spawn(move || {
|
||||
let mut end_hash = start_hash;
|
||||
let mut num_hashes = 0;
|
||||
let mut num_ticks = 0;
|
||||
let epoch = SystemTime::now();
|
||||
loop {
|
||||
if let Err(err) = log_events(
|
||||
&receiver,
|
||||
&sender,
|
||||
&mut num_hashes,
|
||||
&mut end_hash,
|
||||
epoch,
|
||||
&mut num_ticks,
|
||||
ms_per_tick,
|
||||
) {
|
||||
return err;
|
||||
}
|
||||
end_hash = hash(&end_hash);
|
||||
num_hashes += 1;
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
impl Historian {
|
||||
pub fn new(start_hash: &Sha256Hash, ms_per_tick: Option<u64>) -> Self {
|
||||
use std::sync::mpsc::channel;
|
||||
let (sender, event_receiver) = channel();
|
||||
let (entry_sender, receiver) = channel();
|
||||
let thread_hdl = create_logger(*start_hash, ms_per_tick, event_receiver, entry_sender);
|
||||
Historian {
|
||||
sender,
|
||||
receiver,
|
||||
thread_hdl,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use log::*;
|
||||
use std::thread::sleep;
|
||||
use std::time::Duration;
|
||||
|
||||
#[test]
|
||||
fn test_historian() {
|
||||
let zero = Sha256Hash::default();
|
||||
let hist = Historian::new(&zero, None);
|
||||
|
||||
hist.sender.send(Event::Tick).unwrap();
|
||||
sleep(Duration::new(0, 1_000_000));
|
||||
hist.sender.send(Event::Discovery { data: zero }).unwrap();
|
||||
sleep(Duration::new(0, 1_000_000));
|
||||
hist.sender.send(Event::Tick).unwrap();
|
||||
|
||||
let entry0 = hist.receiver.recv().unwrap();
|
||||
let entry1 = hist.receiver.recv().unwrap();
|
||||
let entry2 = hist.receiver.recv().unwrap();
|
||||
|
||||
drop(hist.sender);
|
||||
assert_eq!(
|
||||
hist.thread_hdl.join().unwrap().1,
|
||||
ExitReason::RecvDisconnected
|
||||
);
|
||||
|
||||
assert!(verify_slice(&[entry0, entry1, entry2], &zero));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_historian_closed_sender() {
|
||||
let zero = Sha256Hash::default();
|
||||
let hist = Historian::new(&zero, None);
|
||||
drop(hist.receiver);
|
||||
hist.sender.send(Event::Tick).unwrap();
|
||||
assert_eq!(
|
||||
hist.thread_hdl.join().unwrap().1,
|
||||
ExitReason::SendDisconnected
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_ticking_historian() {
|
||||
let zero = Sha256Hash::default();
|
||||
let hist = Historian::new(&zero, Some(20));
|
||||
sleep(Duration::from_millis(30));
|
||||
hist.sender.send(Event::Discovery { data: zero }).unwrap();
|
||||
sleep(Duration::from_millis(15));
|
||||
drop(hist.sender);
|
||||
assert_eq!(
|
||||
hist.thread_hdl.join().unwrap().1,
|
||||
ExitReason::RecvDisconnected
|
||||
);
|
||||
|
||||
let entries: Vec<Entry> = hist.receiver.iter().collect();
|
||||
assert!(entries.len() > 1);
|
||||
assert!(verify_slice(&entries, &zero));
|
||||
}
|
||||
}
|
src/ledger.rs (new file, 227 lines) @@ -0,0 +1,227 @@
|
||||
//! The `ledger` module provides functions for parallel verification of the
|
||||
//! Proof of History ledger.
|
||||
|
||||
use bincode::{self, deserialize, serialize_into};
|
||||
use entry::Entry;
|
||||
use hash::Hash;
|
||||
use packet::{self, SharedBlob, BLOB_SIZE};
|
||||
use rayon::prelude::*;
|
||||
use std::collections::VecDeque;
|
||||
use std::io::Cursor;
|
||||
use transaction::Transaction;
|
||||
|
||||
// a Block is a slice of Entries
|
||||
|
||||
pub trait Block {
|
||||
/// Verifies the hashes and counts of a slice of transactions are all consistent.
|
||||
fn verify(&self, start_hash: &Hash) -> bool;
|
||||
fn to_blobs(&self, blob_recycler: &packet::BlobRecycler, q: &mut VecDeque<SharedBlob>);
|
||||
}
|
||||
|
||||
impl Block for [Entry] {
|
||||
fn verify(&self, start_hash: &Hash) -> bool {
|
||||
let genesis = [Entry::new_tick(0, start_hash)];
|
||||
let entry_pairs = genesis.par_iter().chain(self).zip(self);
|
||||
entry_pairs.all(|(x0, x1)| x1.verify(&x0.id))
|
||||
}
|
||||
|
||||
fn to_blobs(&self, blob_recycler: &packet::BlobRecycler, q: &mut VecDeque<SharedBlob>) {
|
||||
for entry in self {
|
||||
let blob = blob_recycler.allocate();
|
||||
let pos = {
|
||||
let mut bd = blob.write().unwrap();
|
||||
let mut out = Cursor::new(bd.data_mut());
|
||||
serialize_into(&mut out, &entry).expect("failed to serialize output");
|
||||
out.position() as usize
|
||||
};
|
||||
assert!(pos < BLOB_SIZE);
|
||||
blob.write().unwrap().set_size(pos);
|
||||
q.push_back(blob);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn reconstruct_entries_from_blobs(blobs: VecDeque<SharedBlob>) -> bincode::Result<Vec<Entry>> {
|
||||
let mut entries: Vec<Entry> = Vec::with_capacity(blobs.len());
|
||||
|
||||
for blob in blobs {
|
||||
let entry = {
|
||||
let msg = blob.read().unwrap();
|
||||
deserialize(&msg.data()[..msg.meta.size])
|
||||
};
|
||||
|
||||
match entry {
|
||||
Ok(entry) => entries.push(entry),
|
||||
Err(err) => {
|
||||
trace!("reconstruct_entry_from_blobs: {}", err);
|
||||
return Err(err);
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(entries)
|
||||
}
|
||||
|
||||
/// Creates the next entries for given transactions, outputs
|
||||
/// updates start_hash to id of last Entry, sets cur_hashes to 0
|
||||
pub fn next_entries_mut(
|
||||
start_hash: &mut Hash,
|
||||
cur_hashes: &mut u64,
|
||||
transactions: Vec<Transaction>,
|
||||
) -> Vec<Entry> {
|
||||
if transactions.is_empty() {
|
||||
vec![Entry::new_mut(start_hash, cur_hashes, transactions, false)]
|
||||
} else {
|
||||
let mut chunk_len = transactions.len();
|
||||
|
||||
// check for fit, make sure they can be serialized
|
||||
while !Entry::will_fit(transactions[0..chunk_len].to_vec()) {
|
||||
chunk_len /= 2;
|
||||
}
|
||||
|
||||
let mut num_chunks = if transactions.len() % chunk_len == 0 {
|
||||
transactions.len() / chunk_len
|
||||
} else {
|
||||
transactions.len() / chunk_len + 1
|
||||
};
|
||||
|
||||
let mut entries = Vec::with_capacity(num_chunks);
|
||||
|
||||
for chunk in transactions.chunks(chunk_len) {
|
||||
num_chunks -= 1;
|
||||
entries.push(Entry::new_mut(
|
||||
start_hash,
|
||||
cur_hashes,
|
||||
chunk.to_vec(),
|
||||
num_chunks > 0,
|
||||
));
|
||||
}
|
||||
entries
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates the next Entries for given transactions
|
||||
pub fn next_entries(
|
||||
start_hash: &Hash,
|
||||
cur_hashes: u64,
|
||||
transactions: Vec<Transaction>,
|
||||
) -> Vec<Entry> {
|
||||
let mut id = *start_hash;
|
||||
let mut num_hashes = cur_hashes;
|
||||
next_entries_mut(&mut id, &mut num_hashes, transactions)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use entry::{next_entry, Entry};
|
||||
use hash::hash;
|
||||
use packet::{BlobRecycler, BLOB_DATA_SIZE};
|
||||
use signature::{KeyPair, KeyPairUtil};
|
||||
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
|
||||
use transaction::Transaction;
|
||||
|
||||
#[test]
|
||||
fn test_verify_slice() {
|
||||
let zero = Hash::default();
|
||||
let one = hash(&zero);
|
||||
assert!(vec![][..].verify(&zero)); // base case
|
||||
assert!(vec![Entry::new_tick(0, &zero)][..].verify(&zero)); // singleton case 1
|
||||
assert!(!vec![Entry::new_tick(0, &zero)][..].verify(&one)); // singleton case 2, bad
|
||||
assert!(vec![next_entry(&zero, 0, vec![]); 2][..].verify(&zero)); // inductive step
|
||||
|
||||
let mut bad_ticks = vec![next_entry(&zero, 0, vec![]); 2];
|
||||
bad_ticks[1].id = one;
|
||||
assert!(!bad_ticks.verify(&zero)); // inductive step, bad
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_entries_to_blobs() {
|
||||
let zero = Hash::default();
|
||||
let one = hash(&zero);
|
||||
let keypair = KeyPair::new();
|
||||
let tx0 = Transaction::new(&keypair, keypair.pubkey(), 1, one);
|
||||
let transactions = vec![tx0; 10_000];
|
||||
let entries = next_entries(&zero, 0, transactions);
|
||||
|
||||
let blob_recycler = BlobRecycler::default();
|
||||
let mut blob_q = VecDeque::new();
|
||||
entries.to_blobs(&blob_recycler, &mut blob_q);
|
||||
|
||||
assert_eq!(reconstruct_entries_from_blobs(blob_q).unwrap(), entries);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_bad_blobs_attack() {
|
||||
let blob_recycler = BlobRecycler::default();
|
||||
let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 8000);
|
||||
let blobs_q = packet::to_blobs(vec![(0, addr)], &blob_recycler).unwrap(); // <-- attack!
|
||||
assert!(reconstruct_entries_from_blobs(blobs_q).is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_next_entries() {
|
||||
let id = Hash::default();
|
||||
let next_id = hash(&id);
|
||||
let keypair = KeyPair::new();
|
||||
let tx0 = Transaction::new(&keypair, keypair.pubkey(), 1, next_id);
|
||||
|
||||
// NOTE: if Entry grows larger than a transaction, the code below falls over
|
||||
let threshold = (BLOB_DATA_SIZE / 256) - 1; // 256 is transaction size
|
||||
|
||||
// verify no split
|
||||
let transactions = vec![tx0.clone(); threshold];
|
||||
let entries0 = next_entries(&id, 0, transactions.clone());
|
||||
assert_eq!(entries0.len(), 1);
|
||||
assert!(entries0.verify(&id));
|
||||
|
||||
// verify the split
|
||||
let transactions = vec![tx0.clone(); threshold * 2];
|
||||
let entries0 = next_entries(&id, 0, transactions.clone());
|
||||
assert_eq!(entries0.len(), 2);
|
||||
assert!(entries0[0].has_more);
|
||||
assert!(!entries0[entries0.len() - 1].has_more);
|
||||
|
||||
assert!(entries0.verify(&id));
|
||||
// test hand-construction... brittle, changes if split method changes... ?
|
||||
// let mut entries1 = vec![];
|
||||
// entries1.push(Entry::new(&id, 1, transactions[..threshold].to_vec(), true));
|
||||
// id = entries1[0].id;
|
||||
// entries1.push(Entry::new(
|
||||
// &id,
|
||||
// 1,
|
||||
// transactions[threshold..].to_vec(),
|
||||
// false,
|
||||
// ));
|
||||
//
|
||||
// assert_eq!(entries0, entries1);
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(all(feature = "unstable", test))]
|
||||
mod bench {
|
||||
extern crate test;
|
||||
use self::test::Bencher;
|
||||
use hash::hash;
|
||||
use ledger::*;
|
||||
use packet::BlobRecycler;
|
||||
use signature::{KeyPair, KeyPairUtil};
|
||||
use transaction::Transaction;
|
||||
|
||||
#[bench]
|
||||
fn bench_block_to_blobs_to_block(bencher: &mut Bencher) {
|
||||
let zero = Hash::default();
|
||||
let one = hash(&zero);
|
||||
let keypair = KeyPair::new();
|
||||
let tx0 = Transaction::new(&keypair, keypair.pubkey(), 1, one);
|
||||
let transactions = vec![tx0; 10];
|
||||
let entries = next_entries(&zero, 1, transactions);
|
||||
|
||||
let blob_recycler = BlobRecycler::default();
|
||||
bencher.iter(|| {
|
||||
let mut blob_q = VecDeque::new();
|
||||
entries.to_blobs(&blob_recycler, &mut blob_q);
|
||||
assert_eq!(reconstruct_entries_from_blobs(blob_q).unwrap(), entries);
|
||||
});
|
||||
}
|
||||
|
||||
}
|
67 src/lib.rs
@@ -1,11 +1,74 @@
|
||||
//! The `solana` library implements the Solana high-performance blockchain architecture.
|
||||
//! It includes a full Rust implementation of the architecture (see
|
||||
//! [Server](server/struct.Server.html)) as well as hooks to GPU implementations of its most
|
||||
//! parallelizable components (i.e. [SigVerify](sigverify/index.html)). It also includes
|
||||
//! command-line tools to spin up fullnodes and a Rust library
|
||||
//! (see [ThinClient](thin_client/struct.ThinClient.html)) to interact with them.
|
||||
//!
|
||||
|
||||
#![cfg_attr(feature = "unstable", feature(test))]
|
||||
pub mod log;
|
||||
pub mod historian;
|
||||
#[macro_use]
|
||||
pub mod counter;
|
||||
pub mod bank;
|
||||
pub mod banking_stage;
|
||||
pub mod blob_fetch_stage;
|
||||
pub mod budget;
|
||||
pub mod choose_gossip_peer_strategy;
|
||||
pub mod crdt;
|
||||
pub mod drone;
|
||||
pub mod entry;
|
||||
pub mod entry_writer;
|
||||
#[cfg(feature = "erasure")]
|
||||
pub mod erasure;
|
||||
pub mod fetch_stage;
|
||||
pub mod fullnode;
|
||||
pub mod hash;
|
||||
pub mod ledger;
|
||||
pub mod logger;
|
||||
pub mod mint;
|
||||
pub mod nat;
|
||||
pub mod ncp;
|
||||
pub mod packet;
|
||||
pub mod payment_plan;
|
||||
pub mod record_stage;
|
||||
pub mod recorder;
|
||||
pub mod replicate_stage;
|
||||
pub mod request;
|
||||
pub mod request_processor;
|
||||
pub mod request_stage;
|
||||
pub mod result;
|
||||
pub mod rpu;
|
||||
pub mod signature;
|
||||
pub mod sigverify;
|
||||
pub mod sigverify_stage;
|
||||
pub mod streamer;
|
||||
pub mod thin_client;
|
||||
pub mod timing;
|
||||
pub mod tpu;
|
||||
pub mod transaction;
|
||||
pub mod tvu;
|
||||
pub mod window_stage;
|
||||
pub mod write_stage;
|
||||
extern crate bincode;
|
||||
extern crate byteorder;
|
||||
extern crate chrono;
|
||||
extern crate generic_array;
|
||||
extern crate itertools;
|
||||
extern crate libc;
|
||||
#[macro_use]
|
||||
extern crate log;
|
||||
extern crate rayon;
|
||||
extern crate ring;
|
||||
extern crate serde;
|
||||
#[macro_use]
|
||||
extern crate serde_derive;
|
||||
extern crate pnet_datalink;
|
||||
extern crate serde_json;
|
||||
extern crate sha2;
|
||||
extern crate untrusted;
|
||||
|
||||
#[cfg(test)]
|
||||
#[macro_use]
|
||||
extern crate matches;
|
||||
|
||||
extern crate rand;
|
||||
|
320 src/log.rs
@@ -1,320 +0,0 @@
|
||||
//! The `log` crate provides the foundational data structures for Proof-of-History,
|
||||
//! an ordered log of events in time.
|
||||
|
||||
/// Each log entry contains three pieces of data. The 'num_hashes' field is the number
|
||||
/// of hashes performed since the previous entry. The 'end_hash' field is the result
|
||||
/// of hashing 'end_hash' from the previous entry 'num_hashes' times. The 'event'
|
||||
/// field points to an Event that took place shortly after 'end_hash' was generated.
|
||||
///
|
||||
/// If you multiply 'num_hashes' by the time it takes to generate a new hash, you
|
||||
/// get a duration estimate since the last event. Since processing power increases
|
||||
/// over time, one should expect the duration 'num_hashes' represents to decrease proportionally.
|
||||
/// Though processing power varies across nodes, the network gives priority to the
|
||||
/// fastest processor. Duration should therefore be estimated by assuming that the hash
|
||||
/// was generated by the fastest processor at the time the entry was logged.
|
||||
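/// For example (a sketch using an assumed rate): if the fastest node performs
/// roughly 2,000,000 hashes per second and an entry carries num_hashes = 1,000,000,
/// the entry was logged about 1,000,000 / 2,000,000 = 0.5 seconds after the
/// previous one.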
|
||||
use generic_array::GenericArray;
|
||||
use generic_array::typenum::{U32, U64};
|
||||
use ring::signature::Ed25519KeyPair;
|
||||
pub type Sha256Hash = GenericArray<u8, U32>;
|
||||
pub type PublicKey = GenericArray<u8, U32>;
|
||||
pub type Signature = GenericArray<u8, U64>;
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
|
||||
pub struct Entry {
|
||||
pub num_hashes: u64,
|
||||
pub end_hash: Sha256Hash,
|
||||
pub event: Event,
|
||||
}
|
||||
|
||||
/// When 'event' is Tick, the event represents a simple clock tick, and exists for the
|
||||
/// sole purpose of improving the performance of event log verification. A tick can
|
||||
/// be generated in 'num_hashes' hashes and verified in 'num_hashes' hashes. By logging
|
||||
/// a hash alongside the tick, each tick can be verified in parallel using the 'end_hash'
|
||||
/// of the preceding tick to seed its hashing.
|
||||
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
|
||||
pub enum Event {
|
||||
Tick,
|
||||
Discovery {
|
||||
data: Sha256Hash,
|
||||
},
|
||||
Claim {
|
||||
key: PublicKey,
|
||||
data: Sha256Hash,
|
||||
sig: Signature,
|
||||
},
|
||||
}
|
||||
|
||||
impl Entry {
|
||||
/// Creates an Entry from the number of hashes 'num_hashes' since the previous event
|
||||
/// and that resulting 'end_hash'.
|
||||
pub fn new_tick(num_hashes: u64, end_hash: &Sha256Hash) -> Self {
|
||||
Entry {
|
||||
num_hashes,
|
||||
end_hash: *end_hash,
|
||||
event: Event::Tick,
|
||||
}
|
||||
}
|
||||
|
||||
/// Verifies self.end_hash is the result of hashing a 'start_hash' 'self.num_hashes' times.
|
||||
/// If the event is not a Tick, then hash that as well.
|
||||
pub fn verify(self: &Self, start_hash: &Sha256Hash) -> bool {
|
||||
if let Event::Claim { key, data, sig } = self.event {
|
||||
if !verify_signature(&key, &data, &sig) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
self.end_hash == next_hash(start_hash, self.num_hashes, &self.event)
|
||||
}
|
||||
}
|
||||
|
||||
/// Return a Claim Event for the given hash and key-pair.
|
||||
pub fn sign_hash(data: &Sha256Hash, key_pair: &Ed25519KeyPair) -> Event {
|
||||
let sig = key_pair.sign(data);
|
||||
let peer_public_key_bytes = key_pair.public_key_bytes();
|
||||
let sig_bytes = sig.as_ref();
|
||||
Event::Claim {
|
||||
key: GenericArray::clone_from_slice(peer_public_key_bytes),
|
||||
data: GenericArray::clone_from_slice(data),
|
||||
sig: GenericArray::clone_from_slice(sig_bytes),
|
||||
}
|
||||
}
|
||||
|
||||
/// Return a Sha256 hash for the given data.
|
||||
pub fn hash(val: &[u8]) -> Sha256Hash {
|
||||
use sha2::{Digest, Sha256};
|
||||
let mut hasher = Sha256::default();
|
||||
hasher.input(val);
|
||||
hasher.result()
|
||||
}
|
||||
|
||||
/// Return the hash of the given hash extended with the given value.
|
||||
pub fn extend_and_hash(end_hash: &Sha256Hash, ty: u8, val: &[u8]) -> Sha256Hash {
|
||||
let mut hash_data = end_hash.to_vec();
|
||||
hash_data.push(ty);
|
||||
hash_data.extend_from_slice(val);
|
||||
hash(&hash_data)
|
||||
}
|
||||
|
||||
pub fn hash_event(end_hash: &Sha256Hash, event: &Event) -> Sha256Hash {
|
||||
match *event {
|
||||
Event::Tick => *end_hash,
|
||||
Event::Discovery { data } => extend_and_hash(end_hash, 1, &data),
|
||||
Event::Claim { key, data, sig } => {
|
||||
let mut event_data = data.to_vec();
|
||||
event_data.extend_from_slice(&sig);
|
||||
event_data.extend_from_slice(&key);
|
||||
extend_and_hash(end_hash, 2, &event_data)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn next_hash(start_hash: &Sha256Hash, num_hashes: u64, event: &Event) -> Sha256Hash {
|
||||
let mut end_hash = *start_hash;
|
||||
for _ in 0..num_hashes {
|
||||
end_hash = hash(&end_hash);
|
||||
}
|
||||
hash_event(&end_hash, event)
|
||||
}
|
||||
|
||||
/// Creates the next Entry 'num_hashes' after 'start_hash' for the given event.
|
||||
pub fn next_entry(start_hash: &Sha256Hash, num_hashes: u64, event: Event) -> Entry {
|
||||
Entry {
|
||||
num_hashes,
|
||||
end_hash: next_hash(start_hash, num_hashes, &event),
|
||||
event,
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates the next Tick Entry 'num_hashes' after 'start_hash'.
|
||||
pub fn next_tick(start_hash: &Sha256Hash, num_hashes: u64) -> Entry {
|
||||
next_entry(start_hash, num_hashes, Event::Tick)
|
||||
}
|
||||
|
||||
/// Verifies the hashes and counts of a slice of events are all consistent.
|
||||
pub fn verify_slice(events: &[Entry], start_hash: &Sha256Hash) -> bool {
|
||||
use rayon::prelude::*;
|
||||
let genesis = [Entry::new_tick(Default::default(), start_hash)];
|
||||
let event_pairs = genesis.par_iter().chain(events).zip(events);
|
||||
event_pairs.all(|(x0, x1)| x1.verify(&x0.end_hash))
|
||||
}
|
||||
|
||||
/// Verifies the hashes and events serially. Exists only for reference.
|
||||
pub fn verify_slice_seq(events: &[Entry], start_hash: &Sha256Hash) -> bool {
|
||||
let genesis = [Entry::new_tick(0, start_hash)];
|
||||
let mut event_pairs = genesis.iter().chain(events).zip(events);
|
||||
event_pairs.all(|(x0, x1)| x1.verify(&x0.end_hash))
|
||||
}
|
||||
|
||||
/// Verify a signed message with the given public key.
|
||||
pub fn verify_signature(peer_public_key_bytes: &[u8], msg_bytes: &[u8], sig_bytes: &[u8]) -> bool {
|
||||
use untrusted;
|
||||
use ring::signature;
|
||||
let peer_public_key = untrusted::Input::from(peer_public_key_bytes);
|
||||
let msg = untrusted::Input::from(msg_bytes);
|
||||
let sig = untrusted::Input::from(sig_bytes);
|
||||
signature::verify(&signature::ED25519, peer_public_key, msg, sig).is_ok()
|
||||
}
|
||||
|
||||
/// Create a vector of 'len' Tick entries starting from 'start_hash', each 'num_hashes' apart.
|
||||
pub fn create_ticks(start_hash: &Sha256Hash, num_hashes: u64, len: usize) -> Vec<Entry> {
|
||||
use std::iter;
|
||||
let mut end_hash = *start_hash;
|
||||
iter::repeat(Event::Tick)
|
||||
.take(len)
|
||||
.map(|event| {
|
||||
let entry = next_entry(&end_hash, num_hashes, event);
|
||||
end_hash = entry.end_hash;
|
||||
entry
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_event_verify() {
|
||||
let zero = Sha256Hash::default();
|
||||
let one = hash(&zero);
|
||||
assert!(Entry::new_tick(0, &zero).verify(&zero)); // base case
|
||||
assert!(!Entry::new_tick(0, &zero).verify(&one)); // base case, bad
|
||||
assert!(next_tick(&zero, 1).verify(&zero)); // inductive step
|
||||
assert!(!next_tick(&zero, 1).verify(&one)); // inductive step, bad
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_next_tick() {
|
||||
let zero = Sha256Hash::default();
|
||||
assert_eq!(next_tick(&zero, 1).num_hashes, 1)
|
||||
}
|
||||
|
||||
fn verify_slice_generic(verify_slice: fn(&[Entry], &Sha256Hash) -> bool) {
|
||||
let zero = Sha256Hash::default();
|
||||
let one = hash(&zero);
|
||||
assert!(verify_slice(&vec![], &zero)); // base case
|
||||
assert!(verify_slice(&vec![Entry::new_tick(0, &zero)], &zero)); // singleton case 1
|
||||
assert!(!verify_slice(&vec![Entry::new_tick(0, &zero)], &one)); // singleton case 2, bad
|
||||
assert!(verify_slice(&create_ticks(&zero, 0, 2), &zero)); // inductive step
|
||||
|
||||
let mut bad_ticks = create_ticks(&zero, 0, 2);
|
||||
bad_ticks[1].end_hash = one;
|
||||
assert!(!verify_slice(&bad_ticks, &zero)); // inductive step, bad
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_verify_slice() {
|
||||
verify_slice_generic(verify_slice);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_verify_slice_seq() {
|
||||
verify_slice_generic(verify_slice_seq);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_reorder_attack() {
|
||||
let zero = Sha256Hash::default();
|
||||
let one = hash(&zero);
|
||||
|
||||
// First, verify Discovery events
|
||||
let mut end_hash = zero;
|
||||
let events = [
|
||||
Event::Discovery { data: zero },
|
||||
Event::Discovery { data: one },
|
||||
];
|
||||
let mut entries: Vec<Entry> = events
|
||||
.iter()
|
||||
.map(|event| {
|
||||
let entry = next_entry(&end_hash, 0, event.clone());
|
||||
end_hash = entry.end_hash;
|
||||
entry
|
||||
})
|
||||
.collect();
|
||||
assert!(verify_slice(&entries, &zero));
|
||||
|
||||
// Next, swap two Discovery events and ensure verification fails.
|
||||
let event0 = entries[0].event.clone();
|
||||
let event1 = entries[1].event.clone();
|
||||
entries[0].event = event1;
|
||||
entries[1].event = event0;
|
||||
assert!(!verify_slice(&entries, &zero));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_signature() {
|
||||
use untrusted;
|
||||
use ring::{rand, signature};
|
||||
let rng = rand::SystemRandom::new();
|
||||
let pkcs8_bytes = signature::Ed25519KeyPair::generate_pkcs8(&rng).unwrap();
|
||||
let key_pair =
|
||||
signature::Ed25519KeyPair::from_pkcs8(untrusted::Input::from(&pkcs8_bytes)).unwrap();
|
||||
const MESSAGE: &'static [u8] = b"hello, world";
|
||||
let event0 = sign_hash(&hash(MESSAGE), &key_pair);
|
||||
let zero = Sha256Hash::default();
|
||||
let mut end_hash = zero;
|
||||
let entries: Vec<Entry> = [event0]
|
||||
.iter()
|
||||
.map(|event| {
|
||||
let entry = next_entry(&end_hash, 0, event.clone());
|
||||
end_hash = entry.end_hash;
|
||||
entry
|
||||
})
|
||||
.collect();
|
||||
assert!(verify_slice(&entries, &zero));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_bad_signature() {
|
||||
use untrusted;
|
||||
use ring::{rand, signature};
|
||||
let rng = rand::SystemRandom::new();
|
||||
let pkcs8_bytes = signature::Ed25519KeyPair::generate_pkcs8(&rng).unwrap();
|
||||
let key_pair =
|
||||
signature::Ed25519KeyPair::from_pkcs8(untrusted::Input::from(&pkcs8_bytes)).unwrap();
|
||||
const MESSAGE: &'static [u8] = b"hello, world";
|
||||
let mut event0 = sign_hash(&hash(MESSAGE), &key_pair);
|
||||
if let Event::Claim { key, sig, .. } = event0 {
|
||||
const GOODBYE: &'static [u8] = b"goodbye cruel world";
|
||||
let data = hash(GOODBYE);
|
||||
event0 = Event::Claim { key, data, sig };
|
||||
}
|
||||
let zero = Sha256Hash::default();
|
||||
let mut end_hash = zero;
|
||||
let entries: Vec<Entry> = [event0]
|
||||
.iter()
|
||||
.map(|event| {
|
||||
let entry = next_entry(&end_hash, 0, event.clone());
|
||||
end_hash = entry.end_hash;
|
||||
entry
|
||||
})
|
||||
.collect();
|
||||
assert!(!verify_slice(&entries, &zero));
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(all(feature = "unstable", test))]
|
||||
mod bench {
|
||||
extern crate test;
|
||||
use self::test::Bencher;
|
||||
use log::*;
|
||||
|
||||
#[bench]
|
||||
fn event_bench(bencher: &mut Bencher) {
|
||||
let start_hash = Default::default();
|
||||
let events = create_ticks(&start_hash, 10_000, 8);
|
||||
bencher.iter(|| {
|
||||
assert!(verify_slice(&events, &start_hash));
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn event_bench_seq(bencher: &mut Bencher) {
|
||||
let start_hash = Default::default();
|
||||
let events = create_ticks(&start_hash, 10_000, 8);
|
||||
bencher.iter(|| {
|
||||
assert!(verify_slice_seq(&events, &start_hash));
|
||||
});
|
||||
}
|
||||
}
|
14 src/logger.rs Normal file
@@ -0,0 +1,14 @@
|
||||
//! The `logger` module provides a setup function for `env_logger`. Its only function,
|
||||
//! `setup()`, may be called multiple times.
|
||||
|
||||
use std::sync::{Once, ONCE_INIT};
|
||||
extern crate env_logger;
|
||||
|
||||
static INIT: Once = ONCE_INIT;
|
||||
|
||||
/// Setup function that is only run once, even if called multiple times.
|
||||
pub fn setup() {
|
||||
INIT.call_once(|| {
|
||||
let _ = env_logger::init();
|
||||
});
|
||||
}
|
86 src/mint.rs Normal file
@@ -0,0 +1,86 @@
|
||||
//! The `mint` module is a library for generating the chain's genesis block.
|
||||
|
||||
use entry::Entry;
|
||||
use hash::{hash, Hash};
|
||||
use ring::rand::SystemRandom;
|
||||
use signature::{KeyPair, KeyPairUtil, PublicKey};
|
||||
use transaction::Transaction;
|
||||
use untrusted::Input;
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
pub struct Mint {
|
||||
pub pkcs8: Vec<u8>,
|
||||
pubkey: PublicKey,
|
||||
pub tokens: i64,
|
||||
}
|
||||
|
||||
impl Mint {
|
||||
pub fn new(tokens: i64) -> Self {
|
||||
let rnd = SystemRandom::new();
|
||||
let pkcs8 = KeyPair::generate_pkcs8(&rnd)
|
||||
.expect("generate_pkcs8 in mint pub fn new")
|
||||
.to_vec();
|
||||
let keypair =
|
||||
KeyPair::from_pkcs8(Input::from(&pkcs8)).expect("from_pkcs8 in mint pub fn new");
|
||||
let pubkey = keypair.pubkey();
|
||||
Mint {
|
||||
pkcs8,
|
||||
pubkey,
|
||||
tokens,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn seed(&self) -> Hash {
|
||||
hash(&self.pkcs8)
|
||||
}
|
||||
|
||||
pub fn last_id(&self) -> Hash {
|
||||
self.create_entries()[1].id
|
||||
}
|
||||
|
||||
pub fn keypair(&self) -> KeyPair {
|
||||
KeyPair::from_pkcs8(Input::from(&self.pkcs8)).expect("from_pkcs8 in mint pub fn keypair")
|
||||
}
|
||||
|
||||
pub fn pubkey(&self) -> PublicKey {
|
||||
self.pubkey
|
||||
}
|
||||
|
||||
pub fn create_transactions(&self) -> Vec<Transaction> {
|
||||
let keypair = self.keypair();
|
||||
let tx = Transaction::new(&keypair, self.pubkey(), self.tokens, self.seed());
|
||||
vec![tx]
|
||||
}
|
||||
|
||||
pub fn create_entries(&self) -> Vec<Entry> {
|
||||
let e0 = Entry::new(&self.seed(), 0, vec![], false);
|
||||
let e1 = Entry::new(&e0.id, 0, self.create_transactions(), false);
|
||||
vec![e0, e1]
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use budget::Budget;
|
||||
use ledger::Block;
|
||||
use transaction::{Instruction, Plan};
|
||||
|
||||
#[test]
|
||||
fn test_create_transactions() {
|
||||
let mut transactions = Mint::new(100).create_transactions().into_iter();
|
||||
let tx = transactions.next().unwrap();
|
||||
if let Instruction::NewContract(contract) = tx.instruction {
|
||||
if let Plan::Budget(Budget::Pay(payment)) = contract.plan {
|
||||
assert_eq!(tx.from, payment.to);
|
||||
}
|
||||
}
|
||||
assert_eq!(transactions.next(), None);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_verify_entries() {
|
||||
let entries = Mint::new(100).create_entries();
|
||||
assert!(entries[..].verify(&entries[0].id));
|
||||
}
|
||||
}
|
97 src/nat.rs Normal file
@@ -0,0 +1,97 @@
|
||||
//! The `nat` module assists with NAT traversal
|
||||
|
||||
extern crate futures;
|
||||
extern crate p2p;
|
||||
extern crate reqwest;
|
||||
extern crate tokio_core;
|
||||
|
||||
use std::net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket};
|
||||
|
||||
use self::futures::Future;
|
||||
use self::p2p::UdpSocketExt;
|
||||
use std::env;
|
||||
use std::str;
|
||||
|
||||
/// A data type representing a public Udp socket
|
||||
pub struct UdpSocketPair {
|
||||
pub addr: SocketAddr, // Public address of the socket
|
||||
pub receiver: UdpSocket, // Locally bound socket that can receive from the public address
|
||||
pub sender: UdpSocket, // Locally bound socket to send via public address
|
||||
}
|
||||
|
||||
/// Tries to determine the public IP address of this machine
|
||||
pub fn get_public_ip_addr() -> Result<IpAddr, String> {
|
||||
let body = reqwest::get("http://ifconfig.co/ip")
|
||||
.map_err(|err| err.to_string())?
|
||||
.text()
|
||||
.map_err(|err| err.to_string())?;
|
||||
|
||||
match body.lines().next() {
|
||||
Some(ip) => Result::Ok(ip.parse().unwrap()),
|
||||
None => Result::Err("Empty response body".to_string()),
|
||||
}
|
||||
}
|
||||
|
||||
/// Binds a private Udp address to a public address using UPnP if possible
|
||||
pub fn udp_public_bind(label: &str) -> UdpSocketPair {
|
||||
let private_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 0);
|
||||
|
||||
let mut core = tokio_core::reactor::Core::new().unwrap();
|
||||
let handle = core.handle();
|
||||
let mc = p2p::P2p::default();
|
||||
let res = core.run({
|
||||
tokio_core::net::UdpSocket::bind_public(&private_addr, &handle, &mc)
|
||||
.map_err(|e| {
|
||||
info!("Failed to bind public socket for {}: {}", label, e);
|
||||
})
|
||||
.and_then(|(socket, public_addr)| Ok((public_addr, socket.local_addr().unwrap())))
|
||||
});
|
||||
|
||||
match res {
|
||||
Ok((public_addr, local_addr)) => {
|
||||
info!(
|
||||
"Using local address {} mapped to UPnP public address {} for {}",
|
||||
local_addr, public_addr, label
|
||||
);
|
||||
|
||||
// NAT should now be forwarding inbound packets directed at
|
||||
// |public_addr| to the local |receiver| socket...
|
||||
let receiver = UdpSocket::bind(local_addr).unwrap();
|
||||
|
||||
// TODO: try to autodetect a broken NAT (issue #496)
|
||||
let sender = if env::var("BROKEN_NAT").is_err() {
|
||||
receiver.try_clone().unwrap()
|
||||
} else {
|
||||
// ... however for outbound packets, some NATs *will not* rewrite the
|
||||
// source port from |receiver.local_addr().port()| to |public_addr.port()|.
|
||||
// This is currently a problem when talking with a fullnode as it
|
||||
// assumes it can send UDP packets back at the source. This hits the
|
||||
// NAT as a datagram for |receiver.local_addr().port()| on the NAT's public
|
||||
// IP, which the NAT promptly discards. As a short term hack, create a
|
||||
// local UDP socket, |sender|, with the same port as |public_addr.port()|.
|
||||
//
|
||||
// TODO: Remove the |sender| socket and deal with the downstream changes to
|
||||
// the UDP signalling
|
||||
let mut local_addr_sender = local_addr.clone();
|
||||
local_addr_sender.set_port(public_addr.port());
|
||||
UdpSocket::bind(local_addr_sender).unwrap()
|
||||
};
|
||||
|
||||
UdpSocketPair {
|
||||
addr: public_addr,
|
||||
receiver,
|
||||
sender,
|
||||
}
|
||||
}
|
||||
Err(_) => {
|
||||
let sender = UdpSocket::bind(private_addr).unwrap();
|
||||
let local_addr = sender.local_addr().unwrap();
|
||||
info!("Using local address {} for {}", local_addr, label);
|
||||
UdpSocketPair {
|
||||
addr: private_addr,
|
||||
receiver: sender.try_clone().unwrap(),
|
||||
sender,
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
86 src/ncp.rs Normal file
@@ -0,0 +1,86 @@
|
||||
//! The `ncp` module implements the network control plane.
|
||||
|
||||
use crdt::Crdt;
|
||||
use packet::{BlobRecycler, SharedBlob};
|
||||
use result::Result;
|
||||
use std::net::UdpSocket;
|
||||
use std::sync::atomic::AtomicBool;
|
||||
use std::sync::mpsc::channel;
|
||||
use std::sync::{Arc, RwLock};
|
||||
use std::thread::JoinHandle;
|
||||
use streamer;
|
||||
|
||||
pub struct Ncp {
|
||||
pub thread_hdls: Vec<JoinHandle<()>>,
|
||||
}
|
||||
|
||||
impl Ncp {
|
||||
pub fn new(
|
||||
crdt: Arc<RwLock<Crdt>>,
|
||||
window: Arc<RwLock<Vec<Option<SharedBlob>>>>,
|
||||
gossip_listen_socket: UdpSocket,
|
||||
gossip_send_socket: UdpSocket,
|
||||
exit: Arc<AtomicBool>,
|
||||
) -> Result<Ncp> {
|
||||
let blob_recycler = BlobRecycler::default();
|
||||
let (request_sender, request_receiver) = channel();
|
||||
trace!(
|
||||
"Ncp: id: {:?}, listening on: {:?}",
|
||||
&crdt.read().unwrap().me[..4],
|
||||
gossip_listen_socket.local_addr().unwrap()
|
||||
);
|
||||
let t_receiver = streamer::blob_receiver(
|
||||
exit.clone(),
|
||||
blob_recycler.clone(),
|
||||
gossip_listen_socket,
|
||||
request_sender,
|
||||
)?;
|
||||
let (response_sender, response_receiver) = channel();
|
||||
let t_responder = streamer::responder(
|
||||
gossip_send_socket,
|
||||
exit.clone(),
|
||||
blob_recycler.clone(),
|
||||
response_receiver,
|
||||
);
|
||||
let t_listen = Crdt::listen(
|
||||
crdt.clone(),
|
||||
window,
|
||||
blob_recycler.clone(),
|
||||
request_receiver,
|
||||
response_sender.clone(),
|
||||
exit.clone(),
|
||||
);
|
||||
let t_gossip = Crdt::gossip(crdt.clone(), blob_recycler, response_sender, exit);
|
||||
let thread_hdls = vec![t_receiver, t_responder, t_listen, t_gossip];
|
||||
Ok(Ncp { thread_hdls })
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crdt::{Crdt, TestNode};
|
||||
use ncp::Ncp;
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::sync::{Arc, RwLock};
|
||||
|
||||
#[test]
|
||||
// test that stage will exit when flag is set
|
||||
fn test_exit() {
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let tn = TestNode::new();
|
||||
let crdt = Crdt::new(tn.data.clone());
|
||||
let c = Arc::new(RwLock::new(crdt));
|
||||
let w = Arc::new(RwLock::new(vec![]));
|
||||
let d = Ncp::new(
|
||||
c.clone(),
|
||||
w,
|
||||
tn.sockets.gossip,
|
||||
tn.sockets.gossip_send,
|
||||
exit.clone(),
|
||||
).unwrap();
|
||||
exit.store(true, Ordering::Relaxed);
|
||||
for t in d.thread_hdls {
|
||||
t.join().expect("thread join");
|
||||
}
|
||||
}
|
||||
}
|
596 src/packet.rs Normal file
@@ -0,0 +1,596 @@
|
||||
//! The `packet` module defines data structures and methods to pull data from the network.
|
||||
use bincode::{deserialize, serialize};
|
||||
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
|
||||
use counter::Counter;
|
||||
use result::{Error, Result};
|
||||
use serde::Serialize;
|
||||
use signature::PublicKey;
|
||||
use std::collections::VecDeque;
|
||||
use std::fmt;
|
||||
use std::io;
|
||||
use std::mem::size_of;
|
||||
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, UdpSocket};
|
||||
use std::sync::atomic::AtomicUsize;
|
||||
use std::sync::{Arc, Mutex, RwLock};
|
||||
use std::time::Instant;
|
||||
|
||||
pub type SharedPackets = Arc<RwLock<Packets>>;
|
||||
pub type SharedBlob = Arc<RwLock<Blob>>;
|
||||
pub type SharedBlobs = VecDeque<SharedBlob>;
|
||||
pub type PacketRecycler = Recycler<Packets>;
|
||||
pub type BlobRecycler = Recycler<Blob>;
|
||||
|
||||
pub const NUM_PACKETS: usize = 1024 * 8;
|
||||
pub const BLOB_SIZE: usize = 64 * 1024;
|
||||
pub const BLOB_DATA_SIZE: usize = BLOB_SIZE - BLOB_HEADER_SIZE;
|
||||
pub const PACKET_DATA_SIZE: usize = 256;
|
||||
pub const NUM_BLOBS: usize = (NUM_PACKETS * PACKET_DATA_SIZE) / BLOB_SIZE;
|
||||
|
||||
#[derive(Clone, Default)]
|
||||
#[repr(C)]
|
||||
pub struct Meta {
|
||||
pub size: usize,
|
||||
pub num_retransmits: u64,
|
||||
pub addr: [u16; 8],
|
||||
pub port: u16,
|
||||
pub v6: bool,
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
#[repr(C)]
|
||||
pub struct Packet {
|
||||
pub data: [u8; PACKET_DATA_SIZE],
|
||||
pub meta: Meta,
|
||||
}
|
||||
|
||||
impl fmt::Debug for Packet {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(
|
||||
f,
|
||||
"Packet {{ size: {:?}, addr: {:?} }}",
|
||||
self.meta.size,
|
||||
self.meta.addr()
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for Packet {
|
||||
fn default() -> Packet {
|
||||
Packet {
|
||||
data: [0u8; PACKET_DATA_SIZE],
|
||||
meta: Meta::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Meta {
|
||||
pub fn addr(&self) -> SocketAddr {
|
||||
if !self.v6 {
|
||||
let addr = [
|
||||
self.addr[0] as u8,
|
||||
self.addr[1] as u8,
|
||||
self.addr[2] as u8,
|
||||
self.addr[3] as u8,
|
||||
];
|
||||
let ipv4: Ipv4Addr = From::<[u8; 4]>::from(addr);
|
||||
SocketAddr::new(IpAddr::V4(ipv4), self.port)
|
||||
} else {
|
||||
let ipv6: Ipv6Addr = From::<[u16; 8]>::from(self.addr);
|
||||
SocketAddr::new(IpAddr::V6(ipv6), self.port)
|
||||
}
|
||||
}
|
||||
|
||||
pub fn set_addr(&mut self, a: &SocketAddr) {
|
||||
match *a {
|
||||
SocketAddr::V4(v4) => {
|
||||
let ip = v4.ip().octets();
|
||||
self.addr[0] = u16::from(ip[0]);
|
||||
self.addr[1] = u16::from(ip[1]);
|
||||
self.addr[2] = u16::from(ip[2]);
|
||||
self.addr[3] = u16::from(ip[3]);
|
||||
self.port = a.port();
|
||||
}
|
||||
SocketAddr::V6(v6) => {
|
||||
self.addr = v6.ip().segments();
|
||||
self.port = a.port();
|
||||
self.v6 = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct Packets {
|
||||
pub packets: Vec<Packet>,
|
||||
}
|
||||
|
||||
//auto derive doesn't support large arrays
|
||||
impl Default for Packets {
|
||||
fn default() -> Packets {
|
||||
Packets {
|
||||
packets: vec![Packet::default(); NUM_PACKETS],
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct Blob {
|
||||
pub data: [u8; BLOB_SIZE],
|
||||
pub meta: Meta,
|
||||
}
|
||||
|
||||
impl fmt::Debug for Blob {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(
|
||||
f,
|
||||
"Blob {{ size: {:?}, addr: {:?} }}",
|
||||
self.meta.size,
|
||||
self.meta.addr()
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
//auto derive doesn't support large arrays
|
||||
impl Default for Blob {
|
||||
fn default() -> Blob {
|
||||
Blob {
|
||||
data: [0u8; BLOB_SIZE],
|
||||
meta: Meta::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct Recycler<T> {
|
||||
gc: Arc<Mutex<Vec<Arc<RwLock<T>>>>>,
|
||||
}
|
||||
|
||||
impl<T: Default> Default for Recycler<T> {
|
||||
fn default() -> Recycler<T> {
|
||||
Recycler {
|
||||
gc: Arc::new(Mutex::new(vec![])),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Default> Clone for Recycler<T> {
|
||||
fn clone(&self) -> Recycler<T> {
|
||||
Recycler {
|
||||
gc: self.gc.clone(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Default> Recycler<T> {
|
||||
pub fn allocate(&self) -> Arc<RwLock<T>> {
|
||||
let mut gc = self.gc.lock().expect("recycler lock in pb fn allocate");
|
||||
let x = gc.pop()
|
||||
.unwrap_or_else(|| Arc::new(RwLock::new(Default::default())));
|
||||
|
||||
// Only return the item if this recycler is the last reference to it.
|
||||
// Remove this check once `T` holds a Weak reference back to this
|
||||
// recycler and implements `Drop`. At the time of this writing, Weak can't
|
||||
// be passed across threads ('alloc' is a nightly-only API), and so our
|
||||
// reference-counted recyclables are awkwardly being recycled by hand,
|
||||
// which allows this race condition to exist.
|
||||
if Arc::strong_count(&x) > 1 {
|
||||
warn!("Recycled item still in use. Booting it.");
|
||||
drop(gc);
|
||||
self.allocate()
|
||||
} else {
|
||||
x
|
||||
}
|
||||
}
|
||||
pub fn recycle(&self, x: Arc<RwLock<T>>) {
|
||||
let mut gc = self.gc.lock().expect("recycler lock in pub fn recycle");
|
||||
gc.push(x);
|
||||
}
|
||||
}
|
||||
|
||||
impl Packets {
|
||||
fn run_read_from(&mut self, socket: &UdpSocket) -> Result<usize> {
|
||||
static mut COUNTER: Counter = create_counter!("packets", 10);
|
||||
self.packets.resize(NUM_PACKETS, Packet::default());
|
||||
let mut i = 0;
|
||||
//DOCUMENTED SIDE-EFFECT
|
||||
// Squeeze performance out of the IO without using poll()
|
||||
// * block on the socket until it's readable
|
||||
// * set the socket to non blocking
|
||||
// * read until it fails
|
||||
// * set it back to blocking before returning
|
||||
socket.set_nonblocking(false)?;
|
||||
let mut start = Instant::now();
|
||||
for p in &mut self.packets {
|
||||
p.meta.size = 0;
|
||||
trace!("receiving on {}", socket.local_addr().unwrap());
|
||||
match socket.recv_from(&mut p.data) {
|
||||
Err(_) if i > 0 => {
|
||||
inc_counter!(COUNTER, i, start);
|
||||
debug!("got {:?} messages on {}", i, socket.local_addr().unwrap());
|
||||
break;
|
||||
}
|
||||
Err(e) => {
|
||||
trace!("recv_from err {:?}", e);
|
||||
return Err(Error::IO(e));
|
||||
}
|
||||
Ok((nrecv, from)) => {
|
||||
p.meta.size = nrecv;
|
||||
p.meta.set_addr(&from);
|
||||
if i == 0 {
|
||||
start = Instant::now();
|
||||
socket.set_nonblocking(true)?;
|
||||
}
|
||||
}
|
||||
}
|
||||
i += 1;
|
||||
}
|
||||
Ok(i)
|
||||
}
|
||||
pub fn recv_from(&mut self, socket: &UdpSocket) -> Result<()> {
|
||||
let sz = self.run_read_from(socket)?;
|
||||
self.packets.resize(sz, Packet::default());
|
||||
debug!("recv_from: {}", sz);
|
||||
Ok(())
|
||||
}
|
||||
pub fn send_to(&self, socket: &UdpSocket) -> Result<()> {
|
||||
for p in &self.packets {
|
||||
let a = p.meta.addr();
|
||||
socket.send_to(&p.data[..p.meta.size], &a)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
pub fn to_packets_chunked<T: Serialize>(
|
||||
r: &PacketRecycler,
|
||||
xs: Vec<T>,
|
||||
chunks: usize,
|
||||
) -> Vec<SharedPackets> {
|
||||
let mut out = vec![];
|
||||
for x in xs.chunks(chunks) {
|
||||
let p = r.allocate();
|
||||
p.write()
|
||||
.unwrap()
|
||||
.packets
|
||||
.resize(x.len(), Default::default());
|
||||
for (i, o) in x.iter().zip(p.write().unwrap().packets.iter_mut()) {
|
||||
let v = serialize(&i).expect("serialize request");
|
||||
let len = v.len();
|
||||
o.data[..len].copy_from_slice(&v);
|
||||
o.meta.size = len;
|
||||
}
|
||||
out.push(p);
|
||||
}
|
||||
return out;
|
||||
}
|
||||
|
||||
pub fn to_packets<T: Serialize>(r: &PacketRecycler, xs: Vec<T>) -> Vec<SharedPackets> {
|
||||
to_packets_chunked(r, xs, NUM_PACKETS)
|
||||
}
|
||||
|
||||
pub fn to_blob<T: Serialize>(
|
||||
resp: T,
|
||||
rsp_addr: SocketAddr,
|
||||
blob_recycler: &BlobRecycler,
|
||||
) -> Result<SharedBlob> {
|
||||
let blob = blob_recycler.allocate();
|
||||
{
|
||||
let mut b = blob.write().unwrap();
|
||||
let v = serialize(&resp)?;
|
||||
let len = v.len();
|
||||
assert!(len < BLOB_SIZE);
|
||||
b.data[..len].copy_from_slice(&v);
|
||||
b.meta.size = len;
|
||||
b.meta.set_addr(&rsp_addr);
|
||||
}
|
||||
Ok(blob)
|
||||
}
|
||||
|
||||
pub fn to_blobs<T: Serialize>(
|
||||
rsps: Vec<(T, SocketAddr)>,
|
||||
blob_recycler: &BlobRecycler,
|
||||
) -> Result<SharedBlobs> {
|
||||
let mut blobs = VecDeque::new();
|
||||
for (resp, rsp_addr) in rsps {
|
||||
blobs.push_back(to_blob(resp, rsp_addr, blob_recycler)?);
|
||||
}
|
||||
Ok(blobs)
|
||||
}
|
||||
|
||||
const BLOB_INDEX_END: usize = size_of::<u64>();
|
||||
const BLOB_ID_END: usize = BLOB_INDEX_END + size_of::<usize>() + size_of::<PublicKey>();
|
||||
const BLOB_FLAGS_END: usize = BLOB_ID_END + size_of::<u32>();
|
||||
const BLOB_SIZE_END: usize = BLOB_FLAGS_END + size_of::<u64>();
|
||||
|
||||
macro_rules! align {
|
||||
($x:expr, $align:expr) => {
|
||||
$x + ($align - 1) & !($align - 1)
|
||||
};
|
||||
}
|
||||
|
||||
pub const BLOB_FLAG_IS_CODING: u32 = 0x1;
|
||||
pub const BLOB_HEADER_SIZE: usize = align!(BLOB_SIZE_END, 64);
|
||||
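// A minimal sketch of the header arithmetic above, assuming a 64-bit target and
// a 32-byte PublicKey: BLOB_SIZE_END = 8 + (8 + 32) + 4 + 8 = 60 bytes, and
// align!(60, 64) = (60 + 63) & !63 = 64, so BLOB_HEADER_SIZE is one 64-byte slot.
#[cfg(test)]
mod align_sketch {
    #[test]
    fn align_rounds_up_to_a_multiple() {
        let align = |x: usize, a: usize| (x + (a - 1)) & !(a - 1);
        assert_eq!(align(60, 64), 64);
        assert_eq!(align(64, 64), 64);
        assert_eq!(align(65, 64), 128);
    }
}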
|
||||
impl Blob {
|
||||
pub fn get_index(&self) -> Result<u64> {
|
||||
let mut rdr = io::Cursor::new(&self.data[0..BLOB_INDEX_END]);
|
||||
let r = rdr.read_u64::<LittleEndian>()?;
|
||||
Ok(r)
|
||||
}
|
||||
pub fn set_index(&mut self, ix: u64) -> Result<()> {
|
||||
let mut wtr = vec![];
|
||||
wtr.write_u64::<LittleEndian>(ix)?;
|
||||
self.data[..BLOB_INDEX_END].clone_from_slice(&wtr);
|
||||
Ok(())
|
||||
}
|
||||
/// Sender id; we use this to identify whether a blob is from the leader and should be
|
||||
/// retransmitted. Eventually blobs should carry a signature that we can use for spam filtering.
|
||||
pub fn get_id(&self) -> Result<PublicKey> {
|
||||
let e = deserialize(&self.data[BLOB_INDEX_END..BLOB_ID_END])?;
|
||||
Ok(e)
|
||||
}
|
||||
|
||||
pub fn set_id(&mut self, id: PublicKey) -> Result<()> {
|
||||
let wtr = serialize(&id)?;
|
||||
self.data[BLOB_INDEX_END..BLOB_ID_END].clone_from_slice(&wtr);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn get_flags(&self) -> Result<u32> {
|
||||
let mut rdr = io::Cursor::new(&self.data[BLOB_ID_END..BLOB_FLAGS_END]);
|
||||
let r = rdr.read_u32::<LittleEndian>()?;
|
||||
Ok(r)
|
||||
}
|
||||
|
||||
pub fn set_flags(&mut self, ix: u32) -> Result<()> {
|
||||
let mut wtr = vec![];
|
||||
wtr.write_u32::<LittleEndian>(ix)?;
|
||||
self.data[BLOB_ID_END..BLOB_FLAGS_END].clone_from_slice(&wtr);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn is_coding(&self) -> bool {
|
||||
return (self.get_flags().unwrap() & BLOB_FLAG_IS_CODING) != 0;
|
||||
}
|
||||
|
||||
pub fn set_coding(&mut self) -> Result<()> {
|
||||
let flags = self.get_flags().unwrap();
|
||||
self.set_flags(flags | BLOB_FLAG_IS_CODING)
|
||||
}
|
||||
|
||||
pub fn get_data_size(&self) -> Result<u64> {
|
||||
let mut rdr = io::Cursor::new(&self.data[BLOB_FLAGS_END..BLOB_SIZE_END]);
|
||||
let r = rdr.read_u64::<LittleEndian>()?;
|
||||
Ok(r)
|
||||
}
|
||||
|
||||
pub fn set_data_size(&mut self, ix: u64) -> Result<()> {
|
||||
let mut wtr = vec![];
|
||||
wtr.write_u64::<LittleEndian>(ix)?;
|
||||
self.data[BLOB_FLAGS_END..BLOB_SIZE_END].clone_from_slice(&wtr);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn data(&self) -> &[u8] {
|
||||
&self.data[BLOB_HEADER_SIZE..]
|
||||
}
|
||||
pub fn data_mut(&mut self) -> &mut [u8] {
|
||||
&mut self.data[BLOB_HEADER_SIZE..]
|
||||
}
|
||||
pub fn set_size(&mut self, size: usize) {
|
||||
let new_size = size + BLOB_HEADER_SIZE;
|
||||
self.meta.size = new_size;
|
||||
self.set_data_size(new_size as u64).unwrap();
|
||||
}
|
||||
pub fn recv_from(re: &BlobRecycler, socket: &UdpSocket) -> Result<SharedBlobs> {
|
||||
let mut v = VecDeque::new();
|
||||
//DOCUMENTED SIDE-EFFECT
|
||||
// Squeeze performance out of the IO without using poll()
|
||||
// * block on the socket until it's readable
|
||||
// * set the socket to non blocking
|
||||
// * read until it fails
|
||||
// * set it back to blocking before returning
|
||||
socket.set_nonblocking(false)?;
|
||||
for i in 0..NUM_BLOBS {
|
||||
let r = re.allocate();
|
||||
{
|
||||
let mut p = r.write().expect("'r' write lock in pub fn recv_from");
|
||||
trace!("receiving on {}", socket.local_addr().unwrap());
|
||||
match socket.recv_from(&mut p.data) {
|
||||
Err(_) if i > 0 => {
|
||||
trace!("got {:?} messages on {}", i, socket.local_addr().unwrap());
|
||||
break;
|
||||
}
|
||||
Err(e) => {
|
||||
if e.kind() != io::ErrorKind::WouldBlock {
|
||||
info!("recv_from err {:?}", e);
|
||||
}
|
||||
return Err(Error::IO(e));
|
||||
}
|
||||
Ok((nrecv, from)) => {
|
||||
p.meta.size = nrecv;
|
||||
p.meta.set_addr(&from);
|
||||
if i == 0 {
|
||||
socket.set_nonblocking(true)?;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
v.push_back(r);
|
||||
}
|
||||
Ok(v)
|
||||
}
|
||||
pub fn send_to(re: &BlobRecycler, socket: &UdpSocket, v: &mut SharedBlobs) -> Result<()> {
|
||||
while let Some(r) = v.pop_front() {
|
||||
{
|
||||
let p = r.read().expect("'r' read lock in pub fn send_to");
|
||||
let a = p.meta.addr();
|
||||
socket.send_to(&p.data[..p.meta.size], &a)?;
|
||||
}
|
||||
re.recycle(r);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use packet::{
|
||||
to_packets, Blob, BlobRecycler, Packet, PacketRecycler, Packets, Recycler, NUM_PACKETS,
|
||||
};
|
||||
use request::Request;
|
||||
use std::collections::VecDeque;
|
||||
use std::io;
|
||||
use std::io::Write;
|
||||
use std::net::UdpSocket;
|
||||
use std::sync::Arc;
|
||||
|
||||
#[test]
|
||||
pub fn packet_recycler_test() {
|
||||
let r = PacketRecycler::default();
|
||||
let p = r.allocate();
|
||||
r.recycle(p);
|
||||
assert_eq!(r.gc.lock().unwrap().len(), 1);
|
||||
let _ = r.allocate();
|
||||
assert_eq!(r.gc.lock().unwrap().len(), 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
pub fn test_leaked_recyclable() {
|
||||
// Ensure that the recycler won't return an item
|
||||
// that is still referenced outside the recycler.
|
||||
let r = Recycler::<u8>::default();
|
||||
let x0 = r.allocate();
|
||||
r.recycle(x0.clone());
|
||||
assert_eq!(Arc::strong_count(&x0), 2);
|
||||
assert_eq!(r.gc.lock().unwrap().len(), 1);
|
||||
|
||||
let x1 = r.allocate();
|
||||
assert_eq!(Arc::strong_count(&x1), 1);
|
||||
assert_eq!(r.gc.lock().unwrap().len(), 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
pub fn test_leaked_recyclable_recursion() {
|
||||
// In the case of a leaked recyclable, ensure the recycler drops its lock before recursing.
|
||||
let r = Recycler::<u8>::default();
|
||||
let x0 = r.allocate();
|
||||
let x1 = r.allocate();
|
||||
r.recycle(x0); // <-- allocate() of this will require locking the recycler's stack.
|
||||
r.recycle(x1.clone()); // <-- allocate() of this will cause it to be dropped and recurse.
|
||||
assert_eq!(Arc::strong_count(&x1), 2);
|
||||
assert_eq!(r.gc.lock().unwrap().len(), 2);
|
||||
|
||||
r.allocate(); // Ensure lock is released before recursing.
|
||||
assert_eq!(r.gc.lock().unwrap().len(), 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
pub fn blob_recycler_test() {
|
||||
let r = BlobRecycler::default();
|
||||
let p = r.allocate();
|
||||
r.recycle(p);
|
||||
assert_eq!(r.gc.lock().unwrap().len(), 1);
|
||||
let _ = r.allocate();
|
||||
assert_eq!(r.gc.lock().unwrap().len(), 0);
|
||||
}
|
||||
#[test]
|
||||
pub fn packet_send_recv() {
|
||||
let reader = UdpSocket::bind("127.0.0.1:0").expect("bind");
|
||||
let addr = reader.local_addr().unwrap();
|
||||
let sender = UdpSocket::bind("127.0.0.1:0").expect("bind");
|
||||
let saddr = sender.local_addr().unwrap();
|
||||
let r = PacketRecycler::default();
|
||||
let p = r.allocate();
|
||||
p.write().unwrap().packets.resize(10, Packet::default());
|
||||
for m in p.write().unwrap().packets.iter_mut() {
|
||||
m.meta.set_addr(&addr);
|
||||
m.meta.size = 256;
|
||||
}
|
||||
p.read().unwrap().send_to(&sender).unwrap();
|
||||
p.write().unwrap().recv_from(&reader).unwrap();
|
||||
for m in p.write().unwrap().packets.iter_mut() {
|
||||
assert_eq!(m.meta.size, 256);
|
||||
assert_eq!(m.meta.addr(), saddr);
|
||||
}
|
||||
|
||||
r.recycle(p);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_to_packets() {
|
||||
let tx = Request::GetTransactionCount;
|
||||
let re = PacketRecycler::default();
|
||||
let rv = to_packets(&re, vec![tx.clone(); 1]);
|
||||
assert_eq!(rv.len(), 1);
|
||||
assert_eq!(rv[0].read().unwrap().packets.len(), 1);
|
||||
|
||||
let rv = to_packets(&re, vec![tx.clone(); NUM_PACKETS]);
|
||||
assert_eq!(rv.len(), 1);
|
||||
assert_eq!(rv[0].read().unwrap().packets.len(), NUM_PACKETS);
|
||||
|
||||
let rv = to_packets(&re, vec![tx.clone(); NUM_PACKETS + 1]);
|
||||
assert_eq!(rv.len(), 2);
|
||||
assert_eq!(rv[0].read().unwrap().packets.len(), NUM_PACKETS);
|
||||
assert_eq!(rv[1].read().unwrap().packets.len(), 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
pub fn blob_send_recv() {
|
||||
trace!("start");
|
||||
let reader = UdpSocket::bind("127.0.0.1:0").expect("bind");
|
||||
let addr = reader.local_addr().unwrap();
|
||||
let sender = UdpSocket::bind("127.0.0.1:0").expect("bind");
|
||||
let r = BlobRecycler::default();
|
||||
let p = r.allocate();
|
||||
p.write().unwrap().meta.set_addr(&addr);
|
||||
p.write().unwrap().meta.size = 1024;
|
||||
let mut v = VecDeque::new();
|
||||
v.push_back(p);
|
||||
assert_eq!(v.len(), 1);
|
||||
Blob::send_to(&r, &sender, &mut v).unwrap();
|
||||
trace!("send_to");
|
||||
assert_eq!(v.len(), 0);
|
||||
let mut rv = Blob::recv_from(&r, &reader).unwrap();
|
||||
trace!("recv_from");
|
||||
assert_eq!(rv.len(), 1);
|
||||
let rp = rv.pop_front().unwrap();
|
||||
assert_eq!(rp.write().unwrap().meta.size, 1024);
|
||||
r.recycle(rp);
|
||||
}
|
||||
|
||||
#[cfg(all(feature = "ipv6", test))]
|
||||
#[test]
|
||||
pub fn blob_ipv6_send_recv() {
|
||||
let reader = UdpSocket::bind("[::1]:0").expect("bind");
|
||||
let addr = reader.local_addr().unwrap();
|
||||
let sender = UdpSocket::bind("[::1]:0").expect("bind");
|
||||
let r = BlobRecycler::default();
|
||||
let p = r.allocate();
|
||||
p.write().unwrap().meta.set_addr(&addr);
|
||||
p.write().unwrap().meta.size = 1024;
|
||||
let mut v = VecDeque::default();
|
||||
v.push_back(p);
|
||||
Blob::send_to(&r, &sender, &mut v).unwrap();
|
||||
let mut rv = Blob::recv_from(&r, &reader).unwrap();
|
||||
let rp = rv.pop_front().unwrap();
|
||||
assert_eq!(rp.write().unwrap().meta.size, 1024);
|
||||
r.recycle(rp);
|
||||
}
|
||||
|
||||
#[test]
|
||||
pub fn debug_trait() {
|
||||
write!(io::sink(), "{:?}", Packet::default()).unwrap();
|
||||
write!(io::sink(), "{:?}", Packets::default()).unwrap();
|
||||
write!(io::sink(), "{:?}", Blob::default()).unwrap();
|
||||
}
|
||||
#[test]
|
||||
pub fn blob_test() {
|
||||
let mut b = Blob::default();
|
||||
b.set_index(<u64>::max_value()).unwrap();
|
||||
assert_eq!(b.get_index().unwrap(), <u64>::max_value());
|
||||
b.data_mut()[0] = 1;
|
||||
assert_eq!(b.data()[0], 1);
|
||||
assert_eq!(b.get_index().unwrap(), <u64>::max_value());
|
||||
}
|
||||
|
||||
}
|
40 src/payment_plan.rs Normal file
@@ -0,0 +1,40 @@
|
||||
//! The `payment_plan` module provides a domain-specific language for payment plans. Users create Budget objects that
|
||||
//! are given to an interpreter. The interpreter listens for `Witness` transactions,
|
||||
//! which it uses to reduce the payment plan. When the plan is reduced to a
|
||||
//! `Payment`, the payment is executed.
|
||||
|
||||
use chrono::prelude::*;
|
||||
use signature::PublicKey;
|
||||
|
||||
/// The types of events a payment plan can process.
|
||||
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
|
||||
pub enum Witness {
|
||||
/// The current time.
|
||||
Timestamp(DateTime<Utc>),
|
||||
|
||||
/// A signature from PublicKey.
|
||||
Signature(PublicKey),
|
||||
}
|
||||
|
||||
/// Some amount of tokens that should be sent to the `to` `PublicKey`.
|
||||
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
|
||||
pub struct Payment {
|
||||
/// Amount to be paid.
|
||||
pub tokens: i64,
|
||||
|
||||
/// The `PublicKey` that `tokens` should be paid to.
|
||||
pub to: PublicKey,
|
||||
}
|
||||
|
||||
/// Interface to smart contracts.
|
||||
pub trait PaymentPlan {
|
||||
/// Return Payment if the payment plan requires no additional Witnesses.
|
||||
fn final_payment(&self) -> Option<Payment>;
|
||||
|
||||
/// Return true if the plan spends exactly `spendable_tokens`.
|
||||
fn verify(&self, spendable_tokens: i64) -> bool;
|
||||
|
||||
/// Apply a witness to the payment plan to see if the plan can be reduced.
|
||||
/// If so, modify the plan in-place.
|
||||
fn apply_witness(&mut self, witness: &Witness);
|
||||
}
|
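// A minimal sketch of a plan implementing the trait above (the struct here is
// hypothetical, for illustration only): it pays out unconditionally, so it
// needs no Witness and verifies only that the amount matches.
pub struct ImmediatePayment(pub Payment);

impl PaymentPlan for ImmediatePayment {
    fn final_payment(&self) -> Option<Payment> {
        // Already reduced to a payment; nothing to wait for.
        Some(self.0.clone())
    }
    fn verify(&self, spendable_tokens: i64) -> bool {
        self.0.tokens == spendable_tokens
    }
    fn apply_witness(&mut self, _witness: &Witness) {
        // No conditions to reduce; witnesses are ignored.
    }
}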
203 src/record_stage.rs Normal file
@@ -0,0 +1,203 @@
|
||||
//! The `record_stage` module provides an object for generating a Proof of History.
|
||||
//! It records Transaction items on behalf of its users. It continuously generates
|
||||
//! new hashes, only stopping to check if it has been sent a Transaction item. It
|
||||
//! tags each Transaction with an Entry, and sends it back. The Entry includes the
|
||||
//! Transaction, the latest hash, and the number of hashes since the last transaction.
|
||||
//! The resulting stream of entries represents ordered transactions in time.
|
||||
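// A minimal usage sketch (the tests at the bottom of this file exercise the
// same flow): feed Signals in on one channel and read Vec<Entry> batches out
// of the other.
//
//     let (signal_sender, signal_receiver) = channel();
//     let (record_stage, entry_receiver) = RecordStage::new(signal_receiver, &Hash::default());
//     signal_sender.send(Signal::Tick).unwrap();
//     let entries = entry_receiver.recv().unwrap();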
|
||||
use entry::Entry;
|
||||
use hash::Hash;
|
||||
use recorder::Recorder;
|
||||
use std::sync::mpsc::{channel, Receiver, RecvError, Sender, TryRecvError};
|
||||
use std::thread::{Builder, JoinHandle};
|
||||
use std::time::{Duration, Instant};
|
||||
use transaction::Transaction;
|
||||
|
||||
#[cfg_attr(feature = "cargo-clippy", allow(large_enum_variant))]
|
||||
pub enum Signal {
|
||||
Tick,
|
||||
Transactions(Vec<Transaction>),
|
||||
}
|
||||
|
||||
pub struct RecordStage {
|
||||
pub thread_hdl: JoinHandle<()>,
|
||||
}
|
||||
|
||||
impl RecordStage {
|
||||
/// A background thread that will continue tagging received Transaction messages and
|
||||
/// sending back Entry messages until either the receiver or sender channel is closed.
|
||||
pub fn new(
|
||||
signal_receiver: Receiver<Signal>,
|
||||
start_hash: &Hash,
|
||||
) -> (Self, Receiver<Vec<Entry>>) {
|
||||
let (entry_sender, entry_receiver) = channel();
|
||||
let start_hash = start_hash.clone();
|
||||
|
||||
let thread_hdl = Builder::new()
|
||||
.name("solana-record-stage".to_string())
|
||||
.spawn(move || {
|
||||
let mut recorder = Recorder::new(start_hash);
|
||||
let _ = Self::process_signals(&mut recorder, &signal_receiver, &entry_sender);
|
||||
})
|
||||
.unwrap();
|
||||
|
||||
(RecordStage { thread_hdl }, entry_receiver)
|
||||
}
|
||||
|
||||
/// Same as `RecordStage::new`, but will automatically produce entries every `tick_duration`.
|
||||
pub fn new_with_clock(
|
||||
signal_receiver: Receiver<Signal>,
|
||||
start_hash: &Hash,
|
||||
tick_duration: Duration,
|
||||
) -> (Self, Receiver<Vec<Entry>>) {
|
||||
let (entry_sender, entry_receiver) = channel();
|
||||
let start_hash = start_hash.clone();
|
||||
|
||||
let thread_hdl = Builder::new()
|
||||
.name("solana-record-stage".to_string())
|
||||
.spawn(move || {
|
||||
let mut recorder = Recorder::new(start_hash);
|
||||
let start_time = Instant::now();
|
||||
loop {
|
||||
if let Err(_) = Self::try_process_signals(
|
||||
&mut recorder,
|
||||
start_time,
|
||||
tick_duration,
|
||||
&signal_receiver,
|
||||
&entry_sender,
|
||||
) {
|
||||
return;
|
||||
}
|
||||
recorder.hash();
|
||||
}
|
||||
})
|
||||
.unwrap();
|
||||
|
||||
(RecordStage { thread_hdl }, entry_receiver)
|
||||
}
|
||||
|
||||
fn process_signal(
|
||||
signal: Signal,
|
||||
recorder: &mut Recorder,
|
||||
sender: &Sender<Vec<Entry>>,
|
||||
) -> Result<(), ()> {
|
||||
let txs = if let Signal::Transactions(txs) = signal {
|
||||
txs
|
||||
} else {
|
||||
vec![]
|
||||
};
|
||||
let entries = recorder.record(txs);
|
||||
sender.send(entries).or(Err(()))?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn process_signals(
|
||||
recorder: &mut Recorder,
|
||||
receiver: &Receiver<Signal>,
|
||||
sender: &Sender<Vec<Entry>>,
|
||||
) -> Result<(), ()> {
|
||||
loop {
|
||||
match receiver.recv() {
|
||||
Ok(signal) => Self::process_signal(signal, recorder, sender)?,
|
||||
Err(RecvError) => return Err(()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn try_process_signals(
|
||||
recorder: &mut Recorder,
|
||||
start_time: Instant,
|
||||
tick_duration: Duration,
|
||||
receiver: &Receiver<Signal>,
|
||||
sender: &Sender<Vec<Entry>>,
|
||||
) -> Result<(), ()> {
|
||||
loop {
|
||||
if let Some(entry) = recorder.tick(start_time, tick_duration) {
|
||||
sender.send(vec![entry]).or(Err(()))?;
|
||||
}
|
||||
match receiver.try_recv() {
|
||||
Ok(signal) => Self::process_signal(signal, recorder, sender)?,
|
||||
Err(TryRecvError::Empty) => return Ok(()),
|
||||
Err(TryRecvError::Disconnected) => return Err(()),
|
||||
};
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use ledger::Block;
|
||||
use signature::{KeyPair, KeyPairUtil};
|
||||
use std::sync::mpsc::channel;
|
||||
use std::thread::sleep;
|
||||
|
||||
#[test]
|
||||
fn test_historian() {
|
||||
let (tx_sender, tx_receiver) = channel();
|
||||
let zero = Hash::default();
|
||||
let (record_stage, entry_receiver) = RecordStage::new(tx_receiver, &zero);
|
||||
|
||||
tx_sender.send(Signal::Tick).unwrap();
|
||||
sleep(Duration::new(0, 1_000_000));
|
||||
tx_sender.send(Signal::Tick).unwrap();
|
||||
sleep(Duration::new(0, 1_000_000));
|
||||
tx_sender.send(Signal::Tick).unwrap();
|
||||
|
||||
let entry0 = entry_receiver.recv().unwrap()[0].clone();
|
||||
let entry1 = entry_receiver.recv().unwrap()[0].clone();
|
||||
let entry2 = entry_receiver.recv().unwrap()[0].clone();
|
||||
|
||||
assert_eq!(entry0.num_hashes, 0);
|
||||
assert_eq!(entry1.num_hashes, 0);
|
||||
assert_eq!(entry2.num_hashes, 0);
|
||||
|
||||
drop(tx_sender);
|
||||
assert_eq!(record_stage.thread_hdl.join().unwrap(), ());
|
||||
|
||||
assert!([entry0, entry1, entry2].verify(&zero));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_historian_closed_sender() {
|
||||
let (tx_sender, tx_receiver) = channel();
|
||||
let zero = Hash::default();
|
||||
let (record_stage, entry_receiver) = RecordStage::new(tx_receiver, &zero);
|
||||
drop(entry_receiver);
|
||||
tx_sender.send(Signal::Tick).unwrap();
|
||||
assert_eq!(record_stage.thread_hdl.join().unwrap(), ());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_transactions() {
|
||||
let (tx_sender, signal_receiver) = channel();
|
||||
let zero = Hash::default();
|
||||
let (_record_stage, entry_receiver) = RecordStage::new(signal_receiver, &zero);
|
||||
let alice_keypair = KeyPair::new();
|
||||
let bob_pubkey = KeyPair::new().pubkey();
|
||||
let tx0 = Transaction::new(&alice_keypair, bob_pubkey, 1, zero);
|
||||
let tx1 = Transaction::new(&alice_keypair, bob_pubkey, 2, zero);
|
||||
tx_sender
|
||||
.send(Signal::Transactions(vec![tx0, tx1]))
|
||||
.unwrap();
|
||||
drop(tx_sender);
|
||||
let entries: Vec<_> = entry_receiver.iter().collect();
|
||||
assert_eq!(entries.len(), 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_clock() {
|
||||
let (tx_sender, tx_receiver) = channel();
|
||||
let zero = Hash::default();
|
||||
let (_record_stage, entry_receiver) =
|
||||
RecordStage::new_with_clock(tx_receiver, &zero, Duration::from_millis(20));
|
||||
sleep(Duration::from_millis(900));
|
||||
tx_sender.send(Signal::Tick).unwrap();
|
||||
drop(tx_sender);
|
||||
let entries: Vec<_> = entry_receiver.iter().flat_map(|x| x).collect();
|
||||
assert!(entries.len() > 1);
|
||||
|
||||
// Ensure the ID is not the seed.
|
||||
assert_ne!(entries[0].id, zero);
|
||||
}
|
||||
}
|
48 src/recorder.rs Normal file
@@ -0,0 +1,48 @@
|
||||
//! The `recorder` module provides an object for generating a Proof of History.
|
||||
//! It records Transaction items on behalf of its users.
|
||||
|
||||
use entry::Entry;
|
||||
use hash::{hash, Hash};
|
||||
use ledger;
|
||||
use std::time::{Duration, Instant};
|
||||
use transaction::Transaction;
|
||||
|
||||
pub struct Recorder {
|
||||
last_hash: Hash,
|
||||
num_hashes: u64,
|
||||
num_ticks: u32,
|
||||
}
|
||||
|
||||
impl Recorder {
|
||||
pub fn new(last_hash: Hash) -> Self {
|
||||
Recorder {
|
||||
last_hash,
|
||||
num_hashes: 0,
|
||||
num_ticks: 0,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn hash(&mut self) {
|
||||
self.last_hash = hash(&self.last_hash);
|
||||
self.num_hashes += 1;
|
||||
}
|
||||
|
||||
pub fn record(&mut self, transactions: Vec<Transaction>) -> Vec<Entry> {
|
||||
ledger::next_entries_mut(&mut self.last_hash, &mut self.num_hashes, transactions)
|
||||
}
|
||||
|
||||
pub fn tick(&mut self, start_time: Instant, tick_duration: Duration) -> Option<Entry> {
|
||||
if start_time.elapsed() > tick_duration * (self.num_ticks + 1) {
|
||||
// TODO: don't let this overflow u32
|
||||
self.num_ticks += 1;
|
||||
Some(Entry::new_mut(
|
||||
&mut self.last_hash,
|
||||
&mut self.num_hashes,
|
||||
vec![],
|
||||
false,
|
||||
))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
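// A minimal sketch of the tick schedule above, assuming a 20 ms tick_duration:
// tick() fires once more than 20 ms have elapsed, again after 40 ms, and so on,
// because the threshold is tick_duration * (num_ticks + 1).
#[cfg(test)]
mod tick_schedule_sketch {
    use std::time::Duration;

    #[test]
    fn thresholds_grow_linearly() {
        let tick_duration = Duration::from_millis(20); // assumed for illustration
        let thresholds: Vec<Duration> = (0u32..3).map(|n| tick_duration * (n + 1)).collect();
        assert_eq!(
            thresholds,
            vec![
                Duration::from_millis(20),
                Duration::from_millis(40),
                Duration::from_millis(60),
            ]
        );
    }
}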
43 src/replicate_stage.rs Normal file
@@ -0,0 +1,43 @@
|
||||
//! The `replicate_stage` replicates transactions broadcast by the leader.
|
||||
|
||||
use bank::Bank;
|
||||
use ledger;
|
||||
use result::Result;
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::sync::Arc;
|
||||
use std::thread::{Builder, JoinHandle};
|
||||
use std::time::Duration;
|
||||
use streamer::BlobReceiver;
|
||||
|
||||
pub struct ReplicateStage {
|
||||
pub thread_hdl: JoinHandle<()>,
|
||||
}
|
||||
|
||||
impl ReplicateStage {
|
||||
/// Process entry blobs, already in order
|
||||
fn replicate_requests(bank: &Arc<Bank>, blob_receiver: &BlobReceiver) -> Result<()> {
|
||||
let timer = Duration::new(1, 0);
|
||||
let blobs = blob_receiver.recv_timeout(timer)?;
|
||||
let blobs_len = blobs.len();
|
||||
let entries = ledger::reconstruct_entries_from_blobs(blobs)?;
|
||||
let res = bank.process_entries(entries);
|
||||
if res.is_err() {
|
||||
error!("process_entries {} {:?}", blobs_len, res);
|
||||
}
|
||||
res?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn new(bank: Arc<Bank>, exit: Arc<AtomicBool>, window_receiver: BlobReceiver) -> Self {
|
||||
let thread_hdl = Builder::new()
|
||||
.name("solana-replicate-stage".to_string())
|
||||
.spawn(move || loop {
|
||||
let e = Self::replicate_requests(&bank, &window_receiver);
|
||||
if e.is_err() && exit.load(Ordering::Relaxed) {
|
||||
break;
|
||||
}
|
||||
})
|
||||
.unwrap();
|
||||
ReplicateStage { thread_hdl }
|
||||
}
|
||||
}
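A wiring sketch, for illustration only, assuming an `Arc<Bank>` built elsewhere in the crate (not shown in this hunk); it only shows how the pieces of this file fit together:

use bank::Bank;
use replicate_stage::ReplicateStage;
use std::sync::atomic::AtomicBool;
use std::sync::mpsc::channel;
use std::sync::Arc;
use streamer::BlobSender;

fn start_replication(bank: Arc<Bank>) -> (ReplicateStage, BlobSender) {
    let exit = Arc::new(AtomicBool::new(false));
    let (blob_sender, blob_receiver) = channel();
    // The window service would normally own blob_sender and feed ordered entry
    // blobs into it; the stage applies them to the bank as they arrive.
    (ReplicateStage::new(bank, exit, blob_receiver), blob_sender)
}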
|
src/request.rs (new file, 28 lines)
@@ -0,0 +1,28 @@
|
||||
//! The `request` module defines the messages for the thin client.
|
||||
|
||||
use hash::Hash;
|
||||
use signature::{PublicKey, Signature};
|
||||
|
||||
#[cfg_attr(feature = "cargo-clippy", allow(large_enum_variant))]
|
||||
#[derive(Serialize, Deserialize, Debug, Clone)]
|
||||
pub enum Request {
|
||||
GetBalance { key: PublicKey },
|
||||
GetLastId,
|
||||
GetTransactionCount,
|
||||
GetSignature { signature: Signature },
|
||||
}
|
||||
|
||||
impl Request {
|
||||
/// Verify the request is valid.
|
||||
pub fn verify(&self) -> bool {
|
||||
true
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
pub enum Response {
|
||||
Balance { key: PublicKey, val: i64 },
|
||||
LastId { id: Hash },
|
||||
TransactionCount { transaction_count: u64 },
|
||||
SignatureStatus { signature_status: bool },
|
||||
}
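For illustration, a small sketch of how these messages travel over the wire, assuming the bincode encoding the thin client uses elsewhere in this change set:

use bincode::{deserialize, serialize};
use request::{Request, Response};
use signature::{KeyPair, KeyPairUtil};

// Encode a balance query the way a client would, and decode a reply.
fn encode_balance_query() -> Vec<u8> {
    let key = KeyPair::new().pubkey();
    let req = Request::GetBalance { key };
    assert!(req.verify());
    serialize(&req).expect("serialize Request")
}

fn decode_reply(buf: &[u8]) -> Response {
    deserialize(buf).expect("deserialize Response")
}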
|
src/request_processor.rs (new file, 60 lines)
@@ -0,0 +1,60 @@
|
||||
//! The `request_processor` processes thin client Request messages.
|
||||
|
||||
use bank::Bank;
|
||||
use request::{Request, Response};
|
||||
use std::net::SocketAddr;
|
||||
use std::sync::Arc;
|
||||
|
||||
pub struct RequestProcessor {
|
||||
bank: Arc<Bank>,
|
||||
}
|
||||
|
||||
impl RequestProcessor {
|
||||
/// Create a new RequestProcessor that wraps the given Bank.
|
||||
pub fn new(bank: Arc<Bank>) -> Self {
|
||||
RequestProcessor { bank }
|
||||
}
|
||||
|
||||
/// Process Request items sent by clients.
|
||||
fn process_request(
|
||||
&self,
|
||||
msg: Request,
|
||||
rsp_addr: SocketAddr,
|
||||
) -> Option<(Response, SocketAddr)> {
|
||||
match msg {
|
||||
Request::GetBalance { key } => {
|
||||
let val = self.bank.get_balance(&key);
|
||||
let rsp = (Response::Balance { key, val }, rsp_addr);
|
||||
info!("Response::Balance {:?}", rsp);
|
||||
Some(rsp)
|
||||
}
|
||||
Request::GetLastId => {
|
||||
let id = self.bank.last_id();
|
||||
let rsp = (Response::LastId { id }, rsp_addr);
|
||||
info!("Response::LastId {:?}", rsp);
|
||||
Some(rsp)
|
||||
}
|
||||
Request::GetTransactionCount => {
|
||||
let transaction_count = self.bank.transaction_count() as u64;
|
||||
let rsp = (Response::TransactionCount { transaction_count }, rsp_addr);
|
||||
info!("Response::TransactionCount {:?}", rsp);
|
||||
Some(rsp)
|
||||
}
|
||||
Request::GetSignature { signature } => {
|
||||
let signature_status = self.bank.has_signature(&signature);
|
||||
let rsp = (Response::SignatureStatus { signature_status }, rsp_addr);
|
||||
info!("Response::Signature {:?}", rsp);
|
||||
Some(rsp)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn process_requests(
|
||||
&self,
|
||||
reqs: Vec<(Request, SocketAddr)>,
|
||||
) -> Vec<(Response, SocketAddr)> {
|
||||
reqs.into_iter()
|
||||
.filter_map(|(req, rsp_addr)| self.process_request(req, rsp_addr))
|
||||
.collect()
|
||||
}
|
||||
}
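A usage sketch, for illustration only, assuming an `Arc<Bank>` constructed elsewhere in the crate (not shown in this hunk); the client address is a placeholder:

use bank::Bank;
use request::Request;
use request_processor::RequestProcessor;
use signature::{KeyPair, KeyPairUtil};
use std::net::SocketAddr;
use std::sync::Arc;

fn query_balance(bank: Arc<Bank>) {
    let processor = RequestProcessor::new(bank);
    let client_addr: SocketAddr = "127.0.0.1:8001".parse().unwrap();
    let key = KeyPair::new().pubkey();
    let rsps = processor.process_requests(vec![(Request::GetBalance { key }, client_addr)]);
    assert_eq!(rsps.len(), 1); // one (Response, SocketAddr) pair per valid request
}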
|
src/request_stage.rs (new file, 116 lines)
@@ -0,0 +1,116 @@
|
||||
//! The `request_stage` processes thin client Request messages.
|
||||
|
||||
use bincode::deserialize;
|
||||
use packet::{to_blobs, BlobRecycler, PacketRecycler, Packets, SharedPackets};
|
||||
use rayon::prelude::*;
|
||||
use request::Request;
|
||||
use request_processor::RequestProcessor;
|
||||
use result::Result;
|
||||
use std::net::SocketAddr;
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::sync::mpsc::{channel, Receiver};
|
||||
use std::sync::Arc;
|
||||
use std::thread::{Builder, JoinHandle};
|
||||
use std::time::Instant;
|
||||
use streamer::{self, BlobReceiver, BlobSender};
|
||||
use timing;
|
||||
|
||||
pub struct RequestStage {
|
||||
pub thread_hdl: JoinHandle<()>,
|
||||
pub request_processor: Arc<RequestProcessor>,
|
||||
}
|
||||
|
||||
impl RequestStage {
|
||||
pub fn deserialize_requests(p: &Packets) -> Vec<Option<(Request, SocketAddr)>> {
|
||||
p.packets
|
||||
.par_iter()
|
||||
.map(|x| {
|
||||
deserialize(&x.data[0..x.meta.size])
|
||||
.map(|req| (req, x.meta.addr()))
|
||||
.ok()
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
pub fn process_request_packets(
|
||||
request_processor: &RequestProcessor,
|
||||
packet_receiver: &Receiver<SharedPackets>,
|
||||
blob_sender: &BlobSender,
|
||||
packet_recycler: &PacketRecycler,
|
||||
blob_recycler: &BlobRecycler,
|
||||
) -> Result<()> {
|
||||
let (batch, batch_len) = streamer::recv_batch(packet_receiver)?;
|
||||
|
||||
debug!(
|
||||
"@{:?} request_stage: processing: {}",
|
||||
timing::timestamp(),
|
||||
batch_len
|
||||
);
|
||||
|
||||
let mut reqs_len = 0;
|
||||
let proc_start = Instant::now();
|
||||
for msgs in batch {
|
||||
let reqs: Vec<_> = Self::deserialize_requests(&msgs.read().unwrap())
|
||||
.into_iter()
|
||||
.filter_map(|x| x)
|
||||
.collect();
|
||||
reqs_len += reqs.len();
|
||||
|
||||
let rsps = request_processor.process_requests(reqs);
|
||||
|
||||
let blobs = to_blobs(rsps, blob_recycler)?;
|
||||
if !blobs.is_empty() {
|
||||
info!("process: sending blobs: {}", blobs.len());
|
||||
//don't wake up the other side if there is nothing
|
||||
blob_sender.send(blobs)?;
|
||||
}
|
||||
packet_recycler.recycle(msgs);
|
||||
}
|
||||
let total_time_s = timing::duration_as_s(&proc_start.elapsed());
|
||||
let total_time_ms = timing::duration_as_ms(&proc_start.elapsed());
|
||||
debug!(
|
||||
"@{:?} done process batches: {} time: {:?}ms reqs: {} reqs/s: {}",
|
||||
timing::timestamp(),
|
||||
batch_len,
|
||||
total_time_ms,
|
||||
reqs_len,
|
||||
(reqs_len as f32) / (total_time_s)
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
pub fn new(
|
||||
request_processor: RequestProcessor,
|
||||
exit: Arc<AtomicBool>,
|
||||
packet_receiver: Receiver<SharedPackets>,
|
||||
packet_recycler: PacketRecycler,
|
||||
blob_recycler: BlobRecycler,
|
||||
) -> (Self, BlobReceiver) {
|
||||
let request_processor = Arc::new(request_processor);
|
||||
let request_processor_ = request_processor.clone();
|
||||
let (blob_sender, blob_receiver) = channel();
|
||||
let thread_hdl = Builder::new()
|
||||
.name("solana-request-stage".to_string())
|
||||
.spawn(move || loop {
|
||||
let e = Self::process_request_packets(
|
||||
&request_processor_,
|
||||
&packet_receiver,
|
||||
&blob_sender,
|
||||
&packet_recycler,
|
||||
&blob_recycler,
|
||||
);
|
||||
if e.is_err() {
|
||||
if exit.load(Ordering::Relaxed) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
})
|
||||
.unwrap();
|
||||
(
|
||||
RequestStage {
|
||||
thread_hdl,
|
||||
request_processor,
|
||||
},
|
||||
blob_receiver,
|
||||
)
|
||||
}
|
||||
}
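For illustration, a short sketch showing how `deserialize_requests` can be used on its own, assuming a `Packets` batch from the `packet` module:

use packet::Packets;
use request_stage::RequestStage;

// Count how many packets in a batch hold a well-formed Request.
fn count_valid_requests(packets: &Packets) -> usize {
    RequestStage::deserialize_requests(packets)
        .into_iter()
        .filter(|r| r.is_some())
        .count()
}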
|
src/result.rs (new file, 136 lines)
@@ -0,0 +1,136 @@
|
||||
//! The `result` module exposes a Result type that propagates one of many different Error types.
|
||||
|
||||
use bank;
|
||||
use bincode;
|
||||
use serde_json;
|
||||
use std;
|
||||
use std::any::Any;
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum Error {
|
||||
IO(std::io::Error),
|
||||
JSON(serde_json::Error),
|
||||
AddrParse(std::net::AddrParseError),
|
||||
JoinError(Box<Any + Send + 'static>),
|
||||
RecvError(std::sync::mpsc::RecvError),
|
||||
RecvTimeoutError(std::sync::mpsc::RecvTimeoutError),
|
||||
Serialize(std::boxed::Box<bincode::ErrorKind>),
|
||||
BankError(bank::BankError),
|
||||
SendError,
|
||||
Services,
|
||||
CrdtTooSmall,
|
||||
GenericError,
|
||||
}
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
|
||||
impl std::convert::From<std::sync::mpsc::RecvError> for Error {
|
||||
fn from(e: std::sync::mpsc::RecvError) -> Error {
|
||||
Error::RecvError(e)
|
||||
}
|
||||
}
|
||||
impl std::convert::From<std::sync::mpsc::RecvTimeoutError> for Error {
|
||||
fn from(e: std::sync::mpsc::RecvTimeoutError) -> Error {
|
||||
Error::RecvTimeoutError(e)
|
||||
}
|
||||
}
|
||||
impl std::convert::From<bank::BankError> for Error {
|
||||
fn from(e: bank::BankError) -> Error {
|
||||
Error::BankError(e)
|
||||
}
|
||||
}
|
||||
impl<T> std::convert::From<std::sync::mpsc::SendError<T>> for Error {
|
||||
fn from(_e: std::sync::mpsc::SendError<T>) -> Error {
|
||||
Error::SendError
|
||||
}
|
||||
}
|
||||
impl std::convert::From<Box<Any + Send + 'static>> for Error {
|
||||
fn from(e: Box<Any + Send + 'static>) -> Error {
|
||||
Error::JoinError(e)
|
||||
}
|
||||
}
|
||||
|
||||
impl std::convert::From<std::io::Error> for Error {
|
||||
fn from(e: std::io::Error) -> Error {
|
||||
Error::IO(e)
|
||||
}
|
||||
}
|
||||
impl std::convert::From<serde_json::Error> for Error {
|
||||
fn from(e: serde_json::Error) -> Error {
|
||||
Error::JSON(e)
|
||||
}
|
||||
}
|
||||
impl std::convert::From<std::net::AddrParseError> for Error {
|
||||
fn from(e: std::net::AddrParseError) -> Error {
|
||||
Error::AddrParse(e)
|
||||
}
|
||||
}
|
||||
impl std::convert::From<std::boxed::Box<bincode::ErrorKind>> for Error {
|
||||
fn from(e: std::boxed::Box<bincode::ErrorKind>) -> Error {
|
||||
Error::Serialize(e)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use result::Error;
|
||||
use result::Result;
|
||||
use serde_json;
|
||||
use std::io;
|
||||
use std::io::Write;
|
||||
use std::net::SocketAddr;
|
||||
use std::panic;
|
||||
use std::sync::mpsc::channel;
|
||||
use std::sync::mpsc::RecvError;
|
||||
use std::sync::mpsc::RecvTimeoutError;
|
||||
use std::thread;
|
||||
|
||||
fn addr_parse_error() -> Result<SocketAddr> {
|
||||
let r = "12fdfasfsafsadfs".parse()?;
|
||||
Ok(r)
|
||||
}
|
||||
|
||||
fn join_error() -> Result<()> {
|
||||
panic::set_hook(Box::new(|_info| {}));
|
||||
let r = thread::spawn(|| panic!("hi")).join()?;
|
||||
Ok(r)
|
||||
}
|
||||
fn json_error() -> Result<()> {
|
||||
let r = serde_json::from_slice("=342{;;;;:}".as_bytes())?;
|
||||
Ok(r)
|
||||
}
|
||||
fn send_error() -> Result<()> {
|
||||
let (s, r) = channel();
|
||||
drop(r);
|
||||
s.send(())?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn from_test() {
|
||||
assert_matches!(addr_parse_error(), Err(Error::AddrParse(_)));
|
||||
assert_matches!(Error::from(RecvError {}), Error::RecvError(_));
|
||||
assert_matches!(
|
||||
Error::from(RecvTimeoutError::Timeout),
|
||||
Error::RecvTimeoutError(_)
|
||||
);
|
||||
assert_matches!(send_error(), Err(Error::SendError));
|
||||
assert_matches!(join_error(), Err(Error::JoinError(_)));
|
||||
let ioe = io::Error::new(io::ErrorKind::NotFound, "hi");
|
||||
assert_matches!(Error::from(ioe), Error::IO(_));
|
||||
}
|
||||
#[test]
|
||||
fn fmt_test() {
|
||||
write!(io::sink(), "{:?}", addr_parse_error()).unwrap();
|
||||
write!(io::sink(), "{:?}", Error::from(RecvError {})).unwrap();
|
||||
write!(io::sink(), "{:?}", Error::from(RecvTimeoutError::Timeout)).unwrap();
|
||||
write!(io::sink(), "{:?}", send_error()).unwrap();
|
||||
write!(io::sink(), "{:?}", join_error()).unwrap();
|
||||
write!(io::sink(), "{:?}", json_error()).unwrap();
|
||||
write!(
|
||||
io::sink(),
|
||||
"{:?}",
|
||||
Error::from(io::Error::new(io::ErrorKind::NotFound, "hi"))
|
||||
).unwrap();
|
||||
}
|
||||
}
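For illustration, a sketch of why the conversions matter: with the `From` impls above, one function can use `?` across several underlying error types:

use result::Result;
use std::net::{SocketAddr, UdpSocket};

fn bind_to(addr_str: &str) -> Result<UdpSocket> {
    let addr: SocketAddr = addr_str.parse()?; // AddrParseError -> Error::AddrParse
    let sock = UdpSocket::bind(addr)?;        // io::Error -> Error::IO
    Ok(sock)
}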
|
src/rpu.rs (new file, 77 lines)
@@ -0,0 +1,77 @@
|
||||
//! The `rpu` module implements the Request Processing Unit, a
|
||||
//! 3-stage transaction processing pipeline in software. It listens
|
||||
//! for `Request` messages from clients and replies with `Response`
|
||||
//! messages.
|
||||
//!
|
||||
//! ```text
|
||||
//! .------.
|
||||
//! | Bank |
|
||||
//! `---+--`
|
||||
//! |
|
||||
//! .------------------|-------------------.
|
||||
//! | RPU | |
|
||||
//! | v |
|
||||
//! .---------. | .-------. .---------. .---------. | .---------.
|
||||
//! | Alice |--->| | | | | +---->| Alice |
|
||||
//! `---------` | | Fetch | | Request | | Respond | | `---------`
|
||||
//! | | Stage |->| Stage |->| Stage | |
|
||||
//! .---------. | | | | | | | | .---------.
|
||||
//! | Bob |--->| | | | | +---->| Bob |
|
||||
//! `---------` | `-------` `---------` `---------` | `---------`
|
||||
//! | |
|
||||
//! | |
|
||||
//! `--------------------------------------`
|
||||
//! ```
|
||||
|
||||
use bank::Bank;
|
||||
use packet::{BlobRecycler, PacketRecycler};
|
||||
use request_processor::RequestProcessor;
|
||||
use request_stage::RequestStage;
|
||||
use std::net::UdpSocket;
|
||||
use std::sync::atomic::AtomicBool;
|
||||
use std::sync::mpsc::channel;
|
||||
use std::sync::Arc;
|
||||
use std::thread::JoinHandle;
|
||||
use streamer;
|
||||
|
||||
pub struct Rpu {
|
||||
pub thread_hdls: Vec<JoinHandle<()>>,
|
||||
}
|
||||
|
||||
impl Rpu {
|
||||
pub fn new(
|
||||
bank: Arc<Bank>,
|
||||
requests_socket: UdpSocket,
|
||||
respond_socket: UdpSocket,
|
||||
exit: Arc<AtomicBool>,
|
||||
) -> Self {
|
||||
let packet_recycler = PacketRecycler::default();
|
||||
let (packet_sender, packet_receiver) = channel();
|
||||
let t_receiver = streamer::receiver(
|
||||
requests_socket,
|
||||
exit.clone(),
|
||||
packet_recycler.clone(),
|
||||
packet_sender,
|
||||
);
|
||||
|
||||
let blob_recycler = BlobRecycler::default();
|
||||
let request_processor = RequestProcessor::new(bank.clone());
|
||||
let (request_stage, blob_receiver) = RequestStage::new(
|
||||
request_processor,
|
||||
exit.clone(),
|
||||
packet_receiver,
|
||||
packet_recycler.clone(),
|
||||
blob_recycler.clone(),
|
||||
);
|
||||
|
||||
let t_responder = streamer::responder(
|
||||
respond_socket,
|
||||
exit.clone(),
|
||||
blob_recycler.clone(),
|
||||
blob_receiver,
|
||||
);
|
||||
|
||||
let thread_hdls = vec![t_receiver, t_responder, request_stage.thread_hdl];
|
||||
Rpu { thread_hdls }
|
||||
}
|
||||
}
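A wiring sketch, for illustration only, assuming a `Bank` constructed elsewhere in the crate; the socket addresses are placeholders:

use bank::Bank;
use rpu::Rpu;
use std::net::UdpSocket;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;

fn serve(bank: Bank) -> std::io::Result<()> {
    let requests_socket = UdpSocket::bind("0.0.0.0:8000")?;
    let respond_socket = UdpSocket::bind("0.0.0.0:0")?;
    let exit = Arc::new(AtomicBool::new(false));
    let rpu = Rpu::new(Arc::new(bank), requests_socket, respond_socket, exit.clone());
    // ... serve until some shutdown condition, then stop the pipeline:
    exit.store(true, Ordering::Relaxed);
    for t in rpu.thread_hdls {
        t.join().expect("rpu thread");
    }
    Ok(())
}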
|
src/signature.rs (new file, 137 lines)
@@ -0,0 +1,137 @@
|
||||
//! The `signature` module provides functionality for public and private keys.
|
||||
|
||||
use generic_array::typenum::{U32, U64};
|
||||
use generic_array::GenericArray;
|
||||
use rand::{ChaChaRng, Rng, SeedableRng};
|
||||
use rayon::prelude::*;
|
||||
use ring::error::Unspecified;
|
||||
use ring::rand::SecureRandom;
|
||||
use ring::signature::Ed25519KeyPair;
|
||||
use ring::{rand, signature};
|
||||
use std::cell::RefCell;
|
||||
use untrusted;
|
||||
|
||||
pub type KeyPair = Ed25519KeyPair;
|
||||
pub type PublicKey = GenericArray<u8, U32>;
|
||||
pub type Signature = GenericArray<u8, U64>;
|
||||
|
||||
pub trait KeyPairUtil {
|
||||
fn new() -> Self;
|
||||
fn pubkey(&self) -> PublicKey;
|
||||
}
|
||||
|
||||
impl KeyPairUtil for Ed25519KeyPair {
|
||||
/// Return a new ED25519 keypair
|
||||
fn new() -> Self {
|
||||
let rng = rand::SystemRandom::new();
|
||||
let pkcs8_bytes = signature::Ed25519KeyPair::generate_pkcs8(&rng)
|
||||
.expect("generate_pkcs8 in signature pb fn new");
|
||||
signature::Ed25519KeyPair::from_pkcs8(untrusted::Input::from(&pkcs8_bytes))
|
||||
.expect("from_pcks8 in signature pb fn new")
|
||||
}
|
||||
|
||||
/// Return the public key for the given keypair
|
||||
fn pubkey(&self) -> PublicKey {
|
||||
GenericArray::clone_from_slice(self.public_key_bytes())
|
||||
}
|
||||
}
|
||||
|
||||
pub trait SignatureUtil {
|
||||
fn verify(&self, peer_public_key_bytes: &[u8], msg_bytes: &[u8]) -> bool;
|
||||
}
|
||||
|
||||
impl SignatureUtil for GenericArray<u8, U64> {
|
||||
fn verify(&self, peer_public_key_bytes: &[u8], msg_bytes: &[u8]) -> bool {
|
||||
let peer_public_key = untrusted::Input::from(peer_public_key_bytes);
|
||||
let msg = untrusted::Input::from(msg_bytes);
|
||||
let sig = untrusted::Input::from(self);
|
||||
signature::verify(&signature::ED25519, peer_public_key, msg, sig).is_ok()
|
||||
}
|
||||
}
|
||||
|
||||
pub struct GenKeys {
|
||||
// This is necessary because the rng needs to mutate its state to remain
|
||||
// deterministic, and the fill trait requires an immutable reference to self
|
||||
generator: RefCell<ChaChaRng>,
|
||||
}
|
||||
|
||||
impl GenKeys {
|
||||
pub fn new(seed: [u8; 32]) -> GenKeys {
|
||||
let rng = ChaChaRng::from_seed(seed);
|
||||
GenKeys {
|
||||
generator: RefCell::new(rng),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn new_key(&self) -> Vec<u8> {
|
||||
KeyPair::generate_pkcs8(self).unwrap().to_vec()
|
||||
}
|
||||
|
||||
pub fn gen_n_seeds(&self, n: i64) -> Vec<[u8; 32]> {
|
||||
let mut rng = self.generator.borrow_mut();
|
||||
(0..n).map(|_| rng.gen()).collect()
|
||||
}
|
||||
|
||||
pub fn gen_n_keypairs(&self, n: i64) -> Vec<KeyPair> {
|
||||
self.gen_n_seeds(n)
|
||||
.into_par_iter()
|
||||
.map(|seed| {
|
||||
let pkcs8 = GenKeys::new(seed).new_key();
|
||||
KeyPair::from_pkcs8(untrusted::Input::from(&pkcs8)).unwrap()
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
}
|
||||
|
||||
impl SecureRandom for GenKeys {
|
||||
fn fill(&self, dest: &mut [u8]) -> Result<(), Unspecified> {
|
||||
let mut rng = self.generator.borrow_mut();
|
||||
rng.fill(dest);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use std::collections::HashSet;
|
||||
|
||||
#[test]
|
||||
fn test_new_key_is_deterministic() {
|
||||
let seed = [0u8; 32];
|
||||
let rng0 = GenKeys::new(seed);
|
||||
let rng1 = GenKeys::new(seed);
|
||||
|
||||
for _ in 0..100 {
|
||||
assert_eq!(rng0.new_key(), rng1.new_key());
|
||||
}
|
||||
}
|
||||
|
||||
fn gen_n_pubkeys(seed: [u8; 32], n: i64) -> HashSet<PublicKey> {
|
||||
GenKeys::new(seed)
|
||||
.gen_n_keypairs(n)
|
||||
.into_iter()
|
||||
.map(|x| x.pubkey())
|
||||
.collect()
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_gen_n_pubkeys_deterministic() {
|
||||
let seed = [0u8; 32];
|
||||
assert_eq!(gen_n_pubkeys(seed, 50), gen_n_pubkeys(seed, 50));
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(all(feature = "unstable", test))]
|
||||
mod bench {
|
||||
extern crate test;
|
||||
|
||||
use self::test::Bencher;
|
||||
use super::*;
|
||||
|
||||
#[bench]
|
||||
fn bench_gen_keys(b: &mut Bencher) {
|
||||
let rnd = GenKeys::new([0u8; 32]);
|
||||
b.iter(|| rnd.gen_n_keypairs(1000));
|
||||
}
|
||||
}
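For illustration, a usage sketch of the two key-generation paths this module offers:

use signature::{GenKeys, KeyPair, KeyPairUtil};

fn demo_keys() {
    // A fresh random keypair, as used for normal accounts.
    let alice = KeyPair::new();
    let _alice_pubkey = alice.pubkey();

    // Deterministic keypairs from a fixed seed, handy for tests and benchmarks;
    // the tests above assert that the same seed always yields the same keys.
    let keys = GenKeys::new([42u8; 32]).gen_n_keypairs(4);
    assert_eq!(keys.len(), 4);
}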
|
src/sigverify.rs (new file, 231 lines)
@@ -0,0 +1,231 @@
|
||||
//! The `sigverify` module provides digital signature verification functions.
|
||||
//! By default, signatures are verified in parallel using all available CPU
|
||||
//! cores. When `--features=cuda` is enabled, signature verification is
|
||||
//! offloaded to the GPU.
|
||||
//!
|
||||
|
||||
use counter::Counter;
|
||||
use packet::{Packet, SharedPackets};
|
||||
use std::mem::size_of;
|
||||
use std::sync::atomic::AtomicUsize;
|
||||
use std::time::Instant;
|
||||
use transaction::{PUB_KEY_OFFSET, SIGNED_DATA_OFFSET, SIG_OFFSET};
|
||||
|
||||
pub const TX_OFFSET: usize = 0;
|
||||
|
||||
#[cfg(feature = "cuda")]
|
||||
#[repr(C)]
|
||||
struct Elems {
|
||||
elems: *const Packet,
|
||||
num: u32,
|
||||
}
|
||||
|
||||
#[cfg(feature = "cuda")]
|
||||
#[link(name = "cuda_verify_ed25519")]
|
||||
extern "C" {
|
||||
fn ed25519_verify_many(
|
||||
vecs: *const Elems,
|
||||
num: u32, //number of vecs
|
||||
message_size: u32, //size of each element inside the elems field of the vec
|
||||
public_key_offset: u32,
|
||||
signature_offset: u32,
|
||||
signed_message_offset: u32,
|
||||
signed_message_len_offset: u32,
|
||||
out: *mut u8, //combined length of all the items in vecs
|
||||
) -> u32;
|
||||
}
|
||||
|
||||
#[cfg(not(feature = "cuda"))]
|
||||
fn verify_packet(packet: &Packet) -> u8 {
|
||||
use ring::signature;
|
||||
use signature::{PublicKey, Signature};
|
||||
use untrusted;
|
||||
|
||||
let msg_start = TX_OFFSET + SIGNED_DATA_OFFSET;
|
||||
let sig_start = TX_OFFSET + SIG_OFFSET;
|
||||
let sig_end = sig_start + size_of::<Signature>();
|
||||
let pub_key_start = TX_OFFSET + PUB_KEY_OFFSET;
|
||||
let pub_key_end = pub_key_start + size_of::<PublicKey>();
|
||||
|
||||
if packet.meta.size <= msg_start {
|
||||
return 0;
|
||||
}
|
||||
|
||||
let msg_end = packet.meta.size;
|
||||
signature::verify(
|
||||
&signature::ED25519,
|
||||
untrusted::Input::from(&packet.data[pub_key_start..pub_key_end]),
|
||||
untrusted::Input::from(&packet.data[msg_start..msg_end]),
|
||||
untrusted::Input::from(&packet.data[sig_start..sig_end]),
|
||||
).is_ok() as u8
|
||||
}
|
||||
|
||||
fn batch_size(batches: &Vec<SharedPackets>) -> usize {
|
||||
batches
|
||||
.iter()
|
||||
.map(|p| p.read().unwrap().packets.len())
|
||||
.sum()
|
||||
}
|
||||
|
||||
#[cfg(not(feature = "cuda"))]
|
||||
pub fn ed25519_verify(batches: &Vec<SharedPackets>) -> Vec<Vec<u8>> {
|
||||
use rayon::prelude::*;
|
||||
static mut COUNTER: Counter = create_counter!("ed25519_verify", 1);
|
||||
let start = Instant::now();
|
||||
let count = batch_size(batches);
|
||||
info!("CPU ECDSA for {}", batch_size(batches));
|
||||
let rv = batches
|
||||
.into_par_iter()
|
||||
.map(|p| {
|
||||
p.read()
|
||||
.expect("'p' read lock in ed25519_verify")
|
||||
.packets
|
||||
.par_iter()
|
||||
.map(verify_packet)
|
||||
.collect()
|
||||
})
|
||||
.collect();
|
||||
inc_counter!(COUNTER, count, start);
|
||||
rv
|
||||
}
|
||||
|
||||
#[cfg(feature = "cuda")]
|
||||
pub fn ed25519_verify(batches: &Vec<SharedPackets>) -> Vec<Vec<u8>> {
|
||||
use packet::PACKET_DATA_SIZE;
|
||||
static mut COUNTER: Counter = create_counter!("ed25519_verify_cuda", 1);
|
||||
let start = Instant::now();
|
||||
let count = batch_size(batches);
|
||||
info!("CUDA ECDSA for {}", batch_size(batches));
|
||||
let mut out = Vec::new();
|
||||
let mut elems = Vec::new();
|
||||
let mut locks = Vec::new();
|
||||
let mut rvs = Vec::new();
|
||||
|
||||
for packets in batches {
|
||||
locks.push(
|
||||
packets
|
||||
.read()
|
||||
.expect("'packets' read lock in pub fn ed25519_verify"),
|
||||
);
|
||||
}
|
||||
let mut num = 0;
|
||||
for p in locks {
|
||||
elems.push(Elems {
|
||||
elems: p.packets.as_ptr(),
|
||||
num: p.packets.len() as u32,
|
||||
});
|
||||
let mut v = Vec::new();
|
||||
v.resize(p.packets.len(), 0);
|
||||
rvs.push(v);
|
||||
num += p.packets.len();
|
||||
}
|
||||
out.resize(num, 0);
|
||||
trace!("Starting verify num packets: {}", num);
|
||||
trace!("elem len: {}", elems.len() as u32);
|
||||
trace!("packet sizeof: {}", size_of::<Packet>() as u32);
|
||||
trace!("pub key: {}", (TX_OFFSET + PUB_KEY_OFFSET) as u32);
|
||||
trace!("sig offset: {}", (TX_OFFSET + SIG_OFFSET) as u32);
|
||||
trace!("sign data: {}", (TX_OFFSET + SIGNED_DATA_OFFSET) as u32);
|
||||
trace!("len offset: {}", PACKET_DATA_SIZE as u32);
|
||||
unsafe {
|
||||
let res = ed25519_verify_many(
|
||||
elems.as_ptr(),
|
||||
elems.len() as u32,
|
||||
size_of::<Packet>() as u32,
|
||||
(TX_OFFSET + PUB_KEY_OFFSET) as u32,
|
||||
(TX_OFFSET + SIG_OFFSET) as u32,
|
||||
(TX_OFFSET + SIGNED_DATA_OFFSET) as u32,
|
||||
PACKET_DATA_SIZE as u32,
|
||||
out.as_mut_ptr(),
|
||||
);
|
||||
if res != 0 {
|
||||
trace!("RETURN!!!: {}", res);
|
||||
}
|
||||
}
|
||||
trace!("done verify");
|
||||
let mut num = 0;
|
||||
for vs in rvs.iter_mut() {
|
||||
for mut v in vs.iter_mut() {
|
||||
*v = out[num];
|
||||
if *v != 0 {
|
||||
trace!("VERIFIED PACKET!!!!!");
|
||||
}
|
||||
num += 1;
|
||||
}
|
||||
}
|
||||
inc_counter!(COUNTER, count, start);
|
||||
rvs
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use bincode::serialize;
|
||||
use packet::{Packet, Packets, SharedPackets};
|
||||
use sigverify;
|
||||
use std::sync::RwLock;
|
||||
use transaction::Transaction;
|
||||
use transaction::{memfind, test_tx};
|
||||
|
||||
#[test]
|
||||
fn test_layout() {
|
||||
let tx = test_tx();
|
||||
let tx_bytes = serialize(&tx).unwrap();
|
||||
let packet = serialize(&tx).unwrap();
|
||||
assert_matches!(memfind(&packet, &tx_bytes), Some(sigverify::TX_OFFSET));
|
||||
assert_matches!(memfind(&packet, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), None);
|
||||
}
|
||||
|
||||
fn make_packet_from_transaction(tx: Transaction) -> Packet {
|
||||
let tx_bytes = serialize(&tx).unwrap();
|
||||
let mut packet = Packet::default();
|
||||
packet.meta.size = tx_bytes.len();
|
||||
packet.data[..packet.meta.size].copy_from_slice(&tx_bytes);
|
||||
return packet;
|
||||
}
|
||||
|
||||
fn test_verify_n(n: usize, modify_data: bool) {
|
||||
let tx = test_tx();
|
||||
let mut packet = make_packet_from_transaction(tx);
|
||||
|
||||
// jumble some data to test failure
|
||||
if modify_data {
|
||||
packet.data[20] = 10;
|
||||
}
|
||||
|
||||
// generate packet vector
|
||||
let mut packets = Packets::default();
|
||||
packets.packets = Vec::new();
|
||||
for _ in 0..n {
|
||||
packets.packets.push(packet.clone());
|
||||
}
|
||||
let shared_packets = SharedPackets::new(RwLock::new(packets));
|
||||
let batches = vec![shared_packets.clone(), shared_packets.clone()];
|
||||
|
||||
// verify packets
|
||||
let ans = sigverify::ed25519_verify(&batches);
|
||||
|
||||
// check result
|
||||
let ref_ans = if modify_data { 0u8 } else { 1u8 };
|
||||
assert_eq!(ans, vec![vec![ref_ans; n], vec![ref_ans; n]]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_verify_zero() {
|
||||
test_verify_n(0, false);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_verify_one() {
|
||||
test_verify_n(1, false);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_verify_seventy_one() {
|
||||
test_verify_n(71, false);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_verify_fail() {
|
||||
test_verify_n(5, true);
|
||||
}
|
||||
}
|
src/sigverify_stage.rs (new file, 100 lines)
@@ -0,0 +1,100 @@
|
||||
//! The `sigverify_stage` implements the signature verification stage of the TPU. It
|
||||
//! receives a list of lists of packets and outputs the same list, but tags each
|
||||
//! top-level list with a list of booleans, telling the next stage whether the
|
||||
//! signature in that packet is valid. It assumes each packet contains one
|
||||
//! transaction. All processing is done on the CPU by default and on a GPU
|
||||
//! if the `cuda` feature is enabled with `--features=cuda`.
|
||||
|
||||
use packet::SharedPackets;
|
||||
use rand::{thread_rng, Rng};
|
||||
use result::Result;
|
||||
use sigverify;
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::sync::mpsc::{channel, Receiver, Sender};
|
||||
use std::sync::{Arc, Mutex};
|
||||
use std::thread::{spawn, JoinHandle};
|
||||
use std::time::Instant;
|
||||
use streamer::{self, PacketReceiver};
|
||||
use timing;
|
||||
|
||||
pub struct SigVerifyStage {
|
||||
pub thread_hdls: Vec<JoinHandle<()>>,
|
||||
}
|
||||
|
||||
impl SigVerifyStage {
|
||||
pub fn new(
|
||||
exit: Arc<AtomicBool>,
|
||||
packet_receiver: Receiver<SharedPackets>,
|
||||
) -> (Self, Receiver<Vec<(SharedPackets, Vec<u8>)>>) {
|
||||
let (verified_sender, verified_receiver) = channel();
|
||||
let thread_hdls = Self::verifier_services(exit, packet_receiver, verified_sender);
|
||||
(SigVerifyStage { thread_hdls }, verified_receiver)
|
||||
}
|
||||
|
||||
fn verify_batch(batch: Vec<SharedPackets>) -> Vec<(SharedPackets, Vec<u8>)> {
|
||||
let r = sigverify::ed25519_verify(&batch);
|
||||
batch.into_iter().zip(r).collect()
|
||||
}
|
||||
|
||||
fn verifier(
|
||||
recvr: &Arc<Mutex<PacketReceiver>>,
|
||||
sendr: &Arc<Mutex<Sender<Vec<(SharedPackets, Vec<u8>)>>>>,
|
||||
) -> Result<()> {
|
||||
let (batch, len) =
|
||||
streamer::recv_batch(&recvr.lock().expect("'recvr' lock in fn verifier"))?;
|
||||
|
||||
let now = Instant::now();
|
||||
let batch_len = batch.len();
|
||||
let rand_id = thread_rng().gen_range(0, 100);
|
||||
info!(
|
||||
"@{:?} verifier: verifying: {} id: {}",
|
||||
timing::timestamp(),
|
||||
batch.len(),
|
||||
rand_id
|
||||
);
|
||||
|
||||
let verified_batch = Self::verify_batch(batch);
|
||||
sendr
|
||||
.lock()
|
||||
.expect("lock in fn verify_batch in tpu")
|
||||
.send(verified_batch)?;
|
||||
|
||||
let total_time_ms = timing::duration_as_ms(&now.elapsed());
|
||||
let total_time_s = timing::duration_as_s(&now.elapsed());
|
||||
info!(
|
||||
"@{:?} verifier: done. batches: {} total verify time: {:?} id: {} verified: {} v/s {}",
|
||||
timing::timestamp(),
|
||||
batch_len,
|
||||
total_time_ms,
|
||||
rand_id,
|
||||
len,
|
||||
(len as f32 / total_time_s)
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn verifier_service(
|
||||
exit: Arc<AtomicBool>,
|
||||
packet_receiver: Arc<Mutex<PacketReceiver>>,
|
||||
verified_sender: Arc<Mutex<Sender<Vec<(SharedPackets, Vec<u8>)>>>>,
|
||||
) -> JoinHandle<()> {
|
||||
spawn(move || loop {
|
||||
let e = Self::verifier(&packet_receiver.clone(), &verified_sender.clone());
|
||||
if e.is_err() && exit.load(Ordering::Relaxed) {
|
||||
break;
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
fn verifier_services(
|
||||
exit: Arc<AtomicBool>,
|
||||
packet_receiver: PacketReceiver,
|
||||
verified_sender: Sender<Vec<(SharedPackets, Vec<u8>)>>,
|
||||
) -> Vec<JoinHandle<()>> {
|
||||
let sender = Arc::new(Mutex::new(verified_sender));
|
||||
let receiver = Arc::new(Mutex::new(packet_receiver));
|
||||
(0..4)
|
||||
.map(|_| Self::verifier_service(exit.clone(), receiver.clone(), sender.clone()))
|
||||
.collect()
|
||||
}
|
||||
}
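A wiring sketch, for illustration only, showing how the stage's channels line up; the upstream packet source and downstream consumer are only stubbed here:

use sigverify_stage::SigVerifyStage;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::channel;
use std::sync::Arc;

fn wire_stage() {
    let exit = Arc::new(AtomicBool::new(false));
    // Upstream (e.g. streamer::receiver) would push SharedPackets into this channel.
    let (packet_sender, packet_receiver) = channel();
    let (_stage, verified_receiver) = SigVerifyStage::new(exit.clone(), packet_receiver);

    // Each batch pairs the original packets with one u8 per packet; 1 means the
    // signature checked out.
    for batch in verified_receiver.try_iter() {
        for (_packets, flags) in batch {
            assert!(flags.iter().all(|f| *f == 1));
        }
    }

    drop(packet_sender);
    exit.store(true, Ordering::Relaxed); // lets the verifier threads wind down
}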
|
src/streamer.rs (new file, 887 lines)
@@ -0,0 +1,887 @@
|
||||
//! The `streamer` module defines a set of services for efficiently pulling data from UDP sockets.
|
||||
//!
|
||||
use crdt::Crdt;
|
||||
#[cfg(feature = "erasure")]
|
||||
use erasure;
|
||||
use packet::{
|
||||
Blob, BlobRecycler, PacketRecycler, SharedBlob, SharedBlobs, SharedPackets, BLOB_SIZE,
|
||||
};
|
||||
use result::{Error, Result};
|
||||
use std::collections::VecDeque;
|
||||
use std::mem;
|
||||
use std::net::{SocketAddr, UdpSocket};
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::sync::mpsc::{Receiver, Sender};
|
||||
use std::sync::{Arc, RwLock};
|
||||
use std::thread::{Builder, JoinHandle};
|
||||
use std::time::Duration;
|
||||
|
||||
pub const WINDOW_SIZE: u64 = 2 * 1024;
|
||||
pub type PacketReceiver = Receiver<SharedPackets>;
|
||||
pub type PacketSender = Sender<SharedPackets>;
|
||||
pub type BlobSender = Sender<SharedBlobs>;
|
||||
pub type BlobReceiver = Receiver<SharedBlobs>;
|
||||
pub type Window = Arc<RwLock<Vec<Option<SharedBlob>>>>;
|
||||
|
||||
fn recv_loop(
|
||||
sock: &UdpSocket,
|
||||
exit: &Arc<AtomicBool>,
|
||||
re: &PacketRecycler,
|
||||
channel: &PacketSender,
|
||||
) -> Result<()> {
|
||||
loop {
|
||||
let msgs = re.allocate();
|
||||
loop {
|
||||
let result = msgs.write()
|
||||
.expect("write lock in fn recv_loop")
|
||||
.recv_from(sock);
|
||||
match result {
|
||||
Ok(()) => {
|
||||
channel.send(msgs)?;
|
||||
break;
|
||||
}
|
||||
Err(_) => {
|
||||
if exit.load(Ordering::Relaxed) {
|
||||
re.recycle(msgs);
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn receiver(
|
||||
sock: UdpSocket,
|
||||
exit: Arc<AtomicBool>,
|
||||
recycler: PacketRecycler,
|
||||
packet_sender: PacketSender,
|
||||
) -> JoinHandle<()> {
|
||||
let res = sock.set_read_timeout(Some(Duration::new(1, 0)));
|
||||
if res.is_err() {
|
||||
panic!("streamer::receiver set_read_timeout error");
|
||||
}
|
||||
Builder::new()
|
||||
.name("solana-receiver".to_string())
|
||||
.spawn(move || {
|
||||
let _ = recv_loop(&sock, &exit, &recycler, &packet_sender);
|
||||
()
|
||||
})
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
fn recv_send(sock: &UdpSocket, recycler: &BlobRecycler, r: &BlobReceiver) -> Result<()> {
|
||||
let timer = Duration::new(1, 0);
|
||||
let mut msgs = r.recv_timeout(timer)?;
|
||||
Blob::send_to(recycler, sock, &mut msgs)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn recv_batch(recvr: &PacketReceiver) -> Result<(Vec<SharedPackets>, usize)> {
|
||||
let timer = Duration::new(1, 0);
|
||||
let msgs = recvr.recv_timeout(timer)?;
|
||||
trace!("got msgs");
|
||||
let mut len = msgs.read().unwrap().packets.len();
|
||||
let mut batch = vec![msgs];
|
||||
while let Ok(more) = recvr.try_recv() {
|
||||
trace!("got more msgs");
|
||||
len += more.read().unwrap().packets.len();
|
||||
batch.push(more);
|
||||
|
||||
if len > 100_000 {
|
||||
break;
|
||||
}
|
||||
}
|
||||
debug!("batch len {}", batch.len());
|
||||
Ok((batch, len))
|
||||
}
|
||||
|
||||
pub fn responder(
|
||||
sock: UdpSocket,
|
||||
exit: Arc<AtomicBool>,
|
||||
recycler: BlobRecycler,
|
||||
r: BlobReceiver,
|
||||
) -> JoinHandle<()> {
|
||||
Builder::new()
|
||||
.name("solana-responder".to_string())
|
||||
.spawn(move || loop {
|
||||
if recv_send(&sock, &recycler, &r).is_err() && exit.load(Ordering::Relaxed) {
|
||||
break;
|
||||
}
|
||||
})
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
//TODO, we would need to stick block authentication before we create the
|
||||
//window.
|
||||
fn recv_blobs(recycler: &BlobRecycler, sock: &UdpSocket, s: &BlobSender) -> Result<()> {
|
||||
trace!("receiving on {}", sock.local_addr().unwrap());
|
||||
let dq = Blob::recv_from(recycler, sock)?;
|
||||
if !dq.is_empty() {
|
||||
s.send(dq)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn blob_receiver(
|
||||
exit: Arc<AtomicBool>,
|
||||
recycler: BlobRecycler,
|
||||
sock: UdpSocket,
|
||||
s: BlobSender,
|
||||
) -> Result<JoinHandle<()>> {
|
||||
//DOCUMENTED SIDE-EFFECT
|
||||
//1 second timeout on socket read
|
||||
let timer = Duration::new(1, 0);
|
||||
sock.set_read_timeout(Some(timer))?;
|
||||
let t = Builder::new()
|
||||
.name("solana-blob_receiver".to_string())
|
||||
.spawn(move || loop {
|
||||
if exit.load(Ordering::Relaxed) {
|
||||
break;
|
||||
}
|
||||
let _ = recv_blobs(&recycler, &sock, &s);
|
||||
})
|
||||
.unwrap();
|
||||
Ok(t)
|
||||
}
|
||||
|
||||
fn find_next_missing(
|
||||
locked_window: &Window,
|
||||
crdt: &Arc<RwLock<Crdt>>,
|
||||
consumed: &mut u64,
|
||||
received: &mut u64,
|
||||
) -> Result<Vec<(SocketAddr, Vec<u8>)>> {
|
||||
if *received <= *consumed {
|
||||
return Err(Error::GenericError);
|
||||
}
|
||||
let window = locked_window.read().unwrap();
|
||||
let reqs: Vec<_> = (*consumed..*received)
|
||||
.filter_map(|pix| {
|
||||
let i = (pix % WINDOW_SIZE) as usize;
|
||||
if let &None = &window[i] {
|
||||
let val = crdt.read().unwrap().window_index_request(pix as u64);
|
||||
if let Ok((to, req)) = val {
|
||||
return Some((to, req));
|
||||
}
|
||||
}
|
||||
None
|
||||
})
|
||||
.collect();
|
||||
Ok(reqs)
|
||||
}
|
||||
|
||||
fn repair_window(
|
||||
locked_window: &Window,
|
||||
crdt: &Arc<RwLock<Crdt>>,
|
||||
_recycler: &BlobRecycler,
|
||||
last: &mut u64,
|
||||
times: &mut usize,
|
||||
consumed: &mut u64,
|
||||
received: &mut u64,
|
||||
) -> Result<()> {
|
||||
#[cfg(feature = "erasure")]
|
||||
{
|
||||
if erasure::recover(
|
||||
_recycler,
|
||||
&mut locked_window.write().unwrap(),
|
||||
*consumed as usize,
|
||||
*received as usize,
|
||||
).is_err()
|
||||
{
|
||||
trace!("erasure::recover failed");
|
||||
}
|
||||
}
|
||||
//exponential backoff
|
||||
if *last != *consumed {
|
||||
*times = 0;
|
||||
}
|
||||
*last = *consumed;
|
||||
*times += 1;
|
||||
//only retry when times crosses a power of two (7 -> 8, 15 -> 16, ...); otherwise return Ok
|
||||
if *times & (*times - 1) != 0 {
|
||||
trace!("repair_window counter {} {}", *times, *consumed);
|
||||
return Ok(());
|
||||
}
|
||||
let reqs = find_next_missing(locked_window, crdt, consumed, received)?;
|
||||
let sock = UdpSocket::bind("0.0.0.0:0")?;
|
||||
for (to, req) in reqs {
|
||||
//todo cache socket
|
||||
info!("repair_window request {} {} {}", *consumed, *received, to);
|
||||
assert!(req.len() < BLOB_SIZE);
|
||||
sock.send_to(&req, to)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn recv_window(
|
||||
locked_window: &Window,
|
||||
crdt: &Arc<RwLock<Crdt>>,
|
||||
recycler: &BlobRecycler,
|
||||
consumed: &mut u64,
|
||||
received: &mut u64,
|
||||
r: &BlobReceiver,
|
||||
s: &BlobSender,
|
||||
retransmit: &BlobSender,
|
||||
) -> Result<()> {
|
||||
let timer = Duration::from_millis(200);
|
||||
let mut dq = r.recv_timeout(timer)?;
|
||||
let leader_id = crdt.read()
|
||||
.expect("'crdt' read lock in fn recv_window")
|
||||
.leader_data()
|
||||
.expect("leader not ready")
|
||||
.id;
|
||||
while let Ok(mut nq) = r.try_recv() {
|
||||
dq.append(&mut nq)
|
||||
}
|
||||
{
|
||||
//retransmit all leader blocks
|
||||
let mut retransmitq = VecDeque::new();
|
||||
for b in &dq {
|
||||
let p = b.read().expect("'b' read lock in fn recv_window");
|
||||
//TODO this check isn't safe against adversarial packets
|
||||
//we need to maintain a sequence window
|
||||
trace!(
|
||||
"idx: {} addr: {:?} id: {:?} leader: {:?}",
|
||||
p.get_index().expect("get_index in fn recv_window"),
|
||||
p.get_id().expect("get_id in trace! fn recv_window"),
|
||||
p.meta.addr(),
|
||||
leader_id
|
||||
);
|
||||
if p.get_id().expect("get_id in fn recv_window") == leader_id {
|
||||
//TODO
|
||||
//need to copy the retransmitted blob
|
||||
//otherwise we get into races with which thread
|
||||
//should do the recycling
|
||||
//
|
||||
//a better abstraction would be to recycle when the blob
|
||||
//is dropped via a weakref to the recycler
|
||||
let nv = recycler.allocate();
|
||||
{
|
||||
let mut mnv = nv.write().expect("recycler write lock in fn recv_window");
|
||||
let sz = p.meta.size;
|
||||
mnv.meta.size = sz;
|
||||
mnv.data[..sz].copy_from_slice(&p.data[..sz]);
|
||||
}
|
||||
retransmitq.push_back(nv);
|
||||
}
|
||||
}
|
||||
if !retransmitq.is_empty() {
|
||||
retransmit.send(retransmitq)?;
|
||||
}
|
||||
}
|
||||
//send a contiguous set of blocks
|
||||
let mut contq = VecDeque::new();
|
||||
while let Some(b) = dq.pop_front() {
|
||||
let (pix, meta_size) = {
|
||||
let p = b.write().expect("'b' write lock in fn recv_window");
|
||||
(p.get_index()?, p.meta.size)
|
||||
};
|
||||
if pix > *received {
|
||||
*received = pix;
|
||||
}
|
||||
// Got a blob which has already been consumed, skip it
|
||||
// probably from a repair window request
|
||||
if pix < *consumed {
|
||||
debug!(
|
||||
"received: {} but older than consumed: {} skipping..",
|
||||
pix, *consumed
|
||||
);
|
||||
continue;
|
||||
}
|
||||
let w = (pix % WINDOW_SIZE) as usize;
|
||||
//TODO, after the block are authenticated
|
||||
//if we get different blocks at the same index
|
||||
//that is a network failure/attack
|
||||
trace!("window w: {} size: {}", w, meta_size);
|
||||
{
|
||||
let mut window = locked_window.write().unwrap();
|
||||
|
||||
// Search the window for old blobs in the window
|
||||
// of consumed to received and clear any old ones
|
||||
for ix in *consumed..(pix + 1) {
|
||||
let k = (ix % WINDOW_SIZE) as usize;
|
||||
if let Some(b) = &mut window[k] {
|
||||
if b.read().unwrap().get_index().unwrap() >= *consumed as u64 {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
if let Some(b) = mem::replace(&mut window[k], None) {
|
||||
recycler.recycle(b);
|
||||
}
|
||||
}
|
||||
|
||||
// Insert the new blob into the window
|
||||
// spot should be free because we cleared it above
|
||||
if window[w].is_none() {
|
||||
window[w] = Some(b);
|
||||
} else if let Some(cblob) = &window[w] {
|
||||
if cblob.read().unwrap().get_index().unwrap() != pix as u64 {
|
||||
warn!("overrun blob at index {:}", w);
|
||||
} else {
|
||||
debug!("duplicate blob at index {:}", w);
|
||||
}
|
||||
}
|
||||
loop {
|
||||
let k = (*consumed % WINDOW_SIZE) as usize;
|
||||
trace!("k: {} consumed: {}", k, *consumed);
|
||||
|
||||
if window[k].is_none() {
|
||||
break;
|
||||
}
|
||||
let mut is_coding = false;
|
||||
if let &Some(ref cblob) = &window[k] {
|
||||
let cblob_r = cblob
|
||||
.read()
|
||||
.expect("blob read lock for flogs streamer::window");
|
||||
if cblob_r.get_index().unwrap() < *consumed {
|
||||
break;
|
||||
}
|
||||
if cblob_r.is_coding() {
|
||||
is_coding = true;
|
||||
}
|
||||
}
|
||||
if !is_coding {
|
||||
contq.push_back(window[k].clone().expect("clone in fn recv_window"));
|
||||
*consumed += 1;
|
||||
} else {
|
||||
#[cfg(feature = "erasure")]
|
||||
{
|
||||
let block_start = *consumed - (*consumed % erasure::NUM_CODED as u64);
|
||||
let coding_end = block_start + erasure::NUM_CODED as u64;
|
||||
// We've received all this block's data blobs, go and null out the window now
|
||||
for j in block_start..*consumed {
|
||||
if let Some(b) =
|
||||
mem::replace(&mut window[(j % WINDOW_SIZE) as usize], None)
|
||||
{
|
||||
recycler.recycle(b);
|
||||
}
|
||||
}
|
||||
for j in *consumed..coding_end {
|
||||
window[(j % WINDOW_SIZE) as usize] = None;
|
||||
}
|
||||
|
||||
*consumed += erasure::MAX_MISSING as u64;
|
||||
debug!(
|
||||
"skipping processing coding blob k: {} consumed: {}",
|
||||
k, *consumed
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
print_window(locked_window, *consumed);
|
||||
trace!("sending contq.len: {}", contq.len());
|
||||
if !contq.is_empty() {
|
||||
trace!("sending contq.len: {}", contq.len());
|
||||
s.send(contq)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn print_window(locked_window: &Window, consumed: u64) {
|
||||
{
|
||||
let buf: Vec<_> = locked_window
|
||||
.read()
|
||||
.unwrap()
|
||||
.iter()
|
||||
.enumerate()
|
||||
.map(|(i, v)| {
|
||||
if i == (consumed % WINDOW_SIZE) as usize {
|
||||
"_"
|
||||
} else if v.is_none() {
|
||||
"0"
|
||||
} else {
|
||||
if let &Some(ref cblob) = &v {
|
||||
if cblob.read().unwrap().is_coding() {
|
||||
"C"
|
||||
} else {
|
||||
"1"
|
||||
}
|
||||
} else {
|
||||
"0"
|
||||
}
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
debug!("WINDOW ({}): {}", consumed, buf.join(""));
|
||||
}
|
||||
}
|
||||
|
||||
pub fn default_window() -> Window {
|
||||
Arc::new(RwLock::new(vec![None; WINDOW_SIZE as usize]))
|
||||
}
|
||||
|
||||
pub fn window(
|
||||
exit: Arc<AtomicBool>,
|
||||
crdt: Arc<RwLock<Crdt>>,
|
||||
window: Window,
|
||||
entry_height: u64,
|
||||
recycler: BlobRecycler,
|
||||
r: BlobReceiver,
|
||||
s: BlobSender,
|
||||
retransmit: BlobSender,
|
||||
) -> JoinHandle<()> {
|
||||
Builder::new()
|
||||
.name("solana-window".to_string())
|
||||
.spawn(move || {
|
||||
let mut consumed = entry_height;
|
||||
let mut received = entry_height;
|
||||
let mut last = entry_height;
|
||||
let mut times = 0;
|
||||
loop {
|
||||
if exit.load(Ordering::Relaxed) {
|
||||
break;
|
||||
}
|
||||
let _ = recv_window(
|
||||
&window,
|
||||
&crdt,
|
||||
&recycler,
|
||||
&mut consumed,
|
||||
&mut received,
|
||||
&r,
|
||||
&s,
|
||||
&retransmit,
|
||||
);
|
||||
let _ = repair_window(
|
||||
&window,
|
||||
&crdt,
|
||||
&recycler,
|
||||
&mut last,
|
||||
&mut times,
|
||||
&mut consumed,
|
||||
&mut received,
|
||||
);
|
||||
}
|
||||
})
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
fn broadcast(
|
||||
crdt: &Arc<RwLock<Crdt>>,
|
||||
window: &Window,
|
||||
recycler: &BlobRecycler,
|
||||
r: &BlobReceiver,
|
||||
sock: &UdpSocket,
|
||||
transmit_index: &mut u64,
|
||||
receive_index: &mut u64,
|
||||
) -> Result<()> {
|
||||
let timer = Duration::new(1, 0);
|
||||
let mut dq = r.recv_timeout(timer)?;
|
||||
while let Ok(mut nq) = r.try_recv() {
|
||||
dq.append(&mut nq);
|
||||
}
|
||||
|
||||
// flatten deque to vec
|
||||
let blobs_vec: Vec<_> = dq.into_iter().collect();
|
||||
|
||||
// We could receive more blobs than window slots so
|
||||
// break them up into window-sized chunks to process
|
||||
let blobs_chunked = blobs_vec.chunks(WINDOW_SIZE as usize).map(|x| x.to_vec());
|
||||
|
||||
print_window(window, *receive_index);
|
||||
|
||||
for mut blobs in blobs_chunked {
|
||||
// Insert the coding blobs into the blob stream
|
||||
#[cfg(feature = "erasure")]
|
||||
erasure::add_coding_blobs(recycler, &mut blobs, *receive_index);
|
||||
|
||||
let blobs_len = blobs.len();
|
||||
debug!("broadcast blobs.len: {}", blobs_len);
|
||||
|
||||
// Index the blobs
|
||||
Crdt::index_blobs(crdt, &blobs, receive_index)?;
|
||||
// keep the cache of blobs that are broadcast
|
||||
{
|
||||
let mut win = window.write().unwrap();
|
||||
assert!(blobs.len() <= win.len());
|
||||
for b in &blobs {
|
||||
let ix = b.read().unwrap().get_index().expect("blob index");
|
||||
let pos = (ix % WINDOW_SIZE) as usize;
|
||||
if let Some(x) = mem::replace(&mut win[pos], None) {
|
||||
trace!(
|
||||
"popped {} at {}",
|
||||
x.read().unwrap().get_index().unwrap(),
|
||||
pos
|
||||
);
|
||||
recycler.recycle(x);
|
||||
}
|
||||
trace!("null {}", pos);
|
||||
}
|
||||
while let Some(b) = blobs.pop() {
|
||||
let ix = b.read().unwrap().get_index().expect("blob index");
|
||||
let pos = (ix % WINDOW_SIZE) as usize;
|
||||
trace!("caching {} at {}", ix, pos);
|
||||
assert!(win[pos].is_none());
|
||||
win[pos] = Some(b);
|
||||
}
|
||||
}
|
||||
|
||||
// Fill in the coding blob data from the window data blobs
|
||||
#[cfg(feature = "erasure")]
|
||||
{
|
||||
erasure::generate_coding(
|
||||
&mut window.write().unwrap(),
|
||||
*receive_index as usize,
|
||||
blobs_len,
|
||||
).map_err(|_| Error::GenericError)?;
|
||||
}
|
||||
|
||||
*receive_index += blobs_len as u64;
|
||||
|
||||
// Send blobs out from the window
|
||||
Crdt::broadcast(crdt, &window, &sock, transmit_index, *receive_index)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Service to broadcast messages from the leader to layer 1 nodes.
|
||||
/// See `crdt` for network layer definitions.
|
||||
/// # Arguments
|
||||
/// * `sock` - Socket to send from.
|
||||
/// * `exit` - Boolean to signal system exit.
|
||||
/// * `crdt` - CRDT structure
|
||||
/// * `window` - Cache of blobs that we have broadcast
|
||||
/// * `recycler` - Blob recycler.
|
||||
/// * `r` - Receive channel for blobs to be retransmitted to all the layer 1 nodes.
|
||||
pub fn broadcaster(
|
||||
sock: UdpSocket,
|
||||
exit: Arc<AtomicBool>,
|
||||
crdt: Arc<RwLock<Crdt>>,
|
||||
window: Window,
|
||||
entry_height: u64,
|
||||
recycler: BlobRecycler,
|
||||
r: BlobReceiver,
|
||||
) -> JoinHandle<()> {
|
||||
Builder::new()
|
||||
.name("solana-broadcaster".to_string())
|
||||
.spawn(move || {
|
||||
let mut transmit_index = entry_height;
|
||||
let mut receive_index = entry_height;
|
||||
loop {
|
||||
if exit.load(Ordering::Relaxed) {
|
||||
break;
|
||||
}
|
||||
let _ = broadcast(
|
||||
&crdt,
|
||||
&window,
|
||||
&recycler,
|
||||
&r,
|
||||
&sock,
|
||||
&mut transmit_index,
|
||||
&mut receive_index,
|
||||
);
|
||||
}
|
||||
})
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
fn retransmit(
|
||||
crdt: &Arc<RwLock<Crdt>>,
|
||||
recycler: &BlobRecycler,
|
||||
r: &BlobReceiver,
|
||||
sock: &UdpSocket,
|
||||
) -> Result<()> {
|
||||
let timer = Duration::new(1, 0);
|
||||
let mut dq = r.recv_timeout(timer)?;
|
||||
while let Ok(mut nq) = r.try_recv() {
|
||||
dq.append(&mut nq);
|
||||
}
|
||||
{
|
||||
for b in &dq {
|
||||
Crdt::retransmit(&crdt, b, sock)?;
|
||||
}
|
||||
}
|
||||
while let Some(b) = dq.pop_front() {
|
||||
recycler.recycle(b);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Service to retransmit messages from the leader to layer 1 nodes.
|
||||
/// See `crdt` for network layer definitions.
|
||||
/// # Arguments
|
||||
/// * `sock` - Socket to read from. Read timeout is set to 1.
|
||||
/// * `exit` - Boolean to signal system exit.
|
||||
/// * `crdt` - This structure needs to be updated and populated by the bank and via gossip.
|
||||
/// * `recycler` - Blob recycler.
|
||||
/// * `r` - Receive channel for blobs to be retransmitted to all the layer 1 nodes.
|
||||
pub fn retransmitter(
|
||||
sock: UdpSocket,
|
||||
exit: Arc<AtomicBool>,
|
||||
crdt: Arc<RwLock<Crdt>>,
|
||||
recycler: BlobRecycler,
|
||||
r: BlobReceiver,
|
||||
) -> JoinHandle<()> {
|
||||
Builder::new()
|
||||
.name("solana-retransmitter".to_string())
|
||||
.spawn(move || {
|
||||
trace!("retransmitter started");
|
||||
loop {
|
||||
if exit.load(Ordering::Relaxed) {
|
||||
break;
|
||||
}
|
||||
// TODO: handle this error
|
||||
let _ = retransmit(&crdt, &recycler, &r, &sock);
|
||||
}
|
||||
trace!("exiting retransmitter");
|
||||
})
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
#[cfg(all(feature = "unstable", test))]
|
||||
mod bench {
|
||||
extern crate test;
|
||||
use self::test::Bencher;
|
||||
use packet::{Packet, PacketRecycler, BLOB_SIZE, PACKET_DATA_SIZE};
|
||||
use result::Result;
|
||||
use std::net::{SocketAddr, UdpSocket};
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::sync::mpsc::channel;
|
||||
use std::sync::{Arc, Mutex};
|
||||
use std::thread::sleep;
|
||||
use std::thread::{spawn, JoinHandle};
|
||||
use std::time::Duration;
|
||||
use std::time::SystemTime;
|
||||
use streamer::{receiver, PacketReceiver};
|
||||
|
||||
fn producer(
|
||||
addr: &SocketAddr,
|
||||
recycler: PacketRecycler,
|
||||
exit: Arc<AtomicBool>,
|
||||
) -> JoinHandle<()> {
|
||||
let send = UdpSocket::bind("0.0.0.0:0").unwrap();
|
||||
let msgs = recycler.allocate();
|
||||
let msgs_ = msgs.clone();
|
||||
msgs.write().unwrap().packets.resize(10, Packet::default());
|
||||
for w in msgs.write().unwrap().packets.iter_mut() {
|
||||
w.meta.size = PACKET_DATA_SIZE;
|
||||
w.meta.set_addr(&addr);
|
||||
}
|
||||
spawn(move || loop {
|
||||
if exit.load(Ordering::Relaxed) {
|
||||
return;
|
||||
}
|
||||
let mut num = 0;
|
||||
for p in msgs_.read().unwrap().packets.iter() {
|
||||
let a = p.meta.addr();
|
||||
assert!(p.meta.size < BLOB_SIZE);
|
||||
send.send_to(&p.data[..p.meta.size], &a).unwrap();
|
||||
num += 1;
|
||||
}
|
||||
assert_eq!(num, 10);
|
||||
})
|
||||
}
|
||||
|
||||
fn sink(
|
||||
recycler: PacketRecycler,
|
||||
exit: Arc<AtomicBool>,
|
||||
rvs: Arc<Mutex<usize>>,
|
||||
r: PacketReceiver,
|
||||
) -> JoinHandle<()> {
|
||||
spawn(move || loop {
|
||||
if exit.load(Ordering::Relaxed) {
|
||||
return;
|
||||
}
|
||||
let timer = Duration::new(1, 0);
|
||||
match r.recv_timeout(timer) {
|
||||
Ok(msgs) => {
|
||||
*rvs.lock().unwrap() += msgs.read().unwrap().packets.len();
|
||||
recycler.recycle(msgs);
|
||||
}
|
||||
_ => (),
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
fn bench_streamer_with_result() -> Result<()> {
|
||||
let read = UdpSocket::bind("127.0.0.1:0")?;
|
||||
read.set_read_timeout(Some(Duration::new(1, 0)))?;
|
||||
|
||||
let addr = read.local_addr()?;
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let pack_recycler = PacketRecycler::default();
|
||||
|
||||
let (s_reader, r_reader) = channel();
|
||||
let t_reader = receiver(read, exit.clone(), pack_recycler.clone(), s_reader);
|
||||
let t_producer1 = producer(&addr, pack_recycler.clone(), exit.clone());
|
||||
let t_producer2 = producer(&addr, pack_recycler.clone(), exit.clone());
|
||||
let t_producer3 = producer(&addr, pack_recycler.clone(), exit.clone());
|
||||
|
||||
let rvs = Arc::new(Mutex::new(0));
|
||||
let t_sink = sink(pack_recycler.clone(), exit.clone(), rvs.clone(), r_reader);
|
||||
|
||||
let start = SystemTime::now();
|
||||
let start_val = *rvs.lock().unwrap();
|
||||
sleep(Duration::new(5, 0));
|
||||
let elapsed = start.elapsed().unwrap();
|
||||
let end_val = *rvs.lock().unwrap();
|
||||
let time = elapsed.as_secs() * 1_000_000_000 + elapsed.subsec_nanos() as u64; // elapsed nanoseconds
|
||||
let ftime = (time as f64) / 1_000_000_000f64; // elapsed seconds
|
||||
let fcount = (end_val - start_val) as f64;
|
||||
trace!("performance: {:?}", fcount / ftime);
|
||||
exit.store(true, Ordering::Relaxed);
|
||||
t_reader.join()?;
|
||||
t_producer1.join()?;
|
||||
t_producer2.join()?;
|
||||
t_producer3.join()?;
|
||||
t_sink.join()?;
|
||||
Ok(())
|
||||
}
|
||||
#[bench]
|
||||
pub fn bench_streamer(_bench: &mut Bencher) {
|
||||
bench_streamer_with_result().unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use crdt::{Crdt, TestNode};
|
||||
use packet::{Blob, BlobRecycler, Packet, PacketRecycler, Packets, PACKET_DATA_SIZE};
|
||||
use std::collections::VecDeque;
|
||||
use std::io;
|
||||
use std::io::Write;
|
||||
use std::net::UdpSocket;
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::sync::mpsc::channel;
|
||||
use std::sync::{Arc, RwLock};
|
||||
use std::time::Duration;
|
||||
use streamer::{blob_receiver, receiver, responder, window};
|
||||
use streamer::{default_window, BlobReceiver, PacketReceiver};
|
||||
|
||||
fn get_msgs(r: PacketReceiver, num: &mut usize) {
|
||||
for _t in 0..5 {
|
||||
let timer = Duration::new(1, 0);
|
||||
match r.recv_timeout(timer) {
|
||||
Ok(m) => *num += m.read().unwrap().packets.len(),
|
||||
e => info!("error {:?}", e),
|
||||
}
|
||||
if *num == 10 {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
#[test]
|
||||
pub fn streamer_debug() {
|
||||
write!(io::sink(), "{:?}", Packet::default()).unwrap();
|
||||
write!(io::sink(), "{:?}", Packets::default()).unwrap();
|
||||
write!(io::sink(), "{:?}", Blob::default()).unwrap();
|
||||
}
|
||||
#[test]
|
||||
pub fn streamer_send_test() {
|
||||
let read = UdpSocket::bind("127.0.0.1:0").expect("bind");
|
||||
read.set_read_timeout(Some(Duration::new(1, 0))).unwrap();
|
||||
|
||||
let addr = read.local_addr().unwrap();
|
||||
let send = UdpSocket::bind("127.0.0.1:0").expect("bind");
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let pack_recycler = PacketRecycler::default();
|
||||
let resp_recycler = BlobRecycler::default();
|
||||
let (s_reader, r_reader) = channel();
|
||||
let t_receiver = receiver(read, exit.clone(), pack_recycler.clone(), s_reader);
|
||||
let (s_responder, r_responder) = channel();
|
||||
let t_responder = responder(send, exit.clone(), resp_recycler.clone(), r_responder);
|
||||
let mut msgs = VecDeque::new();
|
||||
for i in 0..10 {
|
||||
let b = resp_recycler.allocate();
|
||||
{
|
||||
let mut w = b.write().unwrap();
|
||||
w.data[0] = i as u8;
|
||||
w.meta.size = PACKET_DATA_SIZE;
|
||||
w.meta.set_addr(&addr);
|
||||
}
|
||||
msgs.push_back(b);
|
||||
}
|
||||
s_responder.send(msgs).expect("send");
|
||||
let mut num = 0;
|
||||
get_msgs(r_reader, &mut num);
|
||||
assert_eq!(num, 10);
|
||||
exit.store(true, Ordering::Relaxed);
|
||||
t_receiver.join().expect("join");
|
||||
t_responder.join().expect("join");
|
||||
}
|
||||
|
||||
fn get_blobs(r: BlobReceiver, num: &mut usize) {
|
||||
for _t in 0..5 {
|
||||
let timer = Duration::new(1, 0);
|
||||
match r.recv_timeout(timer) {
|
||||
Ok(m) => {
|
||||
for (i, v) in m.iter().enumerate() {
|
||||
assert_eq!(v.read().unwrap().get_index().unwrap() as usize, *num + i);
|
||||
}
|
||||
*num += m.len();
|
||||
}
|
||||
e => info!("error {:?}", e),
|
||||
}
|
||||
if *num == 10 {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
pub fn window_send_test() {
|
||||
let tn = TestNode::new();
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let mut crdt_me = Crdt::new(tn.data.clone());
|
||||
let me_id = crdt_me.my_data().id;
|
||||
crdt_me.set_leader(me_id);
|
||||
let subs = Arc::new(RwLock::new(crdt_me));
|
||||
|
||||
let resp_recycler = BlobRecycler::default();
|
||||
let (s_reader, r_reader) = channel();
|
||||
let t_receiver = blob_receiver(
|
||||
exit.clone(),
|
||||
resp_recycler.clone(),
|
||||
tn.sockets.gossip,
|
||||
s_reader,
|
||||
).unwrap();
|
||||
let (s_window, r_window) = channel();
|
||||
let (s_retransmit, r_retransmit) = channel();
|
||||
let win = default_window();
|
||||
let t_window = window(
|
||||
exit.clone(),
|
||||
subs,
|
||||
win,
|
||||
0,
|
||||
resp_recycler.clone(),
|
||||
r_reader,
|
||||
s_window,
|
||||
s_retransmit,
|
||||
);
|
||||
let (s_responder, r_responder) = channel();
|
||||
let t_responder = responder(
|
||||
tn.sockets.replicate,
|
||||
exit.clone(),
|
||||
resp_recycler.clone(),
|
||||
r_responder,
|
||||
);
|
||||
let mut msgs = VecDeque::new();
|
||||
for v in 0..10 {
|
||||
let i = 9 - v;
|
||||
let b = resp_recycler.allocate();
|
||||
{
|
||||
let mut w = b.write().unwrap();
|
||||
w.set_index(i).unwrap();
|
||||
w.set_id(me_id).unwrap();
|
||||
assert_eq!(i, w.get_index().unwrap());
|
||||
w.meta.size = PACKET_DATA_SIZE;
|
||||
w.meta.set_addr(&tn.data.gossip_addr);
|
||||
}
|
||||
msgs.push_back(b);
|
||||
}
|
||||
s_responder.send(msgs).expect("send");
|
||||
let mut num = 0;
|
||||
get_blobs(r_window, &mut num);
|
||||
assert_eq!(num, 10);
|
||||
let mut q = r_retransmit.recv().unwrap();
|
||||
while let Ok(mut nq) = r_retransmit.try_recv() {
|
||||
q.append(&mut nq);
|
||||
}
|
||||
assert_eq!(q.len(), 10);
|
||||
exit.store(true, Ordering::Relaxed);
|
||||
t_receiver.join().expect("join");
|
||||
t_responder.join().expect("join");
|
||||
t_window.join().expect("join");
|
||||
}
|
||||
}
|
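The receive helpers above (get_msgs and get_blobs) share one pattern: poll the channel with a one-second recv_timeout for a bounded number of attempts, tally what arrived, and stop early once the expected count is reached. A minimal, generic sketch of that pattern; the name drain_until, the plain Vec<T> payload, and the use of println! instead of the log macros are illustrative choices, not part of this change:

use std::sync::mpsc::Receiver;
use std::time::Duration;

/// Poll `r` in one-second steps for up to `attempts` tries, counting the
/// items received, and stop early once `expected` items have arrived.
fn drain_until<T>(r: &Receiver<Vec<T>>, expected: usize, attempts: usize) -> usize {
    let timer = Duration::new(1, 0);
    let mut num = 0;
    for _ in 0..attempts {
        match r.recv_timeout(timer) {
            Ok(batch) => num += batch.len(),
            Err(e) => println!("error {:?}", e),
        }
        if num >= expected {
            break;
        }
    }
    num
}

get_msgs above is the same loop specialized to packet batches, counting the packets inside each received batch rather than the batches themselves.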
src/thin_client.rs (new file, 392 lines)
@@ -0,0 +1,392 @@
|
||||
//! The `thin_client` module is a client-side object that interfaces with
|
||||
//! a server-side TPU. Client code should use this object instead of writing
|
||||
//! messages to the network directly. The binary encoding of its messages is
|
||||
//! unstable and may change in future releases.
|
||||
|
||||
use bincode::{deserialize, serialize};
|
||||
use hash::Hash;
|
||||
use request::{Request, Response};
|
||||
use signature::{KeyPair, PublicKey, Signature};
|
||||
use std::collections::HashMap;
|
||||
use std::io;
|
||||
use std::net::{SocketAddr, UdpSocket};
|
||||
use transaction::Transaction;
|
||||
|
||||
/// An object for querying and sending transactions to the network.
|
||||
pub struct ThinClient {
|
||||
requests_addr: SocketAddr,
|
||||
requests_socket: UdpSocket,
|
||||
transactions_addr: SocketAddr,
|
||||
transactions_socket: UdpSocket,
|
||||
last_id: Option<Hash>,
|
||||
transaction_count: u64,
|
||||
balances: HashMap<PublicKey, i64>,
|
||||
signature_status: bool,
|
||||
}
|
||||
|
||||
impl ThinClient {
|
||||
/// Create a new ThinClient that will interface with Rpu
|
||||
/// over `requests_socket` and `transactions_socket`. To receive responses, the caller must bind `requests_socket`
|
||||
/// to a public address before invoking ThinClient methods.
|
||||
pub fn new(
|
||||
requests_addr: SocketAddr,
|
||||
requests_socket: UdpSocket,
|
||||
transactions_addr: SocketAddr,
|
||||
transactions_socket: UdpSocket,
|
||||
) -> Self {
|
||||
let client = ThinClient {
|
||||
requests_addr,
|
||||
requests_socket,
|
||||
transactions_addr,
|
||||
transactions_socket,
|
||||
last_id: None,
|
||||
transaction_count: 0,
|
||||
balances: HashMap::new(),
|
||||
signature_status: false,
|
||||
};
|
||||
client
|
||||
}
|
||||
|
||||
pub fn recv_response(&self) -> io::Result<Response> {
|
||||
let mut buf = vec![0u8; 1024];
|
||||
trace!("start recv_from");
|
||||
self.requests_socket.recv_from(&mut buf)?;
|
||||
trace!("end recv_from");
|
||||
let resp = deserialize(&buf).expect("deserialize balance in thin_client");
|
||||
Ok(resp)
|
||||
}
|
||||
|
||||
pub fn process_response(&mut self, resp: Response) {
|
||||
match resp {
|
||||
Response::Balance { key, val } => {
|
||||
trace!("Response balance {:?} {:?}", key, val);
|
||||
self.balances.insert(key, val);
|
||||
}
|
||||
Response::LastId { id } => {
|
||||
trace!("Response last_id {:?}", id);
|
||||
self.last_id = Some(id);
|
||||
}
|
||||
Response::TransactionCount { transaction_count } => {
|
||||
trace!("Response transaction count {:?}", transaction_count);
|
||||
self.transaction_count = transaction_count;
|
||||
}
|
||||
Response::SignatureStatus { signature_status } => {
|
||||
self.signature_status = signature_status;
|
||||
match signature_status {
|
||||
true => {
|
||||
trace!("Response found signature");
|
||||
}
|
||||
false => {
|
||||
trace!("Response signature not found");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Send a signed Transaction to the server for processing. This method
|
||||
/// does not wait for a response.
|
||||
pub fn transfer_signed(&self, tx: Transaction) -> io::Result<usize> {
|
||||
let data = serialize(&tx).expect("serialize Transaction in pub fn transfer_signed");
|
||||
self.transactions_socket
|
||||
.send_to(&data, &self.transactions_addr)
|
||||
}
|
||||
|
||||
/// Creates, signs, and processes a Transaction. Useful for writing unit-tests.
|
||||
pub fn transfer(
|
||||
&self,
|
||||
n: i64,
|
||||
keypair: &KeyPair,
|
||||
to: PublicKey,
|
||||
last_id: &Hash,
|
||||
) -> io::Result<Signature> {
|
||||
let tx = Transaction::new(keypair, to, n, *last_id);
|
||||
let sig = tx.sig;
|
||||
self.transfer_signed(tx).map(|_| sig)
|
||||
}
|
||||
|
||||
/// Request the balance of the user holding `pubkey`. This method blocks
|
||||
/// until the server sends a response. If the response packet is dropped
|
||||
/// by the network, this method will hang indefinitely.
|
||||
pub fn get_balance(&mut self, pubkey: &PublicKey) -> io::Result<i64> {
|
||||
trace!("get_balance");
|
||||
let req = Request::GetBalance { key: *pubkey };
|
||||
let data = serialize(&req).expect("serialize GetBalance in pub fn get_balance");
|
||||
self.requests_socket
|
||||
.send_to(&data, &self.requests_addr)
|
||||
.expect("buffer error in pub fn get_balance");
|
||||
let mut done = false;
|
||||
while !done {
|
||||
let resp = self.recv_response()?;
|
||||
trace!("recv_response {:?}", resp);
|
||||
if let Response::Balance { key, .. } = &resp {
|
||||
done = key == pubkey;
|
||||
}
|
||||
self.process_response(resp);
|
||||
}
|
||||
self.balances
|
||||
.get(pubkey)
|
||||
.map(|x| *x)
|
||||
.ok_or(io::Error::new(io::ErrorKind::Other, "nokey"))
|
||||
}
|
||||
|
||||
/// Request the transaction count. If the response packet is dropped by the network,
|
||||
/// this method will hang.
|
||||
pub fn transaction_count(&mut self) -> u64 {
|
||||
info!("transaction_count");
|
||||
let req = Request::GetTransactionCount;
|
||||
let data =
|
||||
serialize(&req).expect("serialize GetTransactionCount in pub fn transaction_count");
|
||||
let mut done = false;
|
||||
while !done {
|
||||
self.requests_socket
|
||||
.send_to(&data, &self.requests_addr)
|
||||
.expect("buffer error in pub fn transaction_count");
|
||||
|
||||
if let Ok(resp) = self.recv_response() {
|
||||
info!("recv_response {:?}", resp);
|
||||
if let &Response::TransactionCount { .. } = &resp {
|
||||
done = true;
|
||||
}
|
||||
self.process_response(resp);
|
||||
}
|
||||
}
|
||||
self.transaction_count
|
||||
}
|
||||
|
||||
/// Request the last Entry ID from the server. This method blocks
|
||||
/// until the server sends a response.
|
||||
pub fn get_last_id(&mut self) -> Hash {
|
||||
trace!("get_last_id");
|
||||
let req = Request::GetLastId;
|
||||
let data = serialize(&req).expect("serialize GetLastId in pub fn get_last_id");
|
||||
let mut done = false;
|
||||
while !done {
|
||||
debug!("get_last_id send_to {}", &self.requests_addr);
|
||||
self.requests_socket
|
||||
.send_to(&data, &self.requests_addr)
|
||||
.expect("buffer error in pub fn get_last_id");
|
||||
|
||||
match self.recv_response() {
|
||||
Ok(resp) => {
|
||||
if let &Response::LastId { .. } = &resp {
|
||||
done = true;
|
||||
}
|
||||
self.process_response(resp);
|
||||
}
|
||||
Err(e) => {
|
||||
debug!("thin_client get_last_id error: {}", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
self.last_id.expect("some last_id")
|
||||
}
|
||||
|
||||
pub fn poll_get_balance(&mut self, pubkey: &PublicKey) -> io::Result<i64> {
|
||||
use std::time::Instant;
|
||||
|
||||
let mut balance;
|
||||
let now = Instant::now();
|
||||
loop {
|
||||
balance = self.get_balance(pubkey);
|
||||
if balance.is_ok() && *balance.as_ref().unwrap() != 0 || now.elapsed().as_secs() > 1 {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
balance
|
||||
}
|
||||
|
||||
/// Check a signature in the bank. This method blocks
|
||||
/// until the server sends a response.
|
||||
pub fn check_signature(&mut self, sig: &Signature) -> bool {
|
||||
trace!("check_signature");
|
||||
let req = Request::GetSignature { signature: *sig };
|
||||
let data = serialize(&req).expect("serialize GetSignature in pub fn check_signature");
|
||||
let mut done = false;
|
||||
while !done {
|
||||
self.requests_socket
|
||||
.send_to(&data, &self.requests_addr)
|
||||
.expect("buffer error in pub fn get_last_id");
|
||||
|
||||
if let Ok(resp) = self.recv_response() {
|
||||
if let &Response::SignatureStatus { .. } = &resp {
|
||||
done = true;
|
||||
}
|
||||
self.process_response(resp);
|
||||
}
|
||||
}
|
||||
self.signature_status
|
||||
}
|
||||
}
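Putting the methods above together, a typical client session binds two local UDP sockets, points them at the leader's request and transaction ports, fetches a recent last_id, transfers, and then polls for the balance. A condensed sketch that reuses this module's imports; the function name demo_transfer and its arguments (the leader addresses, a funded alice keypair, a bob_pubkey recipient) are placeholders supplied by the caller:

// Hypothetical end-to-end client flow; error handling reduced to `?`.
fn demo_transfer(
    leader_requests_addr: SocketAddr,
    leader_transactions_addr: SocketAddr,
    alice: &KeyPair,
    bob_pubkey: PublicKey,
) -> io::Result<i64> {
    let requests_socket = UdpSocket::bind("0.0.0.0:0")?;
    let transactions_socket = UdpSocket::bind("0.0.0.0:0")?;
    let mut client = ThinClient::new(
        leader_requests_addr,
        requests_socket,
        leader_transactions_addr,
        transactions_socket,
    );
    let last_id = client.get_last_id();
    let sig = client.transfer(500, alice, bob_pubkey, &last_id)?;
    assert!(client.check_signature(&sig));
    client.poll_get_balance(&bob_pubkey)
}

The test_thin_client test below runs essentially this flow against a local leader node.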
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use bank::Bank;
|
||||
use budget::Budget;
|
||||
use crdt::TestNode;
|
||||
use fullnode::FullNode;
|
||||
use logger;
|
||||
use mint::Mint;
|
||||
use signature::{KeyPair, KeyPairUtil};
|
||||
use std::io::sink;
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::sync::Arc;
|
||||
use std::thread::sleep;
|
||||
use std::time::Duration;
|
||||
use transaction::{Instruction, Plan};
|
||||
|
||||
#[test]
|
||||
fn test_thin_client() {
|
||||
logger::setup();
|
||||
let leader = TestNode::new();
|
||||
|
||||
let alice = Mint::new(10_000);
|
||||
let bank = Bank::new(&alice);
|
||||
let bob_pubkey = KeyPair::new().pubkey();
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
|
||||
let server = FullNode::new_leader(
|
||||
bank,
|
||||
0,
|
||||
Some(Duration::from_millis(30)),
|
||||
leader.data.clone(),
|
||||
leader.sockets.requests,
|
||||
leader.sockets.transaction,
|
||||
leader.sockets.broadcast,
|
||||
leader.sockets.respond,
|
||||
leader.sockets.gossip,
|
||||
exit.clone(),
|
||||
sink(),
|
||||
);
|
||||
sleep(Duration::from_millis(900));
|
||||
|
||||
let requests_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
|
||||
let transactions_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
|
||||
|
||||
let mut client = ThinClient::new(
|
||||
leader.data.requests_addr,
|
||||
requests_socket,
|
||||
leader.data.transactions_addr,
|
||||
transactions_socket,
|
||||
);
|
||||
let last_id = client.get_last_id();
|
||||
let _sig = client
|
||||
.transfer(500, &alice.keypair(), bob_pubkey, &last_id)
|
||||
.unwrap();
|
||||
let balance = client.poll_get_balance(&bob_pubkey);
|
||||
assert_eq!(balance.unwrap(), 500);
|
||||
exit.store(true, Ordering::Relaxed);
|
||||
for t in server.thread_hdls {
|
||||
t.join().unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_bad_sig() {
|
||||
logger::setup();
|
||||
let leader = TestNode::new();
|
||||
let alice = Mint::new(10_000);
|
||||
let bank = Bank::new(&alice);
|
||||
let bob_pubkey = KeyPair::new().pubkey();
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
|
||||
let server = FullNode::new_leader(
|
||||
bank,
|
||||
0,
|
||||
Some(Duration::from_millis(30)),
|
||||
leader.data.clone(),
|
||||
leader.sockets.requests,
|
||||
leader.sockets.transaction,
|
||||
leader.sockets.broadcast,
|
||||
leader.sockets.respond,
|
||||
leader.sockets.gossip,
|
||||
exit.clone(),
|
||||
sink(),
|
||||
);
|
||||
sleep(Duration::from_millis(300));
|
||||
|
||||
let requests_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
|
||||
requests_socket
|
||||
.set_read_timeout(Some(Duration::new(5, 0)))
|
||||
.unwrap();
|
||||
let transactions_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
|
||||
let mut client = ThinClient::new(
|
||||
leader.data.requests_addr,
|
||||
requests_socket,
|
||||
leader.data.transactions_addr,
|
||||
transactions_socket,
|
||||
);
|
||||
let last_id = client.get_last_id();
|
||||
|
||||
let tx = Transaction::new(&alice.keypair(), bob_pubkey, 500, last_id);
|
||||
|
||||
let _sig = client.transfer_signed(tx).unwrap();
|
||||
|
||||
let last_id = client.get_last_id();
|
||||
|
||||
let mut tr2 = Transaction::new(&alice.keypair(), bob_pubkey, 501, last_id);
|
||||
if let Instruction::NewContract(contract) = &mut tr2.instruction {
|
||||
contract.tokens = 502;
|
||||
contract.plan = Plan::Budget(Budget::new_payment(502, bob_pubkey));
|
||||
}
|
||||
let _sig = client.transfer_signed(tr2).unwrap();
|
||||
|
||||
let balance = client.poll_get_balance(&bob_pubkey);
|
||||
assert_eq!(balance.unwrap(), 500);
|
||||
exit.store(true, Ordering::Relaxed);
|
||||
for t in server.thread_hdls {
|
||||
t.join().unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_client_check_signature() {
|
||||
logger::setup();
|
||||
let leader = TestNode::new();
|
||||
let alice = Mint::new(10_000);
|
||||
let bank = Bank::new(&alice);
|
||||
let bob_pubkey = KeyPair::new().pubkey();
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
|
||||
let server = FullNode::new_leader(
|
||||
bank,
|
||||
0,
|
||||
Some(Duration::from_millis(30)),
|
||||
leader.data.clone(),
|
||||
leader.sockets.requests,
|
||||
leader.sockets.transaction,
|
||||
leader.sockets.broadcast,
|
||||
leader.sockets.respond,
|
||||
leader.sockets.gossip,
|
||||
exit.clone(),
|
||||
sink(),
|
||||
);
|
||||
sleep(Duration::from_millis(300));
|
||||
|
||||
let requests_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
|
||||
requests_socket
|
||||
.set_read_timeout(Some(Duration::new(5, 0)))
|
||||
.unwrap();
|
||||
let transactions_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
|
||||
let mut client = ThinClient::new(
|
||||
leader.data.requests_addr,
|
||||
requests_socket,
|
||||
leader.data.transactions_addr,
|
||||
transactions_socket,
|
||||
);
|
||||
let last_id = client.get_last_id();
|
||||
let sig = client
|
||||
.transfer(500, &alice.keypair(), bob_pubkey, &last_id)
|
||||
.unwrap();
|
||||
sleep(Duration::from_millis(100));
|
||||
|
||||
assert!(client.check_signature(&sig));
|
||||
|
||||
exit.store(true, Ordering::Relaxed);
|
||||
for t in server.thread_hdls {
|
||||
t.join().unwrap();
|
||||
}
|
||||
}
|
||||
}
|
src/timing.rs (new file, 22 lines)
@@ -0,0 +1,22 @@
//! The `timing` module provides std::time utility functions.
use std::time::Duration;
use std::time::{SystemTime, UNIX_EPOCH};

pub fn duration_as_us(d: &Duration) -> u64 {
    return (d.as_secs() * 1000 * 1000) + (d.subsec_nanos() as u64 / 1_000);
}

pub fn duration_as_ms(d: &Duration) -> u64 {
    return (d.as_secs() * 1000) + (d.subsec_nanos() as u64 / 1_000_000);
}

pub fn duration_as_s(d: &Duration) -> f32 {
    return d.as_secs() as f32 + (d.subsec_nanos() as f32 / 1_000_000_000.0);
}

pub fn timestamp() -> u64 {
    let now = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("create timestamp in timing");
    return duration_as_ms(&now);
}
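A small usage sketch of the conversions above; the helper name timed and the closure it runs are illustrative only:

use std::time::Instant;

// Hypothetical helper: run `work` and report how long it took in
// milliseconds, using duration_as_ms() above.
fn timed<F: FnOnce()>(work: F) -> u64 {
    let start = Instant::now();
    work();
    duration_as_ms(&start.elapsed())
}
// e.g. `let ms = timed(|| do_expensive_call());` where `do_expensive_call` is a placeholder.
// timestamp() gives the same millisecond resolution, measured from UNIX_EPOCH.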
src/tpu.rs (new file, 94 lines)
@@ -0,0 +1,94 @@
|
||||
//! The `tpu` module implements the Transaction Processing Unit, a
|
||||
//! 5-stage transaction processing pipeline in software.
|
||||
//!
|
||||
//! ```text
|
||||
//! .---------------------------------------------------------------.
|
||||
//! | TPU .-----. |
|
||||
//! | | PoH | |
|
||||
//! | `--+--` |
|
||||
//! | | |
|
||||
//! | v |
|
||||
//! | .-------. .-----------. .---------. .--------. .-------. |
|
||||
//! .---------. | | Fetch | | SigVerify | | Banking | | Record | | Write | | .------------.
|
||||
//! | Clients |--->| Stage |->| Stage |->| Stage |->| Stage |->| Stage +--->| Validators |
|
||||
//! `---------` | | | | | | | | | | | | `------------`
|
||||
//! | `-------` `-----------` `----+----` `--------` `---+---` |
|
||||
//! | | | |
|
||||
//! | | | |
|
||||
//! | | | |
|
||||
//! | | | |
|
||||
//! `---------------------------------|-----------------------|-----`
|
||||
//! | |
|
||||
//! v v
|
||||
//! .------. .--------.
|
||||
//! | Bank | | Ledger |
|
||||
//! `------` `--------`
|
||||
//! ```
|
||||
|
||||
use bank::Bank;
|
||||
use banking_stage::BankingStage;
|
||||
use fetch_stage::FetchStage;
|
||||
use packet::{BlobRecycler, PacketRecycler};
|
||||
use record_stage::RecordStage;
|
||||
use sigverify_stage::SigVerifyStage;
|
||||
use std::io::Write;
|
||||
use std::net::UdpSocket;
|
||||
use std::sync::atomic::AtomicBool;
|
||||
use std::sync::Arc;
|
||||
use std::thread::JoinHandle;
|
||||
use std::time::Duration;
|
||||
use streamer::BlobReceiver;
|
||||
use write_stage::WriteStage;
|
||||
|
||||
pub struct Tpu {
|
||||
pub thread_hdls: Vec<JoinHandle<()>>,
|
||||
}
|
||||
|
||||
impl Tpu {
|
||||
pub fn new<W: Write + Send + 'static>(
|
||||
bank: Arc<Bank>,
|
||||
tick_duration: Option<Duration>,
|
||||
transactions_socket: UdpSocket,
|
||||
blob_recycler: BlobRecycler,
|
||||
exit: Arc<AtomicBool>,
|
||||
writer: W,
|
||||
) -> (Self, BlobReceiver) {
|
||||
let packet_recycler = PacketRecycler::default();
|
||||
|
||||
let (fetch_stage, packet_receiver) =
|
||||
FetchStage::new(transactions_socket, exit.clone(), packet_recycler.clone());
|
||||
|
||||
let (sigverify_stage, verified_receiver) =
|
||||
SigVerifyStage::new(exit.clone(), packet_receiver);
|
||||
|
||||
let (banking_stage, signal_receiver) = BankingStage::new(
|
||||
bank.clone(),
|
||||
exit.clone(),
|
||||
verified_receiver,
|
||||
packet_recycler.clone(),
|
||||
);
|
||||
|
||||
let (record_stage, entry_receiver) = match tick_duration {
|
||||
Some(tick_duration) => {
|
||||
RecordStage::new_with_clock(signal_receiver, &bank.last_id(), tick_duration)
|
||||
}
|
||||
None => RecordStage::new(signal_receiver, &bank.last_id()),
|
||||
};
|
||||
|
||||
let (write_stage, blob_receiver) = WriteStage::new(
|
||||
bank.clone(),
|
||||
exit.clone(),
|
||||
blob_recycler.clone(),
|
||||
writer,
|
||||
entry_receiver,
|
||||
);
|
||||
let mut thread_hdls = vec![
|
||||
banking_stage.thread_hdl,
|
||||
record_stage.thread_hdl,
|
||||
write_stage.thread_hdl,
|
||||
];
|
||||
thread_hdls.extend(fetch_stage.thread_hdls.into_iter());
|
||||
thread_hdls.extend(sigverify_stage.thread_hdls.into_iter());
|
||||
(Tpu { thread_hdls }, blob_receiver)
|
||||
}
|
||||
}
|
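As the constructor shows, the stages are chained by handing each stage's receiver to the next and collecting every thread handle. A rough wiring sketch under the same assumptions as the tests elsewhere in this change (a 30 ms PoH tick and a discarded ledger writer); the helper name run_tpu is illustrative and not part of this change:

// Hypothetical wiring helper: start a TPU over the given bank and
// transactions socket, discard the ledger output, then shut it back down.
fn run_tpu(bank: Arc<Bank>, transactions_socket: UdpSocket) {
    let exit = Arc::new(AtomicBool::new(false));
    let blob_recycler = BlobRecycler::default();
    // `_blob_receiver` yields the blobs the write stage produced, ready for broadcast.
    let (tpu, _blob_receiver) = Tpu::new(
        bank,
        Some(Duration::from_millis(30)), // None would disable the PoH clock
        transactions_socket,
        blob_recycler,
        exit.clone(),
        ::std::io::sink(),
    );
    exit.store(true, ::std::sync::atomic::Ordering::Relaxed);
    for t in tpu.thread_hdls {
        t.join().unwrap();
    }
}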
src/transaction.rs (new file, 328 lines)
@@ -0,0 +1,328 @@
|
||||
//! The `transaction` module provides functionality for creating log transactions.
|
||||
|
||||
use bincode::serialize;
|
||||
use budget::{Budget, Condition};
|
||||
use chrono::prelude::*;
|
||||
use hash::Hash;
|
||||
use payment_plan::{Payment, PaymentPlan, Witness};
|
||||
use signature::{KeyPair, KeyPairUtil, PublicKey, Signature, SignatureUtil};
|
||||
|
||||
pub const SIGNED_DATA_OFFSET: usize = 112;
|
||||
pub const SIG_OFFSET: usize = 8;
|
||||
pub const PUB_KEY_OFFSET: usize = 80;
|
||||
|
||||
/// The type of payment plan. Each item must implement the PaymentPlan trait.
|
||||
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
|
||||
pub enum Plan {
|
||||
/// The builtin contract language Budget.
|
||||
Budget(Budget),
|
||||
}
|
||||
|
||||
// A proxy for the underlying DSL.
|
||||
impl PaymentPlan for Plan {
|
||||
fn final_payment(&self) -> Option<Payment> {
|
||||
match self {
|
||||
Plan::Budget(budget) => budget.final_payment(),
|
||||
}
|
||||
}
|
||||
|
||||
fn verify(&self, spendable_tokens: i64) -> bool {
|
||||
match self {
|
||||
Plan::Budget(budget) => budget.verify(spendable_tokens),
|
||||
}
|
||||
}
|
||||
|
||||
fn apply_witness(&mut self, witness: &Witness) {
|
||||
match self {
|
||||
Plan::Budget(budget) => budget.apply_witness(witness),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A smart contract.
|
||||
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
|
||||
pub struct Contract {
|
||||
/// The number of tokens allocated to the `Plan` and any transaction fees.
|
||||
pub tokens: i64,
|
||||
pub plan: Plan,
|
||||
}
|
||||
|
||||
/// An instruction to progress the smart contract.
|
||||
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
|
||||
pub enum Instruction {
|
||||
/// Declare and instantiate a `Contract`.
|
||||
NewContract(Contract),
|
||||
|
||||
/// Tell the payment plan to acknowledge that the given `DateTime` has passed.
|
||||
ApplyTimestamp(DateTime<Utc>),
|
||||
|
||||
/// Tell the payment plan that the `NewContract` with `Signature` has been
|
||||
/// signed by the containing transaction's `PublicKey`.
|
||||
ApplySignature(Signature),
|
||||
}
|
||||
|
||||
/// An instruction signed by a client with `PublicKey`.
|
||||
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
|
||||
pub struct Transaction {
|
||||
/// A digital signature of `instruction`, `last_id` and `fee`, signed by `PublicKey`.
|
||||
pub sig: Signature,
|
||||
|
||||
/// The `PublicKey` of the entity that signed the transaction data.
|
||||
pub from: PublicKey,
|
||||
|
||||
/// The action the server should take.
|
||||
pub instruction: Instruction,
|
||||
|
||||
/// The ID of a recent ledger entry.
|
||||
pub last_id: Hash,
|
||||
|
||||
/// The number of tokens paid for processing and storage of this transaction.
|
||||
pub fee: i64,
|
||||
}
|
||||
|
||||
impl Transaction {
|
||||
/// Create a signed transaction from the given `Instruction`.
|
||||
fn new_from_instruction(
|
||||
from_keypair: &KeyPair,
|
||||
instruction: Instruction,
|
||||
last_id: Hash,
|
||||
fee: i64,
|
||||
) -> Self {
|
||||
let from = from_keypair.pubkey();
|
||||
let mut tx = Transaction {
|
||||
sig: Signature::default(),
|
||||
instruction,
|
||||
last_id,
|
||||
from,
|
||||
fee,
|
||||
};
|
||||
tx.sign(from_keypair);
|
||||
tx
|
||||
}
|
||||
|
||||
/// Create and sign a new Transaction. Used for unit-testing.
|
||||
pub fn new_taxed(
|
||||
from_keypair: &KeyPair,
|
||||
to: PublicKey,
|
||||
tokens: i64,
|
||||
fee: i64,
|
||||
last_id: Hash,
|
||||
) -> Self {
|
||||
let payment = Payment {
|
||||
tokens: tokens - fee,
|
||||
to,
|
||||
};
|
||||
let budget = Budget::Pay(payment);
|
||||
let plan = Plan::Budget(budget);
|
||||
let instruction = Instruction::NewContract(Contract { plan, tokens });
|
||||
Self::new_from_instruction(from_keypair, instruction, last_id, fee)
|
||||
}
|
||||
|
||||
/// Create and sign a new Transaction. Used for unit-testing.
|
||||
pub fn new(from_keypair: &KeyPair, to: PublicKey, tokens: i64, last_id: Hash) -> Self {
|
||||
Self::new_taxed(from_keypair, to, tokens, 0, last_id)
|
||||
}
|
||||
|
||||
/// Create and sign a new Witness Timestamp. Used for unit-testing.
|
||||
pub fn new_timestamp(from_keypair: &KeyPair, dt: DateTime<Utc>, last_id: Hash) -> Self {
|
||||
let instruction = Instruction::ApplyTimestamp(dt);
|
||||
Self::new_from_instruction(from_keypair, instruction, last_id, 0)
|
||||
}
|
||||
|
||||
/// Create and sign a new Witness Signature. Used for unit-testing.
|
||||
pub fn new_signature(from_keypair: &KeyPair, tx_sig: Signature, last_id: Hash) -> Self {
|
||||
let instruction = Instruction::ApplySignature(tx_sig);
|
||||
Self::new_from_instruction(from_keypair, instruction, last_id, 0)
|
||||
}
|
||||
|
||||
/// Create and sign a postdated Transaction. Used for unit-testing.
|
||||
pub fn new_on_date(
|
||||
from_keypair: &KeyPair,
|
||||
to: PublicKey,
|
||||
dt: DateTime<Utc>,
|
||||
tokens: i64,
|
||||
last_id: Hash,
|
||||
) -> Self {
|
||||
let from = from_keypair.pubkey();
|
||||
let budget = Budget::Or(
|
||||
(Condition::Timestamp(dt), Payment { tokens, to }),
|
||||
(Condition::Signature(from), Payment { tokens, to: from }),
|
||||
);
|
||||
let plan = Plan::Budget(budget);
|
||||
let instruction = Instruction::NewContract(Contract { plan, tokens });
|
||||
Self::new_from_instruction(from_keypair, instruction, last_id, 0)
|
||||
}
|
||||
|
||||
/// Get the transaction data to sign.
|
||||
fn get_sign_data(&self) -> Vec<u8> {
|
||||
let mut data = serialize(&(&self.instruction)).expect("serialize Contract");
|
||||
let last_id_data = serialize(&(&self.last_id)).expect("serialize last_id");
|
||||
data.extend_from_slice(&last_id_data);
|
||||
|
||||
let fee_data = serialize(&(&self.fee)).expect("serialize fee");
|
||||
data.extend_from_slice(&fee_data);
|
||||
|
||||
data
|
||||
}
|
||||
|
||||
/// Sign this transaction.
|
||||
pub fn sign(&mut self, keypair: &KeyPair) {
|
||||
let sign_data = self.get_sign_data();
|
||||
self.sig = Signature::clone_from_slice(keypair.sign(&sign_data).as_ref());
|
||||
}
|
||||
|
||||
/// Verify only the transaction signature.
|
||||
pub fn verify_sig(&self) -> bool {
|
||||
warn!("transaction signature verification called");
|
||||
self.sig.verify(&self.from, &self.get_sign_data())
|
||||
}
|
||||
|
||||
/// Verify only the payment plan.
|
||||
pub fn verify_plan(&self) -> bool {
|
||||
if let Instruction::NewContract(contract) = &self.instruction {
|
||||
self.fee >= 0
|
||||
&& self.fee <= contract.tokens
|
||||
&& contract.plan.verify(contract.tokens - self.fee)
|
||||
} else {
|
||||
true
|
||||
}
|
||||
}
|
||||
}
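A short sketch of the lifecycle these constructors and checks imply: build a transfer, confirm the payment plan accounts for the tokens, and confirm the signature covers the instruction, last_id and fee. The helper name demo_transfer_tx is illustrative; the keypairs and the default hash are placeholders, exactly as in the unit tests below, and the module's own imports are assumed:

// Hypothetical round-trip check, mirroring the tests below.
fn demo_transfer_tx() {
    let keypair0 = KeyPair::new();
    let pubkey1 = KeyPair::new().pubkey();
    let last_id = Hash::default(); // placeholder; normally a recent ledger entry ID

    // 42 tokens, no fee: the Budget plan must account for exactly the contract's tokens.
    let tx = Transaction::new(&keypair0, pubkey1, 42, last_id);
    assert!(tx.verify_plan());
    assert!(tx.verify_sig());

    // With a fee, the plan only carries `tokens - fee`, and the fee must lie in 0..=tokens.
    let taxed = Transaction::new_taxed(&keypair0, pubkey1, 42, 1, last_id);
    assert!(taxed.verify_plan());
}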
|
||||
|
||||
#[cfg(test)]
|
||||
pub fn test_tx() -> Transaction {
|
||||
let keypair1 = KeyPair::new();
|
||||
let pubkey1 = keypair1.pubkey();
|
||||
let zero = Hash::default();
|
||||
Transaction::new(&keypair1, pubkey1, 42, zero)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub fn memfind<A: Eq>(a: &[A], b: &[A]) -> Option<usize> {
|
||||
assert!(a.len() >= b.len());
|
||||
let end = a.len() - b.len() + 1;
|
||||
for i in 0..end {
|
||||
if a[i..i + b.len()] == b[..] {
|
||||
return Some(i);
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use bincode::{deserialize, serialize};
|
||||
|
||||
#[test]
|
||||
fn test_claim() {
|
||||
let keypair = KeyPair::new();
|
||||
let zero = Hash::default();
|
||||
let tx0 = Transaction::new(&keypair, keypair.pubkey(), 42, zero);
|
||||
assert!(tx0.verify_plan());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_transfer() {
|
||||
let zero = Hash::default();
|
||||
let keypair0 = KeyPair::new();
|
||||
let keypair1 = KeyPair::new();
|
||||
let pubkey1 = keypair1.pubkey();
|
||||
let tx0 = Transaction::new(&keypair0, pubkey1, 42, zero);
|
||||
assert!(tx0.verify_plan());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_transfer_with_fee() {
|
||||
let zero = Hash::default();
|
||||
let keypair0 = KeyPair::new();
|
||||
let pubkey1 = KeyPair::new().pubkey();
|
||||
assert!(Transaction::new_taxed(&keypair0, pubkey1, 1, 1, zero).verify_plan());
|
||||
assert!(!Transaction::new_taxed(&keypair0, pubkey1, 1, 2, zero).verify_plan());
|
||||
assert!(!Transaction::new_taxed(&keypair0, pubkey1, 1, -1, zero).verify_plan());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_serialize_claim() {
|
||||
let budget = Budget::Pay(Payment {
|
||||
tokens: 0,
|
||||
to: Default::default(),
|
||||
});
|
||||
let plan = Plan::Budget(budget);
|
||||
let instruction = Instruction::NewContract(Contract { plan, tokens: 0 });
|
||||
let claim0 = Transaction {
|
||||
instruction,
|
||||
from: Default::default(),
|
||||
last_id: Default::default(),
|
||||
sig: Default::default(),
|
||||
fee: 0,
|
||||
};
|
||||
let buf = serialize(&claim0).unwrap();
|
||||
let claim1: Transaction = deserialize(&buf).unwrap();
|
||||
assert_eq!(claim1, claim0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_token_attack() {
|
||||
let zero = Hash::default();
|
||||
let keypair = KeyPair::new();
|
||||
let pubkey = keypair.pubkey();
|
||||
let mut tx = Transaction::new(&keypair, pubkey, 42, zero);
|
||||
if let Instruction::NewContract(contract) = &mut tx.instruction {
|
||||
contract.tokens = 1_000_000; // <-- attack, part 1!
|
||||
if let Plan::Budget(Budget::Pay(ref mut payment)) = contract.plan {
|
||||
payment.tokens = contract.tokens; // <-- attack, part 2!
|
||||
}
|
||||
}
|
||||
assert!(tx.verify_plan());
|
||||
assert!(!tx.verify_sig());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_hijack_attack() {
|
||||
let keypair0 = KeyPair::new();
|
||||
let keypair1 = KeyPair::new();
|
||||
let thief_keypair = KeyPair::new();
|
||||
let pubkey1 = keypair1.pubkey();
|
||||
let zero = Hash::default();
|
||||
let mut tx = Transaction::new(&keypair0, pubkey1, 42, zero);
|
||||
if let Instruction::NewContract(contract) = &mut tx.instruction {
|
||||
if let Plan::Budget(Budget::Pay(ref mut payment)) = contract.plan {
|
||||
payment.to = thief_keypair.pubkey(); // <-- attack!
|
||||
}
|
||||
}
|
||||
assert!(tx.verify_plan());
|
||||
assert!(!tx.verify_sig());
|
||||
}
|
||||
#[test]
|
||||
fn test_layout() {
|
||||
let tx = test_tx();
|
||||
let sign_data = tx.get_sign_data();
|
||||
let tx_bytes = serialize(&tx).unwrap();
|
||||
assert_matches!(memfind(&tx_bytes, &sign_data), Some(SIGNED_DATA_OFFSET));
|
||||
assert_matches!(memfind(&tx_bytes, &tx.sig), Some(SIG_OFFSET));
|
||||
assert_matches!(memfind(&tx_bytes, &tx.from), Some(PUB_KEY_OFFSET));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_overspend_attack() {
|
||||
let keypair0 = KeyPair::new();
|
||||
let keypair1 = KeyPair::new();
|
||||
let zero = Hash::default();
|
||||
let mut tx = Transaction::new(&keypair0, keypair1.pubkey(), 1, zero);
|
||||
if let Instruction::NewContract(contract) = &mut tx.instruction {
|
||||
if let Plan::Budget(Budget::Pay(ref mut payment)) = contract.plan {
|
||||
payment.tokens = 2; // <-- attack!
|
||||
}
|
||||
}
|
||||
assert!(!tx.verify_plan());
|
||||
|
||||
// Also, ensure all branches of the plan spend all tokens
|
||||
if let Instruction::NewContract(contract) = &mut tx.instruction {
|
||||
if let Plan::Budget(Budget::Pay(ref mut payment)) = contract.plan {
|
||||
payment.tokens = 0; // <-- whoops!
|
||||
}
|
||||
}
|
||||
assert!(!tx.verify_plan());
|
||||
}
|
||||
}
|
src/tvu.rs (new file, 282 lines)
@@ -0,0 +1,282 @@
|
||||
//! The `tvu` module implements the Transaction Validation Unit, a
|
||||
//! 3-stage transaction validation pipeline in software.
|
||||
//!
|
||||
//! ```text
|
||||
//! .------------------------------------------.
|
||||
//! | TVU |
|
||||
//! | |
|
||||
//! | | .------------.
|
||||
//! | .------------------------>| Validators |
|
||||
//! | .-------. | | `------------`
|
||||
//! .--------. | | | .----+---. .-----------. |
|
||||
//! | Leader |--------->| Blob | | Window | | Replicate | |
|
||||
//! `--------` | | Fetch |-->| Stage |-->| Stage | |
|
||||
//! .------------. | | Stage | | | | | |
|
||||
//! | Validators |----->| | `--------` `----+------` |
|
||||
//! `------------` | `-------` | |
|
||||
//! | | |
|
||||
//! | | |
|
||||
//! | | |
|
||||
//! `--------------------------------|---------`
|
||||
//! |
|
||||
//! v
|
||||
//! .------.
|
||||
//! | Bank |
|
||||
//! `------`
|
||||
//! ```
|
||||
//!
|
||||
//! 1. Fetch Stage
|
||||
//! - Incoming blobs are picked up from the replicate socket and repair socket.
|
||||
//! 2. Window Stage
|
||||
//! - Blobs are windowed until a contiguous chunk is available. This stage also repairs and
|
||||
//! retransmits blobs that are in the queue.
|
||||
//! 3. Replicate Stage
|
||||
//! - Transactions in blobs are processed and applied to the bank.
|
||||
//! - TODO We need to verify the signatures in the blobs.
|
||||
|
||||
use bank::Bank;
|
||||
use blob_fetch_stage::BlobFetchStage;
|
||||
use crdt::Crdt;
|
||||
use packet::BlobRecycler;
|
||||
use replicate_stage::ReplicateStage;
|
||||
use std::net::UdpSocket;
|
||||
use std::sync::atomic::AtomicBool;
|
||||
use std::sync::{Arc, RwLock};
|
||||
use std::thread::JoinHandle;
|
||||
use streamer::Window;
|
||||
use window_stage::WindowStage;
|
||||
|
||||
pub struct Tvu {
|
||||
pub thread_hdls: Vec<JoinHandle<()>>,
|
||||
}
|
||||
|
||||
impl Tvu {
|
||||
/// This service receives messages from a leader in the network and processes the transactions
|
||||
/// on the bank state.
|
||||
/// # Arguments
|
||||
/// * `bank` - The bank state.
|
||||
/// * `entry_height` - Initial ledger height, passed to the window stage
|
||||
/// * `crdt` - The crdt state.
|
||||
/// * `window` - The window state.
|
||||
/// * `replicate_socket` - my replicate socket
|
||||
/// * `repair_socket` - my repair socket
|
||||
/// * `retransmit_socket` - my retransmit socket
|
||||
/// * `exit` - The exit signal.
|
||||
pub fn new(
|
||||
bank: Arc<Bank>,
|
||||
entry_height: u64,
|
||||
crdt: Arc<RwLock<Crdt>>,
|
||||
window: Window,
|
||||
replicate_socket: UdpSocket,
|
||||
repair_socket: UdpSocket,
|
||||
retransmit_socket: UdpSocket,
|
||||
exit: Arc<AtomicBool>,
|
||||
) -> Self {
|
||||
let blob_recycler = BlobRecycler::default();
|
||||
let (fetch_stage, blob_receiver) = BlobFetchStage::new_multi_socket(
|
||||
vec![replicate_socket, repair_socket],
|
||||
exit.clone(),
|
||||
blob_recycler.clone(),
|
||||
);
|
||||
//TODO
|
||||
//the packets coming out of blob_receiver need to be sent to the GPU and verified
|
||||
//then sent to the window, which does the erasure coding reconstruction
|
||||
let (window_stage, blob_receiver) = WindowStage::new(
|
||||
crdt,
|
||||
window,
|
||||
entry_height,
|
||||
retransmit_socket,
|
||||
exit.clone(),
|
||||
blob_recycler.clone(),
|
||||
blob_receiver,
|
||||
);
|
||||
|
||||
let replicate_stage = ReplicateStage::new(bank, exit, blob_receiver);
|
||||
|
||||
let mut threads = vec![replicate_stage.thread_hdl];
|
||||
threads.extend(fetch_stage.thread_hdls.into_iter());
|
||||
threads.extend(window_stage.thread_hdls.into_iter());
|
||||
Tvu {
|
||||
thread_hdls: threads,
|
||||
}
|
||||
}
|
||||
}
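Condensing the test below, a validator wires the TVU roughly as follows; the crdt and window normally come from the gossip service (Ncp), the sockets from the node setup (TestNode in the tests), and the helper name run_tvu is illustrative only, reusing this module's imports:

// Hypothetical wiring helper: start the TVU, then signal shutdown and join.
fn run_tvu(
    bank: Arc<Bank>,
    crdt: Arc<RwLock<Crdt>>,
    window: Window,
    replicate_socket: UdpSocket,
    repair_socket: UdpSocket,
    retransmit_socket: UdpSocket,
) {
    let exit = Arc::new(AtomicBool::new(false));
    let tvu = Tvu::new(
        bank,
        0, // entry_height: starting from an empty ledger
        crdt,
        window,
        replicate_socket,
        repair_socket,
        retransmit_socket,
        exit.clone(),
    );
    exit.store(true, ::std::sync::atomic::Ordering::Relaxed);
    for t in tvu.thread_hdls {
        t.join().expect("join");
    }
}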
|
||||
|
||||
#[cfg(test)]
|
||||
pub mod tests {
|
||||
use bank::Bank;
|
||||
use bincode::serialize;
|
||||
use crdt::{Crdt, TestNode};
|
||||
use entry::Entry;
|
||||
use hash::{hash, Hash};
|
||||
use logger;
|
||||
use mint::Mint;
|
||||
use ncp::Ncp;
|
||||
use packet::BlobRecycler;
|
||||
use result::Result;
|
||||
use signature::{KeyPair, KeyPairUtil};
|
||||
use std::collections::VecDeque;
|
||||
use std::net::UdpSocket;
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::sync::mpsc::channel;
|
||||
use std::sync::{Arc, RwLock};
|
||||
use std::time::Duration;
|
||||
use streamer::{self, Window};
|
||||
use transaction::Transaction;
|
||||
use tvu::Tvu;
|
||||
|
||||
fn new_ncp(
|
||||
crdt: Arc<RwLock<Crdt>>,
|
||||
listen: UdpSocket,
|
||||
exit: Arc<AtomicBool>,
|
||||
) -> Result<(Ncp, Window)> {
|
||||
let window = streamer::default_window();
|
||||
let send_sock = UdpSocket::bind("0.0.0.0:0").expect("bind 0");
|
||||
let ncp = Ncp::new(crdt, window.clone(), listen, send_sock, exit)?;
|
||||
Ok((ncp, window))
|
||||
}
|
||||
/// Test that a message sent from the leader to target1 is replicated to target2.
|
||||
#[test]
|
||||
fn test_replicate() {
|
||||
logger::setup();
|
||||
let leader = TestNode::new();
|
||||
let target1 = TestNode::new();
|
||||
let target2 = TestNode::new();
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
|
||||
//start crdt_leader
|
||||
let mut crdt_l = Crdt::new(leader.data.clone());
|
||||
crdt_l.set_leader(leader.data.id);
|
||||
|
||||
let cref_l = Arc::new(RwLock::new(crdt_l));
|
||||
let dr_l = new_ncp(cref_l, leader.sockets.gossip, exit.clone()).unwrap();
|
||||
|
||||
//start crdt2
|
||||
let mut crdt2 = Crdt::new(target2.data.clone());
|
||||
crdt2.insert(&leader.data);
|
||||
crdt2.set_leader(leader.data.id);
|
||||
let leader_id = leader.data.id;
|
||||
let cref2 = Arc::new(RwLock::new(crdt2));
|
||||
let dr_2 = new_ncp(cref2, target2.sockets.gossip, exit.clone()).unwrap();
|
||||
|
||||
// setup some blob services to send blobs into the socket
|
||||
// to simulate the source peer and get blobs out of the socket to
|
||||
// simulate target peer
|
||||
let recv_recycler = BlobRecycler::default();
|
||||
let resp_recycler = BlobRecycler::default();
|
||||
let (s_reader, r_reader) = channel();
|
||||
let t_receiver = streamer::blob_receiver(
|
||||
exit.clone(),
|
||||
recv_recycler.clone(),
|
||||
target2.sockets.replicate,
|
||||
s_reader,
|
||||
).unwrap();
|
||||
|
||||
// simulate leader sending messages
|
||||
let (s_responder, r_responder) = channel();
|
||||
let t_responder = streamer::responder(
|
||||
leader.sockets.requests,
|
||||
exit.clone(),
|
||||
resp_recycler.clone(),
|
||||
r_responder,
|
||||
);
|
||||
|
||||
let starting_balance = 10_000;
|
||||
let mint = Mint::new(starting_balance);
|
||||
let replicate_addr = target1.data.replicate_addr;
|
||||
let bank = Arc::new(Bank::new(&mint));
|
||||
|
||||
//start crdt1
|
||||
let mut crdt1 = Crdt::new(target1.data.clone());
|
||||
crdt1.insert(&leader.data);
|
||||
crdt1.set_leader(leader.data.id);
|
||||
let cref1 = Arc::new(RwLock::new(crdt1));
|
||||
let dr_1 = new_ncp(cref1.clone(), target1.sockets.gossip, exit.clone()).unwrap();
|
||||
|
||||
let tvu = Tvu::new(
|
||||
bank.clone(),
|
||||
0,
|
||||
cref1,
|
||||
dr_1.1,
|
||||
target1.sockets.replicate,
|
||||
target1.sockets.repair,
|
||||
target1.sockets.retransmit,
|
||||
exit.clone(),
|
||||
);
|
||||
|
||||
let mut alice_ref_balance = starting_balance;
|
||||
let mut msgs = VecDeque::new();
|
||||
let mut cur_hash = Hash::default();
|
||||
let mut blob_id = 0;
|
||||
let num_transfers = 10;
|
||||
let transfer_amount = 501;
|
||||
let bob_keypair = KeyPair::new();
|
||||
for i in 0..num_transfers {
|
||||
let entry0 = Entry::new(&cur_hash, i, vec![], false);
|
||||
bank.register_entry_id(&cur_hash);
|
||||
cur_hash = hash(&cur_hash);
|
||||
|
||||
let tx0 = Transaction::new(
|
||||
&mint.keypair(),
|
||||
bob_keypair.pubkey(),
|
||||
transfer_amount,
|
||||
cur_hash,
|
||||
);
|
||||
bank.register_entry_id(&cur_hash);
|
||||
cur_hash = hash(&cur_hash);
|
||||
let entry1 = Entry::new(&cur_hash, i + num_transfers, vec![tx0], false);
|
||||
bank.register_entry_id(&cur_hash);
|
||||
cur_hash = hash(&cur_hash);
|
||||
|
||||
alice_ref_balance -= transfer_amount;
|
||||
|
||||
for entry in vec![entry0, entry1] {
|
||||
let b = resp_recycler.allocate();
|
||||
{
|
||||
let mut w = b.write().unwrap();
|
||||
w.set_index(blob_id).unwrap();
|
||||
blob_id += 1;
|
||||
w.set_id(leader_id).unwrap();
|
||||
|
||||
let serialized_entry = serialize(&entry).unwrap();
|
||||
|
||||
w.data_mut()[..serialized_entry.len()].copy_from_slice(&serialized_entry);
|
||||
w.set_size(serialized_entry.len());
|
||||
w.meta.set_addr(&replicate_addr);
|
||||
}
|
||||
msgs.push_back(b);
|
||||
}
|
||||
}
|
||||
|
||||
// send the blobs into the socket
|
||||
s_responder.send(msgs).expect("send");
|
||||
|
||||
// receive retransmitted messages
|
||||
let timer = Duration::new(1, 0);
|
||||
while let Ok(msg) = r_reader.recv_timeout(timer) {
|
||||
trace!("msg: {:?}", msg);
|
||||
}
|
||||
|
||||
let alice_balance = bank.get_balance(&mint.keypair().pubkey());
|
||||
assert_eq!(alice_balance, alice_ref_balance);
|
||||
|
||||
let bob_balance = bank.get_balance(&bob_keypair.pubkey());
|
||||
assert_eq!(bob_balance, starting_balance - alice_ref_balance);
|
||||
|
||||
exit.store(true, Ordering::Relaxed);
|
||||
for t in tvu.thread_hdls {
|
||||
t.join().expect("join");
|
||||
}
|
||||
for t in dr_l.0.thread_hdls {
|
||||
t.join().expect("join");
|
||||
}
|
||||
for t in dr_2.0.thread_hdls {
|
||||
t.join().expect("join");
|
||||
}
|
||||
for t in dr_1.0.thread_hdls {
|
||||
t.join().expect("join");
|
||||
}
|
||||
t_receiver.join().expect("join");
|
||||
t_responder.join().expect("join");
|
||||
}
|
||||
}
|
src/window_stage.rs (new file, 50 lines)
@@ -0,0 +1,50 @@
//! The `window_stage` maintains the blob window

use crdt::Crdt;
use packet::BlobRecycler;
use std::net::UdpSocket;
use std::sync::atomic::AtomicBool;
use std::sync::mpsc::channel;
use std::sync::{Arc, RwLock};
use std::thread::JoinHandle;
use streamer::{self, BlobReceiver, Window};

pub struct WindowStage {
    pub thread_hdls: Vec<JoinHandle<()>>,
}

impl WindowStage {
    pub fn new(
        crdt: Arc<RwLock<Crdt>>,
        window: Window,
        entry_height: u64,
        retransmit_socket: UdpSocket,
        exit: Arc<AtomicBool>,
        blob_recycler: BlobRecycler,
        fetch_stage_receiver: BlobReceiver,
    ) -> (Self, BlobReceiver) {
        let (retransmit_sender, retransmit_receiver) = channel();

        let t_retransmit = streamer::retransmitter(
            retransmit_socket,
            exit.clone(),
            crdt.clone(),
            blob_recycler.clone(),
            retransmit_receiver,
        );
        let (blob_sender, blob_receiver) = channel();
        let t_window = streamer::window(
            exit.clone(),
            crdt.clone(),
            window,
            entry_height,
            blob_recycler.clone(),
            fetch_stage_receiver,
            blob_sender,
            retransmit_sender,
        );
        let thread_hdls = vec![t_retransmit, t_window];

        (WindowStage { thread_hdls }, blob_receiver)
    }
}
src/write_stage.rs (new file, 75 lines)
@@ -0,0 +1,75 @@
//! The `write_stage` module implements the TPU's write stage. It
//! writes entries to the given writer, which is typically a file or
//! stdout, and then sends the Entry to its output channel.

use bank::Bank;
use entry::Entry;
use entry_writer::EntryWriter;
use ledger::Block;
use packet::BlobRecycler;
use result::Result;
use std::collections::VecDeque;
use std::io::Write;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::{channel, Receiver};
use std::sync::Arc;
use std::thread::{Builder, JoinHandle};
use std::time::Duration;
use streamer::{BlobReceiver, BlobSender};

pub struct WriteStage {
    pub thread_hdl: JoinHandle<()>,
}

impl WriteStage {
    /// Process any Entry items that have been published by the Historian.
    /// Continuously broadcast blobs of entries out.
    pub fn write_and_send_entries<W: Write>(
        entry_writer: &mut EntryWriter<W>,
        blob_sender: &BlobSender,
        blob_recycler: &BlobRecycler,
        entry_receiver: &Receiver<Vec<Entry>>,
    ) -> Result<()> {
        let entries = entry_receiver.recv_timeout(Duration::new(1, 0))?;
        entry_writer.write_and_register_entries(&entries)?;
        trace!("New blobs? {}", entries.len());
        let mut blobs = VecDeque::new();
        entries.to_blobs(blob_recycler, &mut blobs);
        if !blobs.is_empty() {
            trace!("broadcasting {}", blobs.len());
            blob_sender.send(blobs)?;
        }
        Ok(())
    }

    /// Create a new WriteStage that wraps the given Bank.
    pub fn new<W: Write + Send + 'static>(
        bank: Arc<Bank>,
        exit: Arc<AtomicBool>,
        blob_recycler: BlobRecycler,
        writer: W,
        entry_receiver: Receiver<Vec<Entry>>,
    ) -> (Self, BlobReceiver) {
        let (blob_sender, blob_receiver) = channel();
        let thread_hdl = Builder::new()
            .name("solana-writer".to_string())
            .spawn(move || {
                let mut entry_writer = EntryWriter::new(&bank, writer);
                loop {
                    let _ = Self::write_and_send_entries(
                        &mut entry_writer,
                        &blob_sender,
                        &blob_recycler,
                        &entry_receiver,
                    );
                    if exit.load(Ordering::Relaxed) {
                        info!("write_stage exiting");
                        break;
                    }
                }
            })
            .unwrap();

        (WriteStage { thread_hdl }, blob_receiver)
    }
}
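Because the stage is driven entirely by its entry channel, wiring it up needs only a Bank, a writer and that channel. A hypothetical smoke-test sketch reusing this module's imports; demo_write_stage is illustrative, the ledger writer is a sink, and the single entry batch is empty:

// Hypothetical wiring helper: feed one batch of entries, then shut down.
fn demo_write_stage(bank: Arc<Bank>) {
    let exit = Arc::new(AtomicBool::new(false));
    let blob_recycler = BlobRecycler::default();
    let (entry_sender, entry_receiver) = channel();

    let (write_stage, _blob_receiver) = WriteStage::new(
        bank,
        exit.clone(),
        blob_recycler,
        ::std::io::sink(), // writer: discard the ledger output in this sketch
        entry_receiver,
    );

    // Entries would normally arrive from the record stage; blobs come back
    // out of `_blob_receiver` for broadcast.
    entry_sender.send(Vec::<Entry>::new()).unwrap();
    exit.store(true, Ordering::Relaxed);
    write_stage.thread_hdl.join().unwrap();
}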
Some files were not shown because too many files have changed in this diff.