Compare commits: v0.1.1 ... v0.6.0-beta (855 commits)
(855 commits between these tags, listed in the original view by abbreviated SHA only, from ef8eac92e3 through bf35b730de; the Author and Date columns were empty in this capture.)
.codecov.yml (new file, 2 lines)
@@ -0,0 +1,2 @@
ignore:
- "src/bin"
.gitignore (modified; 4 lines changed)
@@ -1,4 +1,4 @@
-Cargo.lock
 /target/
 **/*.rs.bk
+Cargo.lock
+.cargo
.travis.yml (deleted; 22 lines)
@@ -1,22 +0,0 @@
language: rust
required: sudo
services:
  - docker
matrix:
  allow_failures:
    - rust: nightly
  include:
    - rust: stable
    - rust: nightly
      env:
        - FEATURES='unstable'
before_script: |
  export PATH="$PATH:$HOME/.cargo/bin"
  rustup component add rustfmt-preview
script:
  - cargo fmt -- --write-mode=diff
  - cargo build --verbose --features "$FEATURES"
  - cargo test --verbose --features "$FEATURES"
after_success: |
  docker run -it --rm --security-opt seccomp=unconfined --volume "$PWD:/volume" elmtai/docker-rust-kcov
  bash <(curl -s https://codecov.io/bash) -s target/cov
Cargo.toml (modified; 63 lines changed)
@@ -1,19 +1,68 @@
 [package]
-name = "silk"
-description = "A silky smooth implementation of the Loom architecture"
-version = "0.1.1"
+name = "solana"
+description = "The World's Fastest Blockchain"
+version = "0.6.0-beta"
+documentation = "https://docs.rs/solana"
+homepage = "http://solana.com/"
+repository = "https://github.com/solana-labs/solana"
 authors = [
-    "Anatoly Yakovenko <aeyakovenko@gmail.com>",
-    "Greg Fitzgerald <garious@gmail.com>",
+    "Anatoly Yakovenko <anatoly@solana.com>",
+    "Greg Fitzgerald <greg@solana.com>",
+    "Stephen Akridge <stephen@solana.com>",
 ]
 license = "Apache-2.0"
 
+[[bin]]
+name = "solana-client-demo"
+path = "src/bin/client-demo.rs"
+
+[[bin]]
+name = "solana-fullnode"
+path = "src/bin/fullnode.rs"
+
+[[bin]]
+name = "solana-genesis"
+path = "src/bin/genesis.rs"
+
+[[bin]]
+name = "solana-genesis-demo"
+path = "src/bin/genesis-demo.rs"
+
+[[bin]]
+name = "solana-mint"
+path = "src/bin/mint.rs"
+
+[[bin]]
+name = "solana-mint-demo"
+path = "src/bin/mint-demo.rs"
+
 [badges]
-codecov = { repository = "loomprotocol/silk", branch = "master", service = "github" }
+codecov = { repository = "solana-labs/solana", branch = "master", service = "github" }
 
 [features]
 unstable = []
+ipv6 = []
+cuda = []
+erasure = []
 
 [dependencies]
 rayon = "1.0.0"
 itertools = "0.7.6"
 sha2 = "0.7.0"
 generic-array = { version = "0.9.0", default-features = false, features = ["serde"] }
 serde = "1.0.27"
 serde_derive = "1.0.27"
 serde_json = "1.0.10"
 ring = "0.12.1"
 untrusted = "0.5.1"
 bincode = "1.0.0"
 chrono = { version = "0.4.0", features = ["serde"] }
 log = "^0.4.1"
 env_logger = "^0.4.1"
 matches = "^0.1.6"
 byteorder = "^1.2.1"
 libc = "^0.2.1"
 getopts = "^0.2"
 isatty = "0.1"
 futures = "0.1"
 rand = "0.4.2"
 pnet = "^0.21.0"
LICENSE (modified; 2 lines changed)
@@ -1,4 +1,4 @@
-Copyright 2018 Anatoly Yakovenko <anatoly@loomprotocol.com> and Greg Fitzgerald <garious@gmail.com>
+Copyright 2018 Anatoly Yakovenko, Greg Fitzgerald and Stephen Akridge
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
README.md (modified; 169 lines changed)
@@ -1,22 +1,100 @@
-[](https://crates.io/crates/silk)
-[](https://docs.rs/silk)
-[](https://travis-ci.org/loomprotocol/silk)
-[](https://codecov.io/gh/loomprotocol/silk)
+[](https://crates.io/crates/solana)
+[](https://docs.rs/solana)
+[](https://buildkite.com/solana-labs/solana)
+[](https://codecov.io/gh/solana-labs/solana)
 
-# Silk, A Silky Smooth Implementation of the Loom Architecture
+Disclaimer
+===
 
-Loom is a new architecture for a high performance blockchain. Its whitepaper boasts a theoretical
-throughput of 710k transactions per second on a 1 gbps network. The first implementation of the
-whitepaper is happening in the 'loomprotocol/loom' repository. That repo is aggressively moving
-forward, looking to de-risk technical claims as quickly as possible. This repo is quite a bit
-different philosophically. Here we assume the Loom architecture is sound and worthy of building
-a community around. We care a great deal about quality, clarity and short learning curve. We
-avoid the use of `unsafe` Rust and write tests for *everything*. Optimizations are only
-added when corresponding benchmarks are also added that demonstrate real performance boosts. We
-expect the feature set here will always be a long way behind the loom repo, but that this is
-an implementation you can take to the bank, literally.
+All claims, content, designs, algorithms, estimates, roadmaps, specifications, and performance measurements described in this project are done with the author's best effort. It is up to the reader to check and validate their accuracy and truthfulness. Furthermore, nothing in this project constitutes a solicitation for investment.
 
-# Developing
+Solana: High Performance Blockchain
+===
+
+Solana™ is a new architecture for a high performance blockchain. It aims to support
+over 700 thousand transactions per second on a gigabit network.
+
+Introduction
+===
+
+It's possible for a centralized database to process 710,000 transactions per second on a standard gigabit network if the transactions are, on average, no more than 178 bytes. A centralized database can also replicate itself and maintain high availability without significantly compromising that transaction rate using the distributed system technique known as Optimistic Concurrency Control [H. T. Kung, J. T. Robinson (1981)]. At Solana, we're demonstrating that these same theoretical limits apply just as well to blockchain on an adversarial network. The key ingredient? Finding a way to share time when nodes can't trust one another. Once nodes can trust time, suddenly ~40 years of distributed systems research becomes applicable to blockchain! Furthermore, and much to our surprise, it can be implemented using a mechanism that has existed in Bitcoin since day one. The Bitcoin feature is called nLocktime and it can be used to postdate transactions using block height instead of a timestamp. As a Bitcoin client, you'd use block height instead of a timestamp if you don't trust the network. Block height turns out to be an instance of what's being called a Verifiable Delay Function in cryptography circles. It's a cryptographically secure way to say time has passed. In Solana, we use a far more granular verifiable delay function, a SHA-256 hash chain, to checkpoint the ledger and coordinate consensus. With it, we implement Optimistic Concurrency Control and are now well en route toward that theoretical limit of 710,000 transactions per second.
+
+Running the demo
+===
+
+First, install Rust's package manager Cargo.
+
+```bash
+$ curl https://sh.rustup.rs -sSf | sh
+$ source $HOME/.cargo/env
+```
+
+Now checkout the code from github:
+
+```bash
+$ git clone https://github.com/solana-labs/solana.git
+$ cd solana
+```
+
+The fullnode server is initialized with a ledger from stdin and
+generates new ledger entries on stdout. To create the input ledger, we'll need
+to create *the mint* and use it to generate a *genesis ledger*. It's done in
+two steps because the mint-demo.json file contains private keys that will be
+used later in this demo.
+
+```bash
+$ echo 1000000000 | cargo run --release --bin solana-mint-demo > mint-demo.json
+$ cat mint-demo.json | cargo run --release --bin solana-genesis-demo > genesis.log
+```
+
+Before you start the server, make sure you know the IP address of the machine you want to be the leader for the demo, and make sure that UDP ports 8000-10000 are open on all the machines you want to test with. Now you can start the server:
+
+```bash
+$ cat ./multinode-demo/leader.sh
+#!/bin/bash
+export RUST_LOG=solana=info
+sudo sysctl -w net.core.rmem_max=26214400
+cat genesis.log leader.log | cargo run --release --features cuda --bin solana-fullnode -- -s leader.json -l leader.json -b 8000 -d 2>&1 | tee leader-tee.log
+$ ./multinode-demo/leader.sh
+```
+
+Wait a few seconds for the server to initialize. It will print "Ready." when it's safe
+to start sending it transactions.
+
+Now you can start some validators:
+
+```bash
+$ cat ./multinode-demo/validator.sh
+#!/bin/bash
+rsync -v -e ssh $1:~/solana/mint-demo.json .
+rsync -v -e ssh $1:~/solana/leader.json .
+rsync -v -e ssh $1:~/solana/genesis.log .
+rsync -v -e ssh $1:~/solana/leader.log .
+rsync -v -e ssh $1:~/solana/libcuda_verify_ed25519.a .
+export RUST_LOG=solana=info
+sudo sysctl -w net.core.rmem_max=26214400
+cat genesis.log leader.log | cargo run --release --features cuda --bin solana-fullnode -- -l validator.json -s validator.json -v leader.json -b 9000 -d 2>&1 | tee validator-tee.log
+$ ./multinode-demo/validator.sh ubuntu@10.0.1.51 # The leader machine
+```
+
+Then, in a separate shell, let's execute some transactions. Note we pass in
+the JSON configuration file here, not the genesis ledger.
+
+```bash
+$ cat ./multinode-demo/client.sh
+#!/bin/bash
+export RUST_LOG=solana=info
+rsync -v -e ssh $1:~/solana/leader.json .
+rsync -v -e ssh $1:~/solana/mint-demo.json .
+cat mint-demo.json | cargo run --release --bin solana-client-demo -- -l leader.json -c 8100 -n 1
+$ ./multinode-demo/client.sh ubuntu@10.0.1.51 # The leader machine
+```
+
+Try starting a few more validators and rerunning the client demo!
+
+Developing
+===
 
 Building
 ---
@@ -29,11 +107,17 @@ $ source $HOME/.cargo/env
 $ rustup component add rustfmt-preview
 ```
 
+If your rustc version is lower than 1.25.0, please update it:
+
+```bash
+$ rustup update
+```
+
 Download the source code:
 
 ```bash
-$ git clone https://github.com/loomprotocol/silk.git
-$ cd silk
+$ git clone https://github.com/solana-labs/solana.git
+$ cd solana
 ```
 
 Testing
@@ -42,9 +126,26 @@ Testing
 Run the test suite:
 
 ```bash
-cargo test
+$ cargo test
 ```
 
+To emulate all the tests that will run on a Pull Request, run:
+
+```bash
+$ ./ci/run-local.sh
+```
+
+Debugging
+---
+
+There are some useful debug messages in the code; you can enable them on a per-module and per-level
+basis with the normal RUST\_LOG environment variable. Run the fullnode with this syntax:
+
+```bash
+$ RUST_LOG=solana::streamer=debug,solana::server=info cat genesis.log | ./target/release/solana-fullnode > transactions0.log
+```
+
+to see the debug and info sections for streamer and server respectively. Generally
+we use debug for infrequent debug messages, trace for potentially frequent messages, and
+info for performance-related logging.
+
 Benchmarking
 ---
 
@@ -59,3 +160,33 @@ Run the benchmarks:
 
 ```bash
 $ cargo +nightly bench --features="unstable"
 ```
+
+To run the benchmarks on Linux with GPU optimizations enabled:
+
+```bash
+$ wget https://solana-build-artifacts.s3.amazonaws.com/v0.5.0/libcuda_verify_ed25519.a
+$ cargo +nightly bench --features="unstable,cuda"
+```
+
+Code coverage
+---
+
+To generate code coverage statistics, run kcov via Docker:
+
+```bash
+$ ./ci/coverage.sh
+```
+The coverage report will be written to `./target/cov/index.html`.
+
+Why coverage? While most see coverage as a code quality metric, we see it primarily as a developer
+productivity metric. When a developer makes a change to the codebase, presumably it's a *solution* to
+some problem. Our unit-test suite is how we encode the set of *problems* the codebase solves. Running
+the test suite should indicate that your change didn't *infringe* on anyone else's solutions. Adding a
+test *protects* your solution from future changes. Say you don't understand why a line of code exists;
+try deleting it and running the unit tests. The nearest test failure should tell you what problem
+was solved by that code. If no test fails, go ahead and submit a Pull Request that asks, "what
+problem is solved by this code?" On the other hand, if a test does fail and you can think of a
+better way to solve the same problem, a Pull Request with your solution would most certainly be
+welcome! Likewise, if rewriting a test can better communicate what code it's protecting, please
+send us that patch!
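The introduction above hinges on one mechanism: a sequential SHA-256 hash chain used as a verifiable delay function. Below is a minimal sketch of that idea in Rust, assuming the `sha2` crate (0.10 API); it is illustrative only, not the project's actual Proof-of-History implementation.

```rust
// Illustrative sketch only; assumes the `sha2` crate (0.10 API), not the
// project's real Proof-of-History code.
use sha2::{Digest, Sha256};

/// Advance the chain by `num_hashes` sequential SHA-256 applications.
/// Each hash depends on the previous one, so the loop cannot be
/// parallelized; the output is evidence of elapsed sequential work.
fn extend_chain(seed: [u8; 32], num_hashes: u64) -> [u8; 32] {
    let mut id = seed;
    for _ in 0..num_hashes {
        id = Sha256::digest(id).into();
    }
    id
}

fn main() {
    let seed = [0u8; 32];
    let checkpoint = extend_chain(seed, 1_000_000);
    // Any observer can re-run the same million hashes and confirm the
    // producer must have spent the corresponding sequential time.
    assert_eq!(extend_chain(seed, 1_000_000), checkpoint);
    println!("checkpoint verified: {:02x?}", &checkpoint[..4]);
}
```

This is why block height works as a crude clock in Bitcoin and why a per-hash chain gives Solana a far more granular one: verifying a checkpoint is just replaying the hashes.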
_config.yml (new file, 1 line)
@@ -0,0 +1 @@
theme: jekyll-theme-slate
build.rs (new file, 15 lines)
@@ -0,0 +1,15 @@
use std::env;

fn main() {
    println!("cargo:rustc-link-search=native=.");
    if !env::var("CARGO_FEATURE_CUDA").is_err() {
        println!("cargo:rustc-link-lib=static=cuda_verify_ed25519");
        println!("cargo:rustc-link-search=native=/usr/local/cuda/lib64");
        println!("cargo:rustc-link-lib=dylib=cudart");
        println!("cargo:rustc-link-lib=dylib=cuda");
        println!("cargo:rustc-link-lib=dylib=cudadevrt");
    }
    if !env::var("CARGO_FEATURE_ERASURE").is_err() {
        println!("cargo:rustc-link-lib=dylib=Jerasure");
    }
}
ci/.gitignore (new file, 2 lines)
@@ -0,0 +1,2 @@
/node_modules/
/package-lock.json
ci/buildkite.yml (new file, 16 lines)
@@ -0,0 +1,16 @@
steps:
  - command: "ci/coverage.sh"
    name: "coverage [public]"
  - command: "ci/docker-run.sh rust ci/test-stable.sh"
    name: "stable [public]"
  - command: "ci/docker-run.sh rustlang/rust:nightly ci/test-nightly.sh || true"
    name: "nightly - FAILURES IGNORED [public]"
  - command: "ci/docker-run.sh rust ci/test-ignored.sh"
    name: "ignored [public]"
  - command: "ci/test-cuda.sh"
    name: "cuda"
  - command: "ci/shellcheck.sh"
    name: "shellcheck [public]"
  - wait
  - command: "ci/publish.sh"
    name: "publish release artifacts"
ci/coverage.sh (new executable file, 21 lines)
@@ -0,0 +1,21 @@
#!/bin/bash -e

cd "$(dirname "$0")/.."

ci/docker-run.sh evilmachines/rust-cargo-kcov \
  bash -exc "\
    export RUST_BACKTRACE=1; \
    cargo build --verbose; \
    cargo kcov --lib --verbose; \
  "

echo Coverage report:
ls -l target/cov/index.html

if [[ -z "$CODECOV_TOKEN" ]]; then
  echo CODECOV_TOKEN undefined
else
  bash <(curl -s https://codecov.io/bash)
fi

exit 0
ci/docker-run.sh (new executable file, 41 lines)
@@ -0,0 +1,41 @@
#!/bin/bash -e

usage() {
  echo "Usage: $0 [docker image name] [command]"
  echo
  echo Runs command in the specified docker image with
  echo a CI-appropriate environment
  echo
}

cd "$(dirname "$0")/.."

IMAGE="$1"
if [[ -z "$IMAGE" ]]; then
  echo Error: image not defined
  exit 1
fi

docker pull "$IMAGE"
shift

ARGS=(--workdir /solana --volume "$PWD:/solana" --rm)

ARGS+=(--env "CARGO_HOME=/solana/.cargo")

# kcov tries to set the personality of the binary which docker
# doesn't allow by default.
ARGS+=(--security-opt "seccomp=unconfined")

# Ensure files are created with the current host uid/gid
ARGS+=(--user "$(id -u):$(id -g)")

# Environment variables to propagate into the container
ARGS+=(
  --env BUILDKITE_TAG
  --env CODECOV_TOKEN
  --env CRATES_IO_TOKEN
)

set -x
docker run "${ARGS[@]}" "$IMAGE" "$@"
ci/publish.sh (new executable file, 19 lines)
@@ -0,0 +1,19 @@
#!/bin/bash -e

cd "$(dirname "$0")/.."

if [[ -z "$BUILDKITE_TAG" ]]; then
  # Skip publish if this is not a tagged release
  exit 0
fi

if [[ -z "$CRATES_IO_TOKEN" ]]; then
  echo CRATES_IO_TOKEN undefined
  exit 1
fi

# TODO: Ensure the published version matches the contents of BUILDKITE_TAG
ci/docker-run.sh rust \
  bash -exc "cargo package; cargo publish --token $CRATES_IO_TOKEN"

exit 0
ci/run-local.sh (new executable file, 19 lines)
@@ -0,0 +1,19 @@
#!/bin/bash -e
#
# Run the entire buildkite CI pipeline locally for pre-testing before sending a
# Github pull request
#

cd "$(dirname "$0")/.."
BKRUN=ci/node_modules/.bin/bkrun

if [[ ! -x $BKRUN ]]; then
  (
    set -x
    cd ci/
    npm install bkrun
  )
fi

set -x
./ci/node_modules/.bin/bkrun ci/buildkite.yml
ci/shellcheck.sh (new executable file, 11 lines)
@@ -0,0 +1,11 @@
#!/bin/bash -e
#
# Reference: https://github.com/koalaman/shellcheck/wiki/Directive

cd "$(dirname "$0")/.."

set -x
find . -name "*.sh" -not -regex ".*/.cargo/.*" -not -regex ".*/node_modules/.*" -print0 \
  | xargs -0 \
      ci/docker-run.sh koalaman/shellcheck --color=always --external-sources --shell=bash
exit 0
ci/test-cuda.sh (new executable file, 22 lines)
@@ -0,0 +1,22 @@
#!/bin/bash -e

cd "$(dirname "$0")/.."

LIB=libcuda_verify_ed25519.a
if [[ ! -r $LIB ]]; then
  if [[ -z "${libcuda_verify_ed25519_URL:-}" ]]; then
    echo "$0 skipped. Unable to locate $LIB"
    exit 0
  fi

  export LD_LIBRARY_PATH=/usr/local/cuda/lib64
  export PATH=$PATH:/usr/local/cuda/bin
  curl -X GET -o $LIB "$libcuda_verify_ed25519_URL"
fi

# shellcheck disable=SC1090 # <-- shellcheck can't follow ~
source ~/.cargo/env
export RUST_BACKTRACE=1
cargo test --features=cuda

exit 0
ci/test-ignored.sh (new executable file, 9 lines)
@@ -0,0 +1,9 @@
#!/bin/bash -e

cd "$(dirname "$0")/.."

rustc --version
cargo --version

export RUST_BACKTRACE=1
cargo test -- --ignored
ci/test-nightly.sh (new executable file, 14 lines)
@@ -0,0 +1,14 @@
#!/bin/bash -e

cd "$(dirname "$0")/.."

rustc --version
cargo --version

export RUST_BACKTRACE=1
rustup component add rustfmt-preview
cargo build --verbose --features unstable
cargo test --verbose --features unstable
cargo bench --verbose --features unstable

exit 0
ci/test-stable.sh (new executable file, 14 lines)
@@ -0,0 +1,14 @@
#!/bin/bash -e

cd "$(dirname "$0")/.."

rustc --version
cargo --version

export RUST_BACKTRACE=1
rustup component add rustfmt-preview
cargo fmt -- --write-mode=diff
cargo build --verbose
cargo test --verbose

exit 0
doc/consensus.msc (new file, 15 lines)
@@ -0,0 +1,15 @@
msc {
  client,leader,verifier_a,verifier_b,verifier_c;

  client=>leader [ label = "SUBMIT" ] ;
  leader=>client [ label = "CONFIRMED" ] ;
  leader=>verifier_a [ label = "CONFIRMED" ] ;
  leader=>verifier_b [ label = "CONFIRMED" ] ;
  leader=>verifier_c [ label = "CONFIRMED" ] ;
  verifier_a=>leader [ label = "VERIFIED" ] ;
  verifier_b=>leader [ label = "VERIFIED" ] ;
  leader=>client [ label = "FINALIZED" ] ;
  leader=>verifier_a [ label = "FINALIZED" ] ;
  leader=>verifier_b [ label = "FINALIZED" ] ;
  leader=>verifier_c [ label = "FINALIZED" ] ;
}
doc/historian.md (new file, 65 lines)
@@ -0,0 +1,65 @@
The Historian
===

Create a *Historian* and send it *events* to generate an *event log*, where each *entry*
is tagged with the historian's latest *hash*. Then ensure the order of events was not tampered
with by verifying each entry's hash can be generated from the hash in the previous entry:

```rust
extern crate solana;

use solana::historian::Historian;
use solana::ledger::{Block, Entry, Hash};
use solana::event::{generate_keypair, get_pubkey, sign_claim_data, Event};
use std::thread::sleep;
use std::time::Duration;
use std::sync::mpsc::SendError;

fn create_ledger(hist: &Historian<Hash>) -> Result<(), SendError<Event<Hash>>> {
    sleep(Duration::from_millis(15));
    let tokens = 42;
    let keypair = generate_keypair();
    let event0 = Event::new_claim(get_pubkey(&keypair), tokens, sign_claim_data(&tokens, &keypair));
    hist.sender.send(event0)?;
    sleep(Duration::from_millis(10));
    Ok(())
}

fn main() {
    let seed = Hash::default();
    let hist = Historian::new(&seed, Some(10));
    create_ledger(&hist).expect("send error");
    drop(hist.sender);
    let entries: Vec<Entry<Hash>> = hist.receiver.iter().collect();
    for entry in &entries {
        println!("{:?}", entry);
    }
    // Proof-of-History: Verify the historian learned about the events
    // in the same order they appear in the vector.
    assert!(entries[..].verify(&seed));
}
```

Running the program should produce a ledger similar to:

```rust
Entry { num_hashes: 0, id: [0, ...], event: Tick }
Entry { num_hashes: 3, id: [67, ...], event: Transaction { tokens: 42 } }
Entry { num_hashes: 3, id: [123, ...], event: Tick }
```

Proof-of-History
---

Take note of the last line:

```rust
assert!(entries[..].verify(&seed));
```

[It's a proof!](https://en.wikipedia.org/wiki/Curry–Howard_correspondence) For each entry returned by the
historian, we can verify that `id` is the result of applying a sha256 hash to the previous `id`
exactly `num_hashes` times, and then hashing the event data on top of that. Because the event data is
included in the hash, the events cannot be reordered without regenerating all the hashes.
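To make that verification step concrete, here is a sketch of the per-entry check just described, with a hypothetical `Entry` struct modeled on the fields the document names (`num_hashes`, `id`, and the event data); the crate's real types and hashing details differ, and the `sha2` 0.10 API is assumed.

```rust
// Illustrative sketch; `Entry` here is a hypothetical stand-in, not the
// crate's actual type, and the `sha2` crate (0.10 API) is assumed.
use sha2::{Digest, Sha256};

struct Entry {
    num_hashes: u64,
    id: [u8; 32],
    event_data: Option<Vec<u8>>, // None models a Tick
}

/// An entry is valid when hashing the previous `id` exactly `num_hashes`
/// times, then folding in the event data, reproduces `entry.id`.
fn verify_entry(prev_id: &[u8; 32], entry: &Entry) -> bool {
    let mut id = *prev_id;
    for _ in 0..entry.num_hashes {
        id = Sha256::digest(id).into();
    }
    if let Some(data) = &entry.event_data {
        let mut hasher = Sha256::new();
        hasher.update(id);
        hasher.update(data);
        id = hasher.finalize().into();
    }
    id == entry.id
}

/// The whole ledger verifies when every entry chains from its predecessor,
/// which is what `entries[..].verify(&seed)` asserts above.
fn verify_ledger(seed: &[u8; 32], entries: &[Entry]) -> bool {
    let mut prev = *seed;
    entries.iter().all(|entry| {
        let ok = verify_entry(&prev, entry);
        prev = entry.id;
        ok
    })
}

fn main() {
    // Build one valid Tick entry by running the same computation forward.
    let seed = [0u8; 32];
    let mut id = seed;
    for _ in 0..3 {
        id = Sha256::digest(id).into();
    }
    let tick = Entry { num_hashes: 3, id, event_data: None };
    assert!(verify_ledger(&seed, &[tick]));
    println!("ledger verified");
}
```

Because the event data is folded into the chain, moving an event to a different position changes every subsequent `id`, which is exactly the tamper-evidence the assertion relies on.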
doc/historian.msc (new file, 18 lines)
@@ -0,0 +1,18 @@
msc {
  client,historian,recorder;

  recorder=>historian [ label = "e0 = Entry{id: h0, n: 0, event: Tick}" ] ;
  recorder=>recorder [ label = "h1 = hash(h0)" ] ;
  recorder=>recorder [ label = "h2 = hash(h1)" ] ;
  client=>historian [ label = "Transaction(d0)" ] ;
  historian=>recorder [ label = "Transaction(d0)" ] ;
  recorder=>recorder [ label = "h3 = hash(h2 + d0)" ] ;
  recorder=>historian [ label = "e1 = Entry{id: hash(h3), n: 3, event: Transaction(d0)}" ] ;
  recorder=>recorder [ label = "h4 = hash(h3)" ] ;
  recorder=>recorder [ label = "h5 = hash(h4)" ] ;
  recorder=>recorder [ label = "h6 = hash(h5)" ] ;
  recorder=>historian [ label = "e2 = Entry{id: h6, n: 3, event: Tick}" ] ;
  client=>historian [ label = "collect()" ] ;
  historian=>client [ label = "entries = [e0, e1, e2]" ] ;
  client=>client [ label = "entries.verify(h0)" ] ;
}
multinode-demo/client.sh (new executable file, 16 lines)
@@ -0,0 +1,16 @@
#!/bin/bash -e

if [[ -z "$1" ]]; then
  echo "usage: $0 [leader machine]"
  exit 1
fi

LEADER="$1"

set -x
export RUST_LOG=solana=info
rsync -v -e ssh "$LEADER:~/solana/leader.json" .
rsync -v -e ssh "$LEADER:~/solana/mint-demo.json" .

cargo run --release --bin solana-client-demo -- \
  -l leader.json -c 8100 -n 1 < mint-demo.json
multinode-demo/leader.sh (new executable file, 4 lines)
@@ -0,0 +1,4 @@
#!/bin/bash
export RUST_LOG=solana=info
sudo sysctl -w net.core.rmem_max=26214400
cat genesis.log leader.log | cargo run --release --features cuda --bin solana-fullnode -- -s leader.json -l leader.json -b 8000 -d 2>&1 | tee leader-tee.log
multinode-demo/validator.sh (new executable file, 24 lines)
@@ -0,0 +1,24 @@
#!/bin/bash -e

if [[ -z "$1" ]]; then
  echo "usage: $0 [leader machine]"
  exit 1
fi

LEADER="$1"

set -x

rsync -v -e ssh "$LEADER:~/solana/mint-demo.json" .
rsync -v -e ssh "$LEADER:~/solana/leader.json" .
rsync -v -e ssh "$LEADER:~/solana/genesis.log" .
rsync -v -e ssh "$LEADER:~/solana/leader.log" .
rsync -v -e ssh "$LEADER:~/solana/libcuda_verify_ed25519.a" .

export RUST_LOG=solana=info

sudo sysctl -w net.core.rmem_max=26214400

cat genesis.log leader.log | \
  cargo run --release --features cuda --bin solana-fullnode -- \
    -l validator.json -s validator.json -v leader.json -b 9000 -d 2>&1 | tee validator-tee.log
648
src/bank.rs
Normal file
648
src/bank.rs
Normal file
@ -0,0 +1,648 @@
|
||||
//! The `bank` module tracks client balances, and the progress of pending
|
||||
//! transactions. It offers a high-level public API that signs transactions
|
||||
//! on behalf of the caller, and a private low-level API for when they have
|
||||
//! already been signed and verified.
|
||||
|
||||
extern crate libc;
|
||||
|
||||
use chrono::prelude::*;
|
||||
use entry::Entry;
|
||||
use hash::Hash;
|
||||
use mint::Mint;
|
||||
use payment_plan::{Payment, PaymentPlan, Witness};
|
||||
use rayon::prelude::*;
|
||||
use signature::{KeyPair, PublicKey, Signature};
|
||||
use std::collections::hash_map::Entry::Occupied;
|
||||
use std::collections::{HashMap, HashSet, VecDeque};
|
||||
use std::result;
|
||||
use std::sync::atomic::{AtomicIsize, AtomicUsize, Ordering};
|
||||
use std::sync::RwLock;
|
||||
use transaction::{Instruction, Plan, Transaction};
|
||||
|
||||
pub const MAX_ENTRY_IDS: usize = 1024 * 4;
|
||||
|
||||
#[derive(Debug, PartialEq, Eq)]
|
||||
pub enum BankError {
|
||||
AccountNotFound(PublicKey),
|
||||
InsufficientFunds(PublicKey),
|
||||
DuplicateSiganture(Signature),
|
||||
LastIdNotFound(Hash),
|
||||
NegativeTokens,
|
||||
}
|
||||
|
||||
pub type Result<T> = result::Result<T, BankError>;
|
||||
|
||||
pub struct Bank {
|
||||
balances: RwLock<HashMap<PublicKey, AtomicIsize>>,
|
||||
pending: RwLock<HashMap<Signature, Plan>>,
|
||||
last_ids: RwLock<VecDeque<(Hash, RwLock<HashSet<Signature>>)>>,
|
||||
time_sources: RwLock<HashSet<PublicKey>>,
|
||||
last_time: RwLock<DateTime<Utc>>,
|
||||
transaction_count: AtomicUsize,
|
||||
}
|
||||
|
||||
impl Bank {
|
||||
/// Create an Bank using a deposit.
|
||||
pub fn new_from_deposit(deposit: &Payment) -> Self {
|
||||
let bank = Bank {
|
||||
balances: RwLock::new(HashMap::new()),
|
||||
pending: RwLock::new(HashMap::new()),
|
||||
last_ids: RwLock::new(VecDeque::new()),
|
||||
time_sources: RwLock::new(HashSet::new()),
|
||||
last_time: RwLock::new(Utc.timestamp(0, 0)),
|
||||
transaction_count: AtomicUsize::new(0),
|
||||
};
|
||||
bank.apply_payment(deposit);
|
||||
bank
|
||||
}
|
||||
|
||||
/// Create an Bank with only a Mint. Typically used by unit tests.
|
||||
pub fn new(mint: &Mint) -> Self {
|
||||
let deposit = Payment {
|
||||
to: mint.pubkey(),
|
||||
tokens: mint.tokens,
|
||||
};
|
||||
let bank = Self::new_from_deposit(&deposit);
|
||||
bank.register_entry_id(&mint.last_id());
|
||||
bank
|
||||
}
|
||||
|
||||
/// Commit funds to the 'to' party.
|
||||
fn apply_payment(&self, payment: &Payment) {
|
||||
// First we check balances with a read lock to maximize potential parallelization.
|
||||
if self.balances
|
||||
.read()
|
||||
.expect("'balances' read lock in apply_payment")
|
||||
.contains_key(&payment.to)
|
||||
{
|
||||
let bals = self.balances.read().expect("'balances' read lock");
|
||||
bals[&payment.to].fetch_add(payment.tokens as isize, Ordering::Relaxed);
|
||||
} else {
|
||||
// Now we know the key wasn't present a nanosecond ago, but it might be there
|
||||
// by the time we aquire a write lock, so we'll have to check again.
|
||||
let mut bals = self.balances.write().expect("'balances' write lock");
|
||||
if bals.contains_key(&payment.to) {
|
||||
bals[&payment.to].fetch_add(payment.tokens as isize, Ordering::Relaxed);
|
||||
} else {
|
||||
bals.insert(payment.to, AtomicIsize::new(payment.tokens as isize));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Return the last entry ID registered
|
||||
pub fn last_id(&self) -> Hash {
|
||||
let last_ids = self.last_ids.read().expect("'last_ids' read lock");
|
||||
let last_item = last_ids.iter().last().expect("empty 'last_ids' list");
|
||||
last_item.0
|
||||
}
|
||||
|
||||
fn reserve_signature(signatures: &RwLock<HashSet<Signature>>, sig: &Signature) -> Result<()> {
|
||||
if signatures
|
||||
.read()
|
||||
.expect("'signatures' read lock")
|
||||
.contains(sig)
|
||||
{
|
||||
return Err(BankError::DuplicateSiganture(*sig));
|
||||
}
|
||||
signatures
|
||||
.write()
|
||||
.expect("'signatures' write lock")
|
||||
.insert(*sig);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn forget_signature(signatures: &RwLock<HashSet<Signature>>, sig: &Signature) {
|
||||
signatures
|
||||
.write()
|
||||
.expect("'signatures' write lock in forget_signature")
|
||||
.remove(sig);
|
||||
}
|
||||
|
||||
fn forget_signature_with_last_id(&self, sig: &Signature, last_id: &Hash) {
|
||||
if let Some(entry) = self.last_ids
|
||||
.read()
|
||||
.expect("'last_ids' read lock in forget_signature_with_last_id")
|
||||
.iter()
|
||||
.rev()
|
||||
.find(|x| x.0 == *last_id)
|
||||
{
|
||||
Self::forget_signature(&entry.1, sig);
|
||||
}
|
||||
}
|
||||
|
||||
fn reserve_signature_with_last_id(&self, sig: &Signature, last_id: &Hash) -> Result<()> {
|
||||
if let Some(entry) = self.last_ids
|
||||
.read()
|
||||
.expect("'last_ids' read lock in reserve_signature_with_last_id")
|
||||
.iter()
|
||||
.rev()
|
||||
.find(|x| x.0 == *last_id)
|
||||
{
|
||||
return Self::reserve_signature(&entry.1, sig);
|
||||
}
|
||||
Err(BankError::LastIdNotFound(*last_id))
|
||||
}
|
||||
|
||||
/// Tell the bank which Entry IDs exist on the ledger. This function
|
||||
/// assumes subsequent calls correspond to later entries, and will boot
|
||||
/// the oldest ones once its internal cache is full. Once boot, the
|
||||
/// bank will reject transactions using that `last_id`.
|
||||
pub fn register_entry_id(&self, last_id: &Hash) {
|
||||
let mut last_ids = self.last_ids
|
||||
.write()
|
||||
.expect("'last_ids' write lock in register_entry_id");
|
||||
if last_ids.len() >= MAX_ENTRY_IDS {
|
||||
last_ids.pop_front();
|
||||
}
|
||||
last_ids.push_back((*last_id, RwLock::new(HashSet::new())));
|
||||
}
|
||||
|
||||
/// Deduct tokens from the 'from' address the account has sufficient
|
||||
/// funds and isn't a duplicate.
|
||||
fn apply_debits(&self, tx: &Transaction) -> Result<()> {
|
||||
if let Instruction::NewContract(contract) = &tx.instruction {
|
||||
trace!("Transaction {}", contract.tokens);
|
||||
if contract.tokens < 0 {
|
||||
return Err(BankError::NegativeTokens);
|
||||
}
|
||||
}
|
||||
let bals = self.balances
|
||||
.read()
|
||||
.expect("'balances' read lock in apply_debits");
|
||||
let option = bals.get(&tx.from);
|
||||
|
||||
if option.is_none() {
|
||||
return Err(BankError::AccountNotFound(tx.from));
|
||||
}
|
||||
|
||||
self.reserve_signature_with_last_id(&tx.sig, &tx.last_id)?;
|
||||
|
||||
loop {
|
||||
let result = if let Instruction::NewContract(contract) = &tx.instruction {
|
||||
let bal = option.expect("assignment of option to bal");
|
||||
let current = bal.load(Ordering::Relaxed) as i64;
|
||||
|
||||
if current < contract.tokens {
|
||||
self.forget_signature_with_last_id(&tx.sig, &tx.last_id);
|
||||
return Err(BankError::InsufficientFunds(tx.from));
|
||||
}
|
||||
|
||||
bal.compare_exchange(
|
||||
current as isize,
|
||||
(current - contract.tokens) as isize,
|
||||
Ordering::Relaxed,
|
||||
Ordering::Relaxed,
|
||||
)
|
||||
} else {
|
||||
Ok(0)
|
||||
};
|
||||
|
||||
match result {
|
||||
Ok(_) => {
|
||||
self.transaction_count.fetch_add(1, Ordering::Relaxed);
|
||||
return Ok(());
|
||||
}
|
||||
Err(_) => continue,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
fn apply_credits(&self, tx: &Transaction) {
|
||||
match &tx.instruction {
|
||||
Instruction::NewContract(contract) => {
|
||||
let mut plan = contract.plan.clone();
|
||||
plan.apply_witness(&Witness::Timestamp(*self.last_time
|
||||
.read()
|
||||
.expect("timestamp creation in apply_credits")));
|
||||
|
||||
if let Some(ref payment) = plan.final_payment() {
|
||||
self.apply_payment(payment);
|
||||
} else {
|
||||
let mut pending = self.pending
|
||||
.write()
|
||||
.expect("'pending' write lock in apply_credits");
|
||||
pending.insert(tx.sig, plan);
|
||||
}
|
||||
}
|
||||
Instruction::ApplyTimestamp(dt) => {
|
||||
let _ = self.apply_timestamp(tx.from, *dt);
|
||||
}
|
||||
Instruction::ApplySignature(tx_sig) => {
|
||||
let _ = self.apply_signature(tx.from, *tx_sig);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Process a Transaction.
|
||||
fn process_transaction(&self, tx: &Transaction) -> Result<()> {
|
||||
self.apply_debits(tx)?;
|
||||
self.apply_credits(tx);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Process a batch of transactions.
|
||||
pub fn process_transactions(&self, txs: Vec<Transaction>) -> Vec<Result<Transaction>> {
|
||||
// Run all debits first to filter out any transactions that can't be processed
|
||||
// in parallel deterministically.
|
||||
info!("processing Transactions {}", txs.len());
|
||||
let results: Vec<_> = txs.into_par_iter()
|
||||
.map(|tx| self.apply_debits(&tx).map(|_| tx))
|
||||
.collect(); // Calling collect() here forces all debits to complete before moving on.
|
||||
|
||||
results
|
||||
.into_par_iter()
|
||||
.map(|result| {
|
||||
result.map(|tx| {
|
||||
self.apply_credits(&tx);
|
||||
tx
|
||||
})
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
pub fn process_entries(&self, entries: Vec<Entry>) -> Result<()> {
|
||||
for entry in entries {
|
||||
self.register_entry_id(&entry.id);
|
||||
for result in self.process_transactions(entry.transactions) {
|
||||
result?;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Process a Witness Signature.
|
||||
fn apply_signature(&self, from: PublicKey, tx_sig: Signature) -> Result<()> {
|
||||
if let Occupied(mut e) = self.pending
|
||||
.write()
|
||||
.expect("write() in apply_signature")
|
||||
.entry(tx_sig)
|
||||
{
|
||||
e.get_mut().apply_witness(&Witness::Signature(from));
|
||||
if let Some(payment) = e.get().final_payment() {
|
||||
self.apply_payment(&payment);
|
||||
e.remove_entry();
|
||||
}
|
||||
};
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Process a Witness Timestamp.
|
||||
fn apply_timestamp(&self, from: PublicKey, dt: DateTime<Utc>) -> Result<()> {
|
||||
// If this is the first timestamp we've seen, it probably came from the genesis block,
|
||||
// so we'll trust it.
|
||||
if *self.last_time
|
||||
.read()
|
||||
.expect("'last_time' read lock on first timestamp check")
|
||||
== Utc.timestamp(0, 0)
|
||||
{
|
||||
self.time_sources
|
||||
.write()
|
||||
.expect("'time_sources' write lock on first timestamp")
|
||||
.insert(from);
|
||||
}
|
||||
|
||||
if self.time_sources
|
||||
.read()
|
||||
.expect("'time_sources' read lock")
|
||||
.contains(&from)
|
||||
{
|
||||
if dt > *self.last_time.read().expect("'last_time' read lock") {
|
||||
*self.last_time.write().expect("'last_time' write lock") = dt;
|
||||
}
|
||||
} else {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
// Check to see if any timelocked transactions can be completed.
|
||||
let mut completed = vec![];
|
||||
|
||||
// Hold 'pending' write lock until the end of this function. Otherwise another thread can
|
||||
// double-spend if it enters before the modified plan is removed from 'pending'.
|
||||
let mut pending = self.pending
|
||||
.write()
|
||||
.expect("'pending' write lock in apply_timestamp");
|
||||
for (key, plan) in pending.iter_mut() {
|
||||
plan.apply_witness(&Witness::Timestamp(*self.last_time
|
||||
.read()
|
||||
.expect("'last_time' read lock when creating timestamp")));
|
||||
if let Some(ref payment) = plan.final_payment() {
|
||||
self.apply_payment(payment);
|
||||
completed.push(key.clone());
|
||||
}
|
||||
}
|
||||
|
||||
for key in completed {
|
||||
pending.remove(&key);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Create, sign, and process a Transaction from `keypair` to `to` of
|
||||
/// `n` tokens where `last_id` is the last Entry ID observed by the client.
|
||||
pub fn transfer(
|
||||
&self,
|
||||
n: i64,
|
||||
keypair: &KeyPair,
|
||||
to: PublicKey,
|
||||
last_id: Hash,
|
||||
) -> Result<Signature> {
|
||||
let tx = Transaction::new(keypair, to, n, last_id);
|
||||
let sig = tx.sig;
|
||||
self.process_transaction(&tx).map(|_| sig)
|
||||
}
|
||||
|
||||
/// Create, sign, and process a postdated Transaction from `keypair`
|
||||
/// to `to` of `n` tokens on `dt` where `last_id` is the last Entry ID
|
||||
/// observed by the client.
|
||||
pub fn transfer_on_date(
|
||||
&self,
|
||||
n: i64,
|
||||
keypair: &KeyPair,
|
||||
to: PublicKey,
|
||||
dt: DateTime<Utc>,
|
||||
last_id: Hash,
|
||||
) -> Result<Signature> {
|
||||
let tx = Transaction::new_on_date(keypair, to, dt, n, last_id);
|
||||
let sig = tx.sig;
|
||||
self.process_transaction(&tx).map(|_| sig)
|
||||
}
|
||||
|
||||
pub fn get_balance(&self, pubkey: &PublicKey) -> Option<i64> {
|
||||
let bals = self.balances
|
||||
.read()
|
||||
.expect("'balances' read lock in get_balance");
|
||||
bals.get(pubkey).map(|x| x.load(Ordering::Relaxed) as i64)
|
||||
}
|
||||
|
||||
pub fn transaction_count(&self) -> usize {
|
||||
self.transaction_count.load(Ordering::Relaxed)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use bincode::serialize;
    use hash::hash;
    use signature::KeyPairUtil;

    #[test]
    fn test_bank() {
        let mint = Mint::new(10_000);
        let pubkey = KeyPair::new().pubkey();
        let bank = Bank::new(&mint);
        assert_eq!(bank.last_id(), mint.last_id());

        bank.transfer(1_000, &mint.keypair(), pubkey, mint.last_id())
            .unwrap();
        assert_eq!(bank.get_balance(&pubkey).unwrap(), 1_000);

        bank.transfer(500, &mint.keypair(), pubkey, mint.last_id())
            .unwrap();
        assert_eq!(bank.get_balance(&pubkey).unwrap(), 1_500);
        assert_eq!(bank.transaction_count(), 2);
    }

    #[test]
    fn test_invalid_tokens() {
        let mint = Mint::new(1);
        let pubkey = KeyPair::new().pubkey();
        let bank = Bank::new(&mint);
        assert_eq!(
            bank.transfer(-1, &mint.keypair(), pubkey, mint.last_id()),
            Err(BankError::NegativeTokens)
        );
        assert_eq!(bank.transaction_count(), 0);
    }

    #[test]
    fn test_account_not_found() {
        let mint = Mint::new(1);
        let bank = Bank::new(&mint);
        let keypair = KeyPair::new();
        assert_eq!(
            bank.transfer(1, &keypair, mint.pubkey(), mint.last_id()),
            Err(BankError::AccountNotFound(keypair.pubkey()))
        );
        assert_eq!(bank.transaction_count(), 0);
    }

    #[test]
    fn test_invalid_transfer() {
        let mint = Mint::new(11_000);
        let bank = Bank::new(&mint);
        let pubkey = KeyPair::new().pubkey();
        bank.transfer(1_000, &mint.keypair(), pubkey, mint.last_id())
            .unwrap();
        assert_eq!(bank.transaction_count(), 1);
        assert_eq!(
            bank.transfer(10_001, &mint.keypair(), pubkey, mint.last_id()),
            Err(BankError::InsufficientFunds(mint.pubkey()))
        );
        assert_eq!(bank.transaction_count(), 1);

        let mint_pubkey = mint.keypair().pubkey();
        assert_eq!(bank.get_balance(&mint_pubkey).unwrap(), 10_000);
        assert_eq!(bank.get_balance(&pubkey).unwrap(), 1_000);
    }

    #[test]
    fn test_transfer_to_newb() {
        let mint = Mint::new(10_000);
        let bank = Bank::new(&mint);
        let pubkey = KeyPair::new().pubkey();
        bank.transfer(500, &mint.keypair(), pubkey, mint.last_id())
            .unwrap();
        assert_eq!(bank.get_balance(&pubkey).unwrap(), 500);
    }

    #[test]
    fn test_transfer_on_date() {
        let mint = Mint::new(1);
        let bank = Bank::new(&mint);
        let pubkey = KeyPair::new().pubkey();
        let dt = Utc::now();
        bank.transfer_on_date(1, &mint.keypair(), pubkey, dt, mint.last_id())
            .unwrap();

        // Mint's balance will be zero because all funds are locked up.
        assert_eq!(bank.get_balance(&mint.pubkey()), Some(0));

        // tx count is 1, because debits were applied.
        assert_eq!(bank.transaction_count(), 1);

        // pubkey's balance will be None because the funds have not been
        // sent.
        assert_eq!(bank.get_balance(&pubkey), None);

        // Now, acknowledge that the time in the condition occurred and
        // that pubkey's funds are now available.
        bank.apply_timestamp(mint.pubkey(), dt).unwrap();
        assert_eq!(bank.get_balance(&pubkey), Some(1));

        // tx count is still 1, because we chose not to count timestamp
        // transactions.
        assert_eq!(bank.transaction_count(), 1);

        bank.apply_timestamp(mint.pubkey(), dt).unwrap(); // <-- Attack! Attempt to process completed transaction.
        assert_ne!(bank.get_balance(&pubkey), Some(2));
    }

    #[test]
    fn test_transfer_after_date() {
        let mint = Mint::new(1);
        let bank = Bank::new(&mint);
        let pubkey = KeyPair::new().pubkey();
        let dt = Utc::now();
        bank.apply_timestamp(mint.pubkey(), dt).unwrap();

        // It's now past now, so this transfer should be processed immediately.
        bank.transfer_on_date(1, &mint.keypair(), pubkey, dt, mint.last_id())
            .unwrap();

        assert_eq!(bank.get_balance(&mint.pubkey()), Some(0));
        assert_eq!(bank.get_balance(&pubkey), Some(1));
    }

    #[test]
    fn test_cancel_transfer() {
        let mint = Mint::new(1);
        let bank = Bank::new(&mint);
        let pubkey = KeyPair::new().pubkey();
        let dt = Utc::now();
        let sig = bank.transfer_on_date(1, &mint.keypair(), pubkey, dt, mint.last_id())
            .unwrap();

        // Assert the debit counts as a transaction.
        assert_eq!(bank.transaction_count(), 1);

        // Mint's balance will be zero because all funds are locked up.
        assert_eq!(bank.get_balance(&mint.pubkey()), Some(0));

        // pubkey's balance will be None because the funds have not been
        // sent.
        assert_eq!(bank.get_balance(&pubkey), None);

        // Now, cancel the transaction. Mint gets her funds back, pubkey never sees them.
        bank.apply_signature(mint.pubkey(), sig).unwrap();
        assert_eq!(bank.get_balance(&mint.pubkey()), Some(1));
        assert_eq!(bank.get_balance(&pubkey), None);

        // Assert cancel doesn't cause the count to go backward.
        assert_eq!(bank.transaction_count(), 1);

        bank.apply_signature(mint.pubkey(), sig).unwrap(); // <-- Attack! Attempt to cancel completed transaction.
        assert_ne!(bank.get_balance(&mint.pubkey()), Some(2));
    }

    #[test]
    fn test_duplicate_transaction_signature() {
        let mint = Mint::new(1);
        let bank = Bank::new(&mint);
        let sig = Signature::default();
        assert!(
            bank.reserve_signature_with_last_id(&sig, &mint.last_id())
                .is_ok()
        );
        assert_eq!(
            bank.reserve_signature_with_last_id(&sig, &mint.last_id()),
            Err(BankError::DuplicateSiganture(sig))
        );
    }

    #[test]
    fn test_forget_signature() {
        let mint = Mint::new(1);
        let bank = Bank::new(&mint);
        let sig = Signature::default();
        bank.reserve_signature_with_last_id(&sig, &mint.last_id())
            .unwrap();
        bank.forget_signature_with_last_id(&sig, &mint.last_id());
        assert!(
            bank.reserve_signature_with_last_id(&sig, &mint.last_id())
                .is_ok()
        );
    }

    #[test]
    fn test_max_entry_ids() {
        let mint = Mint::new(1);
        let bank = Bank::new(&mint);
        let sig = Signature::default();
        for i in 0..MAX_ENTRY_IDS {
            let last_id = hash(&serialize(&i).unwrap()); // Unique hash
            bank.register_entry_id(&last_id);
        }
        // Assert we're no longer able to use the oldest entry ID.
        assert_eq!(
            bank.reserve_signature_with_last_id(&sig, &mint.last_id()),
            Err(BankError::LastIdNotFound(mint.last_id()))
        );
    }

    #[test]
    fn test_debits_before_credits() {
        let mint = Mint::new(2);
        let bank = Bank::new(&mint);
        let keypair = KeyPair::new();
        let tx0 = Transaction::new(&mint.keypair(), keypair.pubkey(), 2, mint.last_id());
        let tx1 = Transaction::new(&keypair, mint.pubkey(), 1, mint.last_id());
        let txs = vec![tx0, tx1];
        let results = bank.process_transactions(txs);
        assert!(results[1].is_err());

        // Assert bad transactions aren't counted.
        assert_eq!(bank.transaction_count(), 1);
    }
}

#[cfg(all(feature = "unstable", test))]
mod bench {
    extern crate test;
    use self::test::Bencher;
    use bank::*;
    use bincode::serialize;
    use hash::hash;
    use signature::KeyPairUtil;

    #[bench]
    fn bench_process_transaction(bencher: &mut Bencher) {
        let mint = Mint::new(100_000_000);
        let bank = Bank::new(&mint);
        // Create transactions between unrelated parties.
        let transactions: Vec<_> = (0..4096)
            .into_par_iter()
            .map(|i| {
                // Seed the 'from' account.
                let rando0 = KeyPair::new();
                let tx = Transaction::new(&mint.keypair(), rando0.pubkey(), 1_000, mint.last_id());
                bank.process_transaction(&tx).unwrap();

                // Seed the 'to' account and a cell for its signature.
                let last_id = hash(&serialize(&i).unwrap()); // Unique hash
                bank.register_entry_id(&last_id);

                let rando1 = KeyPair::new();
                let tx = Transaction::new(&rando0, rando1.pubkey(), 1, last_id);
                bank.process_transaction(&tx).unwrap();

                // Finally, return a transaction that's unique.
                Transaction::new(&rando0, rando1.pubkey(), 1, last_id)
            })
            .collect();
        bencher.iter(|| {
            // Since the benchmarker runs this multiple times, we need to clear the signatures.
            for sigs in bank.last_ids.read().unwrap().iter() {
                sigs.1.write().unwrap().clear();
            }

            assert!(
                bank.process_transactions(transactions.clone())
                    .iter()
                    .all(|x| x.is_ok())
            );
        });
    }
}
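
The tests above exercise the postdated-transfer API end to end. As a reading aid, here is a minimal sketch of the cancelable-payment flow, using only the Bank, Mint, and KeyPair APIs shown in this diff (the wrapper function name is hypothetical):

use chrono::Utc;

fn cancelable_payment_sketch() {
    let mint = Mint::new(1);
    let bank = Bank::new(&mint);
    let payee = KeyPair::new().pubkey();
    let dt = Utc::now();

    // Lock the mint's single token up until `dt`; keep the signature as a cancel handle.
    let sig = bank.transfer_on_date(1, &mint.keypair(), payee, dt, mint.last_id())
        .unwrap();

    // Until a timestamp witness arrives, the payee has no balance entry.
    assert_eq!(bank.get_balance(&payee), None);

    // Cancel with the signature witness: the funds return to the mint.
    bank.apply_signature(mint.pubkey(), sig).unwrap();
    assert_eq!(bank.get_balance(&mint.pubkey()), Some(1));
}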
src/banking_stage.rs (Normal file, 300 lines)
@@ -0,0 +1,300 @@
//! The `banking_stage` processes Transaction messages.

use bank::Bank;
use bincode::deserialize;
use packet;
use packet::SharedPackets;
use rayon::prelude::*;
use record_stage::Signal;
use result::Result;
use std::net::SocketAddr;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::{channel, Receiver, Sender};
use std::sync::Arc;
use std::thread::{spawn, JoinHandle};
use std::time::Duration;
use std::time::Instant;
use timing;
use transaction::Transaction;

pub struct BankingStage {
    pub thread_hdl: JoinHandle<()>,
    pub signal_receiver: Receiver<Signal>,
}

impl BankingStage {
    pub fn new(
        bank: Arc<Bank>,
        exit: Arc<AtomicBool>,
        verified_receiver: Receiver<Vec<(SharedPackets, Vec<u8>)>>,
        packet_recycler: packet::PacketRecycler,
    ) -> Self {
        let (signal_sender, signal_receiver) = channel();
        let thread_hdl = spawn(move || loop {
            let e = Self::process_packets(
                bank.clone(),
                &verified_receiver,
                &signal_sender,
                &packet_recycler,
            );
            if e.is_err() {
                if exit.load(Ordering::Relaxed) {
                    break;
                }
            }
        });
        BankingStage {
            thread_hdl,
            signal_receiver,
        }
    }

    fn deserialize_transactions(p: &packet::Packets) -> Vec<Option<(Transaction, SocketAddr)>> {
        p.packets
            .par_iter()
            .map(|x| {
                deserialize(&x.data[0..x.meta.size])
                    .map(|req| (req, x.meta.addr()))
                    .ok()
            })
            .collect()
    }

    fn process_packets(
        bank: Arc<Bank>,
        verified_receiver: &Receiver<Vec<(SharedPackets, Vec<u8>)>>,
        signal_sender: &Sender<Signal>,
        packet_recycler: &packet::PacketRecycler,
    ) -> Result<()> {
        let timer = Duration::new(1, 0);
        let recv_start = Instant::now();
        let mms = verified_receiver.recv_timeout(timer)?;
        let mut reqs_len = 0;
        let mms_len = mms.len();
        info!(
            "@{:?} process start stalled for: {:?}ms batches: {}",
            timing::timestamp(),
            timing::duration_as_ms(&recv_start.elapsed()),
            mms.len(),
        );
        let proc_start = Instant::now();
        for (msgs, vers) in mms {
            let transactions = Self::deserialize_transactions(&msgs.read().unwrap());
            reqs_len += transactions.len();
            let transactions = transactions
                .into_iter()
                .zip(vers)
                .filter_map(|(tx, ver)| match tx {
                    None => None,
                    Some((tx, _addr)) => if tx.verify_plan() && ver != 0 {
                        Some(tx)
                    } else {
                        None
                    },
                })
                .collect();

            debug!("process_transactions");
            let results = bank.process_transactions(transactions);
            let transactions = results.into_iter().filter_map(|x| x.ok()).collect();
            signal_sender.send(Signal::Events(transactions))?;
            debug!("done process_transactions");

            packet_recycler.recycle(msgs);
        }
        let total_time_s = timing::duration_as_s(&proc_start.elapsed());
        let total_time_ms = timing::duration_as_ms(&proc_start.elapsed());
        info!(
            "@{:?} done processing transaction batches: {} time: {:?}ms reqs: {} reqs/s: {}",
            timing::timestamp(),
            mms_len,
            total_time_ms,
            reqs_len,
            (reqs_len as f32) / (total_time_s)
        );
        Ok(())
    }
}

// TODO: When banking is pulled out of RequestStage, add this test back in.

//use bank::Bank;
//use entry::Entry;
//use hash::Hash;
//use record_stage::RecordStage;
//use record_stage::Signal;
//use result::Result;
//use std::sync::mpsc::{channel, Sender};
//use std::sync::{Arc, Mutex};
//use std::time::Duration;
//use transaction::Transaction;
//
//#[cfg(test)]
//mod tests {
//    use bank::Bank;
//    use mint::Mint;
//    use signature::{KeyPair, KeyPairUtil};
//    use transaction::Transaction;
//
//    #[test]
//    // TODO: Move this test into banking_stage. Calling process_transactions() directly
//    // defeats the purpose of this test.
//    fn test_banking_sequential_consistency() {
//        // In this attack we'll demonstrate that a verifier can interpret the ledger
//        // differently if either the server doesn't signal the ledger to add an
//        // Entry OR if the verifier tries to parallelize across multiple Entries.
//        let mint = Mint::new(2);
//        let bank = Bank::new(&mint);
//        let banking_stage = EventProcessor::new(bank, &mint.last_id(), None);
//
//        // Process a batch that includes a transaction that receives two tokens.
//        let alice = KeyPair::new();
//        let tx = Transaction::new(&mint.keypair(), alice.pubkey(), 2, mint.last_id());
//        let transactions = vec![tx];
//        let entry0 = banking_stage.process_transactions(transactions).unwrap();
//
//        // Process a second batch that spends one of those tokens.
//        let tx = Transaction::new(&alice, mint.pubkey(), 1, mint.last_id());
//        let transactions = vec![tx];
//        let entry1 = banking_stage.process_transactions(transactions).unwrap();
//
//        // Collect the ledger and feed it to a new bank.
//        let entries = vec![entry0, entry1];
//
//        // Assert the user holds one token, not two. If the server only outputs one
//        // entry, then the second transaction will be rejected, because it drives
//        // the account balance below zero before the credit is added.
//        let bank = Bank::new(&mint);
//        for entry in entries {
//            assert!(
//                bank
//                    .process_transactions(entry.transactions)
//                    .into_iter()
//                    .all(|x| x.is_ok())
//            );
//        }
//        assert_eq!(bank.get_balance(&alice.pubkey()), Some(1));
//    }
//}
//
//#[cfg(all(feature = "unstable", test))]
//mod bench {
//    extern crate test;
//    use self::test::Bencher;
//    use bank::{Bank, MAX_ENTRY_IDS};
//    use bincode::serialize;
//    use hash::hash;
//    use mint::Mint;
//    use rayon::prelude::*;
//    use signature::{KeyPair, KeyPairUtil};
//    use std::collections::HashSet;
//    use std::time::Instant;
//    use transaction::Transaction;
//
//    #[bench]
//    fn bench_process_transactions(_bencher: &mut Bencher) {
//        let mint = Mint::new(100_000_000);
//        let bank = Bank::new(&mint);
//        // Create transactions between unrelated parties.
//        let txs = 100_000;
//        let last_ids: Mutex<HashSet<Hash>> = Mutex::new(HashSet::new());
//        let transactions: Vec<_> = (0..txs)
//            .into_par_iter()
//            .map(|i| {
//                // Seed the 'to' account and a cell for its signature.
//                let dummy_id = i % (MAX_ENTRY_IDS as i32);
//                let last_id = hash(&serialize(&dummy_id).unwrap()); // Semi-unique hash
//                {
//                    let mut last_ids = last_ids.lock().unwrap();
//                    if !last_ids.contains(&last_id) {
//                        last_ids.insert(last_id);
//                        bank.register_entry_id(&last_id);
//                    }
//                }
//
//                // Seed the 'from' account.
//                let rando0 = KeyPair::new();
//                let tx = Transaction::new(&mint.keypair(), rando0.pubkey(), 1_000, last_id);
//                bank.process_transaction(&tx).unwrap();
//
//                let rando1 = KeyPair::new();
//                let tx = Transaction::new(&rando0, rando1.pubkey(), 2, last_id);
//                bank.process_transaction(&tx).unwrap();
//
//                // Finally, return a transaction that's unique.
//                Transaction::new(&rando0, rando1.pubkey(), 1, last_id)
//            })
//            .collect();
//
//        let banking_stage = EventProcessor::new(bank, &mint.last_id(), None);
//
//        let now = Instant::now();
//        assert!(banking_stage.process_transactions(transactions).is_ok());
//        let duration = now.elapsed();
//        let sec = duration.as_secs() as f64 + duration.subsec_nanos() as f64 / 1_000_000_000.0;
//        let tps = txs as f64 / sec;
//
//        // Ensure that all transactions were successfully logged.
//        drop(banking_stage.historian_input);
//        let entries: Vec<Entry> = banking_stage.output.lock().unwrap().iter().collect();
//        assert_eq!(entries.len(), 1);
//        assert_eq!(entries[0].transactions.len(), txs as usize);
//
//        println!("{} tps", tps);
//    }
//}

#[cfg(all(feature = "unstable", test))]
mod bench {
    extern crate test;
    use self::test::Bencher;
    use bank::*;
    use banking_stage::BankingStage;
    use mint::Mint;
    use packet::{to_packets, PacketRecycler};
    use record_stage::Signal;
    use signature::{KeyPair, KeyPairUtil};
    use std::iter;
    use std::sync::mpsc::channel;
    use std::sync::Arc;
    use transaction::Transaction;

    #[bench]
    fn bench_stage(bencher: &mut Bencher) {
        let tx = 100_usize;
        let mint = Mint::new(1_000_000_000);
        let pubkey = KeyPair::new().pubkey();

        let transactions: Vec<_> = (0..tx)
            .map(|i| Transaction::new(&mint.keypair(), pubkey, i as i64, mint.last_id()))
            .collect();

        let (verified_sender, verified_receiver) = channel();
        let (signal_sender, signal_receiver) = channel();
        let packet_recycler = PacketRecycler::default();
        let verified: Vec<_> = to_packets(&packet_recycler, transactions)
            .into_iter()
            .map(|x| {
                let len = (*x).read().unwrap().packets.len();
                (x, iter::repeat(1).take(len).collect())
            })
            .collect();

        bencher.iter(move || {
            let bank = Arc::new(Bank::new(&mint));
            verified_sender.send(verified.clone()).unwrap();
            BankingStage::process_packets(
                bank.clone(),
                &verified_receiver,
                &signal_sender,
                &packet_recycler,
            ).unwrap();
            let signal = signal_receiver.recv().unwrap();
            if let Signal::Events(ref transactions) = signal {
                assert_eq!(transactions.len(), tx);
            } else {
                assert!(false);
            }
        });
    }
}
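
For orientation, a minimal sketch of how a caller might wire up a BankingStage, following the bench above; in the full pipeline the verified_sender side is assumed to be fed by a signature-verification stage, which is not shown here:

use std::sync::atomic::AtomicBool;
use std::sync::mpsc::channel;
use std::sync::Arc;

fn banking_stage_sketch(bank: Bank) {
    let bank = Arc::new(bank);
    let exit = Arc::new(AtomicBool::new(false));
    let (_verified_sender, verified_receiver) = channel();
    let packet_recycler = packet::PacketRecycler::default();
    let stage = BankingStage::new(bank, exit, verified_receiver, packet_recycler);
    // Each Signal::Events(..) received here carries the transactions the bank
    // accepted from one batch of verified packets.
    let _signal = stage.signal_receiver.recv();
}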
src/bin/client-demo.rs (Normal file, 277 lines)
@@ -0,0 +1,277 @@
extern crate futures;
extern crate getopts;
extern crate isatty;
extern crate pnet;
extern crate rayon;
extern crate serde_json;
extern crate solana;

use futures::Future;
use getopts::Options;
use isatty::stdin_isatty;
use pnet::datalink;
use rayon::prelude::*;
use solana::crdt::{Crdt, ReplicatedData};
use solana::mint::MintDemo;
use solana::signature::{GenKeys, KeyPair, KeyPairUtil};
use solana::streamer::default_window;
use solana::thin_client::ThinClient;
use solana::transaction::Transaction;
use std::env;
use std::fs::File;
use std::io::{stdin, Read};
use std::net::{IpAddr, SocketAddr, UdpSocket};
use std::process::exit;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, RwLock};
use std::thread::sleep;
use std::thread::JoinHandle;
use std::time::Duration;
use std::time::Instant;

fn print_usage(program: &str, opts: Options) {
    let mut brief = format!("Usage: cat <mint.json> | {} [options]\n\n", program);
    brief += "  Solana client demo creates a number of transactions and\n";
    brief += "  sends them to a target node.";
    brief += "  Takes json formatted mint file to stdin.";

    print!("{}", opts.usage(&brief));
}

fn get_ip_addr() -> Option<IpAddr> {
    for iface in datalink::interfaces() {
        for p in iface.ips {
            if !p.ip().is_loopback() && !p.ip().is_multicast() {
                return Some(p.ip());
            }
        }
    }
    None
}

fn main() {
    let mut threads = 4usize;
    let mut num_nodes = 10usize;
    let mut leader = "leader.json".to_string();

    let mut opts = Options::new();
    opts.optopt("l", "", "leader", "leader.json");
    opts.optopt("c", "", "client port", "port");
    opts.optopt("t", "", "number of threads", &format!("{}", threads));
    opts.optopt(
        "n",
        "",
        "number of nodes to converge to",
        &format!("{}", num_nodes),
    );
    opts.optflag("h", "help", "print help");
    let args: Vec<String> = env::args().collect();
    let matches = match opts.parse(&args[1..]) {
        Ok(m) => m,
        Err(e) => {
            eprintln!("{}", e);
            exit(1);
        }
    };

    if matches.opt_present("h") {
        let program = args[0].clone();
        print_usage(&program, opts);
        return;
    }
    if matches.opt_present("l") {
        leader = matches.opt_str("l").unwrap();
    }
    let mut addr: SocketAddr = "127.0.0.1:8010".parse().unwrap();
    if matches.opt_present("c") {
        let port = matches.opt_str("c").unwrap().parse().unwrap();
        addr.set_port(port);
    }
    addr.set_ip(get_ip_addr().unwrap());
    let client_addr: Arc<RwLock<SocketAddr>> = Arc::new(RwLock::new(addr));
    if matches.opt_present("t") {
        threads = matches.opt_str("t").unwrap().parse().expect("integer");
    }
    if matches.opt_present("n") {
        num_nodes = matches.opt_str("n").unwrap().parse().expect("integer");
    }

    let leader: ReplicatedData = read_leader(leader);
    let signal = Arc::new(AtomicBool::new(false));
    let mut c_threads = vec![];
    let validators = converge(
        &client_addr,
        &leader,
        signal.clone(),
        num_nodes + 2,
        &mut c_threads,
    );

    if stdin_isatty() {
        eprintln!("nothing found on stdin, expected a json file");
        exit(1);
    }

    let mut buffer = String::new();
    let num_bytes = stdin().read_to_string(&mut buffer).unwrap();
    if num_bytes == 0 {
        eprintln!("empty file on stdin, expected a json file");
        exit(1);
    }

    println!("Parsing stdin...");
    let demo: MintDemo = serde_json::from_str(&buffer).unwrap_or_else(|e| {
        eprintln!("failed to parse json: {}", e);
        exit(1);
    });
    let mut client = mk_client(&client_addr, &leader);

    println!("Get last ID...");
    let last_id = client.get_last_id().wait().unwrap();
    println!("Got last ID {:?}", last_id);

    let rnd = GenKeys::new(demo.mint.keypair().public_key_bytes());

    println!("Creating keypairs...");
    let txs = demo.num_accounts / 2;
    let keypairs = rnd.gen_n_keypairs(demo.num_accounts);
    let keypair_pairs: Vec<_> = keypairs.chunks(2).collect();

    println!("Signing transactions...");
    let now = Instant::now();
    let transactions: Vec<_> = keypair_pairs
        .into_par_iter()
        .map(|chunk| Transaction::new(&chunk[0], chunk[1].pubkey(), 1, last_id))
        .collect();
    let duration = now.elapsed();
    let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
    let bsps = txs as f64 / ns as f64;
    let nsps = ns as f64 / txs as f64;
    println!(
        "Done. {} thousand signatures per second, {}us per signature",
        bsps * 1_000_000_f64,
        nsps / 1_000_f64
    );

    let first_count = client.transaction_count();
    println!("initial count {}", first_count);

    println!("Transferring {} transactions in {} batches", txs, threads);
    let sz = transactions.len() / threads;
    let chunks: Vec<_> = transactions.chunks(sz).collect();
    chunks.into_par_iter().for_each(|txs| {
        println!("Transferring 1 unit {} times...", txs.len());
        let client = mk_client(&client_addr, &leader);
        for tx in txs {
            client.transfer_signed(tx.clone()).unwrap();
        }
    });

    println!("Sampling tps every second...");
    validators.into_par_iter().for_each(|val| {
        let mut client = mk_client(&client_addr, &val);
        let mut now = Instant::now();
        let mut initial_tx_count = client.transaction_count();
        for i in 0..100 {
            let tx_count = client.transaction_count();
            let duration = now.elapsed();
            now = Instant::now();
            let sample = tx_count - initial_tx_count;
            initial_tx_count = tx_count;
            println!(
                "{}: Transactions processed {}",
                val.transactions_addr, sample
            );
            let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
            let tps = (sample * 1_000_000_000) as f64 / ns as f64;
            println!("{}: {} tps", val.transactions_addr, tps);
            let total = tx_count - first_count;
            println!(
                "{}: Total Transactions processed {}",
                val.transactions_addr, total
            );
            if total == transactions.len() as u64 {
                break;
            }
            if i > 20 && sample == 0 {
                break;
            }
            sleep(Duration::new(1, 0));
        }
    });
    signal.store(true, Ordering::Relaxed);
    for t in c_threads {
        t.join().unwrap();
    }
}

fn mk_client(locked_addr: &Arc<RwLock<SocketAddr>>, r: &ReplicatedData) -> ThinClient {
    let mut addr = locked_addr.write().unwrap();
    let port = addr.port();
    let transactions_socket = UdpSocket::bind(addr.clone()).unwrap();
    addr.set_port(port + 1);
    let requests_socket = UdpSocket::bind(addr.clone()).unwrap();
    addr.set_port(port + 2);
    ThinClient::new(
        r.requests_addr,
        requests_socket,
        r.transactions_addr,
        transactions_socket,
    )
}

fn spy_node(client_addr: &Arc<RwLock<SocketAddr>>) -> (ReplicatedData, UdpSocket) {
    let mut addr = client_addr.write().unwrap();
    let port = addr.port();
    let gossip = UdpSocket::bind(addr.clone()).unwrap();
    addr.set_port(port + 1);
    let daddr = "0.0.0.0:0".parse().unwrap();
    let pubkey = KeyPair::new().pubkey();
    let node = ReplicatedData::new(pubkey, gossip.local_addr().unwrap(), daddr, daddr, daddr);
    (node, gossip)
}

fn converge(
    client_addr: &Arc<RwLock<SocketAddr>>,
    leader: &ReplicatedData,
    exit: Arc<AtomicBool>,
    num_nodes: usize,
    threads: &mut Vec<JoinHandle<()>>,
) -> Vec<ReplicatedData> {
    // Let's spy on the network.
    let daddr = "0.0.0.0:0".parse().unwrap();
    let (spy, spy_gossip) = spy_node(client_addr);
    let mut spy_crdt = Crdt::new(spy);
    spy_crdt.insert(&leader);
    spy_crdt.set_leader(leader.id);
    let spy_ref = Arc::new(RwLock::new(spy_crdt));
    let spy_window = default_window();
    let t_spy_listen = Crdt::listen(spy_ref.clone(), spy_window, spy_gossip, exit.clone());
    let t_spy_gossip = Crdt::gossip(spy_ref.clone(), exit.clone());
    // Wait for the network to converge.
    for _ in 0..30 {
        let min = spy_ref.read().unwrap().convergence();
        if num_nodes as u64 == min {
            println!("converged!");
            break;
        }
        sleep(Duration::new(1, 0));
    }
    threads.push(t_spy_listen);
    threads.push(t_spy_gossip);
    let v: Vec<ReplicatedData> = spy_ref
        .read()
        .unwrap()
        .table
        .values()
        .filter(|x| x.requests_addr != daddr)
        .cloned()
        .collect();
    v
}

fn read_leader(path: String) -> ReplicatedData {
    let file = File::open(path).expect("file");
    serde_json::from_reader(file).expect("parse")
}
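
A worked example of the signing-rate arithmetic used above, with hypothetical numbers: 50_000 transactions signed in 2.5 seconds give

let txs = 50_000u64;
let ns = 2_500_000_000u64;                 // 2.5 s in nanoseconds
let bsps = txs as f64 / ns as f64;         // signatures per nanosecond
let nsps = ns as f64 / txs as f64;         // nanoseconds per signature
assert_eq!(bsps * 1_000_000_f64, 20.0);    // "20 thousand signatures per second"
assert_eq!(nsps / 1_000_f64, 50.0);        // 50us per signature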
src/bin/fullnode.rs (Normal file, 246 lines)
@@ -0,0 +1,246 @@
extern crate env_logger;
extern crate getopts;
extern crate isatty;
extern crate pnet;
extern crate serde_json;
extern crate solana;

use getopts::Options;
use isatty::stdin_isatty;
use pnet::datalink;
use solana::bank::Bank;
use solana::crdt::ReplicatedData;
use solana::entry::Entry;
use solana::payment_plan::PaymentPlan;
use solana::server::Server;
use solana::signature::{KeyPair, KeyPairUtil};
use solana::transaction::Instruction;
use std::env;
use std::fs::File;
use std::io::{stdin, Read};
use std::net::{IpAddr, SocketAddr, UdpSocket};
use std::process::exit;
use std::sync::atomic::AtomicBool;
use std::sync::Arc;
//use std::time::Duration;

fn print_usage(program: &str, opts: Options) {
    let mut brief = format!("Usage: cat <transaction.log> | {} [options]\n\n", program);
    brief += "  Run a Solana node to handle transactions and\n";
    brief += "  write a new transaction log to stdout.\n";
    brief += "  Takes existing transaction log from stdin.";

    print!("{}", opts.usage(&brief));
}

fn main() {
    env_logger::init().unwrap();
    let mut opts = Options::new();
    opts.optopt("b", "", "bind", "bind to port or address");
    opts.optflag("d", "dyn", "detect network address dynamically");
    opts.optopt("s", "", "save", "save my identity to path.json");
    opts.optopt("l", "", "load", "load my identity from path.json");
    opts.optflag("h", "help", "print help");
    opts.optopt(
        "v",
        "",
        "validator",
        "run as replicate with path to leader.json",
    );
    let args: Vec<String> = env::args().collect();
    let matches = match opts.parse(&args[1..]) {
        Ok(m) => m,
        Err(e) => {
            eprintln!("{}", e);
            exit(1);
        }
    };
    if matches.opt_present("h") {
        let program = args[0].clone();
        print_usage(&program, opts);
        return;
    }
    let bind_addr: SocketAddr = {
        let mut bind_addr = parse_port_or_addr(matches.opt_str("b"));
        if matches.opt_present("d") {
            let ip = get_ip_addr().unwrap();
            bind_addr.set_ip(ip);
        }
        bind_addr
    };
    if stdin_isatty() {
        eprintln!("nothing found on stdin, expected a log file");
        exit(1);
    }

    let mut buffer = String::new();
    let num_bytes = stdin().read_to_string(&mut buffer).unwrap();
    if num_bytes == 0 {
        eprintln!("empty file on stdin, expected a log file");
        exit(1);
    }

    eprintln!("Initializing...");
    let mut entries = buffer.lines().map(|line| {
        serde_json::from_str(&line).unwrap_or_else(|e| {
            eprintln!("failed to parse json: {}", e);
            exit(1);
        })
    });

    eprintln!("done parsing...");

    // The first item in the ledger is required to be an entry with zero num_hashes,
    // which implies its id can be used as the ledger's seed.
    let entry0 = entries.next().unwrap();

    // The second item in the ledger is a special transaction where the to and from
    // fields are the same. That entry should be treated as a deposit, not a
    // transfer to oneself.
    let entry1: Entry = entries.next().unwrap();
    let tx = &entry1.transactions[0];
    let deposit = if let Instruction::NewContract(contract) = &tx.instruction {
        contract.plan.final_payment()
    } else {
        None
    };

    eprintln!("creating bank...");

    let bank = Bank::new_from_deposit(&deposit.unwrap());
    bank.register_entry_id(&entry0.id);
    bank.register_entry_id(&entry1.id);

    eprintln!("processing entries...");

    let mut last_id = entry1.id;
    for entry in entries {
        last_id = entry.id;
        let results = bank.process_transactions(entry.transactions);
        for result in results {
            if let Err(e) = result {
                eprintln!("failed to process transaction {:?}", e);
                exit(1);
            }
        }
        bank.register_entry_id(&last_id);
    }

    eprintln!("creating networking stack...");

    let exit = Arc::new(AtomicBool::new(false));
    // We need all the receiving sockets to be bound within the expected
    // port range that we open on AWS.
    let mut repl_data = make_repl_data(&bind_addr);
    if matches.opt_present("l") {
        let path = matches.opt_str("l").unwrap();
        if let Ok(file) = File::open(path) {
            repl_data = serde_json::from_reader(file).expect("parse");
        }
    }
    let threads = if matches.opt_present("v") {
        eprintln!("starting validator... {}", repl_data.requests_addr);
        let path = matches.opt_str("v").unwrap();
        let file = File::open(path).expect("file");
        let leader = serde_json::from_reader(file).expect("parse");
        let s = Server::new_validator(
            bank,
            repl_data.clone(),
            UdpSocket::bind(repl_data.requests_addr).unwrap(),
            UdpSocket::bind("0.0.0.0:0").unwrap(),
            UdpSocket::bind(repl_data.replicate_addr).unwrap(),
            UdpSocket::bind(repl_data.gossip_addr).unwrap(),
            leader,
            exit.clone(),
        );
        s.thread_hdls
    } else {
        eprintln!("starting leader... {}", repl_data.requests_addr);
        repl_data.current_leader_id = repl_data.id.clone();
        let file = File::create("leader.log").expect("leader.log create");
        let server = Server::new_leader(
            bank,
            last_id,
            //Some(Duration::from_millis(1000)),
            None,
            repl_data.clone(),
            UdpSocket::bind(repl_data.requests_addr).unwrap(),
            UdpSocket::bind(repl_data.transactions_addr).unwrap(),
            UdpSocket::bind("0.0.0.0:0").unwrap(),
            UdpSocket::bind("0.0.0.0:0").unwrap(),
            UdpSocket::bind(repl_data.gossip_addr).unwrap(),
            exit.clone(),
            file,
        );
        server.thread_hdls
    };
    if matches.opt_present("s") {
        let path = matches.opt_str("s").unwrap();
        let file = File::create(path).expect("file");
        serde_json::to_writer(file, &repl_data).expect("serialize");
    }
    eprintln!("Ready. Listening on {}", repl_data.transactions_addr);

    for t in threads {
        t.join().expect("join");
    }
}

fn next_port(server_addr: &SocketAddr, nxt: u16) -> SocketAddr {
    let mut gossip_addr = server_addr.clone();
    gossip_addr.set_port(server_addr.port() + nxt);
    gossip_addr
}

fn make_repl_data(bind_addr: &SocketAddr) -> ReplicatedData {
    let transactions_addr = bind_addr.clone();
    let gossip_addr = next_port(&bind_addr, 1);
    let replicate_addr = next_port(&bind_addr, 2);
    let requests_addr = next_port(&bind_addr, 3);
    let pubkey = KeyPair::new().pubkey();
    ReplicatedData::new(
        pubkey,
        gossip_addr,
        replicate_addr,
        requests_addr,
        transactions_addr,
    )
}

fn parse_port_or_addr(optstr: Option<String>) -> SocketAddr {
    let daddr: SocketAddr = "0.0.0.0:8000".parse().expect("default socket address");
    if let Some(addrstr) = optstr {
        if let Ok(port) = addrstr.parse() {
            let mut addr = daddr.clone();
            addr.set_port(port);
            addr
        } else if let Ok(addr) = addrstr.parse() {
            addr
        } else {
            daddr
        }
    } else {
        daddr
    }
}

fn get_ip_addr() -> Option<IpAddr> {
    for iface in datalink::interfaces() {
        for p in iface.ips {
            if !p.ip().is_loopback() && !p.ip().is_multicast() {
                return Some(p.ip());
            }
        }
    }
    None
}

#[test]
fn test_parse_port_or_addr() {
    let p1 = parse_port_or_addr(Some("9000".to_string()));
    assert_eq!(p1.port(), 9000);
    let p2 = parse_port_or_addr(Some("127.0.0.1:7000".to_string()));
    assert_eq!(p2.port(), 7000);
    let p3 = parse_port_or_addr(None);
    assert_eq!(p3.port(), 8000);
}
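
Under the default bind address, make_repl_data above lays the node's services out on consecutive ports; a quick sketch of the resulting layout:

let bind: std::net::SocketAddr = "0.0.0.0:8000".parse().unwrap();
let repl = make_repl_data(&bind);
// transactions on 8000, gossip on 8001, replicate on 8002, requests on 8003
assert_eq!(repl.transactions_addr.port(), 8000);
assert_eq!(repl.gossip_addr.port(), 8001);
assert_eq!(repl.replicate_addr.port(), 8002);
assert_eq!(repl.requests_addr.port(), 8003);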
src/bin/genesis-demo.rs (Normal file, 73 lines)
@@ -0,0 +1,73 @@
extern crate isatty;
extern crate rayon;
extern crate serde_json;
extern crate solana;

use isatty::stdin_isatty;
use rayon::prelude::*;
use solana::bank::MAX_ENTRY_IDS;
use solana::entry::{next_entry, Entry};
use solana::mint::MintDemo;
use solana::signature::{GenKeys, KeyPairUtil};
use solana::transaction::Transaction;
use std::io::{stdin, Read};
use std::process::exit;

// Generate a ledger with lots and lots of accounts.
fn main() {
    if stdin_isatty() {
        eprintln!("nothing found on stdin, expected a json file");
        exit(1);
    }

    let mut buffer = String::new();
    let num_bytes = stdin().read_to_string(&mut buffer).unwrap();
    if num_bytes == 0 {
        eprintln!("empty file on stdin, expected a json file");
        exit(1);
    }

    let demo: MintDemo = serde_json::from_str(&buffer).unwrap_or_else(|e| {
        eprintln!("failed to parse json: {}", e);
        exit(1);
    });

    let rnd = GenKeys::new(demo.mint.keypair().public_key_bytes());
    let num_accounts = demo.num_accounts;
    let tokens_per_user = 1_000;

    let keypairs = rnd.gen_n_keypairs(num_accounts);

    let mint_keypair = demo.mint.keypair();
    let last_id = demo.mint.last_id();

    eprintln!("Signing {} transactions...", num_accounts);
    let transactions: Vec<_> = keypairs
        .into_par_iter()
        .map(|rando| {
            let last_id = demo.mint.last_id();
            Transaction::new(&mint_keypair, rando.pubkey(), tokens_per_user, last_id)
        })
        .collect();

    for entry in demo.mint.create_entries() {
        println!("{}", serde_json::to_string(&entry).unwrap());
    }

    eprintln!("Logging the creation of {} accounts...", num_accounts);
    let entry = Entry::new(&last_id, 0, transactions);
    println!("{}", serde_json::to_string(&entry).unwrap());

    eprintln!("Creating {} empty entries...", MAX_ENTRY_IDS);
    // Offer the client lots of entry IDs to use for each transaction's last_id.
    let mut last_id = last_id;
    for _ in 0..MAX_ENTRY_IDS {
        let entry = next_entry(&last_id, 1, vec![]);
        last_id = entry.id;
        let serialized = serde_json::to_string(&entry).unwrap_or_else(|e| {
            eprintln!("failed to serialize: {}", e);
            exit(1);
        });
        println!("{}", serialized);
    }
}
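
Each line this tool prints is one JSON-serialized Entry, so a consumer can rebuild the ledger line by line, exactly as fullnode.rs does above; a minimal sketch, where `buffer` is assumed to hold the tool's stdout:

let entries: Vec<Entry> = buffer
    .lines()
    .map(|line| serde_json::from_str(line).expect("valid Entry json"))
    .collect();
// Layout: the mint's entries, one account-creation entry, then MAX_ENTRY_IDS empty entries.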
src/bin/genesis.rs (Normal file, 36 lines)
@@ -0,0 +1,36 @@
//! A command-line executable for generating the chain's genesis block.

extern crate isatty;
extern crate serde_json;
extern crate solana;

use isatty::stdin_isatty;
use solana::mint::Mint;
use std::io::{stdin, Read};
use std::process::exit;

fn main() {
    if stdin_isatty() {
        eprintln!("nothing found on stdin, expected a json file");
        exit(1);
    }

    let mut buffer = String::new();
    let num_bytes = stdin().read_to_string(&mut buffer).unwrap();
    if num_bytes == 0 {
        eprintln!("empty file on stdin, expected a json file");
        exit(1);
    }

    let mint: Mint = serde_json::from_str(&buffer).unwrap_or_else(|e| {
        eprintln!("failed to parse json: {}", e);
        exit(1);
    });
    for x in mint.create_entries() {
        let serialized = serde_json::to_string(&x).unwrap_or_else(|e| {
            eprintln!("failed to serialize: {}", e);
            exit(1);
        });
        println!("{}", serialized);
    }
}
src/bin/mint-demo.rs (Normal file, 21 lines)
@@ -0,0 +1,21 @@
extern crate rayon;
extern crate ring;
extern crate serde_json;
extern crate solana;

use solana::mint::{Mint, MintDemo};
use std::io;

fn main() {
    let mut input_text = String::new();
    io::stdin().read_line(&mut input_text).unwrap();
    let trimmed = input_text.trim();
    let tokens = trimmed.parse::<i64>().unwrap();

    let mint = Mint::new(tokens);
    let tokens_per_user = 1_000;
    let num_accounts = tokens / tokens_per_user;

    let demo = MintDemo { mint, num_accounts };
    println!("{}", serde_json::to_string(&demo).unwrap());
}
src/bin/mint.rs (Normal file, 29 lines)
@@ -0,0 +1,29 @@
extern crate isatty;
extern crate serde_json;
extern crate solana;

use isatty::stdin_isatty;
use solana::mint::Mint;
use std::io;
use std::process::exit;

fn main() {
    let mut input_text = String::new();
    if stdin_isatty() {
        eprintln!("nothing found on stdin, expected a token number");
        exit(1);
    }

    io::stdin().read_line(&mut input_text).unwrap();
    let trimmed = input_text.trim();
    let tokens = trimmed.parse::<i64>().unwrap_or_else(|e| {
        eprintln!("{}", e);
        exit(1);
    });
    let mint = Mint::new(tokens);
    let serialized = serde_json::to_string(&mint).unwrap_or_else(|e| {
        eprintln!("failed to serialize: {}", e);
        exit(1);
    });
    println!("{}", serialized);
}
src/budget.rs (Normal file, 168 lines)
@@ -0,0 +1,168 @@
//! The `budget` module provides a domain-specific language for payment plans. Users create Budget objects that
//! are given to an interpreter. The interpreter listens for `Witness` transactions,
//! which it uses to reduce the payment plan. When the budget is reduced to a
//! `Payment`, the payment is executed.

use chrono::prelude::*;
use payment_plan::{Payment, PaymentPlan, Witness};
use signature::PublicKey;
use std::mem;

#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub enum Condition {
    Timestamp(DateTime<Utc>),
    Signature(PublicKey),
}

impl Condition {
    /// Return true if the given Witness satisfies this Condition.
    pub fn is_satisfied(&self, witness: &Witness) -> bool {
        match (self, witness) {
            (&Condition::Signature(ref pubkey), &Witness::Signature(ref from)) => pubkey == from,
            (&Condition::Timestamp(ref dt), &Witness::Timestamp(ref last_time)) => dt <= last_time,
            _ => false,
        }
    }
}

#[repr(C)]
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub enum Budget {
    Pay(Payment),
    After(Condition, Payment),
    Race((Condition, Payment), (Condition, Payment)),
}

impl Budget {
    /// Create the simplest budget - one that pays `tokens` to PublicKey.
    pub fn new_payment(tokens: i64, to: PublicKey) -> Self {
        Budget::Pay(Payment { tokens, to })
    }

    /// Create a budget that pays `tokens` to `to` after being witnessed by `from`.
    pub fn new_authorized_payment(from: PublicKey, tokens: i64, to: PublicKey) -> Self {
        Budget::After(Condition::Signature(from), Payment { tokens, to })
    }

    /// Create a budget that pays `tokens` to `to` after the given DateTime.
    pub fn new_future_payment(dt: DateTime<Utc>, tokens: i64, to: PublicKey) -> Self {
        Budget::After(Condition::Timestamp(dt), Payment { tokens, to })
    }

    /// Create a budget that pays `tokens` to `to` after the given DateTime
    /// unless cancelled by `from`.
    pub fn new_cancelable_future_payment(
        dt: DateTime<Utc>,
        from: PublicKey,
        tokens: i64,
        to: PublicKey,
    ) -> Self {
        Budget::Race(
            (Condition::Timestamp(dt), Payment { tokens, to }),
            (Condition::Signature(from), Payment { tokens, to: from }),
        )
    }
}

impl PaymentPlan for Budget {
    /// Return Payment if the budget requires no additional Witnesses.
    fn final_payment(&self) -> Option<Payment> {
        match *self {
            Budget::Pay(ref payment) => Some(payment.clone()),
            _ => None,
        }
    }

    /// Return true if the budget spends exactly `spendable_tokens`.
    fn verify(&self, spendable_tokens: i64) -> bool {
        match *self {
            Budget::Pay(ref payment) | Budget::After(_, ref payment) => {
                payment.tokens == spendable_tokens
            }
            Budget::Race(ref a, ref b) => {
                a.1.tokens == spendable_tokens && b.1.tokens == spendable_tokens
            }
        }
    }

    /// Apply a witness to the budget to see if the budget can be reduced.
    /// If so, modify the budget in-place.
    fn apply_witness(&mut self, witness: &Witness) {
        let new_payment = match *self {
            Budget::After(ref cond, ref payment) if cond.is_satisfied(witness) => Some(payment),
            Budget::Race((ref cond, ref payment), _) if cond.is_satisfied(witness) => Some(payment),
            Budget::Race(_, (ref cond, ref payment)) if cond.is_satisfied(witness) => Some(payment),
            _ => None,
        }.cloned();

        if let Some(payment) = new_payment {
            mem::replace(self, Budget::Pay(payment));
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_signature_satisfied() {
        let sig = PublicKey::default();
        assert!(Condition::Signature(sig).is_satisfied(&Witness::Signature(sig)));
    }

    #[test]
    fn test_timestamp_satisfied() {
        let dt1 = Utc.ymd(2014, 11, 14).and_hms(8, 9, 10);
        let dt2 = Utc.ymd(2014, 11, 14).and_hms(10, 9, 8);
        assert!(Condition::Timestamp(dt1).is_satisfied(&Witness::Timestamp(dt1)));
        assert!(Condition::Timestamp(dt1).is_satisfied(&Witness::Timestamp(dt2)));
        assert!(!Condition::Timestamp(dt2).is_satisfied(&Witness::Timestamp(dt1)));
    }

    #[test]
    fn test_verify() {
        let dt = Utc.ymd(2014, 11, 14).and_hms(8, 9, 10);
        let from = PublicKey::default();
        let to = PublicKey::default();
        assert!(Budget::new_payment(42, to).verify(42));
        assert!(Budget::new_authorized_payment(from, 42, to).verify(42));
        assert!(Budget::new_future_payment(dt, 42, to).verify(42));
        assert!(Budget::new_cancelable_future_payment(dt, from, 42, to).verify(42));
    }

    #[test]
    fn test_authorized_payment() {
        let from = PublicKey::default();
        let to = PublicKey::default();

        let mut budget = Budget::new_authorized_payment(from, 42, to);
        budget.apply_witness(&Witness::Signature(from));
        assert_eq!(budget, Budget::new_payment(42, to));
    }

    #[test]
    fn test_future_payment() {
        let dt = Utc.ymd(2014, 11, 14).and_hms(8, 9, 10);
        let to = PublicKey::default();

        let mut budget = Budget::new_future_payment(dt, 42, to);
        budget.apply_witness(&Witness::Timestamp(dt));
        assert_eq!(budget, Budget::new_payment(42, to));
    }

    #[test]
    fn test_cancelable_future_payment() {
        let dt = Utc.ymd(2014, 11, 14).and_hms(8, 9, 10);
        let from = PublicKey::default();
        let to = PublicKey::default();

        let mut budget = Budget::new_cancelable_future_payment(dt, from, 42, to);
        budget.apply_witness(&Witness::Timestamp(dt));
        assert_eq!(budget, Budget::new_payment(42, to));

        let mut budget = Budget::new_cancelable_future_payment(dt, from, 42, to);
        budget.apply_witness(&Witness::Signature(from));
        assert_eq!(budget, Budget::new_payment(42, from));
    }
}
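
One behavior worth calling out: a Race budget reduces to whichever condition is witnessed first, and later witnesses are then ignored. A minimal sketch using the same fixtures as the tests above:

let dt = Utc.ymd(2014, 11, 14).and_hms(8, 9, 10);
let from = PublicKey::default();
let to = PublicKey::default();

let mut budget = Budget::new_cancelable_future_payment(dt, from, 42, to);
budget.apply_witness(&Witness::Signature(from));   // cancel wins the race
budget.apply_witness(&Witness::Timestamp(dt));     // no-op: already reduced to Pay
assert_eq!(budget.final_payment(), Some(Payment { tokens: 42, to: from }));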
src/crdt.rs (Normal file, 744 lines)
@@ -0,0 +1,744 @@
//! The `crdt` module defines a data structure that is shared by all the nodes in the network over
//! a gossip control plane. The goal is to share small bits of off-chain information and detect and
//! repair partitions.
//!
//! This CRDT only supports a very limited set of types: a map of PublicKey -> Versioned Struct.
//! The last version is always picked during an update.
//!
//! The network is arranged in layers:
//!
//! * layer 0 - Leader.
//! * layer 1 - As many nodes as we can fit.
//! * layer 2 - Everyone else; if layer 1 is `2^10` nodes, layer 2 should be able to hold `2^20` nodes.
//!
//! Bank needs to provide an interface for us to query the stake weight.

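// A worked example of the fanout implied by the layers above (hypothetical
// numbers): with a layer-1 fanout of 2^10 nodes, each layer-1 node
// retransmitting to 2^10 peers of its own covers 2^10 * 2^10 = 2^20
// layer-2 nodes in two hops.
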
use bincode::{deserialize, serialize};
|
||||
use byteorder::{LittleEndian, ReadBytesExt};
|
||||
use hash::Hash;
|
||||
use packet::{SharedBlob, BLOB_SIZE};
|
||||
use rayon::prelude::*;
|
||||
use result::{Error, Result};
|
||||
use ring::rand::{SecureRandom, SystemRandom};
|
||||
use signature::{PublicKey, Signature};
|
||||
use std;
|
||||
use std::collections::HashMap;
|
||||
use std::io::Cursor;
|
||||
use std::net::{SocketAddr, UdpSocket};
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::sync::{Arc, RwLock};
|
||||
use std::thread::{sleep, spawn, JoinHandle};
|
||||
use std::time::Duration;
|
||||
|
||||
/// Structure to be replicated by the network
|
||||
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||
pub struct ReplicatedData {
|
||||
pub id: PublicKey,
|
||||
sig: Signature,
|
||||
/// should always be increasing
|
||||
version: u64,
|
||||
/// address to connect to for gossip
|
||||
pub gossip_addr: SocketAddr,
|
||||
/// address to connect to for replication
|
||||
pub replicate_addr: SocketAddr,
|
||||
/// address to connect to when this node is leader
|
||||
pub requests_addr: SocketAddr,
|
||||
/// transactions address
|
||||
pub transactions_addr: SocketAddr,
|
||||
/// current leader identity
|
||||
pub current_leader_id: PublicKey,
|
||||
/// last verified hash that was submitted to the leader
|
||||
last_verified_hash: Hash,
|
||||
/// last verified count, always increasing
|
||||
last_verified_count: u64,
|
||||
}
|
||||
|
||||
impl ReplicatedData {
|
||||
pub fn new(
|
||||
id: PublicKey,
|
||||
gossip_addr: SocketAddr,
|
||||
replicate_addr: SocketAddr,
|
||||
requests_addr: SocketAddr,
|
||||
transactions_addr: SocketAddr,
|
||||
) -> ReplicatedData {
|
||||
ReplicatedData {
|
||||
id,
|
||||
sig: Signature::default(),
|
||||
version: 0,
|
||||
gossip_addr,
|
||||
replicate_addr,
|
||||
requests_addr,
|
||||
transactions_addr,
|
||||
current_leader_id: PublicKey::default(),
|
||||
last_verified_hash: Hash::default(),
|
||||
last_verified_count: 0,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// `Crdt` structure keeps a table of `ReplicatedData` structs
|
||||
/// # Properties
|
||||
/// * `table` - map of public id's to versioned and signed ReplicatedData structs
|
||||
/// * `local` - map of public id's to what `self.update_index` `self.table` was updated
|
||||
/// * `remote` - map of public id's to the `remote.update_index` was sent
|
||||
/// * `update_index` - my update index
|
||||
/// # Remarks
|
||||
/// This implements two services, `gossip` and `listen`.
|
||||
/// * `gossip` - asynchronously ask nodes to send updates
|
||||
/// * `listen` - listen for requests and responses
|
||||
/// No attempt to keep track of timeouts or dropped requests is made, or should be.
|
||||
pub struct Crdt {
|
||||
pub table: HashMap<PublicKey, ReplicatedData>,
|
||||
/// Value of my update index when entry in table was updated.
|
||||
/// Nodes will ask for updates since `update_index`, and this node
|
||||
/// should respond with all the identities that are greater then the
|
||||
/// request's `update_index` in this list
|
||||
local: HashMap<PublicKey, u64>,
|
||||
/// The value of the remote update index that I have last seen
|
||||
/// This Node will ask external nodes for updates since the value in this list
|
||||
pub remote: HashMap<PublicKey, u64>,
|
||||
pub update_index: u64,
|
||||
pub me: PublicKey,
|
||||
timeout: Duration,
|
||||
}
|
||||
// TODO These messages should be signed, and go through the gpu pipeline for spam filtering
|
||||
#[derive(Serialize, Deserialize)]
|
||||
enum Protocol {
|
||||
/// forward your own latest data structure when requesting an update
|
||||
/// this doesn't update the `remote` update index, but it allows the
|
||||
/// recepient of this request to add knowledge of this node to the network
|
||||
RequestUpdates(u64, ReplicatedData),
|
||||
//TODO might need a since?
|
||||
/// from id, form's last update index, ReplicatedData
|
||||
ReceiveUpdates(PublicKey, u64, Vec<ReplicatedData>),
|
||||
/// ask for a missing index
|
||||
RequestWindowIndex(ReplicatedData, u64),
|
||||
}
|
||||
|
||||
impl Crdt {
|
||||
pub fn new(me: ReplicatedData) -> Crdt {
|
||||
assert_eq!(me.version, 0);
|
||||
let mut g = Crdt {
|
||||
table: HashMap::new(),
|
||||
local: HashMap::new(),
|
||||
remote: HashMap::new(),
|
||||
me: me.id,
|
||||
update_index: 1,
|
||||
timeout: Duration::from_millis(100),
|
||||
};
|
||||
g.local.insert(me.id, g.update_index);
|
||||
g.table.insert(me.id, me);
|
||||
g
|
||||
}
|
||||
pub fn my_data(&self) -> &ReplicatedData {
|
||||
&self.table[&self.me]
|
||||
}
|
||||
pub fn leader_data(&self) -> &ReplicatedData {
|
||||
&self.table[&self.table[&self.me].current_leader_id]
|
||||
}
|
||||
|
||||
pub fn set_leader(&mut self, key: PublicKey) -> () {
|
||||
let mut me = self.my_data().clone();
|
||||
me.current_leader_id = key;
|
||||
me.version += 1;
|
||||
self.insert(&me);
|
||||
}
|
||||
|
||||
pub fn insert(&mut self, v: &ReplicatedData) {
|
||||
// TODO check that last_verified types are always increasing
|
||||
if self.table.get(&v.id).is_none() || (v.version > self.table[&v.id].version) {
|
||||
//somehow we signed a message for our own identity with a higher version that
|
||||
// we have stored ourselves
|
||||
trace!("me: {:?}", self.me[0]);
|
||||
trace!("v.id: {:?}", v.id[0]);
|
||||
trace!("insert! {}", v.version);
|
||||
self.update_index += 1;
|
||||
let _ = self.table.insert(v.id.clone(), v.clone());
|
||||
let _ = self.local.insert(v.id, self.update_index);
|
||||
} else {
|
||||
trace!(
|
||||
"INSERT FAILED new.version: {} me.version: {}",
|
||||
v.version,
|
||||
self.table[&v.id].version
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
/// broadcast messages from the leader to layer 1 nodes
|
||||
/// # Remarks
|
||||
/// We need to avoid having obj locked while doing any io, such as the `send_to`
|
||||
pub fn broadcast(
|
||||
obj: &Arc<RwLock<Self>>,
|
||||
blobs: &Vec<SharedBlob>,
|
||||
s: &UdpSocket,
|
||||
transmit_index: &mut u64,
|
||||
) -> Result<()> {
|
||||
let (me, table): (ReplicatedData, Vec<ReplicatedData>) = {
|
||||
// copy to avoid locking during IO
|
||||
let robj = obj.read().expect("'obj' read lock in pub fn broadcast");
|
||||
trace!("broadcast table {}", robj.table.len());
|
||||
let cloned_table: Vec<ReplicatedData> = robj.table.values().cloned().collect();
|
||||
(robj.table[&robj.me].clone(), cloned_table)
|
||||
};
|
||||
let daddr = "0.0.0.0:0".parse().unwrap();
|
||||
let nodes: Vec<&ReplicatedData> = table
|
||||
.iter()
|
||||
.filter(|v| {
|
||||
if me.id == v.id {
|
||||
//filter myself
|
||||
false
|
||||
} else if v.replicate_addr == daddr {
|
||||
//filter nodes that are not listening
|
||||
false
|
||||
} else {
|
||||
trace!("broadcast node {}", v.replicate_addr);
|
||||
true
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
if nodes.len() < 1 {
|
||||
warn!("crdt too small");
|
||||
return Err(Error::CrdtTooSmall);
|
||||
}
|
||||
trace!("nodes table {}", nodes.len());
|
||||
trace!("blobs table {}", blobs.len());
|
||||
// enumerate all the blobs, those are the indices
|
||||
// transmit them to nodes, starting from a different node
|
||||
let orders: Vec<_> = blobs
|
||||
.iter()
|
||||
.enumerate()
|
||||
.zip(
|
||||
nodes
|
||||
.iter()
|
||||
.cycle()
|
||||
.skip((*transmit_index as usize) % nodes.len()),
|
||||
)
|
||||
.collect();
|
||||
trace!("orders table {}", orders.len());
|
||||
let errs: Vec<_> = orders
|
||||
.into_iter()
|
||||
.map(|((i, b), v)| {
|
||||
// only leader should be broadcasting
|
||||
assert!(me.current_leader_id != v.id);
|
||||
let mut blob = b.write().expect("'b' write lock in pub fn broadcast");
|
||||
blob.set_id(me.id).expect("set_id in pub fn broadcast");
|
||||
blob.set_index(*transmit_index + i as u64)
|
||||
.expect("set_index in pub fn broadcast");
|
||||
//TODO profile this, may need multiple sockets for par_iter
|
||||
trace!("broadcast {} to {}", blob.meta.size, v.replicate_addr);
|
||||
assert!(blob.meta.size < BLOB_SIZE);
|
||||
let e = s.send_to(&blob.data[..blob.meta.size], &v.replicate_addr);
|
||||
trace!("done broadcast {} to {}", blob.meta.size, v.replicate_addr);
|
||||
e
|
||||
})
|
||||
.collect();
|
||||
trace!("broadcast results {}", errs.len());
|
||||
for e in errs {
|
||||
match e {
|
||||
Err(e) => {
|
||||
error!("broadcast result {:?}", e);
|
||||
return Err(Error::IO(e));
|
||||
}
|
||||
_ => (),
|
||||
}
|
||||
*transmit_index += 1;
|
||||
}
|
||||
Ok(())
|
||||
}

    /// retransmit messages from the leader to layer 1 nodes
    /// # Remarks
    /// We need to avoid having obj locked while doing any io, such as the `send_to`
    pub fn retransmit(obj: &Arc<RwLock<Self>>, blob: &SharedBlob, s: &UdpSocket) -> Result<()> {
        let (me, table): (ReplicatedData, Vec<ReplicatedData>) = {
            // copy to avoid locking during IO
            let s = obj.read().expect("'obj' read lock in pub fn retransmit");
            (s.table[&s.me].clone(), s.table.values().cloned().collect())
        };
        blob.write()
            .unwrap()
            .set_id(me.id)
            .expect("set_id in pub fn retransmit");
        let rblob = blob.read().unwrap();
        let daddr = "0.0.0.0:0".parse().unwrap();
        let orders: Vec<_> = table
            .iter()
            .filter(|v| {
                if me.id == v.id {
                    false
                } else if me.current_leader_id == v.id {
                    trace!("skip retransmit to leader {:?}", v.id);
                    false
                } else if v.replicate_addr == daddr {
                    trace!("skip nodes that are not listening {:?}", v.id);
                    false
                } else {
                    true
                }
            })
            .collect();
        let errs: Vec<_> = orders
            .par_iter()
            .map(|v| {
                trace!(
                    "retransmit blob {} to {}",
                    rblob.get_index().unwrap(),
                    v.replicate_addr
                );
                //TODO profile this, may need multiple sockets for par_iter
                assert!(rblob.meta.size < BLOB_SIZE);
                s.send_to(&rblob.data[..rblob.meta.size], &v.replicate_addr)
            })
            .collect();
        for e in errs {
            match e {
                Err(e) => {
                    info!("retransmit error {:?}", e);
                    return Err(Error::IO(e));
                }
                _ => (),
            }
        }
        Ok(())
    }

    // max number of nodes that we could be converged to
    pub fn convergence(&self) -> u64 {
        let max = self.remote.values().len() as u64 + 1;
        self.remote.values().fold(max, |a, b| std::cmp::min(a, *b))
    }

    fn random() -> u64 {
        let rnd = SystemRandom::new();
        let mut buf = [0u8; 8];
        rnd.fill(&mut buf).expect("rnd.fill in pub fn random");
        let mut rdr = Cursor::new(&buf);
        rdr.read_u64::<LittleEndian>()
            .expect("rdr.read_u64 in fn random")
    }
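The byteorder round-trip in `random` can also be written with the standard library's `from_le_bytes`; a hedged equivalence sketch (not how the code above does it, just the same conversion):

fn main() {
    let buf = [1u8, 0, 0, 0, 0, 0, 0, 0]; // pretend these came from SystemRandom
    // same result as Cursor::new(&buf).read_u64::<LittleEndian>()
    assert_eq!(u64::from_le_bytes(buf), 1);
}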
    fn get_updates_since(&self, v: u64) -> (PublicKey, u64, Vec<ReplicatedData>) {
        //trace!("get updates since {}", v);
        let data = self.table
            .values()
            .filter(|x| self.local[&x.id] > v)
            .cloned()
            .collect();
        let id = self.me;
        let ups = self.update_index;
        (id, ups, data)
    }

    pub fn window_index_request(&self, ix: u64) -> Result<(SocketAddr, Vec<u8>)> {
        let daddr = "0.0.0.0:0".parse().unwrap();
        let valid: Vec<_> = self.table
            .values()
            .filter(|r| r.id != self.me && r.replicate_addr != daddr)
            .collect();
        if valid.is_empty() {
            return Err(Error::CrdtTooSmall);
        }
        let n = (Self::random() as usize) % valid.len();
        let addr = valid[n].gossip_addr.clone();
        let req = Protocol::RequestWindowIndex(self.table[&self.me].clone(), ix);
        let out = serialize(&req)?;
        Ok((addr, out))
    }
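`window_index_request` picks its target by reducing a random u64 modulo the number of eligible peers; a tiny sketch of just that selection (the peer names are hypothetical):

fn main() {
    let valid = ["peer-a", "peer-b", "peer-c"];
    let random: u64 = 0xdead_beef; // stand-in for Crdt::random()
    let n = (random as usize) % valid.len();
    println!("requesting window index from {}", valid[n]);
}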
    /// Create a random gossip request
    /// # Returns
    /// (A,B)
    /// * A - Address to send to
    /// * B - RequestUpdates protocol message
    fn gossip_request(&self) -> Result<(SocketAddr, Protocol)> {
        let options: Vec<_> = self.table.values().filter(|v| v.id != self.me).collect();
        if options.len() < 1 {
            trace!("crdt too small for gossip");
            return Err(Error::CrdtTooSmall);
        }
        let n = (Self::random() as usize) % options.len();
        let v = options[n].clone();
        let remote_update_index = *self.remote.get(&v.id).unwrap_or(&0);
        let req = Protocol::RequestUpdates(remote_update_index, self.table[&self.me].clone());
        Ok((v.gossip_addr, req))
    }

    /// Pick a node at random and try to get updated changes from it
    fn run_gossip(obj: &Arc<RwLock<Self>>) -> Result<()> {
        //TODO we need to keep track of stakes and weight the selection by stake size
        //TODO cache sockets

        // Lock the object only to do this operation and not for any longer
        // especially not when doing the `sock.send_to`
        let (remote_gossip_addr, req) = obj.read()
            .expect("'obj' read lock in fn run_gossip")
            .gossip_request()?;
        let sock = UdpSocket::bind("0.0.0.0:0")?;
        // TODO this will get chatty, so we need to first ask for the number of updates since
        // then only ask for specific data that we don't have
        let r = serialize(&req)?;
        trace!("sending gossip request to {}", remote_gossip_addr);
        sock.send_to(&r, remote_gossip_addr)?;
        Ok(())
    }

    /// Apply updates that we received from the identity `from`
    /// # Arguments
    /// * `from` - identity of the sender of the updates
    /// * `update_index` - the number of updates that `from` has completed and this set of `data` represents
    /// * `data` - the update data
    fn apply_updates(&mut self, from: PublicKey, update_index: u64, data: &[ReplicatedData]) {
        trace!("got updates {}", data.len());
        // TODO we need to punish/spam resist here
        // sig verify the whole update and slash anyone who sends a bad update
        for v in data {
            self.insert(&v);
        }
        *self.remote.entry(from).or_insert(update_index) = update_index;
    }
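The last line of `apply_updates` is an upsert: whatever update index was stored for `from` is overwritten with the newly reported one. A standalone sketch of that HashMap pattern:

use std::collections::HashMap;

fn main() {
    let mut remote: HashMap<&str, u64> = HashMap::new();
    // entry().or_insert() creates the slot if absent; the assignment overwrites either way
    *remote.entry("peer-a").or_insert(0) = 5;
    *remote.entry("peer-a").or_insert(0) = 7;
    assert_eq!(remote["peer-a"], 7);
}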
    /// Randomly pick a node and ask it for updates asynchronously
    pub fn gossip(obj: Arc<RwLock<Self>>, exit: Arc<AtomicBool>) -> JoinHandle<()> {
        spawn(move || loop {
            let _ = Self::run_gossip(&obj);
            if exit.load(Ordering::Relaxed) {
                return;
            }
            //TODO this should be a tuned parameter
            sleep(
                obj.read()
                    .expect("'obj' read lock in pub fn gossip")
                    .timeout,
            );
        })
    }
    fn run_window_request(
        window: &Arc<RwLock<Vec<Option<SharedBlob>>>>,
        sock: &UdpSocket,
        from: &ReplicatedData,
        ix: u64,
    ) -> Result<()> {
        let pos = (ix as usize) % window.read().unwrap().len();
        let mut outblob = vec![];
        if let &Some(ref blob) = &window.read().unwrap()[pos] {
            let rblob = blob.read().unwrap();
            let blob_ix = rblob.get_index().expect("run_window_request get_index");
            if blob_ix == ix {
                // copy to avoid doing IO inside the lock
                outblob.extend(&rblob.data[..rblob.meta.size]);
            }
        } else {
            assert!(window.read().unwrap()[pos].is_none());
            info!("failed RequestWindowIndex {} {}", ix, from.replicate_addr);
        }
        if outblob.len() > 0 {
            info!(
                "responding RequestWindowIndex {} {}",
                ix, from.replicate_addr
            );
            assert!(outblob.len() < BLOB_SIZE);
            sock.send_to(&outblob, from.replicate_addr)?;
        }
        Ok(())
    }
    /// Process messages from the network
    fn run_listen(
        obj: &Arc<RwLock<Self>>,
        window: &Arc<RwLock<Vec<Option<SharedBlob>>>>,
        sock: &UdpSocket,
    ) -> Result<()> {
        //TODO cache connections
        let mut buf = vec![0u8; BLOB_SIZE];
        trace!("recv_from on {}", sock.local_addr().unwrap());
        let (amt, src) = sock.recv_from(&mut buf)?;
        trace!("got request from {}", src);
        buf.resize(amt, 0);
        let r = deserialize(&buf)?;
        match r {
            // TODO sigverify these
            Protocol::RequestUpdates(v, reqdata) => {
                trace!("RequestUpdates {} from {}", v, src);
                let addr = reqdata.gossip_addr;
                // only lock for this call, don't lock during IO `sock.send_to` or `sock.recv_from`
                let (from, ups, data) = obj.read()
                    .expect("'obj' read lock in RequestUpdates")
                    .get_updates_since(v);
                trace!("get updates since response {} {}", v, data.len());
                let rsp = serialize(&Protocol::ReceiveUpdates(from, ups, data))?;
                trace!("send_to {}", addr);
                //TODO verify reqdata belongs to sender
                obj.write()
                    .expect("'obj' write lock in RequestUpdates")
                    .insert(&reqdata);
                assert!(rsp.len() < BLOB_SIZE);
                sock.send_to(&rsp, addr)
                    .expect("'sock.send_to' in RequestUpdates");
                trace!("send_to done!");
            }
            Protocol::ReceiveUpdates(from, ups, data) => {
                trace!("ReceiveUpdates {} from {}", ups, src);
                obj.write()
                    .expect("'obj' write lock in ReceiveUpdates")
                    .apply_updates(from, ups, &data);
            }
            Protocol::RequestWindowIndex(from, ix) => {
                //TODO verify from is signed
                obj.write().unwrap().insert(&from);
                let me = obj.read().unwrap().my_data().clone();
                trace!(
                    "received RequestWindowIndex {} {} myaddr {}",
                    ix,
                    from.replicate_addr,
                    me.replicate_addr
                );
                assert_ne!(from.replicate_addr, me.replicate_addr);
                let _ = Self::run_window_request(window, sock, &from, ix);
            }
        }
        Ok(())
    }
    pub fn listen(
        obj: Arc<RwLock<Self>>,
        window: Arc<RwLock<Vec<Option<SharedBlob>>>>,
        sock: UdpSocket,
        exit: Arc<AtomicBool>,
    ) -> JoinHandle<()> {
        sock.set_read_timeout(Some(Duration::new(2, 0)))
            .expect("'sock.set_read_timeout' in crdt.rs");
        spawn(move || loop {
            let e = Self::run_listen(&obj, &window, &sock);
            if e.is_err() {
                info!(
                    "run_listen timeout, table size: {}",
                    obj.read().unwrap().table.len()
                );
            }
            if exit.load(Ordering::Relaxed) {
                return;
            }
        })
    }
}

#[cfg(test)]
mod tests {
    use crdt::{Crdt, ReplicatedData};
    use logger;
    use packet::Blob;
    use rayon::iter::*;
    use signature::KeyPair;
    use signature::KeyPairUtil;
    use std::net::UdpSocket;
    use std::sync::atomic::{AtomicBool, Ordering};
    use std::sync::{Arc, RwLock};
    use std::thread::{sleep, JoinHandle};
    use std::time::Duration;

    fn test_node() -> (Crdt, UdpSocket, UdpSocket, UdpSocket) {
        let gossip = UdpSocket::bind("0.0.0.0:0").unwrap();
        let replicate = UdpSocket::bind("0.0.0.0:0").unwrap();
        let serve = UdpSocket::bind("0.0.0.0:0").unwrap();
        let transactions = UdpSocket::bind("0.0.0.0:0").unwrap();
        let pubkey = KeyPair::new().pubkey();
        let d = ReplicatedData::new(
            pubkey,
            gossip.local_addr().unwrap(),
            replicate.local_addr().unwrap(),
            serve.local_addr().unwrap(),
            transactions.local_addr().unwrap(),
        );
        let crdt = Crdt::new(d);
        trace!(
            "id: {} gossip: {} replicate: {} serve: {}",
            crdt.my_data().id[0],
            gossip.local_addr().unwrap(),
            replicate.local_addr().unwrap(),
            serve.local_addr().unwrap(),
        );
        (crdt, gossip, replicate, serve)
    }

    /// Test that the network converges.
    /// Run until every node in the network has a full ReplicatedData set.
    /// Check that nodes stop sending updates after all the ReplicatedData has been shared.
    /// Tests that actually use this function are below.
    fn run_gossip_topo<F>(topo: F)
    where
        F: Fn(&Vec<(Arc<RwLock<Crdt>>, JoinHandle<()>)>) -> (),
    {
        let num: usize = 5;
        let exit = Arc::new(AtomicBool::new(false));
        let listen: Vec<_> = (0..num)
            .map(|_| {
                let (crdt, gossip, _, _) = test_node();
                let c = Arc::new(RwLock::new(crdt));
                let w = Arc::new(RwLock::new(vec![]));
                let l = Crdt::listen(c.clone(), w, gossip, exit.clone());
                (c, l)
            })
            .collect();
        topo(&listen);
        let gossip: Vec<_> = listen
            .iter()
            .map(|&(ref c, _)| Crdt::gossip(c.clone(), exit.clone()))
            .collect();
        let mut done = true;
        for i in 0..(num * 32) {
            done = false;
            trace!("round {}", i);
            for &(ref c, _) in listen.iter() {
                if num == c.read().unwrap().convergence() as usize {
                    done = true;
                    break;
                }
            }
            //at least 1 node converged
            if done {
                break;
            }
            sleep(Duration::new(1, 0));
        }
        exit.store(true, Ordering::Relaxed);
        for j in gossip {
            j.join().unwrap();
        }
        for (c, j) in listen.into_iter() {
            j.join().unwrap();
            // make it clear what failed
            // protocol is too chatty, updates should stop after everyone receives `num`
            assert!(c.read().unwrap().update_index <= num as u64);
            // protocol is not chatty enough, everyone should get `num` entries
            assert_eq!(c.read().unwrap().table.len(), num);
        }
        assert!(done);
    }
    /// ring a -> b -> c -> d -> e -> a
    #[test]
    #[ignore]
    fn gossip_ring_test() {
        logger::setup();
        run_gossip_topo(|listen| {
            let num = listen.len();
            for n in 0..num {
                let y = n % listen.len();
                let x = (n + 1) % listen.len();
                let mut xv = listen[x].0.write().unwrap();
                let yv = listen[y].0.read().unwrap();
                let mut d = yv.table[&yv.me].clone();
                d.version = 0;
                xv.insert(&d);
            }
        });
    }

    /// star (b,c,d,e) -> a
    #[test]
    #[ignore]
    fn gossip_star_test() {
        run_gossip_topo(|listen| {
            let num = listen.len();
            for n in 0..(num - 1) {
                let x = 0;
                let y = (n + 1) % listen.len();
                let mut xv = listen[x].0.write().unwrap();
                let yv = listen[y].0.read().unwrap();
                let mut d = yv.table[&yv.me].clone();
                d.version = 0;
                xv.insert(&d);
            }
        });
    }

    /// Test that insert drops messages that are older
    #[test]
    fn insert_test() {
        let mut d = ReplicatedData::new(
            KeyPair::new().pubkey(),
            "127.0.0.1:1234".parse().unwrap(),
            "127.0.0.1:1235".parse().unwrap(),
            "127.0.0.1:1236".parse().unwrap(),
            "127.0.0.1:1237".parse().unwrap(),
        );
        assert_eq!(d.version, 0);
        let mut crdt = Crdt::new(d.clone());
        assert_eq!(crdt.table[&d.id].version, 0);
        d.version = 2;
        crdt.insert(&d);
        assert_eq!(crdt.table[&d.id].version, 2);
        d.version = 1;
        crdt.insert(&d);
        assert_eq!(crdt.table[&d.id].version, 2);
    }

    #[test]
    #[ignore]
    pub fn test_crdt_retransmit() {
        logger::setup();
        trace!("c1:");
        let (mut c1, s1, r1, e1) = test_node();
        trace!("c2:");
        let (mut c2, s2, r2, _) = test_node();
        trace!("c3:");
        let (mut c3, s3, r3, _) = test_node();
        let c1_id = c1.my_data().id;
        c1.set_leader(c1_id);

        c2.insert(&c1.my_data());
        c3.insert(&c1.my_data());

        c2.set_leader(c1.my_data().id);
        c3.set_leader(c1.my_data().id);

        let exit = Arc::new(AtomicBool::new(false));

        // Create listen threads
        let win1 = Arc::new(RwLock::new(vec![]));
        let a1 = Arc::new(RwLock::new(c1));
        let t1 = Crdt::listen(a1.clone(), win1, s1, exit.clone());

        let a2 = Arc::new(RwLock::new(c2));
        let win2 = Arc::new(RwLock::new(vec![]));
        let t2 = Crdt::listen(a2.clone(), win2, s2, exit.clone());

        let a3 = Arc::new(RwLock::new(c3));
        let win3 = Arc::new(RwLock::new(vec![]));
        let t3 = Crdt::listen(a3.clone(), win3, s3, exit.clone());

        // Create gossip threads
        let t1_gossip = Crdt::gossip(a1.clone(), exit.clone());
        let t2_gossip = Crdt::gossip(a2.clone(), exit.clone());
        let t3_gossip = Crdt::gossip(a3.clone(), exit.clone());

        //wait to converge
        trace!("waiting to converge:");
        let mut done = false;
        for _ in 0..30 {
            done = a1.read().unwrap().table.len() == 3 && a2.read().unwrap().table.len() == 3
                && a3.read().unwrap().table.len() == 3;
            if done {
                break;
            }
            sleep(Duration::new(1, 0));
        }
        assert!(done);
        let mut b = Blob::default();
        b.meta.size = 10;
        Crdt::retransmit(&a1, &Arc::new(RwLock::new(b)), &e1).unwrap();
        let res: Vec<_> = [r1, r2, r3]
            .into_par_iter()
            .map(|s| {
                let mut b = Blob::default();
                s.set_read_timeout(Some(Duration::new(1, 0))).unwrap();
                let res = s.recv_from(&mut b.data);
                res.is_err() //true if failed to receive the retransmit packet
            })
            .collect();
        //r1 was the sender, so it should fail to receive the packet;
        //r2 and r3 should succeed
        assert_eq!(res, [true, false, false]);
        exit.store(true, Ordering::Relaxed);
        let threads = vec![t1, t2, t3, t1_gossip, t2_gossip, t3_gossip];
        for t in threads.into_iter() {
            t.join().unwrap();
        }
    }
}
src/entry.rs (new file, 167 lines)
@@ -0,0 +1,167 @@
//! The `entry` module is a fundamental building block of Proof of History. It contains a
//! unique ID that is the hash of the Entry before it, plus the hash of the
//! transactions within it. Entries cannot be reordered, and its field `num_hashes`
//! represents an approximate amount of time since the last Entry was created.
use hash::{extend_and_hash, hash, Hash};
use rayon::prelude::*;
use transaction::Transaction;

/// Each Entry contains three pieces of data. The `num_hashes` field is the number
/// of hashes performed since the previous entry. The `id` field is the result
/// of hashing `id` from the previous entry `num_hashes` times. The `transactions`
/// field points to Events that took place shortly after `id` was generated.
///
/// If you divide `num_hashes` by the amount of time it takes to generate a new hash, you
/// get a duration estimate since the last Entry. Since processing power increases
/// over time, one should expect the duration `num_hashes` represents to decrease proportionally.
/// Though processing power varies across nodes, the network gives priority to the
/// fastest processor. Duration should therefore be estimated by assuming that the hash
/// was generated by the fastest processor at the time the entry was recorded.
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub struct Entry {
    pub num_hashes: u64,
    pub id: Hash,
    pub transactions: Vec<Transaction>,
}

impl Entry {
    /// Creates the next Entry `num_hashes` after `start_hash`.
    pub fn new(start_hash: &Hash, cur_hashes: u64, transactions: Vec<Transaction>) -> Self {
        let num_hashes = cur_hashes + if transactions.is_empty() { 0 } else { 1 };
        let id = next_hash(start_hash, 0, &transactions);
        Entry {
            num_hashes,
            id,
            transactions,
        }
    }

    /// Creates the next Entry `num_hashes` after `start_hash`, updating `start_hash`
    /// and resetting `cur_hashes` in place.
    pub fn new_mut(
        start_hash: &mut Hash,
        cur_hashes: &mut u64,
        transactions: Vec<Transaction>,
    ) -> Self {
        let entry = Self::new(start_hash, *cur_hashes, transactions);
        *start_hash = entry.id;
        *cur_hashes = 0;
        entry
    }

    /// Creates an Entry from the number of hashes `num_hashes` since the previous transaction
    /// and that resulting `id`.
    pub fn new_tick(num_hashes: u64, id: &Hash) -> Self {
        Entry {
            num_hashes,
            id: *id,
            transactions: vec![],
        }
    }

    /// Verifies self.id is the result of hashing a `start_hash` `self.num_hashes` times.
    /// If the transaction is not a Tick, then hash that as well.
    pub fn verify(&self, start_hash: &Hash) -> bool {
        self.transactions.par_iter().all(|tx| tx.verify_plan())
            && self.id == next_hash(start_hash, self.num_hashes, &self.transactions)
    }
}

fn add_transaction_data(hash_data: &mut Vec<u8>, tx: &Transaction) {
    hash_data.push(0u8);
    hash_data.extend_from_slice(&tx.sig);
}

/// Creates the hash `num_hashes` after `start_hash`. If the transaction contains
/// a signature, the final hash will be a hash of both the previous ID and
/// the signature.
pub fn next_hash(start_hash: &Hash, num_hashes: u64, transactions: &[Transaction]) -> Hash {
    let mut id = *start_hash;
    for _ in 1..num_hashes {
        id = hash(&id);
    }

    // Hash all the transaction data
    let mut hash_data = vec![];
    for tx in transactions {
        add_transaction_data(&mut hash_data, tx);
    }

    if !hash_data.is_empty() {
        extend_and_hash(&id, &hash_data)
    } else if num_hashes != 0 {
        hash(&id)
    } else {
        id
    }
}
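The chain `next_hash` extends is plain iterated hashing, so anyone can re-derive an id from a start hash and `num_hashes` alone. A toy re-derivation, assuming the sha2 crate's 0.10-style `digest` API rather than this repo's `hash` module:

use sha2::{Digest, Sha256};

fn main() {
    let num_hashes = 3;
    // id_n is the hash function applied num_hashes times to the starting hash
    let mut id: [u8; 32] = Sha256::digest(b"start_hash").into();
    for _ in 0..num_hashes {
        id = Sha256::digest(&id).into();
    }
    println!("id after {} extra hashes: {:02x?}", num_hashes, id);
}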

/// Creates the next Tick or Event Entry `num_hashes` after `start_hash`.
pub fn next_entry(start_hash: &Hash, num_hashes: u64, transactions: Vec<Transaction>) -> Entry {
    Entry {
        num_hashes,
        id: next_hash(start_hash, num_hashes, &transactions),
        transactions,
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use chrono::prelude::*;
    use entry::Entry;
    use hash::hash;
    use signature::{KeyPair, KeyPairUtil};
    use transaction::Transaction;

    #[test]
    fn test_entry_verify() {
        let zero = Hash::default();
        let one = hash(&zero);
        assert!(Entry::new_tick(0, &zero).verify(&zero)); // base case
        assert!(!Entry::new_tick(0, &zero).verify(&one)); // base case, bad
        assert!(next_entry(&zero, 1, vec![]).verify(&zero)); // inductive step
        assert!(!next_entry(&zero, 1, vec![]).verify(&one)); // inductive step, bad
    }

    #[test]
    fn test_transaction_reorder_attack() {
        let zero = Hash::default();

        // First, verify entries
        let keypair = KeyPair::new();
        let tx0 = Transaction::new(&keypair, keypair.pubkey(), 0, zero);
        let tx1 = Transaction::new(&keypair, keypair.pubkey(), 1, zero);
        let mut e0 = Entry::new(&zero, 0, vec![tx0.clone(), tx1.clone()]);
        assert!(e0.verify(&zero));

        // Next, swap two transactions and ensure verification fails.
        e0.transactions[0] = tx1; // <-- attack
        e0.transactions[1] = tx0;
        assert!(!e0.verify(&zero));
    }

    #[test]
    fn test_witness_reorder_attack() {
        let zero = Hash::default();

        // First, verify entries
        let keypair = KeyPair::new();
        let tx0 = Transaction::new_timestamp(&keypair, Utc::now(), zero);
        let tx1 = Transaction::new_signature(&keypair, Default::default(), zero);
        let mut e0 = Entry::new(&zero, 0, vec![tx0.clone(), tx1.clone()]);
        assert!(e0.verify(&zero));

        // Next, swap two witness transactions and ensure verification fails.
        e0.transactions[0] = tx1; // <-- attack
        e0.transactions[1] = tx0;
        assert!(!e0.verify(&zero));
    }

    #[test]
    fn test_next_entry() {
        let zero = Hash::default();
        let tick = next_entry(&zero, 1, vec![]);
        assert_eq!(tick.num_hashes, 1);
        assert_ne!(tick.id, zero);
    }
}
src/entry_writer.rs (new file, 80 lines)
@@ -0,0 +1,80 @@
//! The `entry_writer` module helps implement the TPU's write stage.

use bank::Bank;
use entry::Entry;
use ledger::Block;
use packet;
use result::Result;
use serde_json;
use std::collections::VecDeque;
use std::io::sink;
use std::io::Write;
use std::sync::mpsc::Receiver;
use std::sync::{Arc, Mutex};
use std::time::Duration;
use streamer;

pub struct EntryWriter<'a> {
    bank: &'a Bank,
}

impl<'a> EntryWriter<'a> {
    /// Create a new Tpu that wraps the given Bank.
    pub fn new(bank: &'a Bank) -> Self {
        EntryWriter { bank }
    }

    fn write_entry<W: Write>(&self, writer: &Mutex<W>, entry: &Entry) {
        trace!("write_entry entry");
        self.bank.register_entry_id(&entry.id);
        writeln!(
            writer.lock().expect("'writer' lock in fn write_entry"),
            "{}",
            serde_json::to_string(&entry).expect("'entry' to_string in fn write_entry")
        ).expect("writeln! in fn write_entry");
    }

    fn write_entries<W: Write>(
        &self,
        writer: &Mutex<W>,
        entry_receiver: &Receiver<Entry>,
    ) -> Result<Vec<Entry>> {
        //TODO implement a serialize for channel that does this without allocations
        let mut l = vec![];
        let entry = entry_receiver.recv_timeout(Duration::new(1, 0))?;
        self.write_entry(writer, &entry);
        l.push(entry);
        while let Ok(entry) = entry_receiver.try_recv() {
            self.write_entry(writer, &entry);
            l.push(entry);
        }
        Ok(l)
    }
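`write_entries` uses a common channel-draining shape: one blocking `recv_timeout` for the first item, then non-blocking `try_recv` for everything already queued. A standalone sketch of just that pattern:

use std::sync::mpsc::channel;
use std::time::Duration;

fn main() {
    let (sender, receiver) = channel();
    for i in 0..3 {
        sender.send(i).unwrap();
    }
    // block briefly for the first item, then drain whatever else is queued
    let mut batch = vec![receiver.recv_timeout(Duration::new(1, 0)).unwrap()];
    while let Ok(item) = receiver.try_recv() {
        batch.push(item);
    }
    assert_eq!(batch, vec![0, 1, 2]);
}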

    /// Process any Entry items that have been published by the Historian.
    /// Continuously broadcast blobs of entries out.
    pub fn write_and_send_entries<W: Write>(
        &self,
        broadcast: &streamer::BlobSender,
        blob_recycler: &packet::BlobRecycler,
        writer: &Mutex<W>,
        entry_receiver: &Receiver<Entry>,
    ) -> Result<()> {
        let mut q = VecDeque::new();
        let list = self.write_entries(writer, entry_receiver)?;
        trace!("New blobs? {}", list.len());
        list.to_blobs(blob_recycler, &mut q);
        if !q.is_empty() {
            trace!("broadcasting {}", q.len());
            broadcast.send(q)?;
        }
        Ok(())
    }

    /// Process any Entry items that have been published by the Historian,
    /// discarding the output instead of writing it.
    pub fn drain_entries(&self, entry_receiver: &Receiver<Entry>) -> Result<()> {
        self.write_entries(&Arc::new(Mutex::new(sink())), entry_receiver)?;
        Ok(())
    }
}
src/erasure.rs (new file, 430 lines)
@@ -0,0 +1,430 @@
// Support erasure coding

use packet::{BlobRecycler, SharedBlob};
use std::result;

//TODO(sakridge) pick these values
const NUM_CODED: usize = 10;
const MAX_MISSING: usize = 2;
const NUM_DATA: usize = NUM_CODED - MAX_MISSING;

#[derive(Debug, PartialEq, Eq)]
pub enum ErasureError {
    NotEnoughBlocksToDecode,
    DecodeError,
    InvalidBlockSize,
}

pub type Result<T> = result::Result<T, ErasureError>;

// k = number of data devices
// m = number of coding devices
// w = word size

extern "C" {
    fn jerasure_matrix_encode(
        k: i32,
        m: i32,
        w: i32,
        matrix: *const i32,
        data_ptrs: *const *const u8,
        coding_ptrs: *const *mut u8,
        size: i32,
    );
    fn jerasure_matrix_decode(
        k: i32,
        m: i32,
        w: i32,
        matrix: *const i32,
        row_k_ones: i32,
        erasures: *const i32,
        data_ptrs: *const *mut u8,
        coding_ptrs: *const *const u8,
        size: i32,
    ) -> i32;
    fn galois_single_divide(a: i32, b: i32, w: i32) -> i32;
}

fn get_matrix(m: i32, k: i32, w: i32) -> Vec<i32> {
    let mut matrix = vec![0; (m * k) as usize];
    for i in 0..m {
        for j in 0..k {
            unsafe {
                matrix[(i * k + j) as usize] = galois_single_divide(1, i ^ (m + j), w);
            }
        }
    }
    matrix
}

pub const ERASURE_W: i32 = 32;

// Generate coding blocks into coding
// There are some alignment restrictions, blocks should be aligned by 16 bytes
// which means their size should be >= 16 bytes
pub fn generate_coding_blocks(coding: &mut [&mut [u8]], data: &[&[u8]]) -> Result<()> {
    if data.len() == 0 {
        return Ok(());
    }
    let m = coding.len() as i32;
    let block_len = data[0].len();
    let matrix: Vec<i32> = get_matrix(m, data.len() as i32, ERASURE_W);
    let mut coding_arg = Vec::new();
    let mut data_arg = Vec::new();
    for block in data {
        if block_len != block.len() {
            return Err(ErasureError::InvalidBlockSize);
        }
        data_arg.push(block.as_ptr());
    }
    for mut block in coding {
        if block_len != block.len() {
            return Err(ErasureError::InvalidBlockSize);
        }
        coding_arg.push(block.as_mut_ptr());
    }

    unsafe {
        jerasure_matrix_encode(
            data.len() as i32,
            m,
            ERASURE_W,
            matrix.as_ptr(),
            data_arg.as_ptr(),
            coding_arg.as_ptr(),
            data[0].len() as i32,
        );
    }
    Ok(())
}

// Recover data + coding blocks into data blocks
// data: array of blocks to recover into
// coding: array of coding blocks
// erasures: list of indices in data where blocks should be recovered
pub fn decode_blocks(data: &mut [&mut [u8]], coding: &[&[u8]], erasures: &[i32]) -> Result<()> {
    if data.len() == 0 {
        return Ok(());
    }
    let block_len = data[0].len();
    let matrix: Vec<i32> = get_matrix(coding.len() as i32, data.len() as i32, ERASURE_W);

    // generate coding pointers, blocks should be the same size
    let mut coding_arg: Vec<*const u8> = Vec::new();
    for x in coding.iter() {
        if x.len() != block_len {
            return Err(ErasureError::InvalidBlockSize);
        }
        coding_arg.push(x.as_ptr());
    }

    // generate data pointers, blocks should be the same size
    let mut data_arg: Vec<*mut u8> = Vec::new();
    for x in data.iter_mut() {
        if x.len() != block_len {
            return Err(ErasureError::InvalidBlockSize);
        }
        data_arg.push(x.as_mut_ptr());
    }
    unsafe {
        let ret = jerasure_matrix_decode(
            data.len() as i32,
            coding.len() as i32,
            ERASURE_W,
            matrix.as_ptr(),
            0,
            erasures.as_ptr(),
            data_arg.as_ptr(),
            coding_arg.as_ptr(),
            data[0].len() as i32,
        );
        trace!("jerasure_matrix_decode ret: {}", ret);
        for x in data[erasures[0] as usize][0..8].iter() {
            trace!("{} ", x)
        }
        trace!("");
        if ret < 0 {
            return Err(ErasureError::DecodeError);
        }
    }
    Ok(())
}

// Generate coding blocks in window from consumed to consumed+NUM_DATA
pub fn generate_coding(
    re: &BlobRecycler,
    window: &mut Vec<Option<SharedBlob>>,
    consumed: usize,
) -> Result<()> {
    let mut data_blobs = Vec::new();
    let mut coding_blobs = Vec::new();
    let mut data_locks = Vec::new();
    let mut data_ptrs: Vec<&[u8]> = Vec::new();
    let mut coding_locks = Vec::new();
    let mut coding_ptrs: Vec<&mut [u8]> = Vec::new();
    for i in consumed..consumed + NUM_DATA {
        let n = i % window.len();
        data_blobs.push(
            window[n]
                .clone()
                .expect("'data_blobs' arr in pub fn generate_coding"),
        );
    }
    for b in &data_blobs {
        data_locks.push(b.write().expect("'b' write lock in pub fn generate_coding"));
    }
    for (i, l) in data_locks.iter_mut().enumerate() {
        trace!("i: {} data: {}", i, l.data[0]);
        data_ptrs.push(&l.data);
    }

    // generate coding ptr array
    let coding_start = consumed + NUM_DATA;
    let coding_end = consumed + NUM_CODED;
    for i in coding_start..coding_end {
        let n = i % window.len();
        window[n] = Some(re.allocate());
        coding_blobs.push(
            window[n]
                .clone()
                .expect("'coding_blobs' arr in pub fn generate_coding"),
        );
    }
    for b in &coding_blobs {
        coding_locks.push(
            b.write()
                .expect("'coding_locks' arr in pub fn generate_coding"),
        );
    }
    for (i, l) in coding_locks.iter_mut().enumerate() {
        trace!("i: {} data: {}", i, l.data[0]);
        coding_ptrs.push(&mut l.data);
    }

    generate_coding_blocks(coding_ptrs.as_mut_slice(), &data_ptrs)?;
    trace!("consumed: {}", consumed);
    Ok(())
}

// Recover missing blocks into window
// missing blocks should be None, will use re
// to allocate new ones. Returns err if not enough
// coding blocks are present to restore
pub fn recover(
    re: &BlobRecycler,
    window: &mut Vec<Option<SharedBlob>>,
    consumed: usize,
) -> Result<()> {
    //recover with erasure coding
    let mut data_missing = 0;
    let mut coded_missing = 0;
    let coding_start = consumed + NUM_DATA;
    let coding_end = consumed + NUM_CODED;
    for i in consumed..coding_end {
        let n = i % window.len();
        if window[n].is_none() {
            if i >= coding_start {
                coded_missing += 1;
            } else {
                data_missing += 1;
            }
        }
    }
    trace!("missing: data: {} coding: {}", data_missing, coded_missing);
    if data_missing > 0 {
        if (data_missing + coded_missing) <= MAX_MISSING {
            let mut blobs: Vec<SharedBlob> = Vec::new();
            let mut locks = Vec::new();
            let mut data_ptrs: Vec<&mut [u8]> = Vec::new();
            let mut coding_ptrs: Vec<&[u8]> = Vec::new();
            let mut erasures: Vec<i32> = Vec::new();
            for i in consumed..coding_end {
                let j = i % window.len();
                let mut b = &mut window[j];
                if b.is_some() {
                    blobs.push(b.clone().expect("'blobs' arr in pub fn recover"));
                    continue;
                }
                let n = re.allocate();
                *b = Some(n.clone());
                //mark the missing memory
                blobs.push(n);
                erasures.push((i - consumed) as i32);
            }
            erasures.push(-1);
            trace!("erasures: {:?}", erasures);
            //lock everything
            for b in &blobs {
                locks.push(b.write().expect("'locks' arr in pub fn recover"));
            }
            for (i, l) in locks.iter_mut().enumerate() {
                if i >= NUM_DATA {
                    trace!("pushing coding: {}", i);
                    coding_ptrs.push(&l.data);
                } else {
                    trace!("pushing data: {}", i);
                    data_ptrs.push(&mut l.data);
                }
            }
            trace!(
                "coding_ptrs.len: {} data_ptrs.len {}",
                coding_ptrs.len(),
                data_ptrs.len()
            );
            decode_blocks(data_ptrs.as_mut_slice(), &coding_ptrs, &erasures)?;
        } else {
            return Err(ErasureError::NotEnoughBlocksToDecode);
        }
    }
    Ok(())
}
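`recover` only attempts decoding when the erasure count is within budget: with NUM_CODED = 10 and MAX_MISSING = 2, a window segment missing more than two blobs (data or coding) is unrecoverable. A small sketch of that bookkeeping, independent of jerasure:

const NUM_CODED: usize = 10;
const MAX_MISSING: usize = 2;
const NUM_DATA: usize = NUM_CODED - MAX_MISSING;

fn recoverable(segment: &[Option<u8>]) -> bool {
    // count erasures across both data and coding positions
    segment.iter().filter(|b| b.is_none()).count() <= MAX_MISSING
}

fn main() {
    let mut segment: Vec<Option<u8>> = (0..NUM_CODED).map(|i| Some(i as u8)).collect();
    segment[1] = None; // one data blob lost
    segment[NUM_DATA + 1] = None; // one coding blob lost
    assert!(recoverable(&segment));
    segment[2] = None; // a third erasure exceeds MAX_MISSING
    assert!(!recoverable(&segment));
}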

#[cfg(test)]
mod test {
    use erasure;
    use packet::{BlobRecycler, SharedBlob, PACKET_DATA_SIZE};

    #[test]
    pub fn test_coding() {
        let zero_vec = vec![0; 16];
        let mut vs: Vec<Vec<u8>> = (0..4).map(|i| (i..(16 + i)).collect()).collect();
        let v_orig: Vec<u8> = vs[0].clone();

        let m = 2;
        let mut coding_blocks: Vec<_> = (0..m).map(|_| vec![0u8; 16]).collect();

        {
            let mut coding_blocks_slices: Vec<_> =
                coding_blocks.iter_mut().map(|x| x.as_mut_slice()).collect();
            let v_slices: Vec<_> = vs.iter().map(|x| x.as_slice()).collect();

            assert!(
                erasure::generate_coding_blocks(
                    coding_blocks_slices.as_mut_slice(),
                    v_slices.as_slice()
                ).is_ok()
            );
        }
        trace!("coding blocks:");
        for b in &coding_blocks {
            trace!("{:?}", b);
        }
        let erasure: i32 = 1;
        let erasures = vec![erasure, -1];
        // clear an entry
        vs[erasure as usize].copy_from_slice(zero_vec.as_slice());

        {
            let coding_blocks_slices: Vec<_> = coding_blocks.iter().map(|x| x.as_slice()).collect();
            let mut v_slices: Vec<_> = vs.iter_mut().map(|x| x.as_mut_slice()).collect();

            assert!(
                erasure::decode_blocks(
                    v_slices.as_mut_slice(),
                    coding_blocks_slices.as_slice(),
                    erasures.as_slice(),
                ).is_ok()
            );
        }

        trace!("vs:");
        for v in &vs {
            trace!("{:?}", v);
        }
        assert_eq!(v_orig, vs[0]);
    }

    fn print_window(window: &Vec<Option<SharedBlob>>) {
        for (i, w) in window.iter().enumerate() {
            print!("window({}): ", i);
            if w.is_some() {
                let window_lock = w.clone().unwrap();
                let window_data = window_lock.read().unwrap().data;
                for i in 0..8 {
                    print!("{} ", window_data[i]);
                }
            } else {
                print!("null");
            }
            println!("");
        }
    }

    #[test]
    pub fn test_window_recover() {
        let mut window = Vec::new();
        let blob_recycler = BlobRecycler::default();
        let offset = 4;
        for i in 0..(4 * erasure::NUM_CODED + 1) {
            let b = blob_recycler.allocate();
            let b_ = b.clone();
            let data_len = b.read().unwrap().data.len();
            let mut w = b.write().unwrap();
            w.set_index(i as u64).unwrap();
            assert_eq!(i as u64, w.get_index().unwrap());
            w.meta.size = PACKET_DATA_SIZE;
            for k in 0..data_len {
                w.data[k] = (k + i) as u8;
            }
            window.push(Some(b_));
        }
        println!("** after-gen:");
        print_window(&window);
        assert!(erasure::generate_coding(&blob_recycler, &mut window, offset).is_ok());
        assert!(
            erasure::generate_coding(&blob_recycler, &mut window, offset + erasure::NUM_CODED)
                .is_ok()
        );
        assert!(
            erasure::generate_coding(
                &blob_recycler,
                &mut window,
                offset + (2 * erasure::NUM_CODED)
            ).is_ok()
        );
        assert!(
            erasure::generate_coding(
                &blob_recycler,
                &mut window,
                offset + (3 * erasure::NUM_CODED)
            ).is_ok()
        );
        println!("** after-coding:");
        print_window(&window);
        let refwindow = window[offset + 1].clone();
        window[offset + 1] = None;
        window[offset + 2] = None;
        window[offset + erasure::NUM_CODED + 3] = None;
        window[offset + (2 * erasure::NUM_CODED) + 0] = None;
        window[offset + (2 * erasure::NUM_CODED) + 1] = None;
        window[offset + (2 * erasure::NUM_CODED) + 2] = None;
        let window_l0 = &(window[offset + (3 * erasure::NUM_CODED)]).clone().unwrap();
        window_l0.write().unwrap().data[0] = 55;
        println!("** after-nulling:");
        print_window(&window);
        assert!(erasure::recover(&blob_recycler, &mut window, offset).is_ok());
        assert!(erasure::recover(&blob_recycler, &mut window, offset + erasure::NUM_CODED).is_ok());
        assert!(
            erasure::recover(
                &blob_recycler,
                &mut window,
                offset + (2 * erasure::NUM_CODED)
            ).is_err()
        );
        assert!(
            erasure::recover(
                &blob_recycler,
                &mut window,
                offset + (3 * erasure::NUM_CODED)
            ).is_ok()
        );
        println!("** after-restore:");
        print_window(&window);
        let window_l = window[offset + 1].clone().unwrap();
        let ref_l = refwindow.clone().unwrap();
        assert_eq!(
            window_l.read().unwrap().data.to_vec(),
            ref_l.read().unwrap().data.to_vec()
        );
    }
}
src/event.rs (deleted, 151 lines)
@@ -1,151 +0,0 @@
//! The `event` crate provides the foundational data structures for Proof-of-History

/// A Proof-of-History is an ordered log of events in time. Each entry contains three
/// pieces of data. The 'num_hashes' field is the number of hashes performed since the previous
/// entry. The 'end_hash' field is the result of hashing 'end_hash' from the previous entry
/// 'num_hashes' times. The 'data' field is an optional foreign key (a hash) pointing to some
/// arbitrary data that a client is looking to associate with the entry.
///
/// If you divide 'num_hashes' by the amount of time it takes to generate a new hash, you
/// get a duration estimate since the last event. Since processing power increases
/// over time, one should expect the duration 'num_hashes' represents to decrease proportionally.
/// Though processing power varies across nodes, the network gives priority to the
/// fastest processor. Duration should therefore be estimated by assuming that the hash
/// was generated by the fastest processor at the time the entry was logged.
pub struct Event {
    pub num_hashes: u64,
    pub end_hash: u64,
    pub data: EventData,
}

/// When 'data' is Tick, the event represents a simple clock tick, and exists for the
/// sole purpose of improving the performance of event log verification. A tick can
/// be generated in 'num_hashes' hashes and verified in 'num_hashes' hashes. By logging
/// a hash alongside the tick, each tick can be verified in parallel using the 'end_hash'
/// of the preceding tick to seed its hashing.
pub enum EventData {
    Tick,
    UserDataKey(u64),
}

impl Event {
    /// Creates an Event from the number of hashes 'num_hashes' since the previous event
    /// and that resulting 'end_hash'.
    pub fn new_tick(num_hashes: u64, end_hash: u64) -> Self {
        let data = EventData::Tick;
        Event {
            num_hashes,
            end_hash,
            data,
        }
    }

    /// Verifies self.end_hash is the result of hashing a 'start_hash' 'self.num_hashes' times.
    pub fn verify(self: &Self, start_hash: u64) -> bool {
        self.end_hash == next_tick(start_hash, self.num_hashes).end_hash
    }
}

/// Creates the next Tick Event 'num_hashes' after 'start_hash'.
pub fn next_tick(start_hash: u64, num_hashes: u64) -> Event {
    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};
    let mut end_hash = start_hash;
    let mut hasher = DefaultHasher::new();
    for _ in 0..num_hashes {
        end_hash.hash(&mut hasher);
        end_hash = hasher.finish();
    }
    Event::new_tick(num_hashes, end_hash)
}

/// Verifies the hashes and counts of a slice of events are all consistent.
pub fn verify_slice(events: &[Event], start_hash: u64) -> bool {
    use rayon::prelude::*;
    let genesis = [Event::new_tick(0, start_hash)];
    let event_pairs = genesis.par_iter().chain(events).zip(events);
    event_pairs.all(|(x0, x1)| x1.verify(x0.end_hash))
}

/// Verifies the hashes and events serially. Exists only for reference.
pub fn verify_slice_seq(events: &[Event], start_hash: u64) -> bool {
    let genesis = [Event::new_tick(0, start_hash)];
    let mut event_pairs = genesis.iter().chain(events).zip(events);
    event_pairs.all(|(x0, x1)| x1.verify(x0.end_hash))
}

/// Create a vector of Ticks of length 'len' from 'start_hash' hash and 'num_hashes'.
pub fn create_ticks(start_hash: u64, num_hashes: u64, len: usize) -> Vec<Event> {
    use itertools::unfold;
    let mut events = unfold(start_hash, |state| {
        let event = next_tick(*state, num_hashes);
        *state = event.end_hash;
        return Some(event);
    });
    events.by_ref().take(len).collect()
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_event_verify() {
        assert!(Event::new_tick(0, 0).verify(0)); // base case
        assert!(!Event::new_tick(0, 0).verify(1)); // base case, bad
        assert!(next_tick(0, 1).verify(0)); // inductive step
        assert!(!next_tick(0, 1).verify(1)); // inductive step, bad
    }

    #[test]
    fn test_next_tick() {
        assert_eq!(next_tick(0, 1).num_hashes, 1)
    }

    fn verify_slice_generic(verify_slice: fn(&[Event], u64) -> bool) {
        assert!(verify_slice(&vec![], 0)); // base case
        assert!(verify_slice(&vec![Event::new_tick(0, 0)], 0)); // singleton case 1
        assert!(!verify_slice(&vec![Event::new_tick(0, 0)], 1)); // singleton case 2, bad
        assert!(verify_slice(&create_ticks(0, 0, 2), 0)); // inductive step

        let mut bad_ticks = create_ticks(0, 0, 2);
        bad_ticks[1].end_hash = 1;
        assert!(!verify_slice(&bad_ticks, 0)); // inductive step, bad
    }

    #[test]
    fn test_verify_slice() {
        verify_slice_generic(verify_slice);
    }

    #[test]
    fn test_verify_slice_seq() {
        verify_slice_generic(verify_slice_seq);
    }
}

#[cfg(all(feature = "unstable", test))]
mod bench {
    extern crate test;
    use self::test::Bencher;
    use event;

    #[bench]
    fn event_bench(bencher: &mut Bencher) {
        let start_hash = 0;
        let events = event::create_ticks(start_hash, 100_000, 8);
        bencher.iter(|| {
            assert!(event::verify_slice(&events, start_hash));
        });
    }

    #[bench]
    fn event_bench_seq(bencher: &mut Bencher) {
        let start_hash = 0;
        let events = event::create_ticks(start_hash, 100_000, 8);
        bencher.iter(|| {
            assert!(event::verify_slice_seq(&events, start_hash));
        });
    }
}
src/fetch_stage.rs (new file, 31 lines)
@@ -0,0 +1,31 @@
//! The `fetch_stage` batches input from a UDP socket and sends it to a channel.

use packet;
use std::net::UdpSocket;
use std::sync::atomic::AtomicBool;
use std::sync::mpsc::channel;
use std::sync::Arc;
use std::thread::JoinHandle;
use streamer;

pub struct FetchStage {
    pub packet_receiver: streamer::PacketReceiver,
    pub thread_hdl: JoinHandle<()>,
}

impl FetchStage {
    pub fn new(
        socket: UdpSocket,
        exit: Arc<AtomicBool>,
        packet_recycler: packet::PacketRecycler,
    ) -> Self {
        let (packet_sender, packet_receiver) = channel();
        let thread_hdl =
            streamer::receiver(socket, exit.clone(), packet_recycler.clone(), packet_sender);

        FetchStage {
            packet_receiver,
            thread_hdl,
        }
    }
}
src/hash.rs (new file, 21 lines)
@@ -0,0 +1,21 @@
//! The `hash` module provides functions for creating SHA-256 hashes.

use generic_array::typenum::U32;
use generic_array::GenericArray;
use sha2::{Digest, Sha256};

pub type Hash = GenericArray<u8, U32>;

/// Return a Sha256 hash for the given data.
pub fn hash(val: &[u8]) -> Hash {
    let mut hasher = Sha256::default();
    hasher.input(val);
    hasher.result()
}

/// Return the hash of the given hash extended with the given value.
pub fn extend_and_hash(id: &Hash, val: &[u8]) -> Hash {
    let mut hash_data = id.to_vec();
    hash_data.extend_from_slice(val);
    hash(&hash_data)
}
src/ledger.rs (new file, 203 lines)
@@ -0,0 +1,203 @@
//! The `ledger` module provides functions for parallel verification of the
//! Proof of History ledger.

use bincode::{deserialize, serialize_into};
use entry::{next_entry, Entry};
use hash::Hash;
use packet;
use packet::{SharedBlob, BLOB_DATA_SIZE, BLOB_SIZE};
use rayon::prelude::*;
use std::cmp::min;
use std::collections::VecDeque;
use std::io::Cursor;
use std::mem::size_of;
use transaction::Transaction;

pub trait Block {
    /// Verifies the hashes and counts of a slice of transactions are all consistent.
    fn verify(&self, start_hash: &Hash) -> bool;
    fn to_blobs(&self, blob_recycler: &packet::BlobRecycler, q: &mut VecDeque<SharedBlob>);
}

impl Block for [Entry] {
    fn verify(&self, start_hash: &Hash) -> bool {
        let genesis = [Entry::new_tick(0, start_hash)];
        let entry_pairs = genesis.par_iter().chain(self).zip(self);
        entry_pairs.all(|(x0, x1)| x1.verify(&x0.id))
    }

    fn to_blobs(&self, blob_recycler: &packet::BlobRecycler, q: &mut VecDeque<SharedBlob>) {
        let mut start = 0;
        let mut end = 0;
        while start < self.len() {
            let mut entries: Vec<Vec<Entry>> = Vec::new();
            let mut total = 0;
            for i in &self[start..] {
                total += size_of::<Transaction>() * i.transactions.len();
                total += size_of::<Entry>();
                if total >= BLOB_DATA_SIZE {
                    break;
                }
                end += 1;
            }
            // See if we need to split the transactions
            if end <= start {
                let mut transaction_start = 0;
                let num_transactions_per_blob = BLOB_DATA_SIZE / size_of::<Transaction>();
                let total_entry_chunks = (self[end].transactions.len() + num_transactions_per_blob
                    - 1) / num_transactions_per_blob;
                trace!(
                    "splitting transactions end: {} total_chunks: {}",
                    end,
                    total_entry_chunks
                );
                for _ in 0..total_entry_chunks {
                    let transaction_end = min(
                        transaction_start + num_transactions_per_blob,
                        self[end].transactions.len(),
                    );
                    let entry = Entry {
                        num_hashes: self[end].num_hashes,
                        id: self[end].id,
                        transactions: self[end].transactions[transaction_start..transaction_end]
                            .to_vec(),
                    };
                    entries.push(vec![entry]);
                    transaction_start = transaction_end;
                }
                end += 1;
            } else {
                entries.push(self[start..end].to_vec());
            }

            for entry in entries {
                let b = blob_recycler.allocate();
                let pos = {
                    let mut bd = b.write().unwrap();
                    let mut out = Cursor::new(bd.data_mut());
                    serialize_into(&mut out, &entry).expect("failed to serialize output");
                    out.position() as usize
                };
                assert!(pos < BLOB_SIZE);
                b.write().unwrap().set_size(pos);
                q.push_back(b);
            }
            start = end;
        }
    }
}
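The oversized-entry path in `to_blobs` is ceiling division: the entry's transactions are split into `ceil(len / per_blob)` chunks. A worked sketch of that arithmetic (the per-blob capacity here is a made-up stand-in for BLOB_DATA_SIZE / size_of::<Transaction>()):

fn main() {
    let num_transactions = 10_000usize;
    let per_blob = 170usize; // hypothetical capacity
    let chunks = (num_transactions + per_blob - 1) / per_blob; // ceiling division
    assert_eq!(chunks, 59);
    // every chunk but the last is full; the last carries the remainder
    assert_eq!(num_transactions - (chunks - 1) * per_blob, 140);
}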

/// Create a vector of Entries of length `transaction_batches.len()` from `start_hash` hash, `num_hashes`, and `transaction_batches`.
pub fn next_entries(
    start_hash: &Hash,
    num_hashes: u64,
    transaction_batches: Vec<Vec<Transaction>>,
) -> Vec<Entry> {
    let mut id = *start_hash;
    let mut entries = vec![];
    for transactions in &transaction_batches {
        let transactions = transactions.clone();
        let entry = next_entry(&id, num_hashes, transactions);
        id = entry.id;
        entries.push(entry);
    }
    entries
}

pub fn reconstruct_entries_from_blobs(blobs: &VecDeque<SharedBlob>) -> Vec<Entry> {
    let mut entries_to_apply: Vec<Entry> = Vec::new();
    let mut last_id = Hash::default();
    for msgs in blobs {
        let blob = msgs.read().unwrap();
        let entries: Vec<Entry> = deserialize(&blob.data()[..blob.meta.size]).unwrap();
        for entry in entries {
            if entry.id == last_id {
                if let Some(last_entry) = entries_to_apply.last_mut() {
                    last_entry.transactions.extend(entry.transactions);
                }
            } else {
                last_id = entry.id;
                entries_to_apply.push(entry);
            }
        }
        //TODO respond back to leader with hash of the state
    }
    entries_to_apply
}

#[cfg(test)]
mod tests {
    use super::*;
    use hash::hash;
    use packet::BlobRecycler;
    use signature::{KeyPair, KeyPairUtil};
    use transaction::Transaction;

    #[test]
    fn test_verify_slice() {
        let zero = Hash::default();
        let one = hash(&zero);
        assert!(vec![][..].verify(&zero)); // base case
        assert!(vec![Entry::new_tick(0, &zero)][..].verify(&zero)); // singleton case 1
        assert!(!vec![Entry::new_tick(0, &zero)][..].verify(&one)); // singleton case 2, bad
        assert!(next_entries(&zero, 0, vec![vec![]; 2])[..].verify(&zero)); // inductive step

        let mut bad_ticks = next_entries(&zero, 0, vec![vec![]; 2]);
        bad_ticks[1].id = one;
        assert!(!bad_ticks.verify(&zero)); // inductive step, bad
    }

    #[test]
    fn test_entry_to_blobs() {
        let zero = Hash::default();
        let one = hash(&zero);
        let keypair = KeyPair::new();
        let tx0 = Transaction::new(&keypair, keypair.pubkey(), 1, one);
        let transactions = vec![tx0.clone(); 10000];
        let e0 = Entry::new(&zero, 0, transactions);

        let entries = vec![e0.clone(); 1];
        let blob_recycler = BlobRecycler::default();
        let mut blob_q = VecDeque::new();
        entries.to_blobs(&blob_recycler, &mut blob_q);

        assert_eq!(reconstruct_entries_from_blobs(&blob_q), entries);
    }

    #[test]
    fn test_next_entries() {
        let mut id = Hash::default();
        let next_id = hash(&id);
        let keypair = KeyPair::new();
        let tx0 = Transaction::new(&keypair, keypair.pubkey(), 1, next_id);
        let transactions = vec![tx0.clone(); 5];
        let transaction_batches = vec![transactions.clone(); 5];
        let entries0 = next_entries(&id, 0, transaction_batches);

        assert_eq!(entries0.len(), 5);

        let mut entries1 = vec![];
        for _ in 0..5 {
            let entry = next_entry(&id, 0, transactions.clone());
            id = entry.id;
            entries1.push(entry);
        }
        assert_eq!(entries0, entries1);
    }
}

#[cfg(all(feature = "unstable", test))]
mod bench {
    extern crate test;
    use self::test::Bencher;
    use ledger::*;

    #[bench]
    fn bench_next_entries(bencher: &mut Bencher) {
        let start_hash = Hash::default();
        let entries = next_entries(&start_hash, 10_000, vec![vec![]; 8]);
        bencher.iter(|| {
            assert!(entries.verify(&start_hash));
        });
    }
}
src/lib.rs (modified)
@@ -1,4 +1,58 @@
#![cfg_attr(feature = "unstable", feature(test))]
pub mod event;
extern crate itertools;
pub mod bank;
pub mod banking_stage;
pub mod budget;
pub mod crdt;
pub mod entry;
pub mod entry_writer;
#[cfg(feature = "erasure")]
pub mod erasure;
pub mod fetch_stage;
pub mod hash;
pub mod ledger;
pub mod logger;
pub mod mint;
pub mod packet;
pub mod payment_plan;
pub mod record_stage;
pub mod recorder;
pub mod replicate_stage;
pub mod request;
pub mod request_processor;
pub mod request_stage;
pub mod result;
pub mod rpu;
pub mod server;
pub mod signature;
pub mod sigverify;
pub mod sigverify_stage;
pub mod streamer;
pub mod thin_client;
pub mod timing;
pub mod tpu;
pub mod transaction;
pub mod tvu;
pub mod write_stage;
extern crate bincode;
extern crate byteorder;
extern crate chrono;
extern crate generic_array;
extern crate libc;
#[macro_use]
extern crate log;
extern crate rayon;
extern crate ring;
extern crate serde;
#[macro_use]
extern crate serde_derive;
extern crate serde_json;
extern crate sha2;
extern crate untrusted;

extern crate futures;

#[cfg(test)]
#[macro_use]
extern crate matches;

extern crate rand;
src/logger.rs (new file, 11 lines)
@@ -0,0 +1,11 @@
use std::sync::{Once, ONCE_INIT};
extern crate env_logger;

static INIT: Once = ONCE_INIT;

/// Setup function that is only run once, even if called multiple times.
pub fn setup() {
    INIT.call_once(|| {
        let _ = env_logger::init();
    });
}
src/mint.rs (new file, 92 lines)
@@ -0,0 +1,92 @@
|
||||
//! The `mint` module is a library for generating the chain's genesis block.
|
||||
|
||||
use entry::Entry;
|
||||
use hash::{hash, Hash};
|
||||
use ring::rand::SystemRandom;
|
||||
use signature::{KeyPair, KeyPairUtil, PublicKey};
|
||||
use transaction::Transaction;
|
||||
use untrusted::Input;
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
pub struct Mint {
|
||||
pub pkcs8: Vec<u8>,
|
||||
pubkey: PublicKey,
|
||||
pub tokens: i64,
|
||||
}
|
||||
|
||||
impl Mint {
|
||||
pub fn new(tokens: i64) -> Self {
|
||||
let rnd = SystemRandom::new();
|
||||
let pkcs8 = KeyPair::generate_pkcs8(&rnd)
|
||||
.expect("generate_pkcs8 in mint pub fn new")
|
||||
.to_vec();
|
||||
let keypair =
|
||||
KeyPair::from_pkcs8(Input::from(&pkcs8)).expect("from_pkcs8 in mint pub fn new");
|
||||
let pubkey = keypair.pubkey();
|
||||
Mint {
|
||||
pkcs8,
|
||||
pubkey,
|
||||
tokens,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn seed(&self) -> Hash {
|
||||
hash(&self.pkcs8)
|
||||
}
|
||||
|
||||
pub fn last_id(&self) -> Hash {
|
||||
self.create_entries()[1].id
|
||||
}
|
||||
|
||||
pub fn keypair(&self) -> KeyPair {
|
||||
KeyPair::from_pkcs8(Input::from(&self.pkcs8)).expect("from_pkcs8 in mint pub fn keypair")
|
||||
}
|
||||
|
||||
pub fn pubkey(&self) -> PublicKey {
|
||||
self.pubkey
|
||||
}
|
||||
|
||||
pub fn create_transactions(&self) -> Vec<Transaction> {
|
||||
let keypair = self.keypair();
|
||||
let tx = Transaction::new(&keypair, self.pubkey(), self.tokens, self.seed());
|
||||
vec![tx]
|
||||
}
|
||||
|
||||
pub fn create_entries(&self) -> Vec<Entry> {
|
||||
let e0 = Entry::new(&self.seed(), 0, vec![]);
|
||||
let e1 = Entry::new(&e0.id, 0, self.create_transactions());
|
||||
vec![e0, e1]
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
pub struct MintDemo {
|
||||
pub mint: Mint,
|
||||
pub num_accounts: i64,
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use budget::Budget;
|
||||
use ledger::Block;
|
||||
use transaction::{Instruction, Plan};
|
||||
|
||||
#[test]
|
||||
fn test_create_transactions() {
|
||||
let mut transactions = Mint::new(100).create_transactions().into_iter();
|
||||
let tx = transactions.next().unwrap();
|
||||
if let Instruction::NewContract(contract) = tx.instruction {
|
||||
if let Plan::Budget(Budget::Pay(payment)) = contract.plan {
|
||||
assert_eq!(tx.from, payment.to);
|
||||
}
|
||||
}
|
||||
assert_eq!(transactions.next(), None);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_verify_entries() {
|
||||
let entries = Mint::new(100).create_entries();
|
||||
assert!(entries[..].verify(&entries[0].id));
|
||||
}
|
||||
}
|
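The two entries produced by `create_entries` form the genesis ledger: an empty seed entry followed by an entry carrying the mint's self-payment. A hedged usage sketch built only from the APIs shown above (the 10_000 token figure is an arbitrary example):

```rust
// Sketch: build a genesis ledger from a Mint and verify it.
use ledger::Block;
use mint::Mint;

fn demo_genesis() {
    let mint = Mint::new(10_000);
    let entries = mint.create_entries();
    // Entry 0 is derived from the seed; entry 1 carries the deposit.
    assert!(entries[..].verify(&entries[0].id));
    assert_eq!(mint.last_id(), entries[1].id);
}
```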
488  src/packet.rs  Normal file
@@ -0,0 +1,488 @@
//! The `packet` module defines data structures and methods to pull data from the network.
use bincode::{deserialize, serialize};
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use result::{Error, Result};
use serde::Serialize;
use signature::PublicKey;
use std::collections::VecDeque;
use std::fmt;
use std::io;
use std::mem::size_of;
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, UdpSocket};
use std::sync::{Arc, Mutex, RwLock};

pub type SharedPackets = Arc<RwLock<Packets>>;
pub type SharedBlob = Arc<RwLock<Blob>>;
pub type PacketRecycler = Recycler<Packets>;
pub type BlobRecycler = Recycler<Blob>;

pub const NUM_PACKETS: usize = 1024 * 8;
pub const BLOB_SIZE: usize = 64 * 1024;
pub const BLOB_DATA_SIZE: usize = BLOB_SIZE - BLOB_ID_END;
pub const PACKET_DATA_SIZE: usize = 256;
pub const NUM_BLOBS: usize = (NUM_PACKETS * PACKET_DATA_SIZE) / BLOB_SIZE;

#[derive(Clone, Default)]
#[repr(C)]
pub struct Meta {
    pub size: usize,
    pub addr: [u16; 8],
    pub port: u16,
    pub v6: bool,
}

#[derive(Clone)]
#[repr(C)]
pub struct Packet {
    pub data: [u8; PACKET_DATA_SIZE],
    pub meta: Meta,
}

impl fmt::Debug for Packet {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(
            f,
            "Packet {{ size: {:?}, addr: {:?} }}",
            self.meta.size,
            self.meta.addr()
        )
    }
}

impl Default for Packet {
    fn default() -> Packet {
        Packet {
            data: [0u8; PACKET_DATA_SIZE],
            meta: Meta::default(),
        }
    }
}

impl Meta {
    pub fn addr(&self) -> SocketAddr {
        if !self.v6 {
            let addr = [
                self.addr[0] as u8,
                self.addr[1] as u8,
                self.addr[2] as u8,
                self.addr[3] as u8,
            ];
            let ipv4: Ipv4Addr = From::<[u8; 4]>::from(addr);
            SocketAddr::new(IpAddr::V4(ipv4), self.port)
        } else {
            let ipv6: Ipv6Addr = From::<[u16; 8]>::from(self.addr);
            SocketAddr::new(IpAddr::V6(ipv6), self.port)
        }
    }

    pub fn set_addr(&mut self, a: &SocketAddr) {
        match *a {
            SocketAddr::V4(v4) => {
                let ip = v4.ip().octets();
                self.addr[0] = u16::from(ip[0]);
                self.addr[1] = u16::from(ip[1]);
                self.addr[2] = u16::from(ip[2]);
                self.addr[3] = u16::from(ip[3]);
                self.port = a.port();
            }
            SocketAddr::V6(v6) => {
                self.addr = v6.ip().segments();
                self.port = a.port();
                self.v6 = true;
            }
        }
    }
}

#[derive(Debug)]
pub struct Packets {
    pub packets: Vec<Packet>,
}

// auto derive doesn't support large arrays
impl Default for Packets {
    fn default() -> Packets {
        Packets {
            packets: vec![Packet::default(); NUM_PACKETS],
        }
    }
}

#[derive(Clone)]
pub struct Blob {
    pub data: [u8; BLOB_SIZE],
    pub meta: Meta,
}

impl fmt::Debug for Blob {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(
            f,
            "Blob {{ size: {:?}, addr: {:?} }}",
            self.meta.size,
            self.meta.addr()
        )
    }
}

// auto derive doesn't support large arrays
impl Default for Blob {
    fn default() -> Blob {
        Blob {
            data: [0u8; BLOB_SIZE],
            meta: Meta::default(),
        }
    }
}

pub struct Recycler<T> {
    gc: Arc<Mutex<Vec<Arc<RwLock<T>>>>>,
}

impl<T: Default> Default for Recycler<T> {
    fn default() -> Recycler<T> {
        Recycler {
            gc: Arc::new(Mutex::new(vec![])),
        }
    }
}

impl<T: Default> Clone for Recycler<T> {
    fn clone(&self) -> Recycler<T> {
        Recycler {
            gc: self.gc.clone(),
        }
    }
}

impl<T: Default> Recycler<T> {
    pub fn allocate(&self) -> Arc<RwLock<T>> {
        let mut gc = self.gc.lock().expect("recycler lock in pub fn allocate");
        gc.pop()
            .unwrap_or_else(|| Arc::new(RwLock::new(Default::default())))
    }
    pub fn recycle(&self, msgs: Arc<RwLock<T>>) {
        let mut gc = self.gc.lock().expect("recycler lock in pub fn recycle");
        gc.push(msgs);
    }
}

impl Packets {
    fn run_read_from(&mut self, socket: &UdpSocket) -> Result<usize> {
        self.packets.resize(NUM_PACKETS, Packet::default());
        let mut i = 0;
        //DOCUMENTED SIDE-EFFECT
        //Performance out of the IO without poll
        // * block on the socket until it's readable
        // * set the socket to non blocking
        // * read until it fails
        // * set it back to blocking before returning
        socket.set_nonblocking(false)?;
        for p in &mut self.packets {
            p.meta.size = 0;
            trace!("receiving");
            match socket.recv_from(&mut p.data) {
                Err(_) if i > 0 => {
                    debug!("got {:?} messages", i);
                    break;
                }
                Err(e) => {
                    trace!("recv_from err {:?}", e);
                    return Err(Error::IO(e));
                }
                Ok((nrecv, from)) => {
                    p.meta.size = nrecv;
                    p.meta.set_addr(&from);
                    if i == 0 {
                        socket.set_nonblocking(true)?;
                    }
                }
            }
            i += 1;
        }
        Ok(i)
    }
    pub fn recv_from(&mut self, socket: &UdpSocket) -> Result<()> {
        let sz = self.run_read_from(socket)?;
        self.packets.resize(sz, Packet::default());
        debug!("recv_from: {}", sz);
        Ok(())
    }
    pub fn send_to(&self, socket: &UdpSocket) -> Result<()> {
        for p in &self.packets {
            let a = p.meta.addr();
            socket.send_to(&p.data[..p.meta.size], &a)?;
        }
        Ok(())
    }
}

pub fn to_packets<T: Serialize>(r: &PacketRecycler, xs: Vec<T>) -> Vec<SharedPackets> {
    let mut out = vec![];
    for x in xs.chunks(NUM_PACKETS) {
        let p = r.allocate();
        p.write()
            .unwrap()
            .packets
            .resize(x.len(), Default::default());
        for (i, o) in x.iter().zip(p.write().unwrap().packets.iter_mut()) {
            let v = serialize(&i).expect("serialize request");
            let len = v.len();
            o.data[..len].copy_from_slice(&v);
            o.meta.size = len;
        }
        out.push(p);
    }
    return out;
}

pub fn to_blob<T: Serialize>(
    resp: T,
    rsp_addr: SocketAddr,
    blob_recycler: &BlobRecycler,
) -> Result<SharedBlob> {
    let blob = blob_recycler.allocate();
    {
        let mut b = blob.write().unwrap();
        let v = serialize(&resp)?;
        let len = v.len();
        // TODO: we are not using .data_mut() method here because
        // the raw bytes are being serialized and sent, this isn't the
        // right interface, and we should create a separate path for
        // sending request responses in the RPU
        b.data[..len].copy_from_slice(&v);
        b.meta.size = len;
        b.meta.set_addr(&rsp_addr);
    }
    Ok(blob)
}

pub fn to_blobs<T: Serialize>(
    rsps: Vec<(T, SocketAddr)>,
    blob_recycler: &BlobRecycler,
) -> Result<VecDeque<SharedBlob>> {
    let mut blobs = VecDeque::new();
    for (resp, rsp_addr) in rsps {
        blobs.push_back(to_blob(resp, rsp_addr, blob_recycler)?);
    }
    Ok(blobs)
}

const BLOB_INDEX_END: usize = size_of::<u64>();
const BLOB_ID_END: usize = BLOB_INDEX_END + size_of::<usize>() + size_of::<PublicKey>();

impl Blob {
    pub fn get_index(&self) -> Result<u64> {
        let mut rdr = io::Cursor::new(&self.data[0..BLOB_INDEX_END]);
        let r = rdr.read_u64::<LittleEndian>()?;
        Ok(r)
    }
    pub fn set_index(&mut self, ix: u64) -> Result<()> {
        let mut wtr = vec![];
        wtr.write_u64::<LittleEndian>(ix)?;
        self.data[..BLOB_INDEX_END].clone_from_slice(&wtr);
        Ok(())
    }

    pub fn get_id(&self) -> Result<PublicKey> {
        let e = deserialize(&self.data[BLOB_INDEX_END..BLOB_ID_END])?;
        Ok(e)
    }

    pub fn set_id(&mut self, id: PublicKey) -> Result<()> {
        let wtr = serialize(&id)?;
        self.data[BLOB_INDEX_END..BLOB_ID_END].clone_from_slice(&wtr);
        Ok(())
    }

    pub fn data(&self) -> &[u8] {
        &self.data[BLOB_ID_END..]
    }
    pub fn data_mut(&mut self) -> &mut [u8] {
        &mut self.data[BLOB_ID_END..]
    }
    pub fn set_size(&mut self, size: usize) {
        self.meta.size = size + BLOB_ID_END;
    }
    pub fn recv_from(re: &BlobRecycler, socket: &UdpSocket) -> Result<VecDeque<SharedBlob>> {
        let mut v = VecDeque::new();
        //DOCUMENTED SIDE-EFFECT
        //Performance out of the IO without poll
        // * block on the socket until it's readable
        // * set the socket to non blocking
        // * read until it fails
        // * set it back to blocking before returning
        socket.set_nonblocking(false)?;
        for i in 0..NUM_BLOBS {
            let r = re.allocate();
            {
                let mut p = r.write().expect("'r' write lock in pub fn recv_from");
                match socket.recv_from(&mut p.data) {
                    Err(_) if i > 0 => {
                        trace!("got {:?} messages", i);
                        break;
                    }
                    Err(e) => {
                        if e.kind() != io::ErrorKind::WouldBlock {
                            info!("recv_from err {:?}", e);
                        }
                        return Err(Error::IO(e));
                    }
                    Ok((nrecv, from)) => {
                        p.meta.size = nrecv;
                        p.meta.set_addr(&from);
                        if i == 0 {
                            socket.set_nonblocking(true)?;
                        }
                    }
                }
            }
            v.push_back(r);
        }
        Ok(v)
    }
    pub fn send_to(
        re: &BlobRecycler,
        socket: &UdpSocket,
        v: &mut VecDeque<SharedBlob>,
    ) -> Result<()> {
        while let Some(r) = v.pop_front() {
            {
                let p = r.read().expect("'r' read lock in pub fn send_to");
                let a = p.meta.addr();
                socket.send_to(&p.data[..p.meta.size], &a)?;
            }
            re.recycle(r);
        }
        Ok(())
    }
}

#[cfg(test)]
mod test {
    use packet::{to_packets, Blob, BlobRecycler, Packet, PacketRecycler, Packets, NUM_PACKETS};
    use request::Request;
    use std::collections::VecDeque;
    use std::io;
    use std::io::Write;
    use std::net::UdpSocket;

    #[test]
    pub fn packet_recycler_test() {
        let r = PacketRecycler::default();
        let p = r.allocate();
        r.recycle(p);
        assert_eq!(r.gc.lock().unwrap().len(), 1);
        let _ = r.allocate();
        assert_eq!(r.gc.lock().unwrap().len(), 0);
    }
    #[test]
    pub fn blob_recycler_test() {
        let r = BlobRecycler::default();
        let p = r.allocate();
        r.recycle(p);
        assert_eq!(r.gc.lock().unwrap().len(), 1);
        let _ = r.allocate();
        assert_eq!(r.gc.lock().unwrap().len(), 0);
    }
    #[test]
    pub fn packet_send_recv() {
        let reader = UdpSocket::bind("127.0.0.1:0").expect("bind");
        let addr = reader.local_addr().unwrap();
        let sender = UdpSocket::bind("127.0.0.1:0").expect("bind");
        let saddr = sender.local_addr().unwrap();
        let r = PacketRecycler::default();
        let p = r.allocate();
        p.write().unwrap().packets.resize(10, Packet::default());
        for m in p.write().unwrap().packets.iter_mut() {
            m.meta.set_addr(&addr);
            m.meta.size = 256;
        }
        p.read().unwrap().send_to(&sender).unwrap();
        p.write().unwrap().recv_from(&reader).unwrap();
        for m in p.write().unwrap().packets.iter_mut() {
            assert_eq!(m.meta.size, 256);
            assert_eq!(m.meta.addr(), saddr);
        }

        r.recycle(p);
    }

    #[test]
    fn test_to_packets() {
        let tx = Request::GetTransactionCount;
        let re = PacketRecycler::default();
        let rv = to_packets(&re, vec![tx.clone(); 1]);
        assert_eq!(rv.len(), 1);
        assert_eq!(rv[0].read().unwrap().packets.len(), 1);

        let rv = to_packets(&re, vec![tx.clone(); NUM_PACKETS]);
        assert_eq!(rv.len(), 1);
        assert_eq!(rv[0].read().unwrap().packets.len(), NUM_PACKETS);

        let rv = to_packets(&re, vec![tx.clone(); NUM_PACKETS + 1]);
        assert_eq!(rv.len(), 2);
        assert_eq!(rv[0].read().unwrap().packets.len(), NUM_PACKETS);
        assert_eq!(rv[1].read().unwrap().packets.len(), 1);
    }

    #[test]
    pub fn blob_send_recv() {
        trace!("start");
        let reader = UdpSocket::bind("127.0.0.1:0").expect("bind");
        let addr = reader.local_addr().unwrap();
        let sender = UdpSocket::bind("127.0.0.1:0").expect("bind");
        let r = BlobRecycler::default();
        let p = r.allocate();
        p.write().unwrap().meta.set_addr(&addr);
        p.write().unwrap().meta.size = 1024;
        let mut v = VecDeque::new();
        v.push_back(p);
        assert_eq!(v.len(), 1);
        Blob::send_to(&r, &sender, &mut v).unwrap();
        trace!("send_to");
        assert_eq!(v.len(), 0);
        let mut rv = Blob::recv_from(&r, &reader).unwrap();
        trace!("recv_from");
        assert_eq!(rv.len(), 1);
        let rp = rv.pop_front().unwrap();
        assert_eq!(rp.write().unwrap().meta.size, 1024);
        r.recycle(rp);
    }

    #[cfg(all(feature = "ipv6", test))]
    #[test]
    pub fn blob_ipv6_send_recv() {
        let reader = UdpSocket::bind("[::1]:0").expect("bind");
        let addr = reader.local_addr().unwrap();
        let sender = UdpSocket::bind("[::1]:0").expect("bind");
        let r = BlobRecycler::default();
        let p = r.allocate();
        p.write().unwrap().meta.set_addr(&addr);
        p.write().unwrap().meta.size = 1024;
        let mut v = VecDeque::default();
        v.push_back(p);
        Blob::send_to(&r, &sender, &mut v).unwrap();
        let mut rv = Blob::recv_from(&r, &reader).unwrap();
        let rp = rv.pop_front().unwrap();
        assert_eq!(rp.write().unwrap().meta.size, 1024);
        r.recycle(rp);
    }

    #[test]
    pub fn debug_trait() {
        write!(io::sink(), "{:?}", Packet::default()).unwrap();
        write!(io::sink(), "{:?}", Packets::default()).unwrap();
        write!(io::sink(), "{:?}", Blob::default()).unwrap();
    }
    #[test]
    pub fn blob_test() {
        let mut b = Blob::default();
        b.set_index(<u64>::max_value()).unwrap();
        assert_eq!(b.get_index().unwrap(), <u64>::max_value());
        b.data_mut()[0] = 1;
        assert_eq!(b.data()[0], 1);
        assert_eq!(b.get_index().unwrap(), <u64>::max_value());
    }
}
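The `Recycler` keeps a free list of `Arc<RwLock<T>>` buffers so the hot receive loops can reuse 64 KiB blob allocations instead of reallocating them. A brief sketch of the lifecycle, mirroring the recycler tests above (the function name is illustrative):

```rust
// Sketch: allocate a shared Packets buffer, use it, then return it to the
// free list so the next allocate() can pop the same allocation.
use packet::PacketRecycler;

fn demo_recycler() {
    let recycler = PacketRecycler::default();
    let shared = recycler.allocate();        // pops from gc, or allocates fresh
    shared.write().unwrap().packets.clear(); // note: allocate() does not reset
                                             // state; callers must do it
    recycler.recycle(shared);                // pushes back onto gc for reuse
}
```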
31  src/payment_plan.rs  Normal file
@@ -0,0 +1,31 @@
//! The `payment_plan` module provides a domain-specific language for payment plans. Users create Budget objects that
//! are given to an interpreter. The interpreter listens for `Witness` transactions,
//! which it uses to reduce the payment plan. When the plan is reduced to a
//! `Payment`, the payment is executed.

use chrono::prelude::*;
use signature::PublicKey;

#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub enum Witness {
    Timestamp(DateTime<Utc>),
    Signature(PublicKey),
}

#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub struct Payment {
    pub tokens: i64,
    pub to: PublicKey,
}

pub trait PaymentPlan {
    /// Return Payment if the payment plan requires no additional Witnesses.
    fn final_payment(&self) -> Option<Payment>;

    /// Return true if the plan spends exactly `spendable_tokens`.
    fn verify(&self, spendable_tokens: i64) -> bool;

    /// Apply a witness to the payment plan to see if the plan can be reduced.
    /// If so, modify the plan in-place.
    fn apply_witness(&mut self, witness: &Witness);
}
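Any plan type can implement `PaymentPlan`; the `Budget` enum elsewhere in this change is the real implementor. A toy implementor (purely illustrative, not in the diff) that is already reduced and pays unconditionally:

```rust
// Toy PaymentPlan: the plan is already a final Payment, so no Witness is
// ever needed. Illustrative only; Budget is the real implementor.
use payment_plan::{Payment, PaymentPlan, Witness};

struct ImmediatePay(Payment);

impl PaymentPlan for ImmediatePay {
    fn final_payment(&self) -> Option<Payment> {
        Some(self.0.clone())
    }
    fn verify(&self, spendable_tokens: i64) -> bool {
        self.0.tokens == spendable_tokens
    }
    fn apply_witness(&mut self, _witness: &Witness) {
        // Nothing to reduce: the plan is already a final Payment.
    }
}
```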
166  src/record_stage.rs  Normal file
@@ -0,0 +1,166 @@
//! The `record_stage` module provides an object for generating a Proof of History.
//! It records Event items on behalf of its users. It continuously generates
//! new hashes, only stopping to check if it has been sent an Event item. It
//! tags each Event with an Entry, and sends it back. The Entry includes the
//! Event, the latest hash, and the number of hashes since the last transaction.
//! The resulting stream of entries represents ordered transactions in time.

use entry::Entry;
use hash::Hash;
use recorder::Recorder;
use std::sync::mpsc::{channel, Receiver, Sender, TryRecvError};
use std::thread::{spawn, JoinHandle};
use std::time::{Duration, Instant};
use transaction::Transaction;

#[cfg_attr(feature = "cargo-clippy", allow(large_enum_variant))]
pub enum Signal {
    Tick,
    Events(Vec<Transaction>),
}

pub struct RecordStage {
    pub entry_receiver: Receiver<Entry>,
    pub thread_hdl: JoinHandle<()>,
}

impl RecordStage {
    /// A background thread that will continue tagging received Event messages and
    /// sending back Entry messages until either the receiver or sender channel is closed.
    pub fn new(
        transaction_receiver: Receiver<Signal>,
        start_hash: &Hash,
        tick_duration: Option<Duration>,
    ) -> Self {
        let (entry_sender, entry_receiver) = channel();
        let start_hash = start_hash.clone();

        let thread_hdl = spawn(move || {
            let mut recorder = Recorder::new(start_hash);
            let duration_data = tick_duration.map(|dur| (Instant::now(), dur));
            loop {
                if let Err(_) = Self::process_transactions(
                    &mut recorder,
                    duration_data,
                    &transaction_receiver,
                    &entry_sender,
                ) {
                    return;
                }
                if duration_data.is_some() {
                    recorder.hash();
                }
            }
        });

        RecordStage {
            entry_receiver,
            thread_hdl,
        }
    }

    pub fn process_transactions(
        recorder: &mut Recorder,
        duration_data: Option<(Instant, Duration)>,
        receiver: &Receiver<Signal>,
        sender: &Sender<Entry>,
    ) -> Result<(), ()> {
        loop {
            if let Some((start_time, tick_duration)) = duration_data {
                if let Some(entry) = recorder.tick(start_time, tick_duration) {
                    sender.send(entry).or(Err(()))?;
                }
            }
            match receiver.try_recv() {
                Ok(signal) => match signal {
                    Signal::Tick => {
                        let entry = recorder.record(vec![]);
                        sender.send(entry).or(Err(()))?;
                    }
                    Signal::Events(transactions) => {
                        let entry = recorder.record(transactions);
                        sender.send(entry).or(Err(()))?;
                    }
                },
                Err(TryRecvError::Empty) => return Ok(()),
                Err(TryRecvError::Disconnected) => return Err(()),
            };
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use ledger::Block;
    use signature::{KeyPair, KeyPairUtil};
    use std::sync::mpsc::channel;
    use std::thread::sleep;

    #[test]
    fn test_historian() {
        let (tx_sender, tx_receiver) = channel();
        let zero = Hash::default();
        let record_stage = RecordStage::new(tx_receiver, &zero, None);

        tx_sender.send(Signal::Tick).unwrap();
        sleep(Duration::new(0, 1_000_000));
        tx_sender.send(Signal::Tick).unwrap();
        sleep(Duration::new(0, 1_000_000));
        tx_sender.send(Signal::Tick).unwrap();

        let entry0 = record_stage.entry_receiver.recv().unwrap();
        let entry1 = record_stage.entry_receiver.recv().unwrap();
        let entry2 = record_stage.entry_receiver.recv().unwrap();

        assert_eq!(entry0.num_hashes, 0);
        assert_eq!(entry1.num_hashes, 0);
        assert_eq!(entry2.num_hashes, 0);

        drop(tx_sender);
        assert_eq!(record_stage.thread_hdl.join().unwrap(), ());

        assert!([entry0, entry1, entry2].verify(&zero));
    }

    #[test]
    fn test_historian_closed_sender() {
        let (tx_sender, tx_receiver) = channel();
        let zero = Hash::default();
        let record_stage = RecordStage::new(tx_receiver, &zero, None);
        drop(record_stage.entry_receiver);
        tx_sender.send(Signal::Tick).unwrap();
        assert_eq!(record_stage.thread_hdl.join().unwrap(), ());
    }

    #[test]
    fn test_transactions() {
        let (tx_sender, signal_receiver) = channel();
        let zero = Hash::default();
        let record_stage = RecordStage::new(signal_receiver, &zero, None);
        let alice_keypair = KeyPair::new();
        let bob_pubkey = KeyPair::new().pubkey();
        let tx0 = Transaction::new(&alice_keypair, bob_pubkey, 1, zero);
        let tx1 = Transaction::new(&alice_keypair, bob_pubkey, 2, zero);
        tx_sender.send(Signal::Events(vec![tx0, tx1])).unwrap();
        drop(tx_sender);
        let entries: Vec<_> = record_stage.entry_receiver.iter().collect();
        assert_eq!(entries.len(), 1);
    }

    #[test]
    #[ignore]
    fn test_ticking_historian() {
        let (tx_sender, tx_receiver) = channel();
        let zero = Hash::default();
        let record_stage = RecordStage::new(tx_receiver, &zero, Some(Duration::from_millis(20)));
        sleep(Duration::from_millis(900));
        tx_sender.send(Signal::Tick).unwrap();
        drop(tx_sender);
        let entries: Vec<Entry> = record_stage.entry_receiver.iter().collect();
        assert!(entries.len() > 1);

        // Ensure the ID is not the seed.
        assert_ne!(entries[0].id, zero);
    }
}
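The channel pair is the stage's whole interface: `Signal`s in, `Entry`s out, and dropping either end shuts the thread down. A condensed driving sketch along the lines of the tests above (names are illustrative):

```rust
// Sketch: feed one Tick into a RecordStage and collect the tagged entries.
use hash::Hash;
use record_stage::{RecordStage, Signal};
use std::sync::mpsc::channel;

fn demo_record_stage() {
    let (signal_sender, signal_receiver) = channel();
    let stage = RecordStage::new(signal_receiver, &Hash::default(), None);
    signal_sender.send(Signal::Tick).unwrap();
    drop(signal_sender); // closing the channel lets the thread exit
    let entries: Vec<_> = stage.entry_receiver.iter().collect();
    assert_eq!(entries.len(), 1);
}
```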
42  src/recorder.rs  Normal file
@@ -0,0 +1,42 @@
//! The `recorder` module provides an object for generating a Proof of History.
//! It records Event items on behalf of its users.

use entry::Entry;
use hash::{hash, Hash};
use std::time::{Duration, Instant};
use transaction::Transaction;

pub struct Recorder {
    last_hash: Hash,
    num_hashes: u64,
    num_ticks: u32,
}

impl Recorder {
    pub fn new(last_hash: Hash) -> Self {
        Recorder {
            last_hash,
            num_hashes: 0,
            num_ticks: 0,
        }
    }

    pub fn hash(&mut self) {
        self.last_hash = hash(&self.last_hash);
        self.num_hashes += 1;
    }

    pub fn record(&mut self, transactions: Vec<Transaction>) -> Entry {
        Entry::new_mut(&mut self.last_hash, &mut self.num_hashes, transactions)
    }

    pub fn tick(&mut self, start_time: Instant, tick_duration: Duration) -> Option<Entry> {
        if start_time.elapsed() > tick_duration * (self.num_ticks + 1) {
            // TODO: don't let this overflow u32
            self.num_ticks += 1;
            Some(self.record(vec![]))
        } else {
            None
        }
    }
}
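`tick` compares wall-clock elapsed time against `tick_duration * (num_ticks + 1)`, so a caller that falls behind emits several catch-up ticks rather than drifting. A small worked example of that arithmetic (a sketch, using only the APIs above; the durations are arbitrary):

```rust
// Sketch: with a 10ms tick_duration, a Recorder first polled after ~35ms
// yields ticks on successive calls while elapsed > 10ms * (num_ticks + 1),
// i.e. for num_ticks = 0, 1, 2 -- three catch-up ticks.
use hash::Hash;
use recorder::Recorder;
use std::thread::sleep;
use std::time::{Duration, Instant};

fn demo_tick_catchup() {
    let mut recorder = Recorder::new(Hash::default());
    let start = Instant::now();
    sleep(Duration::from_millis(35));
    let mut ticks = 0;
    while recorder.tick(start, Duration::from_millis(10)).is_some() {
        ticks += 1;
    }
    assert!(ticks >= 3); // three full intervals have elapsed
}
```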
52  src/replicate_stage.rs  Normal file
@@ -0,0 +1,52 @@
//! The `replicate_stage` replicates transactions broadcast by the leader.

use bank::Bank;
use ledger;
use packet;
use result::Result;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::thread::{spawn, JoinHandle};
use std::time::Duration;
use streamer;

pub struct ReplicateStage {
    pub thread_hdl: JoinHandle<()>,
}

impl ReplicateStage {
    /// Process verified blobs, already in order
    fn replicate_requests(
        bank: &Arc<Bank>,
        verified_receiver: &streamer::BlobReceiver,
        blob_recycler: &packet::BlobRecycler,
    ) -> Result<()> {
        let timer = Duration::new(1, 0);
        let blobs = verified_receiver.recv_timeout(timer)?;
        let entries = ledger::reconstruct_entries_from_blobs(&blobs);
        let res = bank.process_entries(entries);
        if res.is_err() {
            error!("process_entries {} {:?}", blobs.len(), res);
        }
        res?;
        for blob in blobs {
            blob_recycler.recycle(blob);
        }
        Ok(())
    }

    pub fn new(
        bank: Arc<Bank>,
        exit: Arc<AtomicBool>,
        window_receiver: streamer::BlobReceiver,
        blob_recycler: packet::BlobRecycler,
    ) -> Self {
        let thread_hdl = spawn(move || loop {
            let e = Self::replicate_requests(&bank, &window_receiver, &blob_recycler);
            if e.is_err() && exit.load(Ordering::Relaxed) {
                break;
            }
        });
        ReplicateStage { thread_hdl }
    }
}
26  src/request.rs  Normal file
@@ -0,0 +1,26 @@
//! The `request` module defines the messages for the thin client.

use hash::Hash;
use signature::PublicKey;

#[cfg_attr(feature = "cargo-clippy", allow(large_enum_variant))]
#[derive(Serialize, Deserialize, Debug, Clone)]
pub enum Request {
    GetBalance { key: PublicKey },
    GetLastId,
    GetTransactionCount,
}

impl Request {
    /// Verify the request is valid.
    pub fn verify(&self) -> bool {
        true
    }
}

#[derive(Serialize, Deserialize, Debug)]
pub enum Response {
    Balance { key: PublicKey, val: Option<i64> },
    LastId { id: Hash },
    TransactionCount { transaction_count: u64 },
}
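Requests travel over UDP as bincode, so a `serialize`/`deserialize` round trip is the wire contract. A hedged sketch of that contract using the types above:

```rust
// Sketch: the thin client wire format is plain bincode; a Request must
// survive a serialize/deserialize round trip.
use bincode::{deserialize, serialize};
use request::Request;

fn demo_wire_roundtrip() {
    let req = Request::GetTransactionCount;
    let bytes = serialize(&req).unwrap();
    let back: Request = deserialize(&bytes).unwrap();
    assert!(back.verify()); // verify() currently always returns true
}
```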
54  src/request_processor.rs  Normal file
@@ -0,0 +1,54 @@
//! The `request_processor` processes thin client Request messages.

use bank::Bank;
use request::{Request, Response};
use std::net::SocketAddr;
use std::sync::Arc;

pub struct RequestProcessor {
    bank: Arc<Bank>,
}

impl RequestProcessor {
    /// Create a new RequestProcessor that wraps the given Bank.
    pub fn new(bank: Arc<Bank>) -> Self {
        RequestProcessor { bank }
    }

    /// Process Request items sent by clients.
    fn process_request(
        &self,
        msg: Request,
        rsp_addr: SocketAddr,
    ) -> Option<(Response, SocketAddr)> {
        match msg {
            Request::GetBalance { key } => {
                let val = self.bank.get_balance(&key);
                let rsp = (Response::Balance { key, val }, rsp_addr);
                info!("Response::Balance {:?}", rsp);
                Some(rsp)
            }
            Request::GetLastId => {
                let id = self.bank.last_id();
                let rsp = (Response::LastId { id }, rsp_addr);
                info!("Response::LastId {:?}", rsp);
                Some(rsp)
            }
            Request::GetTransactionCount => {
                let transaction_count = self.bank.transaction_count() as u64;
                let rsp = (Response::TransactionCount { transaction_count }, rsp_addr);
                info!("Response::TransactionCount {:?}", rsp);
                Some(rsp)
            }
        }
    }

    pub fn process_requests(
        &self,
        reqs: Vec<(Request, SocketAddr)>,
    ) -> Vec<(Response, SocketAddr)> {
        reqs.into_iter()
            .filter_map(|(req, rsp_addr)| self.process_request(req, rsp_addr))
            .collect()
    }
}
113  src/request_stage.rs  Normal file
@@ -0,0 +1,113 @@
//! The `request_stage` processes thin client Request messages.

use bincode::deserialize;
use packet;
use packet::SharedPackets;
use rayon::prelude::*;
use request::Request;
use request_processor::RequestProcessor;
use result::Result;
use std::net::SocketAddr;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::{channel, Receiver};
use std::sync::Arc;
use std::thread::{spawn, JoinHandle};
use std::time::Instant;
use streamer;
use timing;

pub struct RequestStage {
    pub thread_hdl: JoinHandle<()>,
    pub blob_receiver: streamer::BlobReceiver,
    pub request_processor: Arc<RequestProcessor>,
}

impl RequestStage {
    pub fn deserialize_requests(p: &packet::Packets) -> Vec<Option<(Request, SocketAddr)>> {
        p.packets
            .par_iter()
            .map(|x| {
                deserialize(&x.data[0..x.meta.size])
                    .map(|req| (req, x.meta.addr()))
                    .ok()
            })
            .collect()
    }

    pub fn process_request_packets(
        request_processor: &RequestProcessor,
        packet_receiver: &Receiver<SharedPackets>,
        blob_sender: &streamer::BlobSender,
        packet_recycler: &packet::PacketRecycler,
        blob_recycler: &packet::BlobRecycler,
    ) -> Result<()> {
        let (batch, batch_len) = streamer::recv_batch(packet_receiver)?;

        info!(
            "@{:?} request_stage: processing: {}",
            timing::timestamp(),
            batch_len
        );

        let mut reqs_len = 0;
        let proc_start = Instant::now();
        for msgs in batch {
            let reqs: Vec<_> = Self::deserialize_requests(&msgs.read().unwrap())
                .into_iter()
                .filter_map(|x| x)
                .collect();
            reqs_len += reqs.len();

            let rsps = request_processor.process_requests(reqs);

            let blobs = packet::to_blobs(rsps, blob_recycler)?;
            if !blobs.is_empty() {
                info!("process: sending blobs: {}", blobs.len());
                //don't wake up the other side if there is nothing
                blob_sender.send(blobs)?;
            }
            packet_recycler.recycle(msgs);
        }
        let total_time_s = timing::duration_as_s(&proc_start.elapsed());
        let total_time_ms = timing::duration_as_ms(&proc_start.elapsed());
        info!(
            "@{:?} done process batches: {} time: {:?}ms reqs: {} reqs/s: {}",
            timing::timestamp(),
            batch_len,
            total_time_ms,
            reqs_len,
            (reqs_len as f32) / (total_time_s)
        );
        Ok(())
    }
    pub fn new(
        request_processor: RequestProcessor,
        exit: Arc<AtomicBool>,
        packet_receiver: Receiver<SharedPackets>,
        packet_recycler: packet::PacketRecycler,
        blob_recycler: packet::BlobRecycler,
    ) -> Self {
        let request_processor = Arc::new(request_processor);
        let request_processor_ = request_processor.clone();
        let (blob_sender, blob_receiver) = channel();
        let thread_hdl = spawn(move || loop {
            let e = Self::process_request_packets(
                &request_processor_,
                &packet_receiver,
                &blob_sender,
                &packet_recycler,
                &blob_recycler,
            );
            if e.is_err() {
                if exit.load(Ordering::Relaxed) {
                    break;
                }
            }
        });
        RequestStage {
            thread_hdl,
            blob_receiver,
            request_processor,
        }
    }
}
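Each packet is deserialized independently and failures become `None`, so one corrupt datagram cannot poison a batch. A sketch of that behavior built from the shown APIs (the demo function is hypothetical):

```rust
// Sketch: a Packets batch with one valid Request and one empty packet
// yields Some for the first and None for the second.
use bincode::serialize;
use packet::{Packet, Packets};
use request::Request;
use request_stage::RequestStage;

fn demo_deserialize_requests() {
    let mut good = Packet::default();
    let bytes = serialize(&Request::GetLastId).unwrap();
    good.data[..bytes.len()].copy_from_slice(&bytes);
    good.meta.size = bytes.len();

    let bad = Packet::default(); // meta.size == 0: nothing to parse
    let batch = Packets { packets: vec![good, bad] };
    let reqs = RequestStage::deserialize_requests(&batch);
    assert!(reqs[0].is_some());
    assert!(reqs[1].is_none());
}
```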
136  src/result.rs  Normal file
@@ -0,0 +1,136 @@
//! The `result` module exposes a Result type that propagates one of many different Error types.

use bank;
use bincode;
use serde_json;
use std;
use std::any::Any;

#[derive(Debug)]
pub enum Error {
    IO(std::io::Error),
    JSON(serde_json::Error),
    AddrParse(std::net::AddrParseError),
    JoinError(Box<Any + Send + 'static>),
    RecvError(std::sync::mpsc::RecvError),
    RecvTimeoutError(std::sync::mpsc::RecvTimeoutError),
    Serialize(std::boxed::Box<bincode::ErrorKind>),
    BankError(bank::BankError),
    SendError,
    Services,
    CrdtTooSmall,
    GenericError,
}

pub type Result<T> = std::result::Result<T, Error>;

impl std::convert::From<std::sync::mpsc::RecvError> for Error {
    fn from(e: std::sync::mpsc::RecvError) -> Error {
        Error::RecvError(e)
    }
}
impl std::convert::From<std::sync::mpsc::RecvTimeoutError> for Error {
    fn from(e: std::sync::mpsc::RecvTimeoutError) -> Error {
        Error::RecvTimeoutError(e)
    }
}
impl std::convert::From<bank::BankError> for Error {
    fn from(e: bank::BankError) -> Error {
        Error::BankError(e)
    }
}
impl<T> std::convert::From<std::sync::mpsc::SendError<T>> for Error {
    fn from(_e: std::sync::mpsc::SendError<T>) -> Error {
        Error::SendError
    }
}
impl std::convert::From<Box<Any + Send + 'static>> for Error {
    fn from(e: Box<Any + Send + 'static>) -> Error {
        Error::JoinError(e)
    }
}

impl std::convert::From<std::io::Error> for Error {
    fn from(e: std::io::Error) -> Error {
        Error::IO(e)
    }
}
impl std::convert::From<serde_json::Error> for Error {
    fn from(e: serde_json::Error) -> Error {
        Error::JSON(e)
    }
}
impl std::convert::From<std::net::AddrParseError> for Error {
    fn from(e: std::net::AddrParseError) -> Error {
        Error::AddrParse(e)
    }
}
impl std::convert::From<std::boxed::Box<bincode::ErrorKind>> for Error {
    fn from(e: std::boxed::Box<bincode::ErrorKind>) -> Error {
        Error::Serialize(e)
    }
}

#[cfg(test)]
mod tests {
    use result::Error;
    use result::Result;
    use serde_json;
    use std::io;
    use std::io::Write;
    use std::net::SocketAddr;
    use std::panic;
    use std::sync::mpsc::channel;
    use std::sync::mpsc::RecvError;
    use std::sync::mpsc::RecvTimeoutError;
    use std::thread;

    fn addr_parse_error() -> Result<SocketAddr> {
        let r = "12fdfasfsafsadfs".parse()?;
        Ok(r)
    }

    fn join_error() -> Result<()> {
        panic::set_hook(Box::new(|_info| {}));
        let r = thread::spawn(|| panic!("hi")).join()?;
        Ok(r)
    }
    fn json_error() -> Result<()> {
        let r = serde_json::from_slice("=342{;;;;:}".as_bytes())?;
        Ok(r)
    }
    fn send_error() -> Result<()> {
        let (s, r) = channel();
        drop(r);
        s.send(())?;
        Ok(())
    }

    #[test]
    fn from_test() {
        assert_matches!(addr_parse_error(), Err(Error::AddrParse(_)));
        assert_matches!(Error::from(RecvError {}), Error::RecvError(_));
        assert_matches!(
            Error::from(RecvTimeoutError::Timeout),
            Error::RecvTimeoutError(_)
        );
        assert_matches!(send_error(), Err(Error::SendError));
        assert_matches!(join_error(), Err(Error::JoinError(_)));
        let ioe = io::Error::new(io::ErrorKind::NotFound, "hi");
        assert_matches!(Error::from(ioe), Error::IO(_));
    }
    #[test]
    fn fmt_test() {
        write!(io::sink(), "{:?}", addr_parse_error()).unwrap();
        write!(io::sink(), "{:?}", Error::from(RecvError {})).unwrap();
        write!(io::sink(), "{:?}", Error::from(RecvTimeoutError::Timeout)).unwrap();
        write!(io::sink(), "{:?}", send_error()).unwrap();
        write!(io::sink(), "{:?}", join_error()).unwrap();
        write!(io::sink(), "{:?}", json_error()).unwrap();
        write!(
            io::sink(),
            "{:?}",
            Error::from(io::Error::new(io::ErrorKind::NotFound, "hi"))
        ).unwrap();
    }
}
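Because every external error type gets a `From` impl into `Error`, the `?` operator converts automatically; that is the whole point of the module. A compact sketch (the function and its inputs are illustrative):

```rust
// Sketch: one function, two different error sources, one Result type.
// Each ? below relies on a From impl defined above.
use bincode::deserialize;
use result::Result;
use std::net::SocketAddr;

fn demo_question_mark(bytes: &[u8]) -> Result<SocketAddr> {
    let addr: SocketAddr = "127.0.0.1:8000".parse()?; // AddrParseError -> Error::AddrParse
    let _v: Vec<u8> = deserialize(bytes)?;            // Box<ErrorKind>  -> Error::Serialize
    Ok(addr)
}
```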
55  src/rpu.rs  Normal file
@@ -0,0 +1,55 @@
//! The `rpu` module implements the Request Processing Unit, a
//! 5-stage transaction processing pipeline in software.

use bank::Bank;
use packet;
use request_processor::RequestProcessor;
use request_stage::RequestStage;
use std::net::UdpSocket;
use std::sync::atomic::AtomicBool;
use std::sync::mpsc::channel;
use std::sync::Arc;
use std::thread::JoinHandle;
use streamer;

pub struct Rpu {
    pub thread_hdls: Vec<JoinHandle<()>>,
}

impl Rpu {
    pub fn new(
        bank: Arc<Bank>,
        requests_socket: UdpSocket,
        respond_socket: UdpSocket,
        exit: Arc<AtomicBool>,
    ) -> Self {
        let packet_recycler = packet::PacketRecycler::default();
        let (packet_sender, packet_receiver) = channel();
        let t_receiver = streamer::receiver(
            requests_socket,
            exit.clone(),
            packet_recycler.clone(),
            packet_sender,
        );

        let blob_recycler = packet::BlobRecycler::default();
        let request_processor = RequestProcessor::new(bank.clone());
        let request_stage = RequestStage::new(
            request_processor,
            exit.clone(),
            packet_receiver,
            packet_recycler.clone(),
            blob_recycler.clone(),
        );

        let t_responder = streamer::responder(
            respond_socket,
            exit.clone(),
            blob_recycler.clone(),
            request_stage.blob_receiver,
        );

        let thread_hdls = vec![t_receiver, t_responder, request_stage.thread_hdl];
        Rpu { thread_hdls }
    }
}
95  src/server.rs  Normal file
@@ -0,0 +1,95 @@
//! The `server` module hosts all the server microservices.

use bank::Bank;
use crdt::{Crdt, ReplicatedData};
use hash::Hash;
use packet;
use rpu::Rpu;
use std::io::Write;
use std::net::UdpSocket;
use std::sync::atomic::AtomicBool;
use std::sync::{Arc, RwLock};
use std::thread::JoinHandle;
use std::time::Duration;
use streamer;
use tpu::Tpu;
use tvu::Tvu;

pub struct Server {
    pub thread_hdls: Vec<JoinHandle<()>>,
}

impl Server {
    pub fn new_leader<W: Write + Send + 'static>(
        bank: Bank,
        start_hash: Hash,
        tick_duration: Option<Duration>,
        me: ReplicatedData,
        requests_socket: UdpSocket,
        transactions_socket: UdpSocket,
        broadcast_socket: UdpSocket,
        respond_socket: UdpSocket,
        gossip_socket: UdpSocket,
        exit: Arc<AtomicBool>,
        writer: W,
    ) -> Self {
        let bank = Arc::new(bank);
        let mut thread_hdls = vec![];
        let rpu = Rpu::new(bank.clone(), requests_socket, respond_socket, exit.clone());
        thread_hdls.extend(rpu.thread_hdls);

        let blob_recycler = packet::BlobRecycler::default();
        let tpu = Tpu::new(
            bank.clone(),
            start_hash,
            tick_duration,
            transactions_socket,
            blob_recycler.clone(),
            exit.clone(),
            writer,
        );
        thread_hdls.extend(tpu.thread_hdls);

        let crdt = Arc::new(RwLock::new(Crdt::new(me)));
        let t_gossip = Crdt::gossip(crdt.clone(), exit.clone());
        let window = streamer::default_window();
        let t_listen = Crdt::listen(crdt.clone(), window.clone(), gossip_socket, exit.clone());

        let t_broadcast = streamer::broadcaster(
            broadcast_socket,
            exit.clone(),
            crdt.clone(),
            window,
            blob_recycler.clone(),
            tpu.blob_receiver,
        );
        thread_hdls.extend(vec![t_gossip, t_listen, t_broadcast]);

        Server { thread_hdls }
    }
    pub fn new_validator(
        bank: Bank,
        me: ReplicatedData,
        requests_socket: UdpSocket,
        respond_socket: UdpSocket,
        replicate_socket: UdpSocket,
        gossip_socket: UdpSocket,
        leader_repl_data: ReplicatedData,
        exit: Arc<AtomicBool>,
    ) -> Self {
        let bank = Arc::new(bank);
        let mut thread_hdls = vec![];
        let rpu = Rpu::new(bank.clone(), requests_socket, respond_socket, exit.clone());
        thread_hdls.extend(rpu.thread_hdls);
        let tvu = Tvu::new(
            bank.clone(),
            me,
            gossip_socket,
            replicate_socket,
            leader_repl_data,
            exit.clone(),
        );
        thread_hdls.extend(tvu.thread_hdls);
        Server { thread_hdls }
    }
}
139  src/signature.rs  Normal file
@@ -0,0 +1,139 @@
//! The `signature` module provides functionality for public and private keys.

use generic_array::typenum::{U32, U64};
use generic_array::GenericArray;
use rand::{ChaChaRng, Rng, SeedableRng};
use rayon::prelude::*;
use ring::error::Unspecified;
use ring::rand::SecureRandom;
use ring::signature::Ed25519KeyPair;
use ring::{rand, signature};
use std::cell::RefCell;
use untrusted;

pub type KeyPair = Ed25519KeyPair;
pub type PublicKey = GenericArray<u8, U32>;
pub type Signature = GenericArray<u8, U64>;

pub trait KeyPairUtil {
    fn new() -> Self;
    fn pubkey(&self) -> PublicKey;
}

impl KeyPairUtil for Ed25519KeyPair {
    /// Return a new ED25519 keypair
    fn new() -> Self {
        let rng = rand::SystemRandom::new();
        let pkcs8_bytes = signature::Ed25519KeyPair::generate_pkcs8(&rng)
            .expect("generate_pkcs8 in signature pub fn new");
        signature::Ed25519KeyPair::from_pkcs8(untrusted::Input::from(&pkcs8_bytes))
            .expect("from_pkcs8 in signature pub fn new")
    }

    /// Return the public key for the given keypair
    fn pubkey(&self) -> PublicKey {
        GenericArray::clone_from_slice(self.public_key_bytes())
    }
}

pub trait SignatureUtil {
    fn verify(&self, peer_public_key_bytes: &[u8], msg_bytes: &[u8]) -> bool;
}

impl SignatureUtil for GenericArray<u8, U64> {
    fn verify(&self, peer_public_key_bytes: &[u8], msg_bytes: &[u8]) -> bool {
        let peer_public_key = untrusted::Input::from(peer_public_key_bytes);
        let msg = untrusted::Input::from(msg_bytes);
        let sig = untrusted::Input::from(self);
        signature::verify(&signature::ED25519, peer_public_key, msg, sig).is_ok()
    }
}

pub struct GenKeys {
    // This is necessary because the rng needs to mutate its state to remain
    // deterministic, and the fill trait requires an immutable reference to self
    generator: RefCell<ChaChaRng>,
}

impl GenKeys {
    pub fn new(seed: &[u8]) -> GenKeys {
        let seed32: Vec<_> = seed.iter().map(|&x| x as u32).collect();
        let rng = ChaChaRng::from_seed(&seed32);
        GenKeys {
            generator: RefCell::new(rng),
        }
    }

    pub fn new_key(&self) -> Vec<u8> {
        KeyPair::generate_pkcs8(self).unwrap().to_vec()
    }

    pub fn gen_n_seeds(&self, n: i64) -> Vec<[u8; 16]> {
        let mut rng = self.generator.borrow_mut();
        (0..n).map(|_| rng.gen()).collect()
    }

    pub fn gen_n_keypairs(&self, n: i64) -> Vec<KeyPair> {
        self.gen_n_seeds(n)
            .into_par_iter()
            .map(|seed| {
                let pkcs8 = GenKeys::new(&seed).new_key();
                KeyPair::from_pkcs8(untrusted::Input::from(&pkcs8)).unwrap()
            })
            .collect()
    }
}

impl SecureRandom for GenKeys {
    fn fill(&self, dest: &mut [u8]) -> Result<(), Unspecified> {
        let mut rng = self.generator.borrow_mut();
        rng.fill_bytes(dest);
        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::collections::HashSet;

    #[test]
    fn test_new_key_is_deterministic() {
        let seed = [1, 2, 3, 4];
        let rng0 = GenKeys::new(&seed);
        let rng1 = GenKeys::new(&seed);

        for _ in 0..100 {
            assert_eq!(rng0.new_key(), rng1.new_key());
        }
    }

    fn gen_n_pubkeys(seed: &[u8], n: i64) -> HashSet<PublicKey> {
        GenKeys::new(&seed)
            .gen_n_keypairs(n)
            .into_iter()
            .map(|x| x.pubkey())
            .collect()
    }

    #[test]
    fn test_gen_n_pubkeys_deterministic() {
        let seed = [1, 2, 3, 4];
        assert_eq!(gen_n_pubkeys(&seed, 50), gen_n_pubkeys(&seed, 50));
    }
}

#[cfg(all(feature = "unstable", test))]
mod bench {
    extern crate test;

    use self::test::Bencher;
    use super::*;

    #[bench]
    fn bench_gen_keys(b: &mut Bencher) {
        let seed: &[_] = &[1, 2, 3, 4];
        let rnd = GenKeys::new(seed);
        b.iter(|| rnd.gen_n_keypairs(1000));
    }
}
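`GenKeys` makes key generation reproducible: the seeded ChaCha stream is the only entropy source, so one seed always yields the same keypairs, which is what the deterministic tests above rely on. A short sketch:

```rust
// Sketch: two GenKeys built from the same seed produce identical pubkeys.
use signature::{GenKeys, KeyPairUtil};

fn demo_deterministic_keys() {
    let a = GenKeys::new(&[1, 2, 3, 4]).gen_n_keypairs(2);
    let b = GenKeys::new(&[1, 2, 3, 4]).gen_n_keypairs(2);
    assert_eq!(a[0].pubkey(), b[0].pubkey());
    assert_eq!(a[1].pubkey(), b[1].pubkey());
}
```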
214  src/sigverify.rs  Normal file
@@ -0,0 +1,214 @@
use packet::{Packet, SharedPackets};
use std::mem::size_of;
use transaction::{PUB_KEY_OFFSET, SIGNED_DATA_OFFSET, SIG_OFFSET};

pub const TX_OFFSET: usize = 0;

#[cfg(feature = "cuda")]
#[repr(C)]
struct Elems {
    elems: *const Packet,
    num: u32,
}

#[cfg(feature = "cuda")]
#[link(name = "cuda_verify_ed25519")]
extern "C" {
    fn ed25519_verify_many(
        vecs: *const Elems,
        num: u32,          //number of vecs
        message_size: u32, //size of each element inside the elems field of the vec
        public_key_offset: u32,
        signature_offset: u32,
        signed_message_offset: u32,
        signed_message_len_offset: u32,
        out: *mut u8, //combined length of all the items in vecs
    ) -> u32;
}

#[cfg(not(feature = "cuda"))]
fn verify_packet(packet: &Packet) -> u8 {
    use ring::signature;
    use signature::{PublicKey, Signature};
    use untrusted;

    let msg_start = TX_OFFSET + SIGNED_DATA_OFFSET;
    let sig_start = TX_OFFSET + SIG_OFFSET;
    let sig_end = sig_start + size_of::<Signature>();
    let pub_key_start = TX_OFFSET + PUB_KEY_OFFSET;
    let pub_key_end = pub_key_start + size_of::<PublicKey>();

    if packet.meta.size <= msg_start {
        return 0;
    }

    let msg_end = packet.meta.size;
    signature::verify(
        &signature::ED25519,
        untrusted::Input::from(&packet.data[pub_key_start..pub_key_end]),
        untrusted::Input::from(&packet.data[msg_start..msg_end]),
        untrusted::Input::from(&packet.data[sig_start..sig_end]),
    ).is_ok() as u8
}

fn batch_size(batches: &Vec<SharedPackets>) -> usize {
    batches
        .iter()
        .map(|p| p.read().unwrap().packets.len())
        .sum()
}

#[cfg(not(feature = "cuda"))]
pub fn ed25519_verify(batches: &Vec<SharedPackets>) -> Vec<Vec<u8>> {
    use rayon::prelude::*;
    info!("CPU ECDSA for {}", batch_size(batches));
    batches
        .into_par_iter()
        .map(|p| {
            p.read()
                .expect("'p' read lock in ed25519_verify")
                .packets
                .par_iter()
                .map(verify_packet)
                .collect()
        })
        .collect()
}

#[cfg(feature = "cuda")]
pub fn ed25519_verify(batches: &Vec<SharedPackets>) -> Vec<Vec<u8>> {
    use packet::PACKET_DATA_SIZE;

    info!("CUDA ECDSA for {}", batch_size(batches));
    let mut out = Vec::new();
    let mut elems = Vec::new();
    let mut locks = Vec::new();
    let mut rvs = Vec::new();

    for packets in batches {
        locks.push(
            packets
                .read()
                .expect("'packets' read lock in pub fn ed25519_verify"),
        );
    }
    let mut num = 0;
    for p in locks {
        elems.push(Elems {
            elems: p.packets.as_ptr(),
            num: p.packets.len() as u32,
        });
        let mut v = Vec::new();
        v.resize(p.packets.len(), 0);
        rvs.push(v);
        num += p.packets.len();
    }
    out.resize(num, 0);
    trace!("Starting verify num packets: {}", num);
    trace!("elem len: {}", elems.len() as u32);
    trace!("packet sizeof: {}", size_of::<Packet>() as u32);
    trace!("pub key: {}", (TX_OFFSET + PUB_KEY_OFFSET) as u32);
    trace!("sig offset: {}", (TX_OFFSET + SIG_OFFSET) as u32);
    trace!("sign data: {}", (TX_OFFSET + SIGNED_DATA_OFFSET) as u32);
    trace!("len offset: {}", PACKET_DATA_SIZE as u32);
    unsafe {
        let res = ed25519_verify_many(
            elems.as_ptr(),
            elems.len() as u32,
            size_of::<Packet>() as u32,
            (TX_OFFSET + PUB_KEY_OFFSET) as u32,
            (TX_OFFSET + SIG_OFFSET) as u32,
            (TX_OFFSET + SIGNED_DATA_OFFSET) as u32,
            PACKET_DATA_SIZE as u32,
            out.as_mut_ptr(),
        );
        if res != 0 {
            trace!("RETURN!!!: {}", res);
        }
    }
    trace!("done verify");
    let mut num = 0;
    for vs in rvs.iter_mut() {
        for mut v in vs.iter_mut() {
            *v = out[num];
            if *v != 0 {
                trace!("VERIFIED PACKET!!!!!");
            }
            num += 1;
        }
    }
    rvs
}

#[cfg(test)]
mod tests {
    use bincode::serialize;
    use packet::{Packet, Packets, SharedPackets};
    use sigverify;
    use std::sync::RwLock;
    use transaction::Transaction;
    use transaction::{memfind, test_tx};

    #[test]
    fn test_layout() {
        let tx = test_tx();
        let tx_bytes = serialize(&tx).unwrap();
        let packet = serialize(&tx).unwrap();
        assert_matches!(memfind(&packet, &tx_bytes), Some(sigverify::TX_OFFSET));
        assert_matches!(memfind(&packet, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), None);
    }

    fn make_packet_from_transaction(tx: Transaction) -> Packet {
        let tx_bytes = serialize(&tx).unwrap();
        let mut packet = Packet::default();
        packet.meta.size = tx_bytes.len();
        packet.data[..packet.meta.size].copy_from_slice(&tx_bytes);
        return packet;
    }

    fn test_verify_n(n: usize, modify_data: bool) {
        let tx = test_tx();
        let mut packet = make_packet_from_transaction(tx);

        // jumble some data to test failure
        if modify_data {
            packet.data[20] = 10;
        }

        // generate packet vector
        let mut packets = Packets::default();
        packets.packets = Vec::new();
        for _ in 0..n {
            packets.packets.push(packet.clone());
        }
        let shared_packets = SharedPackets::new(RwLock::new(packets));
        let batches = vec![shared_packets.clone(), shared_packets.clone()];

        // verify packets
        let ans = sigverify::ed25519_verify(&batches);

        // check result
        let ref_ans = if modify_data { 0u8 } else { 1u8 };
        assert_eq!(ans, vec![vec![ref_ans; n], vec![ref_ans; n]]);
    }

    #[test]
    fn test_verify_zero() {
        test_verify_n(0, false);
    }

    #[test]
    fn test_verify_one() {
        test_verify_n(1, false);
    }

    #[test]
    fn test_verify_seventy_one() {
        test_verify_n(71, false);
    }

    #[test]
    fn test_verify_fail() {
        test_verify_n(5, true);
    }
}
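Both the CPU and CUDA paths return one `u8` per packet, row-aligned with the input batches: 1 for a good signature, 0 otherwise. A sketch of consuming that shape (the helper function is hypothetical):

```rust
// Sketch: count the packets whose signatures verified. Output rows line
// up one-to-one with the input SharedPackets batches.
use packet::SharedPackets;
use sigverify;

fn count_verified(batches: &Vec<SharedPackets>) -> usize {
    sigverify::ed25519_verify(batches)
        .into_iter()
        .flat_map(|row| row.into_iter())
        .filter(|&ok| ok != 0)
        .count()
}
```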
96
src/sigverify_stage.rs
Normal file
96
src/sigverify_stage.rs
Normal file
@ -0,0 +1,96 @@
|
||||
//! The `sigverify_stage` implements the signature verification stage of the TPU.

use packet::SharedPackets;
use rand::{thread_rng, Rng};
use result::Result;
use sigverify;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::{channel, Receiver, Sender};
use std::sync::{Arc, Mutex};
use std::thread::{spawn, JoinHandle};
use std::time::Instant;
use streamer;
use timing;

pub struct SigVerifyStage {
    pub verified_receiver: Receiver<Vec<(SharedPackets, Vec<u8>)>>,
    pub thread_hdls: Vec<JoinHandle<()>>,
}

impl SigVerifyStage {
    pub fn new(exit: Arc<AtomicBool>, packet_receiver: Receiver<SharedPackets>) -> Self {
        let (verified_sender, verified_receiver) = channel();
        let thread_hdls = Self::verifier_services(exit, packet_receiver, verified_sender);
        SigVerifyStage {
            thread_hdls,
            verified_receiver,
        }
    }

    fn verify_batch(batch: Vec<SharedPackets>) -> Vec<(SharedPackets, Vec<u8>)> {
        let r = sigverify::ed25519_verify(&batch);
        batch.into_iter().zip(r).collect()
    }

    fn verifier(
        recvr: &Arc<Mutex<streamer::PacketReceiver>>,
        sendr: &Arc<Mutex<Sender<Vec<(SharedPackets, Vec<u8>)>>>>,
    ) -> Result<()> {
        let (batch, len) =
            streamer::recv_batch(&recvr.lock().expect("'recvr' lock in fn verifier"))?;

        let now = Instant::now();
        let batch_len = batch.len();
        let rand_id = thread_rng().gen_range(0, 100);
        info!(
            "@{:?} verifier: verifying: {} id: {}",
            timing::timestamp(),
            batch.len(),
            rand_id
        );

        let verified_batch = Self::verify_batch(batch);
        sendr
            .lock()
            .expect("'sendr' lock in fn verifier")
            .send(verified_batch)?;

        let total_time_ms = timing::duration_as_ms(&now.elapsed());
        let total_time_s = timing::duration_as_s(&now.elapsed());
        info!(
            "@{:?} verifier: done. batches: {} total verify time: {:?} id: {} verified: {} v/s {}",
            timing::timestamp(),
            batch_len,
            total_time_ms,
            rand_id,
            len,
            (len as f32 / total_time_s)
        );
        Ok(())
    }

    fn verifier_service(
        exit: Arc<AtomicBool>,
        packet_receiver: Arc<Mutex<streamer::PacketReceiver>>,
        verified_sender: Arc<Mutex<Sender<Vec<(SharedPackets, Vec<u8>)>>>>,
    ) -> JoinHandle<()> {
        spawn(move || loop {
            let e = Self::verifier(&packet_receiver, &verified_sender);
            if e.is_err() && exit.load(Ordering::Relaxed) {
                break;
            }
        })
    }

    fn verifier_services(
        exit: Arc<AtomicBool>,
        packet_receiver: streamer::PacketReceiver,
        verified_sender: Sender<Vec<(SharedPackets, Vec<u8>)>>,
    ) -> Vec<JoinHandle<()>> {
        let sender = Arc::new(Mutex::new(verified_sender));
        let receiver = Arc::new(Mutex::new(packet_receiver));
        (0..4)
            .map(|_| Self::verifier_service(exit.clone(), receiver.clone(), sender.clone()))
            .collect()
    }
}
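For orientation, a minimal wiring sketch (not part of the diff; the channel setup is assumed, as in `tpu.rs` below, where a streamer `receiver` feeds `packet_sender`):

    let exit = Arc::new(AtomicBool::new(false));
    let (packet_sender, packet_receiver) = channel();
    // ... spawn a packet source that sends SharedPackets on `packet_sender` ...
    let stage = SigVerifyStage::new(exit.clone(), packet_receiver);
    for batch in stage.verified_receiver.iter() {
        for (_packets, sig_flags) in batch {
            // each u8 in `sig_flags` reports one packet's signature check result
        }
    }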
845  src/streamer.rs  (new file)
@@ -0,0 +1,845 @@
//! The `streamer` module defines a set of services for efficiently pulling data from UDP sockets.

use crdt::Crdt;
#[cfg(feature = "erasure")]
use erasure;
use packet::{Blob, BlobRecycler, PacketRecycler, SharedBlob, SharedPackets, BLOB_SIZE};
use result::{Error, Result};
use std::collections::VecDeque;
use std::net::{SocketAddr, UdpSocket};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc;
use std::sync::{Arc, RwLock};
use std::thread::{spawn, JoinHandle};
use std::time::Duration;

pub const WINDOW_SIZE: usize = 2 * 1024;
pub type PacketReceiver = mpsc::Receiver<SharedPackets>;
pub type PacketSender = mpsc::Sender<SharedPackets>;
pub type BlobSender = mpsc::Sender<VecDeque<SharedBlob>>;
pub type BlobReceiver = mpsc::Receiver<VecDeque<SharedBlob>>;
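/// Reads packets from `sock` into freshly allocated recycler buffers and
/// forwards each filled buffer over `channel`; returns cleanly once `exit`
/// is set and a socket read times out.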
fn recv_loop(
    sock: &UdpSocket,
    exit: &Arc<AtomicBool>,
    re: &PacketRecycler,
    channel: &PacketSender,
) -> Result<()> {
    loop {
        let msgs = re.allocate();
        let msgs_ = msgs.clone();
        loop {
            match msgs.write()
                .expect("write lock in fn recv_loop")
                .recv_from(sock)
            {
                Ok(()) => {
                    channel.send(msgs_)?;
                    break;
                }
                Err(_) => {
                    if exit.load(Ordering::Relaxed) {
                        re.recycle(msgs_);
                        return Ok(());
                    }
                }
            }
        }
    }
}

pub fn receiver(
    sock: UdpSocket,
    exit: Arc<AtomicBool>,
    recycler: PacketRecycler,
    packet_sender: PacketSender,
) -> JoinHandle<()> {
    sock.set_read_timeout(Some(Duration::new(1, 0)))
        .expect("streamer::receiver set_read_timeout error");
    spawn(move || {
        let _ = recv_loop(&sock, &exit, &recycler, &packet_sender);
    })
}

fn recv_send(sock: &UdpSocket, recycler: &BlobRecycler, r: &BlobReceiver) -> Result<()> {
    let timer = Duration::new(1, 0);
    let mut msgs = r.recv_timeout(timer)?;
    Blob::send_to(recycler, sock, &mut msgs)?;
    Ok(())
}
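/// Blocks for up to one second for an initial batch, then greedily drains
/// whatever else is already queued (stopping past ~100k packets) so the
/// verifier can amortize its per-batch cost. Returns the batch together with
/// the total packet count.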
pub fn recv_batch(recvr: &PacketReceiver) -> Result<(Vec<SharedPackets>, usize)> {
    let timer = Duration::new(1, 0);
    let msgs = recvr.recv_timeout(timer)?;
    trace!("got msgs");
    let mut len = msgs.read().unwrap().packets.len();
    let mut batch = vec![msgs];
    while let Ok(more) = recvr.try_recv() {
        trace!("got more msgs");
        len += more.read().unwrap().packets.len();
        batch.push(more);

        if len > 100_000 {
            break;
        }
    }
    debug!("batch len {}", batch.len());
    Ok((batch, len))
}
pub fn responder(
    sock: UdpSocket,
    exit: Arc<AtomicBool>,
    recycler: BlobRecycler,
    r: BlobReceiver,
) -> JoinHandle<()> {
    spawn(move || loop {
        if recv_send(&sock, &recycler, &r).is_err() && exit.load(Ordering::Relaxed) {
            break;
        }
    })
}

//TODO: we will need to add block authentication before the window is created.
fn recv_blobs(recycler: &BlobRecycler, sock: &UdpSocket, s: &BlobSender) -> Result<()> {
    let dq = Blob::recv_from(recycler, sock)?;
    if !dq.is_empty() {
        s.send(dq)?;
    }
    Ok(())
}

pub fn blob_receiver(
    exit: Arc<AtomicBool>,
    recycler: BlobRecycler,
    sock: UdpSocket,
    s: BlobSender,
) -> Result<JoinHandle<()>> {
    //DOCUMENTED SIDE-EFFECT
    //1 second timeout on socket read
    let timer = Duration::new(1, 0);
    sock.set_read_timeout(Some(timer))?;
    let t = spawn(move || loop {
        if exit.load(Ordering::Relaxed) {
            break;
        }
        let _ = recv_blobs(&recycler, &sock, &s);
    });
    Ok(t)
}
fn find_next_missing(
    locked_window: &Arc<RwLock<Vec<Option<SharedBlob>>>>,
    crdt: &Arc<RwLock<Crdt>>,
    consumed: &mut usize,
    received: &mut usize,
) -> Result<Vec<(SocketAddr, Vec<u8>)>> {
    if *received <= *consumed {
        return Err(Error::GenericError);
    }
    let window = locked_window.read().unwrap();
    let reqs: Vec<_> = (*consumed..*received)
        .filter_map(|pix| {
            let i = pix % WINDOW_SIZE;
            if window[i].is_none() {
                let val = crdt.read().unwrap().window_index_request(pix as u64);
                if let Ok((to, req)) = val {
                    return Some((to, req));
                }
            }
            None
        })
        .collect();
    Ok(reqs)
}
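/// Requests retransmission of window slots still missing between `consumed`
/// and `received`. `times` counts attempts since `consumed` last advanced,
/// and requests are only sent when `times` reaches a power of two
/// (1, 2, 4, 8, ...), giving exponential backoff.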
fn repair_window(
    locked_window: &Arc<RwLock<Vec<Option<SharedBlob>>>>,
    crdt: &Arc<RwLock<Crdt>>,
    last: &mut usize,
    times: &mut usize,
    consumed: &mut usize,
    received: &mut usize,
) -> Result<()> {
    let reqs = find_next_missing(locked_window, crdt, consumed, received)?;
    //exponential backoff
    if *last != *consumed {
        *times = 0;
    }
    *last = *consumed;
    *times += 1;
    //only retry when `times` is a power of two (1, 2, 4, 8, ...); otherwise return Ok
    if *times & (*times - 1) != 0 {
        trace!("repair_window counter {} {}", *times, *consumed);
        return Ok(());
    }
    let sock = UdpSocket::bind("0.0.0.0:0")?;
    for (to, req) in reqs {
        //todo cache socket
        info!("repair_window request {} {} {}", *consumed, *received, to);
        assert!(req.len() < BLOB_SIZE);
        sock.send_to(&req, to)?;
    }
    Ok(())
}
fn recv_window(
    locked_window: &Arc<RwLock<Vec<Option<SharedBlob>>>>,
    crdt: &Arc<RwLock<Crdt>>,
    recycler: &BlobRecycler,
    consumed: &mut usize,
    received: &mut usize,
    r: &BlobReceiver,
    s: &BlobSender,
    retransmit: &BlobSender,
) -> Result<()> {
    let timer = Duration::from_millis(200);
    let mut dq = r.recv_timeout(timer)?;
    let leader_id = crdt.read()
        .expect("'crdt' read lock in fn recv_window")
        .leader_data()
        .id;
    while let Ok(mut nq) = r.try_recv() {
        dq.append(&mut nq)
    }
    {
        //retransmit all leader blocks
        let mut retransmitq = VecDeque::new();
        for b in &dq {
            let p = b.read().expect("'b' read lock in fn recv_window");
            //TODO this check isn't safe against adversarial packets
            //we need to maintain a sequence window
            trace!(
                "idx: {} id: {:?} addr: {:?} leader: {:?}",
                p.get_index().expect("get_index in fn recv_window"),
                p.get_id().expect("get_id in trace! fn recv_window"),
                p.meta.addr(),
                leader_id
            );
            if p.get_id().expect("get_id in fn recv_window") == leader_id {
                //TODO
                //need to copy the retransmitted blob
                //otherwise we get into races with which thread
                //should do the recycling
                //
                //a better abstraction would be to recycle when the blob
                //is dropped via a weakref to the recycler
                let nv = recycler.allocate();
                {
                    let mut mnv = nv.write().expect("recycler write lock in fn recv_window");
                    let sz = p.meta.size;
                    mnv.meta.size = sz;
                    mnv.data[..sz].copy_from_slice(&p.data[..sz]);
                }
                retransmitq.push_back(nv);
            }
        }
        if !retransmitq.is_empty() {
            retransmit.send(retransmitq)?;
        }
    }
    //send a contiguous set of blocks
    let mut contq = VecDeque::new();
    while let Some(b) = dq.pop_front() {
        let b_ = b.clone();
        let p = b.write().expect("'b' write lock in fn recv_window");
        let pix = p.get_index()? as usize;
        if pix > *received {
            *received = pix;
        }
        let w = pix % WINDOW_SIZE;
        //TODO, after the blocks are authenticated:
        //if we get different blocks at the same index
        //that is a network failure/attack
        trace!("window w: {} size: {}", w, p.meta.size);
        {
            let mut window = locked_window.write().unwrap();
            if window[w].is_none() {
                window[w] = Some(b_);
            } else if let Some(cblob) = &window[w] {
                if cblob.read().unwrap().get_index().unwrap() != pix as u64 {
                    warn!("overrun blob at index {}", w);
                } else {
                    debug!("duplicate blob at index {}", w);
                }
            }
            loop {
                let k = *consumed % WINDOW_SIZE;
                trace!("k: {} consumed: {}", k, *consumed);
                if window[k].is_none() {
                    break;
                }
                contq.push_back(window[k].clone().expect("clone in fn recv_window"));
                window[k] = None;
                *consumed += 1;
            }
        }
    }
    {
        let buf: Vec<_> = locked_window
            .read()
            .unwrap()
            .iter()
            .enumerate()
            .map(|(i, v)| {
                if i == (*consumed % WINDOW_SIZE) {
                    assert!(v.is_none());
                    "_"
                } else if v.is_none() {
                    "0"
                } else {
                    "1"
                }
            })
            .collect();
        trace!("WINDOW: {}", buf.join(""));
    }
    if !contq.is_empty() {
        trace!("sending contq.len: {}", contq.len());
        s.send(contq)?;
    }
    Ok(())
}
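// The `window` service below drives `recv_window` and `repair_window` in a
// loop: blobs are reassembled into a contiguous stream through the shared
// window, leader blobs are retransmitted, and gaps are re-requested with
// exponential backoff.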
pub fn default_window() -> Arc<RwLock<Vec<Option<SharedBlob>>>> {
    Arc::new(RwLock::new(vec![None; WINDOW_SIZE]))
}

pub fn window(
    exit: Arc<AtomicBool>,
    crdt: Arc<RwLock<Crdt>>,
    window: Arc<RwLock<Vec<Option<SharedBlob>>>>,
    recycler: BlobRecycler,
    r: BlobReceiver,
    s: BlobSender,
    retransmit: BlobSender,
) -> JoinHandle<()> {
    spawn(move || {
        let mut consumed = 0;
        let mut received = 0;
        let mut last = 0;
        let mut times = 0;
        loop {
            if exit.load(Ordering::Relaxed) {
                break;
            }
            let _ = recv_window(
                &window,
                &crdt,
                &recycler,
                &mut consumed,
                &mut received,
                &r,
                &s,
                &retransmit,
            );
            let _ = repair_window(
                &window,
                &crdt,
                &mut last,
                &mut times,
                &mut consumed,
                &mut received,
            );
        }
    })
}
fn broadcast(
    crdt: &Arc<RwLock<Crdt>>,
    window: &Arc<RwLock<Vec<Option<SharedBlob>>>>,
    recycler: &BlobRecycler,
    r: &BlobReceiver,
    sock: &UdpSocket,
    transmit_index: &mut u64,
) -> Result<()> {
    let timer = Duration::new(1, 0);
    let mut dq = r.recv_timeout(timer)?;
    while let Ok(mut nq) = r.try_recv() {
        dq.append(&mut nq);
    }
    let mut blobs: Vec<_> = dq.into_iter().collect();
    // appends coding blobs to the list, allowing us to reconstruct the stream
    // NOTE: the original call referenced `re` and `consumed`, which are not in
    // scope here; `recycler` and `*transmit_index` appear to be the intended
    // arguments.
    #[cfg(feature = "erasure")]
    erasure::generate_coding(recycler, &mut blobs, *transmit_index as usize);
    Crdt::broadcast(crdt, &blobs, &sock, transmit_index)?;
    // keep the cache of blobs that are broadcast
    {
        let mut win = window.write().unwrap();
        for b in &blobs {
            let ix = b.read().unwrap().get_index().expect("blob index");
            let pos = (ix as usize) % WINDOW_SIZE;
            if let Some(x) = &win[pos] {
                trace!(
                    "popped {} at {}",
                    x.read().unwrap().get_index().unwrap(),
                    pos
                );
                recycler.recycle(x.clone());
            }
            trace!("null {}", pos);
            win[pos] = None;
            assert!(win[pos].is_none());
        }
        while let Some(b) = blobs.pop() {
            let ix = b.read().unwrap().get_index().expect("blob index");
            let pos = (ix as usize) % WINDOW_SIZE;
            trace!("caching {} at {}", ix, pos);
            assert!(win[pos].is_none());
            win[pos] = Some(b);
        }
    }
    Ok(())
}
/// Service to broadcast messages from the leader to layer 1 nodes.
/// See `crdt` for network layer definitions.
/// # Arguments
/// * `sock` - Socket to send from.
/// * `exit` - Boolean to signal system exit.
/// * `crdt` - CRDT structure.
/// * `window` - Cache of blobs that we have broadcast.
/// * `recycler` - Blob recycler.
/// * `r` - Receive channel for blobs to be broadcast to all the layer 1 nodes.
pub fn broadcaster(
    sock: UdpSocket,
    exit: Arc<AtomicBool>,
    crdt: Arc<RwLock<Crdt>>,
    window: Arc<RwLock<Vec<Option<SharedBlob>>>>,
    recycler: BlobRecycler,
    r: BlobReceiver,
) -> JoinHandle<()> {
    spawn(move || {
        let mut transmit_index = 0;
        loop {
            if exit.load(Ordering::Relaxed) {
                break;
            }
            let _ = broadcast(&crdt, &window, &recycler, &r, &sock, &mut transmit_index);
        }
    })
}
fn retransmit(
    crdt: &Arc<RwLock<Crdt>>,
    recycler: &BlobRecycler,
    r: &BlobReceiver,
    sock: &UdpSocket,
) -> Result<()> {
    let timer = Duration::new(1, 0);
    let mut dq = r.recv_timeout(timer)?;
    while let Ok(mut nq) = r.try_recv() {
        dq.append(&mut nq);
    }
    for b in &dq {
        Crdt::retransmit(&crdt, b, sock)?;
    }
    while let Some(b) = dq.pop_front() {
        recycler.recycle(b);
    }
    Ok(())
}

/// Service to retransmit messages from the leader to layer 1 nodes.
/// See `crdt` for network layer definitions.
/// # Arguments
/// * `sock` - Socket to read from. Read timeout is set to 1 second.
/// * `exit` - Boolean to signal system exit.
/// * `crdt` - This structure needs to be updated and populated by the bank and via gossip.
/// * `recycler` - Blob recycler.
/// * `r` - Receive channel for blobs to be retransmitted to all the layer 1 nodes.
pub fn retransmitter(
    sock: UdpSocket,
    exit: Arc<AtomicBool>,
    crdt: Arc<RwLock<Crdt>>,
    recycler: BlobRecycler,
    r: BlobReceiver,
) -> JoinHandle<()> {
    spawn(move || {
        trace!("retransmitter started");
        loop {
            if exit.load(Ordering::Relaxed) {
                break;
            }
            // TODO: handle this error
            let _ = retransmit(&crdt, &recycler, &r, &sock);
        }
        trace!("exiting retransmitter");
    })
}
#[cfg(all(feature = "unstable", test))]
mod bench {
    extern crate test;
    use self::test::Bencher;
    use packet::{Packet, PacketRecycler, BLOB_SIZE, PACKET_DATA_SIZE};
    use result::Result;
    use std::net::{SocketAddr, UdpSocket};
    use std::sync::atomic::{AtomicBool, Ordering};
    use std::sync::mpsc::channel;
    use std::sync::{Arc, Mutex};
    use std::thread::sleep;
    use std::thread::{spawn, JoinHandle};
    use std::time::Duration;
    use std::time::SystemTime;
    use streamer::{receiver, PacketReceiver};

    fn producer(
        addr: &SocketAddr,
        recycler: PacketRecycler,
        exit: Arc<AtomicBool>,
    ) -> JoinHandle<()> {
        let send = UdpSocket::bind("0.0.0.0:0").unwrap();
        let msgs = recycler.allocate();
        let msgs_ = msgs.clone();
        msgs.write().unwrap().packets.resize(10, Packet::default());
        for w in msgs.write().unwrap().packets.iter_mut() {
            w.meta.size = PACKET_DATA_SIZE;
            w.meta.set_addr(&addr);
        }
        spawn(move || loop {
            if exit.load(Ordering::Relaxed) {
                return;
            }
            let mut num = 0;
            for p in msgs_.read().unwrap().packets.iter() {
                let a = p.meta.addr();
                assert!(p.meta.size < BLOB_SIZE);
                send.send_to(&p.data[..p.meta.size], &a).unwrap();
                num += 1;
            }
            assert_eq!(num, 10);
        })
    }

    fn sink(
        recycler: PacketRecycler,
        exit: Arc<AtomicBool>,
        rvs: Arc<Mutex<usize>>,
        r: PacketReceiver,
    ) -> JoinHandle<()> {
        spawn(move || loop {
            if exit.load(Ordering::Relaxed) {
                return;
            }
            let timer = Duration::new(1, 0);
            if let Ok(msgs) = r.recv_timeout(timer) {
                let msgs_ = msgs.clone();
                *rvs.lock().unwrap() += msgs.read().unwrap().packets.len();
                recycler.recycle(msgs_);
            }
        })
    }

    fn bench_streamer_with_result() -> Result<()> {
        let read = UdpSocket::bind("127.0.0.1:0")?;
        read.set_read_timeout(Some(Duration::new(1, 0)))?;

        let addr = read.local_addr()?;
        let exit = Arc::new(AtomicBool::new(false));
        let pack_recycler = PacketRecycler::default();

        let (s_reader, r_reader) = channel();
        let t_reader = receiver(read, exit.clone(), pack_recycler.clone(), s_reader);
        let t_producer1 = producer(&addr, pack_recycler.clone(), exit.clone());
        let t_producer2 = producer(&addr, pack_recycler.clone(), exit.clone());
        let t_producer3 = producer(&addr, pack_recycler.clone(), exit.clone());

        let rvs = Arc::new(Mutex::new(0));
        let t_sink = sink(pack_recycler.clone(), exit.clone(), rvs.clone(), r_reader);

        let start = SystemTime::now();
        let start_val = *rvs.lock().unwrap();
        sleep(Duration::new(5, 0));
        let elapsed = start.elapsed().unwrap();
        let end_val = *rvs.lock().unwrap();
        // total elapsed nanoseconds
        let time = elapsed.as_secs() * 1_000_000_000 + elapsed.subsec_nanos() as u64;
        let ftime = (time as f64) / 1_000_000_000f64;
        let fcount = (end_val - start_val) as f64;
        trace!("performance: {:?}", fcount / ftime);
        exit.store(true, Ordering::Relaxed);
        t_reader.join()?;
        t_producer1.join()?;
        t_producer2.join()?;
        t_producer3.join()?;
        t_sink.join()?;
        Ok(())
    }

    #[bench]
    pub fn bench_streamer(_bench: &mut Bencher) {
        bench_streamer_with_result().unwrap();
    }
}
#[cfg(test)]
mod test {
    use crdt::{Crdt, ReplicatedData};
    use logger;
    use packet::{Blob, BlobRecycler, Packet, PacketRecycler, Packets, PACKET_DATA_SIZE};
    use signature::KeyPair;
    use signature::KeyPairUtil;
    use std::collections::VecDeque;
    use std::io;
    use std::io::Write;
    use std::net::UdpSocket;
    use std::sync::atomic::{AtomicBool, Ordering};
    use std::sync::mpsc::channel;
    use std::sync::{Arc, RwLock};
    use std::thread::sleep;
    use std::time::Duration;
    use streamer::{blob_receiver, receiver, responder, retransmitter, window};
    use streamer::{default_window, BlobReceiver, PacketReceiver};

    fn get_msgs(r: PacketReceiver, num: &mut usize) {
        for _t in 0..5 {
            let timer = Duration::new(1, 0);
            match r.recv_timeout(timer) {
                Ok(m) => *num += m.read().unwrap().packets.len(),
                e => info!("error {:?}", e),
            }
            if *num == 10 {
                break;
            }
        }
    }

    #[test]
    pub fn streamer_debug() {
        write!(io::sink(), "{:?}", Packet::default()).unwrap();
        write!(io::sink(), "{:?}", Packets::default()).unwrap();
        write!(io::sink(), "{:?}", Blob::default()).unwrap();
    }

    #[test]
    pub fn streamer_send_test() {
        let read = UdpSocket::bind("127.0.0.1:0").expect("bind");
        read.set_read_timeout(Some(Duration::new(1, 0))).unwrap();

        let addr = read.local_addr().unwrap();
        let send = UdpSocket::bind("127.0.0.1:0").expect("bind");
        let exit = Arc::new(AtomicBool::new(false));
        let pack_recycler = PacketRecycler::default();
        let resp_recycler = BlobRecycler::default();
        let (s_reader, r_reader) = channel();
        let t_receiver = receiver(read, exit.clone(), pack_recycler.clone(), s_reader);
        let (s_responder, r_responder) = channel();
        let t_responder = responder(send, exit.clone(), resp_recycler.clone(), r_responder);
        let mut msgs = VecDeque::new();
        for i in 0..10 {
            let b = resp_recycler.allocate();
            let b_ = b.clone();
            let mut w = b.write().unwrap();
            w.data[0] = i as u8;
            w.meta.size = PACKET_DATA_SIZE;
            w.meta.set_addr(&addr);
            msgs.push_back(b_);
        }
        s_responder.send(msgs).expect("send");
        let mut num = 0;
        get_msgs(r_reader, &mut num);
        assert_eq!(num, 10);
        exit.store(true, Ordering::Relaxed);
        t_receiver.join().expect("join");
        t_responder.join().expect("join");
    }

    fn get_blobs(r: BlobReceiver, num: &mut usize) {
        for _t in 0..5 {
            let timer = Duration::new(1, 0);
            match r.recv_timeout(timer) {
                Ok(m) => {
                    for (i, v) in m.iter().enumerate() {
                        assert_eq!(v.read().unwrap().get_index().unwrap() as usize, *num + i);
                    }
                    *num += m.len();
                }
                e => info!("error {:?}", e),
            }
            if *num == 10 {
                break;
            }
        }
    }

    #[test]
    pub fn window_send_test() {
        let pubkey_me = KeyPair::new().pubkey();
        let read = UdpSocket::bind("127.0.0.1:0").expect("bind");
        let addr = read.local_addr().unwrap();
        let send = UdpSocket::bind("127.0.0.1:0").expect("bind");
        let serve = UdpSocket::bind("127.0.0.1:0").expect("bind");
        let transaction = UdpSocket::bind("127.0.0.1:0").expect("bind");
        let exit = Arc::new(AtomicBool::new(false));
        let rep_data = ReplicatedData::new(
            pubkey_me,
            read.local_addr().unwrap(),
            send.local_addr().unwrap(),
            serve.local_addr().unwrap(),
            transaction.local_addr().unwrap(),
        );
        let mut crdt_me = Crdt::new(rep_data);
        let me_id = crdt_me.my_data().id;
        crdt_me.set_leader(me_id);
        let subs = Arc::new(RwLock::new(crdt_me));

        let resp_recycler = BlobRecycler::default();
        let (s_reader, r_reader) = channel();
        let t_receiver =
            blob_receiver(exit.clone(), resp_recycler.clone(), read, s_reader).unwrap();
        let (s_window, r_window) = channel();
        let (s_retransmit, r_retransmit) = channel();
        let win = default_window();
        let t_window = window(
            exit.clone(),
            subs,
            win,
            resp_recycler.clone(),
            r_reader,
            s_window,
            s_retransmit,
        );
        let (s_responder, r_responder) = channel();
        let t_responder = responder(send, exit.clone(), resp_recycler.clone(), r_responder);
        let mut msgs = VecDeque::new();
        for v in 0..10 {
            let i = 9 - v;
            let b = resp_recycler.allocate();
            let b_ = b.clone();
            let mut w = b.write().unwrap();
            w.set_index(i).unwrap();
            w.set_id(me_id).unwrap();
            assert_eq!(i, w.get_index().unwrap());
            w.meta.size = PACKET_DATA_SIZE;
            w.meta.set_addr(&addr);
            msgs.push_back(b_);
        }
        s_responder.send(msgs).expect("send");
        let mut num = 0;
        get_blobs(r_window, &mut num);
        assert_eq!(num, 10);
        let mut q = r_retransmit.recv().unwrap();
        while let Ok(mut nq) = r_retransmit.try_recv() {
            q.append(&mut nq);
        }
        assert_eq!(q.len(), 10);
        exit.store(true, Ordering::Relaxed);
        t_receiver.join().expect("join");
        t_responder.join().expect("join");
        t_window.join().expect("join");
    }

    fn test_node() -> (Arc<RwLock<Crdt>>, UdpSocket, UdpSocket, UdpSocket) {
        let gossip = UdpSocket::bind("127.0.0.1:0").unwrap();
        let replicate = UdpSocket::bind("127.0.0.1:0").unwrap();
        let serve = UdpSocket::bind("127.0.0.1:0").unwrap();
        let transaction = UdpSocket::bind("127.0.0.1:0").unwrap();
        let pubkey = KeyPair::new().pubkey();
        let d = ReplicatedData::new(
            pubkey,
            gossip.local_addr().unwrap(),
            replicate.local_addr().unwrap(),
            serve.local_addr().unwrap(),
            transaction.local_addr().unwrap(),
        );
        trace!("data: {:?}", d);
        let crdt = Crdt::new(d);
        (Arc::new(RwLock::new(crdt)), gossip, replicate, serve)
    }

    #[test]
    #[ignore]
    //retransmit from leader to replicate target
    pub fn retransmit() {
        logger::setup();
        trace!("retransmit test start");
        let exit = Arc::new(AtomicBool::new(false));
        let (crdt_leader, sock_gossip_leader, _, sock_leader) = test_node();
        let (crdt_target, sock_gossip_target, sock_replicate_target, _) = test_node();
        let leader_data = crdt_leader.read().unwrap().my_data().clone();
        crdt_leader.write().unwrap().insert(&leader_data);
        crdt_leader.write().unwrap().set_leader(leader_data.id);
        let t_crdt_leader_g = Crdt::gossip(crdt_leader.clone(), exit.clone());
        let window_leader = Arc::new(RwLock::new(vec![]));
        let t_crdt_leader_l = Crdt::listen(
            crdt_leader.clone(),
            window_leader,
            sock_gossip_leader,
            exit.clone(),
        );

        crdt_target.write().unwrap().insert(&leader_data);
        crdt_target.write().unwrap().set_leader(leader_data.id);
        let t_crdt_target_g = Crdt::gossip(crdt_target.clone(), exit.clone());
        let window_target = Arc::new(RwLock::new(vec![]));
        let t_crdt_target_l = Crdt::listen(
            crdt_target.clone(),
            window_target,
            sock_gossip_target,
            exit.clone(),
        );
        //leader retransmitter
        let (s_retransmit, r_retransmit) = channel();
        let blob_recycler = BlobRecycler::default();
        let saddr = sock_leader.local_addr().unwrap();
        let t_retransmit = retransmitter(
            sock_leader,
            exit.clone(),
            crdt_leader.clone(),
            blob_recycler.clone(),
            r_retransmit,
        );

        //target receiver
        let (s_blob_receiver, r_blob_receiver) = channel();
        let t_receiver = blob_receiver(
            exit.clone(),
            blob_recycler.clone(),
            sock_replicate_target,
            s_blob_receiver,
        ).unwrap();
        for _ in 0..10 {
            let done = crdt_target.read().unwrap().update_index == 2
                && crdt_leader.read().unwrap().update_index == 2;
            if done {
                break;
            }
            let timer = Duration::new(1, 0);
            sleep(timer);
        }

        //send the data through
        let mut bq = VecDeque::new();
        let b = blob_recycler.allocate();
        b.write().unwrap().meta.size = 10;
        bq.push_back(b);
        s_retransmit.send(bq).unwrap();
        let timer = Duration::new(5, 0);
        trace!("Waiting for timeout");
        let mut oq = r_blob_receiver.recv_timeout(timer).unwrap();
        assert_eq!(oq.len(), 1);
        let o = oq.pop_front().unwrap();
        let ro = o.read().unwrap();
        assert_eq!(ro.meta.size, 10);
        assert_eq!(ro.meta.addr(), saddr);
        exit.store(true, Ordering::Relaxed);
        let threads = vec![
            t_receiver,
            t_retransmit,
            t_crdt_target_g,
            t_crdt_target_l,
            t_crdt_leader_g,
            t_crdt_leader_l,
        ];
        for t in threads {
            t.join().unwrap();
        }
    }
}
296  src/thin_client.rs  (new file)
@@ -0,0 +1,296 @@
//! The `thin_client` module is a client-side object that interfaces with
//! a server-side TPU. Client code should use this object instead of writing
//! messages to the network directly. The binary encoding of its messages is
//! unstable and may change in future releases.

use bincode::{deserialize, serialize};
use futures::future::{ok, FutureResult};
use hash::Hash;
use request::{Request, Response};
use signature::{KeyPair, PublicKey, Signature};
use std::collections::HashMap;
use std::io;
use std::net::{SocketAddr, UdpSocket};
use transaction::Transaction;

pub struct ThinClient {
    requests_addr: SocketAddr,
    requests_socket: UdpSocket,
    transactions_addr: SocketAddr,
    transactions_socket: UdpSocket,
    last_id: Option<Hash>,
    transaction_count: u64,
    balances: HashMap<PublicKey, Option<i64>>,
}

impl ThinClient {
    /// Create a new ThinClient that will interface with Rpu
    /// over `requests_socket` and `transactions_socket`. To receive responses, the caller
    /// must bind `requests_socket` to a public address before invoking ThinClient methods.
    pub fn new(
        requests_addr: SocketAddr,
        requests_socket: UdpSocket,
        transactions_addr: SocketAddr,
        transactions_socket: UdpSocket,
    ) -> Self {
        ThinClient {
            requests_addr,
            requests_socket,
            transactions_addr,
            transactions_socket,
            last_id: None,
            transaction_count: 0,
            balances: HashMap::new(),
        }
    }

    pub fn recv_response(&self) -> io::Result<Response> {
        let mut buf = vec![0u8; 1024];
        trace!("start recv_from");
        self.requests_socket.recv_from(&mut buf)?;
        trace!("end recv_from");
        let resp = deserialize(&buf).expect("deserialize balance in thin_client");
        Ok(resp)
    }

    pub fn process_response(&mut self, resp: Response) {
        match resp {
            Response::Balance { key, val } => {
                trace!("Response balance {:?} {:?}", key, val);
                self.balances.insert(key, val);
            }
            Response::LastId { id } => {
                info!("Response last_id {:?}", id);
                self.last_id = Some(id);
            }
            Response::TransactionCount { transaction_count } => {
                info!("Response transaction count {:?}", transaction_count);
                self.transaction_count = transaction_count;
            }
        }
    }

    /// Send a signed Transaction to the server for processing. This method
    /// does not wait for a response.
    pub fn transfer_signed(&self, tx: Transaction) -> io::Result<usize> {
        let data = serialize(&tx).expect("serialize Transaction in pub fn transfer_signed");
        self.transactions_socket
            .send_to(&data, &self.transactions_addr)
    }

    /// Creates, signs, and processes a Transaction. Useful for writing unit-tests.
    pub fn transfer(
        &self,
        n: i64,
        keypair: &KeyPair,
        to: PublicKey,
        last_id: &Hash,
    ) -> io::Result<Signature> {
        let tx = Transaction::new(keypair, to, n, *last_id);
        let sig = tx.sig;
        self.transfer_signed(tx).map(|_| sig)
    }

    /// Request the balance of the user holding `pubkey`. This method blocks
    /// until the server sends a response. If the response packet is dropped
    /// by the network, this method will hang indefinitely.
    pub fn get_balance(&mut self, pubkey: &PublicKey) -> io::Result<i64> {
        trace!("get_balance");
        let req = Request::GetBalance { key: *pubkey };
        let data = serialize(&req).expect("serialize GetBalance in pub fn get_balance");
        self.requests_socket
            .send_to(&data, &self.requests_addr)
            .expect("buffer error in pub fn get_balance");
        let mut done = false;
        while !done {
            let resp = self.recv_response()?;
            trace!("recv_response {:?}", resp);
            if let &Response::Balance { ref key, .. } = &resp {
                done = key == pubkey;
            }
            self.process_response(resp);
        }
        self.balances[pubkey].ok_or(io::Error::new(io::ErrorKind::Other, "nokey"))
    }

    /// Request the transaction count. If the response packet is dropped by the network,
    /// this method will hang indefinitely.
    pub fn transaction_count(&mut self) -> u64 {
        info!("transaction_count");
        let req = Request::GetTransactionCount;
        let data =
            serialize(&req).expect("serialize GetTransactionCount in pub fn transaction_count");
        self.requests_socket
            .send_to(&data, &self.requests_addr)
            .expect("buffer error in pub fn transaction_count");
        let mut done = false;
        while !done {
            let resp = self.recv_response().expect("transaction count dropped");
            info!("recv_response {:?}", resp);
            if let &Response::TransactionCount { .. } = &resp {
                done = true;
            }
            self.process_response(resp);
        }
        self.transaction_count
    }

    /// Request the last Entry ID from the server. This method blocks
    /// until the server sends a response.
    pub fn get_last_id(&mut self) -> FutureResult<Hash, ()> {
        info!("get_last_id");
        let req = Request::GetLastId;
        let data = serialize(&req).expect("serialize GetLastId in pub fn get_last_id");
        self.requests_socket
            .send_to(&data, &self.requests_addr)
            .expect("buffer error in pub fn get_last_id");
        let mut done = false;
        while !done {
            let resp = self.recv_response().expect("get_last_id response");
            if let &Response::LastId { .. } = &resp {
                done = true;
            }
            self.process_response(resp);
        }
        ok(self.last_id.expect("some last_id"))
    }

    pub fn poll_get_balance(&mut self, pubkey: &PublicKey) -> io::Result<i64> {
        use std::time::Instant;

        let mut balance;
        let now = Instant::now();
        loop {
            balance = self.get_balance(pubkey);
            if balance.is_ok() || now.elapsed().as_secs() > 1 {
                break;
            }
        }

        balance
    }
}
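A condensed usage sketch (mirroring `test_thin_client` below; `leader` stands for the leader's `ReplicatedData`, and `alice_keypair` / `bob_pubkey` are hypothetical stand-ins):

    let requests_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
    let transactions_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
    let mut client = ThinClient::new(
        leader.requests_addr,
        requests_socket,
        leader.transactions_addr,
        transactions_socket,
    );
    let last_id = client.get_last_id().wait().unwrap();
    let _sig = client.transfer(500, &alice_keypair, bob_pubkey, &last_id).unwrap();
    assert_eq!(client.poll_get_balance(&bob_pubkey).unwrap(), 500);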
#[cfg(test)]
mod tests {
    use super::*;
    use bank::Bank;
    use budget::Budget;
    use futures::Future;
    use logger;
    use mint::Mint;
    use server::Server;
    use signature::{KeyPair, KeyPairUtil};
    use std::io::sink;
    use std::sync::atomic::{AtomicBool, Ordering};
    use std::sync::Arc;
    use std::thread::sleep;
    use std::time::Duration;
    use transaction::{Instruction, Plan};
    use tvu::TestNode;

    #[test]
    fn test_thin_client() {
        logger::setup();
        let leader = TestNode::new();

        let alice = Mint::new(10_000);
        let bank = Bank::new(&alice);
        let bob_pubkey = KeyPair::new().pubkey();
        let exit = Arc::new(AtomicBool::new(false));

        let server = Server::new_leader(
            bank,
            alice.last_id(),
            Some(Duration::from_millis(30)),
            leader.data.clone(),
            leader.sockets.requests,
            leader.sockets.transaction,
            leader.sockets.broadcast,
            leader.sockets.respond,
            leader.sockets.gossip,
            exit.clone(),
            sink(),
        );
        sleep(Duration::from_millis(900));

        let requests_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
        let transactions_socket = UdpSocket::bind("0.0.0.0:0").unwrap();

        let mut client = ThinClient::new(
            leader.data.requests_addr,
            requests_socket,
            leader.data.transactions_addr,
            transactions_socket,
        );
        let last_id = client.get_last_id().wait().unwrap();
        let _sig = client
            .transfer(500, &alice.keypair(), bob_pubkey, &last_id)
            .unwrap();
        let balance = client.poll_get_balance(&bob_pubkey);
        assert_eq!(balance.unwrap(), 500);
        exit.store(true, Ordering::Relaxed);
        for t in server.thread_hdls {
            t.join().unwrap();
        }
    }

    #[test]
    fn test_bad_sig() {
        logger::setup();
        let leader = TestNode::new();
        let alice = Mint::new(10_000);
        let bank = Bank::new(&alice);
        let bob_pubkey = KeyPair::new().pubkey();
        let exit = Arc::new(AtomicBool::new(false));

        let server = Server::new_leader(
            bank,
            alice.last_id(),
            Some(Duration::from_millis(30)),
            leader.data.clone(),
            leader.sockets.requests,
            leader.sockets.transaction,
            leader.sockets.broadcast,
            leader.sockets.respond,
            leader.sockets.gossip,
            exit.clone(),
            sink(),
        );
        sleep(Duration::from_millis(300));

        let requests_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
        requests_socket
            .set_read_timeout(Some(Duration::new(5, 0)))
            .unwrap();
        let transactions_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
        let mut client = ThinClient::new(
            leader.data.requests_addr,
            requests_socket,
            leader.data.transactions_addr,
            transactions_socket,
        );
        let last_id = client.get_last_id().wait().unwrap();

        let tx = Transaction::new(&alice.keypair(), bob_pubkey, 500, last_id);

        let _sig = client.transfer_signed(tx).unwrap();

        let last_id = client.get_last_id().wait().unwrap();

        let mut tr2 = Transaction::new(&alice.keypair(), bob_pubkey, 501, last_id);
        if let Instruction::NewContract(contract) = &mut tr2.instruction {
            contract.tokens = 502;
            contract.plan = Plan::Budget(Budget::new_payment(502, bob_pubkey));
        }
        let _sig = client.transfer_signed(tr2).unwrap();

        let balance = client.poll_get_balance(&bob_pubkey);
        assert_eq!(balance.unwrap(), 500);
        exit.store(true, Ordering::Relaxed);
        for t in server.thread_hdls {
            t.join().unwrap();
        }
    }
}
17  src/timing.rs  (new file)
@@ -0,0 +1,17 @@
use std::time::Duration;
use std::time::{SystemTime, UNIX_EPOCH};

pub fn duration_as_ms(d: &Duration) -> u64 {
    (d.as_secs() * 1000) + (d.subsec_nanos() as u64 / 1_000_000)
}

pub fn duration_as_s(d: &Duration) -> f32 {
    d.as_secs() as f32 + (d.subsec_nanos() as f32 / 1_000_000_000.0)
}

pub fn timestamp() -> u64 {
    let now = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("create timestamp in timing");
    duration_as_ms(&now)
}
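A quick sanity check of the conversions, following directly from the definitions above:

    let d = Duration::new(2, 500_000_000); // 2.5 seconds
    assert_eq!(duration_as_ms(&d), 2_500); // 2 * 1000 + 500_000_000 / 1_000_000
    assert_eq!(duration_as_s(&d), 2.5);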
72  src/tpu.rs  (new file)
@@ -0,0 +1,72 @@
//! The `tpu` module implements the Transaction Processing Unit, a
//! 5-stage transaction processing pipeline in software.

use bank::Bank;
use banking_stage::BankingStage;
use fetch_stage::FetchStage;
use hash::Hash;
use packet::{BlobRecycler, PacketRecycler};
use record_stage::RecordStage;
use sigverify_stage::SigVerifyStage;
use std::io::Write;
use std::net::UdpSocket;
use std::sync::atomic::AtomicBool;
use std::sync::{Arc, Mutex};
use std::thread::JoinHandle;
use std::time::Duration;
use streamer::BlobReceiver;
use write_stage::WriteStage;

pub struct Tpu {
    pub blob_receiver: BlobReceiver,
    pub thread_hdls: Vec<JoinHandle<()>>,
}

impl Tpu {
    pub fn new<W: Write + Send + 'static>(
        bank: Arc<Bank>,
        start_hash: Hash,
        tick_duration: Option<Duration>,
        transactions_socket: UdpSocket,
        blob_recycler: BlobRecycler,
        exit: Arc<AtomicBool>,
        writer: W,
    ) -> Self {
        let packet_recycler = PacketRecycler::default();

        let fetch_stage =
            FetchStage::new(transactions_socket, exit.clone(), packet_recycler.clone());

        let sigverify_stage = SigVerifyStage::new(exit.clone(), fetch_stage.packet_receiver);

        let banking_stage = BankingStage::new(
            bank.clone(),
            exit.clone(),
            sigverify_stage.verified_receiver,
            packet_recycler.clone(),
        );

        let record_stage =
            RecordStage::new(banking_stage.signal_receiver, &start_hash, tick_duration);

        let write_stage = WriteStage::new(
            bank.clone(),
            exit.clone(),
            blob_recycler.clone(),
            Mutex::new(writer),
            record_stage.entry_receiver,
        );

        let mut thread_hdls = vec![
            fetch_stage.thread_hdl,
            banking_stage.thread_hdl,
            record_stage.thread_hdl,
            write_stage.thread_hdl,
        ];
        thread_hdls.extend(sigverify_stage.thread_hdls.into_iter());
        Tpu {
            blob_receiver: write_stage.blob_receiver,
            thread_hdls,
        }
    }
}
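For orientation (not part of the diff), the five stages wired up in `Tpu::new` and their hand-offs, as read from the constructor above:

    // FetchStage        reads raw packets from `transactions_socket`
    //   -> SigVerifyStage  verifies ed25519 signatures on four worker threads
    //   -> BankingStage    applies verified transactions to the Bank
    //   -> RecordStage     records entries starting from `start_hash`,
    //                      ticking if `tick_duration` is set
    //   -> WriteStage      writes entries out and exposes `blob_receiver`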
301  src/transaction.rs  (new file)
@@ -0,0 +1,301 @@
//! The `transaction` module provides functionality for creating log transactions.

use bincode::serialize;
use budget::{Budget, Condition};
use chrono::prelude::*;
use hash::Hash;
use payment_plan::{Payment, PaymentPlan, Witness};
use signature::{KeyPair, KeyPairUtil, PublicKey, Signature, SignatureUtil};

pub const SIGNED_DATA_OFFSET: usize = 112;
pub const SIG_OFFSET: usize = 8;
pub const PUB_KEY_OFFSET: usize = 80;

#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub enum Plan {
    Budget(Budget),
}

// A proxy for the underlying DSL.
impl PaymentPlan for Plan {
    fn final_payment(&self) -> Option<Payment> {
        match self {
            Plan::Budget(budget) => budget.final_payment(),
        }
    }

    fn verify(&self, spendable_tokens: i64) -> bool {
        match self {
            Plan::Budget(budget) => budget.verify(spendable_tokens),
        }
    }

    fn apply_witness(&mut self, witness: &Witness) {
        match self {
            Plan::Budget(budget) => budget.apply_witness(witness),
        }
    }
}
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub struct Contract {
    pub tokens: i64,
    pub plan: Plan,
}

#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub enum Instruction {
    NewContract(Contract),
    ApplyTimestamp(DateTime<Utc>),
    ApplySignature(Signature),
}

#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub struct Transaction {
    pub sig: Signature,
    pub from: PublicKey,
    pub instruction: Instruction,
    pub last_id: Hash,
    pub fee: i64,
}

impl Transaction {
    fn new_from_instruction(
        from_keypair: &KeyPair,
        instruction: Instruction,
        last_id: Hash,
        fee: i64,
    ) -> Self {
        let from = from_keypair.pubkey();
        let mut tx = Transaction {
            sig: Signature::default(),
            instruction,
            last_id,
            from,
            fee,
        };
        tx.sign(from_keypair);
        tx
    }

    /// Create and sign a new Transaction with a fee. Used for unit-testing.
    pub fn new_taxed(
        from_keypair: &KeyPair,
        to: PublicKey,
        tokens: i64,
        fee: i64,
        last_id: Hash,
    ) -> Self {
        let payment = Payment {
            tokens: tokens - fee,
            to,
        };
        let budget = Budget::Pay(payment);
        let plan = Plan::Budget(budget);
        let instruction = Instruction::NewContract(Contract { plan, tokens });
        Self::new_from_instruction(from_keypair, instruction, last_id, fee)
    }

    /// Create and sign a new Transaction. Used for unit-testing.
    pub fn new(from_keypair: &KeyPair, to: PublicKey, tokens: i64, last_id: Hash) -> Self {
        Self::new_taxed(from_keypair, to, tokens, 0, last_id)
    }

    /// Create and sign a new Witness Timestamp. Used for unit-testing.
    pub fn new_timestamp(from_keypair: &KeyPair, dt: DateTime<Utc>, last_id: Hash) -> Self {
        let instruction = Instruction::ApplyTimestamp(dt);
        Self::new_from_instruction(from_keypair, instruction, last_id, 0)
    }

    /// Create and sign a new Witness Signature. Used for unit-testing.
    pub fn new_signature(from_keypair: &KeyPair, tx_sig: Signature, last_id: Hash) -> Self {
        let instruction = Instruction::ApplySignature(tx_sig);
        Self::new_from_instruction(from_keypair, instruction, last_id, 0)
    }

    /// Create and sign a postdated Transaction. Used for unit-testing.
    pub fn new_on_date(
        from_keypair: &KeyPair,
        to: PublicKey,
        dt: DateTime<Utc>,
        tokens: i64,
        last_id: Hash,
    ) -> Self {
        let from = from_keypair.pubkey();
        let budget = Budget::Race(
            (Condition::Timestamp(dt), Payment { tokens, to }),
            (Condition::Signature(from), Payment { tokens, to: from }),
        );
        let plan = Plan::Budget(budget);
        let instruction = Instruction::NewContract(Contract { plan, tokens });
        Self::new_from_instruction(from_keypair, instruction, last_id, 0)
    }

    fn get_sign_data(&self) -> Vec<u8> {
        let mut data = serialize(&(&self.instruction)).expect("serialize Contract");
        let last_id_data = serialize(&(&self.last_id)).expect("serialize last_id");
        data.extend_from_slice(&last_id_data);

        let fee_data = serialize(&(&self.fee)).expect("serialize fee");
        data.extend_from_slice(&fee_data);

        data
    }

    /// Sign this transaction.
    pub fn sign(&mut self, keypair: &KeyPair) {
        let sign_data = self.get_sign_data();
        self.sig = Signature::clone_from_slice(keypair.sign(&sign_data).as_ref());
    }

    pub fn verify_sig(&self) -> bool {
        self.sig.verify(&self.from, &self.get_sign_data())
    }

    pub fn verify_plan(&self) -> bool {
        if let Instruction::NewContract(contract) = &self.instruction {
            self.fee >= 0 && self.fee <= contract.tokens
                && contract.plan.verify(contract.tokens - self.fee)
        } else {
            true
        }
    }
}
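A small sketch of the signing flow defined above (hypothetical keypair; mirrors `test_claim` below):

    let keypair = KeyPair::new();
    let mut tx = Transaction::new(&keypair, keypair.pubkey(), 42, Hash::default());
    assert!(tx.verify_sig());   // signature covers instruction, last_id, and fee
    assert!(tx.verify_plan());  // fee fits within tokens; the plan spends the remainder
    tx.fee = 1;                 // mutating any signed field invalidates the signature
    assert!(!tx.verify_sig());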
#[cfg(test)]
pub fn test_tx() -> Transaction {
    let keypair1 = KeyPair::new();
    let pubkey1 = keypair1.pubkey();
    let zero = Hash::default();
    Transaction::new(&keypair1, pubkey1, 42, zero)
}

#[cfg(test)]
pub fn memfind<A: Eq>(a: &[A], b: &[A]) -> Option<usize> {
    assert!(a.len() >= b.len());
    let end = a.len() - b.len() + 1;
    for i in 0..end {
        if a[i..i + b.len()] == b[..] {
            return Some(i);
        }
    }
    None
}
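`memfind` is a plain subsequence search used by `test_layout` to locate the signed region inside the serialized bytes; for example:

    assert_eq!(memfind(b"0123456789", b"34"), Some(3));
    assert_eq!(memfind(b"0123456789", b"43"), None);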
#[cfg(test)]
mod tests {
    use super::*;
    use bincode::{deserialize, serialize};

    #[test]
    fn test_claim() {
        let keypair = KeyPair::new();
        let zero = Hash::default();
        let tx0 = Transaction::new(&keypair, keypair.pubkey(), 42, zero);
        assert!(tx0.verify_plan());
    }

    #[test]
    fn test_transfer() {
        let zero = Hash::default();
        let keypair0 = KeyPair::new();
        let keypair1 = KeyPair::new();
        let pubkey1 = keypair1.pubkey();
        let tx0 = Transaction::new(&keypair0, pubkey1, 42, zero);
        assert!(tx0.verify_plan());
    }

    #[test]
    fn test_transfer_with_fee() {
        let zero = Hash::default();
        let keypair0 = KeyPair::new();
        let pubkey1 = KeyPair::new().pubkey();
        assert!(Transaction::new_taxed(&keypair0, pubkey1, 1, 1, zero).verify_plan());
        assert!(!Transaction::new_taxed(&keypair0, pubkey1, 1, 2, zero).verify_plan());
        assert!(!Transaction::new_taxed(&keypair0, pubkey1, 1, -1, zero).verify_plan());
    }

    #[test]
    fn test_serialize_claim() {
        let budget = Budget::Pay(Payment {
            tokens: 0,
            to: Default::default(),
        });
        let plan = Plan::Budget(budget);
        let instruction = Instruction::NewContract(Contract { plan, tokens: 0 });
        let claim0 = Transaction {
            instruction,
            from: Default::default(),
            last_id: Default::default(),
            sig: Default::default(),
            fee: 0,
        };
        let buf = serialize(&claim0).unwrap();
        let claim1: Transaction = deserialize(&buf).unwrap();
        assert_eq!(claim1, claim0);
    }

    #[test]
    fn test_token_attack() {
        let zero = Hash::default();
        let keypair = KeyPair::new();
        let pubkey = keypair.pubkey();
        let mut tx = Transaction::new(&keypair, pubkey, 42, zero);
        if let Instruction::NewContract(contract) = &mut tx.instruction {
            contract.tokens = 1_000_000; // <-- attack, part 1!
            if let Plan::Budget(Budget::Pay(ref mut payment)) = contract.plan {
                payment.tokens = contract.tokens; // <-- attack, part 2!
            }
        }
        assert!(tx.verify_plan());
        assert!(!tx.verify_sig());
    }

    #[test]
    fn test_hijack_attack() {
        let keypair0 = KeyPair::new();
        let keypair1 = KeyPair::new();
        let thief_keypair = KeyPair::new();
        let pubkey1 = keypair1.pubkey();
        let zero = Hash::default();
        let mut tx = Transaction::new(&keypair0, pubkey1, 42, zero);
        if let Instruction::NewContract(contract) = &mut tx.instruction {
            if let Plan::Budget(Budget::Pay(ref mut payment)) = contract.plan {
                payment.to = thief_keypair.pubkey(); // <-- attack!
            }
        }
        assert!(tx.verify_plan());
        assert!(!tx.verify_sig());
    }

    #[test]
    fn test_layout() {
        let tx = test_tx();
        let sign_data = tx.get_sign_data();
        let tx_bytes = serialize(&tx).unwrap();
        assert_matches!(memfind(&tx_bytes, &sign_data), Some(SIGNED_DATA_OFFSET));
        assert_matches!(memfind(&tx_bytes, &tx.sig), Some(SIG_OFFSET));
        assert_matches!(memfind(&tx_bytes, &tx.from), Some(PUB_KEY_OFFSET));
    }

    #[test]
    fn test_overspend_attack() {
        let keypair0 = KeyPair::new();
        let keypair1 = KeyPair::new();
        let zero = Hash::default();
        let mut tx = Transaction::new(&keypair0, keypair1.pubkey(), 1, zero);
        if let Instruction::NewContract(contract) = &mut tx.instruction {
            if let Plan::Budget(Budget::Pay(ref mut payment)) = contract.plan {
                payment.tokens = 2; // <-- attack!
            }
        }
        assert!(!tx.verify_plan());

        // Also, ensure all branches of the plan spend all tokens
        if let Instruction::NewContract(contract) = &mut tx.instruction {
            if let Plan::Budget(Budget::Pay(ref mut payment)) = contract.plan {
                payment.tokens = 0; // <-- whoops!
            }
        }
        assert!(!tx.verify_plan());
    }
}
347  src/tvu.rs  (new file)
@@ -0,0 +1,347 @@
//! The `tvu` module implements the Transaction Validation Unit, a
//! 5-stage transaction validation pipeline in software.
//! 1. streamer
//! - Incoming blobs are picked up from the replicate socket.
//! 2. verifier
//! - TODO Blobs are sent to the GPU, and while the memory is there the PoH stream is verified
//! along with the ecdsa signature for the blob and each signature in all the transactions. Blobs
//! with errors are dropped, or marked for slashing.
//! 3.a retransmit
//! - Blobs originating from the parent (the leader is, at the moment, the only parent) are retransmitted to all the
//! peers in the crdt. Peers are everyone who is not me or the leader that has a known replicate
//! address.
//! 3.b window
//! - Verified blobs are placed into a window, indexed by the counter set by the leader. This could
//! be the PoH counter if it's monotonically increasing in each blob. Erasure coding is used to
//! recover any missing packets, and requests are made at random to peers and parents to retransmit
//! a missing packet.
//! 4. accountant
//! - Contiguous blobs are sent to the accountant for processing transactions
//! 5. validator
//! - TODO Validation messages are sent back to the leader
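As wired in `Tvu::new` below, the dataflow is:

    // blob_receiver (replicate socket)
    //   -> window         reassembles blobs into a contiguous stream,
    //                     feeding leader blobs to the retransmitter
    //   -> ReplicateStage applies the transactions to the Bank
    // Crdt::gossip and Crdt::listen run alongside for peer discovery.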
use bank::Bank;
use crdt::{Crdt, ReplicatedData};
use packet;
use replicate_stage::ReplicateStage;
use signature::{KeyPair, KeyPairUtil};
use std::net::UdpSocket;
use std::sync::atomic::AtomicBool;
use std::sync::mpsc::channel;
use std::sync::{Arc, RwLock};
use std::thread::JoinHandle;
use streamer;

pub struct Tvu {
    pub thread_hdls: Vec<JoinHandle<()>>,
}

impl Tvu {
    /// This service receives messages from a leader in the network and processes the transactions
    /// on the bank state.
    /// # Arguments
    /// * `bank` - The bank state.
    /// * `me` - my configuration
    /// * `gossip` - my gossip socket
    /// * `replicate` - my replicate socket
    /// * `leader` - leader configuration
    /// * `exit` - The exit signal.
    pub fn new(
        bank: Arc<Bank>,
        me: ReplicatedData,
        gossip: UdpSocket,
        replicate: UdpSocket,
        leader: ReplicatedData,
        exit: Arc<AtomicBool>,
    ) -> Self {
        //replicate pipeline
        let crdt = Arc::new(RwLock::new(Crdt::new(me)));
        crdt.write()
            .expect("'crdt' write lock in pub fn replicate")
            .set_leader(leader.id);
        crdt.write()
            .expect("'crdt' write lock before insert() in pub fn replicate")
            .insert(&leader);
        let t_gossip = Crdt::gossip(crdt.clone(), exit.clone());
        let window = streamer::default_window();
        let t_listen = Crdt::listen(crdt.clone(), window.clone(), gossip, exit.clone());

        // TODO pull this socket out through the public interface
        // make sure we are on the same interface
        let mut local = replicate.local_addr().expect("tvu: get local address");
        local.set_port(0);
        let write = UdpSocket::bind(local).expect("tvu: bind to local socket");

        let blob_recycler = packet::BlobRecycler::default();
        let (blob_sender, blob_receiver) = channel();
        let t_blob_receiver = streamer::blob_receiver(
            exit.clone(),
            blob_recycler.clone(),
            replicate,
            blob_sender.clone(),
        ).expect("tvu: blob receiver creation");
        let (window_sender, window_receiver) = channel();
        let (retransmit_sender, retransmit_receiver) = channel();

        let t_retransmit = streamer::retransmitter(
            write,
            exit.clone(),
            crdt.clone(),
            blob_recycler.clone(),
            retransmit_receiver,
        );

        //TODO
        //the packets coming out of blob_receiver need to be sent to the GPU and verified,
        //then sent to the window, which does the erasure coding reconstruction
        let t_window = streamer::window(
            exit.clone(),
            crdt.clone(),
            window,
            blob_recycler.clone(),
            blob_receiver,
            window_sender,
            retransmit_sender,
        );

        let replicate_stage = ReplicateStage::new(
            bank.clone(),
            exit.clone(),
            window_receiver,
            blob_recycler.clone(),
        );

        let threads = vec![
            //replicate threads
            t_blob_receiver,
            t_retransmit,
            t_window,
            replicate_stage.thread_hdl,
            t_gossip,
            t_listen,
        ];
        Tvu {
            thread_hdls: threads,
        }
    }
}
pub struct Sockets {
    pub gossip: UdpSocket,
    pub requests: UdpSocket,
    pub replicate: UdpSocket,
    pub transaction: UdpSocket,
    pub respond: UdpSocket,
    pub broadcast: UdpSocket,
}

pub struct TestNode {
    pub data: ReplicatedData,
    pub sockets: Sockets,
}

impl TestNode {
    pub fn new() -> TestNode {
        let gossip = UdpSocket::bind("0.0.0.0:0").unwrap();
        let requests = UdpSocket::bind("0.0.0.0:0").unwrap();
        let transaction = UdpSocket::bind("0.0.0.0:0").unwrap();
        let replicate = UdpSocket::bind("0.0.0.0:0").unwrap();
        let respond = UdpSocket::bind("0.0.0.0:0").unwrap();
        let broadcast = UdpSocket::bind("0.0.0.0:0").unwrap();
        let pubkey = KeyPair::new().pubkey();
        let data = ReplicatedData::new(
            pubkey,
            gossip.local_addr().unwrap(),
            replicate.local_addr().unwrap(),
            requests.local_addr().unwrap(),
            transaction.local_addr().unwrap(),
        );
        TestNode {
            data,
            sockets: Sockets {
                gossip,
                requests,
                replicate,
                transaction,
                respond,
                broadcast,
            },
        }
    }
}
#[cfg(test)]
|
||||
pub fn test_node() -> (ReplicatedData, UdpSocket, UdpSocket, UdpSocket, UdpSocket) {
|
||||
use signature::{KeyPair, KeyPairUtil};
|
||||
use std::time::Duration;
|
||||
|
||||
let transactions_socket = UdpSocket::bind("127.0.0.1:0").unwrap();
|
||||
let gossip = UdpSocket::bind("127.0.0.1:0").unwrap();
|
||||
let replicate = UdpSocket::bind("127.0.0.1:0").unwrap();
|
||||
let requests_socket = UdpSocket::bind("127.0.0.1:0").unwrap();
|
||||
requests_socket
|
||||
.set_read_timeout(Some(Duration::new(1, 0)))
|
||||
.unwrap();
|
||||
let pubkey = KeyPair::new().pubkey();
|
||||
let d = ReplicatedData::new(
|
||||
pubkey,
|
||||
gossip.local_addr().unwrap(),
|
||||
replicate.local_addr().unwrap(),
|
||||
requests_socket.local_addr().unwrap(),
|
||||
transactions_socket.local_addr().unwrap(),
|
||||
);
|
||||
(d, gossip, replicate, requests_socket, transactions_socket)
|
||||
}

#[cfg(test)]
pub mod tests {
    use bank::Bank;
    use bincode::serialize;
    use crdt::Crdt;
    use entry::Entry;
    use hash::{hash, Hash};
    use logger;
    use mint::Mint;
    use packet::BlobRecycler;
    use signature::{KeyPair, KeyPairUtil};
    use std::collections::VecDeque;
    use std::sync::atomic::{AtomicBool, Ordering};
    use std::sync::mpsc::channel;
    use std::sync::{Arc, RwLock};
    use std::time::Duration;
    use streamer;
    use transaction::Transaction;
    use tvu::{TestNode, Tvu};

    /// Test that a message sent from the leader to target1 is replicated to target2.
    #[test]
    fn test_replicate() {
        logger::setup();
        let leader = TestNode::new();
        let target1 = TestNode::new();
        let target2 = TestNode::new();
        let exit = Arc::new(AtomicBool::new(false));

        // start crdt_leader
        let mut crdt_l = Crdt::new(leader.data.clone());
        crdt_l.set_leader(leader.data.id);

        let cref_l = Arc::new(RwLock::new(crdt_l));
        let t_l_gossip = Crdt::gossip(cref_l.clone(), exit.clone());
        let window1 = streamer::default_window();
        let t_l_listen = Crdt::listen(cref_l, window1, leader.sockets.gossip, exit.clone());

        // start crdt2
        let mut crdt2 = Crdt::new(target2.data.clone());
        crdt2.insert(&leader.data);
        crdt2.set_leader(leader.data.id);
        let leader_id = leader.data.id;
        let cref2 = Arc::new(RwLock::new(crdt2));
        let t2_gossip = Crdt::gossip(cref2.clone(), exit.clone());
        let window2 = streamer::default_window();
        let t2_listen = Crdt::listen(cref2, window2, target2.sockets.gossip, exit.clone());

        // Set up blob services that send blobs into the socket (to simulate
        // the source peer) and pull blobs out of the socket (to simulate the
        // target peer).
        let recv_recycler = BlobRecycler::default();
        let resp_recycler = BlobRecycler::default();
        let (s_reader, r_reader) = channel();
        let t_receiver = streamer::blob_receiver(
            exit.clone(),
            recv_recycler.clone(),
            target2.sockets.replicate,
            s_reader,
        ).unwrap();

        // simulate the leader sending messages
        let (s_responder, r_responder) = channel();
        let t_responder = streamer::responder(
            leader.sockets.requests,
            exit.clone(),
            resp_recycler.clone(),
            r_responder,
        );

        let starting_balance = 10_000;
        let mint = Mint::new(starting_balance);
        let replicate_addr = target1.data.replicate_addr;
        let bank = Arc::new(Bank::new(&mint));
        let tvu = Tvu::new(
            bank.clone(),
            target1.data,
            target1.sockets.gossip,
            target1.sockets.replicate,
            leader.data,
            exit.clone(),
        );

        let mut alice_ref_balance = starting_balance;
        let mut msgs = VecDeque::new();
        let mut cur_hash = Hash::default();
        let num_blobs = 10;
        let transfer_amount = 501;
        let bob_keypair = KeyPair::new();
        for i in 0..num_blobs {
            let b = resp_recycler.allocate();
            let b_ = b.clone();
            let mut w = b.write().unwrap();
            w.set_index(i).unwrap();
            w.set_id(leader_id).unwrap();

            let entry0 = Entry::new(&cur_hash, i, vec![]);
            bank.register_entry_id(&cur_hash);
            cur_hash = hash(&cur_hash);

            let tx0 = Transaction::new(
                &mint.keypair(),
                bob_keypair.pubkey(),
                transfer_amount,
                cur_hash,
            );
            bank.register_entry_id(&cur_hash);
            cur_hash = hash(&cur_hash);
            let entry1 = Entry::new(&cur_hash, i + num_blobs, vec![tx0]);
            bank.register_entry_id(&cur_hash);
            cur_hash = hash(&cur_hash);

            alice_ref_balance -= transfer_amount;

            let serialized_entry = serialize(&vec![entry0, entry1]).unwrap();

            w.data_mut()[..serialized_entry.len()].copy_from_slice(&serialized_entry);
            w.set_size(serialized_entry.len());
            w.meta.set_addr(&replicate_addr);
            drop(w);
            msgs.push_back(b_);
        }

        // send the blobs into the socket
        s_responder.send(msgs).expect("send");

        // receive retransmitted messages
        let timer = Duration::new(1, 0);
        let mut msgs: Vec<_> = Vec::new();
        while let Ok(msg) = r_reader.recv_timeout(timer) {
            trace!("msg: {:?}", msg);
            msgs.push(msg);
        }

        let alice_balance = bank.get_balance(&mint.keypair().pubkey()).unwrap();
        assert_eq!(alice_balance, alice_ref_balance);

        let bob_balance = bank.get_balance(&bob_keypair.pubkey()).unwrap();
        assert_eq!(bob_balance, starting_balance - alice_ref_balance);

        exit.store(true, Ordering::Relaxed);
        for t in tvu.thread_hdls {
            t.join().expect("join");
        }
        t2_gossip.join().expect("join");
        t2_listen.join().expect("join");
        t_receiver.join().expect("join");
        t_responder.join().expect("join");
        t_l_gossip.join().expect("join");
        t_l_listen.join().expect("join");
    }
}

src/write_stage.rs (new file, 71 lines)
@@ -0,0 +1,71 @@
//! The `write_stage` module implements the write stage of the RPU.

use bank::Bank;
use entry::Entry;
use entry_writer::EntryWriter;
use packet;
use std::io::Write;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::{channel, Receiver};
use std::sync::{Arc, Mutex};
use std::thread::{spawn, JoinHandle};
use streamer;

pub struct WriteStage {
    pub thread_hdl: JoinHandle<()>,
    pub blob_receiver: streamer::BlobReceiver,
}

impl WriteStage {
    /// Create a new WriteStage that wraps the given Bank.
    pub fn new<W: Write + Send + 'static>(
        bank: Arc<Bank>,
        exit: Arc<AtomicBool>,
        blob_recycler: packet::BlobRecycler,
        writer: Mutex<W>,
        entry_receiver: Receiver<Entry>,
    ) -> Self {
        let (blob_sender, blob_receiver) = channel();
        let thread_hdl = spawn(move || loop {
            let entry_writer = EntryWriter::new(&bank);
            let _ = entry_writer.write_and_send_entries(
                &blob_sender,
                &blob_recycler,
                &writer,
                &entry_receiver,
            );
            if exit.load(Ordering::Relaxed) {
                info!("write_stage exiting");
                break;
            }
        });

        WriteStage {
            thread_hdl,
            blob_receiver,
        }
    }

    pub fn new_drain(
        bank: Arc<Bank>,
        exit: Arc<AtomicBool>,
        entry_receiver: Receiver<Entry>,
    ) -> Self {
        let (_blob_sender, blob_receiver) = channel();
        let thread_hdl = spawn(move || {
            let entry_writer = EntryWriter::new(&bank);
            loop {
                let _ = entry_writer.drain_entries(&entry_receiver);
                if exit.load(Ordering::Relaxed) {
                    info!("drain_service exiting");
                    break;
                }
            }
        });

        WriteStage {
            thread_hdl,
            blob_receiver,
        }
    }
}
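
// --- Editor's sketch (hypothetical usage, not part of this commit) ----------
// `WriteStage::new` is generic over any `W: Write + Send`, so a test can aim
// it at `io::sink()` (as tests/multinode.rs below does for the leader) while
// a real node hands it a ledger file. Assuming a `bank: Arc<Bank>` and an
// `exit: Arc<AtomicBool>` already in scope:
//
//     let (entry_sender, entry_receiver) = channel();
//     let write_stage = WriteStage::new(
//         bank.clone(),
//         exit.clone(),
//         packet::BlobRecycler::default(),
//         Mutex::new(std::io::sink()), // discard written entries in tests
//         entry_receiver,
//     );
//     // Entries sent on `entry_sender` are written out and re-emitted as
//     // blobs on `write_stage.blob_receiver`, ready for broadcast.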

tests/multinode.rs (new file, 178 lines)
@@ -0,0 +1,178 @@
#[macro_use]
extern crate log;
extern crate bincode;
extern crate futures;
extern crate solana;

use futures::Future;
use solana::bank::Bank;
use solana::crdt::{Crdt, ReplicatedData};
use solana::logger;
use solana::mint::Mint;
use solana::server::Server;
use solana::signature::{KeyPair, KeyPairUtil, PublicKey};
use solana::streamer::default_window;
use solana::thin_client::ThinClient;
use solana::tvu::TestNode;
use std::io;
use std::io::sink;
use std::net::UdpSocket;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, RwLock};
use std::thread::sleep;
use std::thread::JoinHandle;
use std::time::Duration;

fn validator(
    leader: &ReplicatedData,
    exit: Arc<AtomicBool>,
    alice: &Mint,
    threads: &mut Vec<JoinHandle<()>>,
) {
    let validator = TestNode::new();
    let replicant_bank = Bank::new(&alice);
    let mut ts = Server::new_validator(
        replicant_bank,
        validator.data.clone(),
        validator.sockets.requests,
        validator.sockets.respond,
        validator.sockets.replicate,
        validator.sockets.gossip,
        leader.clone(),
        exit.clone(),
    );
    threads.append(&mut ts.thread_hdls);
}

fn converge(
    leader: &ReplicatedData,
    exit: Arc<AtomicBool>,
    num_nodes: usize,
    threads: &mut Vec<JoinHandle<()>>,
) -> Vec<ReplicatedData> {
    // let's spy on the network
    let mut spy = TestNode::new();
    let daddr = "0.0.0.0:0".parse().unwrap();
    let me = spy.data.id.clone();
    spy.data.replicate_addr = daddr;
    spy.data.requests_addr = daddr;
    let mut spy_crdt = Crdt::new(spy.data);
    spy_crdt.insert(&leader);
    spy_crdt.set_leader(leader.id);

    let spy_ref = Arc::new(RwLock::new(spy_crdt));
    let spy_window = default_window();
    let t_spy_listen = Crdt::listen(
        spy_ref.clone(),
        spy_window,
        spy.sockets.gossip,
        exit.clone(),
    );
    let t_spy_gossip = Crdt::gossip(spy_ref.clone(), exit.clone());
    // wait for the network to converge
    let mut converged = false;
    for _ in 0..30 {
        let num = spy_ref.read().unwrap().convergence();
        if num == num_nodes as u64 {
            converged = true;
            break;
        }
        sleep(Duration::new(1, 0));
    }
    assert!(converged);
    threads.push(t_spy_listen);
    threads.push(t_spy_gossip);
    let v: Vec<ReplicatedData> = spy_ref
        .read()
        .unwrap()
        .table
        .values()
        .filter(|x| x.id != me)
        .cloned()
        .collect();
    v
}
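
// Editor's note: `converge` is a bounded retry loop: poll the spy's
// `convergence()` count once per second, thirty times at most, and assert if
// the gossip network never reports `num_nodes` members. Reduced to its
// skeleton (`poll_until` is a hypothetical helper, not part of this commit):
fn poll_until<F: Fn() -> bool>(attempts: usize, interval: Duration, done: F) -> bool {
    for _ in 0..attempts {
        if done() {
            return true;
        }
        sleep(interval);
    }
    false
}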

#[test]
fn test_multi_node() {
    logger::setup();
    const N: usize = 5;
    trace!("test_multi_node");
    let leader = TestNode::new();
    let alice = Mint::new(10_000);
    let bob_pubkey = KeyPair::new().pubkey();
    let exit = Arc::new(AtomicBool::new(false));

    let leader_bank = Bank::new(&alice);
    let server = Server::new_leader(
        leader_bank,
        alice.last_id(),
        None,
        leader.data.clone(),
        leader.sockets.requests,
        leader.sockets.transaction,
        leader.sockets.broadcast,
        leader.sockets.respond,
        leader.sockets.gossip,
        exit.clone(),
        sink(),
    );

    let mut threads = server.thread_hdls;
    for _ in 0..N {
        validator(&leader.data, exit.clone(), &alice, &mut threads);
    }
    let servers = converge(&leader.data, exit.clone(), N + 2, &mut threads);
    // contains the leader addr as well
    assert_eq!(servers.len(), N + 1);
    // verify that the leader can do a transfer
    let leader_balance = tx_and_retry_get_balance(&leader.data, &alice, &bob_pubkey).unwrap();
    assert_eq!(leader_balance, 500);
    // verify that the validators all see the same balance
    let mut success = 0usize;
    for server in servers.iter() {
        let mut client = mk_client(server);
        if let Ok(bal) = client.poll_get_balance(&bob_pubkey) {
            trace!("validator balance {}", bal);
            if bal == leader_balance {
                success += 1;
            }
        }
    }
    assert_eq!(success, servers.len());
    exit.store(true, Ordering::Relaxed);
    for t in threads {
        t.join().unwrap();
    }
}

fn mk_client(leader: &ReplicatedData) -> ThinClient {
    let requests_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
    requests_socket
        .set_read_timeout(Some(Duration::new(1, 0)))
        .unwrap();
    let transactions_socket = UdpSocket::bind("0.0.0.0:0").unwrap();

    ThinClient::new(
        leader.requests_addr,
        requests_socket,
        leader.transactions_addr,
        transactions_socket,
    )
}

fn tx_and_retry_get_balance(
    leader: &ReplicatedData,
    alice: &Mint,
    bob_pubkey: &PublicKey,
) -> io::Result<i64> {
    let mut client = mk_client(leader);
    trace!("getting leader last_id");
    let last_id = client.get_last_id().wait().unwrap();
    info!("executing leader transfer");
    let _sig = client
        .transfer(500, &alice.keypair(), *bob_pubkey, &last_id)
        .unwrap();
    client.poll_get_balance(bob_pubkey)
}