Compare commits
1566 Commits
(The commit table — Author | SHA1 | Date — did not survive extraction: the Author and Date columns are empty, leaving only the 1,566 abbreviated commit SHAs, from `6f3beb915c` at the head of the list through `46e8c09bd8` at the end.)
`.buildkite/hooks/post-checkout` (new file, 2 lines)

```diff
@@ -0,0 +1,2 @@
+CI_BUILD_START=$(date +%s)
+export CI_BUILD_START
```
`.buildkite/hooks/post-checkout.sh` (new symbolic link, 1 line)

```diff
@@ -0,0 +1 @@
+post-checkout
```
`.buildkite/hooks/post-command` (new file, 45 lines; indentation reconstructed)

```diff
@@ -0,0 +1,45 @@
+#!/bin/bash -e
+
+#
+# Save target/ for the next CI build on this machine
+#
+if [[ -n $CARGO_TARGET_CACHE_NAME ]]; then
+  (
+    d=$HOME/cargo-target-cache/"$CARGO_TARGET_CACHE_NAME"
+    mkdir -p "$d"
+    set -x
+    rsync -a --delete --link-dest="$PWD" target "$d"
+    du -hs "$d"
+  )
+fi
+
+#
+# Add job_stats data point
+#
+if [[ -z $CI_BUILD_START ]]; then
+  echo Error: CI_BUILD_START empty
+else
+  CI_BUILD_DURATION=$(( $(date +%s) - CI_BUILD_START + 1 ))
+
+  CI_LABEL=${BUILDKITE_LABEL:-build label missing}
+
+  PR=false
+  if [[ $BUILDKITE_BRANCH =~ pull/* ]]; then
+    PR=true
+  fi
+
+  SUCCESS=true
+  if [[ $BUILDKITE_COMMAND_EXIT_STATUS != 0 ]]; then
+    SUCCESS=false
+  fi
+
+  point_tags="pipeline=$BUILDKITE_PIPELINE_SLUG,job=$CI_LABEL,pr=$PR,success=$SUCCESS"
+  point_tags="${point_tags// /\\ }" # Escape spaces
+
+  point_fields="duration=$CI_BUILD_DURATION"
+  point_fields="${point_fields// /\\ }" # Escape spaces
+
+  point="job_stats,$point_tags $point_fields"
+
+  multinode-demo/metrics_write_datapoint.sh "$point" || true
+fi
```
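For reference, the `point` string assembled by this hook follows the InfluxDB line protocol: a measurement name, comma-separated tags, a space, then fields. A minimal Rust sketch of the same assembly (the values here are illustrative; this is not part of the CI scripts):

```rust
fn main() {
    // InfluxDB line protocol: measurement,tag1=v1,tag2=v2 field1=v1
    let pipeline = "solana";
    let job = "stable";
    let pr = false;
    let success = true;
    let duration_secs = 742;

    let point_tags = format!(
        "pipeline={},job={},pr={},success={}",
        pipeline, job, pr, success
    );
    // Spaces inside tag values must be escaped, mirroring the shell's
    // "${point_tags// /\\ }" substitution above.
    let point_tags = point_tags.replace(' ', "\\ ");

    let point = format!("job_stats,{} duration={}", point_tags, duration_secs);
    assert_eq!(
        point,
        "job_stats,pipeline=solana,job=stable,pr=false,success=true duration=742"
    );
}
```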
`.buildkite/hooks/post-command.sh` (new symbolic link, 1 line)

```diff
@@ -0,0 +1 @@
+post-command
```
`.buildkite/hooks/pre-command` (new file, 13 lines; indentation reconstructed)

```diff
@@ -0,0 +1,13 @@
+#!/bin/bash -e
+
+[[ -n "$CARGO_TARGET_CACHE_NAME" ]] || exit 0
+
+#
+# Restore target/ from the previous CI build on this machine
+#
+(
+  d=$HOME/cargo-target-cache/"$CARGO_TARGET_CACHE_NAME"
+  mkdir -p "$d"/target
+  set -x
+  rsync -a --delete --link-dest="$d" "$d"/target .
+)
```
`.buildkite/hooks/pre-command.sh` (new symbolic link, 1 line)

```diff
@@ -0,0 +1 @@
+pre-command
```
`.clippy.toml` (new file, 1 line)

```diff
@@ -0,0 +1 @@
+too-many-arguments-threshold = 9
```
Codecov configuration (the file-stat header for this hunk was lost in extraction; presumably `.codecov.yml`):

```diff
@@ -1,2 +1,5 @@
 ignore:
 - "src/bin"
+coverage:
+  status:
+    patch: off
```
`.gitignore` (vendored, 13 added lines)

```diff
@@ -1,3 +1,16 @@
 Cargo.lock
 /target/
 
+**/*.rs.bk
+.cargo
+
+# node configuration files
+/config/
+/config-private/
+/config-drone/
+/config-validator/
+/config-client/
+/multinode-demo/test/config-client/
+
+# test temp files, ledgers, etc.
+/farf/
```
`.travis.yml` (deleted, 22 lines; YAML indentation reconstructed)

```diff
@@ -1,22 +0,0 @@
-language: rust
-required: sudo
-services:
-  - docker
-matrix:
-  allow_failures:
-    - rust: nightly
-  include:
-    - rust: stable
-    - rust: nightly
-      env:
-        - FEATURES='unstable'
-before_script: |
-  export PATH="$PATH:$HOME/.cargo/bin"
-  rustup component add rustfmt-preview
-script:
-  - cargo fmt -- --write-mode=diff
-  - cargo build --verbose --features "$FEATURES"
-  - cargo test --verbose --features "$FEATURES"
-after_success: |
-  docker run -it --rm --security-opt seccomp=unconfined --volume "$PWD:/volume" elmtai/docker-rust-kcov
-  bash <(curl -s https://codecov.io/bash) -s target/cov
```
`CONTRIBUTING.md` (new file, 53 lines)

````markdown
Solana Coding Guidelines
===

The goal of these guidelines is to improve developer productivity by allowing developers to
jump into any file in the codebase and not need to adapt to inconsistencies in how the code is
written. The codebase should appear as if it had been authored by a single developer. If you
don't agree with a convention, submit a PR patching this document and let's discuss! Once
the PR is accepted, *all* code should be updated as soon as possible to reflect the new
conventions.

Rust coding conventions
---

* All Rust code is formatted using the latest version of `rustfmt`. Once installed, it will be
updated automatically when you update the compiler with `rustup`.

* All Rust code is linted with Clippy. If you'd prefer to ignore its advice, do so explicitly:

```rust
#[cfg_attr(feature = "cargo-clippy", allow(too_many_arguments))]
```

Note: Clippy defaults can be overridden in the top-level file `.clippy.toml`.

* For variable names, when in doubt, spell it out. The mapping from type names to variable names
is to lowercase the type name, putting an underscore before each capital letter. Variable names
should *not* be abbreviated unless being used as closure arguments and the brevity improves
readability. When a function has multiple instances of the same type, qualify each with a
prefix and underscore (i.e. alice_keypair) or a numeric suffix (i.e. tx0).

* For function and method names, use `<verb>_<subject>`. For unit tests, that verb should
always be `test` and for benchmarks the verb should always be `bench`. Avoid namespacing
function names with some arbitrary word. Avoid abbreviating words in function names.

* As they say, "When in Rome, do as the Romans do." A good patch should acknowledge the coding
conventions of the code that surrounds it, even in the case where that code has not yet been
updated to meet the conventions described here.


Terminology
---

Inventing new terms is allowed, but should only be done when the term is widely used and
understood. Avoid introducing new 3-letter terms, which can be confused with 3-letter acronyms.

Some terms we currently use regularly in the codebase:

* fullnode: n. A fully participating network node.
* hash: n. A SHA-256 Hash.
* keypair: n. An Ed25519 key-pair, containing a public and private key.
* pubkey: n. The public key of an Ed25519 key-pair.
* sigverify: v. To verify an Ed25519 digital signature.
````
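As a concrete illustration of the naming conventions above (a made-up sketch, not code from this repository):

```rust
// Illustrative sketch of the conventions; not code from this repository.
struct Transaction;

// Function names use <verb>_<subject>.
fn process_transaction(transaction: &Transaction) {
    // The variable name is the lowercased type name: Transaction -> transaction.
    let _ = transaction;
}

#[cfg(test)]
mod tests {
    use super::*;

    // Unit-test names always start with the verb `test`.
    #[test]
    fn test_process_transaction() {
        // Multiple values of one type take a prefix (alice_tx) or a suffix (tx0).
        let tx0 = Transaction;
        process_transaction(&tx0);
    }
}
```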
`Cargo.toml` (106 changed lines; added/removed markers inferred from the flattened split view)

```diff
@@ -1,39 +1,57 @@
 [package]
 name = "solana"
-description = "High Performance Blockchain"
-version = "0.4.0"
+description = "Blockchain, Rebuilt for Scale"
+version = "0.7.2"
 documentation = "https://docs.rs/solana"
-homepage = "http://loomprotocol.com/"
+homepage = "http://solana.com/"
 readme = "README.md"
 repository = "https://github.com/solana-labs/solana"
 authors = [
-    "Anatoly Yakovenko <anatoly@solana.co>",
-    "Greg Fitzgerald <greg@solana.co>",
+    "Anatoly Yakovenko <anatoly@solana.com>",
+    "Greg Fitzgerald <greg@solana.com>",
+    "Stephen Akridge <stephen@solana.com>",
+    "Michael Vines <mvines@solana.com>",
+    "Rob Walker <rob@solana.com>",
+    "Pankaj Garg <pankaj@solana.com>",
+    "Tyera Eulberg <tyera@solana.com>",
 ]
 license = "Apache-2.0"
 
 [[bin]]
-name = "solana-historian-demo"
-path = "src/bin/historian-demo.rs"
+name = "solana-bench-tps"
+path = "src/bin/bench-tps.rs"
 
 [[bin]]
-name = "solana-client-demo"
-path = "src/bin/client-demo.rs"
+name = "solana-bench-streamer"
+path = "src/bin/bench-streamer.rs"
 
 [[bin]]
-name = "solana-testnode"
-path = "src/bin/testnode.rs"
+name = "solana-drone"
+path = "src/bin/drone.rs"
+
+[[bin]]
+name = "solana-fullnode"
+path = "src/bin/fullnode.rs"
+
+[[bin]]
+name = "solana-fullnode-config"
+path = "src/bin/fullnode-config.rs"
 
 [[bin]]
 name = "solana-genesis"
 path = "src/bin/genesis.rs"
 
 [[bin]]
-name = "solana-genesis-demo"
-path = "src/bin/genesis-demo.rs"
+name = "solana-ledger-tool"
+path = "src/bin/ledger-tool.rs"
 
 [[bin]]
-name = "solana-mint"
-path = "src/bin/mint.rs"
+name = "solana-keygen"
+path = "src/bin/keygen.rs"
+
+[[bin]]
+name = "solana-wallet"
+path = "src/bin/wallet.rs"
 
 [badges]
 codecov = { repository = "solana-labs/solana", branch = "master", service = "github" }
@@ -41,17 +59,61 @@
 [features]
 unstable = []
 ipv6 = []
 cuda = []
 erasure = []
 
 [dependencies]
+atty = "0.2"
+bincode = "1.0.0"
+bs58 = "0.2.0"
+byteorder = "1.2.1"
+chrono = { version = "0.4.0", features = ["serde"] }
+clap = "2.31"
+dirs = "1.0.2"
+env_logger = "0.5.12"
+futures = "0.1.21"
+generic-array = { version = "0.11.1", default-features = false, features = ["serde"] }
+getopts = "0.2"
+influx_db_client = "0.3.4"
+itertools = "0.7.8"
+libc = "0.2.1"
+log = "0.4.2"
+matches = "0.1.6"
+pnet_datalink = "0.21.0"
+rand = "0.5.1"
 rayon = "1.0.0"
+reqwest = "0.8.6"
+ring = "0.13.2"
 sha2 = "0.7.0"
-generic-array = { version = "0.9.0", default-features = false, features = ["serde"] }
 serde = "1.0.27"
 serde_derive = "1.0.27"
+serde_json = "1.0.10"
-ring = "0.12.1"
-untrusted = "0.5.1"
-bincode = "1.0.0"
-chrono = { version = "0.4.0", features = ["serde"] }
-log = "^0.4.1"
-matches = "^0.1.6"
+sys-info = "0.5.6"
+tokio = "0.1"
+tokio-codec = "0.1"
 tokio-core = "0.1.17"
+tokio-io = "0.1"
+untrusted = "0.6.2"
+
+[dev-dependencies]
+criterion = "0.2"
+
+[[bench]]
+name = "bank"
+harness = false
+
+[[bench]]
+name = "banking_stage"
+harness = false
+
+[[bench]]
+name = "ledger"
+harness = false
+
+[[bench]]
+name = "signature"
+harness = false
+
+[[bench]]
+name = "sigverify"
+harness = false
```
`LICENSE` (2 changed lines)

```diff
@@ -1,4 +1,4 @@
-Copyright 2018 Anatoly Yakovenko <anatoly@loomprotocol.com> and Greg Fitzgerald <garious@gmail.com>
+Copyright 2018 Solana Labs, Inc.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
```
`README.md` (298 changed lines; added/removed markers inferred from the flattened split view — the badge image URLs did not survive extraction, only the link targets)

```diff
@@ -1,22 +1,42 @@
 [](https://crates.io/crates/solana)
 [](https://docs.rs/solana)
-[](https://travis-ci.org/solana-labs/solana)
+[](https://solana-ci-gate.herokuapp.com/buildkite_public_log?https://buildkite.com/solana-labs/solana/builds/latest/master)
 [](https://codecov.io/gh/solana-labs/solana)
 
+Blockchain, Rebuilt for Scale
+===
+
+Solana™ is a new blockchain architecture built from the ground up for scale. The architecture supports
+up to 710 thousand transactions per second on a gigabit network.
+
+Disclaimer
+===
+
+All claims, content, designs, algorithms, estimates, roadmaps, specifications, and performance measurements described in this project are done with the author's best effort. It is up to the reader to check and validate their accuracy and truthfulness. Furthermore nothing in this project constitutes a solicitation for investment.
+
-Solana: High Performance Blockchain
+Introduction
 ===
 
-Solana™ is a new architecture for a high performance blockchain. It aims to support
-over 700 thousand transactions per second on a gigabit network.
+It's possible for a centralized database to process 710,000 transactions per second on a standard gigabit network if the transactions are, on average, no more than 176 bytes. A centralized database can also replicate itself and maintain high availability without significantly compromising that transaction rate using the distributed system technique known as Optimistic Concurrency Control [H.T.Kung, J.T.Robinson (1981)]. At Solana, we're demonstrating that these same theoretical limits apply just as well to blockchain on an adversarial network. The key ingredient? Finding a way to share time when nodes can't trust one-another. Once nodes can trust time, suddenly ~40 years of distributed systems research becomes applicable to blockchain! Furthermore, and much to our surprise, it can be implemented using a mechanism that has existed in Bitcoin since day one. The Bitcoin feature is called nLocktime and it can be used to postdate transactions using block height instead of a timestamp. As a Bitcoin client, you'd use block height instead of a timestamp if you don't trust the network. Block height turns out to be an instance of what's being called a Verifiable Delay Function in cryptography circles. It's a cryptographically secure way to say time has passed. In Solana, we use a far more granular verifiable delay function, a SHA 256 hash chain, to checkpoint the ledger and coordinate consensus. With it, we implement Optimistic Concurrency Control and are now well en route towards that theoretical limit of 710,000 transactions per second.
 
-Running the demo
+Testnet Demos
 ===
 
+The Solana repo contains all the scripts you might need to spin up your own
+local testnet. Depending on what you're looking to achieve, you may want to
+run a different variation, as the full-fledged, performance-enhanced
+multinode testnet is considerably more complex to set up than a Rust-only,
+singlenode testnode. If you are looking to develop high-level features, such
+as experimenting with smart contracts, save yourself some setup headaches and
+stick to the Rust-only singlenode demo. If you're doing performance optimization
+of the transaction pipeline, consider the enhanced singlenode demo. If you're
+doing consensus work, you'll need at least a Rust-only multinode demo. If you want
+to reproduce our TPS metrics, run the enhanced multinode demo.
+
+For all four variations, you'd need the latest Rust toolchain and the Solana
+source code:
+
 First, install Rust's package manager Cargo.
 
 ```bash
@@ -24,54 +44,195 @@ $ curl https://sh.rustup.rs -sSf | sh
 $ source $HOME/.cargo/env
 ```
 
-The testnode server is initialized with a ledger from stdin and
-generates new ledger entries on stdout. To create the input ledger, we'll need
-to create *the mint* and use it to generate a *genesis ledger*. It's done in
-two steps because the mint.json file contains a private key that will be
-used later in this demo.
+Now checkout the code from github:
 
 ```bash
-$ echo 1000000000 | cargo run --release --bin solana-mint | tee mint.json
-$ cat mint.json | cargo run --release --bin solana-genesis | tee genesis.log
+$ git clone https://github.com/solana-labs/solana.git
+$ cd solana
 ```
 
-Now you can start the server:
+The demo code is sometimes broken between releases as we add new low-level
+features, so if this is your first time running the demo, you'll improve
+your odds of success if you check out the
+[latest release](https://github.com/solana-labs/solana/releases)
+before proceeding:
 
 ```bash
-$ cat genesis.log | cargo run --release --bin solana-testnode | tee transactions0.log
+$ git checkout v0.7.0-beta
 ```
 
-Then, in a separate shell, let's execute some transactions. Note we pass in
+Configuration Setup
+---
+
+The network is initialized with a genesis ledger and leader/validator configuration files.
+These files can be generated by running the following script.
+
+```bash
+$ ./multinode-demo/setup.sh
+```
+
+Drone
+---
+
+In order for the leader, client and validators to work, we'll need to
+spin up a drone to give out some test tokens. The drone delivers Milton
+Friedman-style "air drops" (free tokens to requesting clients) to be used in
+test transactions.
+
+Start the drone on the leader node with:
+
+```bash
+$ ./multinode-demo/drone.sh
+```
+
+Singlenode Testnet
+---
+
+Before you start a fullnode, make sure you know the IP address of the machine you
+want to be the leader for the demo, and make sure that udp ports 8000-10000 are
+open on all the machines you want to test with.
+
+Now start the server:
+
+```bash
+$ ./multinode-demo/leader.sh
+```
+
+Wait a few seconds for the server to initialize. It will print "Ready." when it's ready to
+receive transactions. The leader will request some tokens from the drone if it doesn't have any.
+The drone does not need to be running for subsequent leader starts.
+
+Multinode Testnet
+---
+
+To run a multinode testnet, after starting a leader node, spin up some validator nodes:
+
+```bash
+$ ./multinode-demo/validator.sh ubuntu@10.0.1.51:~/solana 10.0.1.51
+```
+
+To run a performance-enhanced leader or validator (on Linux),
+[CUDA 9.2](https://developer.nvidia.com/cuda-downloads) must be installed on
+your system:
+```bash
+$ ./fetch-perf-libs.sh
+$ SOLANA_CUDA=1 ./multinode-demo/leader.sh
+$ SOLANA_CUDA=1 ./multinode-demo/validator.sh ubuntu@10.0.1.51:~/solana 10.0.1.51
+
+```
+
+Testnet Client Demo
+---
+
+Now that your singlenode or multinode testnet is up and running, in a separate shell, let's send it some transactions! Note we pass in
 the JSON configuration file here, not the genesis ledger.
 
 ```bash
-$ cat mint.json | cargo run --release --bin solana-client-demo
+$ ./multinode-demo/client.sh ubuntu@10.0.1.51:~/solana 2 #The leader machine and the total number of nodes in the network
 ```
 
-Now kill the server with Ctrl-C, and take a look at the ledger. You should
-see something similar to:
-
-```json
-{"num_hashes":27,"id":[0, "..."],"event":"Tick"}
-{"num_hashes":3,"id":[67, "..."],"event":{"Transaction":{"tokens":42}}}
-{"num_hashes":27,"id":[0, "..."],"event":"Tick"}
-```
-
-Now restart the server from where we left off. Pass it both the genesis ledger, and
-the transaction ledger.
+What just happened? The client demo spins up several threads to send 500,000 transactions
+to the testnet as quickly as it can. The client then pings the testnet periodically to see
+how many transactions it processed in that time. Take note that the demo intentionally
+floods the network with UDP packets, such that the network will almost certainly drop a
+bunch of them. This ensures the testnet has an opportunity to reach 710k TPS. The client
+demo completes after it has convinced itself the testnet won't process any additional
+transactions. You should see several TPS measurements printed to the screen. In the
+multinode variation, you'll see TPS measurements for each validator node as well.
+
+Linux Snap
+---
+A Linux [Snap](https://snapcraft.io/) is available, which can be used to
+easily get Solana running on supported Linux systems without building anything
+from source. The `edge` Snap channel is updated daily with the latest
+development from the `master` branch. To install:
 ```bash
-$ cat genesis.log transactions0.log | cargo run --release --bin solana-testnode | tee transactions1.log
+$ sudo snap install solana --edge --devmode
 ```
+(`--devmode` flag is required only for `solana.fullnode-cuda`)
 
-Lastly, run the client demo again, and verify that all funds were spent in the
-previous round, and so no additional transactions are added.
+Once installed the usual Solana programs will be available as `solana.*` instead
+of `solana-*`. For example, `solana.fullnode` instead of `solana-fullnode`.
+
+Update to the latest version at any time with:
 ```bash
-$ cat mint.json | cargo run --release --bin solana-client-demo
+$ snap info solana
+$ sudo snap refresh solana --devmode
 ```
 
-Stop the server again, and verify there are only Tick entries, and no Transaction entries.
+### Daemon support
+The snap supports running a leader, validator or leader+drone node as a system
+daemon.
+
+Run `sudo snap get solana` to view the current daemon configuration. To view
+daemon logs:
+1. Run `sudo snap logs -n=all solana` to view the daemon initialization log
+2. Runtime logging can be found under `/var/snap/solana/current/leader/`,
+`/var/snap/solana/current/validator/`, or `/var/snap/solana/current/drone/` depending
+on which `mode=` was selected. Within each log directory the file `current`
+contains the latest log, and the files `*.s` (if present) contain older rotated
+logs.
+
+Disable the daemon at any time by running:
+```bash
+$ sudo snap set solana mode=
+```
+
+Runtime configuration files for the daemon can be found in
+`/var/snap/solana/current/config`.
+
+#### Leader daemon
+```bash
+$ sudo snap set solana mode=leader
+```
+
+If CUDA is available:
+```bash
+$ sudo snap set solana mode=leader enable-cuda=1
+```
+
+`rsync` must be configured and running on the leader.
+
+1. Ensure rsync is installed with `sudo apt-get -y install rsync`
+2. Edit `/etc/rsyncd.conf` to include the following
+```
+[config]
+path = /var/snap/solana/current/config
+hosts allow = *
+read only = true
+```
+3. Run `sudo systemctl enable rsync; sudo systemctl start rsync`
+4. Test by running `rsync -Pzravv rsync://<ip-address-of-leader>/config
+solana-config` from another machine. **If the leader is running on a cloud
+provider it may be necessary to configure the Firewall rules to permit ingress
+to port tcp:873, tcp:9900 and the port range udp:8000-udp:10000**
+
+To run both the Leader and Drone:
+```bash
+$ sudo snap set solana mode=leader+drone
+
+```
+
+#### Validator daemon
+```bash
+$ sudo snap set solana mode=validator
+
+```
+If CUDA is available:
+```bash
+$ sudo snap set solana mode=validator enable-cuda=1
+```
+
+By default the validator will connect to **testnet.solana.com**, override
+the leader IP address by running:
+```bash
+$ sudo snap set solana mode=validator leader-address=127.0.0.1 #<-- change IP address
+```
+It's assumed that the leader will be running `rsync` configured as described in
+the previous **Leader daemon** section.
 
 Developing
 ===
@@ -87,6 +248,17 @@ $ source $HOME/.cargo/env
 $ rustup component add rustfmt-preview
 ```
 
+If your rustc version is lower than 1.26.1, please update it:
+
+```bash
+$ rustup update
+```
+
+On Linux systems you may need to install libssl-dev and pkg-config. On Ubuntu:
+```bash
+$ sudo apt-get install libssl-dev pkg-config
+```
+
 Download the source code:
 
 ```bash
@@ -100,9 +272,37 @@ Testing
 Run the test suite:
 
 ```bash
-cargo test
+$ cargo test
 ```
 
+To emulate all the tests that will run on a Pull Request, run:
+```bash
+$ ./ci/run-local.sh
+```
+
+Debugging
+---
+
+There are some useful debug messages in the code, you can enable them on a per-module and per-level
+basis with the normal RUST\_LOG environment variable. Run the fullnode with this syntax:
+```bash
+$ RUST_LOG=solana::streamer=debug,solana::server=info cat genesis.log | ./target/release/solana-fullnode > transactions0.log
+```
+to see the debug and info sections for streamer and server respectively. Generally
+we are using debug for infrequent debug messages, trace for potentially frequent messages and
+info for performance-related logging.
+
+Attaching to a running process with gdb:
+
+```
+$ sudo gdb
+attach <PID>
+set logging on
+thread apply all bt
+```
+
+This will dump all the threads stack traces into gdb.txt
+
 Benchmarking
 ---
 
@@ -117,3 +317,35 @@ Run the benchmarks:
 ```bash
 $ cargo +nightly bench --features="unstable"
 ```
+
+Code coverage
+---
+
+To generate code coverage statistics, install cargo-cov. Note: the tool currently only works
+in Rust nightly.
+
+```bash
+$ cargo +nightly install cargo-cov
+```
+
+Run cargo-cov and generate a report:
+
+```bash
+$ cargo +nightly cov test
+$ cargo +nightly cov report --open
+```
+
+The coverage report will be written to `./target/cov/report/index.html`
+
+Why coverage? While most see coverage as a code quality metric, we see it primarily as a developer
+productivity metric. When a developer makes a change to the codebase, presumably it's a *solution* to
+some problem. Our unit-test suite is how we encode the set of *problems* the codebase solves. Running
+the test suite should indicate that your change didn't *infringe* on anyone else's solutions. Adding a
+test *protects* your solution from future changes. Say you don't understand why a line of code exists,
+try deleting it and running the unit-tests. The nearest test failure should tell you what problem
+was solved by that code. If no test fails, go ahead and submit a Pull Request that asks, "what
+problem is solved by this code?" On the other hand, if a test does fail and you can think of a
+better way to solve the same problem, a Pull Request with your solution would most certainly be
+welcome! Likewise, if rewriting a test can better communicate what code it's protecting, please
+send us that patch!
```
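The README's new introduction describes a SHA-256 hash chain serving as a verifiable delay function to checkpoint the ledger. Here is a minimal sketch of that idea, assuming the `sha2` crate from the Cargo.toml dependency list above (illustrative only; the function names are made up and this is not the actual Solana implementation):

```rust
extern crate sha2;

use sha2::{Digest, Sha256};

/// Produce the id reached after `num_hashes` sequential SHA-256 steps.
/// Each step consumes the previous output, so the work cannot be
/// parallelized: the length of the chain is evidence that time passed.
fn hash_chain(seed: &[u8], num_hashes: u64) -> Vec<u8> {
    let mut id = seed.to_vec();
    for _ in 0..num_hashes {
        id = Sha256::digest(&id).to_vec();
    }
    id
}

fn main() {
    // A verifier replays the chain and checks it reaches the same id;
    // published intermediate ids let verification be split across cores.
    let id = hash_chain(b"genesis", 1_000);
    assert_eq!(id, hash_chain(b"genesis", 1_000));
}
```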
`_config.yml` (new file, 1 line)

```diff
@@ -0,0 +1 @@
+theme: jekyll-theme-slate
```
`benches/bank.rs` (new file, 66 lines; indentation reconstructed)

```rust
#[macro_use]
extern crate criterion;
extern crate bincode;
extern crate rayon;
extern crate solana;

use bincode::serialize;
use criterion::{Bencher, Criterion};
use rayon::prelude::*;
use solana::bank::*;
use solana::hash::hash;
use solana::mint::Mint;
use solana::signature::{Keypair, KeypairUtil};
use solana::transaction::Transaction;

fn bench_process_transaction(bencher: &mut Bencher) {
    let mint = Mint::new(100_000_000);
    let bank = Bank::new(&mint);

    // Create transactions between unrelated parties.
    let transactions: Vec<_> = (0..4096)
        .into_par_iter()
        .map(|i| {
            // Seed the 'from' account.
            let rando0 = Keypair::new();
            let tx = Transaction::new(&mint.keypair(), rando0.pubkey(), 10_000, mint.last_id());
            assert!(bank.process_transaction(&tx).is_ok());

            // Seed the 'to' account and a cell for its signature.
            let last_id = hash(&serialize(&i).unwrap()); // Unique hash
            bank.register_entry_id(&last_id);

            let rando1 = Keypair::new();
            let tx = Transaction::new(&rando0, rando1.pubkey(), 1, last_id);
            assert!(bank.process_transaction(&tx).is_ok());

            // Finally, return the transaction to the benchmark.
            tx
        })
        .collect();

    bencher.iter_with_setup(
        || {
            // Since benchmarker runs this multiple times, we need to clear the signatures.
            bank.clear_signatures();
            transactions.clone()
        },
        |transactions| {
            let results = bank.process_transactions(transactions);
            assert!(results.iter().all(Result::is_ok));
        },
    )
}

fn bench(criterion: &mut Criterion) {
    criterion.bench_function("bench_process_transaction", |bencher| {
        bench_process_transaction(bencher);
    });
}

criterion_group!(
    name = benches;
    config = Criterion::default().sample_size(2);
    targets = bench
);
criterion_main!(benches);
```
229
benches/banking_stage.rs
Normal file
@@ -0,0 +1,229 @@
extern crate bincode;
#[macro_use]
extern crate criterion;
extern crate rayon;
extern crate solana;

use criterion::{Bencher, Criterion};
use rayon::prelude::*;
use solana::bank::Bank;
use solana::banking_stage::BankingStage;
use solana::mint::Mint;
use solana::packet::{to_packets_chunked, PacketRecycler};
use solana::record_stage::Signal;
use solana::signature::{Keypair, KeypairUtil};
use solana::transaction::Transaction;
use std::iter;
use std::sync::mpsc::{channel, Receiver};
use std::sync::Arc;

// use self::test::Bencher;
// use bank::{Bank, MAX_ENTRY_IDS};
// use bincode::serialize;
// use hash::hash;
// use mint::Mint;
// use rayon::prelude::*;
// use signature::{Keypair, KeypairUtil};
// use std::collections::HashSet;
// use std::time::Instant;
// use transaction::Transaction;
//
// fn bench_process_transactions(_bencher: &mut Bencher) {
//     let mint = Mint::new(100_000_000);
//     let bank = Bank::new(&mint);
//     // Create transactions between unrelated parties.
//     let txs = 100_000;
//     let last_ids: Mutex<HashSet<Hash>> = Mutex::new(HashSet::new());
//     let transactions: Vec<_> = (0..txs)
//         .into_par_iter()
//         .map(|i| {
//             // Seed the 'to' account and a cell for its signature.
//             let dummy_id = i % (MAX_ENTRY_IDS as i32);
//             let last_id = hash(&serialize(&dummy_id).unwrap()); // Semi-unique hash
//             {
//                 let mut last_ids = last_ids.lock().unwrap();
//                 if !last_ids.contains(&last_id) {
//                     last_ids.insert(last_id);
//                     bank.register_entry_id(&last_id);
//                 }
//             }
//
//             // Seed the 'from' account.
//             let rando0 = Keypair::new();
//             let tx = Transaction::new(&mint.keypair(), rando0.pubkey(), 1_000, last_id);
//             bank.process_transaction(&tx).unwrap();
//
//             let rando1 = Keypair::new();
//             let tx = Transaction::new(&rando0, rando1.pubkey(), 2, last_id);
//             bank.process_transaction(&tx).unwrap();
//
//             // Finally, return a transaction that's unique
//             Transaction::new(&rando0, rando1.pubkey(), 1, last_id)
//         })
//         .collect();
//
//     let banking_stage = EventProcessor::new(bank, &mint.last_id(), None);
//
//     let now = Instant::now();
//     assert!(banking_stage.process_transactions(transactions).is_ok());
//     let duration = now.elapsed();
//     let sec = duration.as_secs() as f64 + duration.subsec_nanos() as f64 / 1_000_000_000.0;
//     let tps = txs as f64 / sec;
//
//     // Ensure that all transactions were successfully logged.
//     drop(banking_stage.historian_input);
//     let entries: Vec<Entry> = banking_stage.output.lock().unwrap().iter().collect();
//     assert_eq!(entries.len(), 1);
//     assert_eq!(entries[0].transactions.len(), txs as usize);
//
//     println!("{} tps", tps);
// }

fn check_txs(receiver: &Receiver<Signal>, ref_tx_count: usize) {
    let mut total = 0;
    loop {
        let signal = receiver.recv().unwrap();
        if let Signal::Transactions(transactions) = signal {
            total += transactions.len();
            if total >= ref_tx_count {
                break;
            }
        } else {
            panic!("expected a Signal::Transactions");
        }
    }
    assert_eq!(total, ref_tx_count);
}

fn bench_banking_stage_multi_accounts(bencher: &mut Bencher) {
    let tx = 10_000_usize;
    let mint_total = 1_000_000_000_000;
    let mint = Mint::new(mint_total);
    let num_dst_accounts = 8 * 1024;
    let num_src_accounts = 8 * 1024;

    let srckeys: Vec<_> = (0..num_src_accounts).map(|_| Keypair::new()).collect();
    let dstkeys: Vec<_> = (0..num_dst_accounts)
        .map(|_| Keypair::new().pubkey())
        .collect();

    let transactions: Vec<_> = (0..tx)
        .map(|i| {
            Transaction::new(
                &srckeys[i % num_src_accounts],
                dstkeys[i % num_dst_accounts],
                i as i64,
                mint.last_id(),
            )
        })
        .collect();

    let (verified_sender, verified_receiver) = channel();
    let (signal_sender, signal_receiver) = channel();
    let packet_recycler = PacketRecycler::default();

    let setup_transactions: Vec<_> = (0..num_src_accounts)
        .map(|i| {
            Transaction::new(
                &mint.keypair(),
                srckeys[i].pubkey(),
                mint_total / num_src_accounts as i64,
                mint.last_id(),
            )
        })
        .collect();

    bencher.iter(move || {
        let bank = Arc::new(Bank::new(&mint));

        let verified_setup: Vec<_> =
            to_packets_chunked(&packet_recycler, &setup_transactions.clone(), tx)
                .into_iter()
                .map(|x| {
                    let len = (*x).read().unwrap().packets.len();
                    (x, iter::repeat(1).take(len).collect())
                })
                .collect();

        let verified_setup_len = verified_setup.len();
        verified_sender.send(verified_setup).unwrap();
        BankingStage::process_packets(&bank, &verified_receiver, &signal_sender, &packet_recycler)
            .unwrap();

        check_txs(&signal_receiver, num_src_accounts);

        let verified: Vec<_> = to_packets_chunked(&packet_recycler, &transactions.clone(), 192)
            .into_iter()
            .map(|x| {
                let len = (*x).read().unwrap().packets.len();
                (x, iter::repeat(1).take(len).collect())
            })
            .collect();

        let verified_len = verified.len();
        verified_sender.send(verified).unwrap();
        BankingStage::process_packets(&bank, &verified_receiver, &signal_sender, &packet_recycler)
            .unwrap();

        check_txs(&signal_receiver, tx);
    });
}

fn bench_banking_stage_single_from(bencher: &mut Bencher) {
    let tx = 10_000_usize;
    let mint = Mint::new(1_000_000_000_000);
    let mut pubkeys = Vec::new();
    let num_keys = 8;
    for _ in 0..num_keys {
        pubkeys.push(Keypair::new().pubkey());
    }

    let transactions: Vec<_> = (0..tx)
        .into_par_iter()
        .map(|i| {
            Transaction::new(
                &mint.keypair(),
                pubkeys[i % num_keys],
                i as i64,
                mint.last_id(),
            )
        })
        .collect();

    let (verified_sender, verified_receiver) = channel();
    let (signal_sender, signal_receiver) = channel();
    let packet_recycler = PacketRecycler::default();

    bencher.iter(move || {
        let bank = Arc::new(Bank::new(&mint));
        let verified: Vec<_> = to_packets_chunked(&packet_recycler, &transactions.clone(), tx)
            .into_iter()
            .map(|x| {
                let len = (*x).read().unwrap().packets.len();
                (x, iter::repeat(1).take(len).collect())
            })
            .collect();
        let verified_len = verified.len();
        verified_sender.send(verified).unwrap();
        BankingStage::process_packets(&bank, &verified_receiver, &signal_sender, &packet_recycler)
            .unwrap();

        check_txs(&signal_receiver, tx);
    });
}

fn bench(criterion: &mut Criterion) {
    criterion.bench_function("bench_banking_stage_multi_accounts", |bencher| {
        bench_banking_stage_multi_accounts(bencher);
    });
    criterion.bench_function("bench_banking_stage_single_from", |bencher| {
        bench_banking_stage_single_from(bencher);
    });
}

criterion_group!(
    name = benches;
    config = Criterion::default().sample_size(2);
    targets = bench
);
criterion_main!(benches);
40
benches/ledger.rs
Normal file
@@ -0,0 +1,40 @@
#[macro_use]
extern crate criterion;
extern crate solana;

use criterion::{Bencher, Criterion};
use solana::hash::{hash, Hash};
use solana::ledger::{next_entries, reconstruct_entries_from_blobs, Block};
use solana::packet::BlobRecycler;
use solana::signature::{Keypair, KeypairUtil};
use solana::transaction::Transaction;
use std::collections::VecDeque;

fn bench_block_to_blobs_to_block(bencher: &mut Bencher) {
    let zero = Hash::default();
    let one = hash(&zero.as_ref());
    let keypair = Keypair::new();
    let tx0 = Transaction::new(&keypair, keypair.pubkey(), 1, one);
    let transactions = vec![tx0; 10];
    let entries = next_entries(&zero, 1, transactions);

    let blob_recycler = BlobRecycler::default();
    bencher.iter(|| {
        let mut blob_q = VecDeque::new();
        entries.to_blobs(&blob_recycler, &mut blob_q);
        assert_eq!(reconstruct_entries_from_blobs(blob_q).unwrap(), entries);
    });
}

fn bench(criterion: &mut Criterion) {
    criterion.bench_function("bench_block_to_blobs_to_block", |bencher| {
        bench_block_to_blobs_to_block(bencher);
    });
}

criterion_group!(
    name = benches;
    config = Criterion::default().sample_size(2);
    targets = bench
);
criterion_main!(benches);
24
benches/signature.rs
Normal file
@@ -0,0 +1,24 @@
#[macro_use]
extern crate criterion;
extern crate solana;

use criterion::{Bencher, Criterion};
use solana::signature::GenKeys;

fn bench_gen_keys(b: &mut Bencher) {
    let mut rnd = GenKeys::new([0u8; 32]);
    b.iter(|| rnd.gen_n_keypairs(1000));
}

fn bench(criterion: &mut Criterion) {
    criterion.bench_function("bench_gen_keys", |bencher| {
        bench_gen_keys(bencher);
    });
}

criterion_group!(
    name = benches;
    config = Criterion::default().sample_size(2);
    targets = bench
);
criterion_main!(benches);
36
benches/sigverify.rs
Normal file
@@ -0,0 +1,36 @@
#[macro_use]
extern crate criterion;
extern crate bincode;
extern crate rayon;
extern crate solana;

use criterion::{Bencher, Criterion};
use solana::packet::{to_packets, PacketRecycler};
use solana::sigverify;
use solana::transaction::test_tx;

fn bench_sigverify(bencher: &mut Bencher) {
    let tx = test_tx();

    // generate packet vector
    let packet_recycler = PacketRecycler::default();
    let batches = to_packets(&packet_recycler, &vec![tx; 128]);

    // verify packets
    bencher.iter(|| {
        let _ans = sigverify::ed25519_verify(&batches);
    })
}

fn bench(criterion: &mut Criterion) {
    criterion.bench_function("bench_sigverify", |bencher| {
        bench_sigverify(bencher);
    });
}

criterion_group!(
    name = benches;
    config = Criterion::default().sample_size(2);
    targets = bench
);
criterion_main!(benches);
16
build.rs
Normal file
@@ -0,0 +1,16 @@
use std::env;

fn main() {
    println!("cargo:rustc-link-search=native=.");
    // Cargo sets CARGO_FEATURE_<NAME> in the environment for each enabled feature.
    if env::var("CARGO_FEATURE_CUDA").is_ok() {
        println!("cargo:rustc-link-lib=static=cuda_verify_ed25519");
        println!("cargo:rustc-link-search=native=/usr/local/cuda/lib64");
        println!("cargo:rustc-link-lib=dylib=cudart");
        println!("cargo:rustc-link-lib=dylib=cuda");
        println!("cargo:rustc-link-lib=dylib=cudadevrt");
    }
    if env::var("CARGO_FEATURE_ERASURE").is_ok() {
        println!("cargo:rustc-link-lib=dylib=Jerasure");
        println!("cargo:rustc-link-lib=dylib=gf_complete");
    }
}
3
ci/.gitignore
vendored
Normal file
@@ -0,0 +1,3 @@
/node_modules/
/package-lock.json
/snapcraft.credentials
89
ci/README.md
Normal file
@@ -0,0 +1,89 @@

Our CI infrastructure is built around [BuildKite](https://buildkite.com) with some
additional GitHub integration provided by https://github.com/mvines/ci-gate

## Agent Queues

We define two [Agent Queues](https://buildkite.com/docs/agent/v3/queues):
`queue=default` and `queue=cuda`. The `default` queue should be favored and
runs on lower-cost CPU instances. The `cuda` queue is only necessary for
running **tests** that depend on GPU (via CUDA) access -- CUDA builds may still
be run on the `default` queue, with the [buildkite artifact
system](https://buildkite.com/docs/builds/artifacts) used to transfer build
products over to a GPU instance for testing.

## Buildkite Agent Management

### Buildkite GCP Setup

CI runs on Google Cloud Platform via two Compute Engine Instance groups:
`ci-default` and `ci-cuda`. Autoscaling is currently disabled and the number of
VM Instances in each group is manually adjusted.

#### Updating a CI Disk Image

Each Instance group has its own disk image, `ci-default-vX` and
`ci-cuda-vY`, where *X* and *Y* are incremented each time the image is changed.

The process to update a disk image is as follows (TODO: make this less manual):

1. Create a new VM Instance using the disk image to modify.
2. Once the VM boots, ssh to it and modify the disk as desired.
3. Stop the VM Instance running the modified disk. Remember the name of the VM disk.
4. From another machine, `gcloud auth login`, then create a new Disk Image based
off the modified VM Instance:
```
$ gcloud compute images create ci-default-$(date +%Y%m%d%H%M) --source-disk xxx --source-disk-zone us-east1-b --family ci-default
```
or
```
$ gcloud compute images create ci-cuda-$(date +%Y%m%d%H%M) --source-disk xxx --source-disk-zone us-east1-b --family ci-cuda
```
5. Delete the new VM instance.
6. Go to the Instance templates tab, find the existing template named
`ci-default-vX` or `ci-cuda-vY` and select it. Use the "Copy" button to create
a new Instance template called `ci-default-vX+1` or `ci-cuda-vY+1` with the
newly created Disk image.
7. Go to the Instance Groups tab and find the applicable group, `ci-default` or
`ci-cuda`. Edit the Instance Group in two steps: (a) set the number of
instances to 0 and wait for them all to terminate, (b) update the Instance
template and restore the number of instances to the original value.
8. Clean up the previous version by deleting it from Instance Templates and
Images.

## Reference

### Buildkite AWS CloudFormation Setup

**AWS CloudFormation is currently inactive, although it may be restored in the
future**

AWS CloudFormation can be used to scale machines up and down based on the
current CI load. If no machine is currently running it can take up to 60
seconds to spin up a new instance, so please remain calm during this time.

#### AMI
We use a custom AWS AMI built via https://github.com/solana-labs/elastic-ci-stack-for-aws/tree/solana/cuda.

Use the following process to update this AMI as dependencies change:
```bash
$ export AWS_ACCESS_KEY_ID=my_access_key
$ export AWS_SECRET_ACCESS_KEY=my_secret_access_key
$ git clone https://github.com/solana-labs/elastic-ci-stack-for-aws.git -b solana/cuda
$ cd elastic-ci-stack-for-aws/
$ make build
$ make build-ami
```

Watch for the *"amazon-ebs: AMI:"* log message to extract the name of the new
AMI. For example:
```
amazon-ebs: AMI: ami-07118545e8b4ce6dc
```
The new AMI should also now be visible in your EC2 Dashboard. Go to the desired
AWS CloudFormation stack, update the **ImageId** field to the new AMI id, and
*apply* the stack changes.
32
ci/audit.sh
Executable file
@@ -0,0 +1,32 @@
#!/bin/bash -e
#
# Audits project dependencies for security vulnerabilities
#

cd "$(dirname "$0")/.."

export RUST_BACKTRACE=1
rustc --version
cargo --version

_() {
  echo "--- $*"
  "$@"
}

maybe_cargo_install() {
  for cmd in "$@"; do
    set +e
    cargo "$cmd" --help > /dev/null 2>&1
    declare exitcode=$?
    set -e
    if [[ $exitcode -eq 101 ]]; then
      _ cargo install cargo-"$cmd"
    fi
  done
}

maybe_cargo_install audit tree

_ cargo tree
_ cargo audit
4
ci/buildkite-snap.yml
Normal file
@@ -0,0 +1,4 @@
steps:
  - command: "ci/snap.sh"
    timeout_in_minutes: 40
    name: "snap [public]"
49
ci/buildkite.yml
Normal file
@@ -0,0 +1,49 @@
steps:
  - command: "ci/docker-run.sh solanalabs/rust:1.28.0 ci/test-stable.sh"
    name: "stable [public]"
    env:
      CARGO_TARGET_CACHE_NAME: "stable"
    timeout_in_minutes: 30
  - command: "ci/docker-run.sh solanalabs/rust:1.28.0 ci/test-bench.sh"
    name: "bench [public]"
    env:
      CARGO_TARGET_CACHE_NAME: "stable"
    timeout_in_minutes: 30
  - command: "ci/shellcheck.sh"
    name: "shellcheck [public]"
    timeout_in_minutes: 20
  - command: "ci/docker-run.sh solanalabs/rust-nightly:2018-08-14 ci/test-nightly.sh"
    name: "nightly [public]"
    env:
      CARGO_TARGET_CACHE_NAME: "nightly"
    timeout_in_minutes: 30
  - command: "ci/test-stable-perf.sh"
    name: "stable-perf [public]"
    env:
      CARGO_TARGET_CACHE_NAME: "stable-perf"
    timeout_in_minutes: 20
    agents:
      - "queue=cuda"
  - command: "ci/test-large-network.sh || true"
    name: "large-network [public] [ignored]"
    env:
      CARGO_TARGET_CACHE_NAME: "stable"
    timeout_in_minutes: 20
    agents:
      - "queue=large"
  - command: "ci/pr-snap.sh"
    timeout_in_minutes: 20
    name: "snap [public]"
  - wait
  - command: "ci/publish-crate.sh"
    timeout_in_minutes: 20
    name: "publish crate [public]"
  - trigger: "solana-snap"
    branches: "!pull/*"
    async: true
    build:
      message: "${BUILDKITE_MESSAGE}"
      commit: "${BUILDKITE_COMMIT}"
      branch: "${BUILDKITE_BRANCH}"
      env:
        TRIGGERED_BUILDKITE_TAG: "${BUILDKITE_TAG}"
55
ci/docker-run.sh
Executable file
@@ -0,0 +1,55 @@
#!/bin/bash -e

usage() {
  echo "Usage: $0 [docker image name] [command]"
  echo
  echo Runs command in the specified docker image with
  echo a CI-appropriate environment
  echo
}

cd "$(dirname "$0")/.."

IMAGE="$1"
if [[ -z "$IMAGE" ]]; then
  echo Error: image not defined
  exit 1
fi

docker pull "$IMAGE"
shift

ARGS=(
  --workdir /solana
  --volume "$PWD:/solana"
  --rm
)

if [[ -n $CI ]]; then
  ARGS+=(--volume "$HOME:/home")
  ARGS+=(--env "CARGO_HOME=/home/.cargo")
fi

# kcov tries to set the personality of the binary which docker
# doesn't allow by default.
ARGS+=(--security-opt "seccomp=unconfined")

# Ensure files are created with the current host uid/gid
if [[ -z "$SOLANA_DOCKER_RUN_NOSETUID" ]]; then
  ARGS+=(--user "$(id -u):$(id -g)")
fi

# Environment variables to propagate into the container
ARGS+=(
  --env BUILDKITE
  --env BUILDKITE_AGENT_ACCESS_TOKEN
  --env BUILDKITE_BRANCH
  --env BUILDKITE_JOB_ID
  --env BUILDKITE_TAG
  --env CODECOV_TOKEN
  --env CRATES_IO_TOKEN
  --env SNAPCRAFT_CREDENTIALS_KEY
)

set -x
exec docker run "${ARGS[@]}" "$IMAGE" "$@"
9
ci/docker-rust-nightly/Dockerfile
Normal file
@@ -0,0 +1,9 @@
FROM rustlang/rust:nightly

RUN rustup component add clippy-preview --toolchain=nightly && \
    echo deb http://ftp.debian.org/debian stretch-backports main >> /etc/apt/sources.list && \
    apt update && \
    apt install -y \
      llvm-6.0 \
      && \
    rm -rf /var/lib/apt/lists/*
6
ci/docker-rust-nightly/README.md
Normal file
@@ -0,0 +1,6 @@
Docker image containing rust nightly and some preinstalled crates used in CI.

This image may be manually updated by running `./build.sh` if you are a member
of the [Solana Labs](https://hub.docker.com/u/solanalabs/) Docker Hub
organization, but it is also automatically updated periodically by
[this automation](https://buildkite.com/solana-labs/solana-ci-docker-rust-nightly).
6
ci/docker-rust-nightly/build.sh
Executable file
@@ -0,0 +1,6 @@
#!/bin/bash -ex

cd "$(dirname "$0")"

docker build -t solanalabs/rust-nightly .
docker push solanalabs/rust-nightly
15
ci/docker-rust/Dockerfile
Normal file
@@ -0,0 +1,15 @@
FROM rust:1.28

RUN apt update && \
    apt-get install -y apt-transport-https && \
    echo deb https://apt.buildkite.com/buildkite-agent stable main > /etc/apt/sources.list.d/buildkite-agent.list && \
    apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 32A37959C2FA5C3C99EFBC32A79206696452D198 && \
    apt update && \
    apt install -y \
      buildkite-agent \
      rsync \
      sudo \
      cmake \
      && \
    rustup component add rustfmt-preview && \
    rm -rf /var/lib/apt/lists/*
6
ci/docker-rust/README.md
Normal file
@@ -0,0 +1,6 @@
Docker image containing rust and some preinstalled packages used in CI.

This image may be manually updated by running `./build.sh` if you are a member
of the [Solana Labs](https://hub.docker.com/u/solanalabs/) Docker Hub
organization, but it is also automatically updated periodically by
[this automation](https://buildkite.com/solana-labs/solana-ci-docker-rust).
6
ci/docker-rust/build.sh
Executable file
@@ -0,0 +1,6 @@
#!/bin/bash -ex

cd "$(dirname "$0")"

docker build -t solanalabs/rust .
docker push solanalabs/rust
7
ci/docker-snapcraft/Dockerfile
Normal file
@@ -0,0 +1,7 @@
FROM snapcraft/xenial-amd64

# Update snapcraft to latest version
RUN apt-get update -qq \
    && apt-get install -y snapcraft daemontools \
    && rm -rf /var/lib/apt/lists/* \
    && snapcraft --version
6
ci/docker-snapcraft/build.sh
Executable file
@@ -0,0 +1,6 @@
#!/bin/bash -ex

cd "$(dirname "$0")"

docker build -t solanalabs/snapcraft .
docker push solanalabs/snapcraft
81
ci/hoover.sh
Executable file
@@ -0,0 +1,81 @@
#!/bin/bash
#
# Regular maintenance performed on a buildkite agent to control disk usage
#

echo --- Delete all exited containers first
(
  set -x
  exited=$(docker ps -aq --no-trunc --filter "status=exited")
  if [[ -n "$exited" ]]; then
    # shellcheck disable=SC2086 # Don't want to double quote "$exited"
    docker rm $exited
  fi
)

echo --- Delete untagged images
(
  set -x
  untagged=$(docker images | grep '<none>' | awk '{ print $3 }')
  if [[ -n "$untagged" ]]; then
    # shellcheck disable=SC2086 # Don't want to double quote "$untagged"
    docker rmi $untagged
  fi
)

echo --- Delete all dangling images
(
  set -x
  dangling=$(docker images --filter 'dangling=true' -q --no-trunc | sort | uniq)
  if [[ -n "$dangling" ]]; then
    # shellcheck disable=SC2086 # Don't want to double quote "$dangling"
    docker rmi $dangling
  fi
)

echo --- Remove unused docker networks
(
  set -x
  docker network prune -f
)

echo "--- Delete /tmp files older than 1 day owned by $(whoami)"
(
  set -x
  find /tmp -maxdepth 1 -user "$(whoami)" -mtime +1 -print0 | xargs -0 rm -rf
)

echo --- Deleting stale buildkite agent build directories
if [[ ! -d ../../../../builds/$BUILDKITE_AGENT_NAME ]]; then
  # We might not be where we think we are, do nothing
  echo Warning: Skipping flush of stale agent build directories
  echo "  PWD=$PWD"
else
  # NOTE: this will be horribly broken if we ever decide to run multiple
  #       agents on the same machine.
  (
    for keepDir in "$BUILDKITE_PIPELINE_SLUG" \
                   "$BUILDKITE_ORGANIZATION_SLUG" \
                   "$BUILDKITE_AGENT_NAME"; do
      cd .. || exit 1
      for dir in *; do
        if [[ -d $dir && $dir != "$keepDir" ]]; then
          echo "Removing $dir"
          rm -rf "${dir:?}"/
        fi
      done
    done
  )
fi

echo --- System Status
(
  set -x
  docker images
  docker ps
  docker network ls
  df -h
)

exit 0
32
ci/install-earlyoom.sh
Executable file
@@ -0,0 +1,32 @@
#!/bin/bash -x
#
# Install EarlyOOM
#

[[ $(uname) = Linux ]] || exit 1

# 64 - enable signalling of processes (term, kill, oom-kill)
# TODO: This setting will not persist across reboots
sysrq=$(( $(cat /proc/sys/kernel/sysrq) | 64 ))
sudo sysctl -w kernel.sysrq=$sysrq

if command -v earlyoom; then
  sudo systemctl status earlyoom
  exit 0
fi

wget http://ftp.us.debian.org/debian/pool/main/e/earlyoom/earlyoom_1.1-2_amd64.deb
sudo apt install --quiet --yes ./earlyoom_1.1-2_amd64.deb

cat > earlyoom <<OOM
# use the kernel OOM killer, trigger at 20% available RAM
EARLYOOM_ARGS="-k -m 20"
OOM
sudo cp earlyoom /etc/default/
rm earlyoom

sudo systemctl stop earlyoom
sudo systemctl enable earlyoom
sudo systemctl start earlyoom

exit 0
8
ci/is-pr.sh
Executable file
@@ -0,0 +1,8 @@
#!/bin/bash -e
#
# The standard BUILDKITE_PULL_REQUEST environment variable is always "false" due
# to how solana-ci-gate is used to trigger PR builds rather than using the
# standard Buildkite PR trigger.
#

[[ $BUILDKITE_BRANCH =~ pull/* ]]
86
ci/localnet-sanity.sh
Executable file
@@ -0,0 +1,86 @@
#!/bin/bash -e
#
# Perform a quick sanity test on a leader, drone, validator and client running
# locally on the same machine
#

cd "$(dirname "$0")"/..
source ci/upload_ci_artifact.sh
source multinode-demo/common.sh

./multinode-demo/setup.sh

backgroundCommands="drone leader validator validator-x"
pids=()

for cmd in $backgroundCommands; do
  echo "--- Start $cmd"
  rm -f log-"$cmd".txt
  ./multinode-demo/"$cmd".sh > log-"$cmd".txt 2>&1 &
  declare pid=$!
  pids+=("$pid")
  echo "pid: $pid"
done

killBackgroundCommands() {
  set +e
  for pid in "${pids[@]}"; do
    if kill "$pid"; then
      wait "$pid"
    else
      echo -e "^^^ +++\\nWarning: unable to kill $pid"
    fi
  done
  set -e
  pids=()
}

shutdown() {
  exitcode=$?
  killBackgroundCommands

  set +e

  echo "--- Upload artifacts"
  for cmd in $backgroundCommands; do
    declare logfile=log-$cmd.txt
    upload_ci_artifact "$logfile"
    tail "$logfile"
  done

  exit $exitcode
}

trap shutdown EXIT INT

set -e

flag_error() {
  echo Failed
  echo "^^^ +++"
  exit 1
}

echo "--- Wallet sanity"
(
  set -x
  multinode-demo/test/wallet-sanity.sh
) || flag_error

echo "--- Node count"
(
  set -x
  ./multinode-demo/client.sh "$PWD" 3 -c --addr 127.0.0.1
) || flag_error

killBackgroundCommands

echo "--- Ledger verification"
(
  set -x
  $solana_ledger_tool --ledger "$SOLANA_CONFIG_DIR"/ledger verify
) || flag_error

echo +++
echo Ok
exit 0
18
ci/pr-snap.sh
Executable file
@@ -0,0 +1,18 @@
#!/bin/bash -e
#
# Only run snap.sh for pull requests that modify files under /snap
#

cd "$(dirname "$0")"

if ./is-pr.sh; then
  affected_files="$(buildkite-agent meta-data get affected_files)"
  echo "Affected files in this PR: $affected_files"
  if [[ ! ":$affected_files:" =~ :snap/ ]]; then
    echo "Skipping snap build as no files under /snap were modified"
    exit 0
  fi
  exec ./snap.sh
else
  echo "Skipping snap build as this is not a pull request"
fi
19
ci/publish-crate.sh
Executable file
@@ -0,0 +1,19 @@
#!/bin/bash -e

cd "$(dirname "$0")/.."

if [[ -z "$BUILDKITE_TAG" ]]; then
  # Skip publish if this is not a tagged release
  exit 0
fi

if [[ -z "$CRATES_IO_TOKEN" ]]; then
  echo CRATES_IO_TOKEN undefined
  exit 1
fi

# TODO: Ensure the published version matches the contents of BUILDKITE_TAG
ci/docker-run.sh rust \
  bash -exc "cargo package; cargo publish --token $CRATES_IO_TOKEN"

exit 0
19
ci/run-local.sh
Executable file
@@ -0,0 +1,19 @@
#!/bin/bash -e
#
# Run the entire buildkite CI pipeline locally for pre-testing before sending a
# Github pull request
#

cd "$(dirname "$0")/.."
BKRUN=ci/node_modules/.bin/bkrun

if [[ ! -x $BKRUN ]]; then
  (
    set -x
    cd ci/
    npm install bkrun
  )
fi

set -x
exec ./ci/node_modules/.bin/bkrun ci/buildkite.yml
16
ci/shellcheck.sh
Executable file
@@ -0,0 +1,16 @@
#!/bin/bash -e
#
# Reference: https://github.com/koalaman/shellcheck/wiki/Directive

cd "$(dirname "$0")/.."

set -x
find . -name "*.sh" \
    -not -regex ".*/.cargo/.*" \
    -not -regex ".*/node_modules/.*" \
    -not -regex ".*/target/.*" \
    -print0 \
  | xargs -0 \
      ci/docker-run.sh koalaman/shellcheck --color=always --external-sources --shell=bash

exit 0
55
ci/snap.sh
Executable file
@@ -0,0 +1,55 @@
#!/bin/bash -e

cd "$(dirname "$0")/.."

DRYRUN=
if [[ -z $BUILDKITE_BRANCH ]] || ./ci/is-pr.sh; then
  DRYRUN="echo"
fi

# BUILDKITE_TAG is the normal environment variable set by Buildkite. However
# when this script is run from a triggered pipeline, TRIGGERED_BUILDKITE_TAG is
# used instead of BUILDKITE_TAG (due to Buildkite limitations that prevent
# BUILDKITE_TAG from propagating through to triggered pipelines)
if [[ -n "$BUILDKITE_TAG" || -n "$TRIGGERED_BUILDKITE_TAG" ]]; then
  SNAP_CHANNEL=stable
elif [[ $BUILDKITE_BRANCH = master ]]; then
  SNAP_CHANNEL=edge
else
  SNAP_CHANNEL=beta
fi

if [[ -z $DRYRUN ]]; then
  [[ -n $SNAPCRAFT_CREDENTIALS_KEY ]] || {
    echo SNAPCRAFT_CREDENTIALS_KEY not defined
    exit 1
  }
  (
    openssl aes-256-cbc -d \
      -in ci/snapcraft.credentials.enc \
      -out ci/snapcraft.credentials \
      -k "$SNAPCRAFT_CREDENTIALS_KEY"

    snapcraft login --with ci/snapcraft.credentials
  ) || {
    rm -f ci/snapcraft.credentials
    exit 1
  }
fi

set -x

echo --- checking for multilog
if [[ ! -x /usr/bin/multilog ]]; then
  echo "multilog not found, install with: sudo apt-get install -y daemontools"
  exit 1
fi

echo --- build: $SNAP_CHANNEL channel
snapcraft

source ci/upload_ci_artifact.sh
upload_ci_artifact solana_*.snap

echo --- publish: $SNAP_CHANNEL channel
$DRYRUN snapcraft push solana_*.snap --release $SNAP_CHANNEL
BIN
ci/snapcraft.credentials.enc
Normal file
Binary file not shown.
13
ci/test-bench.sh
Executable file
@@ -0,0 +1,13 @@
#!/bin/bash -e

cd "$(dirname "$0")/.."

ci/version-check.sh stable
export RUST_BACKTRACE=1

_() {
  echo "--- $*"
  "$@"
}

_ cargo bench --verbose
45
ci/test-large-network.sh
Executable file
@@ -0,0 +1,45 @@
#!/bin/bash -e

here=$(dirname "$0")
cd "$here"/..

if ! ci/version-check.sh stable; then
  # This job doesn't run within a container, try once to upgrade tooling on a
  # version check failure
  rustup install stable
  ci/version-check.sh stable
fi
export RUST_BACKTRACE=1

./fetch-perf-libs.sh
export LD_LIBRARY_PATH+=:$PWD

export RUST_LOG=multinode=info

if [[ $(ulimit -n) -lt 65000 ]]; then
  echo 'Error: nofiles too small, run "ulimit -n 65000" to continue'
  exit 1
fi

if [[ $(sysctl -n net.core.rmem_default) -lt 1610612736 ]]; then
  echo 'Error: rmem_default too small, run "sudo sysctl -w net.core.rmem_default=1610612736" to continue'
  exit 1
fi

if [[ $(sysctl -n net.core.rmem_max) -lt 1610612736 ]]; then
  echo 'Error: rmem_max too small, run "sudo sysctl -w net.core.rmem_max=1610612736" to continue'
  exit 1
fi

if [[ $(sysctl -n net.core.wmem_default) -lt 1610612736 ]]; then
  echo 'Error: wmem_default too small, run "sudo sysctl -w net.core.wmem_default=1610612736" to continue'
  exit 1
fi

if [[ $(sysctl -n net.core.wmem_max) -lt 1610612736 ]]; then
  echo 'Error: wmem_max too small, run "sudo sysctl -w net.core.wmem_max=1610612736" to continue'
  exit 1
fi

set -x
exec cargo test --release --features=erasure test_multi_node_dynamic_network -- --ignored
31
ci/test-nightly.sh
Executable file
@@ -0,0 +1,31 @@
#!/bin/bash -e

cd "$(dirname "$0")/.."

ci/version-check.sh nightly
export RUST_BACKTRACE=1

_() {
  echo "--- $*"
  "$@"
}

_ cargo build --verbose --features unstable
_ cargo test --verbose --features unstable
_ cargo clippy -- --deny=warnings

exit 0

# Coverage disabled (see issue #433)
_ cargo cov test
_ cargo cov report

echo --- Coverage report:
ls -l target/cov/report/index.html

if [[ -z "$CODECOV_TOKEN" ]]; then
  echo CODECOV_TOKEN undefined
else
  bash <(curl -s https://codecov.io/bash) -x 'llvm-cov-6.0 gcov'
fi
30
ci/test-stable-perf.sh
Executable file
@@ -0,0 +1,30 @@
#!/bin/bash -e

cd "$(dirname "$0")/.."

if ! ci/version-check.sh stable; then
  # This job doesn't run within a container, try once to upgrade tooling on a
  # version check failure
  rustup install stable
  ci/version-check.sh stable
fi
export RUST_BACKTRACE=1

./fetch-perf-libs.sh
export LD_LIBRARY_PATH=$PWD:/usr/local/cuda/lib64
export PATH=$PATH:/usr/local/cuda/bin

_() {
  echo "--- $*"
  "$@"
}

_ cargo test --features=cuda,erasure

echo --- ci/localnet-sanity.sh
(
  set -x
  # Assume |cargo build| has populated target/debug/ successfully.
  export PATH=$PWD/target/debug:$PATH
  USE_INSTALL=1 ci/localnet-sanity.sh
)
25
ci/test-stable.sh
Executable file
@@ -0,0 +1,25 @@
#!/bin/bash -e

cd "$(dirname "$0")/.."

ci/version-check.sh stable
export RUST_BACKTRACE=1

_() {
  echo "--- $*"
  "$@"
}

_ cargo fmt -- --check
_ cargo build --verbose
_ cargo test --verbose

echo --- ci/localnet-sanity.sh
(
  set -x
  # Assume |cargo build| has populated target/debug/ successfully.
  export PATH=$PWD/target/debug:$PATH
  USE_INSTALL=1 ci/localnet-sanity.sh
)

_ ci/audit.sh
519
ci/testnet-deploy.sh
Executable file
@@ -0,0 +1,519 @@
#!/bin/bash -e
#
# Deploys the Solana software running on the testnet full nodes
#
# This script must be run by a user/machine that has successfully authenticated
# with GCP and has sufficient permission.
#
here=$(dirname "$0")
metrics_write_datapoint="$here"/../multinode-demo/metrics_write_datapoint.sh

# TODO: Switch over to rolling updates
ROLLING_UPDATE=false
#ROLLING_UPDATE=true

if [[ -z $SOLANA_METRICS_CONFIG ]]; then
  echo Error: SOLANA_METRICS_CONFIG environment variable is unset
  exit 1
fi

# The SOLANA_METRICS_CONFIG environment variable is formatted as a
# comma-delimited list of parameters. All parameters are optional.
#
# Example:
#   export SOLANA_METRICS_CONFIG="host=<metrics host>,db=<database name>,u=<username>,p=<password>"
#
configure_metrics() {
  [[ -n $SOLANA_METRICS_CONFIG ]] || return 0

  declare metrics_params
  IFS=',' read -r -a metrics_params <<< "$SOLANA_METRICS_CONFIG"
  for param in "${metrics_params[@]}"; do
    IFS='=' read -r -a pair <<< "$param"
    if [[ ${#pair[@]} != 2 ]]; then
      echo Error: invalid metrics parameter: "$param" >&2
    else
      declare name="${pair[0]}"
      declare value="${pair[1]}"
      case "$name" in
      host)
        export INFLUX_HOST="$value"
        echo INFLUX_HOST="$INFLUX_HOST" >&2
        ;;
      db)
        export INFLUX_DATABASE="$value"
        echo INFLUX_DATABASE="$INFLUX_DATABASE" >&2
        ;;
      u)
        export INFLUX_USERNAME="$value"
        echo INFLUX_USERNAME="$INFLUX_USERNAME" >&2
        ;;
      p)
        export INFLUX_PASSWORD="$value"
        echo INFLUX_PASSWORD="********" >&2
        ;;
      *)
        echo Error: Unknown metrics parameter name: "$name" >&2
        ;;
      esac
    fi
  done
}
configure_metrics

# Default to edge channel. To select the beta channel:
#   export SOLANA_SNAP_CHANNEL=beta
if [[ -z $SOLANA_SNAP_CHANNEL ]]; then
  SOLANA_SNAP_CHANNEL=edge
fi

# Select default network URL based on SOLANA_SNAP_CHANNEL if SOLANA_NET_ENTRYPOINT is
# unspecified
if [[ -z $SOLANA_NET_ENTRYPOINT ]]; then
  case $SOLANA_SNAP_CHANNEL in
  edge)
    SOLANA_NET_ENTRYPOINT=master.testnet.solana.com
    unset SOLANA_NET_NAME
    ;;
  beta)
    SOLANA_NET_ENTRYPOINT=testnet.solana.com
    unset SOLANA_NET_NAME
    ;;
  *)
    echo Error: Unknown SOLANA_SNAP_CHANNEL=$SOLANA_SNAP_CHANNEL
    exit 1
    ;;
  esac
fi

if [[ -z $SOLANA_NET_NAME ]]; then
  SOLANA_NET_NAME=${SOLANA_NET_ENTRYPOINT//./-}
fi

: ${SOLANA_NET_NAME:?$SOLANA_NET_ENTRYPOINT}
netBasename=${SOLANA_NET_NAME/-*/}
if [[ $netBasename != testnet ]]; then
  netBasename="testnet-$netBasename"
fi

# Figure out the installation command
SNAP_INSTALL_CMD="\
  for i in {1..3}; do \
    sudo snap install solana --$SOLANA_SNAP_CHANNEL --devmode && break;
    sleep 1; \
  done \
"
LOCAL_SNAP=$1
if [[ -n $LOCAL_SNAP ]]; then
  if [[ ! -f $LOCAL_SNAP ]]; then
    echo "Error: $LOCAL_SNAP is not a file"
    exit 1
  fi
  SNAP_INSTALL_CMD="sudo snap install ~/solana_local.snap --devmode --dangerous"
fi
SNAP_INSTALL_CMD="sudo snap remove solana; $SNAP_INSTALL_CMD"

EARLYOOM_INSTALL_CMD="\
  wget --retry-connrefused --waitretry=1 \
    --read-timeout=20 --timeout=15 --tries=5 \
    -O install-earlyoom.sh \
    https://raw.githubusercontent.com/solana-labs/solana/v0.7/ci/install-earlyoom.sh; \
  bash install-earlyoom.sh \
"
SNAP_INSTALL_CMD="$EARLYOOM_INSTALL_CMD; $SNAP_INSTALL_CMD"

# `export SKIP_INSTALL=1` to reset the network without reinstalling the snap
if [[ -n $SKIP_INSTALL ]]; then
  SNAP_INSTALL_CMD="echo Install skipped"
fi

echo "+++ Configuration for $netBasename"
publicUrl="$SOLANA_NET_ENTRYPOINT"
if [[ $publicUrl = testnet.solana.com ]]; then
  publicIp="" # Use default value
else
  publicIp=$(dig +short $publicUrl | head -n1)
fi

echo "Network name: $SOLANA_NET_NAME"
echo "Network entry point URL: $publicUrl ($publicIp)"
echo "Snap channel: $SOLANA_SNAP_CHANNEL"
echo "Install command: $SNAP_INSTALL_CMD"
echo "Setup args: $SOLANA_SETUP_ARGS"
[[ -z $LOCAL_SNAP ]] || echo "Local snap: $LOCAL_SNAP"

vmlist=() # Each array element is formatted as "class:vmName:vmZone:vmPublicIp"

vm_exec() {
  declare vmName=$1
  declare vmZone=$2
  declare vmPublicIp=$3
  declare message=$4
  declare cmd=$5

  echo "--- $message $vmName in zone $vmZone ($vmPublicIp)"
  ssh -o BatchMode=yes -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null \
    testnet-deploy@"$vmPublicIp" "$cmd"
}

#
# vm_foreach [cmd] [extra args to cmd]
# where
#   cmd   - the command to execute on each VM
#           The command will receive five fixed arguments, followed by any
#           additional arguments supplied to vm_foreach:
#               vmName - GCP name of the VM
#               vmZone - The GCP zone the VM is located in
#               vmPublicIp - The public IP address of this VM
#               vmClass - The 'class' of this VM
#               count  - Monotonically increasing count for each
#                        invocation of cmd, starting at 1
#               ...    - Extra args to cmd..
#
#
vm_foreach() {
  declare cmd=$1
  shift

  declare count=1
  for info in "${vmlist[@]}"; do
    declare vmClass vmName vmZone vmPublicIp
    IFS=: read -r vmClass vmName vmZone vmPublicIp < <(echo "$info")

    eval "$cmd" "$vmName" "$vmZone" "$vmPublicIp" "$vmClass" "$count" "$@"
    count=$((count + 1))
  done
}

#
# vm_foreach_in_class [class] [cmd]
# where
#   class - the desired VM class to operate on
#   cmd   - the command to execute on each VM in the desired class.
#           The command will receive four arguments:
#               vmName - GCP name of the VM
#               vmZone - The GCP zone the VM is located in
#               vmPublicIp - The public IP address of this VM
#               count  - Monotonically increasing count for each
#                        invocation of cmd, starting at 1
#
#
_run_cmd_if_class() {
  declare vmName=$1
  declare vmZone=$2
  declare vmPublicIp=$3
  declare vmClass=$4
  declare count=$5
  declare class=$6
  declare cmd=$7
  if [[ $class = "$vmClass" ]]; then
    eval "$cmd" "$vmName" "$vmZone" "$vmPublicIp" "$count"
  fi
}

vm_foreach_in_class() {
  declare class=$1
  declare cmd=$2
  vm_foreach _run_cmd_if_class "$class" "$cmd"
}

#
# Load all VMs matching the specified filter and tag them with the specified
# class into the `vmlist` array.
findVms() {
  declare class="$1"
  declare filter="$2"
  gcloud compute instances list --filter="$filter"
  while read -r vmName vmZone vmPublicIp status; do
    if [[ $status != RUNNING ]]; then
      echo "Warning: $vmName is not RUNNING, ignoring it."
      continue
    fi
    vmlist+=("$class:$vmName:$vmZone:$vmPublicIp")
  done < <(gcloud compute instances list \
             --filter="$filter" \
             --format 'value(name,zone,networkInterfaces[0].accessConfigs[0].natIP,status)')
}

wait_for_pids() {
  echo "--- Waiting for $*"
  for pid in "${pids[@]}"; do
    declare ok=true
    wait "$pid" || ok=false
    cat "log-$pid.txt"
    if ! $ok; then
      echo ^^^ +++
      exit 1
    fi
    rm "log-$pid.txt"
  done
}

delete_unreachable_validators() {
  declare vmName=$1
  declare vmZone=$2
  declare vmPublicIp=$3

  touch "log-$vmName.txt"
  (
    SECONDS=0
    if ! vm_exec "$vmName" "$vmZone" "$vmPublicIp" "Checking $vmName" uptime; then
      echo "^^^ +++"

      # Validators are managed by a Compute Engine Instance Group, so deleting
      # one will just cause a new one to be spawned.
      echo "Warning: $vmName is unreachable, deleting it"
      gcloud compute instances delete "$vmName" --zone "$vmZone"
    fi
    echo "validator checked in ${SECONDS} seconds"
  ) >> "log-$vmName.txt" 2>&1 &
  declare pid=$!

  # Rename log file so it can be discovered later by $pid
  mv "log-$vmName.txt" "log-$pid.txt"
  pids+=("$pid")
}


echo "Validator nodes (unverified):"
findVms validator "name~^$SOLANA_NET_NAME-validator-"
pids=()
vm_foreach_in_class validator delete_unreachable_validators
wait_for_pids validator sanity check
vmlist=()

echo "Leader node:"
findVms leader "name=$SOLANA_NET_NAME"
[[ ${#vmlist[@]} = 1 ]] || {
  echo "Unable to find $SOLANA_NET_NAME"
  exit 1
}

echo "Client node(s):"
findVms client "name~^$SOLANA_NET_NAME-client"

echo "Validator nodes:"
findVms validator "name~^$SOLANA_NET_NAME-validator-"

fullnode_count=0
inc_fullnode_count() {
  fullnode_count=$((fullnode_count + 1))
}
vm_foreach_in_class leader inc_fullnode_count
vm_foreach_in_class validator inc_fullnode_count

# Add "network stopping" datapoint
$metrics_write_datapoint "testnet-deploy,name=$netBasename stop=1"

client_start() {
  declare vmName=$1
  declare vmZone=$2
  declare vmPublicIp=$3
  declare count=$4

  vm_exec "$vmName" "$vmZone" "$vmPublicIp" \
    "Starting client $count:" \
    "\
      set -x;
      snap info solana; \
      sudo snap get solana; \
      threadCount=\$(nproc); \
      if [[ \$threadCount -gt 4 ]]; then threadCount=4; fi; \
      tmux kill-session -t solana; \
      tmux new -s solana -d \" \
        set -x; \
        sudo rm /tmp/solana.log; \
        while : ; do \
          /snap/bin/solana.bench-tps $SOLANA_NET_ENTRYPOINT $fullnode_count --loop -s 600 --sustained -t \$threadCount 2>&1 | tee -a /tmp/solana.log; \
          echo 'https://metrics.solana.com:8086/write?db=${INFLUX_DATABASE}&u=${INFLUX_USERNAME}&p=${INFLUX_PASSWORD}' \
            | xargs curl --max-time 5 -XPOST --data-binary 'testnet-deploy,name=$netBasename clientexit=1'; \
          echo Error: bench-tps should never exit | tee -a /tmp/solana.log; \
        done; \
        bash \
      \"; \
      sleep 2; \
      tmux capture-pane -t solana -p -S -100; \
      tail /tmp/solana.log; \
    "
}

client_stop() {
  declare vmName=$1
  declare vmZone=$2
  declare vmPublicIp=$3
  declare count=$4

  touch "log-$vmName.txt"
  (
    SECONDS=0
    vm_exec "$vmName" "$vmZone" "$vmPublicIp" \
      "Stopping client $vmName ($count):" \
      "\
        set -x;
        tmux list-sessions; \
        tmux capture-pane -t solana -p; \
        tmux kill-session -t solana; \
        $SNAP_INSTALL_CMD; \
        sudo snap set solana metrics-config=$SOLANA_METRICS_CONFIG \
          rust-log=$RUST_LOG \
          default-metrics-rate=$SOLANA_DEFAULT_METRICS_RATE \
          ; \
      "
    echo "Client stopped in ${SECONDS} seconds"
  ) >> "log-$vmName.txt" 2>&1 &
  declare pid=$!

  # Rename log file so it can be discovered later by $pid
  mv "log-$vmName.txt" "log-$pid.txt"
  pids+=("$pid")
}

fullnode_start() {
  declare class=$1
  declare vmName=$2
  declare vmZone=$3
  declare vmPublicIp=$4
  declare count=$5

  touch "log-$vmName.txt"
  (
    SECONDS=0
    commonNodeConfig="\
      rust-log=$RUST_LOG \
      default-metrics-rate=$SOLANA_DEFAULT_METRICS_RATE \
      metrics-config=$SOLANA_METRICS_CONFIG \
      setup-args=$SOLANA_SETUP_ARGS \
    "
    if [[ $class = leader ]]; then
      nodeConfig="mode=leader+drone $commonNodeConfig"
      if [[ -n $SOLANA_CUDA ]]; then
        nodeConfig="$nodeConfig enable-cuda=1"
      fi
    else
      nodeConfig="mode=validator leader-address=$publicIp $commonNodeConfig"
    fi

    vm_exec "$vmName" "$vmZone" "$vmPublicIp" "Starting $class $count:" \
      "\
        set -ex; \
        logmarker='solana deploy $(date)/$RANDOM'; \
        logger \"\$logmarker\"; \
        $SNAP_INSTALL_CMD; \
        sudo snap set solana $nodeConfig; \
        snap info solana; \
        sudo snap get solana; \
        echo Slight delay to get more syslog output; \
        sleep 2; \
        sudo grep -Pzo \"\$logmarker(.|\\n)*\" /var/log/syslog \
      "
    echo "Succeeded in ${SECONDS} seconds"
  ) >> "log-$vmName.txt" 2>&1 &
  declare pid=$!

  # Rename log file so it can be discovered later by $pid
  mv "log-$vmName.txt" "log-$pid.txt"

  pids+=("$pid")
}

leader_start() {
  fullnode_start leader "$@"
}

validator_start() {
  fullnode_start validator "$@"
}

fullnode_stop() {
  declare vmName=$1
  declare vmZone=$2
  declare vmPublicIp=$3
  declare count=$4

  touch "log-$vmName.txt"
  (
    SECONDS=0
    # Try to ping the machine first. When a machine (validator) is restarted,
    # there can be a delay between when the instance is reported as RUNNING and when
    # it's reachable over the network
    timeout 30s bash -c "set -o pipefail; until ping -c 3 $vmPublicIp | tr - _; do echo .; done"
    vm_exec "$vmName" "$vmZone" "$vmPublicIp" "Shutting down" "\
      if snap list solana; then \
        sudo snap set solana mode=; \
      fi"
    echo "Succeeded in ${SECONDS} seconds"
  ) >> "log-$vmName.txt" 2>&1 &
  declare pid=$!

  # Rename log file so it can be discovered later by $pid
  mv "log-$vmName.txt" "log-$pid.txt"

  pids+=("$pid")
}

if [[ -n $LOCAL_SNAP ]]; then
  echo "--- Transferring $LOCAL_SNAP to node(s)"

  transfer_local_snap() {
    declare vmName=$1
    declare vmZone=$2
    declare vmPublicIp=$3
    declare vmClass=$4
    declare count=$5

    echo "--- $vmName in zone $vmZone ($count)"
    SECONDS=0
    scp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null \
      "$LOCAL_SNAP" testnet-deploy@"$vmPublicIp":solana_local.snap
    echo "Succeeded in ${SECONDS} seconds"
  }
  vm_foreach transfer_local_snap
fi

echo "--- Stopping client node(s)"
pids=()
vm_foreach_in_class client client_stop
client_stop_pids=("${pids[@]}")

if ! $ROLLING_UPDATE; then
  pids=()
  echo "--- Shutting down all full nodes"
  vm_foreach_in_class leader fullnode_stop
  vm_foreach_in_class validator fullnode_stop
  wait_for_pids fullnode shutdown
fi

pids=()
echo --- Starting leader node
vm_foreach_in_class leader leader_start
wait_for_pids leader

pids=()
echo --- Starting validator nodes
vm_foreach_in_class validator validator_start
wait_for_pids validators

echo "--- $publicUrl sanity test"
if [[ -z $CI ]]; then
  # TODO: ssh into a node and run testnet-sanity.sh there. It's not safe to
  #       assume the correct Snap is installed on the current non-CI machine
  echo Skipped for non-CI deploy
  snapVersion=unknown
else
  (
    set -x
    USE_SNAP=1 ci/testnet-sanity.sh $publicUrl $fullnode_count
  )
  IFS=\  read -r _ snapVersion _ < <(snap info solana | grep "^installed:")
  snapVersion=${snapVersion/0+git./}
fi

pids=("${client_stop_pids[@]}")
wait_for_pids client shutdown
vm_foreach_in_class client client_start

# Add "network started" datapoint
$metrics_write_datapoint "testnet-deploy,name=$netBasename start=1,version=\"$snapVersion\""

exit 0
77
ci/testnet-sanity.sh
Executable file
@@ -0,0 +1,77 @@
#!/bin/bash -e
#
# Perform a quick sanity test on the specific testnet
#

cd "$(dirname "$0")/.."
source multinode-demo/common.sh

NET_URL=$1
if [[ -z $NET_URL ]]; then
  NET_URL=testnet.solana.com
fi

EXPECTED_NODE_COUNT=$2
if [[ -z $EXPECTED_NODE_COUNT ]]; then
  EXPECTED_NODE_COUNT=50
fi

echo "--- $NET_URL: verify ledger"
if [[ -z $NO_LEDGER_VERIFY ]]; then
  if [[ -d /var/snap/solana/current/config/ledger ]]; then
    # Note: here we assume this script is actually running on the leader node...
    (
      set -x
      sudo cp -r /var/snap/solana/current/config/ledger /var/snap/solana/current/config/ledger-verify-$$
      sudo solana.ledger-tool --ledger /var/snap/solana/current/config/ledger-verify-$$ verify
    )
  else
    echo "^^^ +++"
    echo "Ledger verify skipped"
  fi
else
  echo "^^^ +++"
  echo "Ledger verify skipped (NO_LEDGER_VERIFY defined)"
fi

echo "--- $NET_URL: wallet sanity"
(
  set -x
  multinode-demo/test/wallet-sanity.sh $NET_URL
)

echo "--- $NET_URL: node count"
if [[ -n "$USE_SNAP" ]]; then
  # TODO: Merge client.sh functionality into solana-bench-tps proper and
  #       remove this USE_SNAP case
  cmd=$solana_bench_tps
else
  cmd=multinode-demo/client.sh
fi

(
  set -x
  $cmd $NET_URL $EXPECTED_NODE_COUNT -c
)

echo "--- $NET_URL: validator sanity"
if [[ -z $NO_VALIDATOR_SANITY ]]; then
  (
    ./multinode-demo/setup.sh -t validator
    set -o pipefail
    timeout 10s ./multinode-demo/validator.sh "$NET_URL" 2>&1 | tee validator.log
  )
  wc -l validator.log
  if grep -C100 panic validator.log; then
    echo "^^^ +++"
    echo "Panic observed"
    exit 1
  else
    echo "Validator log looks ok"
  fi
else
  echo "^^^ +++"
  echo "Validator sanity disabled (NO_VALIDATOR_SANITY defined)"
fi

exit 0
18
ci/upload_ci_artifact.sh
Normal file
18
ci/upload_ci_artifact.sh
Normal file
@ -0,0 +1,18 @@
# |source| me

upload_ci_artifact() {
  echo "--- artifact: $1"
  if [[ -r "$1" ]]; then
    ls -l "$1"
    if ${BUILDKITE:-false}; then
      (
        set -x
        buildkite-agent artifact upload "$1"
      )
    fi
  else
    echo ^^^ +++
    echo "$1 not found"
  fi
}
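
Typical usage is to source this file from a CI step and call the function on any log worth keeping. A minimal illustration (the artifact name here is hypothetical):

```bash
# Illustrative only: upload a validator log as a build artifact
source ci/upload_ci_artifact.sh
upload_ci_artifact validator.log
```
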
35
ci/version-check.sh
Executable file
@@ -0,0 +1,35 @@
#!/bin/bash -e

require() {
  declare expectedProgram="$1"
  declare expectedVersion="$2"

  read -r program version _ < <($expectedProgram -V)

  declare ok=true
  [[ $program = "$expectedProgram" ]] || ok=false
  [[ $version =~ $expectedVersion ]] || ok=false

  echo "Found $program $version"
  if ! $ok; then
    echo Error: expected "$expectedProgram $expectedVersion"
    exit 1
  fi
}

case ${1:-stable} in
nightly)
  require rustc 1.30.[0-9]+-nightly
  require cargo 1.29.[0-9]+-nightly
  ;;
stable)
  require rustc 1.28.[0-9]+
  require cargo 1.28.[0-9]+
  ;;
*)
  echo Error: unknown argument: "$1"
  exit 1
  ;;
esac

exit 0
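
For reference, the two invocations a CI step might use, per the case statement above:

```bash
ci/version-check.sh stable   # expects rustc/cargo 1.28.x
ci/version-check.sh nightly  # expects rustc 1.30.x-nightly, cargo 1.29.x-nightly
```
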
@@ -1,65 +0,0 @@
The Historian
===

Create a *Historian* and send it *events* to generate an *event log*, where each *entry*
is tagged with the historian's latest *hash*. Then ensure the order of events was not tampered
with by verifying each entry's hash can be generated from the hash in the previous entry:

```rust
extern crate solana;

use solana::historian::Historian;
use solana::ledger::{verify_slice, Entry, Hash};
use solana::event::{generate_keypair, get_pubkey, sign_claim_data, Event};
use std::thread::sleep;
use std::time::Duration;
use std::sync::mpsc::SendError;

fn create_ledger(hist: &Historian<Hash>) -> Result<(), SendError<Event<Hash>>> {
    sleep(Duration::from_millis(15));
    let tokens = 42;
    let keypair = generate_keypair();
    let event0 = Event::new_claim(get_pubkey(&keypair), tokens, sign_claim_data(&tokens, &keypair));
    hist.sender.send(event0)?;
    sleep(Duration::from_millis(10));
    Ok(())
}

fn main() {
    let seed = Hash::default();
    let hist = Historian::new(&seed, Some(10));
    create_ledger(&hist).expect("send error");
    drop(hist.sender);
    let entries: Vec<Entry<Hash>> = hist.receiver.iter().collect();
    for entry in &entries {
        println!("{:?}", entry);
    }
    // Proof-of-History: Verify the historian learned about the events
    // in the same order they appear in the vector.
    assert!(verify_slice(&entries, &seed));
}
```

Running the program should produce a ledger similar to:

```rust
Entry { num_hashes: 0, id: [0, ...], event: Tick }
Entry { num_hashes: 3, id: [67, ...], event: Transaction { tokens: 42 } }
Entry { num_hashes: 3, id: [123, ...], event: Tick }
```

Proof-of-History
---

Take note of the last line:

```rust
assert!(verify_slice(&entries, &seed));
```

[It's a proof!](https://en.wikipedia.org/wiki/Curry–Howard_correspondence) For each entry returned by the
historian, we can verify that `id` is the result of applying a sha256 hash to the previous `id`
exactly `num_hashes` times, and then hashing the event data on top of that. Because the event data is
included in the hash, the events cannot be reordered without regenerating all the hashes.
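
As a rough shell sketch of that rule (not the project's code: it assumes hex-encoded ids and hashes the ASCII hex, whereas the real ledger hashes raw bytes):

```bash
# Sketch only: verify one entry given the previous entry's id.
verify_entry() {
  declare prev_id=$1 num_hashes=$2 id=$3 event=$4
  declare h=$prev_id i
  # id should equal sha256 applied num_hashes times to the previous id...
  for ((i = 0; i < num_hashes; i++)); do
    h=$(printf '%s' "$h" | sha256sum | cut -d' ' -f1)
  done
  # ...with any event data mixed into the final hash (skipped for a bare Tick)
  [[ -z $event ]] || h=$(printf '%s%s' "$h" "$event" | sha256sum | cut -d' ' -f1)
  [[ $h == "$id" ]]
}
```
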
@@ -1,18 +0,0 @@
msc {
  client,historian,recorder;

  recorder=>historian [ label = "e0 = Entry{id: h0, n: 0, event: Tick}" ] ;
  recorder=>recorder [ label = "h1 = hash(h0)" ] ;
  recorder=>recorder [ label = "h2 = hash(h1)" ] ;
  client=>historian [ label = "Transaction(d0)" ] ;
  historian=>recorder [ label = "Transaction(d0)" ] ;
  recorder=>recorder [ label = "h3 = hash(h2 + d0)" ] ;
  recorder=>historian [ label = "e1 = Entry{id: hash(h3), n: 3, event: Transaction(d0)}" ] ;
  recorder=>recorder [ label = "h4 = hash(h3)" ] ;
  recorder=>recorder [ label = "h5 = hash(h4)" ] ;
  recorder=>recorder [ label = "h6 = hash(h5)" ] ;
  recorder=>historian [ label = "e2 = Entry{id: h6, n: 3, event: Tick}" ] ;
  client=>historian [ label = "collect()" ] ;
  historian=>client [ label = "entries = [e0, e1, e2]" ] ;
  client=>client [ label = "verify_slice(entries, h0)" ] ;
}
35
doc/testnet.md
Normal file
@@ -0,0 +1,35 @@
# TestNet debugging info

Currently we have two testnets, 'perf' and 'master', both on the master branch of the solana repo. Deploys happen
at the top of every hour with the latest code. 'perf' has more cores for the client machine to flood the network
with transactions until failure.

## Deploy process

They are deployed with the `ci/testnet-deploy.sh` script. There is a scheduled buildkite job which runs to do the deploy;
look at `testnet-deploy` to see the agent which ran it and the logs. There is also a job to trigger the deploy manually.
Validators are selected based on their machine name and everyone gets the binaries installed from snap.

## Where are the testnet logs?

For the client they are put in `/tmp/solana`; for validators and leaders they are in `/var/snap/solana/current/`.
You can also see the backtrace of the client by ssh'ing into the client node and doing:

```bash
$ sudo -u testnet-deploy
$ tmux attach -t solana
```

## How do I reset the testnet?

Through buildkite.

## How can I scale the tx generation rate?

Increase the TX rate by increasing the number of cores on the client machine which is running
`bench-tps`, or run multiple clients. Decrease it by lowering the core count or by setting the rayon
environment variable `RAYON_NUM_THREADS=<xx>`.
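
For example (host name and thread count here are illustrative), the cap can be applied when launching the client script, since environment variables propagate down to `solana-bench-tps`:

```bash
# Run the demo client with rayon capped at 4 worker threads
RAYON_NUM_THREADS=4 ./multinode-demo/client.sh testnet.solana.com 50
```
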

## How can I test a change on the testnet?

Currently, a merged PR is the only way to test a change on the testnet.
37
fetch-perf-libs.sh
Executable file
@@ -0,0 +1,37 @@
#!/bin/bash -e

if [[ $(uname) != Linux ]]; then
  echo Performance libraries are only available for Linux
  exit 1
fi

if [[ $(uname -m) != x86_64 ]]; then
  echo Performance libraries are only available for x86_64 architecture
  exit 1
fi

(
  set -x
  curl -o solana-perf.tgz \
    https://solana-perf.s3.amazonaws.com/v0.8.0/x86_64-unknown-linux-gnu/solana-perf.tgz
  tar zxvf solana-perf.tgz
)

if [[ -r /usr/local/cuda/version.txt && -r cuda-version.txt ]]; then
  if ! diff /usr/local/cuda/version.txt cuda-version.txt > /dev/null; then
    echo ==============================================
    echo Warning: possible CUDA version mismatch
    echo
    echo "Expected version: $(cat cuda-version.txt)"
    echo "Detected version: $(cat /usr/local/cuda/version.txt)"
    echo ==============================================
  fi
else
  echo ==============================================
  echo Warning: unable to validate CUDA version
  echo ==============================================
fi

echo "Downloaded solana-perf version: $(cat solana-perf-HEAD.txt)"

exit 0
66
multinode-demo/client.sh
Executable file
@@ -0,0 +1,66 @@
#!/bin/bash -e
#
USAGE=" usage: $0 [leader_url] [num_nodes] [--loop] [extra args]

 leader_url    URL to the leader (defaults to ..)
 num_nodes     Minimum number of nodes to look for while converging
 --loop        Add this flag to cause the program to loop infinitely
 \"extra args\"  Any additional arguments are passed along to solana-bench-tps
"

here=$(dirname "$0")
# shellcheck source=multinode-demo/common.sh
source "$here"/common.sh

leader=$1
if [[ -n $leader ]]; then
  if [[ $leader == "-h" || $leader == "--help" ]]; then
    echo "$USAGE"
    exit 0
  fi
  shift
else
  if [[ -d "$SNAP" ]]; then
    leader=testnet.solana.com # Default to testnet when running as a Snap
  else
    leader=$here/.. # Default to local solana repo
  fi
fi

count=$1
if [[ -n $count ]]; then
  shift
else
  count=1
fi

loop=
if [[ $1 = --loop ]]; then
  loop=1
  shift
fi

rsync_leader_url=$(rsync_url "$leader")
(
  set -x
  mkdir -p "$SOLANA_CONFIG_CLIENT_DIR"
  $rsync -vPz "$rsync_leader_url"/config/leader.json "$SOLANA_CONFIG_CLIENT_DIR"/

  client_json="$SOLANA_CONFIG_CLIENT_DIR"/client.json
  [[ -r $client_json ]] || $solana_keygen -o "$client_json"
)

iteration=0
set -x
while true; do
  $solana_bench_tps \
    -n "$count" \
    -l "$SOLANA_CONFIG_CLIENT_DIR"/leader.json \
    -k "$SOLANA_CONFIG_CLIENT_DIR"/client.json \
    "$@"
  [[ -n $loop ]] || exit 0
  iteration=$((iteration + 1))
  echo ------------------------------------------------------------------------
  echo "Iteration: $iteration"
  echo ------------------------------------------------------------------------
done
213
multinode-demo/common.sh
Normal file
@@ -0,0 +1,213 @@
# |source| this file
#
# Disable complaints about unused variables in this file:
# shellcheck disable=2034

rsync=rsync
leader_logger="cat"
validator_logger="cat"
drone_logger="cat"

if [[ $(uname) != Linux ]]; then
  # Protect against unsupported configurations to prevent non-obvious errors
  # later. Arguably these should be fatal errors but for now prefer tolerance.
  if [[ -n $USE_SNAP ]]; then
    echo "Warning: Snap is not supported on $(uname)"
    USE_SNAP=
  fi
  if [[ -n $SOLANA_CUDA ]]; then
    echo "Warning: CUDA is not supported on $(uname)"
    SOLANA_CUDA=
  fi
fi

if [[ -d $SNAP ]]; then # Running inside a Linux Snap?
  solana_program() {
    declare program="$1"
    if [[ "$program" = wallet || "$program" = bench-tps ]]; then
      # TODO: Merge wallet.sh/client.sh functionality into
      # solana-wallet/solana-demo-client proper and remove this special case
      printf "%s/bin/solana-%s" "$SNAP" "$program"
    else
      printf "%s/command-%s.wrapper" "$SNAP" "$program"
    fi
  }
  rsync="$SNAP"/bin/rsync
  multilog="$SNAP/bin/multilog t s16777215 n200"
  leader_logger="$multilog $SNAP_DATA/leader"
  validator_logger="$multilog $SNAP_DATA/validator"
  drone_logger="$multilog $SNAP_DATA/drone"
  # Create log directories manually to prevent multilog from creating them as
  # 0700
  mkdir -p "$SNAP_DATA"/{drone,leader,validator}

  SOLANA_METRICS_CONFIG="$(snapctl get metrics-config)"
  SOLANA_DEFAULT_METRICS_RATE="$(snapctl get default-metrics-rate)"
  export SOLANA_DEFAULT_METRICS_RATE
  SOLANA_CUDA="$(snapctl get enable-cuda)"
  RUST_LOG="$(snapctl get rust-log)"

elif [[ -n $USE_SNAP ]]; then # Use the Linux Snap binaries
  solana_program() {
    declare program="$1"
    printf "solana.%s" "$program"
  }
elif [[ -n $USE_INSTALL ]]; then # Assume |cargo install| was run
  solana_program() {
    declare program="$1"
    printf "solana-%s" "$program"
  }
  # CUDA was/wasn't selected at build time, can't affect CUDA state here
  unset SOLANA_CUDA
else
  solana_program() {
    declare program="$1"
    declare features=""
    if [[ "$program" =~ ^(.*)-cuda$ ]]; then
      program=${BASH_REMATCH[1]}
      features="--features=cuda"
    fi
    if [[ -z $DEBUG ]]; then
      maybe_release=--release
    fi
    printf "cargo run $maybe_release --bin solana-%s %s -- " "$program" "$features"
  }
  if [[ -n $SOLANA_CUDA ]]; then
    # shellcheck disable=2154 # 'here' is referenced but not assigned
    if [[ -z $here ]]; then
      echo "|here| is not defined"
      exit 1
    fi

    # Locate perf libs downloaded by |./fetch-perf-libs.sh|
    LD_LIBRARY_PATH=$(cd "$here" && dirname "$PWD"):$LD_LIBRARY_PATH
    export LD_LIBRARY_PATH
  fi
fi

solana_bench_tps=$(solana_program bench-tps)
solana_wallet=$(solana_program wallet)
solana_drone=$(solana_program drone)
solana_fullnode=$(solana_program fullnode)
solana_fullnode_config=$(solana_program fullnode-config)
solana_fullnode_cuda=$(solana_program fullnode-cuda)
solana_genesis=$(solana_program genesis)
solana_keygen=$(solana_program keygen)
solana_ledger_tool=$(solana_program ledger-tool)

export RUST_LOG=${RUST_LOG:-solana=info} # if RUST_LOG is unset, default to info
export RUST_BACKTRACE=1


# The SOLANA_METRICS_CONFIG environment variable is formatted as a
# comma-delimited list of parameters. All parameters are optional.
#
# Example:
#   export SOLANA_METRICS_CONFIG="host=<metrics host>,db=<database name>,u=<username>,p=<password>"
#
configure_metrics() {
  [[ -n $SOLANA_METRICS_CONFIG ]] || return 0

  declare metrics_params
  IFS=',' read -r -a metrics_params <<< "$SOLANA_METRICS_CONFIG"
  for param in "${metrics_params[@]}"; do
    IFS='=' read -r -a pair <<< "$param"
    if [[ ${#pair[@]} != 2 ]]; then
      echo Error: invalid metrics parameter: "$param" >&2
    else
      declare name="${pair[0]}"
      declare value="${pair[1]}"
      case "$name" in
      host)
        export INFLUX_HOST="$value"
        echo INFLUX_HOST="$INFLUX_HOST" >&2
        ;;
      db)
        export INFLUX_DATABASE="$value"
        echo INFLUX_DATABASE="$INFLUX_DATABASE" >&2
        ;;
      u)
        export INFLUX_USERNAME="$value"
        echo INFLUX_USERNAME="$INFLUX_USERNAME" >&2
        ;;
      p)
        export INFLUX_PASSWORD="$value"
        echo INFLUX_PASSWORD="********" >&2
        ;;
      *)
        echo Error: Unknown metrics parameter name: "$name" >&2
        ;;
      esac
    fi
  done
}
configure_metrics

tune_networking() {
  # Skip in CI
  [[ -z $CI ]] || return 0

  # Reference: https://medium.com/@CameronSparr/increase-os-udp-buffers-to-improve-performance-51d167bb1360
  if [[ $(uname) = Linux ]]; then
    (
      set -x +e
      # test the existence of the sysctls before trying to set them
      # go ahead and return true and don't exit if these calls fail
      sysctl net.core.rmem_max 2>/dev/null 1>/dev/null &&
        sudo sysctl -w net.core.rmem_max=26214400 1>/dev/null 2>/dev/null

      sysctl net.core.rmem_default 2>/dev/null 1>/dev/null &&
        sudo sysctl -w net.core.rmem_default=26214400 1>/dev/null 2>/dev/null
    ) || true
  fi

  if [[ $(uname) = Darwin ]]; then
    (
      if [[ $(sysctl net.inet.udp.maxdgram | cut -d\  -f2) != 65535 ]]; then
        echo "Adjusting maxdgram to allow for large UDP packets, see BLOB_SIZE in src/packet.rs:"
        set -x
        sudo sysctl net.inet.udp.maxdgram=65535
      fi
    )
  fi
}

oom_score_adj() {
  declare pid=$1
  declare score=$2
  if [[ $(uname) != Linux ]]; then
    return
  fi

  echo "$score" > "/proc/$pid/oom_score_adj" || true
  declare currentScore
  currentScore=$(cat "/proc/$pid/oom_score_adj" || true)
  if [[ $score != "$currentScore" ]]; then
    echo "Failed to set oom_score_adj to $score for pid $pid (current score: $currentScore)"
  fi
}

SOLANA_CONFIG_DIR=${SNAP_DATA:-$PWD}/config
SOLANA_CONFIG_PRIVATE_DIR=${SNAP_DATA:-$PWD}/config-private
SOLANA_CONFIG_VALIDATOR_DIR=${SNAP_DATA:-$PWD}/config-validator
SOLANA_CONFIG_CLIENT_DIR=${SNAP_USER_DATA:-$PWD}/config-client

rsync_url() { # adds the 'rsync://' prefix to URLs that need it
  declare url="$1"

  if [[ $url =~ ^.*:.*$ ]]; then
    # assume remote-shell transport when colon is present, use $url unmodified
    echo "$url"
    return 0
  fi

  if [[ -d $url ]]; then
    # assume local directory if $url is a valid directory, use $url unmodified
    echo "$url"
    return 0
  fi

  # Default to rsync:// URL
  echo "rsync://$url"
}
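
A quick illustration of the three branches above (paths and hosts are hypothetical, and the directory case assumes the directory exists):

```bash
rsync_url user@host:solana    # contains ':' -> echoed unmodified (remote shell)
rsync_url ~/solana            # existing local directory -> echoed unmodified
rsync_url testnet.solana.com  # otherwise -> rsync://testnet.solana.com
```
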
46
multinode-demo/drone.sh
Executable file
@@ -0,0 +1,46 @@
#!/bin/bash
#
# usage: $0 <rsync network path to solana repo on leader machine>
#

here=$(dirname "$0")
# shellcheck source=multinode-demo/common.sh
source "$here"/common.sh
SOLANA_CONFIG_DIR="$SOLANA_CONFIG_DIR"-drone

if [[ -d "$SNAP" ]]; then
  # Exit if mode is not yet configured
  # (typically the case after the Snap is first installed)
  [[ -n "$(snapctl get mode)" ]] || exit 0

  # Select leader from the Snap configuration
  leader_address="$(snapctl get leader-address)"
  if [[ -z "$leader_address" ]]; then
    # Assume drone is running on the same node as the leader by default
    leader_address="localhost"
  fi
  leader="$leader_address"
else
  leader=${1:-${here}/..} # Default to local tree for data
fi

[[ -f "$SOLANA_CONFIG_PRIVATE_DIR"/mint.json ]] || {
  echo "$SOLANA_CONFIG_PRIVATE_DIR/mint.json not found, create it by running:"
  echo
  echo "  ${here}/setup.sh -t leader"
  exit 1
}

rsync_leader_url=$(rsync_url "$leader")
set -ex
mkdir -p "$SOLANA_CONFIG_DIR"
$rsync -vPz "$rsync_leader_url"/config/leader.json "$SOLANA_CONFIG_DIR"/


trap 'kill "$pid" && wait "$pid"' INT TERM
$solana_drone \
  -l "$SOLANA_CONFIG_DIR"/leader.json -k "$SOLANA_CONFIG_PRIVATE_DIR"/mint.json \
  > >($drone_logger) 2>&1 &
pid=$!
oom_score_adj "$pid" 1000
wait "$pid"
80
multinode-demo/gce_multinode.sh
Executable file
@@ -0,0 +1,80 @@
#!/bin/bash

command=$1
prefix=
num_nodes=
out_file=
image_name="ubuntu-16-04-cuda-9-2-new"

shift

usage() {
  exitcode=0
  if [[ -n "$1" ]]; then
    exitcode=1
    echo "Error: $*"
  fi
  cat <<EOF
usage: $0 <create|delete> <-p prefix> <-n num_nodes> <-o file> [-i image-name]

Manage a GCE multinode network

 create|delete  - Create or delete the network
 -p prefix      - A common prefix for node names, to avoid collision
 -n num_nodes   - Number of nodes
 -o out_file    - Used for create option. Outputs an array of IP addresses
                  of new nodes to the file
 -i image_name  - Existing image on GCE (default $image_name)

EOF
  exit $exitcode
}

while getopts "h?p:i:n:o:" opt; do
  case $opt in
  h | \?)
    usage
    ;;
  p)
    prefix=$OPTARG
    ;;
  i)
    image_name=$OPTARG
    ;;
  o)
    out_file=$OPTARG
    ;;
  n)
    num_nodes=$OPTARG
    ;;
  *)
    usage "Error: unhandled option: $opt"
    ;;
  esac
done

set -e

[[ -n $command ]] || usage "Need a command (create|delete)"
[[ -n $prefix ]] || usage "Need a prefix for GCE instance names"
[[ -n $num_nodes ]] || usage "Need number of nodes"

nodes=()
for i in $(seq 1 "$num_nodes"); do
  nodes+=("$prefix$i")
done

if [[ $command == "create" ]]; then
  [[ -n $out_file ]] || usage "Need an outfile to store IP Addresses"

  ip_addr_list=$(gcloud beta compute instances create "${nodes[@]}" --zone=us-west1-b --tags=testnet \
    --image="$image_name" | awk '/RUNNING/ {print $5}')

  echo "ip_addr_array=($ip_addr_list)" >"$out_file"
elif [[ $command == "delete" ]]; then
  gcloud beta compute instances delete "${nodes[@]}"
else
  usage "Unknown command: $command"
fi
35
multinode-demo/leader.sh
Executable file
@@ -0,0 +1,35 @@
#!/bin/bash

here=$(dirname "$0")
# shellcheck source=multinode-demo/common.sh
source "$here"/common.sh

if [[ -d "$SNAP" ]]; then
  # Exit if mode is not yet configured
  # (typically the case after the Snap is first installed)
  [[ -n "$(snapctl get mode)" ]] || exit 0
fi

[[ -f "$SOLANA_CONFIG_DIR"/leader.json ]] || {
  echo "$SOLANA_CONFIG_DIR/leader.json not found, create it by running:"
  echo
  echo "  ${here}/setup.sh"
  exit 1
}

if [[ -n "$SOLANA_CUDA" ]]; then
  program="$solana_fullnode_cuda"
else
  program="$solana_fullnode"
fi

tune_networking

trap 'kill "$pid" && wait "$pid"' INT TERM
$program \
  --identity "$SOLANA_CONFIG_DIR"/leader.json \
  --ledger "$SOLANA_CONFIG_DIR"/ledger \
  > >($leader_logger) 2>&1 &
pid=$!
oom_score_adj "$pid" 1000
wait "$pid"
16
multinode-demo/metrics_write_datapoint.sh
Executable file
@@ -0,0 +1,16 @@
#!/bin/bash -e

point=$1
if [[ -z $point ]]; then
  echo "Data point not specified"
  exit 1
fi

echo "Influx data point: $point"
if [[ -z $INFLUX_DATABASE || -z $INFLUX_USERNAME || -z $INFLUX_PASSWORD ]]; then
  echo Influx user credentials not found
  exit 0
fi

echo "https://metrics.solana.com:8086/write?db=${INFLUX_DATABASE}&u=${INFLUX_USERNAME}&p=${INFLUX_PASSWORD}" \
  | xargs curl --max-time 5 -XPOST --data-binary "$point"
32
multinode-demo/oom_monitor.sh
Executable file
@@ -0,0 +1,32 @@
#!/bin/bash -e
#
# Reports Linux OOM Killer activity
#

here=$(dirname "$0")
# shellcheck source=multinode-demo/common.sh
source "$here"/common.sh

if [[ $(uname) != Linux ]]; then
  exit 0
fi

syslog=/var/log/syslog
if [[ ! -r $syslog ]]; then
  echo Unable to read $syslog
  exit 0
fi

# Adjust OOM score to reduce the chance that this script will be killed
# during an Out of Memory event since the purpose of this script is to
# report such events
oom_score_adj "self" -500

while read -r victim; do
  echo "Out of memory event detected, $victim killed"
  "$here"/metrics_write_datapoint.sh "oom-killer,victim=$victim killed=1"
done < <( \
  tail --follow=name --retry -n0 $syslog \
  | sed --unbuffered -n 's/^.* Out of memory: Kill process [1-9][0-9]* (\([^)]*\)) .*/\1/p' \
)
exit 1
14
multinode-demo/remote_leader.sh
Executable file
@@ -0,0 +1,14 @@
#!/bin/bash -e

[[ -n $FORCE ]] || exit

chmod 600 ~/.ssh/authorized_keys ~/.ssh/id_rsa

PATH="$HOME"/.cargo/bin:"$PATH"

./fetch-perf-libs.sh

# Run setup
USE_INSTALL=1 ./multinode-demo/setup.sh -p
USE_INSTALL=1 ./multinode-demo/drone.sh >drone.log 2>&1 &
USE_INSTALL=1 SOLANA_CUDA=1 ./multinode-demo/leader.sh >leader.log 2>&1 &
185
multinode-demo/remote_nodes.sh
Executable file
@@ -0,0 +1,185 @@
#!/bin/bash

command=$1
ip_addr_file=
remote_user=
ssh_keys=

shift

usage() {
  exitcode=0
  if [[ -n "$1" ]]; then
    exitcode=1
    echo "Error: $*"
  fi
  cat <<EOF
usage: $0 <start|stop> <-f IP Addr Array file> <-u username> [-k ssh-keys]

Manage a GCE multinode network

 start|stop   - Start or stop the network
 -f file      - A bash script that exports an array of IP addresses, ip_addr_array.
                Elements of the array are public IP addresses of remote nodes.
 -u username  - The username for logging into remote nodes.
 -k ssh-keys  - Path to public/private key pair that remote nodes can use to perform
                rsync and ssh among themselves. Must contain both the public and private keys.

EOF
  exit $exitcode
}

while getopts "h?f:u:k:" opt; do
  case $opt in
  h | \?)
    usage
    ;;
  f)
    ip_addr_file=$OPTARG
    ;;
  u)
    remote_user=$OPTARG
    ;;
  k)
    ssh_keys=$OPTARG
    ;;
  *)
    usage "Error: unhandled option: $opt"
    ;;
  esac
done

set -e

# Sample IP Address array file contents
# ip_addr_array=(192.168.1.1 192.168.1.5 192.168.2.2)

[[ -n $command ]] || usage "Need a command (start|stop)"
[[ -n $ip_addr_file ]] || usage "Need a file with IP address array"
[[ -n $remote_user ]] || usage "Need the username for remote nodes"

ip_addr_array=()
# Get IP address array
# shellcheck source=/dev/null
source "$ip_addr_file"

build_project() {
  echo "Build started at $(date)"
  SECONDS=0

  # Build and install locally
  PATH="$HOME"/.cargo/bin:"$PATH"
  cargo install --force

  echo "Build took $SECONDS seconds"
}

common_start_setup() {
  ip_addr=$1

  # Killing sshguard for now. TODO: Find a better solution
  # sshguard is blacklisting IP address after ssh-keyscan and ssh login attempts
  ssh "$remote_user@$ip_addr" " \
    set -ex; \
    sudo service sshguard stop; \
    sudo apt-get --assume-yes install rsync libssl-dev; \
    mkdir -p ~/.ssh ~/solana ~/.cargo/bin; \
  " >log/"$ip_addr".log

  # If provided, deploy SSH keys
  if [[ -n $ssh_keys ]]; then
    {
      rsync -vPrz "$ssh_keys"/id_rsa "$remote_user@$ip_addr":~/.ssh/
      rsync -vPrz "$ssh_keys"/id_rsa.pub "$remote_user@$ip_addr":~/.ssh/
      rsync -vPrz "$ssh_keys"/id_rsa.pub "$remote_user@$ip_addr":~/.ssh/authorized_keys
      rsync -vPrz ./multinode-demo "$remote_user@$ip_addr":~/solana/
    } >>log/"$ip_addr".log
  fi
}

start_leader() {
  common_start_setup "$1"

  {
    rsync -vPrz ~/.cargo/bin/solana* "$remote_user@$ip_addr":~/.cargo/bin/
    rsync -vPrz ./fetch-perf-libs.sh "$remote_user@$ip_addr":~/solana/
    ssh -n -f "$remote_user@$ip_addr" 'cd solana; FORCE=1 ./multinode-demo/remote_leader.sh'
  } >>log/"$1".log

  leader_ip=$1
  leader_time=$SECONDS
  SECONDS=0
}

start_validator() {
  common_start_setup "$1"

  ssh -n -f "$remote_user@$ip_addr" "cd solana; FORCE=1 ./multinode-demo/remote_validator.sh $leader_ip" >>log/"$1".log
}

start_all_nodes() {
  echo "Deployment started at $(date)"
  SECONDS=0
  count=0
  leader_ip=
  leader_time=

  mkdir -p log

  for ip_addr in "${ip_addr_array[@]}"; do
    if ((!count)); then
      # Start the leader on the first node
      echo "Leader node $ip_addr, killing previous instance and restarting"
      start_leader "$ip_addr"
    else
      # Start validator on all other nodes
      echo "Validator[$count] node $ip_addr, killing previous instance and restarting"
      start_validator "$ip_addr" &
      # TBD: Remove the sleep or reduce time once GCP login quota is increased
      sleep 2
    fi

    ((count = count + 1))
  done

  wait

  ((validator_count = count - 1))

  echo "Deployment finished at $(date)"
  echo "Leader deployment took $leader_time seconds"
  echo "$validator_count Validator deployment took $SECONDS seconds"
}

stop_all_nodes() {
  SECONDS=0
  local count=0
  for ip_addr in "${ip_addr_array[@]}"; do
    ssh-keygen -R "$ip_addr" >log/local.log
    ssh-keyscan "$ip_addr" >>~/.ssh/known_hosts 2>/dev/null

    echo "Stopping node[$count] $ip_addr. Remote user $remote_user"

    ssh -n -f "$remote_user@$ip_addr" " \
      set -ex; \
      sudo service sshguard stop; \
      pkill -9 solana-; \
      pkill -9 validator; \
      pkill -9 leader; \
    "
    sleep 2
    ((count = count + 1))
    echo "Stopped node[$count] $ip_addr"
  done
  echo "Stopping $count nodes took $SECONDS seconds"
}

if [[ $command == "start" ]]; then
  build_project
  stop_all_nodes
  start_all_nodes
elif [[ $command == "stop" ]]; then
  stop_all_nodes
else
  usage "Unknown command: $command"
fi
17
multinode-demo/remote_validator.sh
Executable file
@@ -0,0 +1,17 @@
#!/bin/bash -e

[[ -n $FORCE ]] || exit

chmod 600 ~/.ssh/authorized_keys ~/.ssh/id_rsa

PATH="$HOME"/.cargo/bin:"$PATH"

touch ~/.ssh/known_hosts
ssh-keygen -R "$1" 2>/dev/null
ssh-keyscan "$1" >>~/.ssh/known_hosts 2>/dev/null

rsync -vPrz "$1":~/.cargo/bin/solana* ~/.cargo/bin/

# Run setup
USE_INSTALL=1 ./multinode-demo/setup.sh -p
USE_INSTALL=1 ./multinode-demo/validator.sh "$1":~/solana "$1" >validator.log 2>&1
110
multinode-demo/setup.sh
Executable file
@@ -0,0 +1,110 @@
#!/bin/bash

here=$(dirname "$0")
# shellcheck source=multinode-demo/common.sh
source "$here"/common.sh

usage () {
  exitcode=0
  if [[ -n "$1" ]]; then
    exitcode=1
    echo "Error: $*"
  fi
  cat <<EOF
usage: $0 [-n num_tokens] [-l] [-p] [-t node_type]

Creates a fullnode configuration

 -n num_tokens  - Number of tokens to create
 -l             - Detect network address from local machine configuration, which
                  may be a private IP address inaccessible on the Internet (default)
 -p             - Detect public address using public Internet servers
 -t node_type   - Create configuration files only for this kind of node. Valid
                  options are validator or leader. Creates configuration files
                  for both by default

EOF
  exit $exitcode
}

ip_address_arg=-l
num_tokens=1000000000
node_type_leader=true
node_type_validator=true
while getopts "h?n:lpt:" opt; do
  case $opt in
  h|\?)
    usage
    exit 0
    ;;
  l)
    ip_address_arg=-l
    ;;
  p)
    ip_address_arg=-p
    ;;
  n)
    num_tokens="$OPTARG"
    ;;
  t)
    node_type="$OPTARG"
    case $OPTARG in
    leader)
      node_type_leader=true
      node_type_validator=false
      ;;
    validator)
      node_type_leader=false
      node_type_validator=true
      ;;
    *)
      usage "Error: unknown node type: $node_type"
      ;;
    esac
    ;;
  *)
    usage "Error: unhandled option: $opt"
    ;;
  esac
done


leader_address_args=("$ip_address_arg")
validator_address_args=("$ip_address_arg" -b 9000)
leader_id_path="$SOLANA_CONFIG_PRIVATE_DIR"/leader-id.json
validator_id_path="$SOLANA_CONFIG_PRIVATE_DIR"/validator-id.json
mint_path="$SOLANA_CONFIG_PRIVATE_DIR"/mint.json

set -e

for i in "$SOLANA_CONFIG_DIR" "$SOLANA_CONFIG_PRIVATE_DIR" "$SOLANA_CONFIG_VALIDATOR_DIR"; do
  echo "Cleaning $i"
  rm -rvf "$i"
  mkdir -p "$i"
done


$solana_keygen -o "$leader_id_path"
$solana_keygen -o "$validator_id_path"

if $node_type_leader; then
  echo "Creating $mint_path with $num_tokens tokens"
  $solana_keygen -o "$mint_path"

  echo "Creating $SOLANA_CONFIG_DIR/ledger"
  $solana_genesis --tokens="$num_tokens" --ledger "$SOLANA_CONFIG_DIR"/ledger < "$mint_path"

  echo "Creating $SOLANA_CONFIG_DIR/leader.json"
  $solana_fullnode_config --keypair="$leader_id_path" "${leader_address_args[@]}" > "$SOLANA_CONFIG_DIR"/leader.json
fi


if $node_type_validator; then
  echo "Creating $SOLANA_CONFIG_VALIDATOR_DIR/validator.json"
  $solana_fullnode_config --keypair="$validator_id_path" "${validator_address_args[@]}" > "$SOLANA_CONFIG_VALIDATOR_DIR"/validator.json
fi

ls -lhR "$SOLANA_CONFIG_DIR"/
if $node_type_leader; then
  ls -lhR "$SOLANA_CONFIG_PRIVATE_DIR"
fi
47
multinode-demo/test/wallet-sanity.sh
Executable file
@@ -0,0 +1,47 @@
#!/bin/bash -e
#
# Wallet sanity test
#

here=$(dirname "$0")
cd "$here"

if [[ -n "$USE_SNAP" ]]; then
  # TODO: Merge wallet.sh functionality into solana-wallet proper and
  # remove this USE_SNAP case
  wallet="solana.wallet $1"
else
  wallet="../wallet.sh $1"
fi

# Tokens transferred to this address are lost forever...
garbage_address=vS3ngn1TfQmpsW1Z4NkLuqNAQFF3dYQw8UZ6TCx9bmq

check_balance_output() {
  declare expected_output="$1"
  exec 42>&1
  output=$($wallet balance | tee >(cat - >&42))
  if [[ ! "$output" =~ $expected_output ]]; then
    echo "Balance is incorrect. Expected: $expected_output"
    exit 1
  fi
}

pay_and_confirm() {
  exec 42>&1
  signature=$($wallet pay "$@" | tee >(cat - >&42))
  $wallet confirm "$signature"
}

$wallet reset
$wallet address
check_balance_output "Your balance is: 0"
$wallet airdrop --tokens 60
check_balance_output "Your balance is: 60"
$wallet airdrop --tokens 40
check_balance_output "Your balance is: 100"
pay_and_confirm --to $garbage_address --tokens 99
check_balance_output "Your balance is: 1"

echo PASS
exit 0
4
multinode-demo/validator-x.sh
Executable file
@@ -0,0 +1,4 @@
#!/bin/bash
here=$(dirname "$0")

exec "$here"/validator.sh -x "$@"
111
multinode-demo/validator.sh
Executable file
@@ -0,0 +1,111 @@
#!/bin/bash
here=$(dirname "$0")
# shellcheck source=multinode-demo/common.sh
source "$here"/common.sh

usage() {
  if [[ -n $1 ]]; then
    echo "$*"
    echo
  fi
  echo "usage: $0 [-x] [rsync network path to solana repo on leader machine] [network ip address of leader]"
  echo ""
  echo " -x: runs a new, dynamically-configured validator"
  exit 1
}

if [[ $1 = -h ]]; then
  usage
fi

if [[ $1 == -x ]]; then
  self_setup=1
  shift
else
  self_setup=0
fi

if [[ -n $3 ]]; then
  usage
fi

if [[ -d $SNAP ]]; then
  # Exit if mode is not yet configured
  # (typically the case after the Snap is first installed)
  [[ -n $(snapctl get mode) ]] || exit 0

  # Select leader from the Snap configuration
  leader_address=$(snapctl get leader-address)
  if [[ -z $leader_address ]]; then
    # Assume public testnet by default
    leader_address=35.227.93.37 # testnet.solana.com
  fi
  leader=$leader_address
else
  if [[ -z $1 ]]; then
    leader=${1:-${here}/..}        # Default to local tree for data
    leader_address=${2:-127.0.0.1} # Default to local leader
  elif [[ -z $2 ]]; then
    leader=$1
    leader_address=$(dig +short "${leader%:*}" | head -n1)
    if [[ -z $leader_address ]]; then
      usage "Error: unable to resolve IP address for $leader"
    fi
  else
    leader=$1
    leader_address=$2
  fi
fi
leader_port=8001

if [[ -n $SOLANA_CUDA ]]; then
  program=$solana_fullnode_cuda
else
  program=$solana_fullnode
fi

if ((!self_setup)); then
  [[ -f $SOLANA_CONFIG_VALIDATOR_DIR/validator.json ]] || {
    echo "$SOLANA_CONFIG_VALIDATOR_DIR/validator.json not found, create it by running:"
    echo
    echo "  ${here}/setup.sh"
    exit 1
  }
  validator_json_path=$SOLANA_CONFIG_VALIDATOR_DIR/validator.json
  SOLANA_LEADER_CONFIG_DIR=$SOLANA_CONFIG_VALIDATOR_DIR/leader-config
else
  mkdir -p "$SOLANA_CONFIG_PRIVATE_DIR"
  validator_id_path=$SOLANA_CONFIG_PRIVATE_DIR/validator-id-x$$.json
  $solana_keygen -o "$validator_id_path"

  mkdir -p "$SOLANA_CONFIG_VALIDATOR_DIR"
  validator_json_path=$SOLANA_CONFIG_VALIDATOR_DIR/validator-x$$.json

  # Derive a per-process port in the 9001-9999 range from this shell's PID so
  # multiple dynamic validators on one host don't collide
  port=9000
  (((port += ($$ % 1000)) && (port == 9000) && port++))

  $solana_fullnode_config --keypair="$validator_id_path" -l -b "$port" > "$validator_json_path"

  SOLANA_LEADER_CONFIG_DIR=$SOLANA_CONFIG_VALIDATOR_DIR/leader-config-x$$
fi

rsync_leader_url=$(rsync_url "$leader")

tune_networking

set -ex
$rsync -vPr "$rsync_leader_url"/config/ "$SOLANA_LEADER_CONFIG_DIR"
[[ -d $SOLANA_LEADER_CONFIG_DIR/ledger ]] || {
  echo "Unable to retrieve ledger from $rsync_leader_url"
  exit 1
}

trap 'kill "$pid" && wait "$pid"' INT TERM
$program \
  --identity "$validator_json_path" \
  --testnet "$leader_address:$leader_port" \
  --ledger "$SOLANA_LEADER_CONFIG_DIR"/ledger \
  > >($validator_logger) 2>&1 &
pid=$!
oom_score_adj "$pid" 1000
wait "$pid"
45
multinode-demo/wallet.sh
Executable file
@@ -0,0 +1,45 @@
#!/bin/bash
#
# usage: $0 <rsync network path to solana repo on leader machine>
#

here=$(dirname "$0")
# shellcheck source=multinode-demo/common.sh
source "$here"/common.sh

# If $1 looks like a host:path, a domain name, or a valid local path,
# interpret it as the leader
if [[ ${1%:} != "$1" || "$1" =~ [^.]\.[^.] || -d $1 ]]; then
  leader=$1
  shift
else
  if [[ -d "$SNAP" ]]; then
    leader=testnet.solana.com # Default to testnet when running as a Snap
  else
    leader=$here/.. # Default to local solana repo
  fi
fi

if [[ "$1" = "reset" ]]; then
  echo Wallet resetting
  rm -rf "$SOLANA_CONFIG_CLIENT_DIR"
  exit 0
fi

rsync_leader_url=$(rsync_url "$leader")

set -e
mkdir -p "$SOLANA_CONFIG_CLIENT_DIR"
if [[ ! -r "$SOLANA_CONFIG_CLIENT_DIR"/leader.json ]]; then
  echo "Fetching leader configuration from $rsync_leader_url"
  $rsync -Pz "$rsync_leader_url"/config/leader.json "$SOLANA_CONFIG_CLIENT_DIR"/
fi

client_id_path="$SOLANA_CONFIG_CLIENT_DIR"/id.json
if [[ ! -r $client_id_path ]]; then
  echo "Generating client identity: $client_id_path"
  $solana_keygen -o "$client_id_path"
fi

# shellcheck disable=SC2086 # $solana_wallet should not be quoted
exec $solana_wallet \
  -l "$SOLANA_CONFIG_CLIENT_DIR"/leader.json -k "$client_id_path" "$@"
190
rfcs/rfc-001-smart-contracts-engine.md
Normal file
@@ -0,0 +1,190 @@
# Smart Contracts Engine

The goal of this RFC is to define a set of constraints for APIs and runtime such that we can execute our smart contracts safely on massively parallel hardware such as a GPU. Our runtime is built around an OS *syscall* primitive. The difference in blockchain is that now the OS does a cryptographic check of memory region ownership before accessing the memory in the Solana kernel.

## Version

version 0.1

## Toolchain Stack

     +---------------------+       +---------------------+
     |                     |       |                     |
     |   +------------+    |       |   +------------+    |
     |   |            |    |       |   |            |    |
     |   |  frontend  |    |       |   |  verifier  |    |
     |   |            |    |       |   |            |    |
     |   +-----+------+    |       |   +-----+------+    |
     |         |           |       |         |           |
     |         |           |       |         |           |
     |   +-----+------+    |       |   +-----+------+    |
     |   |            |    |       |   |            |    |
     |   |    llvm    |    |       |   |   loader   |    |
     |   |            |    +------>+   |            |    |
     |   +-----+------+    |       |   +-----+------+    |
     |         |           |       |         |           |
     |         |           |       |         |           |
     |   +-----+------+    |       |   +-----+------+    |
     |   |            |    |       |   |            |    |
     |   |    ELF     |    |       |   |  runtime   |    |
     |   |            |    |       |   |            |    |
     |   +------------+    |       |   +------------+    |
     |                     |       |                     |
     |       client        |       |       solana        |
     +---------------------+       +---------------------+

[Figure 1. Smart Contracts Stack]

In Figure 1 an untrusted client creates a program in the front-end language of her choice (like C/C++/Rust/Lua), and compiles it with LLVM to a position independent shared object ELF, targeting BPF bytecode. Solana will safely load and execute the ELF.

## Bytecode

Our bytecode is based on Berkeley Packet Filter. The requirements for BPF overlap almost exactly with the requirements we have:

1. Deterministic amount of time to execute the code
2. Bytecode that is portable between machine instruction sets
3. Verified memory accesses
4. Fast to load the object, verify the bytecode and JIT to local machine instruction set

For 1, that means that loops are unrolled, and for any jumps back we can guard them with a check against the number of instructions that have been executed at this point. If the limit is reached, the program yields its execution. This involves saving the stack and current instruction index.

For 2, the BPF bytecode already easily maps to x86-64, arm64 and other instruction sets.

For 3, every load and store that is relative can be checked to be within the expected memory that is passed into the ELF. Dynamic loads and stores can do a runtime check against available memory; these will be slow and should be avoided.

For 4, we use a fully linked PIC ELF with just a single RX segment. Effectively we are linking a shared object with `-fpic -target bpf` and with a linker script to collect everything into a single RX segment. Writable globals are not supported.

### Address Checks

The interface to the module takes a `&mut Vec<Vec<u8>>` in Rust, or `int sz, void* data[sz], int szs[sz]` in C. Given the module's bytecode, for each method we need to analyze the bounds on loads and stores into each buffer the module uses. This check needs to be done `on chain`, and after those bounds are computed we can verify that the user-supplied array of buffers will not cause a memory fault. For loads and stores that we cannot analyze, we can replace them with `safe_load` and `safe_store` instructions that will check the table for access.

## Loader
The loader is our first smart contract. The job of this contract is to load the actual program with its own instance data. The loader will verify the bytecode and that the object implements the expected entry points.

Since there is only one RX segment, the context for the contract instance is passed into each entry point as well as the event data for that entry point.

A client will create a transaction to create a new loader instance:

`Solana_NewLoader(Loader Instance PubKey, proof of key ownership, space I need for my elf)`

A client will then do a bunch of transactions to load its elf into the loader instance they created:

`Loader_UploadElf(Loader Instance PubKey, proof of key ownership, pos start, pos end, data)`

At this point the client can create a new instance of the module with its own instance address:

`Loader_NewInstance(Loader Instance PubKey, proof of key ownership, Instance PubKey, proof of key ownership)`

Once the instance has been created, the client may need to upload more user data to solana to configure this instance:

`Instance_UploadModuleData(Instance PubKey, proof of key ownership, pos start, pos end, data)`

Now clients can `start` the instance:

`Instance_Start(Instance PubKey, proof of key ownership)`

## Runtime

Our goal with the runtime is to have a general purpose execution environment that is highly parallelizable and doesn't require dynamic resource management. We want to execute as many contracts as we can in parallel, and have them pass or fail without a destructive state change.

### State and Entry Point

State is addressed by an account which is at the moment simply the PubKey. Our goal is to eliminate dynamic memory allocation in the smart contract itself, so the contract is a function that takes a mapping of [(PubKey,State)] and returns [(PubKey, State')]. The output of keys is a subset of the input. Three basic kinds of state exist:

* Instance State
* Participant State
* Caller State

There isn't any difference in how each is implemented, but conceptually Participant State is memory that is allocated for each participant in the contract. Instance State is memory that is allocated for the contract itself, and Caller State is memory that the transaction's caller has allocated.


### Call

```
void call(
    const struct instance_data *data,
    const uint8_t kind[],  //instance|participant|caller|read|write
    const uint8_t *keys[],
    uint8_t *data[],
    int num,
    uint8_t dirty[],       //dirty memory bits
    uint8_t *userdata      //current transaction data
);
```

To call this operation, the transaction that is destined to the contract instance specifies what keyed state it should present to the `call` function. To allocate the state memory or a call context, the client has to first call a function on the contract with the designated address that will own the state.

At its core, this is a system call that requires cryptographic proof of ownership of memory regions instead of an OS that checks page tables for access rights.

* `Instance_AllocateContext(Instance PubKey, My PubKey, Proof of key ownership)`

Any transaction can then call `call` on the contract with a set of keys. It's up to the contract itself to manage ownership:

* `Instance_Call(Instance PubKey, [Context PubKeys], proofs of ownership, userdata...)`

Contracts should be able to read any state that is part of solana, but only write to state that the contract allocated.

#### Caller State

Caller `state` is memory allocated for the `call` that belongs to the public key that is issuing the `call`. This is the caller's context.

#### Instance State

Instance `state` is memory that belongs to this contract instance. We may also need module-wide `state` as well.

#### Participant State

Participant `state` is any other memory. In some cases it may make sense to have these allocated as part of the call by the caller.

### Reduce

Some operations on the contract will require iteration over all the keys. To make this parallelizable the iteration is broken up into reduce calls which are combined.

```
void reduce_m(
    const struct instance_data *data,
    const uint8_t *keys[],
    const uint8_t *data[],
    int num,
    uint8_t *reduce_data
);

void reduce_r(
    const struct instance_data *data,
    const uint8_t *reduce_data[],
    int num,
    uint8_t *reduce_data
);
```

### Execution

Transactions are batched and processed in parallel at each stage.
```
+-----------+    +--------------+       +-----------+    +---------------+
| sigverify |-+->| debit commit |---+-->| execution |-+->| memory commit |
+-----------+ |  +--------------+   |   +-----------+ |  +---------------+
              |                     |                 |
              |  +---------------+  |                 |  +------------+
              +->| memory verify |->+                 +->| debit undo |
                 +---------------+                    |  +------------+
                                                      |
                                                      |  +---------------+
                                                      +->| credit commit |
                                                         +---------------+
```
The `debit verify` stage is very similar to `memory verify`. Proof of key ownership is used to check if the caller's key has some state allocated with the contract, then the memory is loaded and executed. After the execution stage, the dirty pages are written back by the contract. Because we know all the memory accesses during execution, we can batch transactions that do not interfere with each other. We can also apply the `debit undo` and `credit commit` stages of the transaction. `debit undo` is run in case of an exception during contract execution; only transfers may be reversed, fees are committed to solana.

### GPU execution

A single contract can read and write to separate key pairs without interference. These separate calls to the same contract can execute on the same GPU thread over different memory using different SIMD lanes.

## Notes

1. There is no dynamic memory allocation.
2. Persistent memory is allocated to a Key with ownership
3. Contracts can `call` to update key owned state
4. Contracts can `reduce` over the memory to aggregate state
5. `call` is just a *syscall* that does a cryptographic check of memory ownership
122
rfcs/rfc-002-consensus.md
Normal file
@@ -0,0 +1,122 @@
# Consensus

VERY WIP

The goal of this RFC is to define the consensus algorithm used in solana. This proposal covers a Proof of Stake algorithm that leverages Proof of History. PoH is a permissionless clock for blockchain that is available before consensus. This PoS approach leverages PoH to make strong assumptions about time between partitions.

## Version

version 0.1

## Message Flow

1. Transactions are ingested at the leader.
2. Leader filters for valid transactions.
3. Leader executes valid transactions on its state.
4. Leader packages transactions into blobs.
5. Leader transmits blobs to validator nodes.
   a. The set of supermajority + `M` by stake weight of nodes is rotated in round robin fashion.
6. Validators retransmit blobs to peers in their set and to further downstream nodes.
7. Validators validate the transactions and execute them on their state.
8. Validators compute the hash of the state.
9. Validators transmit votes to the leader.
   a. Votes are signatures of the hash of the computed state.
10. Leader executes the votes as any other transaction and broadcasts them out to the network.
11. Validators observe their votes, and all the votes from the network.
12. Validators continue voting if the supermajority of stake is observed in the vote for the same hash.

Supermajority is defined as `2/3rds + 1` vote of the PoS stakes.

## Staking

Validators `stake` some of their spendable sol into a staking account. The stakes are not spendable and can only be used for voting.

```
CreateStake(
    PoH count,
    PoH hash,
    source public key,
    amount,
    destination public key,
    proof of ownership of destination public key,
    signature of the message with the source keypair
)
```

Creating the stake has a warmup period of TBD. Unstaking requires the node to miss a certain amount of validation votes.

## Validation Votes

```
Validate(
    PoH count,
    PoH hash,
    stake public key,
    signature of the state,
    signature of the message with the stake keypair
)
```

## Validator Slashing

Validators `stake` some of their spendable sol into a staking account. The stakes are not spendable and can only be used for voting.

```
Slash(Validate(
    PoH count,
    PoH hash,
    stake public key,
    ...
    signature of the message with the stake keypair
))
```

When the `Slash` vote is processed, validators should look up the `PoH hash` at `PoH count` and compare it with the message. If they do not match, the stake at `stake public key` should be set to `0`.

## Leader Slashing

TBD. The goal of this is to discourage leaders from generating multiple PoH streams.

## Validation Vote Contract

The goal of this contract is to simulate the economic cost of mining on a shorter branch.

1. With my signature I am certifying that I computed `state hash` at `PoH count` and `PoH hash`.
2. I will not vote on a branch that doesn't contain this message for at least `N` counts, or until `PoH count` + `N` is reached by the PoH stream.
3. I will not vote for any other branch below `PoH count`.
   a. if there are other votes not present in this PoH history the validator may need to `cancel` them before creating this vote.

## Leader Seed Generation

Leader selection is decided via a random seed. The process is as follows (a shell sketch follows the list):

1. Periodically at a specific `PoH count`, select the first vote signatures that create a supermajority from the previous round.
2. Append them together.
3. Hash the string for `N` counts via a similar process as PoH itself.
4. The resulting hash is the random seed for `M` counts, where M > N

## Leader Ranking and Rotation

Leaders transmit for a count of `T`. When `T` is reached all the validators should switch to the next ranked leader. To rank leaders, the supermajority + `M` nodes are shuffled using the above calculated random seed.

TBD: define a ranking for critical partitions without a node from the supermajority + `M` set.

## Partition selection

Validators should select the first branch to reach finality, or the highest ranking leader.

## Examples

### Small Partition
1. Network partition M occurs for 10% of the nodes
2. The larger partition K, with 90% of the stake weight, continues to operate as normal
3. M cycles through the ranks until one of them is leader.
4. M validators observe 10% of the vote pool, finality is not reached
5. M and K re-connect.
6. M validators cancel their votes on K which are below K's `PoH count`

### Leader Timeout
1. Next rank node observes a timeout.
2. Nodes receiving both PoH streams pick the higher rank node.
3. Step 2 causes a partition, since nodes can only vote for one leader.
4. The partition is resolved just like in the [Small Partition](#small-partition) example.
54
rfcs/rfc-003-storage.md
Normal file
@@ -0,0 +1,54 @@
# Storage

The goal of this RFC is to define a protocol for storing a very large ledger over a p2p network that is verified by Solana validators. At full capacity on a 1 Gbps network, Solana will generate 4 petabytes of data per year. To prevent the network from centralizing around full nodes that have to store the full data set, this protocol proposes a way for mining nodes to provide storage capacity for pieces of the network.

# Version

version 0.1

# Background

The basic idea behind Proof of Replication is to encrypt a dataset with a public symmetric key using CBC encryption, then hash the encrypted dataset. The main problem with the naive approach is that a dishonest storage node can stream the encryption and delete the data as it is hashed. The simple solution is to force the hash to be done on the reverse of the encryption, or perhaps with a random order. This ensures that all the data is present during the generation of the proof, but it also requires the validator to have the entirety of the encrypted data present for verification of every proof of every identity. So the space required to validate is `(Number of Proofs) * (data size)`.
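The forced ordering is easiest to see in code. A conceptual sketch of the naive scheme, where `cbc_encrypt` and `chain_hash` are hypothetical placeholders for the real primitives:

```
/// Encrypt the dataset, then hash the encrypted blocks in *reverse* order,
/// so the prover must hold the entire encrypted dataset before it can
/// produce the proof.
fn naive_porep(blocks: &[Vec<u8>], key: &[u8; 32]) -> [u8; 32] {
    let encrypted: Vec<Vec<u8>> = cbc_encrypt(blocks, key); // hypothetical
    let mut digest = [0u8; 32];
    for block in encrypted.iter().rev() {
        digest = chain_hash(&digest, block); // hypothetical chained hash
    }
    digest
}
```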
# Optimization with PoH

Our improvement on this approach is to randomly sample the encrypted blocks faster than it takes to encrypt, and to record the hash of those samples into the PoH ledger. Thus the blocks stay in the exact same order for every PoRep, and verification can stream the data and verify all the proofs in a single batch. This way we can verify multiple proofs concurrently, each one on its own CUDA core. With the current generation of graphics cards our network can support up to 14k replication identities or symmetric keys. The total space required for verification is `(2 CBC blocks) * (Number of Identities)`, with a core count equal to `(Number of Identities)`. A CBC block is expected to be 1MB in size.
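A sketch of the sampled variant, again with hypothetical helpers: rather than hashing everything in reverse, hash only seed-selected samples of the encrypted blocks and record the digest into the PoH ledger:

```
/// Hash seed-selected samples of the already-encrypted blocks. The verifier
/// can replay the same selection while streaming the data in a single pass.
fn sampled_porep(encrypted: &[Vec<u8>], seed: [u8; 32], samples: usize) -> [u8; 32] {
    let mut digest = seed;
    for i in 0..samples {
        let idx = sample_index(&seed, i, encrypted.len()); // hypothetical
        digest = chain_hash(&digest, &encrypted[idx]);     // hypothetical
    }
    digest
}
```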
# Network

Validators for PoRep are the same validators that are verifying transactions. They have put up some stake as collateral that ensures that their work is honest. If you can prove that a validator verified a fake PoRep, then the validator's stake can be slashed.

Replicators are specialized thin clients. They download a part of the ledger, store it, and provide PoReps of storing the ledger. For each verified PoRep, replicators earn a reward of sol from the mining pool.

# Constraints

We have the following constraints:

* At most 14k replication identities can be used, because that's how many CUDA cores we can fit in a $5k box at the moment.
* Verification requires generating the CBC blocks. That requires space of 2 blocks per identity, and 1 CUDA core per identity for the same dataset. So identities should be batched, with as many proofs for those identities as possible verified concurrently for the same dataset.

# Validation and Replication Protocol

1. The network sets the replication target number, let's say 1k. 1k PoRep identities are created from signatures of a PoH hash, so they are tied to a specific PoH hash. It doesn't matter who creates them; they could simply be the last 1k validation signatures seen for the ledger at that count. This may be just the initial batch of identities, because we want to stagger identity rotation.
2. Any client can use any of these identities to create PoRep proofs. Replicator identities are the CBC encryption keys.
3. Periodically, at a specific PoH count, replicators that want to create PoRep proofs sign the PoH hash at that count. That signature is the seed used to pick the block and identity to replicate. A block is 1TB of ledger.
4. Periodically, at a specific PoH count, a replicator submits PoRep proofs for its selected block. A signature of the PoH hash at that count is the seed used to sample the 1TB encrypted block and hash it. This is done faster than it takes to encrypt the 1TB block with the original identity.
5. Replicators must submit some number of fake proofs, which they can prove to be fake by providing the seed for the hash result.
6. Periodically, at a specific PoH count, validators sign the hash and use the signature to select the 1TB block that they need to validate. They batch all the identities and proofs and submit approval for all the verified ones.
7. After #6, replicator clients submit proof that their fake proofs were fake.

For any random seed, we force everyone to use a signature that is derived from a PoH hash. Everyone must use the same count, so the same PoH hash is signed by every participant. The signatures are then each cryptographically tied to the keypair, which prevents a leader from grinding on the resulting value for more than 1 identity.
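A sketch of that rule, with `Keypair`, `sign`, and `chain_hash` standing in for the real primitives: the only input a participant controls is its keypair, and one keypair yields exactly one signature over the agreed PoH hash, so there is nothing to grind on:

```
/// Derive the block-selection seed from a signature over the agreed PoH hash.
fn selection_seed(keypair: &Keypair, poh_hash: &[u8; 32]) -> u64 {
    let sig = sign(keypair, poh_hash);          // one signature per keypair (hypothetical)
    let digest = chain_hash(&[0u8; 32], &sig);  // reduce to a fixed-size seed (hypothetical)
    u64::from_le_bytes(digest[..8].try_into().unwrap())
}

/// Pick which 1TB block this identity must replicate or verify.
fn pick_block(seed: u64, num_blocks: u64) -> u64 {
    seed % num_blocks
}
```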
We need to stagger the rotation of the identity keys. Once this gets going, the next identity could be generated by hashing itself with a PoH hash, or via some other process based on the validation signatures.

Since there are many more client identities than encryption identities, we need to split the reward among multiple clients and prevent Sybil attacks from generating many clients to acquire the same block of data. To remain BFT, we want to prevent a single human entity from storing all the replications of a single chunk of the ledger.

Our solution to this is to force the clients to continue using the same identity. If the first round is used to acquire the same block for many client identities, the second round for the same client identities will force a redistribution of the signatures, and therefore of the PoRep identities and blocks. Thus, to get a reward for storage, clients need to store the first block for free, and the network can reward long-lived client identities more than new ones.

# Notes

* We can reduce the cost of verification of PoRep by using PoH, and actually make it feasible to verify a large number of proofs for a global dataset.
* We can eliminate grinding by forcing everyone to sign the same PoH hash and use the signatures as the seed.
* The game between validators and replicators is over random blocks, random encryption identities, and random data samples. The goal of randomization is to prevent colluding groups from having overlap on data or validation.
* Replicator clients fish for lazy validators by submitting fake proofs that they can prove are fake.
* Replication identities are just symmetric encryption keys; the number of them on the network is our storage replication target. Many more client identities can exist than replicator identities, so an unlimited number of clients can provide proofs of the same replicator identity.
* To defend against Sybil client identities that try to store the same block, we force the clients to store for multiple rounds before receiving a reward.

rfcs/rfc-004-tictactoe-program.md (new file, 77 lines)
@@ -0,0 +1,77 @@
Two players want to play tic-tac-toe with each other on Solana.

The tic-tac-toe program has already been provisioned on the network, and the
program author has advertised the following information to potential gamers:
* `tictactoe_publickey` - the program's public key
* `tictactoe_gamestate_size` - the number of bytes needed to maintain the game state

The game state is a well-documented data structure consisting of the following fields (a sketch in code follows the list):
- Player 1's public key
- Player 2's public key
- Game status. An 8-bit value where:
  * 0 = game uninitialized
  * 1 = Player 1's turn
  * 2 = Player 2's turn
  * 3 = Player 1 won
  * 4 = Player 2 won
- Current board configuration. A 3x3 character array containing the values '\0', 'X' or 'O'
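A sketch of how those fields might be laid out in Rust; the RFC fixes only the field meanings, not this exact layout or naming:

```
#[repr(u8)]
enum GameStatus {
    Uninitialized = 0, // game uninitialized
    Player1Turn = 1,
    Player2Turn = 2,
    Player1Won = 3,
    Player2Won = 4,
}

struct GameState {
    player1_publickey: [u8; 32], // Player 1's public key
    player2_publickey: [u8; 32], // Player 2's public key
    status: GameStatus,          // the 8-bit game status value
    board: [[u8; 3]; 3],         // each cell holds b'\0', b'X' or b'O'
}
```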
### Game Setup

1. Two players want to start a game. Player 2 sends Player 1 their public key,
`player2_publickey`, off-chain (IM, email, etc.)

2. Player 1 creates a new keypair to represent the game state, `(gamestate_publickey,
gamestate_privatekey)`.

3. Player 1 issues an allocate_memory transaction, assigning that memory page to the
tic-tac-toe program. The `memory_fee` is used to *rent* the memory page for the
duration of the game and is subtracted from the current account balance of Player 1:
```
allocate_memory(gamestate_publickey, tictactoe_publickey, tictactoe_gamestate_size, memory_fee)
```

4. The game state is then initialized by issuing a *new* call transaction to the
tic-tac-toe program. This transaction is signed by `gamestate_privatekey`, known only
to Player 1.
```
call(tictactoe_publickey, gamestate_publickey, 'new', player1_publickey, player2_publickey)
```

5. Once the game is initialized, Player 1 shares `gamestate_publickey` with
Player 2 off-chain (IM, email, etc.)

Note that it's likely each player will prefer to generate a game-specific keypair
rather than sharing their primary public key (`player1_publickey`,
`player2_publickey`) with each other and the tic-tac-toe program.

### Game Play

Both players poll the network, via a **TBD off-chain RPC API**, to read the
current game state from the `gamestate_publickey` memory page.

When the *Game status* field indicates it's their turn, the player issues a
*move* call transaction passing in the board position (1..9) that they want to
mark as X or O:
```
call(tictactoe_publickey, gamestate_publickey, 'move', position)
```
The program will reject the transaction if it was not signed by the player whose
turn it is.

The outcome of the *move* call is also observed by polling the current game state via
the **TBD off-chain RPC API**.

### Game Cancellation

At any time Player 1 may conclude the game by issuing:
```
call(tictactoe_publickey, gamestate_publickey, 'abort')
```
causing any remaining *rent* tokens assigned to the `gamestate_publickey` page
to be transferred back to Player 1 by the tic-tac-toe program. Lastly, the
network recognizes the empty account and frees the `gamestate_publickey` memory
page.

scripts/perf-plot.py (new executable file, 43 lines)
@@ -0,0 +1,43 @@
#!/usr/bin/env python

import matplotlib
matplotlib.use('Agg')

import matplotlib.pyplot as plt
import json
import sys

stages_to_counters = {}
stages_to_time = {}

if len(sys.argv) != 2:
    print("USAGE: {} <input file>".format(sys.argv[0]))
    sys.exit(1)

with open(sys.argv[1]) as fh:
    for line in fh.readlines():
        if "COUNTER" in line:
            # Each counter log line embeds a JSON payload; parse it out.
            json_part = line[line.find("{"):]
            x = json.loads(json_part)
            counter = x['name']
            if counter not in stages_to_counters:
                stages_to_counters[counter] = []
                stages_to_time[counter] = []
            stages_to_counters[counter].append(x['counts'])
            stages_to_time[counter].append(x['now'])

fig, ax = plt.subplots()

# Plot each pipeline stage's counter values over time on one chart.
for stage in stages_to_counters.keys():
    plt.plot(stages_to_time[stage], stages_to_counters[stage], label=stage)

plt.xlabel('ms')
plt.ylabel('count')

plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
           ncol=2, mode="expand", borderaxespad=0.)

plt.locator_params(axis='x', nbins=10)
plt.grid(True)

plt.savefig("perf.pdf")

snap/README.md (new file, 17 lines)
@@ -0,0 +1,17 @@
## Development

If you're running Ubuntu 16.04 and already have `snapcraft` installed, simply
run:
```
$ snapcraft
```

For other systems we provide a Docker image that can be used for snap
development:
```
$ ./ci/docker-run.sh solanalabs/snapcraft snapcraft -d
```

## Reference
* https://docs.snapcraft.io/

snap/hooks/configure (new vendored executable file, 39 lines)
@@ -0,0 +1,39 @@
#!/bin/bash -e

echo Stopping daemons
snapctl stop --disable solana.daemon-drone
snapctl stop --disable solana.daemon-leader
snapctl stop --disable solana.daemon-validator
snapctl stop --disable solana.daemon-oom-monitor

mode="$(snapctl get mode)"
if [[ -z "$mode" ]]; then
  exit 0
fi

num_tokens="$(snapctl get num-tokens)"
num_tokens="${num_tokens:+-n $num_tokens}"

setup_args="$(snapctl get setup-args)"

case $mode in
leader+drone)
  "$SNAP"/bin/setup.sh -t leader $num_tokens -p $setup_args
  snapctl start --enable solana.daemon-drone
  snapctl start --enable solana.daemon-leader
  ;;
leader)
  "$SNAP"/bin/setup.sh -t leader $num_tokens -p $setup_args
  snapctl start --enable solana.daemon-leader
  ;;
validator)
  "$SNAP"/bin/setup.sh -t validator -p $setup_args
  snapctl start --enable solana.daemon-validator
  ;;
*)
  echo "Error: Unknown mode: $mode"
  exit 1
  ;;
esac

snapctl start --enable solana.daemon-oom-monitor

snap/snapcraft.yaml (new file, 127 lines)
@@ -0,0 +1,127 @@
name: solana
version: git
summary: Blockchain, Rebuilt for Scale
description: |
  710,000 tx/s with off-the-shelf hardware and no sharding.
  Scales with Moore's Law.
grade: devel

# TODO: solana-perf-fullnode does not yet run with 'strict' confinement due to the
# CUDA dependency, so use 'devmode' confinement for now
confinement: devmode

hooks:
  configure:
    plugs: [network]

apps:
  drone:
    command: solana-drone
    plugs:
      - network
      - network-bind
  fullnode:
    command: solana-fullnode
    plugs:
      - network
      - network-bind
      - home
  fullnode-cuda:
    command: solana-fullnode-cuda
    plugs:
      - network
      - network-bind
      - home
  fullnode-config:
    command: solana-fullnode-config
    plugs:
      - network
      - network-bind
      - home
  genesis:
    command: solana-genesis
  keygen:
    command: solana-keygen
    plugs:
      - home
  ledger-tool:
    command: solana-ledger-tool
    plugs:
      - home
  bench-tps:
    # TODO: Merge client.sh functionality into solana-bench-tps proper
    command: client.sh
    #command: solana-bench-tps
    plugs:
      - network
      - network-bind
      - home
  wallet:
    # TODO: Merge wallet.sh functionality into solana-wallet proper
    command: wallet.sh
    #command: solana-wallet
    plugs:
      - network
      - home
  daemon-validator:
    daemon: simple
    command: validator.sh
    plugs:
      - network
      - network-bind
  daemon-leader:
    daemon: simple
    command: leader.sh
    plugs:
      - network
      - network-bind
  daemon-drone:
    daemon: simple
    command: drone.sh
    plugs:
      - network
      - network-bind
  daemon-oom-monitor:
    daemon: simple
    command: oom_monitor.sh
    plugs:
      - network

parts:
  solana:
    plugin: nil
    prime:
      - bin
      - usr/lib
    override-build: |
      # Install CUDA 9.2 runtime
      mkdir -p $SNAPCRAFT_PART_INSTALL/usr/lib/nvidia-396/
      mkdir -p $SNAPCRAFT_PART_INSTALL/usr/lib/x86_64-linux-gnu/
      cp -rav /usr/local/cuda-9.2/targets/x86_64-linux/lib/libcudart.so* $SNAPCRAFT_PART_INSTALL/usr/lib
      cp -rav /usr/lib/x86_64-linux-gnu/libcuda.so* $SNAPCRAFT_PART_INSTALL/usr/lib/x86_64-linux-gnu/
      cp -v /usr/lib/nvidia-396/libnvidia-fatbinaryloader.so* $SNAPCRAFT_PART_INSTALL/usr/lib/nvidia-396/

      # Build/install solana-fullnode-cuda
      ./fetch-perf-libs.sh
      cargo install --features=cuda --root $SNAPCRAFT_PART_INSTALL --bin solana-fullnode
      mv $SNAPCRAFT_PART_INSTALL/bin/solana-fullnode $SNAPCRAFT_PART_INSTALL
      rm -rf $SNAPCRAFT_PART_INSTALL/bin/*
      mv $SNAPCRAFT_PART_INSTALL/solana-fullnode $SNAPCRAFT_PART_INSTALL/bin/solana-fullnode-cuda
      mkdir -p $SNAPCRAFT_PART_INSTALL/usr/lib/
      cp -f libJerasure.so $SNAPCRAFT_PART_INSTALL/usr/lib/libJerasure.so.2
      cp -f libgf_complete.so $SNAPCRAFT_PART_INSTALL/usr/lib/libgf_complete.so.1

      # Build/install all other programs
      cargo install --root $SNAPCRAFT_PART_INSTALL --bins

      # Install multinode scripts
      mkdir -p $SNAPCRAFT_PART_INSTALL/bin
      cp -av multinode-demo/* $SNAPCRAFT_PART_INSTALL/bin/

      # TODO: build curl,rsync/multilog from source instead of sneaking it in from the host
      # system...
      set -x
      mkdir -p $SNAPCRAFT_PART_INSTALL/bin
      cp -av /usr/bin/curl $SNAPCRAFT_PART_INSTALL/bin/
      cp -av /usr/bin/multilog $SNAPCRAFT_PART_INSTALL/bin/
      cp -av /usr/bin/rsync $SNAPCRAFT_PART_INSTALL/bin/

src/accountant.rs (deleted, 399 lines)
@@ -1,399 +0,0 @@
//! The `accountant` module tracks client balances, and the progress of pending
//! transactions. It offers a high-level public API that signs transactions
//! on behalf of the caller, and a private low-level API for when they have
//! already been signed and verified.

use chrono::prelude::*;
use entry::Entry;
use event::Event;
use hash::Hash;
use historian::Historian;
use mint::Mint;
use plan::{Plan, Witness};
use recorder::Signal;
use signature::{KeyPair, PublicKey, Signature};
use std::collections::hash_map::Entry::Occupied;
use std::collections::{HashMap, HashSet};
use std::result;
use std::sync::mpsc::SendError;
use transaction::Transaction;

#[derive(Debug, PartialEq, Eq)]
pub enum AccountingError {
    InsufficientFunds,
    InvalidTransfer,
    InvalidTransferSignature,
    SendError,
}

pub type Result<T> = result::Result<T, AccountingError>;

/// Commit funds to the 'to' party.
fn complete_transaction(balances: &mut HashMap<PublicKey, i64>, plan: &Plan) {
    if let Plan::Pay(ref payment) = *plan {
        *balances.entry(payment.to).or_insert(0) += payment.tokens;
    }
}

pub struct Accountant {
    pub historian: Historian,
    pub balances: HashMap<PublicKey, i64>,
    pub first_id: Hash,
    pending: HashMap<Signature, Plan>,
    time_sources: HashSet<PublicKey>,
    last_time: DateTime<Utc>,
}

impl Accountant {
    /// Create an Accountant using an existing ledger.
    pub fn new_from_entries<I>(entries: I, ms_per_tick: Option<u64>) -> Self
    where
        I: IntoIterator<Item = Entry>,
    {
        let mut entries = entries.into_iter();

        // The first item in the ledger is required to be an entry with zero num_hashes,
        // which implies its id can be used as the ledger's seed.
        let entry0 = entries.next().unwrap();
        let start_hash = entry0.id;

        let hist = Historian::new(&start_hash, ms_per_tick);
        let mut acc = Accountant {
            historian: hist,
            balances: HashMap::new(),
            first_id: start_hash,
            pending: HashMap::new(),
            time_sources: HashSet::new(),
            last_time: Utc.timestamp(0, 0),
        };

        // The second item in the ledger is a special transaction where the to and from
        // fields are the same. That entry should be treated as a deposit, not a
        // transfer to oneself.
        let entry1 = entries.next().unwrap();
        acc.process_verified_event(&entry1.events[0], true).unwrap();

        for entry in entries {
            for event in entry.events {
                acc.process_verified_event(&event, false).unwrap();
            }
        }
        acc
    }

    /// Create an Accountant with only a Mint. Typically used by unit tests.
    pub fn new(mint: &Mint, ms_per_tick: Option<u64>) -> Self {
        Self::new_from_entries(mint.create_entries(), ms_per_tick)
    }

    fn is_deposit(allow_deposits: bool, from: &PublicKey, plan: &Plan) -> bool {
        if let Plan::Pay(ref payment) = *plan {
            allow_deposits && *from == payment.to
        } else {
            false
        }
    }

    /// Process and log the given Transaction.
    pub fn log_verified_transaction(&mut self, tr: Transaction) -> Result<()> {
        if self.get_balance(&tr.from).unwrap_or(0) < tr.tokens {
            return Err(AccountingError::InsufficientFunds);
        }

        self.process_verified_transaction(&tr, false)?;
        if let Err(SendError(_)) = self.historian
            .sender
            .send(Signal::Event(Event::Transaction(tr)))
        {
            return Err(AccountingError::SendError);
        }

        Ok(())
    }

    /// Verify and process the given Transaction.
    pub fn log_transaction(&mut self, tr: Transaction) -> Result<()> {
        if !tr.verify() {
            return Err(AccountingError::InvalidTransfer);
        }

        self.log_verified_transaction(tr)
    }

    /// Process a Transaction that has already been verified.
    fn process_verified_transaction(
        self: &mut Self,
        tr: &Transaction,
        allow_deposits: bool,
    ) -> Result<()> {
        if !self.historian.reserve_signature(&tr.sig) {
            return Err(AccountingError::InvalidTransferSignature);
        }

        if !Self::is_deposit(allow_deposits, &tr.from, &tr.plan) {
            if let Some(x) = self.balances.get_mut(&tr.from) {
                *x -= tr.tokens;
            }
        }

        let mut plan = tr.plan.clone();
        plan.apply_witness(&Witness::Timestamp(self.last_time));

        if plan.is_complete() {
            complete_transaction(&mut self.balances, &plan);
        } else {
            self.pending.insert(tr.sig, plan);
        }

        Ok(())
    }

    /// Process a Witness Signature that has already been verified.
    fn process_verified_sig(&mut self, from: PublicKey, tx_sig: Signature) -> Result<()> {
        if let Occupied(mut e) = self.pending.entry(tx_sig) {
            e.get_mut().apply_witness(&Witness::Signature(from));
            if e.get().is_complete() {
                complete_transaction(&mut self.balances, e.get());
                e.remove_entry();
            }
        };

        Ok(())
    }

    /// Process a Witness Timestamp that has already been verified.
    fn process_verified_timestamp(&mut self, from: PublicKey, dt: DateTime<Utc>) -> Result<()> {
        // If this is the first timestamp we've seen, it probably came from the genesis block,
        // so we'll trust it.
        if self.last_time == Utc.timestamp(0, 0) {
            self.time_sources.insert(from);
        }

        if self.time_sources.contains(&from) {
            if dt > self.last_time {
                self.last_time = dt;
            }
        } else {
            return Ok(());
        }

        // Check to see if any timelocked transactions can be completed.
        let mut completed = vec![];
        for (key, plan) in &mut self.pending {
            plan.apply_witness(&Witness::Timestamp(self.last_time));
            if plan.is_complete() {
                complete_transaction(&mut self.balances, plan);
                completed.push(key.clone());
            }
        }

        for key in completed {
            self.pending.remove(&key);
        }

        Ok(())
    }

    /// Process a Transaction or Witness that has already been verified.
    fn process_verified_event(self: &mut Self, event: &Event, allow_deposits: bool) -> Result<()> {
        match *event {
            Event::Transaction(ref tr) => self.process_verified_transaction(tr, allow_deposits),
            Event::Signature { from, tx_sig, .. } => self.process_verified_sig(from, tx_sig),
            Event::Timestamp { from, dt, .. } => self.process_verified_timestamp(from, dt),
        }
    }

    /// Create, sign, and process a Transaction from `keypair` to `to` of
    /// `n` tokens where `last_id` is the last Entry ID observed by the client.
    pub fn transfer(
        self: &mut Self,
        n: i64,
        keypair: &KeyPair,
        to: PublicKey,
        last_id: Hash,
    ) -> Result<Signature> {
        let tr = Transaction::new(keypair, to, n, last_id);
        let sig = tr.sig;
        self.log_transaction(tr).map(|_| sig)
    }

    /// Create, sign, and process a postdated Transaction from `keypair`
    /// to `to` of `n` tokens on `dt` where `last_id` is the last Entry ID
    /// observed by the client.
    pub fn transfer_on_date(
        self: &mut Self,
        n: i64,
        keypair: &KeyPair,
        to: PublicKey,
        dt: DateTime<Utc>,
        last_id: Hash,
    ) -> Result<Signature> {
        let tr = Transaction::new_on_date(keypair, to, dt, n, last_id);
        let sig = tr.sig;
        self.log_transaction(tr).map(|_| sig)
    }

    pub fn get_balance(self: &Self, pubkey: &PublicKey) -> Option<i64> {
        self.balances.get(pubkey).cloned()
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use recorder::ExitReason;
    use signature::KeyPairUtil;

    #[test]
    fn test_accountant() {
        let alice = Mint::new(10_000);
        let bob_pubkey = KeyPair::new().pubkey();
        let mut acc = Accountant::new(&alice, Some(2));
        acc.transfer(1_000, &alice.keypair(), bob_pubkey, alice.seed())
            .unwrap();
        assert_eq!(acc.get_balance(&bob_pubkey).unwrap(), 1_000);

        acc.transfer(500, &alice.keypair(), bob_pubkey, alice.seed())
            .unwrap();
        assert_eq!(acc.get_balance(&bob_pubkey).unwrap(), 1_500);

        drop(acc.historian.sender);
        assert_eq!(
            acc.historian.thread_hdl.join().unwrap(),
            ExitReason::RecvDisconnected
        );
    }

    #[test]
    fn test_invalid_transfer() {
        let alice = Mint::new(11_000);
        let mut acc = Accountant::new(&alice, Some(2));
        let bob_pubkey = KeyPair::new().pubkey();
        acc.transfer(1_000, &alice.keypair(), bob_pubkey, alice.seed())
            .unwrap();
        assert_eq!(
            acc.transfer(10_001, &alice.keypair(), bob_pubkey, alice.seed()),
            Err(AccountingError::InsufficientFunds)
        );

        let alice_pubkey = alice.keypair().pubkey();
        assert_eq!(acc.get_balance(&alice_pubkey).unwrap(), 10_000);
        assert_eq!(acc.get_balance(&bob_pubkey).unwrap(), 1_000);

        drop(acc.historian.sender);
        assert_eq!(
            acc.historian.thread_hdl.join().unwrap(),
            ExitReason::RecvDisconnected
        );
    }

    #[test]
    fn test_overspend_attack() {
        let alice = Mint::new(1);
        let mut acc = Accountant::new(&alice, None);
        let bob_pubkey = KeyPair::new().pubkey();
        let mut tr = Transaction::new(&alice.keypair(), bob_pubkey, 1, alice.seed());
        if let Plan::Pay(ref mut payment) = tr.plan {
            payment.tokens = 2; // <-- attack!
        }
        assert_eq!(
            acc.log_transaction(tr.clone()),
            Err(AccountingError::InvalidTransfer)
        );

        // Also, ensure all branches of the plan spend all tokens
        if let Plan::Pay(ref mut payment) = tr.plan {
            payment.tokens = 0; // <-- whoops!
        }
        assert_eq!(
            acc.log_transaction(tr.clone()),
            Err(AccountingError::InvalidTransfer)
        );
    }

    #[test]
    fn test_transfer_to_newb() {
        let alice = Mint::new(10_000);
        let mut acc = Accountant::new(&alice, Some(2));
        let alice_keypair = alice.keypair();
        let bob_pubkey = KeyPair::new().pubkey();
        acc.transfer(500, &alice_keypair, bob_pubkey, alice.seed())
            .unwrap();
        assert_eq!(acc.get_balance(&bob_pubkey).unwrap(), 500);

        drop(acc.historian.sender);
        assert_eq!(
            acc.historian.thread_hdl.join().unwrap(),
            ExitReason::RecvDisconnected
        );
    }

    #[test]
    fn test_transfer_on_date() {
        let alice = Mint::new(1);
        let mut acc = Accountant::new(&alice, Some(2));
        let alice_keypair = alice.keypair();
        let bob_pubkey = KeyPair::new().pubkey();
        let dt = Utc::now();
        acc.transfer_on_date(1, &alice_keypair, bob_pubkey, dt, alice.seed())
            .unwrap();

        // Alice's balance will be zero because all funds are locked up.
        assert_eq!(acc.get_balance(&alice.pubkey()), Some(0));

        // Bob's balance will be None because the funds have not been
        // sent.
        assert_eq!(acc.get_balance(&bob_pubkey), None);

        // Now, acknowledge that the time in the condition occurred and
        // that Bob's funds are now available.
        acc.process_verified_timestamp(alice.pubkey(), dt).unwrap();
        assert_eq!(acc.get_balance(&bob_pubkey), Some(1));

        acc.process_verified_timestamp(alice.pubkey(), dt).unwrap(); // <-- Attack! Attempt to process completed transaction.
        assert_ne!(acc.get_balance(&bob_pubkey), Some(2));
    }

    #[test]
    fn test_transfer_after_date() {
        let alice = Mint::new(1);
        let mut acc = Accountant::new(&alice, Some(2));
        let alice_keypair = alice.keypair();
        let bob_pubkey = KeyPair::new().pubkey();
        let dt = Utc::now();
        acc.process_verified_timestamp(alice.pubkey(), dt).unwrap();

        // It's now past now, so this transfer should be processed immediately.
        acc.transfer_on_date(1, &alice_keypair, bob_pubkey, dt, alice.seed())
            .unwrap();

        assert_eq!(acc.get_balance(&alice.pubkey()), Some(0));
        assert_eq!(acc.get_balance(&bob_pubkey), Some(1));
    }

    #[test]
    fn test_cancel_transfer() {
        let alice = Mint::new(1);
        let mut acc = Accountant::new(&alice, Some(2));
        let alice_keypair = alice.keypair();
        let bob_pubkey = KeyPair::new().pubkey();
        let dt = Utc::now();
        let sig = acc.transfer_on_date(1, &alice_keypair, bob_pubkey, dt, alice.seed())
            .unwrap();

        // Alice's balance will be zero because all funds are locked up.
        assert_eq!(acc.get_balance(&alice.pubkey()), Some(0));

        // Bob's balance will be None because the funds have not been
        // sent.
        assert_eq!(acc.get_balance(&bob_pubkey), None);

        // Now, cancel the transaction. Alice gets her funds back, Bob never sees them.
        acc.process_verified_sig(alice.pubkey(), sig).unwrap();
        assert_eq!(acc.get_balance(&alice.pubkey()), Some(1));
        assert_eq!(acc.get_balance(&bob_pubkey), None);

        acc.process_verified_sig(alice.pubkey(), sig).unwrap(); // <-- Attack! Attempt to cancel completed transaction.
        assert_ne!(acc.get_balance(&alice.pubkey()), Some(2));
    }
}

src/accountant_skel.rs (deleted, 189 lines)
@@ -1,189 +0,0 @@
//! The `accountant_skel` module is a microservice that exposes the high-level
//! Accountant API to the network. Its message encoding is currently
//! in flux. Clients should use AccountantStub to interact with it.

use accountant::Accountant;
use bincode::{deserialize, serialize};
use entry::Entry;
use hash::Hash;
use result::Result;
use serde_json;
use signature::PublicKey;
use std::default::Default;
use std::io::Write;
use std::net::{SocketAddr, UdpSocket};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::channel;
use std::sync::{Arc, Mutex};
use std::thread::{spawn, JoinHandle};
use std::time::Duration;
use streamer;
use transaction::Transaction;
use rayon::prelude::*;

pub struct AccountantSkel<W: Write + Send + 'static> {
    pub acc: Accountant,
    pub last_id: Hash,
    writer: W,
}

#[cfg_attr(feature = "cargo-clippy", allow(large_enum_variant))]
#[derive(Serialize, Deserialize, Debug)]
pub enum Request {
    Transaction(Transaction),
    GetBalance { key: PublicKey },
    GetId { is_last: bool },
}

impl Request {
    /// Verify the request is valid.
    pub fn verify(&self) -> bool {
        match *self {
            Request::Transaction(ref tr) => tr.verify(),
            _ => true,
        }
    }
}

/// Parallel verification of a batch of requests.
fn filter_valid_requests(reqs: Vec<(Request, SocketAddr)>) -> Vec<(Request, SocketAddr)> {
    reqs.into_par_iter().filter({ |x| x.0.verify() }).collect()
}

#[derive(Serialize, Deserialize, Debug)]
pub enum Response {
    Balance { key: PublicKey, val: Option<i64> },
    Entries { entries: Vec<Entry> },
    Id { id: Hash, is_last: bool },
}

impl<W: Write + Send + 'static> AccountantSkel<W> {
    /// Create a new AccountantSkel that wraps the given Accountant.
    pub fn new(acc: Accountant, w: W) -> Self {
        let last_id = acc.first_id;
        AccountantSkel {
            acc,
            last_id,
            writer: w,
        }
    }

    /// Process any Entry items that have been published by the Historian.
    pub fn sync(&mut self) -> Hash {
        while let Ok(entry) = self.acc.historian.receiver.try_recv() {
            self.last_id = entry.id;
            writeln!(self.writer, "{}", serde_json::to_string(&entry).unwrap()).unwrap();
        }
        self.last_id
    }

    /// Process Request items sent by clients.
    pub fn log_verified_request(&mut self, msg: Request) -> Option<Response> {
        match msg {
            Request::Transaction(tr) => {
                if let Err(err) = self.acc.log_verified_transaction(tr) {
                    eprintln!("Transaction error: {:?}", err);
                }
                None
            }
            Request::GetBalance { key } => {
                let val = self.acc.get_balance(&key);
                Some(Response::Balance { key, val })
            }
            Request::GetId { is_last } => Some(Response::Id {
                id: if is_last {
                    self.sync()
                } else {
                    self.acc.first_id
                },
                is_last,
            }),
        }
    }

    fn process(
        obj: &Arc<Mutex<AccountantSkel<W>>>,
        r_reader: &streamer::Receiver,
        s_responder: &streamer::Responder,
        packet_recycler: &streamer::PacketRecycler,
        response_recycler: &streamer::ResponseRecycler,
    ) -> Result<()> {
        let timer = Duration::new(1, 0);
        let msgs = r_reader.recv_timeout(timer)?;
        let msgs_ = msgs.clone();
        let rsps = streamer::allocate(response_recycler);
        let rsps_ = rsps.clone();
        {
            let mut reqs = vec![];
            for packet in &msgs.read().unwrap().packets {
                let rsp_addr = packet.meta.get_addr();
                let sz = packet.meta.size;
                let req = deserialize(&packet.data[0..sz])?;
                reqs.push((req, rsp_addr));
            }
            let reqs = filter_valid_requests(reqs);

            let mut num = 0;
            let mut ursps = rsps.write().unwrap();
            for (req, rsp_addr) in reqs {
                if let Some(resp) = obj.lock().unwrap().log_verified_request(req) {
                    if ursps.responses.len() <= num {
                        ursps
                            .responses
                            .resize((num + 1) * 2, streamer::Response::default());
                    }
                    let rsp = &mut ursps.responses[num];
                    let v = serialize(&resp)?;
                    let len = v.len();
                    rsp.data[..len].copy_from_slice(&v);
                    rsp.meta.size = len;
                    rsp.meta.set_addr(&rsp_addr);
                    num += 1;
                }
            }
            ursps.responses.resize(num, streamer::Response::default());
        }
        s_responder.send(rsps_)?;
        streamer::recycle(packet_recycler, msgs_);
        Ok(())
    }

    /// Create a UDP microservice that forwards messages to the given AccountantSkel.
    /// Set `exit` to shut down its threads.
    pub fn serve(
        obj: Arc<Mutex<AccountantSkel<W>>>,
        addr: &str,
        exit: Arc<AtomicBool>,
    ) -> Result<Vec<JoinHandle<()>>> {
        let read = UdpSocket::bind(addr)?;
        // make sure we are on the same interface
        let mut local = read.local_addr()?;
        local.set_port(0);
        let write = UdpSocket::bind(local)?;

        let packet_recycler = Arc::new(Mutex::new(Vec::new()));
        let response_recycler = Arc::new(Mutex::new(Vec::new()));
        let (s_reader, r_reader) = channel();
        let t_receiver = streamer::receiver(read, exit.clone(), packet_recycler.clone(), s_reader)?;

        let (s_responder, r_responder) = channel();
        let t_responder =
            streamer::responder(write, exit.clone(), response_recycler.clone(), r_responder);

        let skel = obj.clone();
        let t_server = spawn(move || loop {
            let e = AccountantSkel::process(
                &skel,
                &r_reader,
                &s_responder,
                &packet_recycler,
                &response_recycler,
            );
            if e.is_err() && exit.load(Ordering::Relaxed) {
                break;
            }
        });

        Ok(vec![t_receiver, t_responder, t_server])
    }
}

src/accountant_stub.rs (deleted, 126 lines)
@@ -1,126 +0,0 @@
//! The `accountant_stub` module is a client-side object that interfaces with a server-side Accountant
//! object via the network interface exposed by AccountantSkel. Client code should use
//! this object instead of writing messages to the network directly. The binary
//! encoding of its messages are unstable and may change in future releases.

use accountant_skel::{Request, Response};
use bincode::{deserialize, serialize};
use hash::Hash;
use signature::{KeyPair, PublicKey, Signature};
use std::io;
use std::net::UdpSocket;
use transaction::Transaction;

pub struct AccountantStub {
    pub addr: String,
    pub socket: UdpSocket,
}

impl AccountantStub {
    /// Create a new AccountantStub that will interface with AccountantSkel
    /// over `socket`. To receive responses, the caller must bind `socket`
    /// to a public address before invoking AccountantStub methods.
    pub fn new(addr: &str, socket: UdpSocket) -> Self {
        AccountantStub {
            addr: addr.to_string(),
            socket,
        }
    }

    /// Send a signed Transaction to the server for processing. This method
    /// does not wait for a response.
    pub fn transfer_signed(&self, tr: Transaction) -> io::Result<usize> {
        let req = Request::Transaction(tr);
        let data = serialize(&req).unwrap();
        self.socket.send_to(&data, &self.addr)
    }

    /// Creates, signs, and processes a Transaction. Useful for writing unit-tests.
    pub fn transfer(
        &self,
        n: i64,
        keypair: &KeyPair,
        to: PublicKey,
        last_id: &Hash,
    ) -> io::Result<Signature> {
        let tr = Transaction::new(keypair, to, n, *last_id);
        let sig = tr.sig;
        self.transfer_signed(tr).map(|_| sig)
    }

    /// Request the balance of the user holding `pubkey`. This method blocks
    /// until the server sends a response. If the response packet is dropped
    /// by the network, this method will hang indefinitely.
    pub fn get_balance(&self, pubkey: &PublicKey) -> io::Result<Option<i64>> {
        let req = Request::GetBalance { key: *pubkey };
        let data = serialize(&req).expect("serialize GetBalance");
        self.socket.send_to(&data, &self.addr)?;
        let mut buf = vec![0u8; 1024];
        self.socket.recv_from(&mut buf)?;
        let resp = deserialize(&buf).expect("deserialize balance");
        if let Response::Balance { key, val } = resp {
            assert_eq!(key, *pubkey);
            return Ok(val);
        }
        Ok(None)
    }

    /// Request the first or last Entry ID from the server.
    fn get_id(&self, is_last: bool) -> io::Result<Hash> {
        let req = Request::GetId { is_last };
        let data = serialize(&req).expect("serialize GetId");
        self.socket.send_to(&data, &self.addr)?;
        let mut buf = vec![0u8; 1024];
        self.socket.recv_from(&mut buf)?;
        let resp = deserialize(&buf).expect("deserialize Id");
        if let Response::Id { id, .. } = resp {
            return Ok(id);
        }
        Ok(Default::default())
    }

    /// Request the last Entry ID from the server. This method blocks
    /// until the server sends a response. At the time of this writing,
    /// it also has the side-effect of causing the server to log any
    /// entries that have been published by the Historian.
    pub fn get_last_id(&self) -> io::Result<Hash> {
        self.get_id(true)
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use accountant::Accountant;
    use accountant_skel::AccountantSkel;
    use mint::Mint;
    use signature::{KeyPair, KeyPairUtil};
    use std::io::sink;
    use std::sync::atomic::{AtomicBool, Ordering};
    use std::sync::{Arc, Mutex};
    use std::thread::sleep;
    use std::time::Duration;

    // TODO: Figure out why this test sometimes hangs on TravisCI.
    #[test]
    fn test_accountant_stub() {
        let addr = "127.0.0.1:9000";
        let send_addr = "127.0.0.1:9001";
        let alice = Mint::new(10_000);
        let acc = Accountant::new(&alice, Some(30));
        let bob_pubkey = KeyPair::new().pubkey();
        let exit = Arc::new(AtomicBool::new(false));
        let acc = Arc::new(Mutex::new(AccountantSkel::new(acc, sink())));
        let _threads = AccountantSkel::serve(acc, addr, exit.clone()).unwrap();
        sleep(Duration::from_millis(300));

        let socket = UdpSocket::bind(send_addr).unwrap();

        let acc = AccountantStub::new(addr, socket);
        let last_id = acc.get_last_id().unwrap();
        let _sig = acc.transfer(500, &alice.keypair(), bob_pubkey, &last_id)
            .unwrap();
        assert_eq!(acc.get_balance(&bob_pubkey).unwrap().unwrap(), 500);
        exit.store(true, Ordering::Relaxed);
    }
}

src/bank.rs (executable file, 1007 lines)
File diff suppressed because it is too large.

src/banking_stage.rs (new file, 207 lines)
@@ -0,0 +1,207 @@
//! The `banking_stage` processes Transaction messages. It is intended to be used
//! to construct a software pipeline. The stage uses all available CPU cores and
//! can do its processing in parallel with signature verification on the GPU.

use bank::Bank;
use bincode::deserialize;
use counter::Counter;
use log::Level;
use packet::{PacketRecycler, Packets, SharedPackets};
use rayon::prelude::*;
use record_stage::Signal;
use result::{Error, Result};
use service::Service;
use std::net::SocketAddr;
use std::sync::atomic::AtomicUsize;
use std::sync::mpsc::{channel, Receiver, RecvTimeoutError, Sender};
use std::sync::Arc;
use std::thread::{self, Builder, JoinHandle};
use std::time::Duration;
use std::time::Instant;
use timing;
use transaction::Transaction;

/// Stores the stage's thread handle and output receiver.
pub struct BankingStage {
    /// Handle to the stage's thread.
    thread_hdl: JoinHandle<()>,
}

impl BankingStage {
    /// Create the stage using `bank`. Exit when `verified_receiver` is dropped.
    /// Discard input packets using `packet_recycler` to minimize memory
    /// allocations in a previous stage such as the `fetch_stage`.
    pub fn new(
        bank: Arc<Bank>,
        verified_receiver: Receiver<Vec<(SharedPackets, Vec<u8>)>>,
        packet_recycler: PacketRecycler,
    ) -> (Self, Receiver<Signal>) {
        let (signal_sender, signal_receiver) = channel();
        let thread_hdl = Builder::new()
            .name("solana-banking-stage".to_string())
            .spawn(move || loop {
                if let Err(e) = Self::process_packets(
                    &bank,
                    &verified_receiver,
                    &signal_sender,
                    &packet_recycler,
                ) {
                    match e {
                        Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => break,
                        Error::RecvTimeoutError(RecvTimeoutError::Timeout) => (),
                        _ => error!("{:?}", e),
                    }
                }
            })
            .unwrap();
        (BankingStage { thread_hdl }, signal_receiver)
    }

    /// Convert the transactions from a blob of binary data to a vector of transactions and
    /// an unused `SocketAddr` that could be used to send a response.
    fn deserialize_transactions(p: &Packets) -> Vec<Option<(Transaction, SocketAddr)>> {
        p.packets
            .par_iter()
            .map(|x| {
                deserialize(&x.data[0..x.meta.size])
                    .map(|req| (req, x.meta.addr()))
                    .ok()
            })
            .collect()
    }

    /// Process the incoming packets and send output `Signal` messages to `signal_sender`.
    /// Discard packets via `packet_recycler`.
    pub fn process_packets(
        bank: &Arc<Bank>,
        verified_receiver: &Receiver<Vec<(SharedPackets, Vec<u8>)>>,
        signal_sender: &Sender<Signal>,
        packet_recycler: &PacketRecycler,
    ) -> Result<()> {
        let timer = Duration::new(1, 0);
        let recv_start = Instant::now();
        let mms = verified_receiver.recv_timeout(timer)?;
        let mut reqs_len = 0;
        let mms_len = mms.len();
        info!(
            "@{:?} process start stalled for: {:?}ms batches: {}",
            timing::timestamp(),
            timing::duration_as_ms(&recv_start.elapsed()),
            mms.len(),
        );
        let bank_starting_tx_count = bank.transaction_count();
        let count = mms.iter().map(|x| x.1.len()).sum();
        let proc_start = Instant::now();
        for (msgs, vers) in mms {
            let transactions = Self::deserialize_transactions(&msgs.read().unwrap());
            reqs_len += transactions.len();
            let transactions = transactions
                .into_iter()
                .zip(vers)
                .filter_map(|(tx, ver)| match tx {
                    None => None,
                    Some((tx, _addr)) => if tx.verify_plan() && ver != 0 {
                        Some(tx)
                    } else {
                        None
                    },
                })
                .collect();

            debug!("process_transactions");
            let results = bank.process_transactions(transactions);
            let transactions = results.into_iter().filter_map(|x| x.ok()).collect();
            signal_sender.send(Signal::Transactions(transactions))?;
            debug!("done process_transactions");

            packet_recycler.recycle(msgs);
        }
        let total_time_s = timing::duration_as_s(&proc_start.elapsed());
        let total_time_ms = timing::duration_as_ms(&proc_start.elapsed());
        info!(
            "@{:?} done processing transaction batches: {} time: {:?}ms reqs: {} reqs/s: {}",
            timing::timestamp(),
            mms_len,
            total_time_ms,
            reqs_len,
            (reqs_len as f32) / (total_time_s)
        );
        inc_new_counter_info!("banking_stage-process_packets", count);
        inc_new_counter_info!(
            "banking_stage-process_transactions",
            bank.transaction_count() - bank_starting_tx_count
        );
        Ok(())
    }
}

impl Service for BankingStage {
    fn thread_hdls(self) -> Vec<JoinHandle<()>> {
        vec![self.thread_hdl]
    }

    fn join(self) -> thread::Result<()> {
        self.thread_hdl.join()
    }
}

// TODO: When banking is pulled out of RequestStage, add this test back in.

//use bank::Bank;
//use entry::Entry;
//use hash::Hash;
//use record_stage::RecordStage;
//use record_stage::Signal;
//use result::Result;
//use std::sync::mpsc::{channel, Sender};
//use std::sync::{Arc, Mutex};
//use std::time::Duration;
//use transaction::Transaction;
//
//#[cfg(test)]
//mod tests {
//    use bank::Bank;
//    use mint::Mint;
//    use signature::{KeyPair, KeyPairUtil};
//    use transaction::Transaction;
//
//    #[test]
//    // TODO: Move this test into banking_stage. Calling process_transactions() directly
//    // defeats the purpose of this test.
//    fn test_banking_sequential_consistency() {
//        // In this attack we'll demonstrate that a verifier can interpret the ledger
//        // differently if either the server doesn't signal the ledger to add an
//        // Entry OR if the verifier tries to parallelize across multiple Entries.
//        let mint = Mint::new(2);
//        let bank = Bank::new(&mint);
//        let banking_stage = EventProcessor::new(bank, &mint.last_id(), None);
//
//        // Process a batch that includes a transaction that receives two tokens.
//        let alice = KeyPair::new();
//        let tx = Transaction::new(&mint.keypair(), alice.pubkey(), 2, mint.last_id());
//        let transactions = vec![tx];
//        let entry0 = banking_stage.process_transactions(transactions).unwrap();
//
//        // Process a second batch that spends one of those tokens.
//        let tx = Transaction::new(&alice, mint.pubkey(), 1, mint.last_id());
//        let transactions = vec![tx];
//        let entry1 = banking_stage.process_transactions(transactions).unwrap();
//
//        // Collect the ledger and feed it to a new bank.
//        let entries = vec![entry0, entry1];
//
//        // Assert the user holds one token, not two. If the server only output one
//        // entry, then the second transaction will be rejected, because it drives
//        // the account balance below zero before the credit is added.
//        let bank = Bank::new(&mint);
//        for entry in entries {
//            assert!(
//                bank
//                    .process_transactions(entry.transactions)
//                    .into_iter()
//                    .all(|x| x.is_ok())
//            );
//        }
//        assert_eq!(bank.get_balance(&alice.pubkey()), Some(1));
//    }
//}

src/bin/bench-streamer.rs (new file, 90 lines)
@@ -0,0 +1,90 @@
extern crate solana;

use solana::packet::{Packet, PacketRecycler, BLOB_SIZE, PACKET_DATA_SIZE};
use solana::result::Result;
use solana::streamer::{receiver, PacketReceiver};
use std::net::{SocketAddr, UdpSocket};
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::sync::mpsc::channel;
use std::sync::Arc;
use std::thread::sleep;
use std::thread::{spawn, JoinHandle};
use std::time::Duration;
use std::time::SystemTime;

fn producer(addr: &SocketAddr, recycler: &PacketRecycler, exit: Arc<AtomicBool>) -> JoinHandle<()> {
    let send = UdpSocket::bind("0.0.0.0:0").unwrap();
    let msgs = recycler.allocate();
    let msgs_ = msgs.clone();
    msgs.write().unwrap().packets.resize(10, Packet::default());
    for w in &mut msgs.write().unwrap().packets {
        w.meta.size = PACKET_DATA_SIZE;
        w.meta.set_addr(&addr);
    }
    spawn(move || loop {
        if exit.load(Ordering::Relaxed) {
            return;
        }
        let mut num = 0;
        for p in &msgs_.read().unwrap().packets {
            let a = p.meta.addr();
            assert!(p.meta.size < BLOB_SIZE);
            send.send_to(&p.data[..p.meta.size], &a).unwrap();
            num += 1;
        }
        assert_eq!(num, 10);
    })
}

fn sink(
    recycler: PacketRecycler,
    exit: Arc<AtomicBool>,
    rvs: Arc<AtomicUsize>,
    r: PacketReceiver,
) -> JoinHandle<()> {
    spawn(move || loop {
        if exit.load(Ordering::Relaxed) {
            return;
        }
        let timer = Duration::new(1, 0);
        if let Ok(msgs) = r.recv_timeout(timer) {
            rvs.fetch_add(msgs.read().unwrap().packets.len(), Ordering::Relaxed);
            recycler.recycle(msgs);
        }
    })
}

fn main() -> Result<()> {
    let read = UdpSocket::bind("127.0.0.1:0")?;
    read.set_read_timeout(Some(Duration::new(1, 0)))?;

    let addr = read.local_addr()?;
    let exit = Arc::new(AtomicBool::new(false));
    let pack_recycler = PacketRecycler::default();

    let (s_reader, r_reader) = channel();
    let t_reader = receiver(read, exit.clone(), pack_recycler.clone(), s_reader);
    let t_producer1 = producer(&addr, &pack_recycler, exit.clone());
    let t_producer2 = producer(&addr, &pack_recycler, exit.clone());
    let t_producer3 = producer(&addr, &pack_recycler, exit.clone());

    let rvs = Arc::new(AtomicUsize::new(0));
    let t_sink = sink(pack_recycler.clone(), exit.clone(), rvs.clone(), r_reader);

    let start = SystemTime::now();
    let start_val = rvs.load(Ordering::Relaxed);
    sleep(Duration::new(5, 0));
    let elapsed = start.elapsed().unwrap();
    let end_val = rvs.load(Ordering::Relaxed);
    let time = elapsed.as_secs() * 1_000_000_000 + u64::from(elapsed.subsec_nanos());
    let ftime = (time as f64) / 1_000_000_000_f64;
    let fcount = (end_val - start_val) as f64;
    println!("performance: {:?}", fcount / ftime);
    exit.store(true, Ordering::Relaxed);
    t_reader.join()?;
    t_producer1.join()?;
    t_producer2.join()?;
    t_producer3.join()?;
    t_sink.join()?;
    Ok(())
}

src/bin/bench-tps.rs (new file, 714 lines)
@@ -0,0 +1,714 @@
|
||||
extern crate bincode;
#[macro_use]
extern crate clap;
extern crate influx_db_client;
extern crate rayon;
extern crate serde_json;
extern crate solana;

use clap::{App, Arg};
use influx_db_client as influxdb;
use rayon::prelude::*;
use solana::client::mk_client;
use solana::crdt::{Crdt, NodeInfo};
use solana::drone::DRONE_PORT;
use solana::fullnode::Config;
use solana::hash::Hash;
use solana::logger;
use solana::metrics;
use solana::nat::{get_public_ip_addr, udp_random_bind};
use solana::ncp::Ncp;
use solana::service::Service;
use solana::signature::{read_keypair, GenKeys, Keypair, KeypairUtil};
use solana::thin_client::ThinClient;
use solana::timing::{duration_as_ms, duration_as_s};
use solana::transaction::Transaction;
use solana::wallet::request_airdrop;
use solana::window::default_window;
use std::collections::VecDeque;
use std::fs::File;
use std::net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket};
use std::process::exit;
use std::sync::atomic::{AtomicBool, AtomicIsize, Ordering};
use std::sync::{Arc, RwLock};
use std::thread::sleep;
use std::thread::Builder;
use std::thread::JoinHandle;
use std::time::Duration;
use std::time::Instant;

pub struct NodeStats {
    pub tps: f64, // Maximum TPS reported by this node
    pub tx: u64,  // Total transactions reported by this node
}

fn metrics_submit_token_balance(token_balance: i64) {
    println!("Token balance: {}", token_balance);
    metrics::submit(
        influxdb::Point::new("bench-tps")
            .add_tag("op", influxdb::Value::String("token_balance".to_string()))
            .add_field("balance", influxdb::Value::Integer(token_balance as i64))
            .to_owned(),
    );
}

fn sample_tx_count(
    exit_signal: &Arc<AtomicBool>,
    maxes: &Arc<RwLock<Vec<(SocketAddr, NodeStats)>>>,
    first_tx_count: u64,
    v: &NodeInfo,
    sample_period: u64,
) {
    let mut client = mk_client(&v);
    let mut now = Instant::now();
    let mut initial_tx_count = client.transaction_count();
    let mut max_tps = 0.0;
    let mut total;

    let log_prefix = format!("{:21}:", v.contact_info.tpu.to_string());

    loop {
        let tx_count = client.transaction_count();
        assert!(
            tx_count >= initial_tx_count,
            "expected tx_count({}) >= initial_tx_count({})",
            tx_count,
            initial_tx_count
        );
        let duration = now.elapsed();
        now = Instant::now();
        let sample = tx_count - initial_tx_count;
        initial_tx_count = tx_count;

        let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
        let tps = (sample * 1_000_000_000) as f64 / ns as f64;
        if tps > max_tps {
            max_tps = tps;
        }
        if tx_count > first_tx_count {
            total = tx_count - first_tx_count;
        } else {
            total = 0;
        }
        println!(
            "{} {:9.2} TPS, Transactions: {:6}, Total transactions: {}",
            log_prefix, tps, sample, total
        );
        sleep(Duration::new(sample_period, 0));

        if exit_signal.load(Ordering::Relaxed) {
            println!("{} Exiting validator thread", log_prefix);
            let stats = NodeStats {
                tps: max_tps,
                tx: total,
            };
            maxes.write().unwrap().push((v.contact_info.tpu, stats));
            break;
        }
    }
}

/// Send loopback payment of 0 tokens and confirm the network processed it
fn send_barrier_transaction(barrier_client: &mut ThinClient, last_id: &mut Hash, id: &Keypair) {
    let transfer_start = Instant::now();

    let mut poll_count = 0;
    loop {
        if poll_count > 0 && poll_count % 8 == 0 {
            println!(
                "polling for barrier transaction confirmation, attempt {}",
                poll_count
            );
        }

        *last_id = barrier_client.get_last_id();
        let signature = barrier_client
            .transfer(0, &id, id.pubkey(), last_id)
            .expect("Unable to send barrier transaction");

        let confirmation = barrier_client.poll_for_signature(&signature);
        let duration_ms = duration_as_ms(&transfer_start.elapsed());
        if confirmation.is_ok() {
            println!("barrier transaction confirmed in {}ms", duration_ms);

            metrics::submit(
                influxdb::Point::new("bench-tps")
                    .add_tag(
                        "op",
                        influxdb::Value::String("send_barrier_transaction".to_string()),
                    )
                    .add_field("poll_count", influxdb::Value::Integer(poll_count))
                    .add_field("duration", influxdb::Value::Integer(duration_ms as i64))
                    .to_owned(),
            );

            // Sanity check that the client balance is still 1
            let balance = barrier_client.poll_get_balance(&id.pubkey()).unwrap_or(-1);
            if balance != 1 {
                panic!("Expected an account balance of 1 (balance: {})", balance);
            }
            break;
        }

        // Timeout after 3 minutes. When running a CPU-only leader+validator+drone+bench-tps on a
        // dev machine, some batches of transactions can take upwards of 1 minute...
        if duration_ms > 1000 * 60 * 3 {
            println!("Error: Couldn't confirm barrier transaction!");
            exit(1);
        }

        let new_last_id = barrier_client.get_last_id();
        if new_last_id == *last_id {
            if poll_count > 0 && poll_count % 8 == 0 {
                println!("last_id is not advancing, still at {:?}", *last_id);
            }
        } else {
            *last_id = new_last_id;
        }

        poll_count += 1;
    }
}
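// Hedged sketch, not part of bench-tps.rs: the control shape used by
// send_barrier_transaction above, generalized. Poll `check` until it
// succeeds or `budget` elapses; `check` is a stand-in closure, not a
// solana API.
fn poll_until(budget: std::time::Duration, mut check: impl FnMut() -> bool) -> bool {
    let start = std::time::Instant::now();
    while start.elapsed() < budget {
        if check() {
            return true;
        }
        std::thread::sleep(std::time::Duration::from_millis(500));
    }
    false
}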

fn generate_txs(
    shared_txs: &Arc<RwLock<VecDeque<Vec<Transaction>>>>,
    id: &Keypair,
    keypairs: &[Keypair],
    last_id: &Hash,
    threads: usize,
    reclaim: bool,
) {
    let tx_count = keypairs.len();
    println!("Signing transactions... {} (reclaim={})", tx_count, reclaim);
    let signing_start = Instant::now();

    let transactions: Vec<_> = keypairs
        .par_iter()
        .map(|keypair| {
            if !reclaim {
                Transaction::new(&id, keypair.pubkey(), 1, *last_id)
            } else {
                Transaction::new(keypair, id.pubkey(), 1, *last_id)
            }
        })
        .collect();

    let duration = signing_start.elapsed();
    let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
    let bsps = (tx_count) as f64 / ns as f64;
    let nsps = ns as f64 / (tx_count) as f64;
    println!(
        "Done. {:.2} thousand signatures per second, {:.2} us per signature, {} ms total time",
        bsps * 1_000_000_f64,
        nsps / 1_000_f64,
        duration_as_ms(&duration),
    );
    metrics::submit(
        influxdb::Point::new("bench-tps")
            .add_tag("op", influxdb::Value::String("generate_txs".to_string()))
            .add_field(
                "duration",
                influxdb::Value::Integer(duration_as_ms(&duration) as i64),
            )
            .to_owned(),
    );

    let sz = transactions.len() / threads;
    let chunks: Vec<_> = transactions.chunks(sz).collect();
    {
        let mut shared_txs_wl = shared_txs.write().unwrap();
        for chunk in chunks {
            shared_txs_wl.push_back(chunk.to_vec());
        }
    }
}
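// Hedged sketch, not part of bench-tps.rs: how generate_txs above fans the
// signed batch out to sender threads. Integer division leaves a remainder
// that becomes one extra chunk, and `sz` must stay non-zero when there are
// fewer items than threads.
fn chunk_for_threads<T: Clone>(items: &[T], threads: usize) -> Vec<Vec<T>> {
    let sz = (items.len() / threads).max(1);
    items.chunks(sz).map(|c| c.to_vec()).collect()
}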

fn do_tx_transfers(
    exit_signal: &Arc<AtomicBool>,
    shared_txs: &Arc<RwLock<VecDeque<Vec<Transaction>>>>,
    leader: &NodeInfo,
    shared_tx_thread_count: &Arc<AtomicIsize>,
) {
    let client = mk_client(&leader);
    loop {
        let txs;
        {
            let mut shared_txs_wl = shared_txs.write().unwrap();
            txs = shared_txs_wl.pop_front();
        }
        if let Some(txs0) = txs {
            shared_tx_thread_count.fetch_add(1, Ordering::Relaxed);
            println!(
                "Transferring 1 unit {} times... to {}",
                txs0.len(),
                leader.contact_info.tpu
            );
            let tx_len = txs0.len();
            let transfer_start = Instant::now();
            for tx in txs0 {
                client.transfer_signed(&tx).unwrap();
            }
            shared_tx_thread_count.fetch_add(-1, Ordering::Relaxed);
            println!(
                "Tx send done. {} ms {} tps",
                duration_as_ms(&transfer_start.elapsed()),
                tx_len as f32 / duration_as_s(&transfer_start.elapsed()),
            );
            metrics::submit(
                influxdb::Point::new("bench-tps")
                    .add_tag("op", influxdb::Value::String("do_tx_transfers".to_string()))
                    .add_field(
                        "duration",
                        influxdb::Value::Integer(duration_as_ms(&transfer_start.elapsed()) as i64),
                    )
                    .add_field("count", influxdb::Value::Integer(tx_len as i64))
                    .to_owned(),
            );
        }
        if exit_signal.load(Ordering::Relaxed) {
            break;
        }
    }
}

fn airdrop_tokens(client: &mut ThinClient, leader: &NodeInfo, id: &Keypair, tx_count: i64) {
    let mut drone_addr = leader.contact_info.tpu;
    drone_addr.set_port(DRONE_PORT);

    let starting_balance = client.poll_get_balance(&id.pubkey()).unwrap();
    metrics_submit_token_balance(starting_balance);

    if starting_balance < tx_count {
        let airdrop_amount = tx_count - starting_balance;
        println!(
            "Airdropping {:?} tokens from {}",
            airdrop_amount, drone_addr
        );

        let previous_balance = starting_balance;
        request_airdrop(&drone_addr, &id.pubkey(), airdrop_amount as u64).unwrap();

        // TODO: return airdrop Result from Drone instead of polling the
        // network
        let mut current_balance = previous_balance;
        for _ in 0..20 {
            sleep(Duration::from_millis(500));
            current_balance = client.poll_get_balance(&id.pubkey()).unwrap();
            if starting_balance != current_balance {
                break;
            }
            println!(".");
        }
        metrics_submit_token_balance(current_balance);
        if current_balance - starting_balance != airdrop_amount {
            println!("Airdrop failed!");
            exit(1);
        }
    }
}

fn compute_and_report_stats(
    maxes: &Arc<RwLock<Vec<(SocketAddr, NodeStats)>>>,
    sample_period: u64,
    tx_send_elapsed: &Duration,
) {
    // Compute/report stats
    let mut max_of_maxes = 0.0;
    let mut total_txs = 0;
    let mut nodes_with_zero_tps = 0;
    let mut total_maxes = 0.0;
    println!(" Node address        |       Max TPS | Total Transactions");
    println!("---------------------+---------------+--------------------");

    for (sock, stats) in maxes.read().unwrap().iter() {
        let maybe_flag = match stats.tx {
            0 => "!!!!!",
            _ => "",
        };

        println!(
            "{:20} | {:13.2} | {} {}",
            (*sock).to_string(),
            stats.tps,
            stats.tx,
            maybe_flag
        );

        if stats.tps == 0.0 {
            nodes_with_zero_tps += 1;
        }
        total_maxes += stats.tps;

        if stats.tps > max_of_maxes {
            max_of_maxes = stats.tps;
        }
        total_txs += stats.tx;
    }

    if total_maxes > 0.0 {
        let num_nodes_with_tps = maxes.read().unwrap().len() - nodes_with_zero_tps;
        let average_max = total_maxes / num_nodes_with_tps as f64;
        println!(
            "\nAverage max TPS: {:.2}, {} nodes had 0 TPS",
            average_max, nodes_with_zero_tps
        );
    }

    println!(
        "\nHighest TPS: {:.2} sampling period {}s total transactions: {} clients: {}",
        max_of_maxes,
        sample_period,
        total_txs,
        maxes.read().unwrap().len()
    );
    println!(
        "\tAverage TPS: {}",
        total_txs as f32 / duration_as_s(tx_send_elapsed)
    );
}

fn main() {
    logger::setup();
    metrics::set_panic_hook("bench-tps");
    let mut threads = 4usize;
    let mut num_nodes = 1usize;
    let mut time_sec = 90;
    let mut sustained = false;
    let mut tx_count = 500_000;

    let matches = App::new("solana-bench-tps")
        .version(crate_version!())
        .arg(
            Arg::with_name("leader")
                .short("l")
                .long("leader")
                .value_name("PATH")
                .takes_value(true)
                .help("/path/to/leader.json"),
        )
        .arg(
            Arg::with_name("keypair")
                .short("k")
                .long("keypair")
                .value_name("PATH")
                .takes_value(true)
                .default_value("~/.config/solana/id.json")
                .help("/path/to/id.json"),
        )
        .arg(
            Arg::with_name("num_nodes")
                .short("n")
                .long("nodes")
                .value_name("NUMBER")
                .takes_value(true)
                .help("number of nodes to converge to"),
        )
        .arg(
            Arg::with_name("threads")
                .short("t")
                .long("threads")
                .value_name("NUMBER")
                .takes_value(true)
                .help("number of threads"),
        )
        .arg(
            Arg::with_name("seconds")
                .short("s")
                .long("sec")
                .value_name("NUMBER")
                .takes_value(true)
                .help("send transactions for this many seconds"),
        )
        .arg(
            Arg::with_name("converge_only")
                .short("c")
                .help("exit immediately after converging"),
        )
        .arg(
            Arg::with_name("addr")
                .short("a")
                .long("addr")
                .value_name("IPADDR")
                .takes_value(true)
                .help("address to advertise to the network"),
        )
        .arg(
            Arg::with_name("sustained")
                .long("sustained")
                .help("Use sustained performance mode vs. peak mode. This overlaps the tx generation with transfers."),
        )
        .arg(
            Arg::with_name("tx_count")
                .long("tx_count")
                .value_name("NUMBER")
                .takes_value(true)
                .help("number of transactions to send in a single batch"),
        )
        .get_matches();

    let leader: NodeInfo;
    if let Some(l) = matches.value_of("leader") {
        leader = read_leader(l).node_info;
    } else {
        let server_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 8000);
        leader = NodeInfo::new_leader(&server_addr);
    }

    let id = read_keypair(matches.value_of("keypair").unwrap()).expect("client keypair");

    if let Some(t) = matches.value_of("threads") {
        threads = t.to_string().parse().expect("integer");
    }

    if let Some(n) = matches.value_of("num_nodes") {
        num_nodes = n.to_string().parse().expect("integer");
    }

    if let Some(s) = matches.value_of("seconds") {
        time_sec = s.to_string().parse().expect("integer");
    }

    let addr = if let Some(s) = matches.value_of("addr") {
        s.to_string().parse().unwrap_or_else(|e| {
            eprintln!("failed to parse {} as IP address error: {:?}", s, e);
            exit(1);
        })
    } else {
        get_public_ip_addr().unwrap_or_else(|e| {
            eprintln!("failed to get public IP, try --addr? error: {:?}", e);
            exit(1);
        })
    };

    if let Some(s) = matches.value_of("tx_count") {
        tx_count = s.to_string().parse().expect("integer");
    }

    if matches.is_present("sustained") {
        sustained = true;
    }

    let exit_signal = Arc::new(AtomicBool::new(false));
    let mut c_threads = vec![];
    let validators = converge(&leader, &exit_signal, num_nodes, &mut c_threads, addr);

    println!(" Node address         | Node identifier");
    println!("----------------------+------------------");
    for node in &validators {
        println!(
            " {:20} | {:16x}",
            node.contact_info.tpu.to_string(),
            node.debug_id()
        );
    }
    println!("Nodes: {}", validators.len());

    if validators.len() < num_nodes {
        println!(
            "Error: Insufficient nodes discovered. Expecting {} or more",
            num_nodes
        );
        exit(1);
    }

    if matches.is_present("converge_only") {
        return;
    }

    let mut client = mk_client(&leader);
    let mut barrier_client = mk_client(&leader);

    let mut seed = [0u8; 32];
    seed.copy_from_slice(&id.public_key_bytes()[..32]);
    let mut rnd = GenKeys::new(seed);

    println!("Creating {} keypairs...", tx_count / 2);
    let keypairs = rnd.gen_n_keypairs(tx_count / 2);
    let barrier_id = rnd.gen_n_keypairs(1).pop().unwrap();

    println!("Get tokens...");
    airdrop_tokens(&mut client, &leader, &id, tx_count);
    airdrop_tokens(&mut barrier_client, &leader, &barrier_id, 1);

    println!("Get last ID...");
    let mut last_id = client.get_last_id();
    println!("Got last ID {:?}", last_id);

    let first_tx_count = client.transaction_count();
    println!("Initial transaction count {}", first_tx_count);

    // Set up a thread per validator to sample every period and
    // collect the max transaction rate and total tx count seen
    let maxes = Arc::new(RwLock::new(Vec::new()));
    let sample_period = 1; // in seconds
    println!("Sampling TPS every {} second...", sample_period);
    let v_threads: Vec<_> = validators
        .into_iter()
        .map(|v| {
            let exit_signal = exit_signal.clone();
            let maxes = maxes.clone();
            Builder::new()
                .name("solana-client-sample".to_string())
                .spawn(move || {
                    sample_tx_count(&exit_signal, &maxes, first_tx_count, &v, sample_period);
                })
                .unwrap()
        })
        .collect();

    let shared_txs: Arc<RwLock<VecDeque<Vec<Transaction>>>> =
        Arc::new(RwLock::new(VecDeque::new()));

    let shared_tx_active_thread_count = Arc::new(AtomicIsize::new(0));

    let s_threads: Vec<_> = (0..threads)
        .map(|_| {
            let exit_signal = exit_signal.clone();
            let shared_txs = shared_txs.clone();
            let leader = leader.clone();
            let shared_tx_active_thread_count = shared_tx_active_thread_count.clone();
            Builder::new()
                .name("solana-client-sender".to_string())
                .spawn(move || {
                    do_tx_transfers(
                        &exit_signal,
                        &shared_txs,
                        &leader,
                        &shared_tx_active_thread_count,
                    );
                })
                .unwrap()
        })
        .collect();

    // generate and send transactions for the specified duration
    let time = Duration::new(time_sec, 0);
    let now = Instant::now();
    let mut reclaim_tokens_back_to_source_account = false;
    while now.elapsed() < time || reclaim_tokens_back_to_source_account {
        let balance = client.poll_get_balance(&id.pubkey()).unwrap_or(-1);
        metrics_submit_token_balance(balance);

        // Ping-pong between source and destination accounts on each loop
        // iteration; this seems to be faster than trying to determine the
        // balance of individual accounts.
        generate_txs(
            &shared_txs,
            &id,
            &keypairs,
            &last_id,
            threads,
            reclaim_tokens_back_to_source_account,
        );
        reclaim_tokens_back_to_source_account = !reclaim_tokens_back_to_source_account;

        // In sustained mode, overlap the transfers with generation;
        // this has higher average performance but lower peak performance
        // in tested environments.
        if !sustained {
            while shared_tx_active_thread_count.load(Ordering::Relaxed) > 0 {
                sleep(Duration::from_millis(100));
            }
        }
        // It's not feasible (would take too much time) to confirm each of the `tx_count / 2`
        // transactions sent by `generate_txs()`, so instead send and confirm a single transaction
        // to validate the network is still functional.
        send_barrier_transaction(&mut barrier_client, &mut last_id, &barrier_id);
    }

    // Stop the sampling threads so they will collect and report their stats
    exit_signal.store(true, Ordering::Relaxed);

    println!("Waiting for validator threads...");
    for t in v_threads {
        if let Err(err) = t.join() {
            println!(" join() failed with: {:?}", err);
        }
    }

    // join the tx send threads
    println!("Waiting for transmit threads...");
    for t in s_threads {
        if let Err(err) = t.join() {
            println!(" join() failed with: {:?}", err);
        }
    }

    let balance = client.poll_get_balance(&id.pubkey()).unwrap_or(-1);
    metrics_submit_token_balance(balance);

    compute_and_report_stats(&maxes, sample_period, &now.elapsed());

    // join the crdt client threads
    for t in c_threads {
        t.join().unwrap();
    }
}

fn spy_node(addr: IpAddr) -> (NodeInfo, UdpSocket) {
    let gossip_socket = udp_random_bind(8000, 10000, 5).unwrap();

    let gossip_addr = SocketAddr::new(addr, gossip_socket.local_addr().unwrap().port());

    let pubkey = Keypair::new().pubkey();
    let daddr = "0.0.0.0:0".parse().unwrap();
    assert!(!gossip_addr.ip().is_unspecified());
    assert!(!gossip_addr.ip().is_multicast());
    let node = NodeInfo::new(pubkey, gossip_addr, daddr, daddr, daddr, daddr);
    (node, gossip_socket)
}

fn converge(
    leader: &NodeInfo,
    exit_signal: &Arc<AtomicBool>,
    num_nodes: usize,
    threads: &mut Vec<JoinHandle<()>>,
    addr: IpAddr,
) -> Vec<NodeInfo> {
    // let's spy on the network
    let (spy, spy_gossip) = spy_node(addr);
    let mut spy_crdt = Crdt::new(spy).expect("Crdt::new");
    spy_crdt.insert(&leader);
    spy_crdt.set_leader(leader.id);
    let spy_ref = Arc::new(RwLock::new(spy_crdt));
    let window = default_window();
    let gossip_send_socket = udp_random_bind(8000, 10000, 5).unwrap();
    let ncp = Ncp::new(
        &spy_ref,
        window.clone(),
        None,
        spy_gossip,
        gossip_send_socket,
        exit_signal.clone(),
    ).expect("DataReplicator::new");
    let mut v: Vec<NodeInfo> = vec![];
    // wait for the network to converge; 30 seconds should be plenty
    for _ in 0..30 {
        v = spy_ref
            .read()
            .unwrap()
            .table
            .values()
            .filter(|x| Crdt::is_valid_address(x.contact_info.rpu))
            .cloned()
            .collect();
        if v.len() >= num_nodes {
            println!("CONVERGED!");
            break;
        } else {
            println!(
                "{} node(s) discovered (looking for {} or more)",
                v.len(),
                num_nodes
            );
        }
        sleep(Duration::new(1, 0));
    }
    threads.extend(ncp.thread_hdls().into_iter());
    v
}

fn read_leader(path: &str) -> Config {
    let file = File::open(path).unwrap_or_else(|_| panic!("file not found: {}", path));
    serde_json::from_reader(file).unwrap_or_else(|_| panic!("failed to parse {}", path))
}

@@ -1,73 +0,0 @@ (deleted file)
extern crate rayon;
extern crate serde_json;
extern crate solana;

use solana::accountant_stub::AccountantStub;
use solana::mint::Mint;
use solana::signature::{KeyPair, KeyPairUtil};
use solana::transaction::Transaction;
use std::io::stdin;
use std::net::UdpSocket;
use std::time::{Duration, Instant};
use std::thread::sleep;
use rayon::prelude::*;

fn main() {
    let addr = "127.0.0.1:8000";
    let send_addr = "127.0.0.1:8001";

    let mint: Mint = serde_json::from_reader(stdin()).unwrap();
    let mint_keypair = mint.keypair();
    let mint_pubkey = mint.pubkey();

    let socket = UdpSocket::bind(send_addr).unwrap();
    let acc = AccountantStub::new(addr, socket);
    let last_id = acc.get_last_id().unwrap();

    let mint_balance = acc.get_balance(&mint_pubkey).unwrap().unwrap();
    println!("Mint's Initial Balance {}", mint_balance);

    println!("Signing transactions...");
    let txs = 100_000;
    let now = Instant::now();
    let transactions: Vec<_> = (0..txs)
        .into_par_iter()
        .map(|_| {
            let rando_pubkey = KeyPair::new().pubkey();
            Transaction::new(&mint_keypair, rando_pubkey, 1, last_id)
        })
        .collect();
    let duration = now.elapsed();
    let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
    let bsps = txs as f64 / ns as f64;
    let nsps = ns as f64 / txs as f64;
    println!(
        "Done. {} thousand signatures per second, {}us per signature",
        bsps * 1_000_000_f64,
        nsps / 1_000_f64
    );

    println!("Transferring 1 unit {} times...", txs);
    let now = Instant::now();
    let mut _sig = Default::default();
    for tr in transactions {
        _sig = tr.sig;
        acc.transfer_signed(tr).unwrap();
    }
    println!("Waiting for last transaction to be confirmed...");
    let mut val = mint_balance;
    let mut prev = 0;
    while val != prev {
        sleep(Duration::from_millis(20));
        prev = val;
        val = acc.get_balance(&mint_pubkey).unwrap().unwrap();
    }
    println!("Mint's Final Balance {}", val);
    let txs = mint_balance - val;
    println!("Successful transactions {}", txs);

    let duration = now.elapsed();
    let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
    let tps = (txs * 1_000_000_000) as f64 / ns as f64;
    println!("Done. {} tps!", tps);
}

149 src/bin/drone.rs (new file)
@@ -0,0 +1,149 @@
extern crate bincode;
#[macro_use]
extern crate clap;
extern crate serde_json;
extern crate solana;
extern crate tokio;
extern crate tokio_codec;
extern crate tokio_io;

use bincode::deserialize;
use clap::{App, Arg};
use solana::crdt::NodeInfo;
use solana::drone::{Drone, DroneRequest, DRONE_PORT};
use solana::fullnode::Config;
use solana::logger;
use solana::metrics::set_panic_hook;
use solana::signature::read_keypair;
use std::fs::File;
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use std::sync::{Arc, Mutex};
use std::thread;
use tokio::net::TcpListener;
use tokio::prelude::*;
use tokio_codec::{BytesCodec, Decoder};

fn main() {
    logger::setup();
    set_panic_hook("drone");
    let matches = App::new("drone")
        .version(crate_version!())
        .arg(
            Arg::with_name("leader")
                .short("l")
                .long("leader")
                .value_name("PATH")
                .takes_value(true)
                .help("/path/to/leader.json"),
        )
        .arg(
            Arg::with_name("keypair")
                .short("k")
                .long("keypair")
                .value_name("PATH")
                .takes_value(true)
                .required(true)
                .help("/path/to/mint.json"),
        )
        .arg(
            Arg::with_name("time")
                .short("t")
                .long("time")
                .value_name("SECONDS")
                .takes_value(true)
                .help("time slice over which to limit requests to drone"),
        )
        .arg(
            Arg::with_name("cap")
                .short("c")
                .long("cap")
                .value_name("NUMBER")
                .takes_value(true)
                .help("request limit for time slice"),
        )
        .get_matches();

    let leader: NodeInfo;
    if let Some(l) = matches.value_of("leader") {
        leader = read_leader(l).node_info;
    } else {
        let server_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 8000);
        leader = NodeInfo::new_leader(&server_addr);
    }

    let mint_keypair =
        read_keypair(matches.value_of("keypair").expect("keypair")).expect("client keypair");

    let time_slice: Option<u64>;
    if let Some(t) = matches.value_of("time") {
        time_slice = Some(t.to_string().parse().expect("integer"));
    } else {
        time_slice = None;
    }
    let request_cap: Option<u64>;
    if let Some(c) = matches.value_of("cap") {
        request_cap = Some(c.to_string().parse().expect("integer"));
    } else {
        request_cap = None;
    }

    let drone_addr: SocketAddr = format!("0.0.0.0:{}", DRONE_PORT).parse().unwrap();

    let drone = Arc::new(Mutex::new(Drone::new(
        mint_keypair,
        drone_addr,
        leader.contact_info.tpu,
        leader.contact_info.rpu,
        time_slice,
        request_cap,
    )));

    let drone1 = drone.clone();
    thread::spawn(move || loop {
        let time = drone1.lock().unwrap().time_slice;
        thread::sleep(time);
        drone1.lock().unwrap().clear_request_count();
    });
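// Hedged sketch, not part of drone.rs: the rate-limit window above reduced
// to std types. A background thread clears a shared counter once per time
// slice; `count` stands in for the drone's request counter.
fn spawn_reset(count: std::sync::Arc<std::sync::Mutex<u64>>, slice: std::time::Duration) {
    std::thread::spawn(move || loop {
        std::thread::sleep(slice);
        *count.lock().unwrap() = 0;
    });
}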

    let socket = TcpListener::bind(&drone_addr).unwrap();
    println!("Drone started. Listening on: {}", drone_addr);
    let done = socket
        .incoming()
        .map_err(|e| println!("failed to accept socket; error = {:?}", e))
        .for_each(move |socket| {
            let drone2 = drone.clone();
            // let client_ip = socket.peer_addr().expect("drone peer_addr").ip();
            let framed = BytesCodec::new().framed(socket);
            let (_writer, reader) = framed.split();

            let processor = reader
                .for_each(move |bytes| {
                    let req: DroneRequest = deserialize(&bytes).or_else(|err| {
                        use std::io;
                        Err(io::Error::new(
                            io::ErrorKind::Other,
                            format!("deserialize packet in drone: {:?}", err),
                        ))
                    })?;

                    println!("Airdrop requested...");
                    // let res = drone2.lock().unwrap().check_rate_limit(client_ip);
                    let res1 = drone2.lock().unwrap().send_airdrop(req);
                    match res1 {
                        Ok(_) => println!("Airdrop sent!"),
                        Err(_) => println!("Request limit reached for this time slice"),
                    }
                    Ok(())
                })
                .then(|result| {
                    println!("Socket closed with result: {:?}", result);
                    Ok(())
                });
            tokio::spawn(processor)
        });
    tokio::run(done);
}

fn read_leader(path: &str) -> Config {
    let file = File::open(path).unwrap_or_else(|_| panic!("file not found: {}", path));
    serde_json::from_reader(file).unwrap_or_else(|_| panic!("failed to parse {}", path))
}

83 src/bin/fullnode-config.rs (new file)
@@ -0,0 +1,83 @@
#[macro_use]
extern crate clap;
extern crate dirs;
extern crate serde_json;
extern crate solana;

use clap::{App, Arg};
use solana::crdt::{get_ip_addr, parse_port_or_addr};
use solana::fullnode::Config;
use solana::nat::get_public_ip_addr;
use solana::signature::read_pkcs8;
use std::io;
use std::net::SocketAddr;

fn main() {
    let matches = App::new("fullnode-config")
        .version(crate_version!())
        .arg(
            Arg::with_name("local")
                .short("l")
                .long("local")
                .takes_value(false)
                .help("detect network address from local machine configuration"),
        )
        .arg(
            Arg::with_name("keypair")
                .short("k")
                .long("keypair")
                .value_name("PATH")
                .takes_value(true)
                .help("/path/to/id.json"),
        )
        .arg(
            Arg::with_name("public")
                .short("p")
                .long("public")
                .takes_value(false)
                .help("detect public network address using public servers"),
        )
        .arg(
            Arg::with_name("bind")
                .short("b")
                .long("bind")
                .value_name("PORT")
                .takes_value(true)
                .help("bind to port or address"),
        )
        .get_matches();

    let bind_addr: SocketAddr = {
        let mut bind_addr = parse_port_or_addr({
            if let Some(b) = matches.value_of("bind") {
                Some(b.to_string())
            } else {
                None
            }
        });
        if matches.is_present("local") {
            let ip = get_ip_addr().unwrap();
            bind_addr.set_ip(ip);
        }
        if matches.is_present("public") {
            let ip = get_public_ip_addr().unwrap();
            bind_addr.set_ip(ip);
        }
        bind_addr
    };

    let mut path = dirs::home_dir().expect("home directory");
    let id_path = if matches.is_present("keypair") {
        matches.value_of("keypair").unwrap()
    } else {
        path.extend(&[".config", "solana", "id.json"]);
        path.to_str().unwrap()
    };
    let pkcs8 = read_pkcs8(id_path).expect("client keypair");

    // We need all the receiving sockets to be bound within the expected
    // port range that we open on AWS.
    let config = Config::new(&bind_addr, pkcs8);
    let stdout = io::stdout();
    serde_json::to_writer(stdout, &config).expect("serialize");
}
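The bind-address block above composes three sources: an optional --bind PORT or HOST:PORT, then a local or public IP override. A hedged, std-only sketch of the port-or-address fallback; port_or_addr is illustrative, while the real parse_port_or_addr comes from solana::crdt:

    use std::net::SocketAddr;

    /// Accept either "HOST:PORT" or a bare "PORT", falling back to `default`.
    fn port_or_addr(arg: Option<&str>, default: SocketAddr) -> SocketAddr {
        match arg {
            Some(s) => s.parse().unwrap_or_else(|_| {
                let port: u16 = s.parse().expect("port number");
                SocketAddr::new(default.ip(), port)
            }),
            None => default,
        }
    }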

118 src/bin/fullnode.rs (new file)
@@ -0,0 +1,118 @@
#[macro_use]
extern crate clap;
extern crate getopts;
extern crate log;
extern crate serde_json;
extern crate solana;

use clap::{App, Arg};
use solana::client::mk_client;
use solana::crdt::{NodeInfo, TestNode};
use solana::drone::DRONE_PORT;
use solana::fullnode::{Config, Fullnode};
use solana::logger;
use solana::metrics::set_panic_hook;
use solana::service::Service;
use solana::signature::{Keypair, KeypairUtil};
use solana::wallet::request_airdrop;
use std::fs::File;
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use std::process::exit;

fn main() {
    logger::setup();
    set_panic_hook("fullnode");
    let matches = App::new("fullnode")
        .version(crate_version!())
        .arg(
            Arg::with_name("identity")
                .short("i")
                .long("identity")
                .value_name("FILE")
                .takes_value(true)
                .help("run with the identity found in FILE"),
        )
        .arg(
            Arg::with_name("testnet")
                .short("t")
                .long("testnet")
                .value_name("HOST:PORT")
                .takes_value(true)
                .help("connect to the network at this gossip entry point"),
        )
        .arg(
            Arg::with_name("ledger")
                .short("l")
                .long("ledger")
                .value_name("DIR")
                .takes_value(true)
                .required(true)
                .help("use DIR as persistent ledger location"),
        )
        .get_matches();

    let bind_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 8000);
    let mut keypair = Keypair::new();
    let mut repl_data = NodeInfo::new_leader_with_pubkey(keypair.pubkey(), &bind_addr);
    if let Some(i) = matches.value_of("identity") {
        let path = i.to_string();
        if let Ok(file) = File::open(path.clone()) {
            let parse: serde_json::Result<Config> = serde_json::from_reader(file);
            if let Ok(data) = parse {
                keypair = data.keypair();
                repl_data = data.node_info;
            } else {
                eprintln!("failed to parse {}", path);
                exit(1);
            }
        } else {
            eprintln!("failed to read {}", path);
            exit(1);
        }
    }

    let leader_pubkey = keypair.pubkey();
    let repl_clone = repl_data.clone();

    let ledger_path = matches.value_of("ledger").unwrap();

    let mut node = TestNode::new_with_bind_addr(repl_data, bind_addr);
    let mut drone_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), DRONE_PORT);
    let fullnode = if let Some(t) = matches.value_of("testnet") {
        let testnet_address_string = t.to_string();
        let testnet_addr: SocketAddr = testnet_address_string.parse().unwrap();
        drone_addr.set_ip(testnet_addr.ip());

        Fullnode::new(node, false, ledger_path, keypair, Some(testnet_addr))
    } else {
        node.data.leader_id = node.data.id;

        Fullnode::new(node, true, ledger_path, keypair, None)
    };

    let mut client = mk_client(&repl_clone);
    let previous_balance = client.poll_get_balance(&leader_pubkey).unwrap();
    eprintln!("balance is {}", previous_balance);

    if previous_balance == 0 {
        eprintln!("requesting airdrop from {}", drone_addr);
        request_airdrop(&drone_addr, &leader_pubkey, 50).unwrap_or_else(|_| {
            panic!(
                "Airdrop failed, is the drone address correct ({:?}) and the drone running?",
                drone_addr
            )
        });

        // Try multiple times to confirm a non-zero balance. |poll_get_balance| currently times
        // out after 1 second, and sometimes this is not enough time while the network is
        // booting.
        let balance_ok = (0..30).any(|i| {
            let balance = client.poll_get_balance(&leader_pubkey).unwrap();
            eprintln!("new balance is {} (attempt #{})", balance, i);
            balance > 0
        });
        assert!(balance_ok, "0 balance, airdrop failed?");
    }

    fullnode.join().expect("join");
}

@@ -1,30 +0,0 @@ (deleted file)
extern crate serde_json;
extern crate solana;

use solana::entry::create_entry;
use solana::event::Event;
use solana::hash::Hash;
use solana::mint::Mint;
use solana::signature::{KeyPair, KeyPairUtil, PublicKey};
use solana::transaction::Transaction;
use std::io::stdin;

fn transfer(from: &KeyPair, (to, tokens): (PublicKey, i64), last_id: Hash) -> Event {
    Event::Transaction(Transaction::new(from, to, tokens, last_id))
}

fn main() {
    let mint: Mint = serde_json::from_reader(stdin()).unwrap();
    let mut entries = mint.create_entries();

    let from = mint.keypair();
    let seed = mint.seed();
    let alice = (KeyPair::new().pubkey(), 200);
    let bob = (KeyPair::new().pubkey(), 100);
    let events = vec![transfer(&from, alice, seed), transfer(&from, bob, seed)];
    entries.push(create_entry(&seed, 0, events));

    for entry in entries {
        println!("{}", serde_json::to_string(&entry).unwrap());
    }
}

@@ -1,14 +1,62 @@
 //! A command-line executable for generating the chain's genesis block.
 
+extern crate atty;
+#[macro_use]
+extern crate clap;
 extern crate serde_json;
 extern crate solana;
 
+use atty::{is, Stream};
+use clap::{App, Arg};
+use solana::ledger::LedgerWriter;
 use solana::mint::Mint;
-use std::io::stdin;
+use std::error;
+use std::io::{stdin, Read};
+use std::process::exit;
 
-fn main() {
-    let mint: Mint = serde_json::from_reader(stdin()).unwrap();
-    for x in mint.create_entries() {
-        println!("{}", serde_json::to_string(&x).unwrap());
-    }
-}
+fn main() -> Result<(), Box<error::Error>> {
+    let matches = App::new("solana-genesis")
+        .version(crate_version!())
+        .arg(
+            Arg::with_name("tokens")
+                .short("t")
+                .long("tokens")
+                .value_name("NUMBER")
+                .takes_value(true)
+                .required(true)
+                .help("Number of tokens with which to initialize mint"),
+        )
+        .arg(
+            Arg::with_name("ledger")
+                .short("l")
+                .long("ledger")
+                .value_name("DIR")
+                .takes_value(true)
+                .required(true)
+                .help("use DIR as persistent ledger location"),
+        )
+        .get_matches();
+
+    let tokens = value_t_or_exit!(matches, "tokens", i64);
+    let ledger_path = matches.value_of("ledger").unwrap();
+
+    if is(Stream::Stdin) {
+        eprintln!("nothing found on stdin, expected a json file");
+        exit(1);
+    }
+
+    let mut buffer = String::new();
+    let num_bytes = stdin().read_to_string(&mut buffer)?;
+    if num_bytes == 0 {
+        eprintln!("empty file on stdin, expected a json file");
+        exit(1);
+    }
+
+    let pkcs8: Vec<u8> = serde_json::from_str(&buffer)?;
+    let mint = Mint::new_with_pkcs8(tokens, pkcs8);
+
+    let mut ledger_writer = LedgerWriter::open(&ledger_path, true)?;
+    ledger_writer.write_entries(mint.create_entries())?;
+
+    Ok(())
+}

@@ -1,37 +0,0 @@ (deleted file)
extern crate solana;

use solana::entry::Entry;
use solana::event::Event;
use solana::hash::Hash;
use solana::historian::Historian;
use solana::ledger::verify_slice;
use solana::recorder::Signal;
use solana::signature::{KeyPair, KeyPairUtil};
use solana::transaction::Transaction;
use std::sync::mpsc::SendError;
use std::thread::sleep;
use std::time::Duration;

fn create_ledger(hist: &Historian, seed: &Hash) -> Result<(), SendError<Signal>> {
    sleep(Duration::from_millis(15));
    let keypair = KeyPair::new();
    let tr = Transaction::new(&keypair, keypair.pubkey(), 42, *seed);
    let signal0 = Signal::Event(Event::Transaction(tr));
    hist.sender.send(signal0)?;
    sleep(Duration::from_millis(10));
    Ok(())
}

fn main() {
    let seed = Hash::default();
    let hist = Historian::new(&seed, Some(10));
    create_ledger(&hist, &seed).expect("send error");
    drop(hist.sender);
    let entries: Vec<Entry> = hist.receiver.iter().collect();
    for entry in &entries {
        println!("{:?}", entry);
    }
    // Proof-of-History: Verify the historian learned about the events
    // in the same order they appear in the vector.
    assert!(verify_slice(&entries, &seed));
}

51 src/bin/keygen.rs (new file)
@@ -0,0 +1,51 @@
#[macro_use]
extern crate clap;
extern crate dirs;
extern crate ring;
extern crate serde_json;

use clap::{App, Arg};
use ring::rand::SystemRandom;
use ring::signature::Ed25519KeyPair;
use std::error;
use std::fs::{self, File};
use std::io::Write;
use std::path::Path;

fn main() -> Result<(), Box<error::Error>> {
    let matches = App::new("solana-keygen")
        .version(crate_version!())
        .arg(
            Arg::with_name("outfile")
                .short("o")
                .long("outfile")
                .value_name("PATH")
                .takes_value(true)
                .help("path to generated file"),
        )
        .get_matches();

    let rnd = SystemRandom::new();
    let pkcs8_bytes = Ed25519KeyPair::generate_pkcs8(&rnd)?;
    let serialized = serde_json::to_string(&pkcs8_bytes.to_vec())?;

    let mut path = dirs::home_dir().expect("home directory");
    let outfile = if matches.is_present("outfile") {
        matches.value_of("outfile").unwrap()
    } else {
        path.extend(&[".config", "solana", "id.json"]);
        path.to_str().unwrap()
    };

    if outfile == "-" {
        println!("{}", serialized);
    } else {
        if let Some(outdir) = Path::new(outfile).parent() {
            fs::create_dir_all(outdir)?;
        }
        let mut f = File::create(outfile)?;
        f.write_all(&serialized.into_bytes())?;
    }

    Ok(())
}
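keygen stores the key as a JSON array of PKCS#8 bytes, the format read_keypair and read_pkcs8 elsewhere in this diff consume. A hedged round-trip sketch of just that file format using serde_json; the byte values are placeholders, not a real key:

    extern crate serde_json;

    fn main() -> Result<(), Box<std::error::Error>> {
        let pkcs8: Vec<u8> = vec![48, 83, 2, 1, 1]; // placeholder bytes, not a valid key
        let serialized = serde_json::to_string(&pkcs8)?;
        let parsed: Vec<u8> = serde_json::from_str(&serialized)?;
        assert_eq!(pkcs8, parsed);
        Ok(())
    }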

137 src/bin/ledger-tool.rs (new file)
@@ -0,0 +1,137 @@
#[macro_use]
extern crate clap;
extern crate serde_json;
extern crate solana;

use clap::{App, Arg, SubCommand};
use solana::bank::Bank;
use solana::ledger::{read_ledger, verify_ledger};
use solana::logger;
use std::io::{stdout, Write};
use std::process::exit;

fn main() {
    logger::setup();
    let matches = App::new("ledger-tool")
        .version(crate_version!())
        .arg(
            Arg::with_name("ledger")
                .short("l")
                .long("ledger")
                .value_name("DIR")
                .takes_value(true)
                .required(true)
                .help("use DIR for ledger location"),
        )
        .arg(
            Arg::with_name("head")
                .short("n")
                .long("head")
                .value_name("NUM")
                .takes_value(true)
                .help("at most the first NUM entries in ledger\n (only applies to verify, print, json commands)"),
        )
        .arg(
            Arg::with_name("precheck")
                .short("p")
                .long("precheck")
                .help("use ledger_verify() to check internal ledger consistency before proceeding"),
        )
        .subcommand(SubCommand::with_name("print").about("Print the ledger"))
        .subcommand(SubCommand::with_name("json").about("Print the ledger in JSON format"))
        .subcommand(SubCommand::with_name("verify").about("Verify the ledger's PoH"))
        .get_matches();

    let ledger_path = matches.value_of("ledger").unwrap();

    if matches.is_present("precheck") {
        if let Err(e) = verify_ledger(&ledger_path) {
            eprintln!("ledger precheck failed, error: {:?}", e);
            exit(1);
        }
    }
    let entries = match read_ledger(ledger_path, true) {
        Ok(entries) => entries,
        Err(err) => {
            eprintln!("Failed to open ledger at {}: {}", ledger_path, err);
            exit(1);
        }
    };

    let head = match matches.value_of("head") {
        Some(head) => head.parse().expect("please pass a number for --head"),
        None => <usize>::max_value(),
    };

    match matches.subcommand() {
        ("print", _) => {
            let entries = match read_ledger(ledger_path, true) {
                Ok(entries) => entries,
                Err(err) => {
                    eprintln!("Failed to open ledger at {}: {}", ledger_path, err);
                    exit(1);
                }
            };
            for (i, entry) in entries.enumerate() {
                if i >= head {
                    break;
                }
                let entry = entry.unwrap();
                println!("{:?}", entry);
            }
        }
        ("json", _) => {
            stdout().write_all(b"{\"ledger\":[\n").expect("open array");
            for (i, entry) in entries.enumerate() {
                if i >= head {
                    break;
                }
                let entry = entry.unwrap();
                // comma-separate entries; writing the comma before each
                // entry after the first avoids a trailing comma that would
                // break the JSON
                if i > 0 {
                    stdout().write_all(b",\n").expect("newline");
                }
                serde_json::to_writer(stdout(), &entry).expect("serialize");
            }
            stdout().write_all(b"\n]}\n").expect("close array");
        }
        ("verify", _) => {
            if head < 2 {
                eprintln!("verify requires at least 2 entries to run");
                exit(1);
            }
            let bank = Bank::default();

            {
                let genesis = match read_ledger(ledger_path, true) {
                    Ok(entries) => entries,
                    Err(err) => {
                        eprintln!("Failed to open ledger at {}: {}", ledger_path, err);
                        exit(1);
                    }
                };

                let genesis = genesis.take(2).map(|e| e.unwrap());

                if let Err(e) = bank.process_ledger(genesis) {
                    eprintln!("verify failed at genesis err: {:?}", e);
                    exit(1);
                }
            }
            let entries = entries.map(|e| e.unwrap());

            let head = head - 2;
            for (i, entry) in entries.skip(2).enumerate() {
                if i >= head {
                    break;
                }
                if let Err(e) = bank.process_entry(entry) {
                    eprintln!("verify failed at entry[{}], err: {:?}", i + 2, e);
                    exit(1);
                }
            }
        }
        ("", _) => {
            eprintln!("{}", matches.usage());
            exit(1);
        }
        _ => unreachable!(),
    };
}
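Each subcommand above caps its walk over the ledger with the same enumerate-and-break pattern; Iterator::take expresses the identical bound more directly. A small equivalence sketch with a stand-in iterator:

    fn main() {
        let entries = 0..10u32; // stand-in for the ledger entry iterator
        let head = 3;
        for entry in entries.take(head) {
            println!("{:?}", entry);
        }
    }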

@@ -1,15 +0,0 @@ (deleted file)
extern crate serde_json;
extern crate solana;

use solana::mint::Mint;
use std::io;

fn main() {
    let mut input_text = String::new();
    io::stdin().read_line(&mut input_text).unwrap();
    let trimmed = input_text.trim();
    let tokens = trimmed.parse::<i64>().unwrap();

    let mint = Mint::new(tokens);
    println!("{}", serde_json::to_string(&mint).unwrap());
}

@@ -1,25 +0,0 @@ (deleted file)
extern crate serde_json;
extern crate solana;

use solana::accountant::Accountant;
use solana::accountant_skel::AccountantSkel;
use std::io::{self, stdout, BufRead};
use std::sync::atomic::AtomicBool;
use std::sync::{Arc, Mutex};

fn main() {
    let addr = "127.0.0.1:8000";
    let stdin = io::stdin();
    let entries = stdin
        .lock()
        .lines()
        .map(|line| serde_json::from_str(&line.unwrap()).unwrap());
    let acc = Accountant::new_from_entries(entries, Some(1000));
    let exit = Arc::new(AtomicBool::new(false));
    let skel = Arc::new(Mutex::new(AccountantSkel::new(acc, stdout())));
    eprintln!("Listening on {}", addr);
    let threads = AccountantSkel::serve(skel, addr, exit.clone()).unwrap();
    for t in threads {
        t.join().expect("join");
    }
}

315 src/bin/wallet.rs (new file)
@@ -0,0 +1,315 @@
extern crate atty;
extern crate bincode;
extern crate bs58;
#[macro_use]
extern crate clap;
extern crate dirs;
extern crate serde_json;
extern crate solana;

use clap::{App, Arg, SubCommand};
use solana::client::mk_client;
use solana::crdt::NodeInfo;
use solana::drone::DRONE_PORT;
use solana::fullnode::Config;
use solana::logger;
use solana::signature::{read_keypair, Keypair, KeypairUtil, Pubkey, Signature};
use solana::thin_client::ThinClient;
use solana::wallet::request_airdrop;
use std::error;
use std::fmt;
use std::fs::File;
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use std::thread::sleep;
use std::time::Duration;

enum WalletCommand {
    Address,
    Balance,
    AirDrop(i64),
    Pay(i64, Pubkey),
    Confirm(Signature),
}

#[derive(Debug, Clone)]
enum WalletError {
    CommandNotRecognized(String),
    BadParameter(String),
}

impl fmt::Display for WalletError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "invalid")
    }
}

impl error::Error for WalletError {
    fn description(&self) -> &str {
        "invalid"
    }

    fn cause(&self) -> Option<&error::Error> {
        // Generic error, underlying cause isn't tracked.
        None
    }
}

struct WalletConfig {
    leader: NodeInfo,
    id: Keypair,
    drone_addr: SocketAddr,
    command: WalletCommand,
}

impl Default for WalletConfig {
    fn default() -> WalletConfig {
        let default_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 8000);
        WalletConfig {
            leader: NodeInfo::new_leader(&default_addr),
            id: Keypair::new(),
            drone_addr: default_addr,
            command: WalletCommand::Balance,
        }
    }
}

fn parse_args() -> Result<WalletConfig, Box<error::Error>> {
    let matches = App::new("solana-wallet")
        .version(crate_version!())
        .arg(
            Arg::with_name("leader")
                .short("l")
                .long("leader")
                .value_name("PATH")
                .takes_value(true)
                .help("/path/to/leader.json"),
        )
        .arg(
            Arg::with_name("keypair")
                .short("k")
                .long("keypair")
                .value_name("PATH")
                .takes_value(true)
                .help("/path/to/id.json"),
        )
        .subcommand(
            SubCommand::with_name("airdrop")
                .about("Request a batch of tokens")
                .arg(
                    Arg::with_name("tokens")
                        // .index(1)
                        .long("tokens")
                        .value_name("NUMBER")
                        .takes_value(true)
                        .required(true)
                        .help("The number of tokens to request"),
                ),
        )
        .subcommand(
            SubCommand::with_name("pay")
                .about("Send a payment")
                .arg(
                    Arg::with_name("tokens")
                        // .index(2)
                        .long("tokens")
                        .value_name("NUMBER")
                        .takes_value(true)
                        .required(true)
                        .help("the number of tokens to send"),
                )
                .arg(
                    Arg::with_name("to")
                        // .index(1)
                        .long("to")
                        .value_name("PUBKEY")
                        .takes_value(true)
                        .help("The pubkey of recipient"),
                ),
        )
        .subcommand(
            SubCommand::with_name("confirm")
                .about("Confirm your payment by signature")
                .arg(
                    Arg::with_name("signature")
                        .index(1)
                        .value_name("SIGNATURE")
                        .required(true)
                        .help("The transaction signature to confirm"),
                ),
        )
        .subcommand(SubCommand::with_name("balance").about("Get your balance"))
        .subcommand(SubCommand::with_name("address").about("Get your public key"))
        .get_matches();

    let leader: NodeInfo;
    if let Some(l) = matches.value_of("leader") {
        leader = read_leader(l)?.node_info;
    } else {
        let server_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 8000);
        leader = NodeInfo::new_leader(&server_addr);
    }

    let mut path = dirs::home_dir().expect("home directory");
    let id_path = if matches.is_present("keypair") {
        matches.value_of("keypair").unwrap()
    } else {
        path.extend(&[".config", "solana", "id.json"]);
        path.to_str().unwrap()
    };
    let id = read_keypair(id_path).or_else(|err| {
        Err(WalletError::BadParameter(format!(
            "{}: Unable to open keypair file: {}",
            err, id_path
        )))
    })?;

    let mut drone_addr = leader.contact_info.tpu;
    drone_addr.set_port(DRONE_PORT);

    let command = match matches.subcommand() {
        ("airdrop", Some(airdrop_matches)) => {
            let tokens = airdrop_matches.value_of("tokens").unwrap().parse()?;
            Ok(WalletCommand::AirDrop(tokens))
        }
        ("pay", Some(pay_matches)) => {
            let to = if pay_matches.is_present("to") {
                let pubkey_vec = bs58::decode(pay_matches.value_of("to").unwrap())
                    .into_vec()
                    .expect("base58-encoded public key");

                if pubkey_vec.len() != std::mem::size_of::<Pubkey>() {
                    eprintln!("{}", pay_matches.usage());
                    Err(WalletError::BadParameter("Invalid public key".to_string()))?;
                }
                Pubkey::new(&pubkey_vec)
            } else {
                id.pubkey()
            };

            let tokens = pay_matches.value_of("tokens").unwrap().parse()?;

            Ok(WalletCommand::Pay(tokens, to))
        }
        ("confirm", Some(confirm_matches)) => {
            let signatures = bs58::decode(confirm_matches.value_of("signature").unwrap())
                .into_vec()
                .expect("base58-encoded signature");

            if signatures.len() == std::mem::size_of::<Signature>() {
                let signature = Signature::new(&signatures);
                Ok(WalletCommand::Confirm(signature))
            } else {
                eprintln!("{}", confirm_matches.usage());
                Err(WalletError::BadParameter("Invalid signature".to_string()))
            }
        }
        ("balance", Some(_balance_matches)) => Ok(WalletCommand::Balance),
        ("address", Some(_address_matches)) => Ok(WalletCommand::Address),
        ("", None) => {
            println!("{}", matches.usage());
            Err(WalletError::CommandNotRecognized(
                "no subcommand given".to_string(),
            ))
        }
        _ => unreachable!(),
    }?;

    Ok(WalletConfig {
        leader,
        id,
        drone_addr, // TODO: Add an option for this.
        command,
    })
}
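// Hedged sketch, not part of wallet.rs: the base58 length check used by the
// pay and confirm arms above, isolated. Ed25519 public keys are 32 bytes and
// signatures 64, which is what the size_of checks above amount to;
// `expected` parameterizes that.
fn decode_b58_exact(s: &str, expected: usize) -> Result<Vec<u8>, String> {
    let v = bs58::decode(s).into_vec().map_err(|e| format!("{:?}", e))?;
    if v.len() != expected {
        return Err(format!("expected {} bytes, got {}", expected, v.len()));
    }
    Ok(v)
}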

fn process_command(
    config: &WalletConfig,
    client: &mut ThinClient,
) -> Result<(), Box<error::Error>> {
    match config.command {
        // Print the wallet's address (public key)
        WalletCommand::Address => {
            println!("{}", config.id.pubkey());
        }
        // Check client balance
        WalletCommand::Balance => {
            println!("Balance requested...");
            let balance = client.poll_get_balance(&config.id.pubkey());
            match balance {
                Ok(balance) => {
                    println!("Your balance is: {:?}", balance);
                }
                Err(ref e) if e.kind() == std::io::ErrorKind::Other => {
                    println!("No account found! Request an airdrop to get started.");
                }
                Err(error) => {
                    println!("An error occurred: {:?}", error);
                    Err(error)?;
                }
            }
        }
        // Request an airdrop from the Solana Drone;
        // the requested amount is passed through to request_airdrop below
        WalletCommand::AirDrop(tokens) => {
            println!(
                "Requesting airdrop of {:?} tokens from {}",
                tokens, config.drone_addr
            );
            let previous_balance = client.poll_get_balance(&config.id.pubkey())?;
            request_airdrop(&config.drone_addr, &config.id.pubkey(), tokens as u64)?;

            // TODO: return airdrop Result from Drone instead of polling the
            // network
            let mut current_balance = previous_balance;
            for _ in 0..20 {
                sleep(Duration::from_millis(500));
                current_balance = client.poll_get_balance(&config.id.pubkey())?;
                if previous_balance != current_balance {
                    break;
                }
                println!(".");
            }
            println!("Your balance is: {:?}", current_balance);
            if current_balance - previous_balance != tokens {
                Err("Airdrop failed!")?;
            }
        }
        // Send tokens to the given recipient
        WalletCommand::Pay(tokens, to) => {
            let last_id = client.get_last_id();
            let signature = client.transfer(tokens, &config.id, to, &last_id)?;
            println!("{}", signature);
        }
        // Confirm the last client transaction by signature
        WalletCommand::Confirm(signature) => {
            if client.check_signature(&signature) {
                println!("Confirmed");
            } else {
                println!("Not found");
            }
        }
    }
    Ok(())
}

fn read_leader(path: &str) -> Result<Config, WalletError> {
    let file = File::open(path.to_string()).or_else(|err| {
        Err(WalletError::BadParameter(format!(
            "{}: Unable to open leader file: {}",
            err, path
        )))
    })?;

    serde_json::from_reader(file).or_else(|err| {
        Err(WalletError::BadParameter(format!(
            "{}: Failed to parse leader file: {}",
            err, path
        )))
    })
}

fn main() -> Result<(), Box<error::Error>> {
    logger::setup();
    let config = parse_args()?;
    let mut client = mk_client(&config.leader);
    process_command(&config, &mut client)
}