Compare commits
741 Commits
@@ -41,5 +41,5 @@ else
  point="job_stats,$point_tags $point_fields"
  multinode-demo/metrics_write_datapoint.sh "$point" || true
  scripts/metrics-write-datapoint.sh "$point" || true
fi
@@ -1,13 +1,27 @@
#!/bin/bash -e

[[ -n "$CARGO_TARGET_CACHE_NAME" ]] || exit 0
# Ensure the pattern "+++ ..." never occurs when |set -x| is set, as buildkite
# interprets this as the start of a log group.
# Ref: https://buildkite.com/docs/pipelines/managing-log-output
export PS4="++"

#
# Restore target/ from the previous CI build on this machine
#
(
  [[ -n "$CARGO_TARGET_CACHE_NAME" ]] || (
  d=$HOME/cargo-target-cache/"$CARGO_TARGET_CACHE_NAME"

  if [[ -d $d ]]; then
    du -hs "$d"
    read -r cacheSizeInGB _ < <(du -s --block-size=1000000000 "$d")
    if [[ $cacheSizeInGB -gt 5 ]]; then
      echo "$d has gotten too large, removing it"
      rm -rf "$d"
    fi
  fi

  mkdir -p "$d"/target
  set -x
  rsync -a --delete --link-dest="$d" "$d"/target .
)
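The restore relies on rsync's `--link-dest` option: files unchanged since the cached copy are hardlinked into the working tree rather than copied, so restoring a multi-gigabyte `target/` stays cheap. A minimal standalone sketch of that behavior, using hypothetical paths rather than the CI script itself:

```bash
# Seed a fake cache directory (hypothetical paths for illustration).
d=$HOME/cargo-target-cache/example
mkdir -p "$d"/target
echo "cached artifact" > "$d"/target/libfoo.rlib

# Restore: files identical to those under --link-dest are hardlinked, not copied.
rsync -a --delete --link-dest="$d" "$d"/target .

# Matching inode numbers confirm a hardlink (GNU coreutils stat).
stat -c '%i' "$d"/target/libfoo.rlib target/libfoo.rlib
```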
.github/ISSUE_TEMPLATE.md (new file, 6 lines)
@@ -0,0 +1,6 @@
#### Problem



#### Proposed Solution
.github/PULL_REQUEST_TEMPLATE.md (new file, 5 lines)
@@ -0,0 +1,5 @@
#### Problem

#### Summary of Changes

Fixes #
.github/RELEASE_TEMPLATE.md (new file, 28 lines)
@@ -0,0 +1,28 @@
# Release v0.X.Y <milestone name>

fun blurb about the name, what's in the release

## Major Features And Improvements

* bulleted
* list of features and improvements

## Breaking Changes

* bulleted
* list
* of
* protocol changes/breaks
* API breaks
* CLI changes
* etc.

## Bug Fixes and Other Changes

* can be pulled from commit log, or synthesized

## Thanks to our Contributors

This release contains contributions from many people at Solana, as well as:

pull from commit log
.gitignore (3 changed lines)
@@ -14,3 +14,6 @@ Cargo.lock

# test temp files, ledgers, etc.
/farf/

# log files
*.log
Cargo.toml (84 changed lines)
@@ -1,34 +1,35 @@
[package]
name = "solana"
description = "Blockchain, Rebuilt for Scale"
version = "0.7.1"
version = "0.10.0"
documentation = "https://docs.rs/solana"
homepage = "http://solana.com/"
readme = "README.md"
repository = "https://github.com/solana-labs/solana"
authors = [
    "Anatoly Yakovenko <anatoly@solana.com>",
    "Greg Fitzgerald <greg@solana.com>",
    "Stephen Akridge <stephen@solana.com>",
    "Michael Vines <mvines@solana.com>",
    "Rob Walker <rob@solana.com>",
    "Pankaj Garg <pankaj@solana.com>",
    "Tyera Eulberg <tyera@solana.com>",
]
authors = ["Solana Maintainers <maintainers@solana.com>"]
license = "Apache-2.0"

[[bin]]
name = "solana-bench-tps"
path = "src/bin/bench-tps.rs"
name = "solana-upload-perf"
path = "src/bin/upload-perf.rs"

[[bin]]
name = "solana-bench-streamer"
path = "src/bin/bench-streamer.rs"

[[bin]]
name = "solana-bench-tps"
path = "src/bin/bench-tps.rs"

[[bin]]
name = "solana-drone"
path = "src/bin/drone.rs"

[[bin]]
required-features = ["chacha"]
name = "solana-replicator"
path = "src/bin/replicator.rs"

[[bin]]
name = "solana-fullnode"
path = "src/bin/fullnode.rs"
@@ -57,63 +58,86 @@ path = "src/bin/wallet.rs"
codecov = { repository = "solana-labs/solana", branch = "master", service = "github" }

[features]
unstable = []
ipv6 = []
bpf_c = []
chacha = []
cuda = []
erasure = []
ipv6 = []
test = []
unstable = []

[dependencies]
atty = "0.2"
bincode = "1.0.0"
bs58 = "0.2.0"
byteorder = "1.2.1"
bytes = "0.4"
chrono = { version = "0.4.0", features = ["serde"] }
clap = "2.31"
dirs = "1.0.2"
elf = "0.0.10"
env_logger = "0.5.12"
futures = "0.1.21"
generic-array = { version = "0.11.1", default-features = false, features = ["serde"] }
generic-array = { version = "0.12.0", default-features = false, features = ["serde"] }
getopts = "0.2"
influx_db_client = "0.3.4"
hex-literal = "0.1.1"
influx_db_client = "0.3.6"
solana-jsonrpc-core = "0.3.0"
solana-jsonrpc-http-server = "0.3.0"
solana-jsonrpc-macros = "0.3.0"
solana-jsonrpc-pubsub = "0.3.0"
solana-jsonrpc-ws-server = "0.3.0"
ipnetwork = "0.12.7"
itertools = "0.7.8"
libc = "0.2.1"
libc = "0.2.43"
libloading = "0.5.0"
log = "0.4.2"
matches = "0.1.6"
nix = "0.11.0"
pnet_datalink = "0.21.0"
rand = "0.5.1"
rayon = "1.0.0"
reqwest = "0.8.6"
reqwest = "0.9.0"
ring = "0.13.2"
sha2 = "0.7.0"
sha2 = "0.8.0"
serde = "1.0.27"
serde_cbor = "0.9.0"
serde_derive = "1.0.27"
serde_json = "1.0.10"
socket2 = "0.3.8"
solana-sdk = { path = "sdk", version = "0.10.0" }
sys-info = "0.5.6"
tokio = "0.1"
tokio-codec = "0.1"
tokio-core = "0.1.17"
tokio-io = "0.1"
untrusted = "0.6.2"

[dev-dependencies]
criterion = "0.2"
solana-noop = { path = "programs/native/noop", version = "0.10.0" }
solana-bpfloader = { path = "programs/native/bpf_loader", version = "0.10.0" }
solana-lualoader = { path = "programs/native/lua_loader", version = "0.10.0" }

[[bench]]
name = "bank"
harness = false

[[bench]]
name = "banking_stage"
harness = false

[[bench]]
name = "ledger"
harness = false

[[bench]]
name = "signature"
harness = false

[[bench]]
name = "sigverify"
harness = false

[[bench]]
required-features = ["chacha"]
name = "chacha"

[workspace]
members = [
    ".",
    "sdk",
    "programs/native/noop",
    "programs/native/bpf_loader",
    "programs/native/lua_loader",
    "programs/bpf/rust/noop",
]
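Note the `required-features = ["chacha"]` entries on the solana-replicator binary and the chacha bench: cargo silently skips such targets unless the feature is enabled. A quick illustration using standard cargo flags (nothing project-specific beyond the target names above):

```bash
# Builds all targets except those gated on disabled features
cargo build

# Opt in to the chacha-gated binary and benchmark
cargo build --features=chacha --bin solana-replicator
cargo +nightly bench --features=chacha --bench chacha
```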
README.md (81 changed lines)
@@ -17,7 +17,11 @@ All claims, content, designs, algorithms, estimates, roadmaps, specifications, a
Introduction
===

It's possible for a centralized database to process 710,000 transactions per second on a standard gigabit network if the transactions are, on average, no more than 176 bytes. A centralized database can also replicate itself and maintain high availability without significantly compromising that transaction rate using the distributed system technique known as Optimistic Concurrency Control [H.T.Kung, J.T.Robinson (1981)]. At Solana, we're demonstrating that these same theoretical limits apply just as well to blockchain on an adversarial network. The key ingredient? Finding a way to share time when nodes can't trust one-another. Once nodes can trust time, suddenly ~40 years of distributed systems research becomes applicable to blockchain! Furthermore, and much to our surprise, it can implemented using a mechanism that has existed in Bitcoin since day one. The Bitcoin feature is called nLocktime and it can be used to postdate transactions using block height instead of a timestamp. As a Bitcoin client, you'd use block height instead of a timestamp if you don't trust the network. Block height turns out to be an instance of what's being called a Verifiable Delay Function in cryptography circles. It's a cryptographically secure way to say time has passed. In Solana, we use a far more granular verifiable delay function, a SHA 256 hash chain, to checkpoint the ledger and coordinate consensus. With it, we implement Optimistic Concurrency Control and are now well in route towards that theoretical limit of 710,000 transactions per second.
It's possible for a centralized database to process 710,000 transactions per second on a standard gigabit network if the transactions are, on average, no more than 176 bytes. A centralized database can also replicate itself and maintain high availability without significantly compromising that transaction rate using the distributed system technique known as Optimistic Concurrency Control [\[H.T.Kung, J.T.Robinson (1981)\]](http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.65.4735). At Solana, we're demonstrating that these same theoretical limits apply just as well to blockchain on an adversarial network. The key ingredient? Finding a way to share time when nodes can't trust one-another. Once nodes can trust time, suddenly ~40 years of distributed systems research becomes applicable to blockchain!

> Perhaps the most striking difference between algorithms obtained by our method and ones based upon timeout is that using timeout produces a traditional distributed algorithm in which the processes operate asynchronously, while our method produces a globally synchronous one in which every process does the same thing at (approximately) the same time. Our method seems to contradict the whole purpose of distributed processing, which is to permit different processes to operate independently and perform different functions. However, if a distributed system is really a single system, then the processes must be synchronized in some way. Conceptually, the easiest way to synchronize processes is to get them all to do the same thing at the same time. Therefore, our method is used to implement a kernel that performs the necessary synchronization--for example, making sure that two different processes do not try to modify a file at the same time. Processes might spend only a small fraction of their time executing the synchronizing kernel; the rest of the time, they can operate independently--e.g., accessing different files. This is an approach we have advocated even when fault-tolerance is not required. The method's basic simplicity makes it easier to understand the precise properties of a system, which is crucial if one is to know just how fault-tolerant the system is. [\[L.Lamport (1984)\]](http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.71.1078)

Furthermore, and much to our surprise, it can be implemented using a mechanism that has existed in Bitcoin since day one. The Bitcoin feature is called nLocktime and it can be used to postdate transactions using block height instead of a timestamp. As a Bitcoin client, you'd use block height instead of a timestamp if you don't trust the network. Block height turns out to be an instance of what's being called a Verifiable Delay Function in cryptography circles. It's a cryptographically secure way to say time has passed. In Solana, we use a far more granular verifiable delay function, a SHA 256 hash chain, to checkpoint the ledger and coordinate consensus. With it, we implement Optimistic Concurrency Control and are now well en route towards that theoretical limit of 710,000 transactions per second.


Testnet Demos
@@ -58,7 +62,7 @@ your odds of success if you check out the
before proceeding:

```bash
$ git checkout v0.7.0-beta
$ git checkout v0.8.0
```
Configuration Setup
@@ -92,45 +96,47 @@ Before you start a fullnode, make sure you know the IP address of the machine yo
want to be the leader for the demo, and make sure that udp ports 8000-10000 are
open on all the machines you want to test with.

Now start the server:
Now start the server in a separate shell:

```bash
$ ./multinode-demo/leader.sh
```

Wait a few seconds for the server to initialize. It will print "Ready." when it's ready to
Wait a few seconds for the server to initialize. It will print "leader ready..." when it's ready to
receive transactions. The leader will request some tokens from the drone if it doesn't have any.
The drone does not need to be running for subsequent leader starts.

Multinode Testnet
---

To run a multinode testnet, after starting a leader node, spin up some validator nodes:
To run a multinode testnet, after starting a leader node, spin up some validator nodes in
separate shells:

```bash
$ ./multinode-demo/validator.sh ubuntu@10.0.1.51:~/solana 10.0.1.51
$ ./multinode-demo/validator.sh
```

To run a performance-enhanced leader or validator (on Linux),
[CUDA 9.2](https://developer.nvidia.com/cuda-downloads) must be installed on
your system:

```bash
$ ./fetch-perf-libs.sh
$ SOLANA_CUDA=1 ./multinode-demo/leader.sh
$ SOLANA_CUDA=1 ./multinode-demo/validator.sh ubuntu@10.0.1.51:~/solana 10.0.1.51

$ SOLANA_CUDA=1 ./multinode-demo/validator.sh
```


Testnet Client Demo
---

Now that your singlenode or multinode testnet is up and running, in a separate shell, let's send it some transactions! Note we pass in
the JSON configuration file here, not the genesis ledger.
Now that your singlenode or multinode testnet is up and running let's send it
some transactions!

In a separate shell start the client:

```bash
$ ./multinode-demo/client.sh ubuntu@10.0.1.51:~/solana 2 #The leader machine and the total number of nodes in the network
$ ./multinode-demo/client.sh # runs against localhost by default
```

What just happened? The client demo spins up several threads to send 500,000 transactions
@@ -142,21 +148,35 @@ demo completes after it has convinced itself the testnet won't process any addit
transactions. You should see several TPS measurements printed to the screen. In the
multinode variation, you'll see TPS measurements for each validator node as well.

Public Testnet
--------------
In this example the client connects to our public testnet. To run validators on the testnet you would need to open udp ports `8000-10000`.

```bash
$ ./multinode-demo/client.sh --network $(dig +short testnet.solana.com):8001 --identity config-private/client-id.json --duration 60
```

You can observe the effects of your client's transactions on our [dashboard](https://metrics.solana.com:3000/d/testnet/testnet-hud?orgId=2&from=now-30m&to=now&refresh=5s&var-testnet=testnet)


Linux Snap
---
A Linux [Snap](https://snapcraft.io/) is available, which can be used to
easily get Solana running on supported Linux systems without building anything
from source. The `edge` Snap channel is updated daily with the latest
development from the `master` branch. To install:

```bash
$ sudo snap install solana --edge --devmode
```

(`--devmode` flag is required only for `solana.fullnode-cuda`)

Once installed the usual Solana programs will be available as `solana.*` instead
of `solana-*`. For example, `solana.fullnode` instead of `solana-fullnode`.

Update to the latest version at any time with:

```bash
$ snap info solana
$ sudo snap refresh solana --devmode
@@ -176,6 +196,7 @@ contains the latest log, and the files `*.s` (if present) contain older rotated
logs.

Disable the daemon at any time by running:

```bash
$ sudo snap set solana mode=
```
@@ -184,11 +205,13 @@ Runtime configuration files for the daemon can be found in
`/var/snap/solana/current/config`.

#### Leader daemon

```bash
$ sudo snap set solana mode=leader
```

If CUDA is available:

```bash
$ sudo snap set solana mode=leader enable-cuda=1
```
@@ -211,26 +234,31 @@ to port tcp:873, tcp:9900 and the port range udp:8000-udp:10000**


To run both the Leader and Drone:

```bash
$ sudo snap set solana mode=leader+drone

```

#### Validator daemon

```bash
$ sudo snap set solana mode=validator

```
If CUDA is available:

```bash
$ sudo snap set solana mode=validator enable-cuda=1
```

By default the validator will connect to **testnet.solana.com**, override
the leader IP address by running:

```bash
$ sudo snap set solana mode=validator leader-address=127.0.0.1 #<-- change IP address
```

It's assumed that the leader will be running `rsync` configured as described in
the previous **Leader daemon** section.
@@ -254,9 +282,10 @@ If your rustc version is lower than 1.26.1, please update it:
$ rustup update
```

On Linux systems you may need to install libssl-dev and pkg-config. On Ubuntu:
On Linux systems you may need to install libssl-dev, pkg-config, zlib1g-dev, etc. On Ubuntu:

```bash
$ sudo apt-get install libssl-dev pkg-config
$ sudo apt-get install libssl-dev pkg-config zlib1g-dev
```

Download the source code:
@@ -276,6 +305,7 @@ $ cargo test
```

To emulate all the tests that will run on a Pull Request, run:

```bash
$ ./ci/run-local.sh
```
@@ -284,17 +314,21 @@ Debugging
---

There are some useful debug messages in the code, you can enable them on a per-module and per-level
basis with the normal RUST\_LOG environment variable. Run the fullnode with this syntax:
basis. Before running a leader or validator set the normal RUST\_LOG environment variable.

For example, to enable info everywhere and debug only in the solana::banking_stage module:

```bash
$ RUST_LOG=solana::streamer=debug,solana::server=info cat genesis.log | ./target/release/solana-fullnode > transactions0.log
$ export RUST_LOG=info,solana::banking_stage=debug
```
to see the debug and info sections for streamer and server respectively. Generally
we are using debug for infrequent debug messages, trace for potentially frequent messages and
info for performance-related logging.

Attaching to a running process with gdb:
Generally we are using debug for infrequent debug messages, trace for potentially frequent
messages and info for performance-related logging.
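The same `RUST_LOG` filter syntax composes across modules and levels, so the convention above can be applied selectively. For instance (an illustrative combination, not a required setting), keep performance logging on everywhere while tracing one busy module:

```bash
$ export RUST_LOG=info,solana::streamer=trace
```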
You can also attach to a running process with GDB. The leader's process is named
_solana-fullnode_:

```bash
$ sudo gdb
attach <PID>
set logging on
@@ -318,6 +352,11 @@ Run the benchmarks:
$ cargo +nightly bench --features="unstable"
```

Release Process
---
The release process for this project is described [here](rfcs/rfc-005-branches-tags-and-channels.md).


Code coverage
---
RELEASE.md (new file, 32 lines)
@@ -0,0 +1,32 @@
# Solana Release process

## Introduction

Solana uses a channel-oriented, date-based branching process described [here](https://github.com/solana-labs/solana/blob/master/rfcs/rfc-005-branches-tags-and-channels.md).

## Release Steps

### Changing channels

When cutting a new channel branch these pre-steps are required:

1. Pick your branch point for release on master.
2. Create the branch. The name should be "v" + the first 2 "version" fields from Cargo.toml. For example, a Cargo.toml with version = "0.9.0" implies the next branch name is "v0.9".
3. Update Cargo.toml to the next semantic version (e.g. 0.9.0 -> 0.10.0) by running `./scripts/increment-cargo-version.sh`.
4. Push your new branch to solana.git
5. Land your Cargo.toml change as a master PR.

At this point, ci/channel-info.sh should show your freshly cut release branch as "BETA_CHANNEL" and the previous release branch as "STABLE_CHANNEL".
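A minimal sketch of steps 1-5 as shell commands, assuming master sits at version 0.9.0; the branch point and remote name are illustrative:

```bash
# Steps 1-2: branch from the chosen point; name is "v" + major.minor
git checkout master
git checkout -b v0.9

# Step 4: push the new channel branch to solana.git
git push origin v0.9

# Steps 3 and 5: bump master to the next semantic version (0.9.0 -> 0.10.0)
# and land the Cargo.toml change as a master PR
git checkout master
./scripts/increment-cargo-version.sh
```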
### Updating channels (i.e. "making a release")

We use [github's Releases UI](https://github.com/solana-labs/solana/releases) for tagging a release.

1. Go [there ;)](https://github.com/solana-labs/solana/releases).
2. Click "Draft new release".
3. If the first major release on the branch (e.g. v0.8.0), paste in [this template](https://raw.githubusercontent.com/solana-labs/solana/master/.github/RELEASE_TEMPLATE.md) and fill it in.
4. Test the release by generating a tag using semver's rules. First try at a release should be <branchname>.X-rc.0.
5. Verify release automation:
   1. [Crates.io](https://crates.io/crates/solana) should have an updated Solana version.
   2. ...
6. After testnet deployment, verify that testnets are running correct software. http://metrics.solana.com should show testnet running on a hash from your newly created branch.
@@ -1 +0,0 @@
theme: jekyll-theme-slate
@@ -1,66 +1,57 @@
#[macro_use]
extern crate criterion;
#![feature(test)]
extern crate bincode;
extern crate rayon;
extern crate solana;
extern crate test;

use bincode::serialize;
use criterion::{Bencher, Criterion};
use rayon::prelude::*;
use solana::bank::*;
use solana::hash::hash;
use solana::mint::Mint;
use solana::signature::{Keypair, KeypairUtil};
use solana::system_transaction::SystemTransaction;
use solana::transaction::Transaction;
use test::Bencher;

#[bench]
fn bench_process_transaction(bencher: &mut Bencher) {
    let mint = Mint::new(100_000_000);
    let bank = Bank::new(&mint);

    // Create transactions between unrelated parties.
    let transactions: Vec<_> = (0..4096)
        .into_par_iter()
        .map(|i| {
        .into_iter()
        .map(|_| {
            // Seed the 'from' account.
            let rando0 = Keypair::new();
            let tx = Transaction::new(&mint.keypair(), rando0.pubkey(), 10_000, mint.last_id());
            assert!(bank.process_transaction(&tx).is_ok());
            let tx = Transaction::system_move(
                &mint.keypair(),
                rando0.pubkey(),
                10_000,
                bank.last_id(),
                0,
            );
            assert_eq!(bank.process_transaction(&tx), Ok(()));

            // Seed the 'to' account and a cell for its signature.
            let last_id = hash(&serialize(&i).unwrap()); // Unique hash
            bank.register_entry_id(&last_id);

            let rando1 = Keypair::new();
            let tx = Transaction::new(&rando0, rando1.pubkey(), 1, last_id);
            assert!(bank.process_transaction(&tx).is_ok());
            let tx = Transaction::system_move(&rando0, rando1.pubkey(), 1, bank.last_id(), 0);
            assert_eq!(bank.process_transaction(&tx), Ok(()));

            // Finally, return the transaction to the benchmark.
            tx
        })
        .collect();
        }).collect();

    bencher.iter_with_setup(
        || {
            // Since benchmarker runs this multiple times, we need to clear the signatures.
            bank.clear_signatures();
            transactions.clone()
        },
        |transactions| {
            let results = bank.process_transactions(transactions);
            assert!(results.iter().all(Result::is_ok));
        },
    )
    let mut id = bank.last_id();

    for _ in 0..(MAX_ENTRY_IDS - 1) {
        bank.register_entry_id(&id);
        id = hash(&id.as_ref())
    }

    bencher.iter(|| {
        // Since benchmarker runs this multiple times, we need to clear the signatures.
        bank.clear_signatures();
        let results = bank.process_transactions(&transactions);
        assert!(results.iter().all(Result::is_ok));
    })
}

fn bench(criterion: &mut Criterion) {
    criterion.bench_function("bench_process_transaction", |bencher| {
        bench_process_transaction(bencher);
    });
}

criterion_group!(
    name = benches;
    config = Criterion::default().sample_size(2);
    targets = bench
);
criterion_main!(benches);
@@ -1,229 +1,228 @@
#![feature(test)]
extern crate bincode;
#[macro_use]
extern crate criterion;
extern crate rand;
extern crate rayon;
extern crate solana;
extern crate solana_sdk;
extern crate test;

use criterion::{Bencher, Criterion};
use rand::{thread_rng, Rng};
use rayon::prelude::*;
use solana::bank::Bank;
use solana::banking_stage::BankingStage;
use solana::bank::{Bank, MAX_ENTRY_IDS};
use solana::banking_stage::{BankingStage, NUM_THREADS};
use solana::entry::Entry;
use solana::hash::hash;
use solana::mint::Mint;
use solana::packet::{to_packets_chunked, PacketRecycler};
use solana::record_stage::Signal;
use solana::signature::{Keypair, KeypairUtil};
use solana::packet::to_packets_chunked;
use solana::signature::{KeypairUtil, Signature};
use solana::system_transaction::SystemTransaction;
use solana::transaction::Transaction;
use solana_sdk::pubkey::Pubkey;
use std::iter;
use std::sync::mpsc::{channel, Receiver};
use std::sync::Arc;
use std::time::Duration;
use test::Bencher;

// use self::test::Bencher;
// use bank::{Bank, MAX_ENTRY_IDS};
// use bincode::serialize;
// use hash::hash;
// use mint::Mint;
// use rayon::prelude::*;
// use signature::{Keypair, KeypairUtil};
// use std::collections::HashSet;
// use std::time::Instant;
// use transaction::Transaction;
//
// fn bench_process_transactions(_bencher: &mut Bencher) {
//     let mint = Mint::new(100_000_000);
//     let bank = Bank::new(&mint);
//     // Create transactions between unrelated parties.
//     let txs = 100_000;
//     let last_ids: Mutex<HashSet<Hash>> = Mutex::new(HashSet::new());
//     let transactions: Vec<_> = (0..txs)
//         .into_par_iter()
//         .map(|i| {
//             // Seed the 'to' account and a cell for its signature.
//             let dummy_id = i % (MAX_ENTRY_IDS as i32);
//             let last_id = hash(&serialize(&dummy_id).unwrap()); // Semi-unique hash
//             {
//                 let mut last_ids = last_ids.lock().unwrap();
//                 if !last_ids.contains(&last_id) {
//                     last_ids.insert(last_id);
//                     bank.register_entry_id(&last_id);
//                 }
//             }
//
//             // Seed the 'from' account.
//             let rando0 = Keypair::new();
//             let tx = Transaction::new(&mint.keypair(), rando0.pubkey(), 1_000, last_id);
//             bank.process_transaction(&tx).unwrap();
//
//             let rando1 = Keypair::new();
//             let tx = Transaction::new(&rando0, rando1.pubkey(), 2, last_id);
//             bank.process_transaction(&tx).unwrap();
//
//             // Finally, return a transaction that's unique
//             Transaction::new(&rando0, rando1.pubkey(), 1, last_id)
//         })
//         .collect();
//
//     let banking_stage = EventProcessor::new(bank, &mint.last_id(), None);
//
//     let now = Instant::now();
//     assert!(banking_stage.process_transactions(transactions).is_ok());
//     let duration = now.elapsed();
//     let sec = duration.as_secs() as f64 + duration.subsec_nanos() as f64 / 1_000_000_000.0;
//     let tps = txs as f64 / sec;
//
//     // Ensure that all transactions were successfully logged.
//     drop(banking_stage.historian_input);
//     let entries: Vec<Entry> = banking_stage.output.lock().unwrap().iter().collect();
//     assert_eq!(entries.len(), 1);
//     assert_eq!(entries[0].transactions.len(), txs as usize);
//
//     println!("{} tps", tps);
// }
fn check_txs(receiver: &Receiver<Signal>, ref_tx_count: usize) {
fn check_txs(receiver: &Receiver<Vec<Entry>>, ref_tx_count: usize) {
    let mut total = 0;
    loop {
        let signal = receiver.recv().unwrap();
        if let Signal::Transactions(transactions) = signal {
            total += transactions.len();
            if total >= ref_tx_count {
                break;
        let entries = receiver.recv_timeout(Duration::new(1, 0));
        if let Ok(entries) = entries {
            for entry in &entries {
                total += entry.transactions.len();
            }
        } else {
            assert!(false);
            break;
        }
        if total >= ref_tx_count {
            break;
        }
    }
    assert_eq!(total, ref_tx_count);
}
#[bench]
fn bench_banking_stage_multi_accounts(bencher: &mut Bencher) {
    let tx = 10_000_usize;
    let txes = 1000 * NUM_THREADS;
    let mint_total = 1_000_000_000_000;
    let mint = Mint::new(mint_total);
    let num_dst_accounts = 8 * 1024;
    let num_src_accounts = 8 * 1024;

    let srckeys: Vec<_> = (0..num_src_accounts).map(|_| Keypair::new()).collect();
    let dstkeys: Vec<_> = (0..num_dst_accounts)
        .map(|_| Keypair::new().pubkey())
        .collect();

    let transactions: Vec<_> = (0..tx)
        .map(|i| {
            Transaction::new(
                &srckeys[i % num_src_accounts],
                dstkeys[i % num_dst_accounts],
                i as i64,
                mint.last_id(),
            )
        })
        .collect();

    let (verified_sender, verified_receiver) = channel();
    let (signal_sender, signal_receiver) = channel();
    let packet_recycler = PacketRecycler::default();

    let setup_transactions: Vec<_> = (0..num_src_accounts)
        .map(|i| {
            Transaction::new(
                &mint.keypair(),
                srckeys[i].pubkey(),
                mint_total / num_src_accounts as i64,
                mint.last_id(),
            )
        })
        .collect();

    bencher.iter(move || {
        let bank = Arc::new(Bank::new(&mint));

        let verified_setup: Vec<_> =
            to_packets_chunked(&packet_recycler, &setup_transactions.clone(), tx)
                .into_iter()
                .map(|x| {
                    let len = (*x).read().unwrap().packets.len();
                    (x, iter::repeat(1).take(len).collect())
                })
                .collect();

        let verified_setup_len = verified_setup.len();
        verified_sender.send(verified_setup).unwrap();
        BankingStage::process_packets(&bank, &verified_receiver, &signal_sender, &packet_recycler)
            .unwrap();

        check_txs(&signal_receiver, num_src_accounts);

        let verified: Vec<_> = to_packets_chunked(&packet_recycler, &transactions.clone(), 192)
            .into_iter()
            .map(|x| {
                let len = (*x).read().unwrap().packets.len();
                (x, iter::repeat(1).take(len).collect())
            })
            .collect();

        let verified_len = verified.len();
        verified_sender.send(verified).unwrap();
        BankingStage::process_packets(&bank, &verified_receiver, &signal_sender, &packet_recycler)
            .unwrap();

        check_txs(&signal_receiver, tx);
    let bank = Arc::new(Bank::new(&mint));
    let dummy = Transaction::system_move(
        &mint.keypair(),
        mint.keypair().pubkey(),
        1,
        mint.last_id(),
        0,
    );
    let transactions: Vec<_> = (0..txes)
        .into_par_iter()
        .map(|_| {
            let mut new = dummy.clone();
            let from: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
            let to: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
            let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
            new.account_keys[0] = Pubkey::new(&from[0..32]);
            new.account_keys[1] = Pubkey::new(&to[0..32]);
            new.signature = Signature::new(&sig[0..64]);
            new
        }).collect();
    // fund all the accounts
    transactions.iter().for_each(|tx| {
        let fund = Transaction::system_move(
            &mint.keypair(),
            tx.account_keys[0],
            mint_total / txes as i64,
            mint.last_id(),
            0,
        );
        assert!(bank.process_transaction(&fund).is_ok());
    });
}
    //sanity check, make sure all the transactions can execute sequentially
    transactions.iter().for_each(|tx| {
        let res = bank.process_transaction(&tx);
        assert!(res.is_ok(), "sanity test transactions");
    });
    bank.clear_signatures();
    //sanity check, make sure all the transactions can execute in parallel
    let res = bank.process_transactions(&transactions);
    for r in res {
        assert!(r.is_ok(), "sanity parallel execution");
    }
    bank.clear_signatures();
    let verified: Vec<_> = to_packets_chunked(&transactions.clone(), 192)
        .into_iter()
        .map(|x| {
            let len = x.read().unwrap().packets.len();
            (x, iter::repeat(1).take(len).collect())
        }).collect();
    let (_stage, signal_receiver) = BankingStage::new(
        &bank,
        verified_receiver,
        Default::default(),
        &mint.last_id(),
        0,
        None,
    );

fn bench_banking_stage_single_from(bencher: &mut Bencher) {
    let tx = 10_000_usize;
    let mint = Mint::new(1_000_000_000_000);
    let mut pubkeys = Vec::new();
    let num_keys = 8;
    for _ in 0..num_keys {
        pubkeys.push(Keypair::new().pubkey());
    let mut id = mint.last_id();
    for _ in 0..MAX_ENTRY_IDS {
        id = hash(&id.as_ref());
        bank.register_entry_id(&id);
    }

    let transactions: Vec<_> = (0..tx)
        .into_par_iter()
        .map(|i| {
            Transaction::new(
                &mint.keypair(),
                pubkeys[i % num_keys],
                i as i64,
                mint.last_id(),
            )
        })
        .collect();
    bencher.iter(move || {
        // make sure the tx last id is still registered
        if bank.count_valid_ids(&[mint.last_id()]).len() == 0 {
            bank.register_entry_id(&mint.last_id());
        }
        for v in verified.chunks(verified.len() / NUM_THREADS) {
            verified_sender.send(v.to_vec()).unwrap();
        }
        check_txs(&signal_receiver, txes);
        bank.clear_signatures();
    });
}
#[bench]
fn bench_banking_stage_multi_programs(bencher: &mut Bencher) {
    let progs = 5;
    let txes = 1000 * NUM_THREADS;
    let mint_total = 1_000_000_000_000;
    let mint = Mint::new(mint_total);

    let (verified_sender, verified_receiver) = channel();
    let (signal_sender, signal_receiver) = channel();
    let packet_recycler = PacketRecycler::default();
    let bank = Arc::new(Bank::new(&mint));
    let dummy = Transaction::system_move(
        &mint.keypair(),
        mint.keypair().pubkey(),
        1,
        mint.last_id(),
        0,
    );
    let transactions: Vec<_> = (0..txes)
        .into_par_iter()
        .map(|_| {
            let mut new = dummy.clone();
            let from: Vec<u8> = (0..32).map(|_| thread_rng().gen()).collect();
            let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
            let to: Vec<u8> = (0..32).map(|_| thread_rng().gen()).collect();
            new.account_keys[0] = Pubkey::new(&from[0..32]);
            new.account_keys[1] = Pubkey::new(&to[0..32]);
            let prog = new.instructions[0].clone();
            for i in 1..progs {
                //generate programs that spend to random keys
                let to: Vec<u8> = (0..32).map(|_| thread_rng().gen()).collect();
                let to_key = Pubkey::new(&to[0..32]);
                new.account_keys.push(to_key);
                assert_eq!(new.account_keys.len(), i + 2);
                new.instructions.push(prog.clone());
                assert_eq!(new.instructions.len(), i + 1);
                new.instructions[i].accounts[1] = 1 + i as u8;
                assert_eq!(new.key(i, 1), Some(&to_key));
                assert_eq!(
                    new.account_keys[new.instructions[i].accounts[1] as usize],
                    to_key
                );
            }
            assert_eq!(new.instructions.len(), progs);
            new.signature = Signature::new(&sig[0..64]);
            new
        }).collect();
    transactions.iter().for_each(|tx| {
        let fund = Transaction::system_move(
            &mint.keypair(),
            tx.account_keys[0],
            mint_total / txes as i64,
            mint.last_id(),
            0,
        );
        assert!(bank.process_transaction(&fund).is_ok());
    });
    //sanity check, make sure all the transactions can execute sequentially
    transactions.iter().for_each(|tx| {
        let res = bank.process_transaction(&tx);
        assert!(res.is_ok(), "sanity test transactions");
    });
    bank.clear_signatures();
    //sanity check, make sure all the transactions can execute in parallel
    let res = bank.process_transactions(&transactions);
    for r in res {
        assert!(r.is_ok(), "sanity parallel execution");
    }
    bank.clear_signatures();
    let verified: Vec<_> = to_packets_chunked(&transactions.clone(), 96)
        .into_iter()
        .map(|x| {
            let len = x.read().unwrap().packets.len();
            (x, iter::repeat(1).take(len).collect())
        }).collect();
    let (_stage, signal_receiver) = BankingStage::new(
        &bank,
        verified_receiver,
        Default::default(),
        &mint.last_id(),
        0,
        None,
    );

    let mut id = mint.last_id();
    for _ in 0..MAX_ENTRY_IDS {
        id = hash(&id.as_ref());
        bank.register_entry_id(&id);
    }

    bencher.iter(move || {
        let bank = Arc::new(Bank::new(&mint));
        let verified: Vec<_> = to_packets_chunked(&packet_recycler, &transactions.clone(), tx)
            .into_iter()
            .map(|x| {
                let len = (*x).read().unwrap().packets.len();
                (x, iter::repeat(1).take(len).collect())
            })
            .collect();
        let verified_len = verified.len();
        verified_sender.send(verified).unwrap();
        BankingStage::process_packets(&bank, &verified_receiver, &signal_sender, &packet_recycler)
            .unwrap();

        check_txs(&signal_receiver, tx);
        // make sure the transactions are still valid
        if bank.count_valid_ids(&[mint.last_id()]).len() == 0 {
            bank.register_entry_id(&mint.last_id());
        }
        for v in verified.chunks(verified.len() / NUM_THREADS) {
            verified_sender.send(v.to_vec()).unwrap();
        }
        check_txs(&signal_receiver, txes);
        bank.clear_signatures();
    });
}

fn bench(criterion: &mut Criterion) {
    criterion.bench_function("bench_banking_stage_multi_accounts", |bencher| {
        bench_banking_stage_multi_accounts(bencher);
    });
    criterion.bench_function("bench_process_stage_single_from", |bencher| {
        bench_banking_stage_single_from(bencher);
    });
}

criterion_group!(
    name = benches;
    config = Criterion::default().sample_size(2);
    targets = bench
);
criterion_main!(benches);
benches/chacha.rs (new file, 29 lines)
@@ -0,0 +1,29 @@
#![feature(test)]

extern crate solana;
extern crate test;

use solana::chacha::chacha_cbc_encrypt_files;
use std::fs::remove_file;
use std::fs::File;
use std::io::Write;
use std::path::Path;
use test::Bencher;

#[bench]
fn bench_chacha_encrypt(bench: &mut Bencher) {
    let in_path = Path::new("bench_chacha_encrypt_file_input.txt");
    let out_path = Path::new("bench_chacha_encrypt_file_output.txt.enc");
    {
        let mut in_file = File::create(in_path).unwrap();
        for _ in 0..1024 {
            in_file.write("123456foobar".as_bytes()).unwrap();
        }
    }
    bench.iter(move || {
        chacha_cbc_encrypt_files(in_path, out_path, "thetestkey".to_string()).unwrap();
    });

    remove_file(in_path).unwrap();
    remove_file(out_path).unwrap();
}
@@ -1,40 +1,25 @@
#[macro_use]
extern crate criterion;
#![feature(test)]
extern crate solana;
extern crate test;

use criterion::{Bencher, Criterion};
use solana::hash::{hash, Hash};
use solana::ledger::{next_entries, reconstruct_entries_from_blobs, Block};
use solana::packet::BlobRecycler;
use solana::signature::{Keypair, KeypairUtil};
use solana::system_transaction::SystemTransaction;
use solana::transaction::Transaction;
use std::collections::VecDeque;
use test::Bencher;

#[bench]
fn bench_block_to_blobs_to_block(bencher: &mut Bencher) {
    let zero = Hash::default();
    let one = hash(&zero.as_ref());
    let keypair = Keypair::new();
    let tx0 = Transaction::new(&keypair, keypair.pubkey(), 1, one);
    let tx0 = Transaction::system_move(&keypair, keypair.pubkey(), 1, one, 0);
    let transactions = vec![tx0; 10];
    let entries = next_entries(&zero, 1, transactions);

    let blob_recycler = BlobRecycler::default();
    bencher.iter(|| {
        let mut blob_q = VecDeque::new();
        entries.to_blobs(&blob_recycler, &mut blob_q);
        assert_eq!(reconstruct_entries_from_blobs(blob_q).unwrap(), entries);
        let blobs = entries.to_blobs();
        assert_eq!(reconstruct_entries_from_blobs(blobs).unwrap(), entries);
    });
}

fn bench(criterion: &mut Criterion) {
    criterion.bench_function("bench_block_to_blobs_to_block", |bencher| {
        bench_block_to_blobs_to_block(bencher);
    });
}

criterion_group!(
    name = benches;
    config = Criterion::default().sample_size(2);
    targets = bench
);
criterion_main!(benches);
@@ -1,24 +1,12 @@
#[macro_use]
extern crate criterion;
#![feature(test)]
extern crate solana;
extern crate test;

use criterion::{Bencher, Criterion};
use solana::signature::GenKeys;
use test::Bencher;

#[bench]
fn bench_gen_keys(b: &mut Bencher) {
    let mut rnd = GenKeys::new([0u8; 32]);
    b.iter(|| rnd.gen_n_keypairs(1000));
}

fn bench(criterion: &mut Criterion) {
    criterion.bench_function("bench_gen_keys", |bencher| {
        bench_gen_keys(bencher);
    });
}

criterion_group!(
    name = benches;
    config = Criterion::default().sample_size(2);
    targets = bench
);
criterion_main!(benches);
@@ -1,36 +1,23 @@
#[macro_use]
extern crate criterion;
#![feature(test)]
extern crate bincode;
extern crate rayon;
extern crate solana;
extern crate test;

use criterion::{Bencher, Criterion};
use solana::packet::{to_packets, PacketRecycler};
use solana::packet::to_packets;
use solana::sigverify;
use solana::transaction::test_tx;
use solana::system_transaction::test_tx;
use test::Bencher;

#[bench]
fn bench_sigverify(bencher: &mut Bencher) {
    let tx = test_tx();

    // generate packet vector
    let packet_recycler = PacketRecycler::default();
    let batches = to_packets(&packet_recycler, &vec![tx; 128]);
    let batches = to_packets(&vec![tx; 128]);

    // verify packets
    bencher.iter(|| {
        let _ans = sigverify::ed25519_verify(&batches);
    })
}

fn bench(criterion: &mut Criterion) {
    criterion.bench_function("bench_sigverify", |bencher| {
        bench_sigverify(bencher);
    });
}

criterion_group!(
    name = benches;
    config = Criterion::default().sample_size(2);
    targets = bench
);
criterion_main!(benches);
56
build.rs
@ -1,15 +1,63 @@
|
||||
use std::env;
|
||||
use std::fs;
|
||||
use std::process::Command;
|
||||
|
||||
fn main() {
|
||||
println!("cargo:rustc-link-search=native=.");
|
||||
if !env::var("CARGO_FEATURE_CUDA").is_err() {
|
||||
println!("cargo:rustc-link-lib=static=cuda_verify_ed25519");
|
||||
println!("cargo:rerun-if-changed=build.rs");
|
||||
|
||||
// Ensure target/perf-libs/ exists. It's been observed that
|
||||
// a cargo:rerun-if-changed= directive with a non-existent
|
||||
// directory triggers a rebuild on every |cargo build| invocation
|
||||
fs::create_dir("target/perf-libs").unwrap_or_else(|err| {
|
||||
if err.kind() != std::io::ErrorKind::AlreadyExists {
|
||||
panic!("Unable to create target/perf-libs: {:?}", err);
|
||||
}
|
||||
});
|
||||
|
||||
let bpf_c = !env::var("CARGO_FEATURE_BPF_C").is_err();
|
||||
let chacha = !env::var("CARGO_FEATURE_CHACHA").is_err();
|
||||
let cuda = !env::var("CARGO_FEATURE_CUDA").is_err();
|
||||
let erasure = !env::var("CARGO_FEATURE_ERASURE").is_err();
|
||||
|
||||
if bpf_c {
|
||||
let out_dir = "OUT_DIR=../../../target/".to_string()
|
||||
+ &env::var("PROFILE").unwrap()
|
||||
+ &"/bpf".to_string();
|
||||
|
||||
println!("cargo:rerun-if-changed=programs/bpf/c/sdk/bpf.mk");
|
||||
println!("cargo:rerun-if-changed=programs/bpf/c/sdk/inc/solana_sdk.h");
|
||||
println!("cargo:rerun-if-changed=programs/bpf/c/makefile");
|
||||
println!("cargo:rerun-if-changed=programs/bpf/c/src/move_funds.c");
|
||||
println!("cargo:rerun-if-changed=programs/bpf/c/src/noop.c");
|
||||
println!("cargo:rerun-if-changed=programs/bpf/c/src/tictactoe.c");
|
||||
println!("cargo:rerun-if-changed=programs/bpf/c/src/tictactoe_dashboard.c");
|
||||
println!("cargo:warning=(not a warning) Compiling C-based BPF programs");
|
||||
let status = Command::new("make")
|
||||
.current_dir("programs/bpf/c")
|
||||
.arg("all")
|
||||
.arg(&out_dir)
|
||||
.status()
|
||||
.expect("Failed to build C-based BPF programs");
|
||||
assert!(status.success());
|
||||
}
|
||||
if chacha || cuda || erasure {
|
||||
println!("cargo:rerun-if-changed=target/perf-libs");
|
||||
println!("cargo:rustc-link-search=native=target/perf-libs");
|
||||
}
|
||||
if chacha {
|
||||
println!("cargo:rerun-if-changed=target/perf-libs/libcpu-crypt.a");
|
||||
}
|
||||
if cuda {
|
||||
println!("cargo:rerun-if-changed=target/perf-libs/libcuda-crypt.a");
|
||||
println!("cargo:rustc-link-lib=static=cuda-crypt");
|
||||
println!("cargo:rustc-link-search=native=/usr/local/cuda/lib64");
|
||||
println!("cargo:rustc-link-lib=dylib=cudart");
|
||||
println!("cargo:rustc-link-lib=dylib=cuda");
|
||||
println!("cargo:rustc-link-lib=dylib=cudadevrt");
|
||||
}
|
||||
if !env::var("CARGO_FEATURE_ERASURE").is_err() {
|
||||
if erasure {
|
||||
println!("cargo:rerun-if-changed=target/perf-libs/libgf_complete.so");
|
||||
println!("cargo:rerun-if-changed=target/perf-libs/libJerasure.so");
|
||||
println!("cargo:rustc-link-lib=dylib=Jerasure");
|
||||
println!("cargo:rustc-link-lib=dylib=gf_complete");
|
||||
}
|
||||
|
@ -16,10 +16,10 @@ _() {
|
||||
|
||||
maybe_cargo_install() {
|
||||
for cmd in "$@"; do
|
||||
set +e
|
||||
set +e
|
||||
cargo "$cmd" --help > /dev/null 2>&1
|
||||
declare exitcode=$?
|
||||
set -e
|
||||
set -e
|
||||
if [[ $exitcode -eq 101 ]]; then
|
||||
_ cargo install cargo-"$cmd"
|
||||
fi
|
||||
|
@ -2,3 +2,15 @@ steps:
|
||||
- command: "ci/snap.sh"
|
||||
timeout_in_minutes: 40
|
||||
name: "snap [public]"
|
||||
- command: "ci/docker-solana/build.sh"
|
||||
timeout_in_minutes: 20
|
||||
name: "docker-solana"
|
||||
- command: "ci/publish-crate.sh"
|
||||
timeout_in_minutes: 20
|
||||
name: "publish crate [public]"
|
||||
- command: "ci/publish-bpf-sdk.sh"
|
||||
timeout_in_minutes: 5
|
||||
name: "publish bpf sdk"
|
||||
- command: "ci/publish-solana-tar.sh"
|
||||
timeout_in_minutes: 15
|
||||
name: "publish solana release tar"
|
||||
|
@ -1,18 +1,18 @@
|
||||
steps:
|
||||
- command: "ci/docker-run.sh solanalabs/rust ci/test-stable.sh"
|
||||
- command: "ci/docker-run.sh solanalabs/rust:1.30.0 ci/test-stable.sh"
|
||||
name: "stable [public]"
|
||||
env:
|
||||
CARGO_TARGET_CACHE_NAME: "stable"
|
||||
timeout_in_minutes: 30
|
||||
- command: "ci/docker-run.sh solanalabs/rust ci/test-bench.sh"
|
||||
- command: "ci/docker-run.sh solanalabs/rust-nightly:2018-10-04 ci/test-bench.sh"
|
||||
name: "bench [public]"
|
||||
env:
|
||||
CARGO_TARGET_CACHE_NAME: "stable"
|
||||
CARGO_TARGET_CACHE_NAME: "nightly"
|
||||
timeout_in_minutes: 30
|
||||
- command: "ci/shellcheck.sh"
|
||||
name: "shellcheck [public]"
|
||||
timeout_in_minutes: 20
|
||||
- command: "ci/docker-run.sh solanalabs/rust-nightly ci/test-nightly.sh"
|
||||
- command: "ci/docker-run.sh solanalabs/rust-nightly:2018-10-04 ci/test-nightly.sh"
|
||||
name: "nightly [public]"
|
||||
env:
|
||||
CARGO_TARGET_CACHE_NAME: "nightly"
|
||||
@ -24,20 +24,18 @@ steps:
|
||||
timeout_in_minutes: 20
|
||||
agents:
|
||||
- "queue=cuda"
|
||||
- command: "ci/test-large-network.sh"
|
||||
name: "large-network [public]"
|
||||
env:
|
||||
CARGO_TARGET_CACHE_NAME: "stable"
|
||||
timeout_in_minutes: 20
|
||||
agents:
|
||||
- "queue=large"
|
||||
# TODO: Fix and re-enable test-large-network.sh
|
||||
# - command: "ci/test-large-network.sh || true"
|
||||
# name: "large-network [public] [ignored]"
|
||||
# env:
|
||||
# CARGO_TARGET_CACHE_NAME: "stable"
|
||||
# timeout_in_minutes: 20
|
||||
# agents:
|
||||
# - "queue=large"
|
||||
- command: "ci/pr-snap.sh"
|
||||
timeout_in_minutes: 20
|
||||
name: "snap [public]"
|
||||
- wait
|
||||
- command: "ci/publish-crate.sh"
|
||||
timeout_in_minutes: 20
|
||||
name: "publish crate [public]"
|
||||
- trigger: "solana-snap"
|
||||
branches: "!pull/*"
|
||||
async: true
|
||||
|
91
ci/channel-info.sh
Executable file
@ -0,0 +1,91 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Computes the current branch names of the edge, beta and stable
|
||||
# channels, as well as the latest tagged release for beta and stable.
|
||||
#
|
||||
# stdout of this script may be eval-ed
|
||||
#
|
||||
|
||||
here="$(dirname "$0")"
|
||||
|
||||
# shellcheck source=ci/semver_bash/semver.sh
|
||||
source "$here"/semver_bash/semver.sh
|
||||
|
||||
remote=https://github.com/solana-labs/solana.git
|
||||
|
||||
# Fetch all vX.Y.Z tags
|
||||
#
|
||||
# NOTE: pre-release tags are explicitly ignored
|
||||
#
|
||||
# shellcheck disable=SC2207
|
||||
tags=( \
|
||||
$(git ls-remote --tags $remote \
|
||||
| cut -c52- \
|
||||
| grep '^v[[:digit:]][[:digit:]]*\.[[:digit:]][[:digit:]]*.[[:digit:]][[:digit:]]*$' \
|
||||
| cut -c2- \
|
||||
) \
|
||||
)
|
||||
|
||||
# Fetch all the vX.Y branches
|
||||
#
|
||||
# shellcheck disable=SC2207
|
||||
heads=( \
|
||||
$(git ls-remote --heads $remote \
|
||||
| cut -c53- \
|
||||
| grep '^v[[:digit:]][[:digit:]]*\.[[:digit:]][[:digit:]]*$' \
|
||||
| cut -c2- \
|
||||
) \
|
||||
)
|
||||
|
||||
# Figure the beta channel by looking for the largest vX.Y branch
|
||||
beta=
|
||||
for head in "${heads[@]}"; do
|
||||
if [[ -n $beta ]]; then
|
||||
if semverLT "$head.0" "$beta.0"; then
|
||||
continue
|
||||
fi
|
||||
fi
|
||||
beta=$head
|
||||
done
|
||||
|
||||
# Figure the stable channel by looking for the second largest vX.Y branch
|
||||
stable=
|
||||
for head in "${heads[@]}"; do
|
||||
if [[ $head = "$beta" ]]; then
|
||||
continue
|
||||
fi
|
||||
if [[ -n $stable ]]; then
|
||||
if semverLT "$head.0" "$stable.0"; then
|
||||
continue
|
||||
fi
|
||||
fi
|
||||
stable=$head
|
||||
done
|
||||
|
||||
for tag in "${tags[@]}"; do
|
||||
if [[ -n $beta && $tag = $beta* ]]; then
|
||||
if [[ -n $beta_tag ]]; then
|
||||
if semverLT "$tag" "$beta_tag"; then
|
||||
continue
|
||||
fi
|
||||
fi
|
||||
beta_tag=$tag
|
||||
fi
|
||||
|
||||
if [[ -n $stable && $tag = $stable* ]]; then
|
||||
if [[ -n $stable_tag ]]; then
|
||||
if semverLT "$tag" "$stable_tag"; then
|
||||
continue
|
||||
fi
|
||||
fi
|
||||
stable_tag=$tag
|
||||
fi
|
||||
done
|
||||
|
||||
echo EDGE_CHANNEL=master
|
||||
echo BETA_CHANNEL="${beta:+v$beta}"
|
||||
echo STABLE_CHANNEL="${stable:+v$stable}"
|
||||
echo BETA_CHANNEL_LATEST_TAG="${beta_tag:+v$beta_tag}"
|
||||
echo STABLE_CHANNEL_LATEST_TAG="${stable_tag:+v$stable_tag}"
|
||||
|
||||
exit 0
|
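Since the script's stdout is designed to be eval-ed, a typical consumer imports the channel variables directly. A minimal usage sketch, assuming only the variable names emitted by the `echo` lines above:

```bash
# Pull the EDGE/BETA/STABLE channel variables into the current shell.
eval "$(ci/channel-info.sh)"

# They can then drive channel-dependent logic, e.g.:
if [[ $BUILDKITE_BRANCH = "$BETA_CHANNEL" ]]; then
  echo "This branch publishes to beta (latest tag: $BETA_CHANNEL_LATEST_TAG)"
fi
```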
16
ci/crate-version.sh
Executable file
@ -0,0 +1,16 @@
|
||||
#!/bin/bash -e
|
||||
#
|
||||
# Outputs the current crate version
|
||||
#
|
||||
|
||||
cd "$(dirname "$0")"/..
|
||||
|
||||
while read -r name equals value _; do
|
||||
if [[ $name = version && $equals = = ]]; then
|
||||
echo "${value//\"/}"
|
||||
exit 0
|
||||
fi
|
||||
done < <(cat Cargo.toml)
|
||||
|
||||
echo Unable to locate version in Cargo.toml 1>&2
|
||||
exit 1
|
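Consumers capture the version via command substitution, as publish-bpf-sdk.sh does later in this change set. A minimal sketch:

```bash
# Capture the crate version for use in artifact names and upload paths.
version=$(ci/crate-version.sh)
echo "Packaging artifacts for version $version"
```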
@ -1,22 +1,37 @@
|
||||
#!/bin/bash -e
|
||||
|
||||
usage() {
|
||||
echo "Usage: $0 [docker image name] [command]"
|
||||
echo "Usage: $0 [--nopull] [docker image name] [command]"
|
||||
echo
|
||||
echo Runs command in the specified docker image with
|
||||
echo a CI-appropriate environment
|
||||
echo a CI-appropriate environment.
|
||||
echo
|
||||
echo "--nopull Skip the dockerhub image update"
|
||||
echo "--shell Skip command and enter an interactive shell"
|
||||
echo
|
||||
}
|
||||
|
||||
cd "$(dirname "$0")/.."
|
||||
|
||||
INTERACTIVE=false
|
||||
if [[ $1 = --shell ]]; then
|
||||
INTERACTIVE=true
|
||||
shift
|
||||
fi
|
||||
|
||||
NOPULL=false
|
||||
if [[ $1 = --nopull ]]; then
|
||||
NOPULL=true
|
||||
shift
|
||||
fi
|
||||
|
||||
IMAGE="$1"
|
||||
if [[ -z "$IMAGE" ]]; then
|
||||
echo Error: image not defined
|
||||
exit 1
|
||||
fi
|
||||
|
||||
docker pull "$IMAGE"
|
||||
$NOPULL || docker pull "$IMAGE"
|
||||
shift
|
||||
|
||||
ARGS=(
|
||||
@ -26,9 +41,14 @@ ARGS=(
|
||||
)
|
||||
|
||||
if [[ -n $CI ]]; then
|
||||
# Share the real ~/.cargo between docker containers in CI for speed
|
||||
ARGS+=(--volume "$HOME:/home")
|
||||
ARGS+=(--env "CARGO_HOME=/home/.cargo")
|
||||
else
|
||||
# Avoid sharing ~/.cargo when building locally to avoid a mixed macOS/Linux
|
||||
# ~/.cargo
|
||||
ARGS+=(--volume "$PWD:/home")
|
||||
fi
|
||||
ARGS+=(--env "CARGO_HOME=/home/.cargo")
|
||||
|
||||
# kcov tries to set the personality of the binary which docker
|
||||
# doesn't allow by default.
|
||||
@ -51,5 +71,15 @@ ARGS+=(
|
||||
--env SNAPCRAFT_CREDENTIALS_KEY
|
||||
)
|
||||
|
||||
if $INTERACTIVE; then
|
||||
if [[ -n $1 ]]; then
|
||||
echo
|
||||
echo "Note: '$*' ignored due to --shell argument"
|
||||
echo
|
||||
fi
|
||||
set -x
|
||||
exec docker run --interactive --tty "${ARGS[@]}" "$IMAGE" bash
|
||||
fi
|
||||
|
||||
set -x
|
||||
exec docker run "${ARGS[@]}" "$IMAGE" "$@"
|
||||
|
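Two invocation sketches for the flags added above (the image name is whichever CI image applies):

```bash
# Run a CI script inside the image without first updating it from Docker Hub:
ci/docker-run.sh --nopull solanalabs/rust ci/test-stable.sh

# Drop into an interactive shell in the same environment for debugging:
ci/docker-run.sh --shell solanalabs/rust
```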
@ -1,9 +1,9 @@
|
||||
FROM rustlang/rust:nightly
|
||||
FROM solanalabs/rust
|
||||
ARG date
|
||||
|
||||
RUN rustup component add clippy-preview --toolchain=nightly && \
|
||||
echo deb http://ftp.debian.org/debian stretch-backports main >> /etc/apt/sources.list && \
|
||||
apt update && \
|
||||
apt install -y \
|
||||
llvm-6.0 \
|
||||
&& \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
RUN set -x && \
|
||||
rustup install nightly-$date && \
|
||||
rustup default nightly-$date && \
|
||||
rustc --version && \
|
||||
cargo --version && \
|
||||
cargo +nightly-$date install cargo-cov
|
||||
|
@ -1,6 +1,36 @@
|
||||
Docker image containing rust nightly and some preinstalled crates used in CI.
|
||||
|
||||
This image may be manually updated by running `./build.sh` if you are a member
|
||||
This image may be manually updated by running `CI=true ./build.sh` if you are a member
|
||||
of the [Solana Labs](https://hub.docker.com/u/solanalabs/) Docker Hub
|
||||
organization, but it is also automatically updated periodically by
|
||||
[this automation](https://buildkite.com/solana-labs/solana-ci-docker-rust-nightly).
|
||||
|
||||
## Moving to a newer nightly
|
||||
|
||||
We pin the version of nightly (see the `ARG nightly=xyz` line in `Dockerfile`)
|
||||
to avoid the build breaking at unexpected times, as occasionally nightly will
|
||||
introduce breaking changes.
|
||||
|
||||
To update the pinned version (a consolidated sketch follows this list):
|
||||
1. Run `ci/docker-rust-nightly/build.sh` to rebuild the nightly image locally,
|
||||
or potentially `ci/docker-rust-nightly/build.sh YYYY-MM-DD` if there's a
|
||||
specific YYYY-MM-DD that is desired (default is today's build).
|
||||
1. Run `SOLANA_DOCKER_RUN_NOSETUID=1 ci/docker-run.sh --nopull solanalabs/rust-nightly:YYYY-MM-DD ci/test-nightly.sh`
|
||||
to confirm the new nightly image builds. Fix any issues as needed
|
||||
1. Run `docker login` to enable pushing images to Docker Hub, if you're authorized.
|
||||
1. Run `CI=true ci/docker-rust-nightly/build.sh YYYY-MM-DD` to push the new nightly image to dockerhub.com.
|
||||
1. Modify the `solanalabs/rust-nightly:YYYY-MM-DD` reference in `ci/buildkite.yml` from the previous to
|
||||
new *YYYY-MM-DD* value, send a PR with this change and any codebase adjustments needed.
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Resource is denied
|
||||
|
||||
When running `CI=true ci/docker-rust-nightly/build.sh`, you see:
|
||||
|
||||
```
|
||||
denied: requested access to the resource is denied
|
||||
```
|
||||
|
||||
Run `docker login` to enable pushing images to Docker Hub. Contact @mvines or @garious
|
||||
to get write access.
|
||||
|
@ -2,5 +2,12 @@
|
||||
|
||||
cd "$(dirname "$0")"
|
||||
|
||||
docker build -t solanalabs/rust-nightly .
|
||||
docker push solanalabs/rust-nightly
|
||||
nightlyDate=${1:-$(date +%Y-%m-%d)}
|
||||
docker build -t solanalabs/rust-nightly:"$nightlyDate" --build-arg date="$nightlyDate" .
|
||||
|
||||
maybeEcho=
|
||||
if [[ -z $CI ]]; then
|
||||
echo "Not CI, skipping |docker push|"
|
||||
maybeEcho="echo"
|
||||
fi
|
||||
$maybeEcho docker push solanalabs/rust-nightly:"$nightlyDate"
|
||||
|
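The `maybeEcho` construction above is a small dry-run idiom: outside CI the `docker push` is printed rather than executed. In isolation, with an illustrative tag:

```bash
# Prefix a command with "echo" unless running in CI, turning it into a dry run.
maybeEcho=
[[ -n $CI ]] || maybeEcho=echo
$maybeEcho docker push solanalabs/rust-nightly:2018-10-04
```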
@ -1,15 +1,26 @@
|
||||
FROM rust:1.28
|
||||
# Note: when the rust version is changed also modify
|
||||
# ci/buildkite.yml to pick up the new image tag
|
||||
FROM rust:1.30.0
|
||||
|
||||
RUN apt update && \
|
||||
RUN set -x && \
|
||||
apt update && \
|
||||
apt-get install apt-transport-https && \
|
||||
echo deb https://apt.buildkite.com/buildkite-agent stable main > /etc/apt/sources.list.d/buildkite-agent.list && \
|
||||
echo deb http://apt.llvm.org/stretch/ llvm-toolchain-stretch-7 main > /etc/apt/sources.list.d/llvm.list && \
|
||||
apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 32A37959C2FA5C3C99EFBC32A79206696452D198 && \
|
||||
wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | apt-key add - && \
|
||||
apt update && \
|
||||
apt install -y \
|
||||
buildkite-agent \
|
||||
cmake \
|
||||
lcov \
|
||||
libclang-common-7-dev \
|
||||
llvm-7 \
|
||||
rsync \
|
||||
sudo \
|
||||
cmake \
|
||||
&& \
|
||||
rustup component add rustfmt-preview && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
rustup component add clippy-preview && \
|
||||
rm -rf /var/lib/apt/lists/* && \
|
||||
rustc --version && \
|
||||
cargo --version
|
||||
|
@ -3,4 +3,9 @@
|
||||
cd "$(dirname "$0")"
|
||||
|
||||
docker build -t solanalabs/rust .
|
||||
|
||||
read -r rustc version _ < <(docker run solanalabs/rust rustc --version)
|
||||
[[ $rustc = rustc ]]
|
||||
docker tag solanalabs/rust:latest solanalabs/rust:"$version"
|
||||
|
||||
docker push solanalabs/rust
|
||||
|
1
ci/docker-solana/.gitignore
vendored
Normal file
@ -0,0 +1 @@
|
||||
cargo-install/
|
13
ci/docker-solana/Dockerfile
Normal file
@ -0,0 +1,13 @@
|
||||
FROM debian:stretch
|
||||
|
||||
# JSON RPC port
|
||||
EXPOSE 8899/tcp
|
||||
|
||||
# Install libssl
|
||||
RUN apt update && \
|
||||
apt-get install -y libssl-dev && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
COPY usr/bin /usr/bin/
|
||||
ENTRYPOINT [ "/usr/bin/solana-entrypoint.sh" ]
|
||||
CMD [""]
|
17
ci/docker-solana/README.md
Normal file
@ -0,0 +1,17 @@
|
||||
## Minimal Solana Docker image
|
||||
This image is automatically updated by CI
|
||||
|
||||
https://hub.docker.com/r/solanalabs/solana/
|
||||
|
||||
### Usage:
|
||||
Run the latest beta image:
|
||||
```bash
|
||||
$ docker run --rm -p 8899:8899 solanalabs/solana:beta
|
||||
```
|
||||
|
||||
Run the latest edge image:
|
||||
```bash
|
||||
$ docker run --rm -p 8899:8899 solanalabs/solana:edge
|
||||
```
|
||||
|
||||
Port *8899* is the JSON RPC port, which is used by clients to communicate with the network.
|
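A quick way to confirm a container is actually serving RPC on that port; the method name here is an assumption about the node's JSON RPC surface, not something this change pins down:

```bash
# Hypothetical smoke test against a locally running container.
curl -s http://localhost:8899 -X POST -H 'Content-Type: application/json' \
  -d '{"jsonrpc":"2.0","id":1,"method":"getTransactionCount"}'
```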
39
ci/docker-solana/build.sh
Executable file
@ -0,0 +1,39 @@
|
||||
#!/bin/bash -ex
|
||||
|
||||
cd "$(dirname "$0")"
|
||||
eval "$(../channel-info.sh)"
|
||||
|
||||
if [[ $BUILDKITE_BRANCH = "$STABLE_CHANNEL" ]]; then
|
||||
CHANNEL=stable
|
||||
elif [[ $BUILDKITE_BRANCH = "$EDGE_CHANNEL" ]]; then
|
||||
CHANNEL=edge
|
||||
elif [[ $BUILDKITE_BRANCH = "$BETA_CHANNEL" ]]; then
|
||||
CHANNEL=beta
|
||||
fi
|
||||
|
||||
if [[ -z $CHANNEL ]]; then
|
||||
echo Unable to determine channel to publish into, exiting.
|
||||
exit 0
|
||||
fi
|
||||
|
||||
rm -rf usr/
|
||||
../docker-run.sh solanalabs/rust:1.30.0 \
|
||||
cargo install --path . --root ci/docker-solana/usr
|
||||
cp -f entrypoint.sh usr/bin/solana-entrypoint.sh
|
||||
../../scripts/install-native-programs.sh usr/bin/
|
||||
|
||||
docker build -t solanalabs/solana:$CHANNEL .
|
||||
|
||||
maybeEcho=
|
||||
if [[ -z $CI ]]; then
|
||||
echo "Not CI, skipping |docker push|"
|
||||
maybeEcho="echo"
|
||||
else
|
||||
(
|
||||
set +x
|
||||
if [[ -n $DOCKER_PASSWORD && -n $DOCKER_USERNAME ]]; then
|
||||
echo "$DOCKER_PASSWORD" | docker login --username "$DOCKER_USERNAME" --password-stdin
|
||||
fi
|
||||
)
|
||||
fi
|
||||
$maybeEcho docker push solanalabs/solana:$CHANNEL
|
23
ci/docker-solana/entrypoint.sh
Executable file
@ -0,0 +1,23 @@
|
||||
#!/bin/bash -ex
|
||||
|
||||
export RUST_LOG=${RUST_LOG:-solana=info} # if RUST_LOG is unset, default to info
|
||||
export RUST_BACKTRACE=1
|
||||
|
||||
solana-keygen -o /config/leader-keypair.json
|
||||
solana-keygen -o /config/drone-keypair.json
|
||||
|
||||
solana-genesis --tokens=1000000000 --ledger /ledger < /config/drone-keypair.json
|
||||
solana-fullnode-config --keypair=/config/leader-keypair.json -l > /config/leader-config.json
|
||||
|
||||
solana-drone --keypair /config/drone-keypair.json --network 127.0.0.1:8001 &
|
||||
drone=$!
|
||||
solana-fullnode --identity /config/leader-config.json --ledger /ledger/ &
|
||||
fullnode=$!
|
||||
|
||||
abort() {
|
||||
kill "$drone" "$fullnode"
|
||||
}
|
||||
|
||||
trap abort SIGINT SIGTERM
|
||||
wait "$fullnode"
|
||||
kill "$drone" "$fullnode"
|
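The entrypoint's shutdown handling is a standard bash supervision pattern: launch the helper in the background, trap signals, block on the primary process, and reap everything on the way out. Reduced to a self-contained sketch with placeholder commands standing in for the drone and fullnode:

```bash
#!/bin/bash
sleep 1000 & sidekick=$!   # stands in for solana-drone
sleep 2    & primary=$!    # stands in for solana-fullnode

abort() {
  kill "$sidekick" "$primary"
}

trap abort SIGINT SIGTERM   # tear both down on Ctrl-C or `docker stop`
wait "$primary"             # block on the primary process
kill "$sidekick" "$primary" 2>/dev/null || true  # primary exited; reap the rest
```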
@ -40,10 +40,10 @@ echo --- Remove unused docker networks
|
||||
docker network prune -f
|
||||
)
|
||||
|
||||
echo "--- Delete /tmp files older than 1 day owned by $(whoami)"
|
||||
echo "--- Delete /tmp files older than 1 day owned by $(id -un)"
|
||||
(
|
||||
set -x
|
||||
find /tmp -maxdepth 1 -user "$(whoami)" -mtime +1 -print0 | xargs -0 rm -rf
|
||||
find /tmp -maxdepth 1 -user "$(id -un)" -mtime +1 -print0 | xargs -0 rm -rf
|
||||
)
|
||||
|
||||
echo --- Deleting stale buildkite agent build directories
|
||||
|
@ -1,32 +0,0 @@
|
||||
#!/bin/bash -x
|
||||
#
|
||||
# Install EarlyOOM
|
||||
#
|
||||
|
||||
[[ $(uname) = Linux ]] || exit 1
|
||||
|
||||
# 64 - enable signalling of processes (term, kill, oom-kill)
|
||||
# TODO: This setting will not persist across reboots
|
||||
sysrq=$(( $(cat /proc/sys/kernel/sysrq) | 64 ))
|
||||
sudo sysctl -w kernel.sysrq=$sysrq
|
||||
|
||||
if command -v earlyoom; then
|
||||
sudo systemctl status earlyoom
|
||||
exit 0
|
||||
fi
|
||||
|
||||
wget http://ftp.us.debian.org/debian/pool/main/e/earlyoom/earlyoom_1.1-2_amd64.deb
|
||||
sudo apt install --quiet --yes ./earlyoom_1.1-2_amd64.deb
|
||||
|
||||
cat > earlyoom <<OOM
|
||||
# use the kernel OOM killer, trigger at 20% available RAM,
|
||||
EARLYOOM_ARGS="-k -m 20"
|
||||
OOM
|
||||
sudo cp earlyoom /etc/default/
|
||||
rm earlyoom
|
||||
|
||||
sudo systemctl stop earlyoom
|
||||
sudo systemctl enable earlyoom
|
||||
sudo systemctl start earlyoom
|
||||
|
||||
exit 0
|
@ -6,9 +6,9 @@
|
||||
|
||||
cd "$(dirname "$0")"/..
|
||||
source ci/upload_ci_artifact.sh
|
||||
source multinode-demo/common.sh
|
||||
source scripts/configure-metrics.sh
|
||||
|
||||
./multinode-demo/setup.sh
|
||||
multinode-demo/setup.sh
|
||||
|
||||
backgroundCommands="drone leader validator validator-x"
|
||||
pids=()
|
||||
@ -16,7 +16,7 @@ pids=()
|
||||
for cmd in $backgroundCommands; do
|
||||
echo "--- Start $cmd"
|
||||
rm -f log-"$cmd".txt
|
||||
./multinode-demo/"$cmd".sh > log-"$cmd".txt 2>&1 &
|
||||
multinode-demo/"$cmd".sh > log-"$cmd".txt 2>&1 &
|
||||
declare pid=$!
|
||||
pids+=("$pid")
|
||||
echo "pid: $pid"
|
||||
@ -64,21 +64,28 @@ flag_error() {
|
||||
echo "--- Wallet sanity"
|
||||
(
|
||||
set -x
|
||||
multinode-demo/test/wallet-sanity.sh
|
||||
scripts/wallet-sanity.sh
|
||||
) || flag_error
|
||||
|
||||
echo "--- Node count"
|
||||
(
|
||||
source multinode-demo/common.sh
|
||||
set -x
|
||||
./multinode-demo/client.sh "$PWD" 3 -c --addr 127.0.0.1
|
||||
client_id=/tmp/client-id.json-$$
|
||||
$solana_keygen -o $client_id
|
||||
$solana_bench_tps --identity $client_id --num-nodes 3 --reject-extra-nodes --converge-only
|
||||
rm -rf $client_id
|
||||
) || flag_error
|
||||
|
||||
killBackgroundCommands
|
||||
|
||||
echo "--- Ledger verification"
|
||||
(
|
||||
source multinode-demo/common.sh
|
||||
set -x
|
||||
$solana_ledger_tool --ledger "$SOLANA_CONFIG_DIR"/ledger verify
|
||||
cp -R "$SOLANA_CONFIG_DIR"/ledger /tmp/ledger-$$
|
||||
$solana_ledger_tool --ledger /tmp/ledger-$$ verify
|
||||
rm -rf /tmp/ledger-$$
|
||||
) || flag_error
|
||||
|
||||
echo +++
|
||||
|
36
ci/publish-bpf-sdk.sh
Executable file
@ -0,0 +1,36 @@
|
||||
#!/bin/bash -e
|
||||
|
||||
cd "$(dirname "$0")/.."
|
||||
|
||||
version=$(./ci/crate-version.sh)
|
||||
|
||||
echo --- Creating tarball
|
||||
(
|
||||
set -x
|
||||
rm -rf bpf-sdk/
|
||||
mkdir bpf-sdk/
|
||||
(
|
||||
echo "$version"
|
||||
git rev-parse HEAD
|
||||
) > bpf-sdk/version.txt
|
||||
|
||||
cp -ra programs/bpf/c/sdk/* bpf-sdk/
|
||||
|
||||
tar jvcf bpf-sdk.tar.bz2 bpf-sdk/
|
||||
)
|
||||
|
||||
|
||||
echo --- AWS S3 Store
|
||||
|
||||
set -x
|
||||
if [[ ! -r s3cmd-2.0.1/s3cmd ]]; then
|
||||
rm -rf s3cmd-2.0.1.tar.gz s3cmd-2.0.1
|
||||
wget https://github.com/s3tools/s3cmd/releases/download/v2.0.1/s3cmd-2.0.1.tar.gz
|
||||
tar zxf s3cmd-2.0.1.tar.gz
|
||||
fi
|
||||
|
||||
python ./s3cmd-2.0.1/s3cmd --acl-public put bpf-sdk.tar.bz2 \
|
||||
s3://solana-sdk/"$version"/bpf-sdk.tar.bz2
|
||||
|
||||
exit 0
|
||||
|
@ -2,7 +2,7 @@
|
||||
|
||||
cd "$(dirname "$0")/.."
|
||||
|
||||
if [[ -z "$BUILDKITE_TAG" ]]; then
|
||||
if [[ -z "$BUILDKITE_TAG" && -z "$TRIGGERED_BUILDKITE_TAG" ]]; then
|
||||
# Skip publish if this is not a tagged release
|
||||
exit 0
|
||||
fi
|
||||
@ -12,8 +12,18 @@ if [[ -z "$CRATES_IO_TOKEN" ]]; then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# TODO: Ensure the published version matches the contents of BUILDKITE_TAG
|
||||
ci/docker-run.sh rust \
|
||||
bash -exc "cargo package; cargo publish --token $CRATES_IO_TOKEN"
|
||||
maybePublish="echo Publish skipped"
|
||||
if [[ -n $CI ]]; then
|
||||
maybePublish="cargo publish --token $CRATES_IO_TOKEN"
|
||||
fi
|
||||
|
||||
# shellcheck disable=2044 # Disable 'For loops over find output are fragile...'
|
||||
for Cargo_toml in {sdk,programs/native/{bpf_loader,lua_loader,noop},.}/Cargo.toml; do
|
||||
# TODO: Ensure the published version matches the contents of BUILDKITE_TAG
|
||||
(
|
||||
set -x
|
||||
ci/docker-run.sh rust bash -exc "cd $(dirname "$Cargo_toml"); cargo package; $maybePublish"
|
||||
)
|
||||
done
|
||||
|
||||
exit 0
|
||||
|
58
ci/publish-solana-tar.sh
Executable file
@ -0,0 +1,58 @@
|
||||
#!/bin/bash -e
|
||||
|
||||
cd "$(dirname "$0")/.."
|
||||
|
||||
DRYRUN=
|
||||
if [[ -z $BUILDKITE_BRANCH ]]; then
|
||||
DRYRUN="echo"
|
||||
fi
|
||||
|
||||
eval "$(ci/channel-info.sh)"
|
||||
|
||||
if [[ $BUILDKITE_BRANCH = "$STABLE_CHANNEL" ]]; then
|
||||
CHANNEL=stable
|
||||
elif [[ $BUILDKITE_BRANCH = "$EDGE_CHANNEL" ]]; then
|
||||
CHANNEL=edge
|
||||
elif [[ $BUILDKITE_BRANCH = "$BETA_CHANNEL" ]]; then
|
||||
CHANNEL=beta
|
||||
fi
|
||||
|
||||
if [[ -z $CHANNEL ]]; then
|
||||
echo Unable to determine channel to publish into, exiting.
|
||||
exit 0
|
||||
fi
|
||||
|
||||
echo --- Creating tarball
|
||||
if [[ -z $DRYRUN ]]; then
|
||||
(
|
||||
set -x
|
||||
rm -rf solana-release/
|
||||
mkdir solana-release/
|
||||
(
|
||||
echo "$CHANNEL"
|
||||
git rev-parse HEAD
|
||||
) > solana-release/version.txt
|
||||
|
||||
./fetch-perf-libs.sh
|
||||
cargo install --features=cuda --root solana-release
|
||||
./scripts/install-native-programs.sh solana-release
|
||||
|
||||
tar jvcf solana-release.tar.bz2 solana-release/
|
||||
)
|
||||
fi
|
||||
|
||||
|
||||
echo --- AWS S3 Store
|
||||
|
||||
set -x
|
||||
if [[ ! -r s3cmd-2.0.1/s3cmd ]]; then
|
||||
rm -rf s3cmd-2.0.1.tar.gz s3cmd-2.0.1
|
||||
$DRYRUN wget https://github.com/s3tools/s3cmd/releases/download/v2.0.1/s3cmd-2.0.1.tar.gz
|
||||
$DRYRUN tar zxf s3cmd-2.0.1.tar.gz
|
||||
fi
|
||||
|
||||
$DRYRUN python ./s3cmd-2.0.1/s3cmd --acl-public put solana-release.tar.bz2 \
|
||||
s3://solana-release/"$CHANNEL"/solana-release.tar.bz2
|
||||
|
||||
exit 0
|
||||
|
26
ci/semver_bash/LICENSE
Normal file
@ -0,0 +1,26 @@
|
||||
Copyright (c) 2013, Ray Bejjani
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are met:
|
||||
|
||||
1. Redistributions of source code must retain the above copyright notice, this
|
||||
list of conditions and the following disclaimer.
|
||||
2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
|
||||
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
||||
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
|
||||
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
The views and conclusions contained in the software and documentation are those
|
||||
of the authors and should not be interpreted as representing official policies,
|
||||
either expressed or implied, of the FreeBSD Project.
|
31
ci/semver_bash/README.md
Normal file
@ -0,0 +1,31 @@
|
||||
semver_bash is a bash parser for semantic versioning
|
||||
====================================================
|
||||
|
||||
[Semantic Versioning](http://semver.org/) is a set of guidelines that help keep
|
||||
version and version management sane. This is a bash based parser to help manage
|
||||
a project's versions. Use it from a Makefile or any scripts you use in your
|
||||
project.
|
||||
|
||||
Usage
|
||||
-----
|
||||
semver_bash can be used from the command line as:
|
||||
|
||||
$ ./semver.sh "3.2.1" "3.2.1-alpha"
|
||||
3.2.1 -> M: 3 m:2 p:1 s:
|
||||
3.2.1-alpha -> M: 3 m:2 p:1 s:-alpha
|
||||
3.2.1 == 3.2.1-alpha -> 1.
|
||||
3.2.1 < 3.2.1-alpha -> 1.
|
||||
3.2.1 > 3.2.1-alpha -> 0.
|
||||
|
||||
|
||||
Alternatively, you can source it from within a script:
|
||||
|
||||
. ./semver.sh
|
||||
|
||||
local MAJOR=0
|
||||
local MINOR=0
|
||||
local PATCH=0
|
||||
local SPECIAL=""
|
||||
|
||||
semverParseInto "1.2.3" MAJOR MINOR PATCH SPECIAL
|
||||
semverParseInto "3.2.1" MAJOR MINOR PATCH SPECIAL
|
130
ci/semver_bash/semver.sh
Executable file
@ -0,0 +1,130 @@
|
||||
#!/usr/bin/env sh
|
||||
|
||||
function semverParseInto() {
|
||||
local RE='[^0-9]*\([0-9]*\)[.]\([0-9]*\)[.]\([0-9]*\)\([0-9A-Za-z-]*\)'
|
||||
#MAJOR
|
||||
eval $2=`echo $1 | sed -e "s#$RE#\1#"`
|
||||
#MINOR
|
||||
eval $3=`echo $1 | sed -e "s#$RE#\2#"`
|
||||
#PATCH
|
||||
eval $4=`echo $1 | sed -e "s#$RE#\3#"`
|
||||
#SPECIAL
|
||||
eval $5=`echo $1 | sed -e "s#$RE#\4#"`
|
||||
}
|
||||
|
||||
function semverEQ() {
|
||||
local MAJOR_A=0
|
||||
local MINOR_A=0
|
||||
local PATCH_A=0
|
||||
local SPECIAL_A=0
|
||||
|
||||
local MAJOR_B=0
|
||||
local MINOR_B=0
|
||||
local PATCH_B=0
|
||||
local SPECIAL_B=0
|
||||
|
||||
semverParseInto $1 MAJOR_A MINOR_A PATCH_A SPECIAL_A
|
||||
semverParseInto $2 MAJOR_B MINOR_B PATCH_B SPECIAL_B
|
||||
|
||||
if [ $MAJOR_A -ne $MAJOR_B ]; then
|
||||
return 1
|
||||
fi
|
||||
|
||||
if [ $MINOR_A -ne $MINOR_B ]; then
|
||||
return 1
|
||||
fi
|
||||
|
||||
if [ $PATCH_A -ne $PATCH_B ]; then
|
||||
return 1
|
||||
fi
|
||||
|
||||
if [[ "_$SPECIAL_A" != "_$SPECIAL_B" ]]; then
|
||||
return 1
|
||||
fi
|
||||
|
||||
|
||||
return 0
|
||||
|
||||
}
|
||||
|
||||
function semverLT() {
|
||||
local MAJOR_A=0
|
||||
local MINOR_A=0
|
||||
local PATCH_A=0
|
||||
local SPECIAL_A=0
|
||||
|
||||
local MAJOR_B=0
|
||||
local MINOR_B=0
|
||||
local PATCH_B=0
|
||||
local SPECIAL_B=0
|
||||
|
||||
semverParseInto $1 MAJOR_A MINOR_A PATCH_A SPECIAL_A
|
||||
semverParseInto $2 MAJOR_B MINOR_B PATCH_B SPECIAL_B
|
||||
|
||||
if [ $MAJOR_A -lt $MAJOR_B ]; then
|
||||
return 0
|
||||
fi
|
||||
|
||||
if [[ $MAJOR_A -le $MAJOR_B && $MINOR_A -lt $MINOR_B ]]; then
|
||||
return 0
|
||||
fi
|
||||
|
||||
if [[ $MAJOR_A -le $MAJOR_B && $MINOR_A -le $MINOR_B && $PATCH_A -lt $PATCH_B ]]; then
|
||||
return 0
|
||||
fi
|
||||
|
||||
if [[ "_$SPECIAL_A" == "_" ]] && [[ "_$SPECIAL_B" == "_" ]] ; then
|
||||
return 1
|
||||
fi
|
||||
if [[ "_$SPECIAL_A" == "_" ]] && [[ "_$SPECIAL_B" != "_" ]] ; then
|
||||
return 1
|
||||
fi
|
||||
if [[ "_$SPECIAL_A" != "_" ]] && [[ "_$SPECIAL_B" == "_" ]] ; then
|
||||
return 0
|
||||
fi
|
||||
|
||||
if [[ "_$SPECIAL_A" < "_$SPECIAL_B" ]]; then
|
||||
return 0
|
||||
fi
|
||||
|
||||
return 1
|
||||
|
||||
}
|
||||
|
||||
function semverGT() {
|
||||
semverEQ $1 $2
|
||||
local EQ=$?
|
||||
|
||||
semverLT $1 $2
|
||||
local LT=$?
|
||||
|
||||
if [ $EQ -ne 0 ] && [ $LT -ne 0 ]; then
|
||||
return 0
|
||||
else
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
if [ "___semver.sh" == "___`basename $0`" ]; then
|
||||
|
||||
MAJOR=0
|
||||
MINOR=0
|
||||
PATCH=0
|
||||
SPECIAL=""
|
||||
|
||||
semverParseInto $1 MAJOR MINOR PATCH SPECIAL
|
||||
echo "$1 -> M: $MAJOR m:$MINOR p:$PATCH s:$SPECIAL"
|
||||
|
||||
semverParseInto $2 MAJOR MINOR PATCH SPECIAL
|
||||
echo "$2 -> M: $MAJOR m:$MINOR p:$PATCH s:$SPECIAL"
|
||||
|
||||
semverEQ $1 $2
|
||||
echo "$1 == $2 -> $?."
|
||||
|
||||
semverLT $1 $2
|
||||
echo "$1 < $2 -> $?."
|
||||
|
||||
semverGT $1 $2
|
||||
echo "$1 > $2 -> $?."
|
||||
|
||||
fi
|
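The comparison functions communicate through their exit status (0 for true), so they compose directly with `if`, which is exactly how channel-info.sh uses them above. A minimal sketch:

```bash
# Source the helpers, then branch on a version comparison.
source ci/semver_bash/semver.sh

if semverLT "1.2.3" "1.3.0"; then
  echo "1.2.3 sorts before 1.3.0"
fi
```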
151
ci/semver_bash/semver_test.sh
Executable file
@ -0,0 +1,151 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
. ./semver.sh
|
||||
|
||||
semverTest() {
|
||||
local A=R1.3.2
|
||||
local B=R2.3.2
|
||||
local C=R1.4.2
|
||||
local D=R1.3.3
|
||||
local E=R1.3.2a
|
||||
local F=R1.3.2b
|
||||
local G=R1.2.3
|
||||
|
||||
local MAJOR=0
|
||||
local MINOR=0
|
||||
local PATCH=0
|
||||
local SPECIAL=""
|
||||
|
||||
semverParseInto $A MAJOR MINOR PATCH SPECIAL
|
||||
echo "$A -> M:$MAJOR m:$MINOR p:$PATCH s:$SPECIAL. Expect M:1 m:3 p:2 s:"
|
||||
semverParseInto $E MAJOR MINOR PATCH SPECIAL
|
||||
echo "$E -> M:$MAJOR m:$MINOR p:$PATCH s:$SPECIAL. Expect M:1 m:3 p:2 s:a"
|
||||
|
||||
echo "Equality comparisions"
|
||||
semverEQ $A $A
|
||||
echo "$A == $A -> $?. Expect 0."
|
||||
|
||||
semverLT $A $A
|
||||
echo "$A < $A -> $?. Expect 1."
|
||||
|
||||
semverGT $A $A
|
||||
echo "$A > $A -> $?. Expect 1."
|
||||
|
||||
|
||||
echo "Major number comparisions"
|
||||
semverEQ $A $B
|
||||
echo "$A == $B -> $?. Expect 1."
|
||||
|
||||
semverLT $A $B
|
||||
echo "$A < $B -> $?. Expect 0."
|
||||
|
||||
semverGT $A $B
|
||||
echo "$A > $B -> $?. Expect 1."
|
||||
|
||||
semverEQ $B $A
|
||||
echo "$B == $A -> $?. Expect 1."
|
||||
|
||||
semverLT $B $A
|
||||
echo "$B < $A -> $?. Expect 1."
|
||||
|
||||
semverGT $B $A
|
||||
echo "$B > $A -> $?. Expect 0."
|
||||
|
||||
|
||||
echo "Minor number comparisions"
|
||||
semverEQ $A $C
|
||||
echo "$A == $C -> $?. Expect 1."
|
||||
|
||||
semverLT $A $C
|
||||
echo "$A < $C -> $?. Expect 0."
|
||||
|
||||
semverGT $A $C
|
||||
echo "$A > $C -> $?. Expect 1."
|
||||
|
||||
semverEQ $C $A
|
||||
echo "$C == $A -> $?. Expect 1."
|
||||
|
||||
semverLT $C $A
|
||||
echo "$C < $A -> $?. Expect 1."
|
||||
|
||||
semverGT $C $A
|
||||
echo "$C > $A -> $?. Expect 0."
|
||||
|
||||
echo "patch number comparisions"
|
||||
semverEQ $A $D
|
||||
echo "$A == $D -> $?. Expect 1."
|
||||
|
||||
semverLT $A $D
|
||||
echo "$A < $D -> $?. Expect 0."
|
||||
|
||||
semverGT $A $D
|
||||
echo "$A > $D -> $?. Expect 1."
|
||||
|
||||
semverEQ $D $A
|
||||
echo "$D == $A -> $?. Expect 1."
|
||||
|
||||
semverLT $D $A
|
||||
echo "$D < $A -> $?. Expect 1."
|
||||
|
||||
semverGT $D $A
|
||||
echo "$D > $A -> $?. Expect 0."
|
||||
|
||||
echo "special section vs no special comparisions"
|
||||
semverEQ $A $E
|
||||
echo "$A == $E -> $?. Expect 1."
|
||||
|
||||
semverLT $A $E
|
||||
echo "$A < $E -> $?. Expect 1."
|
||||
|
||||
semverGT $A $E
|
||||
echo "$A > $E -> $?. Expect 0."
|
||||
|
||||
semverEQ $E $A
|
||||
echo "$E == $A -> $?. Expect 1."
|
||||
|
||||
semverLT $E $A
|
||||
echo "$E < $A -> $?. Expect 0."
|
||||
|
||||
semverGT $E $A
|
||||
echo "$E > $A -> $?. Expect 1."
|
||||
|
||||
echo "special section vs special comparisions"
|
||||
semverEQ $E $F
|
||||
echo "$E == $F -> $?. Expect 1."
|
||||
|
||||
semverLT $E $F
|
||||
echo "$E < $F -> $?. Expect 0."
|
||||
|
||||
semverGT $E $F
|
||||
echo "$E > $F -> $?. Expect 1."
|
||||
|
||||
semverEQ $F $E
|
||||
echo "$F == $E -> $?. Expect 1."
|
||||
|
||||
semverLT $F $E
|
||||
echo "$F < $E -> $?. Expect 1."
|
||||
|
||||
semverGT $F $E
|
||||
echo "$F > $E -> $?. Expect 0."
|
||||
|
||||
echo "Minor and patch number comparisons"
|
||||
semverEQ $A $G
|
||||
echo "$A == $G -> $?. Expect 1."
|
||||
|
||||
semverLT $A $G
|
||||
echo "$A < $G -> $?. Expect 1."
|
||||
|
||||
semverGT $A $G
|
||||
echo "$A > $G -> $?. Expect 0."
|
||||
|
||||
semverEQ $G $A
|
||||
echo "$G == $A -> $?. Expect 1."
|
||||
|
||||
semverLT $G $A
|
||||
echo "$G < $A -> $?. Expect 0."
|
||||
|
||||
semverGT $G $A
|
||||
echo "$G > $A -> $?. Expect 1."
|
||||
}
|
||||
|
||||
semverTest
|
@ -6,6 +6,7 @@ cd "$(dirname "$0")/.."
|
||||
|
||||
set -x
|
||||
find . -name "*.sh" \
|
||||
-not -regex ".*/ci/semver_bash/.*" \
|
||||
-not -regex ".*/.cargo/.*" \
|
||||
-not -regex ".*/node_modules/.*" \
|
||||
-not -regex ".*/target/.*" \
|
||||
|
43
ci/snap.sh
@ -2,19 +2,31 @@
|
||||
|
||||
cd "$(dirname "$0")/.."
|
||||
|
||||
if ! ci/version-check.sh stable; then
|
||||
# This job doesn't run within a container, try once to upgrade tooling on a
|
||||
# version check failure
|
||||
rustup install stable
|
||||
ci/version-check.sh stable
|
||||
fi
|
||||
|
||||
DRYRUN=
|
||||
if [[ -z $BUILDKITE_BRANCH ]] || ./ci/is-pr.sh; then
|
||||
DRYRUN="echo"
|
||||
fi
|
||||
|
||||
# BUILDKITE_TAG is the normal environment variable set by Buildkite. However
|
||||
# when this script is run from a triggered pipeline, TRIGGERED_BUILDKITE_TAG is
|
||||
# used instead of BUILDKITE_TAG (due to Buildkite limitations that prevent
|
||||
# BUILDKITE_TAG from propagating through to triggered pipelines)
|
||||
if [[ -z "$BUILDKITE_TAG" && -z "$TRIGGERED_BUILDKITE_TAG" ]]; then
|
||||
SNAP_CHANNEL=edge
|
||||
else
|
||||
SNAP_CHANNEL=beta
|
||||
eval "$(ci/channel-info.sh)"
|
||||
|
||||
if [[ $BUILDKITE_BRANCH = "$STABLE_CHANNEL" ]]; then
|
||||
CHANNEL=stable
|
||||
elif [[ $BUILDKITE_BRANCH = "$EDGE_CHANNEL" ]]; then
|
||||
CHANNEL=edge
|
||||
elif [[ $BUILDKITE_BRANCH = "$BETA_CHANNEL" ]]; then
|
||||
CHANNEL=beta
|
||||
fi
|
||||
|
||||
if [[ -z $CHANNEL ]]; then
|
||||
echo Unable to determine channel to publish into, exiting.
|
||||
exit 0
|
||||
fi
|
||||
|
||||
if [[ -z $DRYRUN ]]; then
|
||||
@ -39,15 +51,20 @@ set -x
|
||||
|
||||
echo --- checking for multilog
|
||||
if [[ ! -x /usr/bin/multilog ]]; then
|
||||
echo "multilog not found, install with: sudo apt-get install -y daemontools"
|
||||
exit 1
|
||||
if [[ -z $CI ]]; then
|
||||
echo "multilog not found, install with: sudo apt-get install -y daemontools"
|
||||
exit 1
|
||||
fi
|
||||
sudo apt-get install -y daemontools
|
||||
fi
|
||||
|
||||
echo --- build
|
||||
echo --- build: $CHANNEL channel
|
||||
snapcraft
|
||||
|
||||
source ci/upload_ci_artifact.sh
|
||||
upload_ci_artifact solana_*.snap
|
||||
|
||||
echo --- publish
|
||||
$DRYRUN snapcraft push solana_*.snap --release $SNAP_CHANNEL
|
||||
if [[ -z $DO_NOT_PUBLISH_SNAP ]]; then
|
||||
echo --- publish: $CHANNEL channel
|
||||
$DRYRUN snapcraft push solana_*.snap --release $CHANNEL
|
||||
fi
|
||||
|
18
ci/solana-testnet.yml
Executable file
@ -0,0 +1,18 @@
|
||||
steps:
|
||||
- command: "ci/snap.sh"
|
||||
label: "create snap"
|
||||
|
||||
- wait
|
||||
|
||||
- command: "ci/testnet-automation.sh"
|
||||
label: "run testnet"
|
||||
agents:
|
||||
- "queue=testnet-deploy"
|
||||
|
||||
- wait: ~
|
||||
continue_on_failure: true
|
||||
|
||||
- command: "ci/testnet-automation-cleanup.sh"
|
||||
label: "delete testnet"
|
||||
agents:
|
||||
- "queue=testnet-deploy"
|
@ -2,7 +2,12 @@
|
||||
|
||||
cd "$(dirname "$0")/.."
|
||||
|
||||
ci/version-check.sh stable
|
||||
# shellcheck disable=SC1091
|
||||
source ci/upload_ci_artifact.sh
|
||||
|
||||
eval "$(ci/channel-info.sh)"
|
||||
|
||||
ci/version-check.sh nightly
|
||||
export RUST_BACKTRACE=1
|
||||
|
||||
_() {
|
||||
@ -10,4 +15,19 @@ _() {
|
||||
"$@"
|
||||
}
|
||||
|
||||
_ cargo bench --verbose
|
||||
set -o pipefail
|
||||
|
||||
UPLOAD_METRICS=""
|
||||
TARGET_BRANCH=$BUILDKITE_BRANCH
|
||||
if [[ -z $BUILDKITE_BRANCH ]] || ./ci/is-pr.sh; then
|
||||
TARGET_BRANCH=$EDGE_CHANNEL
|
||||
else
|
||||
UPLOAD_METRICS="upload"
|
||||
fi
|
||||
|
||||
BENCH_FILE=bench_output.log
|
||||
BENCH_ARTIFACT=current_bench_results.log
|
||||
_ cargo bench --features=unstable --verbose -- -Z unstable-options --format=json | tee "$BENCH_FILE"
|
||||
_ cargo run --release --bin solana-upload-perf -- "$BENCH_FILE" "$TARGET_BRANCH" "$UPLOAD_METRICS" >"$BENCH_ARTIFACT"
|
||||
|
||||
upload_ci_artifact "$BENCH_ARTIFACT"
|
||||
|
@ -12,7 +12,7 @@ fi
|
||||
export RUST_BACKTRACE=1
|
||||
|
||||
./fetch-perf-libs.sh
|
||||
export LD_LIBRARY_PATH+=:$PWD
|
||||
export LD_LIBRARY_PATH=$PWD/target/perf-libs:$LD_LIBRARY_PATH
|
||||
|
||||
export RUST_LOG=multinode=info
|
||||
|
||||
|
@ -1,6 +1,7 @@
|
||||
#!/bin/bash -e
|
||||
|
||||
cd "$(dirname "$0")/.."
|
||||
source ci/upload_ci_artifact.sh
|
||||
|
||||
ci/version-check.sh nightly
|
||||
export RUST_BACKTRACE=1
|
||||
@ -10,22 +11,50 @@ _() {
|
||||
"$@"
|
||||
}
|
||||
|
||||
_ cargo build --verbose --features unstable
|
||||
_ cargo test --verbose --features unstable
|
||||
_ cargo clippy -- --deny=warnings
|
||||
# Uncomment this to run the nightly test suite
|
||||
# _ cargo test --verbose --features=unstable
|
||||
|
||||
exit 0
|
||||
maybe_cargo_install() {
|
||||
for cmd in "$@"; do
|
||||
set +e
|
||||
cargo "$cmd" --help > /dev/null 2>&1
|
||||
declare exitcode=$?
|
||||
set -e
|
||||
if [[ $exitcode -eq 101 ]]; then
|
||||
_ cargo install cargo-"$cmd"
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
# Coverage disabled (see issue #433)
|
||||
_ cargo cov test
|
||||
maybe_cargo_install cov
|
||||
|
||||
# Generate coverage data and report via unit-test suite.
|
||||
_ cargo cov clean
|
||||
_ cargo cov test --lib
|
||||
_ cargo cov report
|
||||
|
||||
echo --- Coverage report:
|
||||
ls -l target/cov/report/index.html
|
||||
# Generate a coverage report with grcov via lcov.
|
||||
if [[ ! -f ./grcov ]]; then
|
||||
uname=$(uname | tr '[:upper:]' '[:lower:]')
|
||||
uname_m=$(uname -m | tr '[:upper:]' '[:lower:]')
|
||||
name=grcov-${uname}-${uname_m}.tar.bz2
|
||||
_ wget "https://github.com/mozilla/grcov/releases/download/v0.2.3/${name}"
|
||||
_ tar -xjf "${name}"
|
||||
fi
|
||||
_ ./grcov . -t lcov > lcov.info
|
||||
_ genhtml -o target/cov/report-lcov --show-details --highlight --ignore-errors source --legend lcov.info
|
||||
|
||||
# Upload to tarballs to buildkite.
|
||||
_ cd target/cov && tar -cjf cov-report.tar.bz2 report/* && cd -
|
||||
_ upload_ci_artifact "target/cov/cov-report.tar.bz2"
|
||||
|
||||
_ cd target/cov && tar -cjf lcov-report.tar.bz2 report-lcov/* && cd -
|
||||
_ upload_ci_artifact "target/cov/lcov-report.tar.bz2"
|
||||
|
||||
if [[ -z "$CODECOV_TOKEN" ]]; then
|
||||
echo CODECOV_TOKEN undefined
|
||||
else
|
||||
bash <(curl -s https://codecov.io/bash) -x 'llvm-cov-6.0 gcov'
|
||||
true
|
||||
# TODO: Why doesn't codecov grok our lcov files?
|
||||
#bash <(curl -s https://codecov.io/bash) -X gcov
|
||||
fi
|
||||
|
||||
|
@ -9,9 +9,10 @@ if ! ci/version-check.sh stable; then
|
||||
ci/version-check.sh stable
|
||||
fi
|
||||
export RUST_BACKTRACE=1
|
||||
export RUSTFLAGS="-D warnings"
|
||||
|
||||
./fetch-perf-libs.sh
|
||||
export LD_LIBRARY_PATH=$PWD:/usr/local/cuda/lib64
|
||||
export LD_LIBRARY_PATH=$PWD/target/perf-libs:/usr/local/cuda/lib64:$LD_LIBRARY_PATH
|
||||
export PATH=$PATH:/usr/local/cuda/bin
|
||||
|
||||
_() {
|
||||
@ -19,7 +20,15 @@ _() {
|
||||
"$@"
|
||||
}
|
||||
|
||||
_ cargo test --features=cuda,erasure
|
||||
FEATURES=cuda,erasure,chacha
|
||||
_ cargo test --verbose --features="$FEATURES" --lib
|
||||
|
||||
# Run integration tests serially
|
||||
for test in tests/*.rs; do
|
||||
test=${test##*/} # basename x
|
||||
test=${test%.rs} # basename x .rs
|
||||
_ cargo test --verbose --jobs=1 --features="$FEATURES" --test="$test"
|
||||
done
|
||||
|
||||
echo --- ci/localnet-sanity.sh
|
||||
(
|
||||
|
@ -4,6 +4,7 @@ cd "$(dirname "$0")/.."
|
||||
|
||||
ci/version-check.sh stable
|
||||
export RUST_BACKTRACE=1
|
||||
export RUSTFLAGS="-D warnings"
|
||||
|
||||
_() {
|
||||
echo "--- $*"
|
||||
@ -12,7 +13,25 @@ _() {
|
||||
|
||||
_ cargo fmt -- --check
|
||||
_ cargo build --verbose
|
||||
_ cargo test --verbose
|
||||
_ cargo test --verbose --lib
|
||||
_ cargo clippy -- --deny=warnings
|
||||
|
||||
# Run integration tests serially
|
||||
for test in tests/*.rs; do
|
||||
test=${test##*/} # basename x
|
||||
test=${test%.rs} # basename x .rs
|
||||
_ cargo test --verbose --jobs=1 --test="$test"
|
||||
done
|
||||
|
||||
# Run native program's tests
|
||||
for program in programs/native/*; do
|
||||
echo --- "$program"
|
||||
(
|
||||
set -x
|
||||
cd "$program"
|
||||
cargo test --verbose
|
||||
)
|
||||
done
|
||||
|
||||
echo --- ci/localnet-sanity.sh
|
||||
(
|
||||
|
9
ci/testnet-automation-cleanup.sh
Executable file
@ -0,0 +1,9 @@
|
||||
#!/bin/bash -e
|
||||
|
||||
cd "$(dirname "$0")/.."
|
||||
|
||||
echo --- find testnet configuration
|
||||
net/gce.sh config -p testnet-automation
|
||||
|
||||
echo --- delete testnet
|
||||
net/gce.sh delete -p testnet-automation
|
7
ci/testnet-automation-json-parser.py
Executable file
@ -0,0 +1,7 @@
|
||||
#!/usr/bin/env python
import sys, json

# Read an InfluxDB JSON query response from stdin and print, for each result,
# a [column-name, first-value] pair (Python 2 print statement).
data = json.load(sys.stdin)
print [ \
    [result['series'][0]['columns'][1].encode(), result['series'][0]['values'][0][1]] \
    for result in data['results']]
|
80
ci/testnet-automation.sh
Executable file
@ -0,0 +1,80 @@
|
||||
#!/bin/bash -e
|
||||
|
||||
cd "$(dirname "$0")/.."
|
||||
|
||||
echo --- downloading snap from build artifacts
|
||||
buildkite-agent artifact download "solana_*.snap" .
|
||||
|
||||
# shellcheck disable=SC1091
|
||||
source ci/upload_ci_artifact.sh
|
||||
|
||||
[[ -n $ITERATION_WAIT ]] || ITERATION_WAIT=300
|
||||
[[ -n $NUMBER_OF_NODES ]] || NUMBER_OF_NODES="10 25 50 100"
|
||||
[[ -n $LEADER_CPU_MACHINE_TYPE ]] ||
|
||||
LEADER_CPU_MACHINE_TYPE="n1-standard-16 --accelerator count=2,type=nvidia-tesla-v100"
|
||||
[[ -n $CLIENT_COUNT ]] || CLIENT_COUNT=2
|
||||
[[ -n $TESTNET_TAG ]] || TESTNET_TAG=testnet-automation
|
||||
[[ -n $TESTNET_ZONE ]] || TESTNET_ZONE=us-west1-b
|
||||
|
||||
launchTestnet() {
|
||||
declare nodeCount=$1
|
||||
echo --- setup "$nodeCount" node test
|
||||
net/gce.sh create \
|
||||
-n "$nodeCount" -c "$CLIENT_COUNT" \
|
||||
-G "$LEADER_CPU_MACHINE_TYPE" \
|
||||
-p "$TESTNET_TAG" -z "$TESTNET_ZONE"
|
||||
|
||||
echo --- configure database
|
||||
net/init-metrics.sh -e
|
||||
|
||||
echo --- start "$nodeCount" node test
|
||||
net/net.sh start -o noValidatorSanity -S solana_*.snap
|
||||
|
||||
echo --- wait "$ITERATION_WAIT" seconds to complete test
|
||||
sleep "$ITERATION_WAIT"
|
||||
|
||||
declare q_mean_tps='
|
||||
SELECT round(mean("sum_count")) AS "mean_tps" FROM (
|
||||
SELECT sum("count") AS "sum_count"
|
||||
FROM "testnet-automation"."autogen"."counter-banking_stage-process_transactions"
|
||||
WHERE time > now() - 300s GROUP BY time(1s)
|
||||
)'
|
||||
|
||||
declare q_max_tps='
|
||||
SELECT round(max("sum_count")) AS "max_tps" FROM (
|
||||
SELECT sum("count") AS "sum_count"
|
||||
FROM "testnet-automation"."autogen"."counter-banking_stage-process_transactions"
|
||||
WHERE time > now() - 300s GROUP BY time(1s)
|
||||
)'
|
||||
|
||||
declare q_mean_finality='
|
||||
SELECT round(mean("duration_ms")) as "mean_finality"
|
||||
FROM "testnet-automation"."autogen"."leader-finality"
|
||||
WHERE time > now() - 300s'
|
||||
|
||||
declare q_max_finality='
|
||||
SELECT round(max("duration_ms")) as "max_finality"
|
||||
FROM "testnet-automation"."autogen"."leader-finality"
|
||||
WHERE time > now() - 300s'
|
||||
|
||||
declare q_99th_finality='
|
||||
SELECT round(percentile("duration_ms", 99)) as "99th_finality"
|
||||
FROM "testnet-automation"."autogen"."leader-finality"
|
||||
WHERE time > now() - 300s'
|
||||
|
||||
curl -G "https://metrics.solana.com:8086/query?u=${INFLUX_USERNAME}&p=${INFLUX_PASSWORD}" \
|
||||
--data-urlencode "db=$INFLUX_DATABASE" \
|
||||
--data-urlencode "q=$q_mean_tps;$q_max_tps;$q_mean_finality;$q_max_finality;$q_99th_finality" |
|
||||
python ci/testnet-automation-json-parser.py >>TPS"$nodeCount".log
|
||||
|
||||
upload_ci_artifact TPS"$nodeCount".log
|
||||
}
|
||||
|
||||
# This is needed because Buildkite doesn't let us define an array of numbers;
# the node counts arrive as a single space-separated string.
|
||||
# shellcheck disable=SC2206
|
||||
nodes_count_array=($NUMBER_OF_NODES)
|
||||
|
||||
for n in "${nodes_count_array[@]}"; do
|
||||
launchTestnet "$n"
|
||||
done
|
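Because every knob is a plain environment variable behind a `[[ -n $X ]] || X=default` fallback, a scaled-down run needs no script edits; the values below are illustrative:

```bash
# Hypothetical small run: two cluster sizes, a shorter soak, one client node.
NUMBER_OF_NODES="5 10" ITERATION_WAIT=120 CLIENT_COUNT=1 \
  ci/testnet-automation.sh
```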
@ -1,471 +1,162 @@
|
||||
#!/bin/bash -e
|
||||
#
|
||||
# Deploys the Solana software running on the testnet full nodes
|
||||
#
|
||||
# This script must be run by a user/machine that has successfully authenticated
|
||||
# with GCP and has sufficient permission.
|
||||
#
|
||||
here=$(dirname "$0")
|
||||
metrics_write_datapoint="$here"/../multinode-demo/metrics_write_datapoint.sh
|
||||
|
||||
# TODO: Switch over to rolling updates
|
||||
ROLLING_UPDATE=false
|
||||
#ROLLING_UPDATE=true
|
||||
cd "$(dirname "$0")"/..
|
||||
|
||||
if [[ -z $SOLANA_METRICS_CONFIG ]]; then
|
||||
echo Error: SOLANA_METRICS_CONFIG environment variable is unset
|
||||
exit 1
|
||||
fi
|
||||
zone=
|
||||
leaderAddress=
|
||||
leaderMachineType=
|
||||
clientNodeCount=0
|
||||
validatorNodeCount=10
|
||||
publicNetwork=false
|
||||
snapChannel=edge
|
||||
releaseChannel=edge
|
||||
delete=false
|
||||
enableGpu=false
|
||||
useReleaseChannel=false
|
||||
|
||||
# Default to edge channel. To select the beta channel:
|
||||
# export SOLANA_SNAP_CHANNEL=beta
|
||||
if [[ -z $SOLANA_SNAP_CHANNEL ]]; then
|
||||
SOLANA_SNAP_CHANNEL=edge
|
||||
fi
|
||||
usage() {
|
||||
exitcode=0
|
||||
if [[ -n "$1" ]]; then
|
||||
exitcode=1
|
||||
echo "Error: $*"
|
||||
fi
|
||||
cat <<EOF
|
||||
usage: $0 [name] [zone] [options...]
|
||||
|
||||
# Select default network URL based on SOLANA_SNAP_CHANNEL if SOLANA_NET_ENTRYPOINT is
|
||||
# unspecified
|
||||
if [[ -z $SOLANA_NET_ENTRYPOINT ]]; then
|
||||
case $SOLANA_SNAP_CHANNEL in
|
||||
edge)
|
||||
SOLANA_NET_ENTRYPOINT=master.testnet.solana.com
|
||||
unset SOLANA_NET_NAME
|
||||
Deploys a CD testnet
|
||||
|
||||
name - name of the network
|
||||
zone - zone to deploy the network into
|
||||
|
||||
options:
|
||||
-s edge|beta|stable - Deploy the specified Snap release channel
|
||||
(default: $snapChannel)
|
||||
-t edge|beta|stable - Deploy the specified prebuilt tar from channel
|
||||
(default: $releaseChannel)
|
||||
-n [number] - Number of validator nodes (default: $validatorNodeCount)
|
||||
-c [number] - Number of client nodes (default: $clientNodeCount)
|
||||
-P - Use public network IP addresses (default: $publicNetwork)
|
||||
-G - Enable GPU, and set count/type of GPUs to use (e.g n1-standard-16 --accelerator count=4,type=nvidia-tesla-k80)
|
||||
-g - Enable GPU (default: $enableGpu)
|
||||
-a [address] - Set the leader node's external IP address to this GCE address
|
||||
-d - Delete the network
|
||||
|
||||
Note: the SOLANA_METRICS_CONFIG environment variable is used to configure
|
||||
metrics
|
||||
EOF
|
||||
exit $exitcode
|
||||
}
|
||||
|
||||
netName=$1
|
||||
zone=$2
|
||||
[[ -n $netName ]] || usage
|
||||
[[ -n $zone ]] || usage "Zone not specified"
|
||||
shift 2
|
||||
|
||||
while getopts "h?p:Pn:c:s:t:gG:a:d" opt; do
|
||||
case $opt in
|
||||
h | \?)
|
||||
usage
|
||||
;;
|
||||
beta)
|
||||
SOLANA_NET_ENTRYPOINT=testnet.solana.com
|
||||
unset SOLANA_NET_NAME
|
||||
P)
|
||||
publicNetwork=true
|
||||
;;
|
||||
n)
|
||||
validatorNodeCount=$OPTARG
|
||||
;;
|
||||
c)
|
||||
clientNodeCount=$OPTARG
|
||||
;;
|
||||
s)
|
||||
case $OPTARG in
|
||||
edge|beta|stable)
|
||||
snapChannel=$OPTARG
|
||||
;;
|
||||
*)
|
||||
usage "Invalid snap channel: $OPTARG"
|
||||
;;
|
||||
esac
|
||||
;;
|
||||
t)
|
||||
case $OPTARG in
|
||||
edge|beta|stable)
|
||||
releaseChannel=$OPTARG
|
||||
useReleaseChannel=true
|
||||
;;
|
||||
*)
|
||||
usage "Invalid release channel: $OPTARG"
|
||||
;;
|
||||
esac
|
||||
;;
|
||||
g)
|
||||
enableGpu=true
|
||||
;;
|
||||
G)
|
||||
enableGpu=true
|
||||
leaderMachineType=$OPTARG
|
||||
;;
|
||||
a)
|
||||
leaderAddress=$OPTARG
|
||||
;;
|
||||
d)
|
||||
delete=true
|
||||
;;
|
||||
*)
|
||||
echo Error: Unknown SOLANA_SNAP_CHANNEL=$SOLANA_SNAP_CHANNEL
|
||||
exit 1
|
||||
usage "Error: unhandled option: $opt"
|
||||
;;
|
||||
esac
|
||||
fi
|
||||
done
|
||||
|
||||
if [[ -z $SOLANA_NET_NAME ]]; then
|
||||
SOLANA_NET_NAME=${SOLANA_NET_ENTRYPOINT//./-}
|
||||
fi
|
||||
|
||||
: ${SOLANA_NET_NAME:?$SOLANA_NET_ENTRYPOINT}
|
||||
netBasename=${SOLANA_NET_NAME/-*/}
|
||||
if [[ $netBasename != testnet ]]; then
|
||||
netBasename="testnet-$netBasename"
|
||||
fi
|
||||
gce_create_args=(
|
||||
-a "$leaderAddress"
|
||||
-c "$clientNodeCount"
|
||||
-n "$validatorNodeCount"
|
||||
-p "$netName"
|
||||
-z "$zone"
|
||||
)
|
||||
|
||||
# Figure installation command
|
||||
SNAP_INSTALL_CMD="\
|
||||
for i in {1..3}; do \
|
||||
sudo snap install solana --$SOLANA_SNAP_CHANNEL --devmode && break;
|
||||
sleep 1; \
|
||||
done \
|
||||
"
|
||||
LOCAL_SNAP=$1
|
||||
if [[ -n $LOCAL_SNAP ]]; then
|
||||
if [[ ! -f $LOCAL_SNAP ]]; then
|
||||
echo "Error: $LOCAL_SNAP is not a file"
|
||||
exit 1
|
||||
if $enableGpu; then
|
||||
if [[ -z $leaderMachineType ]]; then
|
||||
gce_create_args+=(-g)
|
||||
else
|
||||
gce_create_args+=(-G "$leaderMachineType")
|
||||
fi
|
||||
SNAP_INSTALL_CMD="sudo snap install ~/solana_local.snap --devmode --dangerous"
|
||||
fi
|
||||
SNAP_INSTALL_CMD="sudo snap remove solana; $SNAP_INSTALL_CMD"
|
||||
|
||||
EARLYOOM_INSTALL_CMD="\
|
||||
wget -O install-earlyoom.sh https://raw.githubusercontent.com/solana-labs/solana/master/ci/install-earlyoom.sh; \
|
||||
bash install-earlyoom.sh \
|
||||
"
|
||||
SNAP_INSTALL_CMD="$EARLYOOM_INSTALL_CMD; $SNAP_INSTALL_CMD"
|
||||
|
||||
# `export SKIP_INSTALL=1` to reset the network without reinstalling the snap
|
||||
if [[ -n $SKIP_INSTALL ]]; then
|
||||
SNAP_INSTALL_CMD="echo Install skipped"
|
||||
fi
|
||||
|
||||
echo "+++ Configuration for $netBasename"
|
||||
publicUrl="$SOLANA_NET_ENTRYPOINT"
|
||||
if [[ $publicUrl = testnet.solana.com ]]; then
|
||||
publicIp="" # Use default value
|
||||
if $publicNetwork; then
|
||||
gce_create_args+=(-P)
|
||||
fi
|
||||
|
||||
set -x
|
||||
|
||||
echo --- gce.sh delete
|
||||
time net/gce.sh delete -z "$zone" -p "$netName"
|
||||
if $delete; then
|
||||
exit 0
|
||||
fi
|
||||
|
||||
echo --- gce.sh create
|
||||
time net/gce.sh create "${gce_create_args[@]}"
|
||||
net/init-metrics.sh -e
|
||||
|
||||
echo --- net.sh start
|
||||
maybeRejectExtraNodes=
|
||||
if ! $publicNetwork; then
|
||||
maybeRejectExtraNodes="-o rejectExtraNodes"
|
||||
fi
|
||||
maybeNoValidatorSanity=
|
||||
if [[ -n $NO_VALIDATOR_SANITY ]]; then
|
||||
maybeNoValidatorSanity="-o noValidatorSanity"
|
||||
fi
|
||||
maybeNoLedgerVerify=
|
||||
if [[ -n $NO_LEDGER_VERIFY ]]; then
|
||||
maybeNoLedgerVerify="-o noLedgerVerify"
|
||||
fi
|
||||
# shellcheck disable=SC2086 # Don't want to double quote maybeRejectExtraNodes
|
||||
if ! $useReleaseChannel; then
|
||||
time net/net.sh start -s "$snapChannel" $maybeRejectExtraNodes $maybeNoValidatorSanity $maybeNoLedgerVerify
|
||||
else
|
||||
publicIp=$(dig +short $publicUrl | head -n1)
|
||||
time net/net.sh start -t "$releaseChannel" $maybeRejectExtraNodes $maybeNoValidatorSanity $maybeNoLedgerVerify
|
||||
fi
|
||||
|
||||
echo "Network name: $SOLANA_NET_NAME"
|
||||
echo "Network entry point URL: $publicUrl ($publicIp)"
|
||||
echo "Snap channel: $SOLANA_SNAP_CHANNEL"
|
||||
echo "Install command: $SNAP_INSTALL_CMD"
|
||||
echo "Setup args: $SOLANA_SETUP_ARGS"
|
||||
[[ -z $LOCAL_SNAP ]] || echo "Local snap: $LOCAL_SNAP"
|
||||
|
||||
vmlist=() # Each array element is formatted as "class:vmName:vmZone:vmPublicIp"
|
||||
|
||||
vm_exec() {
|
||||
declare vmName=$1
|
||||
declare vmZone=$2
|
||||
declare vmPublicIp=$3
|
||||
declare message=$4
|
||||
declare cmd=$5
|
||||
|
||||
echo "--- $message $vmName in zone $vmZone ($vmPublicIp)"
|
||||
ssh -o BatchMode=yes -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null \
|
||||
testnet-deploy@"$vmPublicIp" "$cmd"
|
||||
}
|
||||
|
||||
#
|
||||
# vm_foreach [cmd] [extra args to cmd]
|
||||
# where
|
||||
# cmd - the command to execute on each VM
|
||||
# The command will receive three fixed arguments, followed by any
|
||||
# additional arguments supplied to vm_foreach:
|
||||
# vmName - GCP name of the VM
|
||||
# vmZone - The GCP zone the VM is located in
|
||||
# vmPublicIp - The public IP address of this VM
|
||||
# vmClass - The 'class' of this VM
|
||||
# count - Monotonically increasing count for each
|
||||
# invocation of cmd, starting at 1
|
||||
# ... - Extra args to cmd..
|
||||
#
|
||||
#
|
||||
vm_foreach() {
|
||||
declare cmd=$1
|
||||
shift
|
||||
|
||||
declare count=1
|
||||
for info in "${vmlist[@]}"; do
|
||||
declare vmClass vmName vmZone vmPublicIp
|
||||
IFS=: read -r vmClass vmName vmZone vmPublicIp < <(echo "$info")
|
||||
|
||||
eval "$cmd" "$vmName" "$vmZone" "$vmPublicIp" "$vmClass" "$count" "$@"
|
||||
count=$((count + 1))
|
||||
done
|
||||
}
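
# Illustrative usage only (hypothetical callback, not part of the deploy flow):
# the callback receives the five fixed arguments first, then any extras passed
# to vm_foreach:
#
#   describe_vm() {
#     declare vmName=$1 vmZone=$2 vmPublicIp=$3 vmClass=$4 count=$5 label=$6
#     echo "[$label $count] $vmClass $vmName ($vmPublicIp) in $vmZone"
#   }
#   vm_foreach describe_vm inventory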

#
# vm_foreach_in_class [class] [cmd]
# where
#   class - the desired VM class to operate on
#   cmd - the command to execute on each VM in the desired class.
#         The command will receive four arguments:
#             vmName - GCP name of the VM
#             vmZone - The GCP zone the VM is located in
#             vmPublicIp - The public IP address of this VM
#             count - Monotonically increasing count for each
#                     invocation of cmd, starting at 1
#
#
_run_cmd_if_class() {
  declare vmName=$1
  declare vmZone=$2
  declare vmPublicIp=$3
  declare vmClass=$4
  declare count=$5
  declare class=$6
  declare cmd=$7
  if [[ $class = "$vmClass" ]]; then
    eval "$cmd" "$vmName" "$vmZone" "$vmPublicIp" "$count"
  fi
}

vm_foreach_in_class() {
  declare class=$1
  declare cmd=$2
  vm_foreach _run_cmd_if_class "$class" "$cmd"
}

#
# Load all VMs matching the specified filter and tag them with the specified
# class into the `vmlist` array.
findVms() {
  declare class="$1"
  declare filter="$2"
  gcloud compute instances list --filter="$filter"
  while read -r vmName vmZone vmPublicIp status; do
    if [[ $status != RUNNING ]]; then
      echo "Warning: $vmName is not RUNNING, ignoring it."
      continue
    fi
    vmlist+=("$class:$vmName:$vmZone:$vmPublicIp")
  done < <(gcloud compute instances list \
             --filter="$filter" \
             --format 'value(name,zone,networkInterfaces[0].accessConfigs[0].natIP,status)')
}

wait_for_pids() {
  echo "--- Waiting for $*"
  for pid in "${pids[@]}"; do
    declare ok=true
    wait "$pid" || ok=false
    cat "log-$pid.txt"
    if ! $ok; then
      echo ^^^ +++
      exit 1
    fi
    rm "log-$pid.txt"
  done
}

delete_unreachable_validators() {
  declare vmName=$1
  declare vmZone=$2
  declare vmPublicIp=$3

  touch "log-$vmName.txt"
  (
    SECONDS=0
    if ! vm_exec "$vmName" "$vmZone" "$vmPublicIp" "Checking $vmName" uptime; then
      echo "^^^ +++"

      # Validators are managed by a Compute Engine Instance Group, so deleting
      # one will just cause a new one to be spawned.
      echo "Warning: $vmName is unreachable, deleting it"
      gcloud compute instances delete "$vmName" --zone "$vmZone"
    fi
    echo "validator checked in ${SECONDS} seconds"
  ) >> "log-$vmName.txt" 2>&1 &
  declare pid=$!

  # Rename log file so it can be discovered later by $pid
  mv "log-$vmName.txt" "log-$pid.txt"
  pids+=("$pid")
}


echo "Validator nodes (unverified):"
findVms validator "name~^$SOLANA_NET_NAME-validator-"
pids=()
vm_foreach_in_class validator delete_unreachable_validators
wait_for_pids validator sanity check
vmlist=()

echo "Leader node:"
findVms leader "name=$SOLANA_NET_NAME"
[[ ${#vmlist[@]} = 1 ]] || {
  echo "Unable to find $SOLANA_NET_NAME"
  exit 1
}

echo "Client node(s):"
findVms client "name~^$SOLANA_NET_NAME-client"

echo "Validator nodes:"
findVms validator "name~^$SOLANA_NET_NAME-validator-"

fullnode_count=0
inc_fullnode_count() {
  fullnode_count=$((fullnode_count + 1))
}
vm_foreach_in_class leader inc_fullnode_count
vm_foreach_in_class validator inc_fullnode_count

# Add "network stopping" datapoint
$metrics_write_datapoint "testnet-deploy,name=$netBasename stop=1"

client_start() {
  declare vmName=$1
  declare vmZone=$2
  declare vmPublicIp=$3
  declare count=$4

  vm_exec "$vmName" "$vmZone" "$vmPublicIp" \
    "Starting client $count:" \
    "\
      set -x;
      snap info solana; \
      sudo snap get solana; \
      threadCount=\$(nproc); \
      if [[ \$threadCount -gt 4 ]]; then threadCount=4; fi; \
      tmux kill-session -t solana; \
      tmux new -s solana -d \" \
        set -x; \
        sudo rm /tmp/solana.log; \
        while : ; do \
          /snap/bin/solana.bench-tps $SOLANA_NET_ENTRYPOINT $fullnode_count --loop -s 600 --sustained -t \$threadCount 2>&1 | tee -a /tmp/solana.log; \
          echo 'https://metrics.solana.com:8086/write?db=${INFLUX_DATABASE}&u=${INFLUX_USERNAME}&p=${INFLUX_PASSWORD}' \
            | xargs curl --max-time 5 -XPOST --data-binary 'testnet-deploy,name=$netBasename clientexit=1'; \
          echo Error: bench-tps should never exit | tee -a /tmp/solana.log; \
        done; \
        bash \
      \"; \
      sleep 2; \
      tmux capture-pane -t solana -p -S -100; \
      tail /tmp/solana.log; \
    "
}

client_stop() {
  declare vmName=$1
  declare vmZone=$2
  declare vmPublicIp=$3
  declare count=$4

  touch "log-$vmName.txt"
  (
    SECONDS=0
    vm_exec "$vmName" "$vmZone" "$vmPublicIp" \
      "Stopping client $vmName ($count):" \
      "\
        set -x;
        tmux list-sessions; \
        tmux capture-pane -t solana -p; \
        tmux kill-session -t solana; \
        $SNAP_INSTALL_CMD; \
        sudo snap set solana metrics-config=$SOLANA_METRICS_CONFIG \
          rust-log=$RUST_LOG \
          default-metrics-rate=$SOLANA_DEFAULT_METRICS_RATE \
        ; \
      "
    echo "Client stopped in ${SECONDS} seconds"
  ) >> "log-$vmName.txt" 2>&1 &
  declare pid=$!

  # Rename log file so it can be discovered later by $pid
  mv "log-$vmName.txt" "log-$pid.txt"
  pids+=("$pid")
}

fullnode_start() {
  declare class=$1
  declare vmName=$2
  declare vmZone=$3
  declare vmPublicIp=$4
  declare count=$5

  touch "log-$vmName.txt"
  (
    SECONDS=0
    commonNodeConfig="\
      rust-log=$RUST_LOG \
      default-metrics-rate=$SOLANA_DEFAULT_METRICS_RATE \
      metrics-config=$SOLANA_METRICS_CONFIG \
      setup-args=$SOLANA_SETUP_ARGS \
    "
    if [[ $class = leader ]]; then
      nodeConfig="mode=leader+drone $commonNodeConfig"
      if [[ -n $SOLANA_CUDA ]]; then
        nodeConfig="$nodeConfig enable-cuda=1"
      fi
    else
      nodeConfig="mode=validator leader-address=$publicIp $commonNodeConfig"
    fi

    vm_exec "$vmName" "$vmZone" "$vmPublicIp" "Starting $class $count:" \
      "\
        set -ex; \
        logmarker='solana deploy $(date)/$RANDOM'; \
        logger \"\$logmarker\"; \
        $SNAP_INSTALL_CMD; \
        sudo snap set solana $nodeConfig; \
        snap info solana; \
        sudo snap get solana; \
        echo Slight delay to get more syslog output; \
        sleep 2; \
        sudo grep -Pzo \"\$logmarker(.|\\n)*\" /var/log/syslog \
      "
    echo "Succeeded in ${SECONDS} seconds"
  ) >> "log-$vmName.txt" 2>&1 &
  declare pid=$!

  # Rename log file so it can be discovered later by $pid
  mv "log-$vmName.txt" "log-$pid.txt"

  pids+=("$pid")
}

leader_start() {
  fullnode_start leader "$@"
}

validator_start() {
  fullnode_start validator "$@"
}

fullnode_stop() {
  declare vmName=$1
  declare vmZone=$2
  declare vmPublicIp=$3
  declare count=$4

  touch "log-$vmName.txt"
  (
    SECONDS=0
    # Try to ping the machine first. When a machine (validator) is restarted,
    # there can be a delay between when the instance is reported as RUNNING and when
    # it's reachable over the network
    timeout 30s bash -c "set -o pipefail; until ping -c 3 $vmPublicIp | tr - _; do echo .; done"
    vm_exec "$vmName" "$vmZone" "$vmPublicIp" "Shutting down" "\
      if snap list solana; then \
        sudo snap set solana mode=; \
      fi"
    echo "Succeeded in ${SECONDS} seconds"
  ) >> "log-$vmName.txt" 2>&1 &
  declare pid=$!

  # Rename log file so it can be discovered later by $pid
  mv "log-$vmName.txt" "log-$pid.txt"

  pids+=("$pid")
}

if [[ -n $LOCAL_SNAP ]]; then
  echo "--- Transferring $LOCAL_SNAP to node(s)"

  transfer_local_snap() {
    declare vmName=$1
    declare vmZone=$2
    declare vmPublicIp=$3
    declare vmClass=$4
    declare count=$5

    echo "--- $vmName in zone $vmZone ($count)"
    SECONDS=0
    scp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null \
      "$LOCAL_SNAP" testnet-deploy@"$vmPublicIp":solana_local.snap
    echo "Succeeded in ${SECONDS} seconds"
  }
  vm_foreach transfer_local_snap
fi

echo "--- Stopping client node(s)"
pids=()
vm_foreach_in_class client client_stop
client_stop_pids=("${pids[@]}")

if ! $ROLLING_UPDATE; then
  pids=()
  echo "--- Shutting down all full nodes"
  vm_foreach_in_class leader fullnode_stop
  vm_foreach_in_class validator fullnode_stop
  wait_for_pids fullnode shutdown
fi

pids=()
echo --- Starting leader node
vm_foreach_in_class leader leader_start
wait_for_pids leader

pids=()
echo --- Starting validator nodes
vm_foreach_in_class validator validator_start
wait_for_pids validators

echo "--- $publicUrl sanity test"
if [[ -z $CI ]]; then
  # TODO: ssh into a node and run testnet-sanity.sh there. It's not safe to
  # assume the correct Snap is installed on the current non-CI machine
  echo Skipped for non-CI deploy
  snapVersion=unknown
else
  (
    set -x
    USE_SNAP=1 ci/testnet-sanity.sh $publicUrl $fullnode_count
  )
  IFS=\ read -r _ snapVersion _ < <(snap info solana | grep "^installed:")
  snapVersion=${snapVersion/0+git./}
fi

pids=("${client_stop_pids[@]}")
wait_for_pids client shutdown
vm_foreach_in_class client client_start

# Add "network started" datapoint
$metrics_write_datapoint "testnet-deploy,name=$netBasename start=1,version=\"$snapVersion\""

exit 0

@ -1,66 +1,40 @@
#!/bin/bash -e
#
# Perform a quick sanity test on the specified testnet
#

cd "$(dirname "$0")/.."
source multinode-demo/common.sh

NET_URL=$1
if [[ -z $NET_URL ]]; then
  NET_URL=testnet.solana.com
fi

EXPECTED_NODE_COUNT=$2
if [[ -z $EXPECTED_NODE_COUNT ]]; then
  EXPECTED_NODE_COUNT=50
fi

echo "--- $NET_URL: verify ledger"
if [[ -d /var/snap/solana/current/config/ledger ]]; then
  # Note: here we assume this script is actually running on the leader node...
  sudo solana.ledger-tool --ledger /var/snap/solana/current/config/ledger verify
else
  echo "^^^ +++"
  echo "Ledger verify skipped"
fi

echo "--- $NET_URL: wallet sanity"
(
  set -x
  multinode-demo/test/wallet-sanity.sh $NET_URL
)

echo "--- $NET_URL: node count"
if [[ -n "$USE_SNAP" ]]; then
  # TODO: Merge client.sh functionality into solana-bench-tps proper and
  # remove this USE_SNAP case
  cmd=$solana_bench_tps
else
  cmd=multinode-demo/client.sh
fi

(
  set -x
  $cmd $NET_URL $EXPECTED_NODE_COUNT -c
)

echo "--- $NET_URL: validator sanity"
if [[ -z $NO_VALIDATOR_SANITY ]]; then
  (
    ./multinode-demo/setup.sh -t validator
    set -eo pipefail
    timeout 10s ./multinode-demo/validator.sh "$NET_URL" 2>&1 | tee validator.log
  )
  wc -l validator.log
  if grep -C100 panic validator.log; then
    echo "^^^ +++ Panic observed"
    exit 1
  else
    echo "Validator log looks ok"
usage() {
  exitcode=0
  if [[ -n "$1" ]]; then
    exitcode=1
    echo "Error: $*"
  fi
  else
    echo "^^^ +++ Validator sanity disabled (NO_VALIDATOR_SANITY defined)"
  fi
  cat <<EOF
usage: $0 [name] [zone]

 Sanity check a CD testnet

   name - name of the network
   zone - zone of the network

 Note: the SOLANA_METRICS_CONFIG environment variable is used to configure
       metrics
EOF
  exit $exitcode
}

netName=$1
zone=$2
[[ -n $netName ]] || usage ""
[[ -n $zone ]] || usage "Zone not specified"

set -x
echo --- gce.sh config
net/gce.sh config -p "$netName" -z "$zone"
net/init-metrics.sh -e
echo --- net.sh sanity
net/net.sh sanity \
  ${NO_LEDGER_VERIFY:+-o noLedgerVerify} \
  ${NO_VALIDATOR_SANITY:+-o noValidatorSanity} \
  ${REJECT_EXTRA_NODES:+-o rejectExtraNodes} \

exit 0

@ -19,12 +19,12 @@ require() {

case ${1:-stable} in
nightly)
  require rustc 1.30.[0-9]+-nightly
  require cargo 1.29.[0-9]+-nightly
  require rustc 1.31.[0-9]+-nightly
  require cargo 1.31.[0-9]+-nightly
  ;;
stable)
  require rustc 1.28.[0-9]+
  require cargo 1.28.[0-9]+
  require rustc 1.30.[0-9]+
  require cargo 1.30.[0-9]+
  ;;
*)
  echo Error: unknown argument: "$1"

339 doc/json-rpc.md Normal file
@ -0,0 +1,339 @@

Solana JSON RPC API
===

Solana nodes accept HTTP requests using the [JSON-RPC 2.0](https://www.jsonrpc.org/specification) specification.

To interact with a Solana node inside a JavaScript application, use the [solana-web3.js](https://github.com/solana-labs/solana-web3.js) library, which gives a convenient interface for the RPC methods.

RPC HTTP Endpoint
---

**Default port:** 8899
e.g. http://localhost:8899, http://192.168.1.88:8899

RPC PubSub WebSocket Endpoint
---

**Default port:** 8900
e.g. ws://localhost:8900, ws://192.168.1.88:8900


Methods
---

* [confirmTransaction](#confirmtransaction)
* [getBalance](#getbalance)
* [getAccountInfo](#getaccountinfo)
* [getLastId](#getlastid)
* [getSignatureStatus](#getsignaturestatus)
* [getTransactionCount](#gettransactioncount)
* [requestAirdrop](#requestairdrop)
* [sendTransaction](#sendtransaction)
* [startSubscriptionChannel](#startsubscriptionchannel)

* [Subscription Websocket](#subscription-websocket)
  * [accountSubscribe](#accountsubscribe)
  * [accountUnsubscribe](#accountunsubscribe)
  * [signatureSubscribe](#signaturesubscribe)
  * [signatureUnsubscribe](#signatureunsubscribe)

Request Formatting
---

To make a JSON-RPC request, send an HTTP POST request with a `Content-Type: application/json` header. The JSON request data should contain 4 fields:

* `jsonrpc`, set to `"2.0"`
* `id`, a unique client-generated identifying integer
* `method`, a string containing the method to be invoked
* `params`, a JSON array of ordered parameter values

Example using curl:
```bash
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getBalance", "params":["83astBRguLMdt2h5U1Tpdq5tjFoJ6noeGwaY3mDLVcri"]}' 192.168.1.88:8899
```

The response output will be a JSON object with the following fields:

* `jsonrpc`, matching the request specification
* `id`, matching the request identifier
* `result`, requested data or success confirmation

Requests can be sent in batches by sending an array of JSON-RPC request objects as the data for a single POST.
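As a sketch (the result values below are illustrative), a batch of two requests returns an array of response objects, matched to the requests by `id`:
```bash
// Request (a batch of two)
curl -X POST -H "Content-Type: application/json" -d '[{"jsonrpc":"2.0", "id":1, "method":"getTransactionCount"}, {"jsonrpc":"2.0", "id":2, "method":"getLastId"}]' http://localhost:8899

// Result
[{"jsonrpc":"2.0","result":268,"id":1},{"jsonrpc":"2.0","result":"GH7ome3EiwEr7tu9JuTh2dpYWBJK3z69Xm1ZE3MEE6JC","id":2}]
```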

Definitions
---

* Hash: A SHA-256 hash of a chunk of data.
* Pubkey: The public key of an Ed25519 key-pair.
* Signature: An Ed25519 signature of a chunk of data.
* Transaction: A Solana instruction signed by a client key-pair.

JSON RPC API Reference
---

### confirmTransaction
Returns a transaction receipt

##### Parameters:
* `string` - Signature of Transaction to confirm, as base-58 encoded string

##### Results:
* `boolean` - Transaction status, true if Transaction is confirmed

##### Example:
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"confirmTransaction", "params":["5VERv8NMvzbJMEkV8xnrLkEaWRtSz9CosKDYjCJjBRnbJLgp8uirBgmQpjKhoR4tjF3ZpRzrFmBV6UjKdiSZkQUW"]}' http://localhost:8899

// Result
{"jsonrpc":"2.0","result":true,"id":1}
```

---

### getBalance
Returns the balance of the account of provided Pubkey

##### Parameters:
* `string` - Pubkey of account to query, as base-58 encoded string

##### Results:
* `integer` - quantity, as a signed 64-bit integer

##### Example:
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getBalance", "params":["83astBRguLMdt2h5U1Tpdq5tjFoJ6noeGwaY3mDLVcri"]}' http://localhost:8899

// Result
{"jsonrpc":"2.0","result":0,"id":1}
```

---

### getAccountInfo
Returns all information associated with the account of provided Pubkey

##### Parameters:
* `string` - Pubkey of account to query, as base-58 encoded string

##### Results:
The result field will be a JSON object with the following sub fields:

* `tokens`, number of tokens assigned to this account, as a signed 64-bit integer
* `program_id`, array of 32 bytes representing the program this account has been assigned to
* `userdata`, array of bytes representing any userdata associated with the account

##### Example:
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getAccountInfo", "params":["FVxxngPx368XvMCoeskdd6U8cZJFsfa1BEtGWqyAxRj4"]}' http://localhost:8899

// Result
{"jsonrpc":"2.0","result":{"program_id":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"tokens":1,"userdata":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,20,0,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]},"id":1}
```

---

### getLastId
Returns the last entry ID from the ledger

##### Parameters:
None

##### Results:
* `string` - the ID of last entry, a Hash as base-58 encoded string

##### Example:
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getLastId"}' http://localhost:8899

// Result
{"jsonrpc":"2.0","result":"GH7ome3EiwEr7tu9JuTh2dpYWBJK3z69Xm1ZE3MEE6JC","id":1}
```

---

### getSignatureStatus
Returns the status of a given signature. This method is similar to
[confirmTransaction](#confirmtransaction) but provides more resolution for error
events.

##### Parameters:
* `string` - Signature of Transaction to confirm, as base-58 encoded string

##### Results:
* `string` - Transaction status:
  * `Confirmed` - Transaction was successful
  * `SignatureNotFound` - Unknown transaction
  * `ProgramRuntimeError` - An error occurred in the program that processed this Transaction
  * `AccountInUse` - Another Transaction had a write lock on one of the Accounts specified in this Transaction. The Transaction may succeed if retried
  * `GenericFailure` - Some other error occurred. **Note**: In the future new Transaction statuses may be added to this list. It's safe to assume that all new statuses will be more specific error conditions that previously presented as `GenericFailure`

##### Example:
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getSignatureStatus", "params":["5VERv8NMvzbJMEkV8xnrLkEaWRtSz9CosKDYjCJjBRnbJLgp8uirBgmQpjKhoR4tjF3ZpRzrFmBV6UjKdiSZkQUW"]}' http://localhost:8899

// Result
{"jsonrpc":"2.0","result":"SignatureNotFound","id":1}
```

---

### getTransactionCount
Returns the current Transaction count from the ledger

##### Parameters:
None

##### Results:
* `integer` - count, as unsigned 64-bit integer

##### Example:
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getTransactionCount"}' http://localhost:8899

// Result
{"jsonrpc":"2.0","result":268,"id":1}
```

---

### requestAirdrop
Requests an airdrop of tokens to a Pubkey

##### Parameters:
* `string` - Pubkey of account to receive tokens, as base-58 encoded string
* `integer` - token quantity, as a signed 64-bit integer

##### Results:
* `string` - Transaction Signature of airdrop, as base-58 encoded string

##### Example:
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"requestAirdrop", "params":["83astBRguLMdt2h5U1Tpdq5tjFoJ6noeGwaY3mDLVcri", 50]}' http://localhost:8899

// Result
{"jsonrpc":"2.0","result":"5VERv8NMvzbJMEkV8xnrLkEaWRtSz9CosKDYjCJjBRnbJLgp8uirBgmQpjKhoR4tjF3ZpRzrFmBV6UjKdiSZkQUW","id":1}
```

---

### sendTransaction
Creates a new transaction

##### Parameters:
* `array` - array of octets containing a fully-signed Transaction

##### Results:
* `string` - Transaction Signature, as base-58 encoded string

##### Example:
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"sendTransaction", "params":[[61, 98, 55, 49, 15, 187, 41, 215, 176, 49, 234, 229, 228, 77, 129, 221, 239, 88, 145, 227, 81, 158, 223, 123, 14, 229, 235, 247, 191, 115, 199, 71, 121, 17, 32, 67, 63, 209, 239, 160, 161, 2, 94, 105, 48, 159, 235, 235, 93, 98, 172, 97, 63, 197, 160, 164, 192, 20, 92, 111, 57, 145, 251, 6, 40, 240, 124, 194, 149, 155, 16, 138, 31, 113, 119, 101, 212, 128, 103, 78, 191, 80, 182, 234, 216, 21, 121, 243, 35, 100, 122, 68, 47, 57, 13, 39, 0, 0, 0, 0, 50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 50, 0, 0, 0, 0, 0, 0, 0, 40, 240, 124, 194, 149, 155, 16, 138, 31, 113, 119, 101, 212, 128, 103, 78, 191, 80, 182, 234, 216, 21, 121, 243, 35, 100, 122, 68, 47, 57, 11, 12, 106, 49, 74, 226, 201, 16, 161, 192, 28, 84, 124, 97, 190, 201, 171, 186, 6, 18, 70, 142, 89, 185, 176, 154, 115, 61, 26, 163, 77, 1, 88, 98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}' http://localhost:8899

// Result
{"jsonrpc":"2.0","result":"2EBVM6cB8vAAD93Ktr6Vd8p67XPbQzCJX47MpReuiCXJAtcjaxpvWpcg9Ege1Nr5Tk3a2GFrByT7WPBjdsTycY9b","id":1}
```

---

### Subscription Websocket
After connecting to the RPC PubSub websocket at `ws://<ADDRESS>/` (see the connection sketch after this list):
- Submit subscription requests to the websocket using the methods below
- Multiple subscriptions may be active at once
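
As a sketch of a session (assuming a generic websocket client such as `wscat`, which is not part of this repo), subscribing to a signature might look like:
```bash
# Connect to a local node's PubSub port, then type a subscribe request
wscat -c ws://localhost:8900
> {"jsonrpc":"2.0", "id":1, "method":"signatureSubscribe", "params":["2EBVM6cB8vAAD93Ktr6Vd8p67XPbQzCJX47MpReuiCXJAtcjaxpvWpcg9Ege1Nr5Tk3a2GFrByT7WPBjdsTycY9b"]}
< {"jsonrpc": "2.0","result": 0,"id": 1}
```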

---

### accountSubscribe
Subscribe to an account to receive notifications when the userdata for a given account public key changes

##### Parameters:
* `string` - account Pubkey, as base-58 encoded string

##### Results:
* `integer` - Subscription id (needed to unsubscribe)

##### Example:
```bash
// Request
{"jsonrpc":"2.0", "id":1, "method":"accountSubscribe", "params":["CM78CPUeXjn8o3yroDHxUtKsZZgoy4GPkPPXfouKNH12"]}

// Result
{"jsonrpc": "2.0","result": 0,"id": 1}
```

##### Notification Format:
```bash
{"jsonrpc": "2.0","method": "accountNotification", "params": {"result": {"program_id":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"tokens":1,"userdata":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,20,0,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]},"subscription":0}}
```

---

### accountUnsubscribe
Unsubscribe from account userdata change notifications

##### Parameters:
* `integer` - id of account Subscription to cancel

##### Results:
* `bool` - unsubscribe success message

##### Example:
```bash
// Request
{"jsonrpc":"2.0", "id":1, "method":"accountUnsubscribe", "params":[0]}

// Result
{"jsonrpc": "2.0","result": true,"id": 1}
```

---

### signatureSubscribe
Subscribe to a transaction signature to receive notification when the transaction is confirmed
On `signatureNotification`, the subscription is automatically cancelled

##### Parameters:
* `string` - Transaction Signature, as base-58 encoded string

##### Results:
* `integer` - subscription id (needed to unsubscribe)

##### Example:
```bash
// Request
{"jsonrpc":"2.0", "id":1, "method":"signatureSubscribe", "params":["2EBVM6cB8vAAD93Ktr6Vd8p67XPbQzCJX47MpReuiCXJAtcjaxpvWpcg9Ege1Nr5Tk3a2GFrByT7WPBjdsTycY9b"]}

// Result
{"jsonrpc": "2.0","result": 0,"id": 1}
```

##### Notification Format:
```bash
{"jsonrpc": "2.0","method": "signatureNotification", "params": {"result": "Confirmed","subscription":0}}
```

---

### signatureUnsubscribe
Unsubscribe from signature confirmation notifications

##### Parameters:
* `integer` - id of signature subscription to cancel

##### Results:
* `bool` - unsubscribe success message

##### Example:
```bash
// Request
{"jsonrpc":"2.0", "id":1, "method":"signatureUnsubscribe", "params":[0]}

// Result
{"jsonrpc": "2.0","result": true,"id": 1}
```
@ -1,8 +1,11 @@

# TestNet debugging info

Currently we have two testnets, 'perf' and 'master', both on the master branch of the solana repo. Deploys happen
at the top of every hour with the latest code. 'perf' has more cores for the client machine to flood the network
with transactions until failure.
Currently we have three testnets:
* `testnet` - public beta channel testnet accessible via testnet.solana.com. Runs 24/7
* `testnet-perf` - private beta channel testnet with clients trying to flood the network
  with transactions until failure. Runs 24/7
* `testnet-master` - private edge channel testnet with clients trying to flood the network
  with transactions until failure. Runs on weekday mornings for a couple of hours

## Deploy process

@ -12,17 +15,21 @@ Validators are selected based on their machine name and everyone gets the binaries

## Where are the testnet logs?

For the client they are put in `/tmp/solana`; for validators and leaders they are in `/var/snap/solana/current/`.
You can also see the backtrace of the client by ssh'ing into the client node and doing:

Attach to the testnet first by running one of:
```bash
$ sudo -u testnet-deploy
$ tmux attach -t solana
$ net/gce.sh config testnet-solana-com
$ net/gce.sh config master-testnet-solana-com
$ net/gce.sh config perf-testnet-solana-com
```

## How do I reset the testnet?
Then run:
```bash
$ net/ssh.sh
```
for log location details

Through buildkite.
## How do I reset the testnet?
Manually trigger the [testnet-deploy](https://buildkite.com/solana-labs/testnet-deploy/) pipeline

## How can I scale the tx generation rate?

@ -32,4 +39,9 @@ variable `RAYON_NUM_THREADS=<xx>`

## How can I test a change on the testnet?

Currently, a merged PR is the only way to test a change on the testnet.
Currently, a merged PR is the only way to test a change on the testnet. But you
can run your own testnet using the scripts in the `net/` directory.

## Adjusting the number of clients or validators on the testnet
Through the [testnet-deploy](https://buildkite.com/solana-labs/testnet-deploy/) settings.

@ -10,28 +10,30 @@ if [[ $(uname -m) != x86_64 ]]; then
  exit 1
fi

mkdir -p target/perf-libs
(
  set -x
  curl -o solana-perf.tgz \
    https://solana-perf.s3.amazonaws.com/master/x86_64-unknown-linux-gnu/solana-perf.tgz
  tar zxvf solana-perf.tgz
)
cd target/perf-libs
(
  set -x
  curl https://solana-perf.s3.amazonaws.com/v0.10.3/x86_64-unknown-linux-gnu/solana-perf.tgz | tar zxvf -
)

if [[ -r /usr/local/cuda/version.txt && -r cuda-version.txt ]]; then
  if ! diff /usr/local/cuda/version.txt cuda-version.txt > /dev/null; then
if [[ -r /usr/local/cuda/version.txt && -r cuda-version.txt ]]; then
  if ! diff /usr/local/cuda/version.txt cuda-version.txt > /dev/null; then
    echo ==============================================
    echo Warning: possible CUDA version mismatch
    echo
    echo "Expected version: $(cat cuda-version.txt)"
    echo "Detected version: $(cat /usr/local/cuda/version.txt)"
    echo ==============================================
  fi
else
  echo ==============================================
  echo Warning: possible CUDA version mismatch
  echo
  echo "Expected version: $(cat cuda-version.txt)"
  echo "Detected version: $(cat /usr/local/cuda/version.txt)"
  echo Warning: unable to validate CUDA version
  echo ==============================================
fi
else
  echo ==============================================
  echo Warning: unable to validate CUDA version
  echo ==============================================
fi

echo "Downloaded solana-perf version: $(cat solana-perf-HEAD.txt)"
echo "Downloaded solana-perf version: $(cat solana-perf-HEAD.txt)"
)

exit 0

@ -1,66 +1,25 @@
#!/bin/bash -e
#
USAGE=" usage: $0 [leader_url] [num_nodes] [--loop] [extra args]

 leader_url    URL to the leader (defaults to ..)
 num_nodes     Minimum number of nodes to look for while converging
 --loop        Add this flag to cause the program to loop infinitely
 \"extra args\"  Any additional arguments are passed along to solana-bench-tps
"

here=$(dirname "$0")
# shellcheck source=multinode-demo/common.sh
source "$here"/common.sh

leader=$1
if [[ -n $leader ]]; then
  if [[ $leader == "-h" || $leader == "--help" ]]; then
    echo "$USAGE"
    exit 0
usage() {
  if [[ -n $1 ]]; then
    echo "$*"
    echo
  fi
  shift
  echo "usage: $0 [extra args]"
  echo
  echo " Run bench-tps "
  echo
  echo " extra args: additional arguments are passed along to solana-bench-tps"
  echo
  exit 1
}

if [[ -z $1 ]]; then # default behavior
  $solana_bench_tps --identity config-private/client-id.json --network 127.0.0.1:8001 --duration 90
else
  if [[ -d "$SNAP" ]]; then
    leader=testnet.solana.com # Default to testnet when running as a Snap
  else
    leader=$here/.. # Default to local solana repo
  fi
  $solana_bench_tps "$@"
fi

count=$1
if [[ -n $count ]]; then
  shift
else
  count=1
fi

loop=
if [[ $1 = --loop ]]; then
  loop=1
  shift
fi

rsync_leader_url=$(rsync_url "$leader")
(
  set -x
  mkdir -p "$SOLANA_CONFIG_CLIENT_DIR"
  $rsync -vPz "$rsync_leader_url"/config/leader.json "$SOLANA_CONFIG_CLIENT_DIR"/

  client_json="$SOLANA_CONFIG_CLIENT_DIR"/client.json
  [[ -r $client_json ]] || $solana_keygen -o "$client_json"
)

iteration=0
set -x
while true; do
  $solana_bench_tps \
    -n "$count" \
    -l "$SOLANA_CONFIG_CLIENT_DIR"/leader.json \
    -k "$SOLANA_CONFIG_CLIENT_DIR"/client.json \
    "$@"
  [[ -n $loop ]] || exit 0
  iteration=$((iteration + 1))
  echo ------------------------------------------------------------------------
  echo "Iteration: $iteration"
  echo ------------------------------------------------------------------------
done

@ -1,12 +1,16 @@

# |source| this file
#
# Disable complaints about unused variables in this file:
# Common utilities shared by other scripts in this directory
#
# The following directive disables complaints about unused variables in this
# file:
# shellcheck disable=2034
#

rsync=rsync
leader_logger="cat"
validator_logger="cat"
drone_logger="cat"
leader_logger="tee leader.log"
validator_logger="tee validator.log"
drone_logger="tee drone.log"

if [[ $(uname) != Linux ]]; then
  # Protect against unsupported configurations to prevent non-obvious errors
@ -24,13 +28,7 @@ fi
if [[ -d $SNAP ]]; then # Running inside a Linux Snap?
  solana_program() {
    declare program="$1"
    if [[ "$program" = wallet || "$program" = bench-tps ]]; then
      # TODO: Merge wallet.sh/client.sh functionality into
      # solana-wallet/solana-demo-client proper and remove this special case
      printf "%s/bin/solana-%s" "$SNAP" "$program"
    else
      printf "%s/command-%s.wrapper" "$SNAP" "$program"
    fi
    printf "%s/command-%s.wrapper" "$SNAP" "$program"
  }
  rsync="$SNAP"/bin/rsync
  multilog="$SNAP/bin/multilog t s16777215 n200"
@ -41,12 +39,6 @@ if [[ -d $SNAP ]]; then # Running inside a Linux Snap?
  # 0700
  mkdir -p "$SNAP_DATA"/{drone,leader,validator}

  SOLANA_METRICS_CONFIG="$(snapctl get metrics-config)"
  SOLANA_DEFAULT_METRICS_RATE="$(snapctl get default-metrics-rate)"
  export SOLANA_DEFAULT_METRICS_RATE
  SOLANA_CUDA="$(snapctl get enable-cuda)"
  RUST_LOG="$(snapctl get rust-log)"

elif [[ -n $USE_SNAP ]]; then # Use the Linux Snap binaries
  solana_program() {
    declare program="$1"
@ -80,7 +72,7 @@ else
  fi

  # Locate perf libs downloaded by |./fetch-perf-libs.sh|
  LD_LIBRARY_PATH=$(cd "$here" && dirname "$PWD"):$LD_LIBRARY_PATH
  LD_LIBRARY_PATH=$(cd "$here" && dirname "$PWD"/target/perf-libs):$LD_LIBRARY_PATH
  export LD_LIBRARY_PATH
fi
fi
@ -98,50 +90,8 @@ solana_ledger_tool=$(solana_program ledger-tool)
export RUST_LOG=${RUST_LOG:-solana=info} # if RUST_LOG is unset, default to info
export RUST_BACKTRACE=1


# The SOLANA_METRICS_CONFIG environment variable is formatted as a
# comma-delimited list of parameters. All parameters are optional.
#
# Example:
#   export SOLANA_METRICS_CONFIG="host=<metrics host>,db=<database name>,u=<username>,p=<password>"
#
configure_metrics() {
  [[ -n $SOLANA_METRICS_CONFIG ]] || return 0

  declare metrics_params
  IFS=',' read -r -a metrics_params <<< "$SOLANA_METRICS_CONFIG"
  for param in "${metrics_params[@]}"; do
    IFS='=' read -r -a pair <<< "$param"
    if [[ ${#pair[@]} != 2 ]]; then
      echo Error: invalid metrics parameter: "$param" >&2
    else
      declare name="${pair[0]}"
      declare value="${pair[1]}"
      case "$name" in
      host)
        export INFLUX_HOST="$value"
        echo INFLUX_HOST="$INFLUX_HOST" >&2
        ;;
      db)
        export INFLUX_DATABASE="$value"
        echo INFLUX_DATABASE="$INFLUX_DATABASE" >&2
        ;;
      u)
        export INFLUX_USERNAME="$value"
        echo INFLUX_USERNAME="$INFLUX_USERNAME" >&2
        ;;
      p)
        export INFLUX_PASSWORD="$value"
        echo INFLUX_PASSWORD="********" >&2
        ;;
      *)
        echo Error: Unknown metrics parameter name: "$name" >&2
        ;;
      esac
    fi
  done
}
configure_metrics
# shellcheck source=scripts/configure-metrics.sh
source "$(dirname "${BASH_SOURCE[0]}")"/../scripts/configure-metrics.sh

tune_networking() {
  # Skip in CI
@ -154,10 +104,16 @@ tune_networking() {
    # test the existence of the sysctls before trying to set them
    # go ahead and return true and don't exit if these calls fail
    sysctl net.core.rmem_max 2>/dev/null 1>/dev/null &&
      sudo sysctl -w net.core.rmem_max=26214400 1>/dev/null 2>/dev/null
      sudo sysctl -w net.core.rmem_max=1610612736 1>/dev/null 2>/dev/null

    sysctl net.core.rmem_default 2>/dev/null 1>/dev/null &&
      sudo sysctl -w net.core.rmem_default=26214400 1>/dev/null 2>/dev/null
      sudo sysctl -w net.core.rmem_default=1610612736 1>/dev/null 2>/dev/null

    sysctl net.core.wmem_max 2>/dev/null 1>/dev/null &&
      sudo sysctl -w net.core.wmem_max=1610612736 1>/dev/null 2>/dev/null

    sysctl net.core.wmem_default 2>/dev/null 1>/dev/null &&
      sudo sysctl -w net.core.wmem_default=1610612736 1>/dev/null 2>/dev/null
  ) || true
fi

@ -173,20 +129,6 @@ tune_networking() {
  fi
}
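
# Illustrative check (assumption: run manually, not called by these scripts):
# confirm the kernel buffer sizes actually took effect after tune_networking:
#   sysctl net.core.rmem_max net.core.rmem_default net.core.wmem_max net.core.wmem_default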
oom_score_adj() {
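  # The Linux oom_score_adj knob ranges from -1000 (never OOM-kill) to 1000
  # (preferred OOM-kill victim); callers in these scripts pass 1000 so the
  # node process is sacrificed before sshd or the deploy tooling under
  # memory pressure.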
  declare pid=$1
  declare score=$2
  if [[ $(uname) != Linux ]]; then
    return
  fi

  echo "$score" > "/proc/$pid/oom_score_adj" || true
  declare currentScore
  currentScore=$(cat "/proc/$pid/oom_score_adj" || true)
  if [[ $score != "$currentScore" ]]; then
    echo "Failed to set oom_score_adj to $score for pid $pid (current score: $currentScore)"
  fi
}

SOLANA_CONFIG_DIR=${SNAP_DATA:-$PWD}/config
SOLANA_CONFIG_PRIVATE_DIR=${SNAP_DATA:-$PWD}/config-private
@ -211,3 +153,50 @@ rsync_url() { # adds the 'rsync://` prefix to URLs that need it
  # Default to rsync:// URL
  echo "rsync://$url"
}

# called from drone, validator, client
find_leader() {
  declare leader leader_address
  declare shift=0

  if [[ -d $SNAP ]]; then
    if [[ -n $1 ]]; then
      usage "Error: unexpected parameter: $1"
    fi

    # Select leader from the Snap configuration
    leader_ip=$(snapctl get leader-ip)
    if [[ -z $leader_ip ]]; then
      leader=testnet.solana.com
      leader_ip=$(dig +short "${leader%:*}" | head -n1)
      if [[ -z $leader_ip ]]; then
        usage "Error: unable to resolve IP address for $leader"
      fi
    fi
    leader=$leader_ip
    leader_address=$leader_ip:8001
  else
    if [[ -z $1 ]]; then
      leader=${here}/..             # Default to local tree for rsync
      leader_address=127.0.0.1:8001 # Default to local leader
    elif [[ -z $2 ]]; then
      leader=$1

      declare leader_ip
      leader_ip=$(dig +short "${leader%:*}" | head -n1)

      if [[ -z $leader_ip ]]; then
        usage "Error: unable to resolve IP address for $leader"
      fi

      leader_address=$leader_ip:8001
      shift=1
    else
      leader=$1
      leader_address=$2
      shift=2
    fi
  fi

  echo "$leader" "$leader_address" "$shift"
}
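
# find_leader prints three whitespace-separated values; callers consume them
# with process substitution, as drone.sh and validator.sh do below:
#   read -r leader leader_address shift < <(find_leader "$@")
#   shift "$shift"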

@ -1,28 +1,26 @@
#!/bin/bash
#
# usage: $0 <rsync network path to solana repo on leader machine>
# Starts an instance of solana-drone
#

here=$(dirname "$0")

# shellcheck source=multinode-demo/common.sh
source "$here"/common.sh
SOLANA_CONFIG_DIR="$SOLANA_CONFIG_DIR"-drone

if [[ -d "$SNAP" ]]; then
  # Exit if mode is not yet configured
  # (typically the case after the Snap is first installed)
  [[ -n "$(snapctl get mode)" ]] || exit 0

  # Select leader from the Snap configuration
  leader_address="$(snapctl get leader-address)"
  if [[ -z "$leader_address" ]]; then
    # Assume drone is running on the same node as the leader by default
    leader_address="localhost"
usage() {
  if [[ -n $1 ]]; then
    echo "$*"
    echo
  fi
  leader="$leader_address"
else
  leader=${1:-${here}/..} # Default to local tree for data
fi
  echo "usage: $0 [network entry point]"
  echo
  echo " Run an airdrop drone for the specified network"
  echo
  exit 1
}

read -r _ leader_address shift < <(find_leader "${@:1:1}")
shift "$shift"

[[ -f "$SOLANA_CONFIG_PRIVATE_DIR"/mint.json ]] || {
  echo "$SOLANA_CONFIG_PRIVATE_DIR/mint.json not found, create it by running:"
@ -31,16 +29,12 @@ fi
  exit 1
}

rsync_leader_url=$(rsync_url "$leader")
set -ex
mkdir -p "$SOLANA_CONFIG_DIR"
$rsync -vPz "$rsync_leader_url"/config/leader.json "$SOLANA_CONFIG_DIR"/

trap 'kill "$pid" && wait "$pid"' INT TERM
$solana_drone \
  -l "$SOLANA_CONFIG_DIR"/leader.json -k "$SOLANA_CONFIG_PRIVATE_DIR"/mint.json \
  --keypair "$SOLANA_CONFIG_PRIVATE_DIR"/mint.json \
  --network "$leader_address" \
  > >($drone_logger) 2>&1 &
pid=$!
oom_score_adj "$pid" 1000
wait "$pid"

@ -1,80 +0,0 @@
#!/bin/bash

command=$1
prefix=
num_nodes=
out_file=
image_name="ubuntu-16-04-cuda-9-2-new"

shift

usage() {
  exitcode=0
  if [[ -n "$1" ]]; then
    exitcode=1
    echo "Error: $*"
  fi
  cat <<EOF
usage: $0 <create|delete> <-p prefix> <-n num_nodes> <-o file> [-i image-name]

Manage a GCE multinode network

 create|delete  - Create or delete the network
 -p prefix      - A common prefix for node names, to avoid collision
 -n num_nodes   - Number of nodes
 -o out_file    - Used for create option. Outputs an array of IP addresses
                  of new nodes to the file
 -i image_name  - Existing image on GCE (default $image_name)

EOF
  exit $exitcode
}

while getopts "h?p:i:n:o:" opt; do
  case $opt in
  h | \?)
    usage
    ;;
  p)
    prefix=$OPTARG
    ;;
  i)
    image_name=$OPTARG
    ;;
  o)
    out_file=$OPTARG
    ;;
  n)
    num_nodes=$OPTARG
    ;;
  *)
    usage "Error: unhandled option: $opt"
    ;;
  esac
done

set -e

[[ -n $command ]] || usage "Need a command (create|delete)"

[[ -n $prefix ]] || usage "Need a prefix for GCE instance names"

[[ -n $num_nodes ]] || usage "Need number of nodes"

nodes=()
for i in $(seq 1 "$num_nodes"); do
  nodes+=("$prefix$i")
done

if [[ $command == "create" ]]; then
  [[ -n $out_file ]] || usage "Need an outfile to store IP Addresses"

  ip_addr_list=$(gcloud beta compute instances create "${nodes[@]}" --zone=us-west1-b --tags=testnet \
    --image="$image_name" | awk '/RUNNING/ {print $5}')

  echo "ip_addr_array=($ip_addr_list)" >"$out_file"
elif [[ $command == "delete" ]]; then
  gcloud beta compute instances delete "${nodes[@]}"
else
  usage "Unknown command: $command"
fi
@ -1,9 +1,15 @@
#!/bin/bash
#
# Starts a leader node
#

here=$(dirname "$0")
# shellcheck source=multinode-demo/common.sh
source "$here"/common.sh

# shellcheck source=scripts/oom-score-adj.sh
source "$here"/../scripts/oom-score-adj.sh

if [[ -d "$SNAP" ]]; then
  # Exit if mode is not yet configured
  # (typically the case after the Snap is first installed)
@ -1,14 +0,0 @@
#!/bin/bash -e

[[ -n $FORCE ]] || exit

chmod 600 ~/.ssh/authorized_keys ~/.ssh/id_rsa

PATH="$HOME"/.cargo/bin:"$PATH"

./fetch-perf-libs.sh

# Run setup
USE_INSTALL=1 ./multinode-demo/setup.sh -p
USE_INSTALL=1 ./multinode-demo/drone.sh >drone.log 2>&1 &
USE_INSTALL=1 SOLANA_CUDA=1 ./multinode-demo/leader.sh >leader.log 2>&1 &
@ -1,185 +0,0 @@
#!/bin/bash

command=$1
ip_addr_file=
remote_user=
ssh_keys=

shift

usage() {
  exitcode=0
  if [[ -n "$1" ]]; then
    exitcode=1
    echo "Error: $*"
  fi
  cat <<EOF
usage: $0 <start|stop> <-f IP Addr Array file> <-u username> [-k ssh-keys]

Manage a GCE multinode network

 start|stop  - Create or delete the network
 -f file     - A bash script that exports an array of IP addresses, ip_addr_array.
               Elements of the array are public IP addresses of remote nodes.
 -u username - The username for logging into remote nodes.
 -k ssh-keys - Path to public/private key pair that remote nodes can use to perform
               rsync and ssh among themselves. Must contain pub and priv keys.

EOF
  exit $exitcode
}

while getopts "h?f:u:k:" opt; do
  case $opt in
  h | \?)
    usage
    ;;
  f)
    ip_addr_file=$OPTARG
    ;;
  u)
    remote_user=$OPTARG
    ;;
  k)
    ssh_keys=$OPTARG
    ;;
  *)
    usage "Error: unhandled option: $opt"
    ;;
  esac
done

set -e

# Sample IP Address array file contents
# ip_addr_array=(192.168.1.1 192.168.1.5 192.168.2.2)

[[ -n $command ]] || usage "Need a command (start|stop)"
[[ -n $ip_addr_file ]] || usage "Need a file with IP address array"
[[ -n $remote_user ]] || usage "Need the username for remote nodes"

ip_addr_array=()
# Get IP address array
# shellcheck source=/dev/null
source "$ip_addr_file"

build_project() {
  echo "Build started at $(date)"
  SECONDS=0

  # Build and install locally
  PATH="$HOME"/.cargo/bin:"$PATH"
  cargo install --force

  echo "Build took $SECONDS seconds"
}

common_start_setup() {
  ip_addr=$1

  # Killing sshguard for now. TODO: Find a better solution
  # sshguard is blacklisting IP addresses after ssh-keyscan and ssh login attempts
  ssh "$remote_user@$ip_addr" " \
    set -ex; \
    sudo service sshguard stop; \
    sudo apt-get --assume-yes install rsync libssl-dev; \
    mkdir -p ~/.ssh ~/solana ~/.cargo/bin; \
  " >log/"$ip_addr".log

  # If provided, deploy SSH keys
  if [[ -n $ssh_keys ]]; then
    {
      rsync -vPrz "$ssh_keys"/id_rsa "$remote_user@$ip_addr":~/.ssh/
      rsync -vPrz "$ssh_keys"/id_rsa.pub "$remote_user@$ip_addr":~/.ssh/
      rsync -vPrz "$ssh_keys"/id_rsa.pub "$remote_user@$ip_addr":~/.ssh/authorized_keys
      rsync -vPrz ./multinode-demo "$remote_user@$ip_addr":~/solana/
    } >>log/"$ip_addr".log
  fi
}

start_leader() {
  common_start_setup "$1"

  {
    rsync -vPrz ~/.cargo/bin/solana* "$remote_user@$ip_addr":~/.cargo/bin/
    rsync -vPrz ./fetch-perf-libs.sh "$remote_user@$ip_addr":~/solana/
    ssh -n -f "$remote_user@$ip_addr" 'cd solana; FORCE=1 ./multinode-demo/remote_leader.sh'
  } >>log/"$1".log

  leader_ip=$1
  leader_time=$SECONDS
  SECONDS=0
}

start_validator() {
  common_start_setup "$1"

  ssh -n -f "$remote_user@$ip_addr" "cd solana; FORCE=1 ./multinode-demo/remote_validator.sh $leader_ip" >>log/"$1".log
}

start_all_nodes() {
  echo "Deployment started at $(date)"
  SECONDS=0
  count=0
  leader_ip=
  leader_time=

  mkdir -p log

  for ip_addr in "${ip_addr_array[@]}"; do
    if ((!count)); then
      # Start the leader on the first node
      echo "Leader node $ip_addr, killing previous instance and restarting"
      start_leader "$ip_addr"
    else
      # Start validator on all other nodes
      echo "Validator[$count] node $ip_addr, killing previous instance and restarting"
      start_validator "$ip_addr" &
      # TBD: Remove the sleep or reduce time once GCP login quota is increased
      sleep 2
    fi

    ((count = count + 1))
  done

  wait

  ((validator_count = count - 1))

  echo "Deployment finished at $(date)"
  echo "Leader deployment took $leader_time seconds"
  echo "$validator_count Validator deployment took $SECONDS seconds"
}

stop_all_nodes() {
  SECONDS=0
  local count=0
  for ip_addr in "${ip_addr_array[@]}"; do
    ssh-keygen -R "$ip_addr" >log/local.log
    ssh-keyscan "$ip_addr" >>~/.ssh/known_hosts 2>/dev/null

    echo "Stopping node[$count] $ip_addr. Remote user $remote_user"

    ssh -n -f "$remote_user@$ip_addr" " \
      set -ex; \
      sudo service sshguard stop; \
      pkill -9 solana-; \
      pkill -9 validator; \
      pkill -9 leader; \
    "
    sleep 2
    ((count = count + 1))
    echo "Stopped node[$count] $ip_addr"
  done
  echo "Stopping $count nodes took $SECONDS seconds"
}

if [[ $command == "start" ]]; then
  build_project
  stop_all_nodes
  start_all_nodes
elif [[ $command == "stop" ]]; then
  stop_all_nodes
else
  usage "Unknown command: $command"
fi
@ -1,17 +0,0 @@
#!/bin/bash -e

[[ -n $FORCE ]] || exit

chmod 600 ~/.ssh/authorized_keys ~/.ssh/id_rsa

PATH="$HOME"/.cargo/bin:"$PATH"

touch ~/.ssh/known_hosts
ssh-keygen -R "$1" 2>/dev/null
ssh-keyscan "$1" >>~/.ssh/known_hosts 2>/dev/null

rsync -vPrz "$1":~/.cargo/bin/solana* ~/.cargo/bin/

# Run setup
USE_INSTALL=1 ./multinode-demo/setup.sh -p
USE_INSTALL=1 ./multinode-demo/validator.sh "$1":~/solana "$1" >validator.log 2>&1
@ -1,4 +1,7 @@
#!/bin/bash
#
# Creates a fullnode configuration
#

here=$(dirname "$0")
# shellcheck source=multinode-demo/common.sh
@ -31,6 +34,7 @@ ip_address_arg=-l
num_tokens=1000000000
node_type_leader=true
node_type_validator=true
node_type_client=true
while getopts "h?n:lpt:" opt; do
  case $opt in
  h|\?)
@ -52,10 +56,17 @@ while getopts "h?n:lpt:" opt; do
    leader)
      node_type_leader=true
      node_type_validator=false
      node_type_client=false
      ;;
    validator)
      node_type_leader=false
      node_type_validator=true
      node_type_client=false
      ;;
    client)
      node_type_leader=false
      node_type_validator=false
      node_type_client=true
      ;;
    *)
      usage "Error: unknown node type: $node_type"
@ -69,25 +80,27 @@ while getopts "h?n:lpt:" opt; do
done


leader_address_args=("$ip_address_arg")
validator_address_args=("$ip_address_arg" -b 9000)
leader_id_path="$SOLANA_CONFIG_PRIVATE_DIR"/leader-id.json
validator_id_path="$SOLANA_CONFIG_PRIVATE_DIR"/validator-id.json
mint_path="$SOLANA_CONFIG_PRIVATE_DIR"/mint.json

set -e

for i in "$SOLANA_CONFIG_DIR" "$SOLANA_CONFIG_PRIVATE_DIR" "$SOLANA_CONFIG_VALIDATOR_DIR"; do
for i in "$SOLANA_CONFIG_DIR" "$SOLANA_CONFIG_VALIDATOR_DIR" "$SOLANA_CONFIG_PRIVATE_DIR"; do
  echo "Cleaning $i"
  rm -rvf "$i"
  mkdir -p "$i"
done


$solana_keygen -o "$leader_id_path"
$solana_keygen -o "$validator_id_path"
if $node_type_client; then
  client_id_path="$SOLANA_CONFIG_PRIVATE_DIR"/client-id.json
  $solana_keygen -o "$client_id_path"
  ls -lhR "$SOLANA_CONFIG_PRIVATE_DIR"/
fi

if $node_type_leader; then
  leader_address_args=("$ip_address_arg")
  leader_id_path="$SOLANA_CONFIG_PRIVATE_DIR"/leader-id.json
  mint_path="$SOLANA_CONFIG_PRIVATE_DIR"/mint.json

  $solana_keygen -o "$leader_id_path"

  echo "Creating $mint_path with $num_tokens tokens"
  $solana_keygen -o "$mint_path"

@ -96,15 +109,20 @@ if $node_type_leader; then

  echo "Creating $SOLANA_CONFIG_DIR/leader.json"
  $solana_fullnode_config --keypair="$leader_id_path" "${leader_address_args[@]}" > "$SOLANA_CONFIG_DIR"/leader.json

  ls -lhR "$SOLANA_CONFIG_DIR"/
  ls -lhR "$SOLANA_CONFIG_PRIVATE_DIR"/
fi


if $node_type_validator; then
  validator_address_args=("$ip_address_arg" -b 9000)
  validator_id_path="$SOLANA_CONFIG_PRIVATE_DIR"/validator-id.json

  $solana_keygen -o "$validator_id_path"

  echo "Creating $SOLANA_CONFIG_VALIDATOR_DIR/validator.json"
  $solana_fullnode_config --keypair="$validator_id_path" "${validator_address_args[@]}" > "$SOLANA_CONFIG_VALIDATOR_DIR"/validator.json
fi

ls -lhR "$SOLANA_CONFIG_DIR"/
if $node_type_leader; then
  ls -lhR "$SOLANA_CONFIG_PRIVATE_DIR"
  ls -lhR "$SOLANA_CONFIG_VALIDATOR_DIR"/
fi

@ -1,47 +0,0 @@
#!/bin/bash -e
#
# Wallet sanity test
#

here=$(dirname "$0")
cd "$here"

if [[ -n "$USE_SNAP" ]]; then
  # TODO: Merge wallet.sh functionality into solana-wallet proper and
  # remove this USE_SNAP case
  wallet="solana.wallet $1"
else
  wallet="../wallet.sh $1"
fi

# Tokens transferred to this address are lost forever...
garbage_address=vS3ngn1TfQmpsW1Z4NkLuqNAQFF3dYQw8UZ6TCx9bmq

check_balance_output() {
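  # Duplicate stdout to fd 42 so the wallet output below is shown live (tee
  # writes a copy to fd 42) while still being captured into $output for the
  # regex comparison.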
  declare expected_output="$1"
  exec 42>&1
  output=$($wallet balance | tee >(cat - >&42))
  if [[ ! "$output" =~ $expected_output ]]; then
    echo "Balance is incorrect. Expected: $expected_output"
    exit 1
  fi
}

pay_and_confirm() {
  exec 42>&1
  signature=$($wallet pay "$@" | tee >(cat - >&42))
  $wallet confirm "$signature"
}

$wallet reset
$wallet address
check_balance_output "Your balance is: 0"
$wallet airdrop --tokens 60
check_balance_output "Your balance is: 60"
$wallet airdrop --tokens 40
check_balance_output "Your balance is: 100"
pay_and_confirm --to $garbage_address --tokens 99
check_balance_output "Your balance is: 1"

echo PASS
exit 0
@ -1,4 +1,8 @@
#!/bin/bash
#
# Start a dynamically-configured validator node
#

here=$(dirname "$0")

exec "$here"/validator.sh -x "$@"

@ -1,16 +1,31 @@
#!/bin/bash
#
# Start a validator node
#
here=$(dirname "$0")
# shellcheck source=multinode-demo/common.sh
source "$here"/common.sh

# shellcheck source=scripts/oom-score-adj.sh
source "$here"/../scripts/oom-score-adj.sh

if [[ -d "$SNAP" ]]; then
  # Exit if mode is not yet configured
  # (typically the case after the Snap is first installed)
  [[ -n "$(snapctl get mode)" ]] || exit 0
fi

usage() {
  if [[ -n $1 ]]; then
    echo "$*"
    echo
  fi
  echo "usage: $0 [-x] [rsync network path to solana repo on leader machine] [network ip address of leader]"
  echo ""
  echo " -x: runs a new, dynamically-configured validator"
  echo "usage: $0 [-x] [rsync network path to leader] [network entry point]"
  echo
  echo " Start a validator on the specified network"
  echo
  echo " -x: runs a new, dynamically-configured validator"
  echo
  exit 1
}

@ -29,34 +44,8 @@ if [[ -n $3 ]]; then
  usage
fi

if [[ -d $SNAP ]]; then
  # Exit if mode is not yet configured
  # (typically the case after the Snap is first installed)
  [[ -n $(snapctl get mode) ]] || exit 0

  # Select leader from the Snap configuration
  leader_address=$(snapctl get leader-address)
  if [[ -z $leader_address ]]; then
    # Assume public testnet by default
    leader_address=35.227.93.37 # testnet.solana.com
  fi
  leader=$leader_address
else
  if [[ -z $1 ]]; then
    leader=${1:-${here}/..}        # Default to local tree for data
    leader_address=${2:-127.0.0.1} # Default to local leader
  elif [[ -z $2 ]]; then
    leader=$1
    leader_address=$(dig +short "${leader%:*}" | head -n1)
    if [[ -z $leader_address ]]; then
      usage "Error: unable to resolve IP address for $leader"
    fi
  else
    leader=$1
    leader_address=$2
  fi
fi
leader_port=8001
read -r leader leader_address shift < <(find_leader "${@:1:2}")
shift "$shift"

if [[ -n $SOLANA_CUDA ]]; then
  program=$solana_fullnode_cuda
@ -103,7 +92,7 @@ $rsync -vPr "$rsync_leader_url"/config/ "$SOLANA_LEADER_CONFIG_DIR"
trap 'kill "$pid" && wait "$pid"' INT TERM
$program \
  --identity "$validator_json_path" \
  --testnet "$leader_address:$leader_port" \
  --network "$leader_address" \
  --ledger "$SOLANA_LEADER_CONFIG_DIR"/ledger \
  > >($validator_logger) 2>&1 &
pid=$!

@ -1,45 +0,0 @@
#!/bin/bash
#
# usage: $0 <rsync network path to solana repo on leader machine>
#

here=$(dirname "$0")
# shellcheck source=multinode-demo/common.sh
source "$here"/common.sh

# if $1 isn't host:path, something.com, or a valid local path
if [[ ${1%:} != "$1" || "$1" =~ [^.]\.[^.] || -d $1 ]]; then
  leader=$1 # interpret
  shift
else
|
||||
if [[ -d "$SNAP" ]]; then
|
||||
leader=testnet.solana.com # Default to testnet when running as a Snap
|
||||
else
|
||||
leader=$here/.. # Default to local solana repo
|
||||
fi
|
||||
fi
|
||||
|
||||
if [[ "$1" = "reset" ]]; then
|
||||
echo Wallet resetting
|
||||
rm -rf "$SOLANA_CONFIG_CLIENT_DIR"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
rsync_leader_url=$(rsync_url "$leader")
|
||||
|
||||
set -e
|
||||
mkdir -p "$SOLANA_CONFIG_CLIENT_DIR"
|
||||
if [[ ! -r "$SOLANA_CONFIG_CLIENT_DIR"/leader.json ]]; then
|
||||
echo "Fetching leader configuration from $rsync_leader_url"
|
||||
$rsync -Pz "$rsync_leader_url"/config/leader.json "$SOLANA_CONFIG_CLIENT_DIR"/
|
||||
fi
|
||||
|
||||
client_id_path="$SOLANA_CONFIG_CLIENT_DIR"/id.json
|
||||
if [[ ! -r $client_id_path ]]; then
|
||||
echo "Generating client identity: $client_id_path"
|
||||
$solana_keygen -o "$client_id_path"
|
||||
fi
|
||||
|
||||
# shellcheck disable=SC2086 # $solana_wallet should not be quoted
|
||||
exec $solana_wallet \
|
||||
-l "$SOLANA_CONFIG_CLIENT_DIR"/leader.json -k "$client_id_path" "$@"
|
2
net/.gitignore
vendored
Normal file
2
net/.gitignore
vendored
Normal file
@ -0,0 +1,2 @@
|
||||
/config/
|
||||
/log/
|
100
net/README.md
Normal file
100
net/README.md
Normal file
@ -0,0 +1,100 @@
|
||||
|
||||
# Network Management
|
||||
This directory contains scripts useful for working with a test network. It's
|
||||
intended to be both dev and CD friendly.
|
||||
|
||||
### User Account Prerequisites
|
||||
|
||||
GCP and AWS are supported.
|
||||
|
||||
#### GCP
|
||||
First authenticate with
|
||||
```bash
|
||||
$ gcloud auth login
|
||||
```
|
||||
|
||||
#### AWS
|
||||
Obtain your credentials from the AWS IAM Console and configure the AWS CLI with
|
||||
```bash
|
||||
$ aws configure
|
||||
```
|
||||
More information on AWS CLI configuration can be found [here](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-quick-configuration)
|
||||
|
||||
### Metrics configuration
|
||||
Ensure that `$(whoami)` is the name of an InfluxDB user account with enough
|
||||
access to create a new InfluxDB database. Ask mvines@ for help if needed.
|
||||
|
||||
## Quick Start
|
||||
|
||||
NOTE: This example uses GCP. If you are using AWS, replace `./gce.sh` with
|
||||
`./ec2.sh` in the commands.
|
||||
|
||||
```bash
|
||||
$ cd net/
|
||||
$ ./gce.sh create -n 5 -c 1 #<-- Create a GCE testnet with 5 validators, 1 client (billing starts here)
|
||||
$ ./init-metrics.sh $(whoami) #<-- Configure a metrics database for the testnet
|
||||
$ ./net.sh start #<-- Deploy the network from the local workspace
|
||||
$ ./ssh.sh #<-- Details on how to ssh into any testnet node
|
||||
$ ./gce.sh delete #<-- Dispose of the network (billing stops here)
|
||||
```
|
||||
|
||||
## Tips
|
||||
|
||||
### Running the network over public IP addresses
|
||||
By default private IP addresses are used with all instances in the same
|
||||
availability zone to avoid GCE network engress charges. However to run the
|
||||
network over public IP addresses:
|
||||
```bash
|
||||
$ ./gce.sh create -P ...
|
||||
```
|
||||
or
|
||||
```bash
|
||||
$ ./ec2.sh create -P ...
|
||||
```
|
||||
|
||||
### Deploying a Snap-based network
|
||||
To deploy the latest pre-built `edge` channel Snap (ie, latest from the `master`
|
||||
branch), once the testnet has been created run:
|
||||
|
||||
```bash
|
||||
$ ./net.sh start -s edge
|
||||
```
|
||||
|
||||
### Enabling CUDA
|
||||
First ensure the network instances are created with GPU enabled:
|
||||
```bash
|
||||
$ ./gce.sh create -g ...
|
||||
```
|
||||
or
|
||||
```bash
|
||||
$ ./ec2.sh create -g ...
|
||||
```
|
||||
|
||||
If deploying a Snap-based network nothing further is required, as GPU presence
|
||||
is detected at runtime and the CUDA build is auto selected.
|
||||
|
||||
If deploying a locally-built network, first run `./fetch-perf-libs.sh` then
|
||||
ensure the `cuda` feature is specified at network start:
|
||||
```bash
|
||||
$ ./net.sh start -f "cuda,erasure"
|
||||
```
|
||||
|
||||
### How to interact with a CD testnet deployed by ci/testnet-deploy.sh
|
||||
|
||||
**AWS-Specific Extra Setup**: Follow the steps in `scripts/add-solana-user-authorized_keys.sh`,
|
||||
then redeploy the testnet before continuing in this section.
|
||||
|
||||
Taking **master-testnet-solana-com** as an example, configure your workspace for
|
||||
the testnet using:
|
||||
```bash
|
||||
$ ./gce.sh config -p master-testnet-solana-com
|
||||
```
|
||||
or
|
||||
```bash
|
||||
$ ./ec2.sh config -p master-testnet-solana-com
|
||||
```
|
||||
|
||||
Then run the following for details on how to ssh into any testnet node
|
||||
```bash
|
||||
$ ./ssh.sh
|
||||
```
|
58
net/common.sh
Normal file
58
net/common.sh
Normal file
@ -0,0 +1,58 @@
|
||||
# |source| this file
|
||||
#
|
||||
# Common utilities shared by other scripts in this directory
|
||||
#
|
||||
# The following directive disable complaints about unused variables in this
|
||||
# file:
|
||||
# shellcheck disable=2034
|
||||
#
|
||||
|
||||
netDir=$(
|
||||
cd "$(dirname "${BASH_SOURCE[0]}")" || exit
|
||||
echo "$PWD"
|
||||
)
|
||||
netConfigDir="$netDir"/config
|
||||
netLogDir="$netDir"/log
|
||||
mkdir -p "$netConfigDir" "$netLogDir"
|
||||
|
||||
# shellcheck source=scripts/configure-metrics.sh
|
||||
source "$(dirname "${BASH_SOURCE[0]}")"/../scripts/configure-metrics.sh
|
||||
|
||||
configFile="$netConfigDir/config"
|
||||
|
||||
entrypointIp=
|
||||
publicNetwork=
|
||||
leaderIp=
|
||||
netBasename=
|
||||
sshPrivateKey=
|
||||
clientIpList=()
|
||||
sshOptions=()
|
||||
validatorIpList=()
|
||||
|
||||
buildSshOptions() {
|
||||
sshOptions=(
|
||||
-o "BatchMode=yes"
|
||||
-o "StrictHostKeyChecking=no"
|
||||
-o "UserKnownHostsFile=/dev/null"
|
||||
-o "User=solana"
|
||||
-o "IdentityFile=$sshPrivateKey"
|
||||
-o "LogLevel=ERROR"
|
||||
-F /dev/null
|
||||
)
|
||||
}
|
||||
|
||||
loadConfigFile() {
|
||||
[[ -r $configFile ]] || usage "Config file unreadable: $configFile"
|
||||
|
||||
# shellcheck source=/dev/null
|
||||
source "$configFile"
|
||||
[[ -n "$entrypointIp" ]] || usage "Config file invalid, entrypointIp unspecified: $configFile"
|
||||
[[ -n "$publicNetwork" ]] || usage "Config file invalid, publicNetwork unspecified: $configFile"
|
||||
[[ -n "$leaderIp" ]] || usage "Config file invalid, leaderIp unspecified: $configFile"
|
||||
[[ -n "$netBasename" ]] || usage "Config file invalid, netBasename unspecified: $configFile"
|
||||
[[ -n $sshPrivateKey ]] || usage "Config file invalid, sshPrivateKey unspecified: $configFile"
|
||||
[[ ${#validatorIpList[@]} -gt 0 ]] || usage "Config file invalid, validatorIpList unspecified: $configFile"
|
||||
|
||||
buildSshOptions
|
||||
configureMetrics
|
||||
}
|
1
net/ec2.sh
Symbolic link
1
net/ec2.sh
Symbolic link
@ -0,0 +1 @@
|
||||
gce.sh
|
414
net/gce.sh
Executable file
414
net/gce.sh
Executable file
@ -0,0 +1,414 @@
|
||||
#!/bin/bash -e
|
||||
|
||||
here=$(dirname "$0")
|
||||
# shellcheck source=net/common.sh
|
||||
source "$here"/common.sh
|
||||
|
||||
cloudProvider=$(basename "$0" .sh)
|
||||
bootDiskType=""
|
||||
case $cloudProvider in
|
||||
gce)
|
||||
# shellcheck source=net/scripts/gce-provider.sh
|
||||
source "$here"/scripts/gce-provider.sh
|
||||
|
||||
imageName="ubuntu-16-04-cuda-9-2-new"
|
||||
cpuLeaderMachineType=n1-standard-16
|
||||
gpuLeaderMachineType="$cpuLeaderMachineType --accelerator count=4,type=nvidia-tesla-k80"
|
||||
leaderMachineType=$cpuLeaderMachineType
|
||||
validatorMachineType=n1-standard-16
|
||||
clientMachineType=n1-standard-16
|
||||
;;
|
||||
ec2)
|
||||
# shellcheck source=net/scripts/ec2-provider.sh
|
||||
source "$here"/scripts/ec2-provider.sh
|
||||
|
||||
imageName="ami-0466e26ccc0e752c1"
|
||||
cpuLeaderMachineType=m4.4xlarge
|
||||
gpuLeaderMachineType=p2.xlarge
|
||||
leaderMachineType=$cpuLeaderMachineType
|
||||
validatorMachineType=m4.xlarge
|
||||
clientMachineType=m4.4xlarge
|
||||
;;
|
||||
*)
|
||||
echo "Error: Unknown cloud provider: $cloudProvider"
|
||||
;;
|
||||
esac
|
||||
|
||||
|
||||
prefix=testnet-dev-${USER//[^A-Za-z0-9]/}
|
||||
validatorNodeCount=5
|
||||
clientNodeCount=1
|
||||
leaderBootDiskSizeInGb=1000
|
||||
validatorBootDiskSizeInGb=$leaderBootDiskSizeInGb
|
||||
clientBootDiskSizeInGb=75
|
||||
|
||||
publicNetwork=false
|
||||
enableGpu=false
|
||||
leaderAddress=
|
||||
|
||||
usage() {
|
||||
exitcode=0
|
||||
if [[ -n "$1" ]]; then
|
||||
exitcode=1
|
||||
echo "Error: $*"
|
||||
fi
|
||||
cat <<EOF
|
||||
usage: $0 [create|config|delete] [common options] [command-specific options]
|
||||
|
||||
Manage testnet instances
|
||||
|
||||
create - create a new testnet (implies 'config')
|
||||
config - configure the testnet and write a config file describing it
|
||||
delete - delete the testnet
|
||||
|
||||
common options:
|
||||
-p [prefix] - Optional common prefix for instance names to avoid
|
||||
collisions (default: $prefix)
|
||||
|
||||
create-specific options:
|
||||
-n [number] - Number of validator nodes (default: $validatorNodeCount)
|
||||
-c [number] - Number of client nodes (default: $clientNodeCount)
|
||||
-P - Use public network IP addresses (default: $publicNetwork)
|
||||
-z [zone] - Zone for the nodes (default: $zone)
|
||||
-g - Enable GPU (default: $enableGpu)
|
||||
-G - Enable GPU, and set count/type of GPUs to use (e.g $cpuLeaderMachineType --accelerator count=4,type=nvidia-tesla-k80)
|
||||
-a [address] - Set the leader node's external IP address to this value.
|
||||
For GCE, [address] is the "name" of the desired External
|
||||
IP Address.
|
||||
For EC2, [address] is the "allocation ID" of the desired
|
||||
Elastic IP.
|
||||
-d [disk-type] - Specify a boot disk type (default None) Use pd-ssd to get ssd on GCE.
|
||||
|
||||
config-specific options:
|
||||
none
|
||||
|
||||
delete-specific options:
|
||||
none
|
||||
|
||||
EOF
|
||||
exit $exitcode
|
||||
}
|
||||
|
||||
|
||||
command=$1
|
||||
[[ -n $command ]] || usage
|
||||
shift
|
||||
[[ $command = create || $command = config || $command = delete ]] || usage "Invalid command: $command"
|
||||
|
||||
while getopts "h?p:Pn:c:z:gG:a:d:" opt; do
|
||||
case $opt in
|
||||
h | \?)
|
||||
usage
|
||||
;;
|
||||
p)
|
||||
[[ ${OPTARG//[^A-Za-z0-9-]/} == "$OPTARG" ]] || usage "Invalid prefix: \"$OPTARG\", alphanumeric only"
|
||||
prefix=$OPTARG
|
||||
;;
|
||||
P)
|
||||
publicNetwork=true
|
||||
;;
|
||||
n)
|
||||
validatorNodeCount=$OPTARG
|
||||
;;
|
||||
c)
|
||||
clientNodeCount=$OPTARG
|
||||
;;
|
||||
z)
|
||||
cloud_SetZone "$OPTARG"
|
||||
;;
|
||||
g)
|
||||
enableGpu=true
|
||||
leaderMachineType="$gpuLeaderMachineType"
|
||||
;;
|
||||
G)
|
||||
enableGpu=true
|
||||
leaderMachineType="$OPTARG"
|
||||
;;
|
||||
a)
|
||||
leaderAddress=$OPTARG
|
||||
;;
|
||||
d)
|
||||
bootDiskType=$OPTARG
|
||||
;;
|
||||
*)
|
||||
usage "Error: unhandled option: $opt"
|
||||
;;
|
||||
esac
|
||||
done
|
||||
shift $((OPTIND - 1))
|
||||
|
||||
[[ -z $1 ]] || usage "Unexpected argument: $1"
|
||||
sshPrivateKey="$netConfigDir/id_$prefix"
|
||||
|
||||
|
||||
# cloud_ForEachInstance [cmd] [extra args to cmd]
|
||||
#
|
||||
# Execute a command for each element in the `instances` array
|
||||
#
|
||||
# cmd - The command to execute on each instance
|
||||
# The command will receive arguments followed by any
|
||||
# additionl arguments supplied to cloud_ForEachInstance:
|
||||
# name - name of the instance
|
||||
# publicIp - The public IP address of this instance
|
||||
# privateIp - The priate IP address of this instance
|
||||
# count - Monotonically increasing count for each
|
||||
# invocation of cmd, starting at 1
|
||||
# ... - Extra args to cmd..
|
||||
#
|
||||
#
|
||||
cloud_ForEachInstance() {
|
||||
declare cmd="$1"
|
||||
shift
|
||||
[[ -n $cmd ]] || { echo cloud_ForEachInstance: cmd not specified; exit 1; }
|
||||
|
||||
declare count=1
|
||||
for info in "${instances[@]}"; do
|
||||
declare name publicIp privateIp
|
||||
IFS=: read -r name publicIp privateIp < <(echo "$info")
|
||||
|
||||
eval "$cmd" "$name" "$publicIp" "$privateIp" "$count" "$@"
|
||||
count=$((count + 1))
|
||||
done
|
||||
}
|
||||
|
||||
prepareInstancesAndWriteConfigFile() {
|
||||
$metricsWriteDatapoint "testnet-deploy net-config-begin=1"
|
||||
|
||||
cat >> "$configFile" <<EOF
|
||||
# autogenerated at $(date)
|
||||
netBasename=$prefix
|
||||
publicNetwork=$publicNetwork
|
||||
sshPrivateKey=$sshPrivateKey
|
||||
EOF
|
||||
|
||||
buildSshOptions
|
||||
|
||||
recordInstanceIp() {
|
||||
declare name="$1"
|
||||
declare publicIp="$2"
|
||||
declare privateIp="$3"
|
||||
|
||||
declare arrayName="$5"
|
||||
|
||||
echo "$arrayName+=($publicIp) # $name" >> "$configFile"
|
||||
if [[ $arrayName = "leaderIp" ]]; then
|
||||
if $publicNetwork; then
|
||||
echo "entrypointIp=$publicIp" >> "$configFile"
|
||||
else
|
||||
echo "entrypointIp=$privateIp" >> "$configFile"
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
waitForStartupComplete() {
|
||||
declare name="$1"
|
||||
declare publicIp="$2"
|
||||
|
||||
echo "Waiting for $name to finish booting..."
|
||||
(
|
||||
for i in $(seq 1 30); do
|
||||
if (set -x; ssh "${sshOptions[@]}" "$publicIp" "test -f /.instance-startup-complete"); then
|
||||
break
|
||||
fi
|
||||
sleep 2
|
||||
echo "Retry $i..."
|
||||
done
|
||||
)
|
||||
echo "$name has booted."
|
||||
}
|
||||
|
||||
echo "Looking for leader instance..."
|
||||
cloud_FindInstance "$prefix-leader"
|
||||
[[ ${#instances[@]} -eq 1 ]] || {
|
||||
echo "Unable to find leader"
|
||||
exit 1
|
||||
}
|
||||
|
||||
(
|
||||
declare leaderName
|
||||
declare leaderIp
|
||||
IFS=: read -r leaderName leaderIp _ < <(echo "${instances[0]}")
|
||||
|
||||
# Try to ping the machine first.
|
||||
timeout 60s bash -c "set -o pipefail; until ping -c 3 $leaderIp | tr - _; do echo .; done"
|
||||
|
||||
if [[ ! -r $sshPrivateKey ]]; then
|
||||
echo "Fetching $sshPrivateKey from $leaderName"
|
||||
|
||||
# Try to scp in a couple times, sshd may not yet be up even though the
|
||||
# machine can be pinged...
|
||||
set -x -o pipefail
|
||||
for i in $(seq 1 30); do
|
||||
if cloud_FetchFile "$leaderName" "$leaderIp" /solana-id_ecdsa "$sshPrivateKey"; then
|
||||
break
|
||||
fi
|
||||
|
||||
sleep 1
|
||||
echo "Retry $i..."
|
||||
done
|
||||
|
||||
chmod 400 "$sshPrivateKey"
|
||||
ls -l "$sshPrivateKey"
|
||||
fi
|
||||
)
|
||||
|
||||
echo "leaderIp=()" >> "$configFile"
|
||||
cloud_ForEachInstance recordInstanceIp leaderIp
|
||||
cloud_ForEachInstance waitForStartupComplete
|
||||
|
||||
echo "Looking for validator instances..."
|
||||
cloud_FindInstances "$prefix-validator"
|
||||
[[ ${#instances[@]} -gt 0 ]] || {
|
||||
echo "Unable to find validators"
|
||||
exit 1
|
||||
}
|
||||
echo "validatorIpList=()" >> "$configFile"
|
||||
cloud_ForEachInstance recordInstanceIp validatorIpList
|
||||
cloud_ForEachInstance waitForStartupComplete
|
||||
|
||||
echo "clientIpList=()" >> "$configFile"
|
||||
echo "Looking for client instances..."
|
||||
cloud_FindInstances "$prefix-client"
|
||||
[[ ${#instances[@]} -eq 0 ]] || {
|
||||
cloud_ForEachInstance recordInstanceIp clientIpList
|
||||
cloud_ForEachInstance waitForStartupComplete
|
||||
}
|
||||
|
||||
echo "Wrote $configFile"
|
||||
$metricsWriteDatapoint "testnet-deploy net-config-complete=1"
|
||||
}
|
||||
|
||||
delete() {
|
||||
$metricsWriteDatapoint "testnet-deploy net-delete-begin=1"
|
||||
|
||||
# Delete the leader node first to prevent unusual metrics on the dashboard
|
||||
# during shutdown.
|
||||
# TODO: It would be better to fully cut-off metrics reporting before any
|
||||
# instances are deleted.
|
||||
for filter in "$prefix-leader" "$prefix-"; do
|
||||
echo "Searching for instances: $filter"
|
||||
cloud_FindInstances "$filter"
|
||||
|
||||
if [[ ${#instances[@]} -eq 0 ]]; then
|
||||
echo "No instances found matching '$filter'"
|
||||
else
|
||||
cloud_DeleteInstances true
|
||||
fi
|
||||
done
|
||||
rm -f "$configFile"
|
||||
|
||||
$metricsWriteDatapoint "testnet-deploy net-delete-complete=1"
|
||||
|
||||
}
|
||||
|
||||
case $command in
|
||||
delete)
|
||||
delete
|
||||
;;
|
||||
|
||||
create)
|
||||
[[ -n $validatorNodeCount ]] || usage "Need number of nodes"
|
||||
if [[ $validatorNodeCount -le 0 ]]; then
|
||||
usage "One or more validator nodes is required"
|
||||
fi
|
||||
|
||||
delete
|
||||
|
||||
$metricsWriteDatapoint "testnet-deploy net-create-begin=1"
|
||||
|
||||
rm -rf "$sshPrivateKey"{,.pub}
|
||||
|
||||
# Note: using rsa because |aws ec2 import-key-pair| seems to fail for ecdsa
|
||||
ssh-keygen -t rsa -N '' -f "$sshPrivateKey"
|
||||
|
||||
printNetworkInfo() {
|
||||
cat <<EOF
|
||||
========================================================================================
|
||||
|
||||
Network composition:
|
||||
Leader = $leaderMachineType (GPU=$enableGpu)
|
||||
Validators = $validatorNodeCount x $validatorMachineType
|
||||
Client(s) = $clientNodeCount x $clientMachineType
|
||||
|
||||
========================================================================================
|
||||
|
||||
EOF
|
||||
}
|
||||
printNetworkInfo
|
||||
|
||||
declare startupScript="$netConfigDir"/instance-startup-script.sh
|
||||
cat > "$startupScript" <<EOF
|
||||
#!/bin/bash -ex
|
||||
# autogenerated at $(date)
|
||||
|
||||
cat > /etc/motd <<EOM
|
||||
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
|
||||
|
||||
This instance has not been fully configured.
|
||||
|
||||
See startup script log messages in /var/log/syslog for status:
|
||||
$ sudo cat /var/log/syslog | egrep \\(startup-script\\|cloud-init\)
|
||||
|
||||
To block until setup is complete, run:
|
||||
$ until [[ -f /.instance-startup-complete ]]; do sleep 1; done
|
||||
|
||||
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
|
||||
EOM
|
||||
|
||||
# Place the generated private key at /solana-id_ecdsa so it's retrievable by anybody
|
||||
# who is able to log into this machine
|
||||
cat > /solana-id_ecdsa <<EOK
|
||||
$(cat "$sshPrivateKey")
|
||||
EOK
|
||||
cat > /solana-id_ecdsa.pub <<EOK
|
||||
$(cat "$sshPrivateKey.pub")
|
||||
EOK
|
||||
chmod 444 /solana-id_ecdsa
|
||||
|
||||
USER=\$(id -un)
|
||||
|
||||
$(
|
||||
cd "$here"/scripts/
|
||||
cat \
|
||||
disable-background-upgrades.sh \
|
||||
create-solana-user.sh \
|
||||
add-solana-user-authorized_keys.sh \
|
||||
install-earlyoom.sh \
|
||||
install-libssl-compatability.sh \
|
||||
install-rsync.sh \
|
||||
network-config.sh \
|
||||
)
|
||||
|
||||
cat > /etc/motd <<EOM
|
||||
$(printNetworkInfo)
|
||||
EOM
|
||||
|
||||
touch /.instance-startup-complete
|
||||
|
||||
EOF
|
||||
|
||||
cloud_CreateInstances "$prefix" "$prefix-leader" 1 \
|
||||
"$imageName" "$leaderMachineType" "$leaderBootDiskSizeInGb" \
|
||||
"$startupScript" "$leaderAddress" "$bootDiskType"
|
||||
|
||||
cloud_CreateInstances "$prefix" "$prefix-validator" "$validatorNodeCount" \
|
||||
"$imageName" "$validatorMachineType" "$validatorBootDiskSizeInGb" \
|
||||
"$startupScript" "" "$bootDiskType"
|
||||
|
||||
if [[ $clientNodeCount -gt 0 ]]; then
|
||||
cloud_CreateInstances "$prefix" "$prefix-client" "$clientNodeCount" \
|
||||
"$imageName" "$clientMachineType" "$clientBootDiskSizeInGb" \
|
||||
"$startupScript" "" "$bootDiskType"
|
||||
fi
|
||||
|
||||
$metricsWriteDatapoint "testnet-deploy net-create-complete=1"
|
||||
|
||||
prepareInstancesAndWriteConfigFile
|
||||
;;
|
||||
|
||||
config)
|
||||
prepareInstancesAndWriteConfigFile
|
||||
;;
|
||||
*)
|
||||
usage "Unknown command: $command"
|
||||
esac
|
80
net/init-metrics.sh
Executable file
80
net/init-metrics.sh
Executable file
@ -0,0 +1,80 @@
|
||||
#!/bin/bash -e
|
||||
|
||||
here=$(dirname "$0")
|
||||
# shellcheck source=net/common.sh
|
||||
source "$here"/common.sh
|
||||
|
||||
usage() {
|
||||
exitcode=0
|
||||
if [[ -n "$1" ]]; then
|
||||
exitcode=1
|
||||
echo "Error: $*"
|
||||
fi
|
||||
cat <<EOF
|
||||
usage: $0 [-e] [-d] [username]
|
||||
|
||||
Creates a testnet dev metrics database
|
||||
|
||||
username InfluxDB user with access to create a new database
|
||||
-d Delete the database instead of creating it
|
||||
-e Assume database already exists and SOLANA_METRICS_CONFIG is
|
||||
defined in the environment already
|
||||
|
||||
EOF
|
||||
exit $exitcode
|
||||
}
|
||||
|
||||
loadConfigFile
|
||||
|
||||
useEnv=false
|
||||
delete=false
|
||||
while getopts "hde" opt; do
|
||||
case $opt in
|
||||
h|\?)
|
||||
usage
|
||||
exit 0
|
||||
;;
|
||||
d)
|
||||
delete=true
|
||||
;;
|
||||
e)
|
||||
useEnv=true
|
||||
;;
|
||||
*)
|
||||
usage "Error: unhandled option: $opt"
|
||||
;;
|
||||
esac
|
||||
done
|
||||
shift $((OPTIND - 1))
|
||||
|
||||
if $useEnv; then
|
||||
[[ -n $SOLANA_METRICS_CONFIG ]] ||
|
||||
usage "Error: SOLANA_METRICS_CONFIG is not defined in the environment"
|
||||
else
|
||||
username=$1
|
||||
[[ -n "$username" ]] || usage "username not specified"
|
||||
|
||||
read -rs -p "InfluxDB password for $username: " password
|
||||
[[ -n $password ]] || { echo "Password not specified"; exit 1; }
|
||||
echo
|
||||
|
||||
query() {
|
||||
echo "$*"
|
||||
curl -XPOST \
|
||||
"https://metrics.solana.com:8086/query?u=${username}&p=${password}" \
|
||||
--data-urlencode "q=$*"
|
||||
}
|
||||
|
||||
query "DROP DATABASE \"$netBasename\""
|
||||
! $delete || exit 0
|
||||
query "CREATE DATABASE \"$netBasename\""
|
||||
query "ALTER RETENTION POLICY autogen ON \"$netBasename\" DURATION 7d"
|
||||
query "GRANT READ ON \"$netBasename\" TO \"ro\""
|
||||
query "GRANT WRITE ON \"$netBasename\" TO \"scratch_writer\""
|
||||
|
||||
SOLANA_METRICS_CONFIG="db=$netBasename,u=scratch_writer,p=topsecret"
|
||||
fi
|
||||
|
||||
echo "export SOLANA_METRICS_CONFIG=\"$SOLANA_METRICS_CONFIG\"" >> "$configFile"
|
||||
|
||||
exit 0
|
386
net/net.sh
Executable file
386
net/net.sh
Executable file
@ -0,0 +1,386 @@
|
||||
#!/bin/bash -e
|
||||
|
||||
here=$(dirname "$0")
|
||||
SOLANA_ROOT="$(cd "$here"/..; pwd)"
|
||||
|
||||
# shellcheck source=net/common.sh
|
||||
source "$here"/common.sh
|
||||
|
||||
usage() {
|
||||
exitcode=0
|
||||
if [[ -n "$1" ]]; then
|
||||
exitcode=1
|
||||
echo "Error: $*"
|
||||
fi
|
||||
cat <<EOF
|
||||
usage: $0 [start|stop|restart|sanity] [command-specific options]
|
||||
|
||||
Operate a configured testnet
|
||||
|
||||
start - Start the network
|
||||
sanity - Sanity check the network
|
||||
stop - Stop the network
|
||||
restart - Shortcut for stop then start
|
||||
|
||||
start-specific options:
|
||||
-S [snapFilename] - Deploy the specified Snap file
|
||||
-s edge|beta|stable - Deploy the latest Snap on the specified Snap release channel
|
||||
-t edge|beta|stable - Deploy the latest tarball release for the specified channel
|
||||
-f [cargoFeatures] - List of |cargo --feaures=| to activate
|
||||
(ignored if -s or -S is specified)
|
||||
|
||||
Note: if RUST_LOG is set in the environment it will be propogated into the
|
||||
network nodes.
|
||||
|
||||
sanity/start-specific options:
|
||||
-o noLedgerVerify - Skip ledger verification
|
||||
-o noValidatorSanity - Skip validator sanity
|
||||
-o rejectExtraNodes - Require the exact number of nodes
|
||||
|
||||
stop-specific options:
|
||||
none
|
||||
|
||||
EOF
|
||||
exit $exitcode
|
||||
}
|
||||
|
||||
snapChannel=
|
||||
releaseChannel=
|
||||
snapFilename=
|
||||
deployMethod=local
|
||||
sanityExtraArgs=
|
||||
cargoFeatures=
|
||||
|
||||
command=$1
|
||||
[[ -n $command ]] || usage
|
||||
shift
|
||||
|
||||
while getopts "h?S:s:t:o:f:" opt; do
|
||||
case $opt in
|
||||
h | \?)
|
||||
usage
|
||||
;;
|
||||
S)
|
||||
snapFilename=$OPTARG
|
||||
[[ -f $snapFilename ]] || usage "Snap not readable: $snapFilename"
|
||||
deployMethod=snap
|
||||
;;
|
||||
s)
|
||||
case $OPTARG in
|
||||
edge|beta|stable)
|
||||
snapChannel=$OPTARG
|
||||
deployMethod=snap
|
||||
;;
|
||||
*)
|
||||
usage "Invalid snap channel: $OPTARG"
|
||||
;;
|
||||
esac
|
||||
;;
|
||||
t)
|
||||
case $OPTARG in
|
||||
edge|beta|stable)
|
||||
releaseChannel=$OPTARG
|
||||
deployMethod=tar
|
||||
;;
|
||||
*)
|
||||
usage "Invalid release channel: $OPTARG"
|
||||
;;
|
||||
esac
|
||||
;;
|
||||
f)
|
||||
cargoFeatures=$OPTARG
|
||||
;;
|
||||
o)
|
||||
case $OPTARG in
|
||||
noLedgerVerify|noValidatorSanity|rejectExtraNodes)
|
||||
sanityExtraArgs="$sanityExtraArgs -o $OPTARG"
|
||||
;;
|
||||
*)
|
||||
echo "Error: unknown option: $OPTARG"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
;;
|
||||
*)
|
||||
usage "Error: unhandled option: $opt"
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
loadConfigFile
|
||||
expectedNodeCount=$((${#validatorIpList[@]} + 1))
|
||||
|
||||
build() {
|
||||
declare MAYBE_DOCKER=
|
||||
if [[ $(uname) != Linux ]]; then
|
||||
MAYBE_DOCKER="ci/docker-run.sh solanalabs/rust"
|
||||
fi
|
||||
SECONDS=0
|
||||
(
|
||||
cd "$SOLANA_ROOT"
|
||||
echo "--- Build started at $(date)"
|
||||
|
||||
set -x
|
||||
rm -rf farf
|
||||
$MAYBE_DOCKER cargo install --features="$cargoFeatures" --root farf
|
||||
./scripts/install-native-programs.sh farf/
|
||||
)
|
||||
echo "Build took $SECONDS seconds"
|
||||
}
|
||||
|
||||
startCommon() {
|
||||
declare ipAddress=$1
|
||||
test -d "$SOLANA_ROOT"
|
||||
ssh "${sshOptions[@]}" "$ipAddress" "mkdir -p ~/solana ~/.cargo/bin"
|
||||
rsync -vPrc -e "ssh ${sshOptions[*]}" \
|
||||
"$SOLANA_ROOT"/{fetch-perf-libs.sh,scripts,net,multinode-demo} \
|
||||
"$ipAddress":~/solana/
|
||||
}
|
||||
|
||||
startLeader() {
|
||||
declare ipAddress=$1
|
||||
declare logFile="$2"
|
||||
echo "--- Starting leader: $leaderIp"
|
||||
echo "start log: $logFile"
|
||||
|
||||
# Deploy local binaries to leader. Validators and clients later fetch the
|
||||
# binaries from the leader.
|
||||
(
|
||||
set -x
|
||||
startCommon "$ipAddress" || exit 1
|
||||
case $deployMethod in
|
||||
snap)
|
||||
rsync -vPrc -e "ssh ${sshOptions[*]}" "$snapFilename" "$ipAddress:~/solana/solana.snap"
|
||||
;;
|
||||
tar)
|
||||
rsync -vPrc -e "ssh ${sshOptions[*]}" "$SOLANA_ROOT"/solana-release/bin/* "$ipAddress:~/.cargo/bin/"
|
||||
;;
|
||||
local)
|
||||
rsync -vPrc -e "ssh ${sshOptions[*]}" "$SOLANA_ROOT"/farf/bin/* "$ipAddress:~/.cargo/bin/"
|
||||
;;
|
||||
*)
|
||||
usage "Internal error: invalid deployMethod: $deployMethod"
|
||||
;;
|
||||
esac
|
||||
|
||||
ssh "${sshOptions[@]}" -n "$ipAddress" \
|
||||
"./solana/net/remote/remote-node.sh $deployMethod leader $publicNetwork $entrypointIp $expectedNodeCount \"$RUST_LOG\""
|
||||
) >> "$logFile" 2>&1 || {
|
||||
cat "$logFile"
|
||||
echo "^^^ +++"
|
||||
exit 1
|
||||
}
|
||||
}
|
||||
|
||||
startValidator() {
|
||||
declare ipAddress=$1
|
||||
declare logFile="$netLogDir/validator-$ipAddress.log"
|
||||
|
||||
echo "--- Starting validator: $ipAddress"
|
||||
echo "start log: $logFile"
|
||||
(
|
||||
set -x
|
||||
startCommon "$ipAddress"
|
||||
ssh "${sshOptions[@]}" -n "$ipAddress" \
|
||||
"./solana/net/remote/remote-node.sh $deployMethod validator $publicNetwork $entrypointIp $expectedNodeCount \"$RUST_LOG\""
|
||||
) >> "$logFile" 2>&1 &
|
||||
declare pid=$!
|
||||
ln -sfT "validator-$ipAddress.log" "$netLogDir/validator-$pid.log"
|
||||
pids+=("$pid")
|
||||
}
|
||||
|
||||
startClient() {
|
||||
declare ipAddress=$1
|
||||
declare logFile="$2"
|
||||
echo "--- Starting client: $ipAddress"
|
||||
echo "start log: $logFile"
|
||||
(
|
||||
set -x
|
||||
startCommon "$ipAddress"
|
||||
ssh "${sshOptions[@]}" -f "$ipAddress" \
|
||||
"./solana/net/remote/remote-client.sh $deployMethod $entrypointIp $expectedNodeCount \"$RUST_LOG\""
|
||||
) >> "$logFile" 2>&1 || {
|
||||
cat "$logFile"
|
||||
echo "^^^ +++"
|
||||
exit 1
|
||||
}
|
||||
}
|
||||
|
||||
sanity() {
|
||||
declare expectedNodeCount=$((${#validatorIpList[@]} + 1))
|
||||
declare ok=true
|
||||
|
||||
echo "--- Sanity"
|
||||
$metricsWriteDatapoint "testnet-deploy net-sanity-begin=1"
|
||||
|
||||
(
|
||||
set -x
|
||||
# shellcheck disable=SC2029 # remote-client.sh args are expanded on client side intentionally
|
||||
ssh "${sshOptions[@]}" "$leaderIp" \
|
||||
"./solana/net/remote/remote-sanity.sh $sanityExtraArgs"
|
||||
) || ok=false
|
||||
|
||||
$metricsWriteDatapoint "testnet-deploy net-sanity-complete=1"
|
||||
$ok || exit 1
|
||||
}
|
||||
|
||||
start() {
|
||||
case $deployMethod in
|
||||
snap)
|
||||
if [[ -n $snapChannel ]]; then
|
||||
rm -f "$SOLANA_ROOT"/solana_*.snap
|
||||
if [[ $(uname) != Linux ]]; then
|
||||
(
|
||||
set -x
|
||||
SOLANA_DOCKER_RUN_NOSETUID=1 "$SOLANA_ROOT"/ci/docker-run.sh ubuntu:18.04 bash -c "
|
||||
set -ex;
|
||||
apt-get -qq update;
|
||||
apt-get -qq -y install snapd;
|
||||
snap download --channel=$snapChannel solana;
|
||||
"
|
||||
)
|
||||
else
|
||||
(
|
||||
cd "$SOLANA_ROOT"
|
||||
snap download --channel="$snapChannel" solana
|
||||
)
|
||||
fi
|
||||
snapFilename="$(echo "$SOLANA_ROOT"/solana_*.snap)"
|
||||
[[ -r $snapFilename ]] || {
|
||||
echo "Error: Snap not readable: $snapFilename"
|
||||
exit 1
|
||||
}
|
||||
fi
|
||||
;;
|
||||
tar)
|
||||
if [[ -n $releaseChannel ]]; then
|
||||
rm -f "$SOLANA_ROOT"/solana-release.tar.bz2
|
||||
cd "$SOLANA_ROOT"
|
||||
|
||||
set -x
|
||||
curl -o solana-release.tar.bz2 http://solana-release.s3.amazonaws.com/"$releaseChannel"/solana-release.tar.bz2
|
||||
tar jxvf solana-release.tar.bz2
|
||||
fi
|
||||
;;
|
||||
local)
|
||||
build
|
||||
;;
|
||||
*)
|
||||
usage "Internal error: invalid deployMethod: $deployMethod"
|
||||
;;
|
||||
esac
|
||||
|
||||
echo "Deployment started at $(date)"
|
||||
$metricsWriteDatapoint "testnet-deploy net-start-begin=1"
|
||||
|
||||
SECONDS=0
|
||||
declare leaderDeployTime=
|
||||
startLeader "$leaderIp" "$netLogDir/leader-$leaderIp.log"
|
||||
leaderDeployTime=$SECONDS
|
||||
$metricsWriteDatapoint "testnet-deploy net-leader-started=1"
|
||||
|
||||
SECONDS=0
|
||||
pids=()
|
||||
loopCount=0
|
||||
for ipAddress in "${validatorIpList[@]}"; do
|
||||
startValidator "$ipAddress"
|
||||
|
||||
# Staggering validator startup time. If too many validators
|
||||
# bootup simultaneously, leader node gets more rsync requests
|
||||
# from the validators than it can handle.
|
||||
((loopCount++ % 2 == 0)) && sleep 2
|
||||
done
|
||||
|
||||
for pid in "${pids[@]}"; do
|
||||
declare ok=true
|
||||
wait "$pid" || ok=false
|
||||
if ! $ok; then
|
||||
cat "$netLogDir/validator-$pid.log"
|
||||
echo ^^^ +++
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
|
||||
$metricsWriteDatapoint "testnet-deploy net-validators-started=1"
|
||||
validatorDeployTime=$SECONDS
|
||||
|
||||
sanity
|
||||
|
||||
SECONDS=0
|
||||
for ipAddress in "${clientIpList[@]}"; do
|
||||
startClient "$ipAddress" "$netLogDir/client-$ipAddress.log"
|
||||
done
|
||||
clientDeployTime=$SECONDS
|
||||
$metricsWriteDatapoint "testnet-deploy net-start-complete=1"
|
||||
|
||||
if [[ $deployMethod = "snap" ]]; then
|
||||
declare networkVersion=unknown
|
||||
IFS=\ read -r _ networkVersion _ < <(
|
||||
ssh "${sshOptions[@]}" "$leaderIp" \
|
||||
"snap info solana | grep \"^installed:\""
|
||||
)
|
||||
networkVersion=${networkVersion/0+git./}
|
||||
$metricsWriteDatapoint "testnet-deploy version=\"$networkVersion\""
|
||||
fi
|
||||
|
||||
echo
|
||||
echo "+++ Deployment Successful"
|
||||
echo "Leader deployment took $leaderDeployTime seconds"
|
||||
echo "Validator deployment (${#validatorIpList[@]} instances) took $validatorDeployTime seconds"
|
||||
echo "Client deployment (${#clientIpList[@]} instances) took $clientDeployTime seconds"
|
||||
echo "Network start logs in $netLogDir:"
|
||||
ls -l "$netLogDir"
|
||||
}
|
||||
|
||||
|
||||
stopNode() {
|
||||
local ipAddress=$1
|
||||
echo "--- Stopping node: $ipAddress"
|
||||
(
|
||||
set -x
|
||||
ssh "${sshOptions[@]}" "$ipAddress" "
|
||||
set -x
|
||||
if snap list solana; then
|
||||
sudo snap set solana mode=
|
||||
sudo snap remove solana
|
||||
fi
|
||||
! tmux list-sessions || tmux kill-session
|
||||
for pattern in solana- remote- oom-monitor net-stats; do
|
||||
pkill -9 \$pattern
|
||||
done
|
||||
"
|
||||
) || true
|
||||
}
|
||||
|
||||
stop() {
|
||||
SECONDS=0
|
||||
$metricsWriteDatapoint "testnet-deploy net-stop-begin=1"
|
||||
|
||||
stopNode "$leaderIp"
|
||||
|
||||
for ipAddress in "${validatorIpList[@]}" "${clientIpList[@]}"; do
|
||||
stopNode "$ipAddress"
|
||||
done
|
||||
|
||||
$metricsWriteDatapoint "testnet-deploy net-stop-complete=1"
|
||||
echo "Stopping nodes took $SECONDS seconds"
|
||||
}
|
||||
|
||||
case $command in
|
||||
restart)
|
||||
stop
|
||||
start
|
||||
;;
|
||||
start)
|
||||
start
|
||||
;;
|
||||
sanity)
|
||||
sanity
|
||||
;;
|
||||
stop)
|
||||
stop
|
||||
;;
|
||||
*)
|
||||
echo "Internal error: Unknown command: $command"
|
||||
exit 1
|
||||
esac
|
1
net/remote/README.md
Normal file
1
net/remote/README.md
Normal file
@ -0,0 +1 @@
|
||||
Scripts that run on the remote testnet nodes
|
83
net/remote/remote-client.sh
Executable file
83
net/remote/remote-client.sh
Executable file
@ -0,0 +1,83 @@
|
||||
#!/bin/bash -e
|
||||
|
||||
cd "$(dirname "$0")"/../..
|
||||
|
||||
echo "$(date) | $0 $*" > client.log
|
||||
|
||||
deployMethod="$1"
|
||||
entrypointIp="$2"
|
||||
numNodes="$3"
|
||||
RUST_LOG="$4"
|
||||
export RUST_LOG=${RUST_LOG:-solana=info} # if RUST_LOG is unset, default to info
|
||||
|
||||
missing() {
|
||||
echo "Error: $1 not specified"
|
||||
exit 1
|
||||
}
|
||||
|
||||
[[ -n $deployMethod ]] || missing deployMethod
|
||||
[[ -n $entrypointIp ]] || missing entrypointIp
|
||||
[[ -n $numNodes ]] || missing numNodes
|
||||
|
||||
source net/common.sh
|
||||
loadConfigFile
|
||||
|
||||
threadCount=$(nproc)
|
||||
if [[ $threadCount -gt 4 ]]; then
|
||||
threadCount=4
|
||||
fi
|
||||
|
||||
case $deployMethod in
|
||||
snap)
|
||||
net/scripts/rsync-retry.sh -vPrc "$entrypointIp:~/solana/solana.snap" .
|
||||
sudo snap install solana.snap --devmode --dangerous
|
||||
|
||||
solana_bench_tps=/snap/bin/solana.bench-tps
|
||||
solana_keygen=/snap/bin/solana.keygen
|
||||
;;
|
||||
local|tar)
|
||||
PATH="$HOME"/.cargo/bin:"$PATH"
|
||||
export USE_INSTALL=1
|
||||
export SOLANA_DEFAULT_METRICS_RATE=1
|
||||
|
||||
net/scripts/rsync-retry.sh -vPrc "$entrypointIp:~/.cargo/bin/solana*" ~/.cargo/bin/
|
||||
solana_bench_tps=solana-bench-tps
|
||||
solana_keygen=solana-keygen
|
||||
;;
|
||||
*)
|
||||
echo "Unknown deployment method: $deployMethod"
|
||||
exit 1
|
||||
esac
|
||||
|
||||
scripts/oom-monitor.sh > oom-monitor.log 2>&1 &
|
||||
scripts/net-stats.sh > net-stats.log 2>&1 &
|
||||
|
||||
! tmux list-sessions || tmux kill-session
|
||||
|
||||
clientCommand="\
|
||||
$solana_bench_tps \
|
||||
--network $entrypointIp:8001 \
|
||||
--identity client.json \
|
||||
--num-nodes $numNodes \
|
||||
--duration 7500 \
|
||||
--sustained \
|
||||
--threads $threadCount \
|
||||
"
|
||||
|
||||
keygenCommand="$solana_keygen -o client.json"
|
||||
tmux new -s solana-bench-tps -d "
|
||||
[[ -r client.json ]] || {
|
||||
echo '$ $keygenCommand' | tee -a client.log
|
||||
$keygenCommand >> client.log 2>&1
|
||||
}
|
||||
|
||||
while true; do
|
||||
echo === Client start: \$(date) | tee -a client.log
|
||||
$metricsWriteDatapoint 'testnet-deploy client-begin=1'
|
||||
echo '$ $clientCommand' | tee -a client.log
|
||||
$clientCommand >> client.log 2>&1
|
||||
$metricsWriteDatapoint 'testnet-deploy client-complete=1'
|
||||
done
|
||||
"
|
||||
sleep 1
|
||||
tmux capture-pane -t solana-bench-tps -p -S -100
|
113
net/remote/remote-node.sh
Executable file
113
net/remote/remote-node.sh
Executable file
@ -0,0 +1,113 @@
|
||||
#!/bin/bash -e
|
||||
|
||||
cd "$(dirname "$0")"/../..
|
||||
|
||||
deployMethod="$1"
|
||||
nodeType="$2"
|
||||
publicNetwork="$3"
|
||||
entrypointIp="$4"
|
||||
numNodes="$5"
|
||||
RUST_LOG="$6"
|
||||
|
||||
missing() {
|
||||
echo "Error: $1 not specified"
|
||||
exit 1
|
||||
}
|
||||
|
||||
[[ -n $deployMethod ]] || missing deployMethod
|
||||
[[ -n $nodeType ]] || missing nodeType
|
||||
[[ -n $publicNetwork ]] || missing publicNetwork
|
||||
[[ -n $entrypointIp ]] || missing entrypointIp
|
||||
[[ -n $numNodes ]] || missing numNodes
|
||||
|
||||
cat > deployConfig <<EOF
|
||||
deployMethod="$deployMethod"
|
||||
entrypointIp="$entrypointIp"
|
||||
numNodes="$numNodes"
|
||||
EOF
|
||||
|
||||
source net/common.sh
|
||||
loadConfigFile
|
||||
|
||||
if [[ $publicNetwork = true ]]; then
|
||||
setupArgs="-p"
|
||||
else
|
||||
setupArgs="-l"
|
||||
fi
|
||||
|
||||
case $deployMethod in
|
||||
snap)
|
||||
SECONDS=0
|
||||
[[ $nodeType = leader ]] ||
|
||||
net/scripts/rsync-retry.sh -vPrc "$entrypointIp:~/solana/solana.snap" .
|
||||
sudo snap install solana.snap --devmode --dangerous
|
||||
|
||||
# shellcheck disable=SC2089
|
||||
commonNodeConfig="\
|
||||
leader-ip=\"$entrypointIp\" \
|
||||
default-metrics-rate=1 \
|
||||
metrics-config=\"$SOLANA_METRICS_CONFIG\" \
|
||||
rust-log=\"$RUST_LOG\" \
|
||||
setup-args=\"$setupArgs\" \
|
||||
"
|
||||
|
||||
if [[ -e /dev/nvidia0 ]]; then
|
||||
commonNodeConfig="$commonNodeConfig enable-cuda=1"
|
||||
fi
|
||||
|
||||
if [[ $nodeType = leader ]]; then
|
||||
nodeConfig="mode=leader+drone $commonNodeConfig"
|
||||
ln -sf -T /var/snap/solana/current/leader/current leader.log
|
||||
ln -sf -T /var/snap/solana/current/drone/current drone.log
|
||||
else
|
||||
nodeConfig="mode=validator $commonNodeConfig"
|
||||
ln -sf -T /var/snap/solana/current/validator/current validator.log
|
||||
fi
|
||||
|
||||
logmarker="solana deploy $(date)/$RANDOM"
|
||||
logger "$logmarker"
|
||||
|
||||
# shellcheck disable=SC2086,SC2090 # Don't want to double quote "$nodeConfig"
|
||||
sudo snap set solana $nodeConfig
|
||||
snap info solana
|
||||
sudo snap get solana
|
||||
echo Slight delay to get more syslog output
|
||||
sleep 2
|
||||
sudo grep -Pzo "$logmarker(.|\\n)*" /var/log/syslog
|
||||
|
||||
echo "Succeeded in ${SECONDS} seconds"
|
||||
;;
|
||||
local|tar)
|
||||
PATH="$HOME"/.cargo/bin:"$PATH"
|
||||
export USE_INSTALL=1
|
||||
export RUST_LOG
|
||||
export SOLANA_DEFAULT_METRICS_RATE=1
|
||||
|
||||
./fetch-perf-libs.sh
|
||||
export LD_LIBRARY_PATH="$PWD/target/perf-libs:$LD_LIBRARY_PATH"
|
||||
|
||||
scripts/oom-monitor.sh > oom-monitor.log 2>&1 &
|
||||
scripts/net-stats.sh > net-stats.log 2>&1 &
|
||||
|
||||
case $nodeType in
|
||||
leader)
|
||||
./multinode-demo/setup.sh -t leader $setupArgs
|
||||
./multinode-demo/drone.sh > drone.log 2>&1 &
|
||||
./multinode-demo/leader.sh > leader.log 2>&1 &
|
||||
;;
|
||||
validator)
|
||||
net/scripts/rsync-retry.sh -vPrc "$entrypointIp:~/.cargo/bin/solana*" ~/.cargo/bin/
|
||||
|
||||
./multinode-demo/setup.sh -t validator $setupArgs
|
||||
./multinode-demo/validator.sh "$entrypointIp":~/solana "$entrypointIp:8001" >validator.log 2>&1 &
|
||||
;;
|
||||
*)
|
||||
echo "Error: unknown node type: $nodeType"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
;;
|
||||
*)
|
||||
echo "Unknown deployment method: $deployMethod"
|
||||
exit 1
|
||||
esac
|
153
net/remote/remote-sanity.sh
Executable file
153
net/remote/remote-sanity.sh
Executable file
@ -0,0 +1,153 @@
|
||||
#!/bin/bash -e
|
||||
#
|
||||
# This script is to be run on the leader node
|
||||
#
|
||||
|
||||
cd "$(dirname "$0")"/../..
|
||||
|
||||
deployMethod=
|
||||
entrypointIp=
|
||||
numNodes=
|
||||
|
||||
[[ -r deployConfig ]] || {
|
||||
echo deployConfig missing
|
||||
exit 1
|
||||
}
|
||||
# shellcheck source=/dev/null # deployConfig is written by remote-node.sh
|
||||
source deployConfig
|
||||
|
||||
missing() {
|
||||
echo "Error: $1 not specified"
|
||||
exit 1
|
||||
}
|
||||
|
||||
[[ -n $deployMethod ]] || missing deployMethod
|
||||
[[ -n $entrypointIp ]] || missing entrypointIp
|
||||
[[ -n $numNodes ]] || missing numNodes
|
||||
|
||||
ledgerVerify=true
|
||||
validatorSanity=true
|
||||
rejectExtraNodes=false
|
||||
while [[ $1 = -o ]]; do
|
||||
opt="$2"
|
||||
shift 2
|
||||
case $opt in
|
||||
noLedgerVerify)
|
||||
ledgerVerify=false
|
||||
;;
|
||||
noValidatorSanity)
|
||||
validatorSanity=false
|
||||
;;
|
||||
rejectExtraNodes)
|
||||
rejectExtraNodes=true
|
||||
;;
|
||||
*)
|
||||
echo "Error: unknown option: $opt"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
source net/common.sh
|
||||
loadConfigFile
|
||||
|
||||
case $deployMethod in
|
||||
snap)
|
||||
PATH="/snap/bin:$PATH"
|
||||
export USE_SNAP=1
|
||||
entrypointRsyncUrl="$entrypointIp"
|
||||
|
||||
solana_bench_tps=solana.bench-tps
|
||||
solana_ledger_tool=solana.ledger-tool
|
||||
solana_keygen=solana.keygen
|
||||
|
||||
ledger=/var/snap/solana/current/config/ledger
|
||||
client_id=~/snap/solana/current/config/client-id.json
|
||||
|
||||
;;
|
||||
local|tar)
|
||||
PATH="$HOME"/.cargo/bin:"$PATH"
|
||||
export USE_INSTALL=1
|
||||
entrypointRsyncUrl="$entrypointIp:~/solana"
|
||||
|
||||
solana_bench_tps=solana-bench-tps
|
||||
solana_ledger_tool=solana-ledger-tool
|
||||
solana_keygen=solana-keygen
|
||||
|
||||
ledger=config/ledger
|
||||
client_id=config/client-id.json
|
||||
;;
|
||||
*)
|
||||
echo "Unknown deployment method: $deployMethod"
|
||||
exit 1
|
||||
esac
|
||||
|
||||
|
||||
echo "--- $entrypointIp: wallet sanity"
|
||||
(
|
||||
set -x
|
||||
scripts/wallet-sanity.sh "$entrypointIp:8001"
|
||||
)
|
||||
|
||||
echo "+++ $entrypointIp: node count ($numNodes expected)"
|
||||
(
|
||||
set -x
|
||||
$solana_keygen -o "$client_id"
|
||||
|
||||
maybeRejectExtraNodes=
|
||||
if $rejectExtraNodes; then
|
||||
maybeRejectExtraNodes="--reject-extra-nodes"
|
||||
fi
|
||||
|
||||
$solana_bench_tps \
|
||||
--network "$entrypointIp:8001" \
|
||||
--identity "$client_id" \
|
||||
--num-nodes "$numNodes" \
|
||||
$maybeRejectExtraNodes \
|
||||
--converge-only
|
||||
)
|
||||
|
||||
echo "--- $entrypointIp: verify ledger"
|
||||
if $ledgerVerify; then
|
||||
if [[ -d $ledger ]]; then
|
||||
(
|
||||
set -x
|
||||
rm -rf /var/tmp/ledger-verify
|
||||
du -hs "$ledger"
|
||||
time cp -r "$ledger" /var/tmp/ledger-verify
|
||||
time $solana_ledger_tool --ledger /var/tmp/ledger-verify verify
|
||||
)
|
||||
else
|
||||
echo "^^^ +++"
|
||||
echo "Ledger verify skipped: directory does not exist: $ledger"
|
||||
fi
|
||||
else
|
||||
echo "^^^ +++"
|
||||
echo "Note: ledger verify disabled"
|
||||
fi
|
||||
|
||||
|
||||
echo "--- $entrypointIp: validator sanity"
|
||||
if $validatorSanity; then
|
||||
(
|
||||
set -ex -o pipefail
|
||||
./multinode-demo/setup.sh -t validator
|
||||
timeout 10s ./multinode-demo/validator.sh "$entrypointRsyncUrl" "$entrypointIp:8001" 2>&1 | tee validator.log
|
||||
) || {
|
||||
exitcode=$?
|
||||
[[ $exitcode -eq 124 ]] || exit $exitcode
|
||||
}
|
||||
wc -l validator.log
|
||||
if grep -C100 panic validator.log; then
|
||||
echo "^^^ +++"
|
||||
echo "Panic observed"
|
||||
exit 1
|
||||
else
|
||||
echo "Validator log looks ok"
|
||||
fi
|
||||
else
|
||||
echo "^^^ +++"
|
||||
echo "Note: validator sanity disabled"
|
||||
fi
|
||||
|
||||
echo --- Pass
|
20
net/scripts/add-solana-user-authorized_keys.sh
Executable file
20
net/scripts/add-solana-user-authorized_keys.sh
Executable file
@ -0,0 +1,20 @@
|
||||
#!/bin/bash -ex
|
||||
|
||||
[[ $(uname) = Linux ]] || exit 1
|
||||
[[ $USER = root ]] || exit 1
|
||||
|
||||
[[ -d /home/solana/.ssh ]] || exit 1
|
||||
|
||||
# /solana-authorized_keys contains the public keys for users that should
|
||||
# automatically be granted access to ALL testnets.
|
||||
#
|
||||
# To add an entry into this list:
|
||||
# 1. Run: ssh-keygen -t ecdsa -N '' -f ~/.ssh/id-solana-testnet
|
||||
# 2. Inline ~/.ssh/id-solana-testnet.pub below
|
||||
cat > /solana-authorized_keys <<EOF
|
||||
ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFBNwLw0i+rI312gWshojFlNw9NV7WfaKeeUsYADqOvM2o4yrO2pPw+sgW8W+/rPpVyH7zU9WVRgTME8NgFV1Vc=
|
||||
EOF
|
||||
|
||||
sudo -u solana bash -c "
|
||||
cat /solana-authorized_keys >> /home/solana/.ssh/authorized_keys
|
||||
"
|
27
net/scripts/create-solana-user.sh
Executable file
27
net/scripts/create-solana-user.sh
Executable file
@ -0,0 +1,27 @@
|
||||
#!/bin/bash -ex
|
||||
|
||||
[[ $(uname) = Linux ]] || exit 1
|
||||
[[ $USER = root ]] || exit 1
|
||||
|
||||
adduser solana --gecos "" --disabled-password --quiet
|
||||
adduser solana sudo
|
||||
echo "solana ALL=(ALL) NOPASSWD:ALL" >> /etc/sudoers
|
||||
id solana
|
||||
|
||||
[[ -r /solana-id_ecdsa ]] || exit 1
|
||||
[[ -r /solana-id_ecdsa.pub ]] || exit 1
|
||||
|
||||
sudo -u solana bash -c "
|
||||
mkdir -p /home/solana/.ssh/
|
||||
cd /home/solana/.ssh/
|
||||
cp /solana-id_ecdsa.pub authorized_keys
|
||||
umask 377
|
||||
cp /solana-id_ecdsa id_ecdsa
|
||||
echo \"
|
||||
Host *
|
||||
BatchMode yes
|
||||
IdentityFile ~/.ssh/id_ecdsa
|
||||
StrictHostKeyChecking no
|
||||
\" > config
|
||||
"
|
||||
|
20
net/scripts/disable-background-upgrades.sh
Executable file
20
net/scripts/disable-background-upgrades.sh
Executable file
@ -0,0 +1,20 @@
|
||||
#!/bin/bash -ex
|
||||
#
|
||||
# Prevent background upgrades that block |apt-get|
|
||||
#
|
||||
# TODO: This approach is pretty uncompromising. An alternative solution that
|
||||
# doesn't involve deleting system files would be welcome.
|
||||
|
||||
[[ $(uname) = Linux ]] || exit 1
|
||||
[[ $USER = root ]] || exit 1
|
||||
|
||||
rm -rf /usr/lib/apt/apt.systemd.daily
|
||||
rm -rf /usr/bin/unattended-upgrade
|
||||
killall apt.systemd.daily || true
|
||||
killall unattended-upgrade || true
|
||||
|
||||
while fuser /var/lib/dpkg/lock; do
|
||||
echo Waiting for lock release...
|
||||
sleep 1
|
||||
done
|
||||
|
240
net/scripts/ec2-provider.sh
Normal file
240
net/scripts/ec2-provider.sh
Normal file
@ -0,0 +1,240 @@
|
||||
# |source| this file
|
||||
#
|
||||
# Utilities for working with EC2 instances
|
||||
#
|
||||
|
||||
zone=
|
||||
region=
|
||||
|
||||
cloud_SetZone() {
|
||||
zone="$1"
|
||||
# AWS region is zone with the last character removed
|
||||
region="${zone:0:$((${#zone} - 1))}"
|
||||
}
|
||||
|
||||
# Set the default zone
|
||||
cloud_SetZone "us-east-1b"
|
||||
|
||||
# sshPrivateKey should be globally defined whenever this function is called.
|
||||
#
|
||||
# TODO: Remove usage of the sshPrivateKey global
|
||||
__cloud_SshPrivateKeyCheck() {
|
||||
# shellcheck disable=SC2154
|
||||
if [[ -z $sshPrivateKey ]]; then
|
||||
echo Error: sshPrivateKey not defined
|
||||
exit 1
|
||||
fi
|
||||
if [[ ! -r $sshPrivateKey ]]; then
|
||||
echo "Error: file is not readable: $sshPrivateKey"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
#
|
||||
# __cloud_FindInstances
|
||||
#
|
||||
# Find instances with name matching the specified pattern.
|
||||
#
|
||||
# For each matching instance, an entry in the `instances` array will be added with the
|
||||
# following information about the instance:
|
||||
# "name:public IP:private IP"
|
||||
#
|
||||
# filter - The instances to filter on
|
||||
#
|
||||
# examples:
|
||||
# $ __cloud_FindInstances "exact-machine-name"
|
||||
# $ __cloud_FindInstances "all-machines-with-a-common-machine-prefix*"
|
||||
#
|
||||
__cloud_FindInstances() {
|
||||
declare filter="$1"
|
||||
|
||||
instances=()
|
||||
declare name publicIp privateIp
|
||||
while read -r name publicIp privateIp; do
|
||||
printf "%-30s | publicIp=%-16s privateIp=%s\n" "$name" "$publicIp" "$privateIp"
|
||||
instances+=("$name:$publicIp:$privateIp")
|
||||
done < <(aws ec2 describe-instances \
|
||||
--region "$region" \
|
||||
--filters \
|
||||
"Name=tag:name,Values=$filter" \
|
||||
"Name=instance-state-name,Values=pending,running" \
|
||||
--query "Reservations[].Instances[].[InstanceId,PublicIpAddress,PrivateIpAddress]" \
|
||||
--output text
|
||||
)
|
||||
}
|
||||
|
||||
#
|
||||
# cloud_FindInstances [namePrefix]
|
||||
#
|
||||
# Find instances with names matching the specified prefix
|
||||
#
|
||||
# For each matching instance, an entry in the `instances` array will be added with the
|
||||
# following information about the instance:
|
||||
# "name:public IP:private IP"
|
||||
#
|
||||
# namePrefix - The instance name prefix to look for
|
||||
#
|
||||
# examples:
|
||||
# $ cloud_FindInstances all-machines-with-a-common-machine-prefix
|
||||
#
|
||||
cloud_FindInstances() {
|
||||
declare namePrefix="$1"
|
||||
__cloud_FindInstances "$namePrefix*"
|
||||
}
|
||||
|
||||
#
|
||||
# cloud_FindInstance [name]
|
||||
#
|
||||
# Find an instance with a name matching the exact pattern.
|
||||
#
|
||||
# For each matching instance, an entry in the `instances` array will be added with the
|
||||
# following information about the instance:
|
||||
# "name:public IP:private IP"
|
||||
#
|
||||
# name - The instance name to look for
|
||||
#
|
||||
# examples:
|
||||
# $ cloud_FindInstance exact-machine-name
|
||||
#
|
||||
cloud_FindInstance() {
|
||||
declare name="$1"
|
||||
__cloud_FindInstances "$name"
|
||||
}
|
||||
|
||||
|
||||
#
|
||||
# cloud_CreateInstances [networkName] [namePrefix] [numNodes] [imageName]
|
||||
# [machineType] [bootDiskSize] [startupScript] [address]
|
||||
#
|
||||
# Creates one more identical instances.
|
||||
#
|
||||
# networkName - unique name of this testnet
|
||||
# namePrefix - unique string to prefix all the instance names with
|
||||
# numNodes - number of instances to create
|
||||
# imageName - Disk image for the instances
|
||||
# machineType - GCE machine type
|
||||
# bootDiskSize - Optional size of the boot disk in GB
|
||||
# startupScript - Optional startup script to execute when the instance boots
|
||||
# address - Optional name of the GCE static IP address to attach to the
|
||||
# instance. Requires that |numNodes| = 1 and that addressName
|
||||
# has been provisioned in the GCE region that is hosting |zone|
|
||||
#
|
||||
# Tip: use cloud_FindInstances to locate the instances once this function
|
||||
# returns
|
||||
cloud_CreateInstances() {
|
||||
declare networkName="$1"
|
||||
declare namePrefix="$2"
|
||||
declare numNodes="$3"
|
||||
declare imageName="$4"
|
||||
declare machineType="$5"
|
||||
declare optionalBootDiskSize="$6"
|
||||
declare optionalStartupScript="$7"
|
||||
declare optionalAddress="$8"
|
||||
|
||||
__cloud_SshPrivateKeyCheck
|
||||
(
|
||||
set -x
|
||||
aws ec2 delete-key-pair --region "$region" --key-name "$networkName"
|
||||
aws ec2 import-key-pair --region "$region" --key-name "$networkName" \
|
||||
--public-key-material file://"${sshPrivateKey}".pub
|
||||
)
|
||||
|
||||
declare -a args
|
||||
args=(
|
||||
--key-name "$networkName"
|
||||
--count "$numNodes"
|
||||
--region "$region"
|
||||
--placement "AvailabilityZone=$zone"
|
||||
--security-groups testnet
|
||||
--image-id "$imageName"
|
||||
--instance-type "$machineType"
|
||||
--tag-specifications "ResourceType=instance,Tags=[{Key=name,Value=$namePrefix}]"
|
||||
)
|
||||
if [[ -n $optionalBootDiskSize ]]; then
|
||||
args+=(
|
||||
--block-device-mapping "[{\"DeviceName\": \"/dev/sda1\", \"Ebs\": { \"VolumeSize\": $optionalBootDiskSize }}]"
|
||||
)
|
||||
fi
|
||||
if [[ -n $optionalStartupScript ]]; then
|
||||
args+=(
|
||||
--user-data "file://$optionalStartupScript"
|
||||
)
|
||||
fi
|
||||
|
||||
if [[ -n $optionalAddress ]]; then
|
||||
[[ $numNodes = 1 ]] || {
|
||||
echo "Error: address may not be supplied when provisioning multiple nodes: $optionalAddress"
|
||||
exit 1
|
||||
}
|
||||
fi
|
||||
|
||||
(
|
||||
set -x
|
||||
aws ec2 run-instances "${args[@]}"
|
||||
)
|
||||
|
||||
if [[ -n $optionalAddress ]]; then
|
||||
cloud_FindInstance "$namePrefix"
|
||||
if [[ ${#instances[@]} -ne 1 ]]; then
|
||||
echo "Failed to find newly created instance: $namePrefix"
|
||||
fi
|
||||
|
||||
declare instanceId
|
||||
IFS=: read -r instanceId _ < <(echo "${instances[0]}")
|
||||
(
|
||||
set -x
|
||||
# TODO: Poll that the instance has moved to the 'running' state instead of
|
||||
# blindly sleeping for 30 seconds...
|
||||
sleep 30
|
||||
aws ec2 associate-address \
|
||||
--instance-id "$instanceId" \
|
||||
--region "$region" \
|
||||
--allocation-id "$optionalAddress"
|
||||
)
|
||||
fi
|
||||
}
|
||||
|
||||
#
|
||||
# cloud_DeleteInstances
|
||||
#
|
||||
# Deletes all the instances listed in the `instances` array
|
||||
#
|
||||
cloud_DeleteInstances() {
|
||||
if [[ ${#instances[0]} -eq 0 ]]; then
|
||||
echo No instances to delete
|
||||
return
|
||||
fi
|
||||
declare names=("${instances[@]/:*/}")
|
||||
(
|
||||
set -x
|
||||
aws ec2 terminate-instances --region "$region" --instance-ids "${names[@]}"
|
||||
)
|
||||
}


#
# cloud_FetchFile [instanceName] [publicIp] [remoteFile] [localFile]
#
# Fetch a file from the given instance. This function uses a cloud-specific
# mechanism to fetch the file
#
cloud_FetchFile() {
  # shellcheck disable=SC2034 # instanceName is unused
  declare instanceName="$1"
  declare publicIp="$2"
  declare remoteFile="$3"
  declare localFile="$4"

  __cloud_SshPrivateKeyCheck
  (
    set -x
    scp \
      -o "StrictHostKeyChecking=no" \
      -o "UserKnownHostsFile=/dev/null" \
      -o "User=solana" \
      -o "IdentityFile=$sshPrivateKey" \
      -o "LogLevel=ERROR" \
      -F /dev/null \
      "solana@$publicIp:$remoteFile" "$localFile"
  )
}
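A hypothetical invocation, assuming `sshPrivateKey` points at a provisioned key and the target's public IP is known (all values are placeholders):

```
cloud_FetchFile validator-1 54.0.0.1 solana/validator.log ./validator.log
```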
201 net/scripts/gce-provider.sh Normal file
@@ -0,0 +1,201 @@
# |source| this file
#
# Utilities for working with GCE instances
#

# Default zone
zone="us-west1-b"
cloud_SetZone() {
  zone="$1"
}


#
# __cloud_FindInstances
#
# Find instances matching the specified pattern.
#
# For each matching instance, an entry in the `instances` array will be added with the
# following information about the instance:
# "name:zone:public IP:private IP"
#
# filter - The instances to filter on
#
# examples:
#   $ __cloud_FindInstances "name=exact-machine-name"
#   $ __cloud_FindInstances "name~^all-machines-with-a-common-machine-prefix"
#
__cloud_FindInstances() {
  declare filter="$1"
  instances=()

  declare name publicIp privateIp status
  while read -r name publicIp privateIp status; do
    printf "%-30s | publicIp=%-16s privateIp=%s status=%s\n" "$name" "$publicIp" "$privateIp" "$status"

    instances+=("$name:$publicIp:$privateIp")
  done < <(gcloud compute instances list \
             --filter "$filter" \
             --format 'value(name,networkInterfaces[0].accessConfigs[0].natIP,networkInterfaces[0].networkIP,status)')
}
#
# cloud_FindInstances [namePrefix]
#
# Find instances with names matching the specified prefix
#
# For each matching instance, an entry in the `instances` array will be added with the
# following information about the instance:
#   "name:public IP:private IP"
#
# namePrefix - The instance name prefix to look for
#
# examples:
#   $ cloud_FindInstances all-machines-with-a-common-machine-prefix
#
cloud_FindInstances() {
  declare namePrefix="$1"
  __cloud_FindInstances "name~^$namePrefix"
}

#
# cloud_FindInstance [name]
#
# Find an instance with a name matching the exact pattern.
#
# For each matching instance, an entry in the `instances` array will be added with the
# following information about the instance:
#   "name:public IP:private IP"
#
# name - The instance name to look for
#
# examples:
#   $ cloud_FindInstance exact-machine-name
#
cloud_FindInstance() {
  declare name="$1"
  __cloud_FindInstances "name=$name"
}
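A short sketch of how the two finders compose (instance names are hypothetical; entries follow the format documented above):

```
cloud_FindInstance validator-1    # exact match: at most one entry
cloud_FindInstances validator-    # prefix match: zero or more entries
for entry in "${instances[@]}"; do
  IFS=: read -r name publicIp privateIp <<<"$entry"
  echo "$name is reachable at $publicIp (internal: $privateIp)"
done
```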

#
# cloud_CreateInstances [networkName] [namePrefix] [numNodes] [imageName]
#                       [machineType] [bootDiskSize] [startupScript]
#                       [address] [bootDiskType]
#
# Creates one or more identical instances.
#
# networkName   - unique name of this testnet
# namePrefix    - unique string to prefix all the instance names with
# numNodes      - number of instances to create
# imageName     - Disk image for the instances
# machineType   - GCE machine type. Note that this may also include an
#                 `--accelerator=` or other |gcloud compute instances create|
#                 options (eg, request 4 K80 GPUs with
#                 "--accelerator count=4,type=nvidia-tesla-k80")
# bootDiskSize  - Optional size of the boot disk in GB
# startupScript - Optional startup script to execute when the instance boots
# address       - Optional name of the GCE static IP address to attach to the
#                 instance. Requires that |numNodes| = 1 and that |address|
#                 has been provisioned in the GCE region that is hosting |zone|
# bootDiskType  - Optional boot disk type, passed through to
#                 |--boot-disk-type| (eg, pd-ssd)
#
# Tip: use cloud_FindInstances to locate the instances once this function
# returns
cloud_CreateInstances() {
  declare networkName="$1"
  declare namePrefix="$2"
  declare numNodes="$3"
  declare imageName="$4"
  declare machineType="$5"
  declare optionalBootDiskSize="$6"
  declare optionalStartupScript="$7"
  declare optionalAddress="$8"
  declare optionalBootDiskType="$9"

  declare nodes
  if [[ $numNodes = 1 ]]; then
    nodes=("$namePrefix")
  else
    read -ra nodes <<<$(seq -f "${namePrefix}%0${#numNodes}g" 1 "$numNodes")
  fi

  declare -a args
  args=(
    --zone "$zone"
    --tags testnet
    --metadata "testnet=$networkName"
    --image "$imageName"
    --maintenance-policy TERMINATE
    --no-restart-on-failure
  )

  # shellcheck disable=SC2206 # Do not want to quote $machineType as it may contain extra args
  args+=(--machine-type $machineType)
  if [[ -n $optionalBootDiskSize ]]; then
    args+=(
      --boot-disk-size "${optionalBootDiskSize}GB"
    )
  fi
  if [[ -n $optionalStartupScript ]]; then
    args+=(
      --metadata-from-file "startup-script=$optionalStartupScript"
    )
  fi
  if [[ -n $optionalBootDiskType ]]; then
    args+=(
      --boot-disk-type "${optionalBootDiskType}"
    )
  fi

  if [[ -n $optionalAddress ]]; then
    [[ $numNodes = 1 ]] || {
      echo "Error: address may not be supplied when provisioning multiple nodes: $optionalAddress"
      exit 1
    }
    args+=(
      --address "$optionalAddress"
    )
  fi

  (
    set -x
    gcloud beta compute instances create "${nodes[@]}" "${args[@]}"
  )
}
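For illustration, a hypothetical call that provisions two GPU nodes on SSD boot disks; the image name, prefix, and startup script are placeholders, and note how extra |gcloud| options ride along inside the machineType argument:

```
cloud_SetZone us-west1-b
cloud_CreateInstances mytestnet validator- 2 my-boot-image \
  "n1-standard-16 --accelerator count=4,type=nvidia-tesla-k80" \
  100 startup.sh "" pd-ssd
```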

#
# cloud_DeleteInstances
#
# Deletes all the instances listed in the `instances` array
#
cloud_DeleteInstances() {
  if [[ ${#instances[@]} -eq 0 ]]; then
    echo No instances to delete
    return
  fi
  declare names=("${instances[@]/:*/}")

  (
    set -x
    gcloud beta compute instances delete --zone "$zone" --quiet "${names[@]}"
  )
}


#
# cloud_FetchFile [instanceName] [publicIp] [remoteFile] [localFile]
#
# Fetch a file from the given instance. This function uses a cloud-specific
# mechanism to fetch the file
#
cloud_FetchFile() {
  declare instanceName="$1"
  # shellcheck disable=SC2034 # publicIp is unused
  declare publicIp="$2"
  declare remoteFile="$3"
  declare localFile="$4"

  (
    set -x
    gcloud compute scp --zone "$zone" "$instanceName:$remoteFile" "$localFile"
  )
}
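Since transport is delegated to |gcloud compute scp|, only the instance name matters here; a hypothetical fetch (publicIp is unused and may be left empty):

```
cloud_FetchFile validator-1 "" solana/validator.log ./validator.log
```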
25 net/scripts/install-docker.sh Executable file
@@ -0,0 +1,25 @@
#!/bin/bash -ex

[[ $(uname) = Linux ]] || exit 1
[[ $USER = root ]] || exit 1

apt-get update
apt-get install -y \
  apt-transport-https \
  ca-certificates \
  curl \
  software-properties-common

curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -

add-apt-repository \
  "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"

apt-get update
apt-get install -y docker-ce
docker run hello-world

# Grant the solana user access to docker
if id solana; then
  addgroup solana docker
fi
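A quick hypothetical check that the group change took effect; group membership only applies to fresh login sessions:

```
sudo -iu solana docker run --rm hello-world
```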
30 net/scripts/install-earlyoom.sh Executable file
@@ -0,0 +1,30 @@
#!/bin/bash -ex
#
# Install EarlyOOM
#

[[ $(uname) = Linux ]] || exit 1
[[ $USER = root ]] || exit 1

# 64 - enable signalling of processes (term, kill, oom-kill)
# TODO: This setting will not persist across reboots
sysctl -w kernel.sysrq=$(( $(cat /proc/sys/kernel/sysrq) | 64 ))

if command -v earlyoom; then
  systemctl status earlyoom
else
  wget -r -l1 -np http://ftp.us.debian.org/debian/pool/main/e/earlyoom/ -A 'earlyoom_1.1-*_amd64.deb' -e robots=off -nd
  apt install --quiet --yes ./earlyoom_1.1-*_amd64.deb

  cat > earlyoom <<OOM
# use the kernel OOM killer, trigger at 20% available RAM,
EARLYOOM_ARGS="-k -m 20"
OOM
  cp earlyoom /etc/default/
  rm earlyoom

  systemctl stop earlyoom
  systemctl enable earlyoom
  systemctl start earlyoom
fi
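As a sanity sketch of the sysrq arithmetic above: the current bitmask is OR'd with 64, so other enabled bits are preserved while process signalling is switched on. For example:

```
echo $(( 176 | 64 ))         # 240: bit 64 added, existing bits kept
cat /proc/sys/kernel/sysrq   # reports the updated bitmask after the script runs
```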
18 net/scripts/install-libssl-compatability.sh Executable file
@@ -0,0 +1,18 @@
#!/bin/bash -ex

[[ $(uname) = Linux ]] || exit 1
[[ $USER = root ]] || exit 1

# Install libssl-dev to be compatible with binaries built on an Ubuntu machine...
apt-get update
apt-get --assume-yes install libssl-dev

# Install libssl1.1 to be compatible with binaries built in the
# solanalabs/rust docker image
#
# cc: https://github.com/solana-labs/solana/issues/1090
# cc: https://packages.ubuntu.com/bionic/amd64/libssl1.1/download
wget http://security.ubuntu.com/ubuntu/pool/main/o/openssl/libssl1.1_1.1.0g-2ubuntu4.1_amd64.deb
dpkg -i libssl1.1_1.1.0g-2ubuntu4.1_amd64.deb
rm libssl1.1_1.1.0g-2ubuntu4.1_amd64.deb
19 net/scripts/install-rsync.sh Executable file
@@ -0,0 +1,19 @@
#!/bin/bash -ex
#
# Rsync setup for Snap builds
#

[[ $(uname) = Linux ]] || exit 1
[[ $USER = root ]] || exit 1

apt-get --assume-yes install rsync
cat > /etc/rsyncd.conf <<-EOF
[config]
path = /var/snap/solana/current/config
hosts allow = *
read only = true
EOF

systemctl enable rsync
systemctl start rsync
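With the daemon running, a client can pull the exported `[config]` module; the host address below is a placeholder:

```
rsync -az rsync://203.0.113.10/config ./solana-config/
```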
11 net/scripts/network-config.sh Executable file
@@ -0,0 +1,11 @@
#!/bin/bash -ex
#

[[ $(uname) = Linux ]] || exit 1
[[ $USER = root ]] || exit 1

sudo sysctl -w net.core.rmem_default=1610612736
sudo sysctl -w net.core.rmem_max=1610612736

sudo sysctl -w net.core.wmem_default=1610612736
sudo sysctl -w net.core.wmem_max=1610612736
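For context, the magic number is 1.5 GiB of socket buffer; a quick sketch verifying the arithmetic and the applied setting:

```
echo $(( 3 * 1024 * 1024 * 1024 / 2 ))   # 1610612736, ie. 1.5 GiB
sysctl net.core.rmem_max                 # net.core.rmem_max = 1610612736
```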
12 net/scripts/rsync-retry.sh Executable file
@@ -0,0 +1,12 @@
#!/bin/bash
#
# rsync wrapper that retries a few times on failure
#

for i in $(seq 1 5); do
  (
    set -x
    rsync "$@"
  ) && exit 0
  echo Retry "$i"...
done

# All retries failed; report failure instead of falling through with
# the exit status of the last |echo|
exit 1
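Hypothetical usage, passing arguments through exactly as a plain rsync invocation would (host and paths are placeholders):

```
net/scripts/rsync-retry.sh -az solana@203.0.113.10:~/solana/config/ ./config/
```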
69 net/ssh.sh Executable file
@@ -0,0 +1,69 @@
#!/bin/bash

here=$(dirname "$0")
# shellcheck source=net/common.sh
source "$here"/common.sh

usage() {
  exitcode=0
  if [[ -n "$1" ]]; then
    exitcode=1
    echo "Error: $*"
  fi
  cat <<EOF
usage: $0 [ipAddress] [extra ssh arguments]

ssh into a node

 ipAddress - IP address of the desired node.

If ipAddress is unspecified, a list of available nodes will be displayed.

EOF
  exit $exitcode
}

while getopts "h?" opt; do
  case $opt in
  h | \?)
    usage
    ;;
  *)
usage "Error: unhandled option: $opt"
    ;;
  esac
done

loadConfigFile

ipAddress=$1
shift
if [[ -n "$ipAddress" ]]; then
  set -x
  exec ssh "${sshOptions[@]}" "$ipAddress" "$@"
fi

printNode() {
  declare nodeType=$1
  declare ip=$2
  printf "  %-25s | For logs run: $0 $ip tail -f solana/$nodeType.log\n" "$0 $ip"
}

echo Leader:
printNode leader "$leaderIp"
echo
echo Validators:
for ipAddress in "${validatorIpList[@]}"; do
  printNode validator "$ipAddress"
done
echo
echo Clients:
if [[ ${#clientIpList[@]} -eq 0 ]]; then
  echo "  None"
else
  for ipAddress in "${clientIpList[@]}"; do
    printNode client "$ipAddress"
  done
fi

exit 0
1 programs/bpf/c/.gitignore vendored Normal file
@@ -0,0 +1 @@
/out/
1 programs/bpf/c/makefile Normal file
@@ -0,0 +1 @@
include sdk/bpf.mk
33 programs/bpf/c/sdk/README.md Normal file
@@ -0,0 +1,33 @@

## Prerequisites

## LLVM / clang 7.0.0
http://releases.llvm.org/download.html

### Linux Ubuntu 16.04 (xenial)
```
$ wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | sudo apt-key add -
$ sudo apt-add-repository "deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial-7 main"
$ sudo apt-get update
$ sudo apt-get install -y clang-7
```

### Linux Ubuntu 14.04 (trusty)
```
$ wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | sudo apt-key add -
$ sudo apt-add-repository "deb http://apt.llvm.org/trusty/ llvm-toolchain-trusty-7 main"
$ sudo apt-get update
$ sudo apt-get install -y clang-7
```

### macOS

The following depends on Homebrew; instructions for installing Homebrew are at https://brew.sh

Once Homebrew is installed, ensure the latest llvm is installed:
```
$ brew update # <- ensure your brew is up to date
$ brew install llvm # <- should output "Warning: llvm 7.0.0 is already installed and up-to-date"
$ brew --prefix llvm # <- should output "/usr/local/opt/llvm"
```
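Note that the SDK's makefile (bpf.mk, below) derives `LLVM_DIR` from `brew --prefix llvm` on macOS and otherwise falls back to the versioned `clang-7`/`llc-7` binaries; either behavior can be overridden on the make command line, for example:

```
$ make LLVM_DIR=/usr/local/opt/llvm all
```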
116 programs/bpf/c/sdk/bpf.mk Normal file
@@ -0,0 +1,116 @@

all:
.PHONY: help all clean

ifneq ($(V),1)
_@ :=@
endif

INC_DIRS ?=
SRC_DIR ?= ./src
OUT_DIR ?= ./out

OS=$(shell uname)
ifeq ($(OS),Darwin)
LLVM_DIR ?= $(shell brew --prefix llvm)
endif

ifdef LLVM_DIR
CC := $(LLVM_DIR)/bin/clang
LLC := $(LLVM_DIR)/bin/llc
OBJ_DUMP := $(LLVM_DIR)/bin/llvm-objdump
else
CC := clang-7
LLC := llc-7
OBJ_DUMP := llvm-objdump-7
endif

SYSTEM_INC_DIRS := -isystem $(dir $(lastword $(MAKEFILE_LIST)))inc

CC_FLAGS := \
  -Werror \
  -target bpf \
  -O2 \
  -emit-llvm \
  -fno-builtin \

LLC_FLAGS := \
  -march=bpf \
  -filetype=obj \
  -function-sections \

OBJ_DUMP_FLAGS := \
  -color \
  -source \
  -disassemble \

help:
	@echo 'BPF Program makefile'
	@echo ''
	@echo 'This makefile will build BPF Programs from C source files into ELFs'
	@echo ''
	@echo 'Assumptions:'
	@echo ' - Programs are a single .c source file (may include headers)'
	@echo ' - Programs are located in the source directory: $(SRC_DIR)'
	@echo ' - Programs are named by their basename (eg. file name:foo.c -> program name:foo)'
	@echo ' - Output files will be placed in the directory: $(OUT_DIR)'
	@echo ''
	@echo 'User settings'
	@echo ' - The following settings are overridable on the command line, default values shown:'
	@echo '   - Show commands while building:'
	@echo '     V=1'
	@echo '   - List of include directories:'
	@echo '     INC_DIRS=$(INC_DIRS)'
	@echo '   - List of system include directories:'
	@echo '     SYSTEM_INC_DIRS=$(SYSTEM_INC_DIRS)'
	@echo '   - Location of source files:'
	@echo '     SRC_DIR=$(SRC_DIR)'
	@echo '   - Location to place output files:'
	@echo '     OUT_DIR=$(OUT_DIR)'
	@echo '   - Location of LLVM:'
	@echo '     LLVM_DIR=$(LLVM_DIR)'
	@echo ''
	@echo 'Usage:'
	@echo ' - make help - This help message'
	@echo ' - make all - Builds all the programs in the directory: $(SRC_DIR)'
	@echo ' - make clean - Cleans all programs'
	@echo ' - make dump_<program name> - Dumps the contents of the program to stdout'
	@echo ' - make <program name> - Build a single program by name'
	@echo ''
	@echo 'Available programs:'
	$(foreach name, $(PROGRAM_NAMES), @echo ' - $(name)'$(\n))
	@echo ''
	@echo 'Example:'
	@echo ' - Assuming a program named foo (src/foo.c)'
	@echo ' - make foo'
	@echo ' - make dump_foo'

.PRECIOUS: $(OUT_DIR)/%.bc
$(OUT_DIR)/%.bc: $(SRC_DIR)/%.c
	@echo "[cc] $@ ($<)"
	$(_@)mkdir -p $(OUT_DIR)
	$(_@)$(CC) $(CC_FLAGS) $(SYSTEM_INC_DIRS) $(INC_DIRS) -o $@ -c $< -MD -MF $(@:.bc=.d)

.PRECIOUS: $(OUT_DIR)/%.o
$(OUT_DIR)/%.o: $(OUT_DIR)/%.bc
	@echo "[llc] $@ ($<)"
	$(_@)$(LLC) $(LLC_FLAGS) -o $@ $<

-include $(wildcard $(OUT_DIR)/*.d)

PROGRAM_NAMES := $(notdir $(basename $(wildcard $(SRC_DIR)/*.c)))

define \n


endef

all: $(PROGRAM_NAMES)

%: $(addprefix $(OUT_DIR)/, %.o) ;

dump_%: %
	$(_@)$(OBJ_DUMP) $(OBJ_DUMP_FLAGS) $(addprefix $(OUT_DIR)/, $(addsuffix .o, $<))

clean:
	rm -rf $(OUT_DIR)
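A hypothetical session with this makefile, assuming a single BPF program at src/foo.c:

```
$ make foo                           # clang emits out/foo.bc, llc lowers it to out/foo.o
$ make dump_foo                      # disassembles out/foo.o via llvm-objdump
$ make V=1 INC_DIRS=-I../inc foo     # verbose build with an extra include directory
$ make clean                         # removes out/
```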
Some files were not shown because too many files have changed in this diff