Compare commits: v0.7.0-beta...v0.7.2
602 commits (6f3beb915c through 033f6dcbcb)
.buildkite/hooks/post-checkout (2 changes, new file)
@@ -0,0 +1,2 @@
CI_BUILD_START=$(date +%s)
export CI_BUILD_START

.buildkite/hooks/post-checkout.sh (1 change, symbolic link)
@@ -0,0 +1 @@
post-checkout

.buildkite/hooks/post-command (49 changes, executable file → normal file)
@@ -1,14 +1,45 @@
#!/bin/bash -e

[[ -n "$CARGO_TARGET_CACHE_NAME" ]] || exit 0

#
# Save target/ for the next CI build on this machine
#
(
  d=$HOME/cargo-target-cache/"$CARGO_TARGET_CACHE_NAME"
  mkdir -p $d
  set -x
  rsync -a --delete --link-dest=$PWD target $d
  du -hs $d
)
if [[ -n $CARGO_TARGET_CACHE_NAME ]]; then
  (
    d=$HOME/cargo-target-cache/"$CARGO_TARGET_CACHE_NAME"
    mkdir -p "$d"
    set -x
    rsync -a --delete --link-dest="$PWD" target "$d"
    du -hs "$d"
  )
fi

#
# Add job_stats data point
#
if [[ -z $CI_BUILD_START ]]; then
  echo Error: CI_BUILD_START empty
else
  CI_BUILD_DURATION=$(( $(date +%s) - CI_BUILD_START + 1 ))

  CI_LABEL=${BUILDKITE_LABEL:-build label missing}

  PR=false
  if [[ $BUILDKITE_BRANCH =~ pull/* ]]; then
    PR=true
  fi

  SUCCESS=true
  if [[ $BUILDKITE_COMMAND_EXIT_STATUS != 0 ]]; then
    SUCCESS=false
  fi

  point_tags="pipeline=$BUILDKITE_PIPELINE_SLUG,job=$CI_LABEL,pr=$PR,success=$SUCCESS"
  point_tags="${point_tags// /\\ }" # Escape spaces

  point_fields="duration=$CI_BUILD_DURATION"
  point_fields="${point_fields// /\\ }" # Escape spaces

  point="job_stats,$point_tags $point_fields"

  multinode-demo/metrics_write_datapoint.sh "$point" || true
fi
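The job_stats block above assembles an InfluxDB line-protocol point by hand. For illustration, a minimal Rust sketch of the same construction (not part of the hook; the tag and field values below are hypothetical):

```rust
// Sketch of the line-protocol point the post-command hook builds in bash.
fn escape_spaces(s: &str) -> String {
    s.replace(' ', "\\ ")
}

fn job_stats_point(pipeline: &str, job: &str, pr: bool, success: bool, duration_secs: u64) -> String {
    let tags = format!("pipeline={},job={},pr={},success={}", pipeline, job, pr, success);
    let fields = format!("duration={}", duration_secs);
    // Line protocol: measurement,tag_set field_set
    format!("job_stats,{} {}", escape_spaces(&tags), escape_spaces(&fields))
}

fn main() {
    // Hypothetical values standing in for the Buildkite environment variables.
    let point = job_stats_point("solana", "stable [public]", false, true, 1234);
    assert_eq!(
        point,
        "job_stats,pipeline=solana,job=stable\\ [public],pr=false,success=true duration=1234"
    );
}
```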
.buildkite/hooks/post-command.sh (1 change, symbolic link)
@@ -0,0 +1 @@
post-command

.buildkite/hooks/pre-command (4 changes, executable file → normal file)
@@ -7,7 +7,7 @@
#
(
  d=$HOME/cargo-target-cache/"$CARGO_TARGET_CACHE_NAME"
  mkdir -p $d/target
  mkdir -p "$d"/target
  set -x
  rsync -a --delete --link-dest=$d $d/target .
  rsync -a --delete --link-dest="$d" "$d"/target .
)

.buildkite/hooks/pre-command.sh (1 change, symbolic link)
@@ -0,0 +1 @@
pre-command

.clippy.toml (1 change, new file)
@@ -0,0 +1 @@
too-many-arguments-threshold = 9

.gitignore (5 changes, vendored)
@@ -1,5 +1,6 @@
Cargo.lock
/target/

**/*.rs.bk
.cargo

@@ -9,3 +10,7 @@ Cargo.lock
/config-drone/
/config-validator/
/config-client/
/multinode-demo/test/config-client/

# test temp files, ledgers, etc.
/farf/

CONTRIBUTING.md (53 changes, new file)
@@ -0,0 +1,53 @@
Solana Coding Guidelines
===

The goal of these guidelines is to improve developer productivity by allowing developers to
jump any file in the codebase and not need to adapt to inconsistencies in how the code is
written. The codebase should appear as if it had been authored by a single developer. If you
don't agree with a convention, submit a PR patching this document and let's discuss! Once
the PR is accepted, *all* code should be updated as soon as possible to reflect the new
conventions.

Rust coding conventions
---

* All Rust code is formatted using the latest version of `rustfmt`. Once installed, it will be
  updated automatically when you update the compiler with `rustup`.

* All Rust code is linted with Clippy. If you'd prefer to ignore its advice, do so explicitly:

  ```rust
  #[cfg_attr(feature = "cargo-clippy", allow(too_many_arguments))]
  ```

  Note: Clippy defaults can be overridden in the top-level file `.clippy.toml`.

* For variable names, when in doubt, spell it out. The mapping from type names to variable names
  is to lowercase the type name, putting an underscore before each capital letter. Variable names
  should *not* be abbreviated unless being used as closure arguments and the brevity improves
  readability. When a function has multiple instances of the same type, qualify each with a
  prefix and underscore (i.e. alice_keypair) or a numeric suffix (i.e. tx0).

* For function and method names, use `<verb>_<subject>`. For unit tests, that verb should
  always be `test` and for benchmarks the verb should always be `bench`. Avoid namespacing
  function names with some arbitrary word. Avoid abreviating words in function names.

* As they say, "When in Rome, do as the Romans do." A good patch should acknowledge the coding
  conventions of the code that surrounds it, even in the case where that code has not yet been
  updated to meet the conventions described here.


Terminology
---

Inventing new terms is allowed, but should only be done when the term is widely used and
understood. Avoid introducing new 3-letter terms, which can be confused with 3-letter acronyms.

Some terms we currently use regularly in the codebase:

* fullnode: n. A fully participating network node.
* hash: n. A SHA-256 Hash.
* keypair: n. A Ed25519 key-pair, containing a public and private key.
* pubkey: n. The public key of a Ed25519 key-pair.
* sigverify: v. To verify a Ed25519 digital signature.
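To make the naming and Clippy conventions above concrete, a small illustrative Rust sketch (not from the repository; `Keypair` and `Transaction` stand in for the crate's real types):

```rust
// Stand-ins for the crate's real types, for illustration only.
struct Keypair;
struct Transaction;

// One instance of a type: lowercase the type name (`Keypair` -> `keypair`).
// Multiple instances: a prefix (`alice_keypair`) or a numeric suffix (`tx0`).
// The attribute below is the documented way to opt out of a Clippy lint.
#[cfg_attr(feature = "cargo-clippy", allow(too_many_arguments))]
fn process_transactions(alice_keypair: Keypair, bob_keypair: Keypair, tx0: Transaction, tx1: Transaction) {
    let _ = (alice_keypair, bob_keypair, tx0, tx1);
}

fn main() {
    process_transactions(Keypair, Keypair, Transaction, Transaction);
}
```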
Cargo.toml (97 changes)
@@ -1,7 +1,7 @@
[package]
name = "solana"
description = "Blockchain, Rebuilt for Scale"
version = "0.7.0-beta"
version = "0.7.2"
documentation = "https://docs.rs/solana"
homepage = "http://solana.com/"
readme = "README.md"
@@ -10,16 +10,24 @@ authors = [
    "Anatoly Yakovenko <anatoly@solana.com>",
    "Greg Fitzgerald <greg@solana.com>",
    "Stephen Akridge <stephen@solana.com>",
    "Michael Vines <mvines@solana.com>",
    "Rob Walker <rob@solana.com>",
    "Pankaj Garg <pankaj@solana.com>",
    "Tyera Eulberg <tyera@solana.com>",
]
license = "Apache-2.0"

[[bin]]
name = "solana-client-demo"
path = "src/bin/client-demo.rs"
name = "solana-bench-tps"
path = "src/bin/bench-tps.rs"

[[bin]]
name = "solana-wallet"
path = "src/bin/wallet.rs"
name = "solana-bench-streamer"
path = "src/bin/bench-streamer.rs"

[[bin]]
name = "solana-drone"
path = "src/bin/drone.rs"

[[bin]]
name = "solana-fullnode"
@@ -34,12 +42,16 @@ name = "solana-genesis"
path = "src/bin/genesis.rs"

[[bin]]
name = "solana-mint"
path = "src/bin/mint.rs"
name = "solana-ledger-tool"
path = "src/bin/ledger-tool.rs"

[[bin]]
name = "solana-drone"
path = "src/bin/drone.rs"
name = "solana-keygen"
path = "src/bin/keygen.rs"

[[bin]]
name = "solana-wallet"
path = "src/bin/wallet.rs"

[badges]
codecov = { repository = "solana-labs/solana", branch = "master", service = "github" }
@@ -51,32 +63,57 @@ cuda = []
erasure = []

[dependencies]
rayon = "1.0.0"
sha2 = "0.7.0"
atty = "0.2"
bincode = "1.0.0"
bs58 = "0.2.0"
byteorder = "1.2.1"
chrono = { version = "0.4.0", features = ["serde"] }
clap = "2.31"
dirs = "1.0.2"
env_logger = "0.5.12"
futures = "0.1.21"
generic-array = { version = "0.11.1", default-features = false, features = ["serde"] }
getopts = "0.2"
influx_db_client = "0.3.4"
itertools = "0.7.8"
libc = "0.2.1"
log = "0.4.2"
matches = "0.1.6"
pnet_datalink = "0.21.0"
rand = "0.5.1"
rayon = "1.0.0"
reqwest = "0.8.6"
ring = "0.13.2"
sha2 = "0.7.0"
serde = "1.0.27"
serde_derive = "1.0.27"
serde_json = "1.0.10"
ring = "0.12.1"
untrusted = "0.5.1"
bincode = "1.0.0"
chrono = { version = "0.4.0", features = ["serde"] }
log = "0.4.2"
env_logger = "0.5.10"
matches = "0.1.6"
byteorder = "1.2.1"
libc = "0.2.1"
getopts = "0.2"
atty = "0.2"
rand = "0.5.1"
pnet_datalink = "0.21.0"
sys-info = "0.5.6"
tokio = "0.1"
tokio-codec = "0.1"
tokio-core = "0.1.17"
tokio-io = "0.1"
itertools = "0.7.8"
bs58 = "0.2.0"
p2p = "0.5.2"
futures = "0.1.21"
clap = "2.31"
reqwest = "0.8.6"
untrusted = "0.6.2"

[dev-dependencies]
criterion = "0.2"

[[bench]]
name = "bank"
harness = false

[[bench]]
name = "banking_stage"
harness = false

[[bench]]
name = "ledger"
harness = false

[[bench]]
name = "signature"
harness = false

[[bench]]
name = "sigverify"
harness = false
README.md (48 changes)
@@ -47,7 +47,7 @@ $ source $HOME/.cargo/env
Now checkout the code from github:

```bash
$ git clone https://github.com/solana-labs/solana.git
$ git clone https://github.com/solana-labs/solana.git
$ cd solana
```

@@ -71,6 +71,20 @@ These files can be generated by running the following script.
$ ./multinode-demo/setup.sh
```

Drone
---

In order for the leader, client and validators to work, we'll need to
spin up a drone to give out some test tokens. The drone delivers Milton
Friedman-style "air drops" (free tokens to requesting clients) to be used in
test transactions.

Start the drone on the leader node with:

```bash
$ ./multinode-demo/drone.sh
```

Singlenode Testnet
---

@@ -84,16 +98,9 @@ Now start the server:
$ ./multinode-demo/leader.sh
```

To run a performance-enhanced fullnode on Linux,
[CUDA 9.2](https://developer.nvidia.com/cuda-downloads) must be installed on
your system:
```bash
$ ./fetch-perf-libs.sh
$ SOLANA_CUDA=1 ./multinode-demo/leader.sh
```

Wait a few seconds for the server to initialize. It will print "Ready." when it's ready to
receive transactions.
receive transactions. The leader will request some tokens from the drone if it doesn't have any.
The drone does not need to be running for subsequent leader starts.

Multinode Testnet
---

@@ -104,15 +111,18 @@ To run a multinode testnet, after starting a leader node, spin up some validator
$ ./multinode-demo/validator.sh ubuntu@10.0.1.51:~/solana 10.0.1.51
```

To run a performance-enhanced fullnode on Linux,
To run a performance-enhanced leader or validator (on Linux),
[CUDA 9.2](https://developer.nvidia.com/cuda-downloads) must be installed on
your system:
```bash
$ ./fetch-perf-libs.sh
$ SOLANA_CUDA=1 ./multinode-demo/leader.sh ubuntu@10.0.1.51:~/solana 10.0.1.51
$ SOLANA_CUDA=1 ./multinode-demo/leader.sh
$ SOLANA_CUDA=1 ./multinode-demo/validator.sh ubuntu@10.0.1.51:~/solana 10.0.1.51

```

Testnet Client Demo
---

@@ -146,7 +156,7 @@ $ sudo snap install solana --edge --devmode
Once installed the usual Solana programs will be available as `solona.*` instead
of `solana-*`. For example, `solana.fullnode` instead of `solana-fullnode`.

Update to the latest version at any time with
Update to the latest version at any time with:
```bash
$ snap info solana
$ sudo snap refresh solana --devmode
@@ -156,8 +166,14 @@ $ sudo snap refresh solana --devmode
The snap supports running a leader, validator or leader+drone node as a system
daemon.

Run `sudo snap get solana` to view the current daemon configuration, and
`sudo snap logs -f solana` to view the daemon logs.
Run `sudo snap get solana` to view the current daemon configuration. To view
daemon logs:
1. Run `sudo snap logs -n=all solana` to view the daemon initialization log
2. Runtime logging can be found under `/var/snap/solana/current/leader/`,
`/var/snap/solana/current/validator/`, or `/var/snap/solana/current/drone/` depending
on which `mode=` was selected. Within each log directory the file `current`
contains the latest log, and the files `*.s` (if present) contain older rotated
logs.

Disable the daemon at any time by running:

```bash
@@ -276,7 +292,7 @@ to see the debug and info sections for streamer and server respectively. General
we are using debug for infrequent debug messages, trace for potentially frequent messages and
info for performance-related logging.

Attaching to a running process with gdb
Attaching to a running process with gdb:

```
$ sudo gdb
benches/bank.rs (66 changes, new file)
@@ -0,0 +1,66 @@
#[macro_use]
extern crate criterion;
extern crate bincode;
extern crate rayon;
extern crate solana;

use bincode::serialize;
use criterion::{Bencher, Criterion};
use rayon::prelude::*;
use solana::bank::*;
use solana::hash::hash;
use solana::mint::Mint;
use solana::signature::{Keypair, KeypairUtil};
use solana::transaction::Transaction;

fn bench_process_transaction(bencher: &mut Bencher) {
    let mint = Mint::new(100_000_000);
    let bank = Bank::new(&mint);

    // Create transactions between unrelated parties.
    let transactions: Vec<_> = (0..4096)
        .into_par_iter()
        .map(|i| {
            // Seed the 'from' account.
            let rando0 = Keypair::new();
            let tx = Transaction::new(&mint.keypair(), rando0.pubkey(), 10_000, mint.last_id());
            assert!(bank.process_transaction(&tx).is_ok());

            // Seed the 'to' account and a cell for its signature.
            let last_id = hash(&serialize(&i).unwrap()); // Unique hash
            bank.register_entry_id(&last_id);

            let rando1 = Keypair::new();
            let tx = Transaction::new(&rando0, rando1.pubkey(), 1, last_id);
            assert!(bank.process_transaction(&tx).is_ok());

            // Finally, return the transaction to the benchmark.
            tx
        })
        .collect();

    bencher.iter_with_setup(
        || {
            // Since benchmarker runs this multiple times, we need to clear the signatures.
            bank.clear_signatures();
            transactions.clone()
        },
        |transactions| {
            let results = bank.process_transactions(transactions);
            assert!(results.iter().all(Result::is_ok));
        },
    )
}

fn bench(criterion: &mut Criterion) {
    criterion.bench_function("bench_process_transaction", |bencher| {
        bench_process_transaction(bencher);
    });
}

criterion_group!(
    name = benches;
    config = Criterion::default().sample_size(2);
    targets = bench
);
criterion_main!(benches);
benches/banking_stage.rs (229 changes, new file)
@@ -0,0 +1,229 @@
extern crate bincode;
#[macro_use]
extern crate criterion;
extern crate rayon;
extern crate solana;

use criterion::{Bencher, Criterion};
use rayon::prelude::*;
use solana::bank::Bank;
use solana::banking_stage::BankingStage;
use solana::mint::Mint;
use solana::packet::{to_packets_chunked, PacketRecycler};
use solana::record_stage::Signal;
use solana::signature::{Keypair, KeypairUtil};
use solana::transaction::Transaction;
use std::iter;
use std::sync::mpsc::{channel, Receiver};
use std::sync::Arc;

// use self::test::Bencher;
// use bank::{Bank, MAX_ENTRY_IDS};
// use bincode::serialize;
// use hash::hash;
// use mint::Mint;
// use rayon::prelude::*;
// use signature::{Keypair, KeypairUtil};
// use std::collections::HashSet;
// use std::time::Instant;
// use transaction::Transaction;
//
// fn bench_process_transactions(_bencher: &mut Bencher) {
//     let mint = Mint::new(100_000_000);
//     let bank = Bank::new(&mint);
//     // Create transactions between unrelated parties.
//     let txs = 100_000;
//     let last_ids: Mutex<HashSet<Hash>> = Mutex::new(HashSet::new());
//     let transactions: Vec<_> = (0..txs)
//         .into_par_iter()
//         .map(|i| {
//             // Seed the 'to' account and a cell for its signature.
//             let dummy_id = i % (MAX_ENTRY_IDS as i32);
//             let last_id = hash(&serialize(&dummy_id).unwrap()); // Semi-unique hash
//             {
//                 let mut last_ids = last_ids.lock().unwrap();
//                 if !last_ids.contains(&last_id) {
//                     last_ids.insert(last_id);
//                     bank.register_entry_id(&last_id);
//                 }
//             }
//
//             // Seed the 'from' account.
//             let rando0 = Keypair::new();
//             let tx = Transaction::new(&mint.keypair(), rando0.pubkey(), 1_000, last_id);
//             bank.process_transaction(&tx).unwrap();
//
//             let rando1 = Keypair::new();
//             let tx = Transaction::new(&rando0, rando1.pubkey(), 2, last_id);
//             bank.process_transaction(&tx).unwrap();
//
//             // Finally, return a transaction that's unique
//             Transaction::new(&rando0, rando1.pubkey(), 1, last_id)
//         })
//         .collect();
//
//     let banking_stage = EventProcessor::new(bank, &mint.last_id(), None);
//
//     let now = Instant::now();
//     assert!(banking_stage.process_transactions(transactions).is_ok());
//     let duration = now.elapsed();
//     let sec = duration.as_secs() as f64 + duration.subsec_nanos() as f64 / 1_000_000_000.0;
//     let tps = txs as f64 / sec;
//
//     // Ensure that all transactions were successfully logged.
//     drop(banking_stage.historian_input);
//     let entries: Vec<Entry> = banking_stage.output.lock().unwrap().iter().collect();
//     assert_eq!(entries.len(), 1);
//     assert_eq!(entries[0].transactions.len(), txs as usize);
//
//     println!("{} tps", tps);
// }

fn check_txs(receiver: &Receiver<Signal>, ref_tx_count: usize) {
    let mut total = 0;
    loop {
        let signal = receiver.recv().unwrap();
        if let Signal::Transactions(transactions) = signal {
            total += transactions.len();
            if total >= ref_tx_count {
                break;
            }
        } else {
            assert!(false);
        }
    }
    assert_eq!(total, ref_tx_count);
}

fn bench_banking_stage_multi_accounts(bencher: &mut Bencher) {
    let tx = 10_000_usize;
    let mint_total = 1_000_000_000_000;
    let mint = Mint::new(mint_total);
    let num_dst_accounts = 8 * 1024;
    let num_src_accounts = 8 * 1024;

    let srckeys: Vec<_> = (0..num_src_accounts).map(|_| Keypair::new()).collect();
    let dstkeys: Vec<_> = (0..num_dst_accounts)
        .map(|_| Keypair::new().pubkey())
        .collect();

    let transactions: Vec<_> = (0..tx)
        .map(|i| {
            Transaction::new(
                &srckeys[i % num_src_accounts],
                dstkeys[i % num_dst_accounts],
                i as i64,
                mint.last_id(),
            )
        })
        .collect();

    let (verified_sender, verified_receiver) = channel();
    let (signal_sender, signal_receiver) = channel();
    let packet_recycler = PacketRecycler::default();

    let setup_transactions: Vec<_> = (0..num_src_accounts)
        .map(|i| {
            Transaction::new(
                &mint.keypair(),
                srckeys[i].pubkey(),
                mint_total / num_src_accounts as i64,
                mint.last_id(),
            )
        })
        .collect();

    bencher.iter(move || {
        let bank = Arc::new(Bank::new(&mint));

        let verified_setup: Vec<_> =
            to_packets_chunked(&packet_recycler, &setup_transactions.clone(), tx)
                .into_iter()
                .map(|x| {
                    let len = (*x).read().unwrap().packets.len();
                    (x, iter::repeat(1).take(len).collect())
                })
                .collect();

        let verified_setup_len = verified_setup.len();
        verified_sender.send(verified_setup).unwrap();
        BankingStage::process_packets(&bank, &verified_receiver, &signal_sender, &packet_recycler)
            .unwrap();

        check_txs(&signal_receiver, num_src_accounts);

        let verified: Vec<_> = to_packets_chunked(&packet_recycler, &transactions.clone(), 192)
            .into_iter()
            .map(|x| {
                let len = (*x).read().unwrap().packets.len();
                (x, iter::repeat(1).take(len).collect())
            })
            .collect();

        let verified_len = verified.len();
        verified_sender.send(verified).unwrap();
        BankingStage::process_packets(&bank, &verified_receiver, &signal_sender, &packet_recycler)
            .unwrap();

        check_txs(&signal_receiver, tx);
    });
}

fn bench_banking_stage_single_from(bencher: &mut Bencher) {
    let tx = 10_000_usize;
    let mint = Mint::new(1_000_000_000_000);
    let mut pubkeys = Vec::new();
    let num_keys = 8;
    for _ in 0..num_keys {
        pubkeys.push(Keypair::new().pubkey());
    }

    let transactions: Vec<_> = (0..tx)
        .into_par_iter()
        .map(|i| {
            Transaction::new(
                &mint.keypair(),
                pubkeys[i % num_keys],
                i as i64,
                mint.last_id(),
            )
        })
        .collect();

    let (verified_sender, verified_receiver) = channel();
    let (signal_sender, signal_receiver) = channel();
    let packet_recycler = PacketRecycler::default();

    bencher.iter(move || {
        let bank = Arc::new(Bank::new(&mint));
        let verified: Vec<_> = to_packets_chunked(&packet_recycler, &transactions.clone(), tx)
            .into_iter()
            .map(|x| {
                let len = (*x).read().unwrap().packets.len();
                (x, iter::repeat(1).take(len).collect())
            })
            .collect();
        let verified_len = verified.len();
        verified_sender.send(verified).unwrap();
        BankingStage::process_packets(&bank, &verified_receiver, &signal_sender, &packet_recycler)
            .unwrap();

        check_txs(&signal_receiver, tx);
    });
}

fn bench(criterion: &mut Criterion) {
    criterion.bench_function("bench_banking_stage_multi_accounts", |bencher| {
        bench_banking_stage_multi_accounts(bencher);
    });
    criterion.bench_function("bench_process_stage_single_from", |bencher| {
        bench_banking_stage_single_from(bencher);
    });
}

criterion_group!(
    name = benches;
    config = Criterion::default().sample_size(2);
    targets = bench
);
criterion_main!(benches);
benches/ledger.rs (40 changes, new file)
@@ -0,0 +1,40 @@
#[macro_use]
extern crate criterion;
extern crate solana;

use criterion::{Bencher, Criterion};
use solana::hash::{hash, Hash};
use solana::ledger::{next_entries, reconstruct_entries_from_blobs, Block};
use solana::packet::BlobRecycler;
use solana::signature::{Keypair, KeypairUtil};
use solana::transaction::Transaction;
use std::collections::VecDeque;

fn bench_block_to_blobs_to_block(bencher: &mut Bencher) {
    let zero = Hash::default();
    let one = hash(&zero.as_ref());
    let keypair = Keypair::new();
    let tx0 = Transaction::new(&keypair, keypair.pubkey(), 1, one);
    let transactions = vec![tx0; 10];
    let entries = next_entries(&zero, 1, transactions);

    let blob_recycler = BlobRecycler::default();
    bencher.iter(|| {
        let mut blob_q = VecDeque::new();
        entries.to_blobs(&blob_recycler, &mut blob_q);
        assert_eq!(reconstruct_entries_from_blobs(blob_q).unwrap(), entries);
    });
}

fn bench(criterion: &mut Criterion) {
    criterion.bench_function("bench_block_to_blobs_to_block", |bencher| {
        bench_block_to_blobs_to_block(bencher);
    });
}

criterion_group!(
    name = benches;
    config = Criterion::default().sample_size(2);
    targets = bench
);
criterion_main!(benches);
benches/signature.rs (24 changes, new file)
@@ -0,0 +1,24 @@
#[macro_use]
extern crate criterion;
extern crate solana;

use criterion::{Bencher, Criterion};
use solana::signature::GenKeys;

fn bench_gen_keys(b: &mut Bencher) {
    let mut rnd = GenKeys::new([0u8; 32]);
    b.iter(|| rnd.gen_n_keypairs(1000));
}

fn bench(criterion: &mut Criterion) {
    criterion.bench_function("bench_gen_keys", |bencher| {
        bench_gen_keys(bencher);
    });
}

criterion_group!(
    name = benches;
    config = Criterion::default().sample_size(2);
    targets = bench
);
criterion_main!(benches);
benches/sigverify.rs (36 changes, new file)
@@ -0,0 +1,36 @@
#[macro_use]
extern crate criterion;
extern crate bincode;
extern crate rayon;
extern crate solana;

use criterion::{Bencher, Criterion};
use solana::packet::{to_packets, PacketRecycler};
use solana::sigverify;
use solana::transaction::test_tx;

fn bench_sigverify(bencher: &mut Bencher) {
    let tx = test_tx();

    // generate packet vector
    let packet_recycler = PacketRecycler::default();
    let batches = to_packets(&packet_recycler, &vec![tx; 128]);

    // verify packets
    bencher.iter(|| {
        let _ans = sigverify::ed25519_verify(&batches);
    })
}

fn bench(criterion: &mut Criterion) {
    criterion.bench_function("bench_sigverify", |bencher| {
        bench_sigverify(bencher);
    });
}

criterion_group!(
    name = benches;
    config = Criterion::default().sample_size(2);
    targets = bench
);
criterion_main!(benches);
@@ -33,11 +33,12 @@ The process to update a disk image is as follows (TODO: make this less manual):
4. From another machine, `gcloud auth login`, then create a new Disk Image based
off the modified VM Instance:
```
$ gcloud compute images create ci-default-v5 --source-disk xxx --source-disk-zone us-east1-b
$ gcloud compute images create ci-default-$(date +%Y%m%d%H%M) --source-disk xxx --source-disk-zone us-east1-b --family ci-default

```
or
```
$ gcloud compute images create ci-cuda-v5 --source-disk xxx --source-disk-zone us-east1-b
$ gcloud compute images create ci-cuda-$(date +%Y%m%d%H%M) --source-disk xxx --source-disk-zone us-east1-b --family ci-cuda
```
5. Delete the new VM instance.
6. Go to the Instance templates tab, find the existing template named
ci/audit.sh (32 changes, new executable file)
@@ -0,0 +1,32 @@
#!/bin/bash -e
#
# Audits project dependencies for security vulnerabilities
#

cd "$(dirname "$0")/.."

export RUST_BACKTRACE=1
rustc --version
cargo --version

_() {
  echo "--- $*"
  "$@"
}

maybe_cargo_install() {
  for cmd in "$@"; do
    set +e
    cargo "$cmd" --help > /dev/null 2>&1
    declare exitcode=$?
    set -e
    if [[ $exitcode -eq 101 ]]; then
      _ cargo install cargo-"$cmd"
    fi
  done
}

maybe_cargo_install audit tree

_ cargo tree
_ cargo audit
@@ -1,4 +1,4 @@
steps:
  - command: "ci/snap.sh"
    timeout_in_minutes: 20
    timeout_in_minutes: 40
    name: "snap [public]"
@@ -1,13 +1,18 @@
steps:
  - command: "ci/docker-run.sh rust ci/test-stable.sh"
  - command: "ci/docker-run.sh solanalabs/rust:1.28.0 ci/test-stable.sh"
    name: "stable [public]"
    env:
      CARGO_TARGET_CACHE_NAME: "stable"
    timeout_in_minutes: 20
    timeout_in_minutes: 30
  - command: "ci/docker-run.sh solanalabs/rust:1.28.0 ci/test-bench.sh"
    name: "bench [public]"
    env:
      CARGO_TARGET_CACHE_NAME: "stable"
    timeout_in_minutes: 30
  - command: "ci/shellcheck.sh"
    name: "shellcheck [public]"
    timeout_in_minutes: 20
  - command: "ci/docker-run.sh rustlang/rust:nightly ci/test-nightly.sh"
  - command: "ci/docker-run.sh solanalabs/rust-nightly:2018-08-14 ci/test-nightly.sh"
    name: "nightly [public]"
    env:
      CARGO_TARGET_CACHE_NAME: "nightly"
@@ -17,12 +22,15 @@ steps:
    env:
      CARGO_TARGET_CACHE_NAME: "stable-perf"
    timeout_in_minutes: 20
    retry:
      automatic:
        - exit_status: "*"
          limit: 2
    agents:
      - "queue=cuda"
  - command: "ci/test-large-network.sh || true"
    name: "large-network [public] [ignored]"
    env:
      CARGO_TARGET_CACHE_NAME: "stable"
    timeout_in_minutes: 20
    agents:
      - "queue=large"
  - command: "ci/pr-snap.sh"
    timeout_in_minutes: 20
    name: "snap [public]"
@@ -30,9 +38,12 @@ steps:
  - command: "ci/publish-crate.sh"
    timeout_in_minutes: 20
    name: "publish crate [public]"
  - command: "ci/hoover.sh"
    timeout_in_minutes: 20
    name: "clean agent [public]"
  - trigger: "solana-snap"
    branches: "!pull/*"
    async: true
    build:
      message: "${BUILDKITE_MESSAGE}"
      commit: "${BUILDKITE_COMMIT}"
      branch: "${BUILDKITE_BRANCH}"
      env:
        TRIGGERED_BUILDKITE_TAG: "${BUILDKITE_TAG}"
@@ -22,11 +22,14 @@ shift
ARGS=(
  --workdir /solana
  --volume "$PWD:/solana"
  --volume "$HOME:/home"
  --env "CARGO_HOME=/home/.cargo"
  --rm
)

if [[ -n $CI ]]; then
  ARGS+=(--volume "$HOME:/home")
  ARGS+=(--env "CARGO_HOME=/home/.cargo")
fi

# kcov tries to set the personality of the binary which docker
# doesn't allow by default.
ARGS+=(--security-opt "seccomp=unconfined")
@@ -38,7 +41,10 @@ fi

# Environment variables to propagate into the container
ARGS+=(
  --env BUILDKITE
  --env BUILDKITE_AGENT_ACCESS_TOKEN
  --env BUILDKITE_BRANCH
  --env BUILDKITE_JOB_ID
  --env BUILDKITE_TAG
  --env CODECOV_TOKEN
  --env CRATES_IO_TOKEN
ci/docker-rust-nightly/Dockerfile (9 changes, new file)
@@ -0,0 +1,9 @@
FROM rustlang/rust:nightly

RUN rustup component add clippy-preview --toolchain=nightly && \
    echo deb http://ftp.debian.org/debian stretch-backports main >> /etc/apt/sources.list && \
    apt update && \
    apt install -y \
      llvm-6.0 \
    && \
    rm -rf /var/lib/apt/lists/*

ci/docker-rust-nightly/README.md (6 changes, new file)
@@ -0,0 +1,6 @@
Docker image containing rust nightly and some preinstalled crates used in CI.

This image may be manually updated by running `./build.sh` if you are a member
of the [Solana Labs](https://hub.docker.com/u/solanalabs/) Docker Hub
organization, but it is also automatically updated periodically by
[this automation](https://buildkite.com/solana-labs/solana-ci-docker-rust-nightly).

ci/docker-rust-nightly/build.sh (6 changes, new executable file)
@@ -0,0 +1,6 @@
#!/bin/bash -ex

cd "$(dirname "$0")"

docker build -t solanalabs/rust-nightly .
docker push solanalabs/rust-nightly

ci/docker-rust/Dockerfile (15 changes, new file)
@@ -0,0 +1,15 @@
FROM rust:1.28

RUN apt update && \
    apt-get install apt-transport-https && \
    echo deb https://apt.buildkite.com/buildkite-agent stable main > /etc/apt/sources.list.d/buildkite-agent.list && \
    apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 32A37959C2FA5C3C99EFBC32A79206696452D198 && \
    apt update && \
    apt install -y \
      buildkite-agent \
      rsync \
      sudo \
      cmake \
    && \
    rustup component add rustfmt-preview && \
    rm -rf /var/lib/apt/lists/*

ci/docker-rust/README.md (6 changes, new file)
@@ -0,0 +1,6 @@
Docker image containing rust and some preinstalled packages used in CI.

This image may be manually updated by running `./build.sh` if you are a member
of the [Solana Labs](https://hub.docker.com/u/solanalabs/) Docker Hub
organization, but it is also automatically updated periodically by
[this automation](https://buildkite.com/solana-labs/solana-ci-docker-rust).

ci/docker-rust/build.sh (6 changes, new executable file)
@@ -0,0 +1,6 @@
#!/bin/bash -ex

cd "$(dirname "$0")"

docker build -t solanalabs/rust .
docker push solanalabs/rust

@@ -2,6 +2,6 @@ FROM snapcraft/xenial-amd64

# Update snapcraft to latest version
RUN apt-get update -qq \
  && apt-get install -y snapcraft \
  && apt-get install -y snapcraft daemontools \
  && rm -rf /var/lib/apt/lists/* \
  && snapcraft --version
ci/hoover.sh (24 changes)
@@ -3,6 +3,7 @@
# Regular maintenance performed on a buildkite agent to control disk usage
#


echo --- Delete all exited containers first
(
  set -x
@@ -45,6 +46,29 @@ echo "--- Delete /tmp files older than 1 day owned by $(whoami)"
  find /tmp -maxdepth 1 -user "$(whoami)" -mtime +1 -print0 | xargs -0 rm -rf
)

echo --- Deleting stale buildkite agent build directories
if [[ ! -d ../../../../builds/$BUILDKITE_AGENT_NAME ]]; then
  # We might not be where we think we are, do nothing
  echo Warning: Skipping flush of stale agent build directories
  echo "  PWD=$PWD"
else
  # NOTE: this will be horribly broken if we ever decide to run multiple
  # agents on the same machine.
  (
    for keepDir in "$BUILDKITE_PIPELINE_SLUG" \
                   "$BUILDKITE_ORGANIZATION_SLUG" \
                   "$BUILDKITE_AGENT_NAME"; do
      cd .. || exit 1
      for dir in *; do
        if [[ -d $dir && $dir != "$keepDir" ]]; then
          echo "Removing $dir"
          rm -rf "${dir:?}"/
        fi
      done
    done
  )
fi

echo --- System Status
(
  set -x
ci/install-earlyoom.sh (32 changes, new executable file)
@@ -0,0 +1,32 @@
#!/bin/bash -x
#
# Install EarlyOOM
#

[[ $(uname) = Linux ]] || exit 1

# 64 - enable signalling of processes (term, kill, oom-kill)
# TODO: This setting will not persist across reboots
sysrq=$(( $(cat /proc/sys/kernel/sysrq) | 64 ))
sudo sysctl -w kernel.sysrq=$sysrq

if command -v earlyoom; then
  sudo systemctl status earlyoom
  exit 0
fi

wget http://ftp.us.debian.org/debian/pool/main/e/earlyoom/earlyoom_1.1-2_amd64.deb
sudo apt install --quiet --yes ./earlyoom_1.1-2_amd64.deb

cat > earlyoom <<OOM
# use the kernel OOM killer, trigger at 20% available RAM,
EARLYOOM_ARGS="-k -m 20"
OOM
sudo cp earlyoom /etc/default/
rm earlyoom

sudo systemctl stop earlyoom
sudo systemctl enable earlyoom
sudo systemctl start earlyoom

exit 0
ci/localnet-sanity.sh (86 changes, new executable file)
@@ -0,0 +1,86 @@
#!/bin/bash -e
#
# Perform a quick sanity test on a leader, drone, validator and client running
# locally on the same machine
#

cd "$(dirname "$0")"/..
source ci/upload_ci_artifact.sh
source multinode-demo/common.sh

./multinode-demo/setup.sh

backgroundCommands="drone leader validator validator-x"
pids=()

for cmd in $backgroundCommands; do
  echo "--- Start $cmd"
  rm -f log-"$cmd".txt
  ./multinode-demo/"$cmd".sh > log-"$cmd".txt 2>&1 &
  declare pid=$!
  pids+=("$pid")
  echo "pid: $pid"
done

killBackgroundCommands() {
  set +e
  for pid in "${pids[@]}"; do
    if kill "$pid"; then
      wait "$pid"
    else
      echo -e "^^^ +++\\nWarning: unable to kill $pid"
    fi
  done
  set -e
  pids=()
}

shutdown() {
  exitcode=$?
  killBackgroundCommands

  set +e

  echo "--- Upload artifacts"
  for cmd in $backgroundCommands; do
    declare logfile=log-$cmd.txt
    upload_ci_artifact "$logfile"
    tail "$logfile"
  done

  exit $exitcode
}

trap shutdown EXIT INT

set -e

flag_error() {
  echo Failed
  echo "^^^ +++"
  exit 1
}

echo "--- Wallet sanity"
(
  set -x
  multinode-demo/test/wallet-sanity.sh
) || flag_error

echo "--- Node count"
(
  set -x
  ./multinode-demo/client.sh "$PWD" 3 -c --addr 127.0.0.1
) || flag_error

killBackgroundCommands

echo "--- Ledger verification"
(
  set -x
  $solana_ledger_tool --ledger "$SOLANA_CONFIG_DIR"/ledger verify
) || flag_error

echo +++
echo Ok
exit 0
@@ -5,7 +5,12 @@
cd "$(dirname "$0")/.."

set -x
find . -name "*.sh" -not -regex ".*/.cargo/.*" -not -regex ".*/node_modules/.*" -print0 \
find . -name "*.sh" \
  -not -regex ".*/.cargo/.*" \
  -not -regex ".*/node_modules/.*" \
  -not -regex ".*/target/.*" \
  -print0 \
  | xargs -0 \
      ci/docker-run.sh koalaman/shellcheck --color=always --external-sources --shell=bash

exit 0
ci/snap.sh (18 changes)
@@ -7,7 +7,13 @@ if [[ -z $BUILDKITE_BRANCH ]] || ./ci/is-pr.sh; then
  DRYRUN="echo"
fi

if [[ -z "$BUILDKITE_TAG" ]]; then
# BUILDKITE_TAG is the normal environment variable set by Buildkite. However
# when this script is run from a triggered pipeline, TRIGGERED_BUILDKITE_TAG is
# used instead of BUILDKITE_TAG (due to Buildkite limitations that prevents
# BUILDKITE_TAG from propagating through to triggered pipelines)
if [[ -n "$BUILDKITE_TAG" || -n "$TRIGGERED_BUILDKITE_TAG" ]]; then
  SNAP_CHANNEL=stable
elif [[ $BUILDKITE_BRANCH = master ]]; then
  SNAP_CHANNEL=edge
else
  SNAP_CHANNEL=beta
@@ -33,11 +39,17 @@ fi

set -x

echo --- build
echo --- checking for multilog
if [[ ! -x /usr/bin/multilog ]]; then
  echo "multilog not found, install with: sudo apt-get install -y daemontools"
  exit 1
fi

echo --- build: $SNAP_CHANNEL channel
snapcraft

source ci/upload_ci_artifact.sh
upload_ci_artifact solana_*.snap

echo --- publish
echo --- publish: $SNAP_CHANNEL channel
$DRYRUN snapcraft push solana_*.snap --release $SNAP_CHANNEL
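The channel-selection rule above reduces to a small pure function. A Rust sketch (not part of ci/snap.sh; the tag and branch values below are hypothetical):

```rust
// A tag (direct or propagated through a triggered pipeline) selects stable,
// the master branch selects edge, and everything else selects beta.
fn snap_channel(buildkite_tag: Option<&str>, triggered_tag: Option<&str>, branch: &str) -> &'static str {
    if buildkite_tag.is_some() || triggered_tag.is_some() {
        "stable"
    } else if branch == "master" {
        "edge"
    } else {
        "beta"
    }
}

fn main() {
    assert_eq!(snap_channel(Some("v0.7.2"), None, "master"), "stable");
    assert_eq!(snap_channel(None, None, "master"), "edge");
    assert_eq!(snap_channel(None, None, "pull/42"), "beta");
}
```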
ci/test-bench.sh (13 changes, new executable file)
@@ -0,0 +1,13 @@
#!/bin/bash -e

cd "$(dirname "$0")/.."

ci/version-check.sh stable
export RUST_BACKTRACE=1

_() {
  echo "--- $*"
  "$@"
}

_ cargo bench --verbose
ci/test-large-network.sh (45 changes, new executable file)
@@ -0,0 +1,45 @@
#!/bin/bash -e

here=$(dirname "$0")
cd "$here"/..

if ! ci/version-check.sh stable; then
  # This job doesn't run within a container, try once to upgrade tooling on a
  # version check failure
  rustup install stable
  ci/version-check.sh stable
fi
export RUST_BACKTRACE=1

./fetch-perf-libs.sh
export LD_LIBRARY_PATH+=:$PWD

export RUST_LOG=multinode=info

if [[ $(ulimit -n) -lt 65000 ]]; then
  echo 'Error: nofiles too small, run "ulimit -n 65000" to continue'
  exit 1
fi

if [[ $(sysctl -n net.core.rmem_default) -lt 1610612736 ]]; then
  echo 'Error: rmem_default too small, run "sudo sysctl -w net.core.rmem_default=1610612736" to continue'
  exit 1
fi

if [[ $(sysctl -n net.core.rmem_max) -lt 1610612736 ]]; then
  echo 'Error: rmem_max too small, run "sudo sysctl -w net.core.rmem_max=1610612736" to continue'
  exit 1
fi

if [[ $(sysctl -n net.core.wmem_default) -lt 1610612736 ]]; then
  echo 'Error: rmem_default too small, run "sudo sysctl -w net.core.wmem_default=1610612736" to continue'
  exit 1
fi

if [[ $(sysctl -n net.core.wmem_max) -lt 1610612736 ]]; then
  echo 'Error: rmem_max too small, run "sudo sysctl -w net.core.wmem_max=1610612736" to continue'
  exit 1
fi

set -x
exec cargo test --release --features=erasure test_multi_node_dynamic_network -- --ignored
@@ -2,9 +2,8 @@

cd "$(dirname "$0")/.."

ci/version-check.sh nightly
export RUST_BACKTRACE=1
rustc --version
cargo --version

_() {
  echo "--- $*"
@@ -13,13 +12,11 @@ _() {

_ cargo build --verbose --features unstable
_ cargo test --verbose --features unstable
_ cargo bench --verbose --features unstable

_ cargo clippy -- --deny=warnings

exit 0

# Coverage disabled (see issue #433)
_ cargo install --force cargo-cov
_ cargo cov test
_ cargo cov report
@@ -29,6 +26,6 @@ ls -l target/cov/report/index.html
if [[ -z "$CODECOV_TOKEN" ]]; then
  echo CODECOV_TOKEN undefined
else
  bash <(curl -s https://codecov.io/bash) -x 'llvm-cov gcov'
  bash <(curl -s https://codecov.io/bash) -x 'llvm-cov-6.0 gcov'
fi
@@ -2,11 +2,29 @@

cd "$(dirname "$0")/.."

./fetch-perf-libs.sh

export LD_LIBRARY_PATH=$PWD:/usr/local/cuda/lib64
export PATH=$PATH:/usr/local/cuda/bin
if ! ci/version-check.sh stable; then
  # This job doesn't run within a container, try once to upgrade tooling on a
  # version check failure
  rustup install stable
  ci/version-check.sh stable
fi
export RUST_BACKTRACE=1

set -x
exec cargo test --features=cuda,erasure
./fetch-perf-libs.sh
export LD_LIBRARY_PATH=$PWD:/usr/local/cuda/lib64
export PATH=$PATH:/usr/local/cuda/bin

_() {
  echo "--- $*"
  "$@"
}

_ cargo test --features=cuda,erasure

echo --- ci/localnet-sanity.sh
(
  set -x
  # Assume |cargo build| has populated target/debug/ successfully.
  export PATH=$PWD/target/debug:$PATH
  USE_INSTALL=1 ci/localnet-sanity.sh
)
@@ -2,17 +2,24 @@

cd "$(dirname "$0")/.."

ci/version-check.sh stable
export RUST_BACKTRACE=1
rustc --version
cargo --version

_() {
  echo "--- $*"
  "$@"
}

_ rustup component add rustfmt-preview
_ cargo fmt -- --write-mode=check
_ cargo fmt -- --check
_ cargo build --verbose
_ cargo test --verbose
_ cargo test -- --ignored

echo --- ci/localnet-sanity.sh
(
  set -x
  # Assume |cargo build| has populated target/debug/ successfully.
  export PATH=$PWD/target/debug:$PATH
  USE_INSTALL=1 ci/localnet-sanity.sh
)

_ ci/audit.sh
519
ci/testnet-deploy.sh
Executable file
519
ci/testnet-deploy.sh
Executable file
@ -0,0 +1,519 @@
|
||||
#!/bin/bash -e
#
# Deploys the Solana software running on the testnet full nodes
#
# This script must be run by a user/machine that has successfully authenticated
# with GCP and has sufficient permission.
#
here=$(dirname "$0")
metrics_write_datapoint="$here"/../multinode-demo/metrics_write_datapoint.sh

# TODO: Switch over to rolling updates
ROLLING_UPDATE=false
#ROLLING_UPDATE=true

if [[ -z $SOLANA_METRICS_CONFIG ]]; then
  echo Error: SOLANA_METRICS_CONFIG environment variable is unset
  exit 1
fi


# The SOLANA_METRICS_CONFIG environment variable is formatted as a
# comma-delimited list of parameters. All parameters are optional.
#
# Example:
#   export SOLANA_METRICS_CONFIG="host=<metrics host>,db=<database name>,u=<username>,p=<password>"
#
configure_metrics() {
  [[ -n $SOLANA_METRICS_CONFIG ]] || return 0

  declare metrics_params
  IFS=',' read -r -a metrics_params <<< "$SOLANA_METRICS_CONFIG"
  for param in "${metrics_params[@]}"; do
    IFS='=' read -r -a pair <<< "$param"
    if [[ ${#pair[@]} != 2 ]]; then
      echo Error: invalid metrics parameter: "$param" >&2
    else
      declare name="${pair[0]}"
      declare value="${pair[1]}"
      case "$name" in
      host)
        export INFLUX_HOST="$value"
        echo INFLUX_HOST="$INFLUX_HOST" >&2
        ;;
      db)
        export INFLUX_DATABASE="$value"
        echo INFLUX_DATABASE="$INFLUX_DATABASE" >&2
        ;;
      u)
        export INFLUX_USERNAME="$value"
        echo INFLUX_USERNAME="$INFLUX_USERNAME" >&2
        ;;
      p)
        export INFLUX_PASSWORD="$value"
        echo INFLUX_PASSWORD="********" >&2
        ;;
      *)
        echo Error: Unknown metrics parameter name: "$name" >&2
        ;;
      esac
    fi
  done
}
configure_metrics

# Default to edge channel. To select the beta channel:
#   export SOLANA_SNAP_CHANNEL=beta
if [[ -z $SOLANA_SNAP_CHANNEL ]]; then
  SOLANA_SNAP_CHANNEL=edge
fi

# Select default network URL based on SOLANA_SNAP_CHANNEL if SOLANA_NET_ENTRYPOINT is
# unspecified
if [[ -z $SOLANA_NET_ENTRYPOINT ]]; then
  case $SOLANA_SNAP_CHANNEL in
  edge)
    SOLANA_NET_ENTRYPOINT=master.testnet.solana.com
    unset SOLANA_NET_NAME
    ;;
  beta)
    SOLANA_NET_ENTRYPOINT=testnet.solana.com
    unset SOLANA_NET_NAME
    ;;
  *)
    echo Error: Unknown SOLANA_SNAP_CHANNEL=$SOLANA_SNAP_CHANNEL
    exit 1
    ;;
  esac
fi

if [[ -z $SOLANA_NET_NAME ]]; then
  SOLANA_NET_NAME=${SOLANA_NET_ENTRYPOINT//./-}
fi

: ${SOLANA_NET_NAME:?$SOLANA_NET_ENTRYPOINT}
netBasename=${SOLANA_NET_NAME/-*/}
if [[ $netBasename != testnet ]]; then
  netBasename="testnet-$netBasename"
fi

# Figure installation command
SNAP_INSTALL_CMD="\
  for i in {1..3}; do \
    sudo snap install solana --$SOLANA_SNAP_CHANNEL --devmode && break;
    sleep 1; \
  done \
"
LOCAL_SNAP=$1
if [[ -n $LOCAL_SNAP ]]; then
  if [[ ! -f $LOCAL_SNAP ]]; then
    echo "Error: $LOCAL_SNAP is not a file"
    exit 1
  fi
  SNAP_INSTALL_CMD="sudo snap install ~/solana_local.snap --devmode --dangerous"
fi
SNAP_INSTALL_CMD="sudo snap remove solana; $SNAP_INSTALL_CMD"

EARLYOOM_INSTALL_CMD="\
  wget --retry-connrefused --waitretry=1 \
    --read-timeout=20 --timeout=15 --tries=5 \
    -O install-earlyoom.sh \
    https://raw.githubusercontent.com/solana-labs/solana/v0.7/ci/install-earlyoom.sh; \
  bash install-earlyoom.sh \
"
SNAP_INSTALL_CMD="$EARLYOOM_INSTALL_CMD; $SNAP_INSTALL_CMD"

# `export SKIP_INSTALL=1` to reset the network without reinstalling the snap
if [[ -n $SKIP_INSTALL ]]; then
  SNAP_INSTALL_CMD="echo Install skipped"
fi

echo "+++ Configuration for $netBasename"
publicUrl="$SOLANA_NET_ENTRYPOINT"
if [[ $publicUrl = testnet.solana.com ]]; then
  publicIp="" # Use default value
else
  publicIp=$(dig +short $publicUrl | head -n1)
fi

echo "Network name: $SOLANA_NET_NAME"
echo "Network entry point URL: $publicUrl ($publicIp)"
echo "Snap channel: $SOLANA_SNAP_CHANNEL"
echo "Install command: $SNAP_INSTALL_CMD"
echo "Setup args: $SOLANA_SETUP_ARGS"
[[ -z $LOCAL_SNAP ]] || echo "Local snap: $LOCAL_SNAP"

vmlist=() # Each array element is formatted as "class:vmName:vmZone:vmPublicIp"

vm_exec() {
  declare vmName=$1
  declare vmZone=$2
  declare vmPublicIp=$3
  declare message=$4
  declare cmd=$5

  echo "--- $message $vmName in zone $vmZone ($vmPublicIp)"
  ssh -o BatchMode=yes -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null \
    testnet-deploy@"$vmPublicIp" "$cmd"
}

#
# vm_foreach [cmd] [extra args to cmd]
# where
#   cmd - the command to execute on each VM
#         The command will receive three fixed arguments, followed by any
#         additional arguments supplied to vm_foreach:
#             vmName - GCP name of the VM
#             vmZone - The GCP zone the VM is located in
#             vmPublicIp - The public IP address of this VM
#             vmClass - The 'class' of this VM
#             count - Monotonically increasing count for each
#                     invocation of cmd, starting at 1
#             ... - Extra args to cmd..
#
#
vm_foreach() {
  declare cmd=$1
  shift

  declare count=1
  for info in "${vmlist[@]}"; do
    declare vmClass vmName vmZone vmPublicIp
    IFS=: read -r vmClass vmName vmZone vmPublicIp < <(echo "$info")

    eval "$cmd" "$vmName" "$vmZone" "$vmPublicIp" "$vmClass" "$count" "$@"
    count=$((count + 1))
  done
}

#
# vm_foreach_in_class [class] [cmd]
# where
#   class - the desired VM class to operate on
#   cmd - the command to execute on each VM in the desired class.
#         The command will receive three arguments:
#             vmName - GCP name of the VM
#             vmZone - The GCP zone the VM is located in
#             vmPublicIp - The public IP address of this VM
#             count - Monotonically increasing count for each
#                     invocation of cmd, starting at 1
#
#
_run_cmd_if_class() {
  declare vmName=$1
  declare vmZone=$2
  declare vmPublicIp=$3
  declare vmClass=$4
  declare count=$5
  declare class=$6
  declare cmd=$7
  if [[ $class = "$vmClass" ]]; then
    eval "$cmd" "$vmName" "$vmZone" "$vmPublicIp" "$count"
  fi
}

vm_foreach_in_class() {
  declare class=$1
  declare cmd=$2
  vm_foreach _run_cmd_if_class "$1" "$2"
}

#
# Load all VMs matching the specified filter and tag them with the specified
# class into the `vmlist` array.
findVms() {
  declare class="$1"
  declare filter="$2"
  gcloud compute instances list --filter="$filter"
  while read -r vmName vmZone vmPublicIp status; do
    if [[ $status != RUNNING ]]; then
      echo "Warning: $vmName is not RUNNING, ignoring it."
      continue
    fi
    vmlist+=("$class:$vmName:$vmZone:$vmPublicIp")
  done < <(gcloud compute instances list \
             --filter="$filter" \
             --format 'value(name,zone,networkInterfaces[0].accessConfigs[0].natIP,status)')
}

wait_for_pids() {
  echo "--- Waiting for $*"
  for pid in "${pids[@]}"; do
    declare ok=true
    wait "$pid" || ok=false
    cat "log-$pid.txt"
    if ! $ok; then
      echo ^^^ +++
      exit 1
    fi
    rm "log-$pid.txt"
  done
}

delete_unreachable_validators() {
  declare vmName=$1
  declare vmZone=$2
  declare vmPublicIp=$3

  touch "log-$vmName.txt"
  (
    SECONDS=0
    if ! vm_exec "$vmName" "$vmZone" "$vmPublicIp" "Checking $vmName" uptime; then
      echo "^^^ +++"

      # Validators are managed by a Compute Engine Instance Group, so deleting
      # one will just cause a new one to be spawned.
      echo "Warning: $vmName is unreachable, deleting it"
      gcloud compute instances delete "$vmName" --zone "$vmZone"
    fi
    echo "validator checked in ${SECONDS} seconds"
  ) >> "log-$vmName.txt" 2>&1 &
  declare pid=$!

  # Rename log file so it can be discovered later by $pid
  mv "log-$vmName.txt" "log-$pid.txt"
  pids+=("$pid")
}


echo "Validator nodes (unverified):"
findVms validator "name~^$SOLANA_NET_NAME-validator-"
pids=()
vm_foreach_in_class validator delete_unreachable_validators
wait_for_pids validator sanity check
vmlist=()

echo "Leader node:"
findVms leader "name=$SOLANA_NET_NAME"
[[ ${#vmlist[@]} = 1 ]] || {
  echo "Unable to find $SOLANA_NET_NAME"
  exit 1
}

echo "Client node(s):"
findVms client "name~^$SOLANA_NET_NAME-client"

echo "Validator nodes:"
findVms validator "name~^$SOLANA_NET_NAME-validator-"

fullnode_count=0
inc_fullnode_count() {
  fullnode_count=$((fullnode_count + 1))
}
vm_foreach_in_class leader inc_fullnode_count
vm_foreach_in_class validator inc_fullnode_count

# Add "network stopping" datapoint
$metrics_write_datapoint "testnet-deploy,name=$netBasename stop=1"

client_start() {
  declare vmName=$1
  declare vmZone=$2
  declare vmPublicIp=$3
  declare count=$4

  vm_exec "$vmName" "$vmZone" "$vmPublicIp" \
    "Starting client $count:" \
    "\
      set -x;
      snap info solana; \
      sudo snap get solana; \
      threadCount=\$(nproc); \
      if [[ \$threadCount -gt 4 ]]; then threadCount=4; fi; \
      tmux kill-session -t solana; \
      tmux new -s solana -d \" \
        set -x; \
        sudo rm /tmp/solana.log; \
        while : ; do \
          /snap/bin/solana.bench-tps $SOLANA_NET_ENTRYPOINT $fullnode_count --loop -s 600 --sustained -t \$threadCount 2>&1 | tee -a /tmp/solana.log; \
          echo 'https://metrics.solana.com:8086/write?db=${INFLUX_DATABASE}&u=${INFLUX_USERNAME}&p=${INFLUX_PASSWORD}' \
            | xargs curl --max-time 5 -XPOST --data-binary 'testnet-deploy,name=$netBasename clientexit=1'; \
          echo Error: bench-tps should never exit | tee -a /tmp/solana.log; \
        done; \
        bash \
      \"; \
      sleep 2; \
      tmux capture-pane -t solana -p -S -100; \
      tail /tmp/solana.log; \
    "
}

client_stop() {
  declare vmName=$1
  declare vmZone=$2
  declare vmPublicIp=$3
  declare count=$4

  touch "log-$vmName.txt"
  (
    SECONDS=0
    vm_exec "$vmName" "$vmZone" "$vmPublicIp" \
      "Stopping client $vmName ($count):" \
      "\
        set -x;
        tmux list-sessions; \
        tmux capture-pane -t solana -p; \
        tmux kill-session -t solana; \
        $SNAP_INSTALL_CMD; \
        sudo snap set solana metrics-config=$SOLANA_METRICS_CONFIG \
          rust-log=$RUST_LOG \
          default-metrics-rate=$SOLANA_DEFAULT_METRICS_RATE \
        ; \
      "
    echo "Client stopped in ${SECONDS} seconds"
  ) >> "log-$vmName.txt" 2>&1 &
  declare pid=$!

  # Rename log file so it can be discovered later by $pid
  mv "log-$vmName.txt" "log-$pid.txt"
  pids+=("$pid")
}

fullnode_start() {
  declare class=$1
  declare vmName=$2
  declare vmZone=$3
  declare vmPublicIp=$4
  declare count=$5

  touch "log-$vmName.txt"
  (
    SECONDS=0
    commonNodeConfig="\
      rust-log=$RUST_LOG \
      default-metrics-rate=$SOLANA_DEFAULT_METRICS_RATE \
      metrics-config=$SOLANA_METRICS_CONFIG \
      setup-args=$SOLANA_SETUP_ARGS \
    "
    if [[ $class = leader ]]; then
      nodeConfig="mode=leader+drone $commonNodeConfig"
      if [[ -n $SOLANA_CUDA ]]; then
        nodeConfig="$nodeConfig enable-cuda=1"
      fi
    else
      nodeConfig="mode=validator leader-address=$publicIp $commonNodeConfig"
    fi

    vm_exec "$vmName" "$vmZone" "$vmPublicIp" "Starting $class $count:" \
      "\
        set -ex; \
        logmarker='solana deploy $(date)/$RANDOM'; \
        logger \"\$logmarker\"; \
        $SNAP_INSTALL_CMD; \
        sudo snap set solana $nodeConfig; \
        snap info solana; \
        sudo snap get solana; \
        echo Slight delay to get more syslog output; \
        sleep 2; \
        sudo grep -Pzo \"\$logmarker(.|\\n)*\" /var/log/syslog \
      "
    echo "Succeeded in ${SECONDS} seconds"
  ) >> "log-$vmName.txt" 2>&1 &
  declare pid=$!

  # Rename log file so it can be discovered later by $pid
  mv "log-$vmName.txt" "log-$pid.txt"

  pids+=("$pid")
}

leader_start() {
  fullnode_start leader "$@"
}

validator_start() {
  fullnode_start validator "$@"
}

fullnode_stop() {
  declare vmName=$1
  declare vmZone=$2
  declare vmPublicIp=$3
  declare count=$4

  touch "log-$vmName.txt"
  (
    SECONDS=0
    # Try to ping the machine first. When a machine (validator) is restarted,
    # there can be a delay between when the instance is reported as RUNNING and when
    # it's reachable over the network
    timeout 30s bash -c "set -o pipefail; until ping -c 3 $vmPublicIp | tr - _; do echo .; done"
    vm_exec "$vmName" "$vmZone" "$vmPublicIp" "Shutting down" "\
      if snap list solana; then \
        sudo snap set solana mode=; \
      fi"
    echo "Succeeded in ${SECONDS} seconds"
  ) >> "log-$vmName.txt" 2>&1 &
  declare pid=$!

  # Rename log file so it can be discovered later by $pid
  mv "log-$vmName.txt" "log-$pid.txt"

  pids+=("$pid")
}

if [[ -n $LOCAL_SNAP ]]; then
  echo "--- Transferring $LOCAL_SNAP to node(s)"

  transfer_local_snap() {
    declare vmName=$1
    declare vmZone=$2
    declare vmPublicIp=$3
    declare vmClass=$4
    declare count=$5

    echo "--- $vmName in zone $vmZone ($count)"
    SECONDS=0
    scp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null \
      "$LOCAL_SNAP" testnet-deploy@"$vmPublicIp":solana_local.snap
    echo "Succeeded in ${SECONDS} seconds"
  }
  vm_foreach transfer_local_snap
fi

echo "--- Stopping client node(s)"
pids=()
vm_foreach_in_class client client_stop
client_stop_pids=("${pids[@]}")

if ! $ROLLING_UPDATE; then
  pids=()
  echo "--- Shutting down all full nodes"
  vm_foreach_in_class leader fullnode_stop
  vm_foreach_in_class validator fullnode_stop
  wait_for_pids fullnode shutdown
fi

pids=()
echo --- Starting leader node
vm_foreach_in_class leader leader_start
wait_for_pids leader

pids=()
echo --- Starting validator nodes
vm_foreach_in_class validator validator_start
wait_for_pids validators

echo "--- $publicUrl sanity test"
if [[ -z $CI ]]; then
  # TODO: ssh into a node and run testnet-sanity.sh there. It's not safe to
  # assume the correct Snap is installed on the current non-CI machine
  echo Skipped for non-CI deploy
  snapVersion=unknown
else
  (
    set -x
    USE_SNAP=1 ci/testnet-sanity.sh $publicUrl $fullnode_count
  )
  IFS=\  read -r _ snapVersion _ < <(snap info solana | grep "^installed:")
  snapVersion=${snapVersion/0+git./}
fi

pids=("${client_stop_pids[@]}")
wait_for_pids client shutdown
vm_foreach_in_class client client_start

# Add "network started" datapoint
$metrics_write_datapoint "testnet-deploy,name=$netBasename start=1,version=\"$snapVersion\""

exit 0
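Taken together, the LOCAL_SNAP and SKIP_INSTALL handling above suggests two common invocations. A minimal sketch (the snap filename is illustrative; SOLANA_METRICS_CONFIG must already be exported):

```bash
# Reset the network without reinstalling the snap
SKIP_INSTALL=1 ci/testnet-deploy.sh

# Deploy a locally built snap instead of a store channel
ci/testnet-deploy.sh solana_local.snap
```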
ci/testnet-sanity.sh (new executable file, 77 lines)
@@ -0,0 +1,77 @@
#!/bin/bash -e
#
# Perform a quick sanity test on the specified testnet
#

cd "$(dirname "$0")/.."
source multinode-demo/common.sh

NET_URL=$1
if [[ -z $NET_URL ]]; then
  NET_URL=testnet.solana.com
fi

EXPECTED_NODE_COUNT=$2
if [[ -z $EXPECTED_NODE_COUNT ]]; then
  EXPECTED_NODE_COUNT=50
fi

echo "--- $NET_URL: verify ledger"
if [[ -z $NO_LEDGER_VERIFY ]]; then
  if [[ -d /var/snap/solana/current/config/ledger ]]; then
    # Note: here we assume this script is actually running on the leader node...
    (
      set -x
      sudo cp -r /var/snap/solana/current/config/ledger /var/snap/solana/current/config/ledger-verify-$$
      sudo solana.ledger-tool --ledger /var/snap/solana/current/config/ledger-verify-$$ verify
    )
  else
    echo "^^^ +++"
    echo "Ledger verify skipped"
  fi
else
  echo "^^^ +++"
  echo "Ledger verify skipped (NO_LEDGER_VERIFY defined)"
fi

echo "--- $NET_URL: wallet sanity"
(
  set -x
  multinode-demo/test/wallet-sanity.sh $NET_URL
)

echo "--- $NET_URL: node count"
if [[ -n "$USE_SNAP" ]]; then
  # TODO: Merge client.sh functionality into solana-bench-tps proper and
  # remove this USE_SNAP case
  cmd=$solana_bench_tps
else
  cmd=multinode-demo/client.sh
fi

(
  set -x
  $cmd $NET_URL $EXPECTED_NODE_COUNT -c
)

echo "--- $NET_URL: validator sanity"
if [[ -z $NO_VALIDATOR_SANITY ]]; then
  (
    ./multinode-demo/setup.sh -t validator
    set -e -o pipefail
    timeout 10s ./multinode-demo/validator.sh "$NET_URL" 2>&1 | tee validator.log
  )
  wc -l validator.log
  if grep -C100 panic validator.log; then
    echo "^^^ +++"
    echo "Panic observed"
    exit 1
  else
    echo "Validator log looks ok"
  fi
else
  echo "^^^ +++"
  echo "Validator sanity disabled (NO_VALIDATOR_SANITY defined)"
fi

exit 0
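For reference, ci/testnet-deploy.sh above invokes this script as `USE_SNAP=1 ci/testnet-sanity.sh $publicUrl $fullnode_count`; run standalone it falls back to testnet.solana.com and a node count of 50. A usage sketch (URL and count illustrative):

```bash
# Sanity-check a testnet using the Snap-installed binaries
USE_SNAP=1 ci/testnet-sanity.sh master.testnet.solana.com 3
```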
ci/version-check.sh (new executable file, 35 lines)
@@ -0,0 +1,35 @@
#!/bin/bash -e

require() {
  declare expectedProgram="$1"
  declare expectedVersion="$2"

  read -r program version _ < <($expectedProgram -V)

  declare ok=true
  [[ $program = "$expectedProgram" ]] || ok=false
  [[ $version =~ $expectedVersion ]] || ok=false

  echo "Found $program $version"
  if ! $ok; then
    echo Error: expected "$expectedProgram $expectedVersion"
    exit 1
  fi
}

case ${1:-stable} in
nightly)
  require rustc 1.30.[0-9]+-nightly
  require cargo 1.29.[0-9]+-nightly
  ;;
stable)
  require rustc 1.28.[0-9]+
  require cargo 1.28.[0-9]+
  ;;
*)
  echo Error: unknown argument: "$1"
  exit 1
  ;;
esac

exit 0
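This is how the CI scripts earlier in this diff gate their toolchain; a usage sketch:

```bash
ci/version-check.sh stable    # expects rustc/cargo 1.28.x
ci/version-check.sh nightly   # expects rustc 1.30.x-nightly, cargo 1.29.x-nightly
```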
doc/testnet.md (new file, 35 lines)
@@ -0,0 +1,35 @@
# TestNet debugging info

Currently we have two testnets, 'perf' and 'master', both running the master branch of the
solana repo. Deploys happen at the top of every hour with the latest code. 'perf' has more cores
on the client machine so it can flood the network with transactions until failure.

## Deploy process

The testnets are deployed with the `ci/testnet-deploy.sh` script. A scheduled buildkite job runs
the deploy; look at `testnet-deploy` to see the agent that ran it and the logs. There is also a
job for triggering a deploy manually; a sketch of a manual invocation follows.
Validators are selected based on their machine name, and every node gets its binaries installed from snap.
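A minimal sketch of a manual deploy, assuming GCP authentication is already set up (placeholder values shown; `ci/testnet-deploy.sh` exits immediately if SOLANA_METRICS_CONFIG is unset):

```bash
export SOLANA_METRICS_CONFIG="host=<metrics host>,db=<database name>,u=<username>,p=<password>"
export SOLANA_SNAP_CHANNEL=beta   # default is edge
ci/testnet-deploy.sh
```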

## Where are the testnet logs?

For the client they are put in `/tmp/solana`; for validators and leaders they are in `/var/snap/solana/current/`.
You can also see the backtrace of the client by ssh'ing into the client node and doing:

```bash
$ sudo -u testnet-deploy
$ tmux attach -t solana
```
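To follow a validator's log on the node itself, a sketch based on the multilog directories configured in `multinode-demo/common.sh` (the `current` filename is multilog's convention, assumed here):

```bash
$ sudo tail -f /var/snap/solana/current/validator/current
```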

## How do I reset the testnet?

Through buildkite.

## How can I scale the tx generation rate?

Increase the TX rate by increasing the number of cores on the client machine that is running
`bench-tps`, or run multiple clients. Decrease it by lowering the core count or by setting the
rayon env variable `RAYON_NUM_THREADS=<xx>`, as in the sketch below.
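For example (values illustrative), to cap an ad hoc client at four rayon worker threads:

```bash
$ RAYON_NUM_THREADS=4 multinode-demo/client.sh testnet.solana.com 50
```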

## How can I test a change on the testnet?

Currently, a merged PR is the only way to test a change on the testnet.
@@ -13,7 +13,7 @@ fi
(
  set -x
  curl -o solana-perf.tgz \
    https://solana-perf.s3.amazonaws.com/master/x86_64-unknown-linux-gnu/solana-perf.tgz
    https://solana-perf.s3.amazonaws.com/v0.8.0/x86_64-unknown-linux-gnu/solana-perf.tgz
  tar zxvf solana-perf.tgz
)
@@ -1,23 +1,66 @@
#!/bin/bash
#
# usage: $0 <rsync network path to solana repo on leader machine> <number of nodes in the network>"
#!/bin/bash -e
#
USAGE=" usage: $0 [leader_url] [num_nodes] [--loop] [extra args]

 leader_url       URL to the leader (defaults to ..)
 num_nodes        Minimum number of nodes to look for while converging
 --loop           Add this flag to cause the program to loop infinitely
 \"extra args\"   Any additional arguments are passed along to solana-bench-tps
"

here=$(dirname "$0")
# shellcheck source=multinode-demo/common.sh
source "$here"/common.sh

leader=${1:-${here}/..} # Default to local solana repo
count=${2:-1}
leader=$1
if [[ -n $leader ]]; then
  if [[ $leader == "-h" || $leader == "--help" ]]; then
    echo "$USAGE"
    exit 0
  fi
  shift
else
  if [[ -d "$SNAP" ]]; then
    leader=testnet.solana.com # Default to testnet when running as a Snap
  else
    leader=$here/.. # Default to local solana repo
  fi
fi

count=$1
if [[ -n $count ]]; then
  shift
else
  count=1
fi

loop=
if [[ $1 = --loop ]]; then
  loop=1
  shift
fi

rsync_leader_url=$(rsync_url "$leader")
(
  set -x
  mkdir -p "$SOLANA_CONFIG_CLIENT_DIR"
  $rsync -vPz "$rsync_leader_url"/config/leader.json "$SOLANA_CONFIG_CLIENT_DIR"/

  set -ex
  mkdir -p "$SOLANA_CONFIG_CLIENT_DIR"
  $rsync -vPz "$rsync_leader_url"/config/leader.json "$SOLANA_CONFIG_CLIENT_DIR"/
  $rsync -vPz "$rsync_leader_url"/config-private/mint.json "$SOLANA_CONFIG_CLIENT_DIR"/
  client_json="$SOLANA_CONFIG_CLIENT_DIR"/client.json
  [[ -r $client_json ]] || $solana_keygen -o "$client_json"
)

# shellcheck disable=SC2086 # $solana_client_demo should not be quoted
exec $solana_client_demo \
  -n "$count" -l "$SOLANA_CONFIG_CLIENT_DIR"/leader.json \
  < "$SOLANA_CONFIG_CLIENT_DIR"/mint.json
iteration=0
set -x
while true; do
  $solana_bench_tps \
    -n "$count" \
    -l "$SOLANA_CONFIG_CLIENT_DIR"/leader.json \
    -k "$SOLANA_CONFIG_CLIENT_DIR"/client.json \
    "$@"
  [[ -n $loop ]] || exit 0
  iteration=$((iteration + 1))
  echo ------------------------------------------------------------------------
  echo "Iteration: $iteration"
  echo ------------------------------------------------------------------------
done
@@ -4,26 +4,55 @@
# shellcheck disable=2034

rsync=rsync
if [[ -d "$SNAP" ]]; then # Running inside a Linux Snap?
leader_logger="cat"
validator_logger="cat"
drone_logger="cat"

if [[ $(uname) != Linux ]]; then
  # Protect against unsupported configurations to prevent non-obvious errors
  # later. Arguably these should be fatal errors but for now prefer tolerance.
  if [[ -n $USE_SNAP ]]; then
    echo "Warning: Snap is not supported on $(uname)"
    USE_SNAP=
  fi
  if [[ -n $SOLANA_CUDA ]]; then
    echo "Warning: CUDA is not supported on $(uname)"
    SOLANA_CUDA=
  fi
fi

if [[ -d $SNAP ]]; then # Running inside a Linux Snap?
  solana_program() {
    declare program="$1"
    if [[ "$program" = wallet ]]; then
      # TODO: Merge wallet.sh functionality into solana-wallet proper and
      # remove this special case
    if [[ "$program" = wallet || "$program" = bench-tps ]]; then
      # TODO: Merge wallet.sh/client.sh functionality into
      # solana-wallet/solana-demo-client proper and remove this special case
      printf "%s/bin/solana-%s" "$SNAP" "$program"
    else
      printf "%s/command-%s.wrapper" "$SNAP" "$program"
    fi
  }
  rsync="$SNAP"/bin/rsync
  SOLANA_CUDA="$(snapctl get enable-cuda)"
  multilog="$SNAP/bin/multilog t s16777215 n200"
  leader_logger="$multilog $SNAP_DATA/leader"
  validator_logger="$multilog t $SNAP_DATA/validator"
  drone_logger="$multilog $SNAP_DATA/drone"
  # Create log directories manually to prevent multilog from creating them as
  # 0700
  mkdir -p "$SNAP_DATA"/{drone,leader,validator}

elif [[ -n "$USE_SNAP" ]]; then # Use the Linux Snap binaries
  SOLANA_METRICS_CONFIG="$(snapctl get metrics-config)"
  SOLANA_DEFAULT_METRICS_RATE="$(snapctl get default-metrics-rate)"
  export SOLANA_DEFAULT_METRICS_RATE
  SOLANA_CUDA="$(snapctl get enable-cuda)"
  RUST_LOG="$(snapctl get rust-log)"

elif [[ -n $USE_SNAP ]]; then # Use the Linux Snap binaries
  solana_program() {
    declare program="$1"
    printf "solana.%s" "$program"
  }
elif [[ -n "$USE_INSTALL" ]]; then # Assume |cargo install| was run
elif [[ -n $USE_INSTALL ]]; then # Assume |cargo install| was run
  solana_program() {
    declare program="$1"
    printf "solana-%s" "$program"
@@ -36,48 +65,147 @@ else
    declare features=""
    if [[ "$program" =~ ^(.*)-cuda$ ]]; then
      program=${BASH_REMATCH[1]}
      features="--features=cuda,erasure"
      features="--features=cuda"
    fi
    if [[ -z "$DEBUG" ]]; then
    if [[ -z $DEBUG ]]; then
      maybe_release=--release
    fi
    printf "cargo run $maybe_release --bin solana-%s %s -- " "$program" "$features"
  }
  if [[ -n $SOLANA_CUDA ]]; then
    # shellcheck disable=2154 # 'here' is referenced but not assigned
    if [[ -z $here ]]; then
      echo "|here| is not defined"
      exit 1
    fi

    # Locate perf libs downloaded by |./fetch-perf-libs.sh|
    LD_LIBRARY_PATH=$(cd "$here" && dirname "$PWD"):$LD_LIBRARY_PATH
    export LD_LIBRARY_PATH
  fi
fi

solana_client_demo=$(solana_program client-demo)
solana_bench_tps=$(solana_program bench-tps)
solana_wallet=$(solana_program wallet)
solana_drone=$(solana_program drone)
solana_fullnode=$(solana_program fullnode)
solana_fullnode_config=$(solana_program fullnode-config)
solana_fullnode_cuda=$(solana_program fullnode-cuda)
solana_genesis=$(solana_program genesis)
solana_mint=$(solana_program mint)
solana_keygen=$(solana_program keygen)
solana_ledger_tool=$(solana_program ledger-tool)

export RUST_LOG=${RUST_LOG:-solana=info} # if RUST_LOG is unset, default to info
export RUST_BACKTRACE=1


# The SOLANA_METRICS_CONFIG environment variable is formatted as a
# comma-delimited list of parameters. All parameters are optional.
#
# Example:
#   export SOLANA_METRICS_CONFIG="host=<metrics host>,db=<database name>,u=<username>,p=<password>"
#
configure_metrics() {
  [[ -n $SOLANA_METRICS_CONFIG ]] || return 0

  declare metrics_params
  IFS=',' read -r -a metrics_params <<< "$SOLANA_METRICS_CONFIG"
  for param in "${metrics_params[@]}"; do
    IFS='=' read -r -a pair <<< "$param"
    if [[ ${#pair[@]} != 2 ]]; then
      echo Error: invalid metrics parameter: "$param" >&2
    else
      declare name="${pair[0]}"
      declare value="${pair[1]}"
      case "$name" in
      host)
        export INFLUX_HOST="$value"
        echo INFLUX_HOST="$INFLUX_HOST" >&2
        ;;
      db)
        export INFLUX_DATABASE="$value"
        echo INFLUX_DATABASE="$INFLUX_DATABASE" >&2
        ;;
      u)
        export INFLUX_USERNAME="$value"
        echo INFLUX_USERNAME="$INFLUX_USERNAME" >&2
        ;;
      p)
        export INFLUX_PASSWORD="$value"
        echo INFLUX_PASSWORD="********" >&2
        ;;
      *)
        echo Error: Unknown metrics parameter name: "$name" >&2
        ;;
      esac
    fi
  done
}
configure_metrics

tune_networking() {
  [[ $(uname) = Linux ]] && (set -x; sudo sysctl -w net.core.rmem_max=26214400 1>/dev/null 2>/dev/null)
  # Skip in CI
  [[ -z $CI ]] || return 0

  # Reference: https://medium.com/@CameronSparr/increase-os-udp-buffers-to-improve-performance-51d167bb1360
  if [[ $(uname) = Linux ]]; then
    (
      set -x +e
      # test the existence of the sysctls before trying to set them
      # go ahead and return true and don't exit if these calls fail
      sysctl net.core.rmem_max 2>/dev/null 1>/dev/null &&
        sudo sysctl -w net.core.rmem_max=26214400 1>/dev/null 2>/dev/null

      sysctl net.core.rmem_default 2>/dev/null 1>/dev/null &&
        sudo sysctl -w net.core.rmem_default=26214400 1>/dev/null 2>/dev/null
    ) || true
  fi

  if [[ $(uname) = Darwin ]]; then
    (
      if [[ $(sysctl net.inet.udp.maxdgram | cut -d\  -f2) != 65535 ]]; then
        echo "Adjusting maxdgram to allow for large UDP packets, see BLOB_SIZE in src/packet.rs:"
        set -x
        sudo sysctl net.inet.udp.maxdgram=65535
      fi
    )

  fi
}

oom_score_adj() {
  declare pid=$1
  declare score=$2
  if [[ $(uname) != Linux ]]; then
    return
  fi

  echo "$score" > "/proc/$pid/oom_score_adj" || true
  declare currentScore
  currentScore=$(cat "/proc/$pid/oom_score_adj" || true)
  if [[ $score != "$currentScore" ]]; then
    echo "Failed to set oom_score_adj to $score for pid $pid (current score: $currentScore)"
  fi
}

SOLANA_CONFIG_DIR=${SNAP_DATA:-$PWD}/config
SOLANA_CONFIG_PRIVATE_DIR=${SNAP_DATA:-$PWD}/config-private
SOLANA_CONFIG_CLIENT_DIR=${SNAP_USER_DATA:-$PWD}/config-client-client
SOLANA_CONFIG_VALIDATOR_DIR=${SNAP_DATA:-$PWD}/config-validator
SOLANA_CONFIG_CLIENT_DIR=${SNAP_USER_DATA:-$PWD}/config-client

rsync_url() { # adds the 'rsync://' prefix to URLs that need it
  declare url="$1"

  if [[ "$url" =~ ^.*:.*$ ]]; then
  if [[ $url =~ ^.*:.*$ ]]; then
    # assume remote-shell transport when colon is present, use $url unmodified
    echo "$url"
    return
    return 0
  fi

  if [[ -d "$url" ]]; then
  if [[ -d $url ]]; then
    # assume local directory if $url is a valid directory, use $url unmodified
    echo "$url"
    return
    return 0
  fi

  # Default to rsync:// URL
@ -21,7 +21,7 @@ if [[ -d "$SNAP" ]]; then
|
||||
fi
|
||||
leader="$leader_address"
|
||||
else
|
||||
leader=${1:-${here}/..} # Default to local solana repo
|
||||
leader=${1:-${here}/..} # Default to local tree for data
|
||||
fi
|
||||
|
||||
[[ -f "$SOLANA_CONFIG_PRIVATE_DIR"/mint.json ]] || {
|
||||
@ -36,6 +36,11 @@ set -ex
|
||||
mkdir -p "$SOLANA_CONFIG_DIR"
|
||||
$rsync -vPz "$rsync_leader_url"/config/leader.json "$SOLANA_CONFIG_DIR"/
|
||||
|
||||
# shellcheck disable=SC2086 # $solana_drone should not be quoted
|
||||
exec $solana_drone \
|
||||
-l "$SOLANA_CONFIG_DIR"/leader.json < "$SOLANA_CONFIG_PRIVATE_DIR"/mint.json
|
||||
|
||||
trap 'kill "$pid" && wait "$pid"' INT TERM
|
||||
$solana_drone \
|
||||
-l "$SOLANA_CONFIG_DIR"/leader.json -k "$SOLANA_CONFIG_PRIVATE_DIR"/mint.json \
|
||||
> >($drone_logger) 2>&1 &
|
||||
pid=$!
|
||||
oom_score_adj "$pid" 1000
|
||||
wait "$pid"
|
||||
|
multinode-demo/gce_multinode.sh (new executable file, 80 lines)
@@ -0,0 +1,80 @@
#!/bin/bash

command=$1
prefix=
num_nodes=
out_file=
image_name="ubuntu-16-04-cuda-9-2-new"

shift

usage() {
  exitcode=0
  if [[ -n "$1" ]]; then
    exitcode=1
    echo "Error: $*"
  fi
  cat <<EOF
usage: $0 <create|delete> <-p prefix> <-n num_nodes> <-o file> [-i image-name]

Manage a GCE multinode network

 create|delete    - Create or delete the network
 -p prefix        - A common prefix for node names, to avoid collision
 -n num_nodes     - Number of nodes
 -o out_file      - Used for create option. Outputs an array of IP addresses
                    of new nodes to the file
 -i image_name    - Existing image on GCE (default $image_name)

EOF
  exit $exitcode
}

while getopts "h?p:i:n:o:" opt; do
  case $opt in
  h | \?)
    usage
    ;;
  p)
    prefix=$OPTARG
    ;;
  i)
    image_name=$OPTARG
    ;;
  o)
    out_file=$OPTARG
    ;;
  n)
    num_nodes=$OPTARG
    ;;
  *)
    usage "Error: unhandled option: $opt"
    ;;
  esac
done

set -e

[[ -n $command ]] || usage "Need a command (create|delete)"

[[ -n $prefix ]] || usage "Need a prefix for GCE instance names"

[[ -n $num_nodes ]] || usage "Need number of nodes"

nodes=()
for i in $(seq 1 "$num_nodes"); do
  nodes+=("$prefix$i")
done

if [[ $command == "create" ]]; then
  [[ -n $out_file ]] || usage "Need an outfile to store IP Addresses"

  ip_addr_list=$(gcloud beta compute instances create "${nodes[@]}" --zone=us-west1-b --tags=testnet \
    --image="$image_name" | awk '/RUNNING/ {print $5}')

  echo "ip_addr_array=($ip_addr_list)" >"$out_file"
elif [[ $command == "delete" ]]; then
  gcloud beta compute instances delete "${nodes[@]}"
else
  usage "Unknown command: $command"
fi
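A usage sketch based on the help text above (prefix, count, and output path are illustrative):

```bash
# Create a 3-node network and record the public IPs
multinode-demo/gce_multinode.sh create -p mynet -n 3 -o ip_addrs.sh

# Tear it down again
multinode-demo/gce_multinode.sh delete -p mynet -n 3
```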
@@ -13,7 +13,7 @@ fi
[[ -f "$SOLANA_CONFIG_DIR"/leader.json ]] || {
  echo "$SOLANA_CONFIG_DIR/leader.json not found, create it by running:"
  echo
  echo "   ${here}/setup.sh -t leader"
  echo "   ${here}/setup.sh"
  exit 1
}

@@ -25,9 +25,11 @@ fi

tune_networking

# shellcheck disable=SC2086 # $program should not be quoted
exec $program \
  -l "$SOLANA_CONFIG_DIR"/leader.json \
  < <(shopt -s nullglob && cat "$SOLANA_CONFIG_DIR"/genesis.log \
        "$SOLANA_CONFIG_DIR"/tx-*.log) \
  > "$SOLANA_CONFIG_DIR"/tx-"$(date -u +%Y%m%d%H%M%S%N)".log
trap 'kill "$pid" && wait "$pid"' INT TERM
$program \
  --identity "$SOLANA_CONFIG_DIR"/leader.json \
  --ledger "$SOLANA_CONFIG_DIR"/ledger \
  > >($leader_logger) 2>&1 &
pid=$!
oom_score_adj "$pid" 1000
wait "$pid"
multinode-demo/metrics_write_datapoint.sh (new executable file, 16 lines)
@@ -0,0 +1,16 @@
#!/bin/bash -e

point=$1
if [[ -z $point ]]; then
  echo "Data point not specified"
  exit 1
fi

echo "Influx data point: $point"
if [[ -z $INFLUX_DATABASE || -z $INFLUX_USERNAME || -z $INFLUX_PASSWORD ]]; then
  echo Influx user credentials not found
  exit 0
fi

echo "https://metrics.solana.com:8086/write?db=${INFLUX_DATABASE}&u=${INFLUX_USERNAME}&p=${INFLUX_PASSWORD}" \
  | xargs curl --max-time 5 -XPOST --data-binary "$point"
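This is exactly how ci/testnet-deploy.sh above emits its start/stop datapoints. A standalone sketch (assuming the INFLUX_* variables were populated, e.g. by configure_metrics; the measurement name is taken from the deploy script):

```bash
multinode-demo/metrics_write_datapoint.sh "testnet-deploy,name=testnet stop=1"
```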
@@ -1,59 +0,0 @@
#!/bin/bash

function myip()
{
  # shellcheck disable=SC2207
  declare ipaddrs=(
    # query interwebs
    $(curl -s ifconfig.co)
    # machine's interfaces
    $(ifconfig |
        awk '/inet addr:/ {gsub("addr:","",$2); print $2; next}
             /inet6 addr:/ {gsub("/.*", "", $3); print $3; next}
             /inet(6)? / {print $2}'
    )
  )

  if (( ! ${#ipaddrs[*]} ))
  then
    echo "
myip: error: I'm having trouble determining what our IP address is...
  Are we connected to a network?

"
    return 1
  fi


  declare prompt="
Please choose the IP address you want to advertise to the network:

  0) ${ipaddrs[0]} <====== this one was returned by the interwebs...
"

  for ((i=1; i < ${#ipaddrs[*]}; i++))
  do
    prompt+="  $i) ${ipaddrs[i]}
"
  done

  while read -r -p "${prompt}
please enter a number [0 for default]: " which
  do
    [[ -z ${which} ]] && break;
    [[ ${which} =~ [0-9]+ ]] && (( which < ${#ipaddrs[*]} )) && break;
    echo "Ug. invalid entry \"${which}\"...
"
    sleep 1
  done

  which=${which:-0}

  echo "${ipaddrs[which]}"

}

if [[ ${0} == "${BASH_SOURCE[0]}" ]]
then
  myip "$@"
fi
multinode-demo/oom_monitor.sh (new executable file, 32 lines)
@@ -0,0 +1,32 @@
#!/bin/bash -e
#
# Reports Linux OOM Killer activity
#

here=$(dirname "$0")
# shellcheck source=multinode-demo/common.sh
source "$here"/common.sh

if [[ $(uname) != Linux ]]; then
  exit 0
fi

syslog=/var/log/syslog
if [[ ! -r $syslog ]]; then
  echo Unable to read $syslog
  exit 0
fi

# Adjust OOM score to reduce the chance that this script will be killed
# during an Out of Memory event since the purpose of this script is to
# report such events
oom_score_adj "self" -500

while read -r victim; do
  echo "Out of memory event detected, $victim killed"
  "$here"/metrics_write_datapoint.sh "oom-killer,victim=$victim killed=1"
done < <( \
  tail --follow=name --retry -n0 $syslog \
  | sed --unbuffered -n 's/^.* Out of memory: Kill process [1-9][0-9]* (\([^)]*\)) .*/\1/p' \
)
exit 1
multinode-demo/remote_leader.sh (new executable file, 14 lines)
@@ -0,0 +1,14 @@
#!/bin/bash -e

[[ -n $FORCE ]] || exit

chmod 600 ~/.ssh/authorized_keys ~/.ssh/id_rsa

PATH="$HOME"/.cargo/bin:"$PATH"

./fetch-perf-libs.sh

# Run setup
USE_INSTALL=1 ./multinode-demo/setup.sh -p
USE_INSTALL=1 ./multinode-demo/drone.sh >drone.log 2>&1 &
USE_INSTALL=1 SOLANA_CUDA=1 ./multinode-demo/leader.sh >leader.log 2>&1 &
multinode-demo/remote_nodes.sh (new executable file, 185 lines)
@@ -0,0 +1,185 @@
#!/bin/bash

command=$1
ip_addr_file=
remote_user=
ssh_keys=

shift

usage() {
  exitcode=0
  if [[ -n "$1" ]]; then
    exitcode=1
    echo "Error: $*"
  fi
  cat <<EOF
usage: $0 <start|stop> <-f IP Addr Array file> <-u username> [-k ssh-keys]

Manage a GCE multinode network

 start|stop    - Start or stop the network
 -f file       - A bash script that exports an array of IP addresses, ip_addr_array.
                 Elements of the array are public IP addresses of remote nodes.
 -u username   - The username for logging into remote nodes.
 -k ssh-keys   - Path to public/private key pair that remote nodes can use to perform
                 rsync and ssh among themselves. Must contain pub and priv keys.

EOF
  exit $exitcode
}

while getopts "h?f:u:k:" opt; do
  case $opt in
  h | \?)
    usage
    ;;
  f)
    ip_addr_file=$OPTARG
    ;;
  u)
    remote_user=$OPTARG
    ;;
  k)
    ssh_keys=$OPTARG
    ;;
  *)
    usage "Error: unhandled option: $opt"
    ;;
  esac
done

set -e

# Sample IP Address array file contents
# ip_addr_array=(192.168.1.1 192.168.1.5 192.168.2.2)

[[ -n $command ]] || usage "Need a command (start|stop)"
[[ -n $ip_addr_file ]] || usage "Need a file with IP address array"
[[ -n $remote_user ]] || usage "Need the username for remote nodes"

ip_addr_array=()
# Get IP address array
# shellcheck source=/dev/null
source "$ip_addr_file"

build_project() {
  echo "Build started at $(date)"
  SECONDS=0

  # Build and install locally
  PATH="$HOME"/.cargo/bin:"$PATH"
  cargo install --force

  echo "Build took $SECONDS seconds"
}

common_start_setup() {
  ip_addr=$1

  # Killing sshguard for now. TODO: Find a better solution
  # sshguard is blacklisting IP address after ssh-keyscan and ssh login attempts
  ssh "$remote_user@$ip_addr" " \
    set -ex; \
    sudo service sshguard stop; \
    sudo apt-get --assume-yes install rsync libssl-dev; \
    mkdir -p ~/.ssh ~/solana ~/.cargo/bin; \
  " >log/"$ip_addr".log

  # If provided, deploy SSH keys
  if [[ -n $ssh_keys ]]; then
    {
      rsync -vPrz "$ssh_keys"/id_rsa "$remote_user@$ip_addr":~/.ssh/
      rsync -vPrz "$ssh_keys"/id_rsa.pub "$remote_user@$ip_addr":~/.ssh/
      rsync -vPrz "$ssh_keys"/id_rsa.pub "$remote_user@$ip_addr":~/.ssh/authorized_keys
      rsync -vPrz ./multinode-demo "$remote_user@$ip_addr":~/solana/
    } >>log/"$ip_addr".log
  fi
}

start_leader() {
  common_start_setup "$1"

  {
    rsync -vPrz ~/.cargo/bin/solana* "$remote_user@$ip_addr":~/.cargo/bin/
    rsync -vPrz ./fetch-perf-libs.sh "$remote_user@$ip_addr":~/solana/
    ssh -n -f "$remote_user@$ip_addr" 'cd solana; FORCE=1 ./multinode-demo/remote_leader.sh'
  } >>log/"$1".log

  leader_ip=$1
  leader_time=$SECONDS
  SECONDS=0
}

start_validator() {
  common_start_setup "$1"

  ssh -n -f "$remote_user@$ip_addr" "cd solana; FORCE=1 ./multinode-demo/remote_validator.sh $leader_ip" >>log/"$1".log
}

start_all_nodes() {
  echo "Deployment started at $(date)"
  SECONDS=0
  count=0
  leader_ip=
  leader_time=

  mkdir -p log

  for ip_addr in "${ip_addr_array[@]}"; do
    if ((!count)); then
      # Start the leader on the first node
      echo "Leader node $ip_addr, killing previous instance and restarting"
      start_leader "$ip_addr"
    else
      # Start validator on all other nodes
      echo "Validator[$count] node $ip_addr, killing previous instance and restarting"
      start_validator "$ip_addr" &
      # TBD: Remove the sleep or reduce time once GCP login quota is increased
      sleep 2
    fi

    ((count = count + 1))
  done

  wait

  ((validator_count = count - 1))

  echo "Deployment finished at $(date)"
  echo "Leader deployment took $leader_time seconds"
  echo "$validator_count Validator deployment took $SECONDS seconds"
}

stop_all_nodes() {
  SECONDS=0
  local count=0
  for ip_addr in "${ip_addr_array[@]}"; do
    ssh-keygen -R "$ip_addr" >log/local.log
    ssh-keyscan "$ip_addr" >>~/.ssh/known_hosts 2>/dev/null

    echo "Stopping node[$count] $ip_addr. Remote user $remote_user"

    ssh -n -f "$remote_user@$ip_addr" " \
      set -ex; \
      sudo service sshguard stop; \
      pkill -9 solana-; \
      pkill -9 validator; \
      pkill -9 leader; \
    "
    sleep 2
    ((count = count + 1))
    echo "Stopped node[$count] $ip_addr"
  done
  echo "Stopping $count nodes took $SECONDS seconds"
}

if [[ $command == "start" ]]; then
  build_project
  stop_all_nodes
  start_all_nodes
elif [[ $command == "stop" ]]; then
  stop_all_nodes
else
  usage "Unknown command: $command"
fi
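A usage sketch, feeding it the IP file produced by gce_multinode.sh above (paths and username are illustrative):

```bash
multinode-demo/remote_nodes.sh start -f ip_addrs.sh -u testnet-user -k ~/ssh-keys
multinode-demo/remote_nodes.sh stop -f ip_addrs.sh -u testnet-user
```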
multinode-demo/remote_validator.sh (new executable file, 17 lines)
@@ -0,0 +1,17 @@
#!/bin/bash -e

[[ -n $FORCE ]] || exit

chmod 600 ~/.ssh/authorized_keys ~/.ssh/id_rsa

PATH="$HOME"/.cargo/bin:"$PATH"

touch ~/.ssh/known_hosts
ssh-keygen -R "$1" 2>/dev/null
ssh-keyscan "$1" >>~/.ssh/known_hosts 2>/dev/null

rsync -vPrz "$1":~/.cargo/bin/solana* ~/.cargo/bin/

# Run setup
USE_INSTALL=1 ./multinode-demo/setup.sh -p
USE_INSTALL=1 ./multinode-demo/validator.sh "$1":~/solana "$1" >validator.log 2>&1
@@ -71,35 +71,40 @@ done

leader_address_args=("$ip_address_arg")
validator_address_args=("$ip_address_arg" -b 9000)
leader_id_path="$SOLANA_CONFIG_PRIVATE_DIR"/leader-id.json
validator_id_path="$SOLANA_CONFIG_PRIVATE_DIR"/validator-id.json
mint_path="$SOLANA_CONFIG_PRIVATE_DIR"/mint.json

set -e

echo "Cleaning $SOLANA_CONFIG_DIR"
rm -rvf "$SOLANA_CONFIG_DIR"
mkdir -p "$SOLANA_CONFIG_DIR"
for i in "$SOLANA_CONFIG_DIR" "$SOLANA_CONFIG_PRIVATE_DIR" "$SOLANA_CONFIG_VALIDATOR_DIR"; do
  echo "Cleaning $i"
  rm -rvf "$i"
  mkdir -p "$i"
done


$solana_keygen -o "$leader_id_path"
$solana_keygen -o "$validator_id_path"

if $node_type_leader; then
  rm -rvf "$SOLANA_CONFIG_PRIVATE_DIR"
  mkdir -p "$SOLANA_CONFIG_PRIVATE_DIR"
  echo "Creating $mint_path with $num_tokens tokens"
  $solana_keygen -o "$mint_path"

  echo "Creating $SOLANA_CONFIG_DIR/mint.json with $num_tokens tokens"
  $solana_mint <<<"$num_tokens" > "$SOLANA_CONFIG_PRIVATE_DIR"/mint.json

  echo "Creating $SOLANA_CONFIG_DIR/genesis.log"
  $solana_genesis < "$SOLANA_CONFIG_PRIVATE_DIR"/mint.json > "$SOLANA_CONFIG_DIR"/genesis.log
  echo "Creating $SOLANA_CONFIG_DIR/ledger"
  $solana_genesis --tokens="$num_tokens" --ledger "$SOLANA_CONFIG_DIR"/ledger < "$mint_path"

  echo "Creating $SOLANA_CONFIG_DIR/leader.json"
  $solana_fullnode_config "${leader_address_args[@]}" > "$SOLANA_CONFIG_DIR"/leader.json
  $solana_fullnode_config --keypair="$leader_id_path" "${leader_address_args[@]}" > "$SOLANA_CONFIG_DIR"/leader.json
fi


if $node_type_validator; then
  echo "Creating $SOLANA_CONFIG_DIR/validator.json"
  $solana_fullnode_config "${validator_address_args[@]}" > "$SOLANA_CONFIG_DIR"/validator.json
  echo "Creating $SOLANA_CONFIG_VALIDATOR_DIR/validator.json"
  $solana_fullnode_config --keypair="$validator_id_path" "${validator_address_args[@]}" > "$SOLANA_CONFIG_VALIDATOR_DIR"/validator.json
fi

ls -lh "$SOLANA_CONFIG_DIR"/
ls -lhR "$SOLANA_CONFIG_DIR"/
if $node_type_leader; then
  ls -lh "$SOLANA_CONFIG_PRIVATE_DIR"
  ls -lhR "$SOLANA_CONFIG_PRIVATE_DIR"
fi
@@ -1,80 +0,0 @@
#!/bin/bash

ip_addr_file=$1
remote_user=$2
ssh_keys=$3

usage() {
  echo -e "\\tUsage: $0 <IP Address array> <username> [path to ssh keys]\\n"
  echo -e "\\t <IP Address array>: A bash script that exports an array of IP addresses, ip_addr_array. Elements of the array are public IP address of remote nodes."
  echo -e "\\t <username>: The username for logging into remote nodes."
  echo -e "\\t [path to ssh keys]: The public/private key pair that remote nodes can use to perform rsync and ssh among themselves. Must contain pub, priv and authorized_keys.\\n"
}

# Sample IP Address array file contents
# ip_addr_array=(192.168.1.1 192.168.1.5 192.168.2.2)

if [[ -z "$ip_addr_file" ]]; then
  usage
  exit 1
fi

if [[ -z "$remote_user" ]]; then
  usage
  exit 1
fi

# Build and install locally
PATH="$HOME"/.cargo/bin:"$PATH"
cargo install --force

ip_addr_array=()
# Get IP address array
# shellcheck source=/dev/null
source "$ip_addr_file"

# shellcheck disable=SC2089,SC2016
ssh_command_prefix='export PATH="$HOME/.cargo/bin:$PATH"; cd solana; USE_INSTALL=1 ./multinode-demo/'

count=0
leader=
for ip_addr in "${ip_addr_array[@]}"; do
  echo "$ip_addr"

  # Deploy build and scripts to remote node
  rsync -r -av ~/.cargo/bin "$remote_user"@"$ip_addr":~/.cargo
  rsync -r -av ./multinode-demo "$remote_user"@"$ip_addr":~/solana/

  # If provided, deploy SSH keys
  if [[ -z $ssh_keys ]]; then
    echo "skip copying the ssh keys"
  else
    rsync -r -av "$ssh_keys"/* "$remote_user"@"$ip_addr":~/.ssh/
  fi

  # Stop current nodes
  ssh "$remote_user"@"$ip_addr" 'pkill -9 solana-fullnode'
  ssh "$remote_user"@"$ip_addr" 'pkill -9 solana-client-demo'

  # Run setup
  ssh "$remote_user"@"$ip_addr" "$ssh_command_prefix"'setup.sh -p "$ip_addr"'

  if (( !count )); then
    # Start the leader on the first node
    echo "Starting leader node $ip_addr"
    ssh -n -f "$remote_user"@"$ip_addr" "$ssh_command_prefix"'leader.sh > leader.log 2>&1'
    leader=${ip_addr_array[0]}
  else
    # Start validator on all other nodes
    echo "Starting validator node $ip_addr"
    ssh -n -f "$remote_user"@"$ip_addr" "$ssh_command_prefix""validator.sh $remote_user@$leader:~/solana $leader > validator.log 2>&1"
  fi

  (( count++ ))

  if (( count == ${#ip_addr_array[@]} )); then
    # Launch client demo on the last node
    echo "Starting client demo on $ip_addr"
    ssh -n -f "$remote_user"@"$ip_addr" "$ssh_command_prefix""client.sh $remote_user@$leader:~/solana $count > client.log 2>&1"
  fi
done
@@ -6,7 +6,13 @@
here=$(dirname "$0")
cd "$here"

wallet="../wallet.sh $1"
if [[ -n "$USE_SNAP" ]]; then
  # TODO: Merge wallet.sh functionality into solana-wallet proper and
  # remove this USE_SNAP case
  wallet="solana.wallet $1"
else
  wallet="../wallet.sh $1"
fi

# Tokens transferred to this address are lost forever...
garbage_address=vS3ngn1TfQmpsW1Z4NkLuqNAQFF3dYQw8UZ6TCx9bmq
multinode-demo/validator-x.sh (new executable file, 4 lines)
@@ -0,0 +1,4 @@
#!/bin/bash
here=$(dirname "$0")

exec "$here"/validator.sh -x "$@"
@ -4,77 +4,108 @@ here=$(dirname "$0")
|
||||
source "$here"/common.sh
|
||||
|
||||
usage() {
|
||||
if [[ -n "$1" ]]; then
|
||||
if [[ -n $1 ]]; then
|
||||
echo "$*"
|
||||
echo
|
||||
fi
|
||||
echo "usage: $0 [rsync network path to solana repo on leader machine] [network ip address of leader]"
|
||||
echo "usage: $0 [-x] [rsync network path to solana repo on leader machine] [network ip address of leader]"
|
||||
echo ""
|
||||
echo " -x: runs a new, dynamically-configured validator"
|
||||
exit 1
|
||||
}
|
||||
|
||||
if [[ "$1" = "-h" || -n "$3" ]]; then
|
||||
if [[ $1 = -h ]]; then
|
||||
usage
|
||||
fi
|
||||
|
||||
if [[ -d "$SNAP" ]]; then
|
||||
if [[ $1 == -x ]]; then
|
||||
self_setup=1
|
||||
shift
|
||||
else
|
||||
self_setup=0
|
||||
fi
|
||||
|
||||
if [[ -n $3 ]]; then
|
||||
usage
|
||||
fi
|
||||
|
||||
if [[ -d $SNAP ]]; then
|
||||
# Exit if mode is not yet configured
|
||||
# (typically the case after the Snap is first installed)
|
||||
[[ -n "$(snapctl get mode)" ]] || exit 0
|
||||
[[ -n $(snapctl get mode) ]] || exit 0
|
||||
|
||||
# Select leader from the Snap configuration
|
||||
leader_address="$(snapctl get leader-address)"
|
||||
if [[ -z "$leader_address" ]]; then
|
||||
leader_address=$(snapctl get leader-address)
|
||||
if [[ -z $leader_address ]]; then
|
||||
# Assume public testnet by default
|
||||
leader_address=35.230.65.68 # testnet.solana.com
|
||||
leader_address=35.227.93.37 # testnet.solana.com
|
||||
fi
|
||||
leader="$leader_address"
|
||||
leader=$leader_address
|
||||
else
|
||||
if [[ -n "$3" ]]; then
|
||||
usage
|
||||
fi
|
||||
|
||||
if [[ -z "$1" ]]; then
|
||||
leader=${1:-${here}/..} # Default to local solana repo
|
||||
if [[ -z $1 ]]; then
|
||||
leader=${1:-${here}/..} # Default to local tree for data
|
||||
leader_address=${2:-127.0.0.1} # Default to local leader
|
||||
elif [[ -z "$2" ]]; then
|
||||
leader="$1"
|
||||
leader_address=$(dig +short "$1" | head -n1)
|
||||
if [[ -z "$leader_address" ]]; then
|
||||
elif [[ -z $2 ]]; then
|
||||
leader=$1
|
||||
leader_address=$(dig +short "${leader%:*}" | head -n1)
|
||||
if [[ -z $leader_address ]]; then
|
||||
usage "Error: unable to resolve IP address for $leader"
|
||||
fi
|
||||
else
|
||||
leader="$1"
|
||||
leader_address="$2"
|
||||
leader=$1
|
||||
leader_address=$2
|
||||
fi
|
||||
fi
|
||||
leader_port=8001
|
||||
|
||||
if [[ -n "$SOLANA_CUDA" ]]; then
|
||||
program="$solana_fullnode_cuda"
|
||||
if [[ -n $SOLANA_CUDA ]]; then
|
||||
program=$solana_fullnode_cuda
|
||||
else
|
||||
program="$solana_fullnode"
|
||||
program=$solana_fullnode
|
||||
fi
|
||||
|
||||
if ((!self_setup)); then
|
||||
[[ -f $SOLANA_CONFIG_VALIDATOR_DIR/validator.json ]] || {
|
||||
echo "$SOLANA_CONFIG_VALIDATOR_DIR/validator.json not found, create it by running:"
|
||||
echo
|
||||
echo " ${here}/setup.sh"
|
||||
exit 1
|
||||
}
|
||||
validator_json_path=$SOLANA_CONFIG_VALIDATOR_DIR/validator.json
|
||||
SOLANA_LEADER_CONFIG_DIR=$SOLANA_CONFIG_VALIDATOR_DIR/leader-config
|
||||
else
|
||||
mkdir -p "$SOLANA_CONFIG_PRIVATE_DIR"
|
||||
validator_id_path=$SOLANA_CONFIG_PRIVATE_DIR/validator-id-x$$.json
|
||||
$solana_keygen -o "$validator_id_path"
|
||||
|
||||
[[ -f "$SOLANA_CONFIG_DIR"/validator.json ]] || {
|
||||
echo "$SOLANA_CONFIG_DIR/validator.json not found, create it by running:"
|
||||
echo
|
||||
echo " ${here}/setup.sh -t validator"
|
||||
exit 1
|
||||
}
|
||||
mkdir -p "$SOLANA_CONFIG_VALIDATOR_DIR"
|
||||
validator_json_path=$SOLANA_CONFIG_VALIDATOR_DIR/validator-x$$.json
|
||||
|
||||
port=9000
|
||||
(((port += ($$ % 1000)) && (port == 9000) && port++))
|
||||
|
||||
$solana_fullnode_config --keypair="$validator_id_path" -l -b "$port" > "$validator_json_path"
|
||||
|
||||
SOLANA_LEADER_CONFIG_DIR=$SOLANA_CONFIG_VALIDATOR_DIR/leader-config-x$$
|
||||
fi
|
||||
|
||||
rsync_leader_url=$(rsync_url "$leader")
|
||||
|
||||
set -ex
|
||||
SOLANA_LEADER_CONFIG_DIR="$SOLANA_CONFIG_DIR"/leader-config
|
||||
rm -rf "$SOLANA_LEADER_CONFIG_DIR"
|
||||
$rsync -vPrz "$rsync_leader_url"/config/ "$SOLANA_LEADER_CONFIG_DIR"
|
||||
ls -lh "$SOLANA_LEADER_CONFIG_DIR"
|
||||
|
||||
tune_networking
|
||||
|
||||
# shellcheck disable=SC2086 # $program should not be quoted
|
||||
exec $program \
|
||||
-l "$SOLANA_CONFIG_DIR"/validator.json -t "$leader_address:$leader_port" \
|
||||
< <(shopt -s nullglob && cat "$SOLANA_LEADER_CONFIG_DIR"/genesis.log \
|
||||
"$SOLANA_LEADER_CONFIG_DIR"/tx-*.log)
|
||||
set -ex
|
||||
$rsync -vPr "$rsync_leader_url"/config/ "$SOLANA_LEADER_CONFIG_DIR"
|
||||
[[ -d $SOLANA_LEADER_CONFIG_DIR/ledger ]] || {
|
||||
echo "Unable to retrieve ledger from $rsync_leader_url"
|
||||
exit 1
|
||||
}
|
||||
|
||||
trap 'kill "$pid" && wait "$pid"' INT TERM
|
||||
$program \
|
||||
--identity "$validator_json_path" \
|
||||
--testnet "$leader_address:$leader_port" \
|
||||
--ledger "$SOLANA_LEADER_CONFIG_DIR"/ledger \
|
||||
> >($validator_logger) 2>&1 &
|
||||
pid=$!
|
||||
oom_score_adj "$pid" 1000
|
||||
wait "$pid"
|
||||
|
@ -30,18 +30,16 @@ rsync_leader_url=$(rsync_url "$leader")
|
||||
set -e
|
||||
mkdir -p "$SOLANA_CONFIG_CLIENT_DIR"
|
||||
if [[ ! -r "$SOLANA_CONFIG_CLIENT_DIR"/leader.json ]]; then
|
||||
(
|
||||
set -x
|
||||
$rsync -vPz "$rsync_leader_url"/config/leader.json "$SOLANA_CONFIG_CLIENT_DIR"/
|
||||
)
|
||||
echo "Fetching leader configuration from $rsync_leader_url"
|
||||
$rsync -Pz "$rsync_leader_url"/config/leader.json "$SOLANA_CONFIG_CLIENT_DIR"/
|
||||
fi
|
||||
|
||||
client_json="$SOLANA_CONFIG_CLIENT_DIR"/client.json
|
||||
if [[ ! -r $client_json ]]; then
|
||||
$solana_mint <<<0 > "$client_json"
|
||||
client_id_path="$SOLANA_CONFIG_CLIENT_DIR"/id.json
|
||||
if [[ ! -r $client_id_path ]]; then
|
||||
echo "Generating client identity: $client_id_path"
|
||||
$solana_keygen -o "$client_id_path"
|
||||
fi
|
||||
|
||||
set -x
|
||||
# shellcheck disable=SC2086 # $solana_wallet should not be quoted
|
||||
exec $solana_wallet \
|
||||
-l "$SOLANA_CONFIG_CLIENT_DIR"/leader.json -m "$client_json" "$@"
|
||||
-l "$SOLANA_CONFIG_CLIENT_DIR"/leader.json -k "$client_id_path" "$@"
|
||||
|
rfcs/rfc-004-tictactoe-program.md (new file, 77 lines)
@@ -0,0 +1,77 @@

Two players want to play tic-tac-toe with each other on Solana.

The tic-tac-toe program has already been provisioned on the network, and the
program author has advertised the following information to potential gamers:
* `tictactoe_publickey` - the program's public key
* `tictactoe_gamestate_size` - the number of bytes needed to maintain the game state

The game state is a well-documented data structure (see the sketch after this
list) consisting of:
- Player 1's public key
- Player 2's public key
- Game status. An 8-bit value where:
  * 0 = game uninitialized
  * 1 = Player 1's turn
  * 2 = Player 2's turn
  * 3 = Player 1 won
  * 4 = Player 2 won
- Current board configuration. A 3x3 character array containing the values '\0', 'X' or 'O'
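
As a concrete illustration, the fields above could be laid out as follows in
Rust. This is a hedged sketch only: the `Status` and `GameState` names, the
derives, and the fixed 32-byte key representation are assumptions made for
this document, not part of the program's advertised interface.

```
// Illustrative only -- names and layout are assumptions, not the program's ABI.
#[derive(Clone, Copy, PartialEq)]
#[repr(u8)]
enum Status {
    Uninitialized = 0, // game uninitialized
    Player1Turn = 1,
    Player2Turn = 2,
    Player1Won = 3,
    Player2Won = 4,
}

struct GameState {
    player1: [u8; 32],   // Player 1's public key
    player2: [u8; 32],   // Player 2's public key
    status: Status,      // 8-bit game status
    board: [[u8; 3]; 3], // each cell is '\0', b'X' or b'O'
}
```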
### Game Setup

1. Two players want to start a game. Player 2 sends Player 1 their public key,
`player2_publickey`, off-chain (IM, email, etc.)

2. Player 1 creates a new keypair to represent the game state, `(gamestate_publickey,
gamestate_privatekey)`.

3. Player 1 issues an allocate_memory transaction, assigning that memory page to the
tic-tac-toe program. The `memory_fee` is used to *rent* the memory page for the
duration of the game and is subtracted from the current account balance of Player
1:
```
allocate_memory(gamestate_publickey, tictactoe_publickey, tictactoe_gamestate_size, memory_fee)
```

4. Game state is then initialized by issuing a *new* call transaction to the
tic-tac-toe program. This transaction is signed by `gamestate_privatekey`, known only
to Player 1.
```
call(tictactoe_publickey, gamestate_publickey, 'new', player1_publickey, player2_publickey)
```

5. Once the game is initialized, Player 1 shares `gamestate_publickey` with
Player 2 off-chain (IM, email, etc.)

Note that each player will likely prefer to generate a game-specific keypair
rather than sharing their primary public key (`player1_publickey`,
`player2_publickey`) with each other and the tic-tac-toe program.

### Game Play

Both players poll the network, via a **TBD off-chain RPC API**, to read the
current game state from the `gamestate_publickey` memory page.

When the *Game status* field indicates it's their turn, the player issues a
*move* call transaction, passing in the board position (1..9) that they want to
mark as X or O:
```
call(tictactoe_publickey, gamestate_publickey, 'move', position)
```
The program will reject the transaction if it was not signed by the player whose
turn it is.
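
The turn check is the heart of the *move* handler. Assuming the illustrative
`GameState` sketch above and a `signer` public key supplied by the runtime
(both are assumptions of this document, not the program's published
interface), the rejection logic could look like:

```
// Illustrative turn validation -- `signer` is assumed to be the key that
// signed the incoming *move* transaction.
fn validate_move(state: &GameState, signer: &[u8; 32]) -> Result<(), &'static str> {
    match state.status {
        Status::Player1Turn if signer == &state.player1 => Ok(()),
        Status::Player2Turn if signer == &state.player2 => Ok(()),
        _ => Err("not this player's turn, or game not in progress"),
    }
}
```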

The outcome of the *move* call is also observed by polling the current game state via
the **TBD off-chain RPC API**.

### Game Cancellation

At any time Player 1 may conclude the game by issuing:
```
call(tictactoe_publickey, gamestate_publickey, 'abort')
```
causing any remaining *rent* tokens assigned to the `gamestate_publickey` page
to be transferred back to Player 1 by the tic-tac-toe program. Lastly, the
network recognizes the empty account and frees the `gamestate_publickey` memory
page.
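
A minimal sketch of what the *abort* handler implies, again assuming the
illustrative types above and a runtime that reports the page's remaining rent
balance (the `remaining_rent` parameter and the return-value convention are
invented here for illustration):

```
// Illustrative 'abort' -- only Player 1 may abort; the returned amount is the
// rent to refund to Player 1, after which the emptied page can be freed.
fn abort(state: &mut GameState, signer: &[u8; 32], remaining_rent: u64) -> Result<u64, &'static str> {
    if signer != &state.player1 {
        return Err("only Player 1 may abort the game");
    }
    state.status = Status::Uninitialized; // leave the account empty
    Ok(remaining_rent)
}
```
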
snap/hooks/configure (vendored, 15 lines changed)
@@ -4,27 +4,30 @@ echo Stopping daemons

snapctl stop --disable solana.daemon-drone
snapctl stop --disable solana.daemon-leader
snapctl stop --disable solana.daemon-validator
snapctl stop --disable solana.daemon-oom-monitor

mode="$(snapctl get mode)"
if [[ -z "$mode" ]]; then
  exit 0
fi

ip_address_arg=-p # Use public IP address (TODO: make this configurable?)
num_tokens="$(snapctl get num-tokens)"
num_tokens="${num_tokens:+-n $num_tokens}"

setup_args="$(snapctl get setup-args)"

case $mode in
leader+drone)
  $SNAP/bin/setup.sh ${num_tokens:+-n $num_tokens} ${ip_address_arg} -t leader
  snapctl start --enable solana.daemon-leader
  "$SNAP"/bin/setup.sh -t leader $num_tokens -p $setup_args
  snapctl start --enable solana.daemon-drone
  snapctl start --enable solana.daemon-leader
  ;;
leader)
  $SNAP/bin/setup.sh ${num_tokens:+-n $num_tokens} ${ip_address_arg} -t leader
  "$SNAP"/bin/setup.sh -t leader $num_tokens -p $setup_args
  snapctl start --enable solana.daemon-leader
  ;;
validator)
  $SNAP/bin/setup.sh ${ip_address_arg} -t validator
  "$SNAP"/bin/setup.sh -t validator -p $setup_args
  snapctl start --enable solana.daemon-validator
  ;;
*)
@@ -32,3 +35,5 @@ validator)
  exit 1
  ;;
esac

snapctl start --enable solana.daemon-oom-monitor
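
In practice this hook runs whenever a snap configuration value changes.
Presumably a user selects a role with something along the lines of
`sudo snap set solana mode=leader+drone` (optionally also setting `num-tokens`
and `setup-args`, the other keys read via `snapctl get` above); the hook then
stops all daemons, re-runs setup.sh for the chosen role, and re-enables only
the matching daemons plus the OOM monitor.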

snap/snapcraft.yaml
@@ -37,40 +37,73 @@ apps:
    plugs:
      - network
      - network-bind
      - home
  genesis:
    command: solana-genesis
  mint:
    command: solana-mint
  client-demo:
    command: solana-client-demo
  keygen:
    command: solana-keygen
    plugs:
      - home
  ledger-tool:
    command: solana-ledger-tool
    plugs:
      - home
  bench-tps:
    # TODO: Merge client.sh functionality into solana-bench-tps proper
    command: client.sh
    #command: solana-bench-tps
    plugs:
      - network
      - network-bind
      - home
  wallet:
    # TODO: Merge wallet.sh functionality into solana-wallet proper
    command: wallet.sh
    #command: solana-wallet

    plugs:
      - network
      - home
  daemon-validator:
    daemon: simple
    command: validator.sh

    plugs:
      - network
      - network-bind
  daemon-leader:
    daemon: simple
    command: leader.sh

    plugs:
      - network
      - network-bind
  daemon-drone:
    daemon: simple
    command: drone.sh
    plugs:
      - network
      - network-bind
  daemon-oom-monitor:
    daemon: simple
    command: oom_monitor.sh
    plugs:
      - network

parts:
  solana:
    plugin: nil
    prime:
      - bin
      - usr/lib/libgf_complete.so.1
      - usr/lib/libJerasure.so.2
      - usr/lib
    override-build: |
      # Install CUDA 9.2 runtime
      mkdir -p $SNAPCRAFT_PART_INSTALL/usr/lib/nvidia-396/
      mkdir -p $SNAPCRAFT_PART_INSTALL/usr/lib/x86_64-linux-gnu/
      cp -rav /usr/local/cuda-9.2/targets/x86_64-linux/lib/libcudart.so* $SNAPCRAFT_PART_INSTALL/usr/lib
      cp -rav /usr/lib/x86_64-linux-gnu/libcuda.so* $SNAPCRAFT_PART_INSTALL/usr/lib/x86_64-linux-gnu/
      cp -v /usr/lib/nvidia-396/libnvidia-fatbinaryloader.so* $SNAPCRAFT_PART_INSTALL/usr/lib/nvidia-396/

      # Build/install solana-fullnode-cuda
      ./fetch-perf-libs.sh
      cargo install --features=cuda,erasure --root $SNAPCRAFT_PART_INSTALL --bin solana-fullnode
      cargo install --features=cuda --root $SNAPCRAFT_PART_INSTALL --bin solana-fullnode
      mv $SNAPCRAFT_PART_INSTALL/bin/solana-fullnode $SNAPCRAFT_PART_INSTALL
      rm -rf $SNAPCRAFT_PART_INSTALL/bin/*
      mv $SNAPCRAFT_PART_INSTALL/solana-fullnode $SNAPCRAFT_PART_INSTALL/bin/solana-fullnode-cuda
@@ -85,8 +118,10 @@ parts:
      mkdir -p $SNAPCRAFT_PART_INSTALL/bin
      cp -av multinode-demo/* $SNAPCRAFT_PART_INSTALL/bin/

      # TODO: build rsync from source instead of sneaking it in from the host
      # TODO: build curl,rsync/multilog from source instead of sneaking it in from the host
      # system...
      set -x
      mkdir -p $SNAPCRAFT_PART_INSTALL/bin
      cp -av /usr/bin/curl $SNAPCRAFT_PART_INSTALL/bin/
      cp -av /usr/bin/multilog $SNAPCRAFT_PART_INSTALL/bin/
      cp -av /usr/bin/rsync $SNAPCRAFT_PART_INSTALL/bin/

src/bank.rs (622 lines changed; Normal file → Executable file)
File diff suppressed because it is too large.
src/banking_stage.rs
@@ -5,15 +5,17 @@

use bank::Bank;
use bincode::deserialize;
use counter::Counter;
use log::Level;
use packet::{PacketRecycler, Packets, SharedPackets};
use rayon::prelude::*;
use record_stage::Signal;
use result::Result;
use result::{Error, Result};
use service::Service;
use std::net::SocketAddr;
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::sync::mpsc::{channel, Receiver, Sender};
use std::sync::atomic::AtomicUsize;
use std::sync::mpsc::{channel, Receiver, RecvTimeoutError, Sender};
use std::sync::Arc;
use std::thread::{Builder, JoinHandle};
use std::thread::{self, Builder, JoinHandle};
use std::time::Duration;
use std::time::Instant;
use timing;
@@ -22,17 +24,15 @@ use transaction::Transaction;

/// Stores the stage's thread handle and output receiver.
pub struct BankingStage {
    /// Handle to the stage's thread.
    pub thread_hdl: JoinHandle<()>,
    thread_hdl: JoinHandle<()>,
}

impl BankingStage {
    /// Create the stage using `bank`. Exit when either `exit` is set or
    /// when `verified_receiver` or the stage's output receiver is dropped.
    /// Create the stage using `bank`. Exit when `verified_receiver` is dropped.
    /// Discard input packets using `packet_recycler` to minimize memory
    /// allocations in a previous stage such as the `fetch_stage`.
    pub fn new(
        bank: Arc<Bank>,
        exit: Arc<AtomicBool>,
        verified_receiver: Receiver<Vec<(SharedPackets, Vec<u8>)>>,
        packet_recycler: PacketRecycler,
    ) -> (Self, Receiver<Signal>) {
@@ -40,15 +40,16 @@ impl BankingStage {
        let thread_hdl = Builder::new()
            .name("solana-banking-stage".to_string())
            .spawn(move || loop {
                let e = Self::process_packets(
                    bank.clone(),
                if let Err(e) = Self::process_packets(
                    &bank,
                    &verified_receiver,
                    &signal_sender,
                    &packet_recycler,
                );
                if e.is_err() {
                    if exit.load(Ordering::Relaxed) {
                        break;
                ) {
                    match e {
                        Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => break,
                        Error::RecvTimeoutError(RecvTimeoutError::Timeout) => (),
                        _ => error!("{:?}", e),
                    }
                }
            })
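
The exit discipline introduced above (stop only when the input channel
disconnects, treat timeouts as idle polls, log anything else) is a general
std::sync::mpsc worker pattern. A minimal self-contained sketch of just that
pattern, with illustrative names unrelated to Solana's types:

use std::sync::mpsc::{channel, RecvTimeoutError};
use std::thread;
use std::time::Duration;

fn main() {
    let (sender, receiver) = channel::<u32>();
    let worker = thread::spawn(move || loop {
        match receiver.recv_timeout(Duration::from_millis(100)) {
            Ok(item) => println!("processing {}", item),
            // All senders dropped: no more work can arrive, so exit cleanly.
            Err(RecvTimeoutError::Disconnected) => break,
            // Nothing yet; keep polling.
            Err(RecvTimeoutError::Timeout) => (),
        }
    });
    sender.send(42).unwrap();
    drop(sender); // disconnects the channel; the worker loop breaks
    worker.join().unwrap();
}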
@@ -71,8 +72,8 @@ impl BankingStage {

    /// Process the incoming packets and send output `Signal` messages to `signal_sender`.
    /// Discard packets via `packet_recycler`.
    fn process_packets(
        bank: Arc<Bank>,
    pub fn process_packets(
        bank: &Arc<Bank>,
        verified_receiver: &Receiver<Vec<(SharedPackets, Vec<u8>)>>,
        signal_sender: &Sender<Signal>,
        packet_recycler: &PacketRecycler,
@@ -88,8 +89,8 @@ impl BankingStage {
            timing::duration_as_ms(&recv_start.elapsed()),
            mms.len(),
        );
        let bank_starting_tx_count = bank.transaction_count();
        let count = mms.iter().map(|x| x.1.len()).sum();
        static mut COUNTER: Counter = create_counter!("banking_stage_process_packets", 1);
        let proc_start = Instant::now();
        for (msgs, vers) in mms {
            let transactions = Self::deserialize_transactions(&msgs.read().unwrap());
@@ -125,11 +126,25 @@ impl BankingStage {
            reqs_len,
            (reqs_len as f32) / (total_time_s)
        );
        inc_counter!(COUNTER, count, proc_start);
        inc_new_counter_info!("banking_stage-process_packets", count);
        inc_new_counter_info!(
            "banking_stage-process_transactions",
            bank.transaction_count() - bank_starting_tx_count
        );
        Ok(())
    }
}

impl Service for BankingStage {
    fn thread_hdls(self) -> Vec<JoinHandle<()>> {
        vec![self.thread_hdl]
    }

    fn join(self) -> thread::Result<()> {
        self.thread_hdl.join()
    }
}

// TODO: When banking is pulled out of RequestStage, add this test back in.

//use bank::Bank;
@@ -190,239 +205,3 @@ impl BankingStage {
//        assert_eq!(bank.get_balance(&alice.pubkey()), Some(1));
//    }
//}
//
//#[cfg(all(feature = "unstable", test))]
//mod bench {
//    extern crate test;
//    use self::test::Bencher;
//    use bank::{Bank, MAX_ENTRY_IDS};
//    use bincode::serialize;
//    use hash::hash;
//    use mint::Mint;
//    use rayon::prelude::*;
//    use signature::{KeyPair, KeyPairUtil};
//    use std::collections::HashSet;
//    use std::time::Instant;
//    use transaction::Transaction;
//
//    #[bench]
//    fn bench_process_transactions(_bencher: &mut Bencher) {
//        let mint = Mint::new(100_000_000);
//        let bank = Bank::new(&mint);
//        // Create transactions between unrelated parties.
//        let txs = 100_000;
//        let last_ids: Mutex<HashSet<Hash>> = Mutex::new(HashSet::new());
//        let transactions: Vec<_> = (0..txs)
//            .into_par_iter()
//            .map(|i| {
//                // Seed the 'to' account and a cell for its signature.
//                let dummy_id = i % (MAX_ENTRY_IDS as i32);
//                let last_id = hash(&serialize(&dummy_id).unwrap()); // Semi-unique hash
//                {
//                    let mut last_ids = last_ids.lock().unwrap();
//                    if !last_ids.contains(&last_id) {
//                        last_ids.insert(last_id);
//                        bank.register_entry_id(&last_id);
//                    }
//                }
//
//                // Seed the 'from' account.
//                let rando0 = KeyPair::new();
//                let tx = Transaction::new(&mint.keypair(), rando0.pubkey(), 1_000, last_id);
//                bank.process_transaction(&tx).unwrap();
//
//                let rando1 = KeyPair::new();
//                let tx = Transaction::new(&rando0, rando1.pubkey(), 2, last_id);
//                bank.process_transaction(&tx).unwrap();
//
//                // Finally, return a transaction that's unique
//                Transaction::new(&rando0, rando1.pubkey(), 1, last_id)
//            })
//            .collect();
//
//        let banking_stage = EventProcessor::new(bank, &mint.last_id(), None);
//
//        let now = Instant::now();
//        assert!(banking_stage.process_transactions(transactions).is_ok());
//        let duration = now.elapsed();
//        let sec = duration.as_secs() as f64 + duration.subsec_nanos() as f64 / 1_000_000_000.0;
//        let tps = txs as f64 / sec;
//
//        // Ensure that all transactions were successfully logged.
//        drop(banking_stage.historian_input);
//        let entries: Vec<Entry> = banking_stage.output.lock().unwrap().iter().collect();
//        assert_eq!(entries.len(), 1);
//        assert_eq!(entries[0].transactions.len(), txs as usize);
//
//        println!("{} tps", tps);
//    }
//}

#[cfg(all(feature = "unstable", test))]
mod bench {
    extern crate test;
    use self::test::Bencher;
    use bank::*;
    use banking_stage::BankingStage;
    use logger;
    use mint::Mint;
    use packet::{to_packets_chunked, PacketRecycler};
    use rayon::prelude::*;
    use record_stage::Signal;
    use signature::{KeyPair, KeyPairUtil};
    use std::iter;
    use std::sync::mpsc::{channel, Receiver};
    use std::sync::Arc;
    use transaction::Transaction;

    fn check_txs(batches: usize, receiver: &Receiver<Signal>, ref_tx_count: usize) {
        let mut total = 0;
        for _ in 0..batches {
            let signal = receiver.recv().unwrap();
            if let Signal::Transactions(transactions) = signal {
                total += transactions.len();
            } else {
                assert!(false);
            }
        }
        assert_eq!(total, ref_tx_count);
    }

    #[bench]
    fn bench_banking_stage_multi_accounts(bencher: &mut Bencher) {
        logger::setup();
        let tx = 10_000_usize;
        let mint_total = 1_000_000_000_000;
        let mint = Mint::new(mint_total);
        let num_dst_accounts = 8 * 1024;
        let num_src_accounts = 8 * 1024;

        let srckeys: Vec<_> = (0..num_src_accounts).map(|_| KeyPair::new()).collect();
        let dstkeys: Vec<_> = (0..num_dst_accounts)
            .map(|_| KeyPair::new().pubkey())
            .collect();

        info!("created keys src: {} dst: {}", srckeys.len(), dstkeys.len());

        let transactions: Vec<_> = (0..tx)
            .map(|i| {
                Transaction::new(
                    &srckeys[i % num_src_accounts],
                    dstkeys[i % num_dst_accounts],
                    i as i64,
                    mint.last_id(),
                )
            })
            .collect();

        info!("created transactions");

        let (verified_sender, verified_receiver) = channel();
        let (signal_sender, signal_receiver) = channel();
        let packet_recycler = PacketRecycler::default();

        let setup_transactions: Vec<_> = (0..num_src_accounts)
            .map(|i| {
                Transaction::new(
                    &mint.keypair(),
                    srckeys[i].pubkey(),
                    mint_total / num_src_accounts as i64,
                    mint.last_id(),
                )
            })
            .collect();

        bencher.iter(move || {
            let bank = Arc::new(Bank::new(&mint));

            let verified_setup: Vec<_> =
                to_packets_chunked(&packet_recycler, setup_transactions.clone(), tx)
                    .into_iter()
                    .map(|x| {
                        let len = (*x).read().unwrap().packets.len();
                        (x, iter::repeat(1).take(len).collect())
                    })
                    .collect();

            let verified_setup_len = verified_setup.len();
            verified_sender.send(verified_setup).unwrap();
            BankingStage::process_packets(
                bank.clone(),
                &verified_receiver,
                &signal_sender,
                &packet_recycler,
            ).unwrap();

            check_txs(verified_setup_len, &signal_receiver, num_src_accounts);

            let verified: Vec<_> = to_packets_chunked(&packet_recycler, transactions.clone(), 192)
                .into_iter()
                .map(|x| {
                    let len = (*x).read().unwrap().packets.len();
                    (x, iter::repeat(1).take(len).collect())
                })
                .collect();

            let verified_len = verified.len();
            verified_sender.send(verified).unwrap();
            BankingStage::process_packets(
                bank.clone(),
                &verified_receiver,
                &signal_sender,
                &packet_recycler,
            ).unwrap();

            check_txs(verified_len, &signal_receiver, tx);
        });
    }

    #[bench]
    fn bench_banking_stage_single_from(bencher: &mut Bencher) {
        logger::setup();
        let tx = 10_000_usize;
        let mint = Mint::new(1_000_000_000_000);
        let mut pubkeys = Vec::new();
        let num_keys = 8;
        for _ in 0..num_keys {
            pubkeys.push(KeyPair::new().pubkey());
        }

        let transactions: Vec<_> = (0..tx)
            .into_par_iter()
            .map(|i| {
                Transaction::new(
                    &mint.keypair(),
                    pubkeys[i % num_keys],
                    i as i64,
                    mint.last_id(),
                )
            })
            .collect();

        let (verified_sender, verified_receiver) = channel();
        let (signal_sender, signal_receiver) = channel();
        let packet_recycler = PacketRecycler::default();

        bencher.iter(move || {
            let bank = Arc::new(Bank::new(&mint));
            let verified: Vec<_> = to_packets_chunked(&packet_recycler, transactions.clone(), tx)
                .into_iter()
                .map(|x| {
                    let len = (*x).read().unwrap().packets.len();
                    (x, iter::repeat(1).take(len).collect())
                })
                .collect();
            let verified_len = verified.len();
            verified_sender.send(verified).unwrap();
            BankingStage::process_packets(
                bank.clone(),
                &verified_receiver,
                &signal_sender,
                &packet_recycler,
            ).unwrap();

            check_txs(verified_len, &signal_receiver, tx);
        });
    }
}

src/bin/bench-streamer.rs (new file, 90 lines)
@@ -0,0 +1,90 @@

extern crate solana;

use solana::packet::{Packet, PacketRecycler, BLOB_SIZE, PACKET_DATA_SIZE};
use solana::result::Result;
use solana::streamer::{receiver, PacketReceiver};
use std::net::{SocketAddr, UdpSocket};
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::sync::mpsc::channel;
use std::sync::Arc;
use std::thread::sleep;
use std::thread::{spawn, JoinHandle};
use std::time::Duration;
use std::time::SystemTime;

fn producer(addr: &SocketAddr, recycler: &PacketRecycler, exit: Arc<AtomicBool>) -> JoinHandle<()> {
    let send = UdpSocket::bind("0.0.0.0:0").unwrap();
    let msgs = recycler.allocate();
    let msgs_ = msgs.clone();
    msgs.write().unwrap().packets.resize(10, Packet::default());
    for w in &mut msgs.write().unwrap().packets {
        w.meta.size = PACKET_DATA_SIZE;
        w.meta.set_addr(&addr);
    }
    spawn(move || loop {
        if exit.load(Ordering::Relaxed) {
            return;
        }
        let mut num = 0;
        for p in &msgs_.read().unwrap().packets {
            let a = p.meta.addr();
            assert!(p.meta.size < BLOB_SIZE);
            send.send_to(&p.data[..p.meta.size], &a).unwrap();
            num += 1;
        }
        assert_eq!(num, 10);
    })
}

fn sink(
    recycler: PacketRecycler,
    exit: Arc<AtomicBool>,
    rvs: Arc<AtomicUsize>,
    r: PacketReceiver,
) -> JoinHandle<()> {
    spawn(move || loop {
        if exit.load(Ordering::Relaxed) {
            return;
        }
        let timer = Duration::new(1, 0);
        if let Ok(msgs) = r.recv_timeout(timer) {
            rvs.fetch_add(msgs.read().unwrap().packets.len(), Ordering::Relaxed);
            recycler.recycle(msgs);
        }
    })
}

fn main() -> Result<()> {
    let read = UdpSocket::bind("127.0.0.1:0")?;
    read.set_read_timeout(Some(Duration::new(1, 0)))?;

    let addr = read.local_addr()?;
    let exit = Arc::new(AtomicBool::new(false));
    let pack_recycler = PacketRecycler::default();

    let (s_reader, r_reader) = channel();
    let t_reader = receiver(read, exit.clone(), pack_recycler.clone(), s_reader);
    let t_producer1 = producer(&addr, &pack_recycler, exit.clone());
    let t_producer2 = producer(&addr, &pack_recycler, exit.clone());
    let t_producer3 = producer(&addr, &pack_recycler, exit.clone());

    let rvs = Arc::new(AtomicUsize::new(0));
    let t_sink = sink(pack_recycler.clone(), exit.clone(), rvs.clone(), r_reader);

    let start = SystemTime::now();
    let start_val = rvs.load(Ordering::Relaxed);
    sleep(Duration::new(5, 0));
    let elapsed = start.elapsed().unwrap();
    let end_val = rvs.load(Ordering::Relaxed);
    // elapsed time in nanoseconds (1e9 ns per second)
    let time = elapsed.as_secs() * 1_000_000_000 + u64::from(elapsed.subsec_nanos());
    let ftime = (time as f64) / 1_000_000_000_f64;
    let fcount = (end_val - start_val) as f64;
    println!("performance: {:?}", fcount / ftime);
    exit.store(true, Ordering::Relaxed);
    t_reader.join()?;
    t_producer1.join()?;
    t_producer2.join()?;
    t_producer3.join()?;
    t_sink.join()?;
    Ok(())
}
src/bin/bench-tps.rs (new file, 714 lines)
@@ -0,0 +1,714 @@

extern crate bincode;
#[macro_use]
extern crate clap;
extern crate influx_db_client;
extern crate rayon;
extern crate serde_json;
extern crate solana;

use clap::{App, Arg};
use influx_db_client as influxdb;
use rayon::prelude::*;
use solana::client::mk_client;
use solana::crdt::{Crdt, NodeInfo};
use solana::drone::DRONE_PORT;
use solana::fullnode::Config;
use solana::hash::Hash;
use solana::logger;
use solana::metrics;
use solana::nat::{get_public_ip_addr, udp_random_bind};
use solana::ncp::Ncp;
use solana::service::Service;
use solana::signature::{read_keypair, GenKeys, Keypair, KeypairUtil};
use solana::thin_client::ThinClient;
use solana::timing::{duration_as_ms, duration_as_s};
use solana::transaction::Transaction;
use solana::wallet::request_airdrop;
use solana::window::default_window;
use std::collections::VecDeque;
use std::fs::File;
use std::net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket};
use std::process::exit;
use std::sync::atomic::{AtomicBool, AtomicIsize, Ordering};
use std::sync::{Arc, RwLock};
use std::thread::sleep;
use std::thread::Builder;
use std::thread::JoinHandle;
use std::time::Duration;
use std::time::Instant;

pub struct NodeStats {
    pub tps: f64, // Maximum TPS reported by this node
    pub tx: u64,  // Total transactions reported by this node
}

fn metrics_submit_token_balance(token_balance: i64) {
    println!("Token balance: {}", token_balance);
    metrics::submit(
        influxdb::Point::new("bench-tps")
            .add_tag("op", influxdb::Value::String("token_balance".to_string()))
            .add_field("balance", influxdb::Value::Integer(token_balance as i64))
            .to_owned(),
    );
}

fn sample_tx_count(
    exit_signal: &Arc<AtomicBool>,
    maxes: &Arc<RwLock<Vec<(SocketAddr, NodeStats)>>>,
    first_tx_count: u64,
    v: &NodeInfo,
    sample_period: u64,
) {
    let mut client = mk_client(&v);
    let mut now = Instant::now();
    let mut initial_tx_count = client.transaction_count();
    let mut max_tps = 0.0;
    let mut total;

    let log_prefix = format!("{:21}:", v.contact_info.tpu.to_string());

    loop {
        let tx_count = client.transaction_count();
        assert!(
            tx_count >= initial_tx_count,
            "expected tx_count({}) >= initial_tx_count({})",
            tx_count,
            initial_tx_count
        );
        let duration = now.elapsed();
        now = Instant::now();
        let sample = tx_count - initial_tx_count;
        initial_tx_count = tx_count;

        let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
        let tps = (sample * 1_000_000_000) as f64 / ns as f64;
        if tps > max_tps {
            max_tps = tps;
        }
        if tx_count > first_tx_count {
            total = tx_count - first_tx_count;
        } else {
            total = 0;
        }
        println!(
            "{} {:9.2} TPS, Transactions: {:6}, Total transactions: {}",
            log_prefix, tps, sample, total
        );
        sleep(Duration::new(sample_period, 0));

        if exit_signal.load(Ordering::Relaxed) {
            println!("{} Exiting validator thread", log_prefix);
            let stats = NodeStats {
                tps: max_tps,
                tx: total,
            };
            maxes.write().unwrap().push((v.contact_info.tpu, stats));
            break;
        }
    }
}

/// Send loopback payment of 0 tokens and confirm the network processed it
fn send_barrier_transaction(barrier_client: &mut ThinClient, last_id: &mut Hash, id: &Keypair) {
    let transfer_start = Instant::now();

    let mut poll_count = 0;
    loop {
        if poll_count > 0 && poll_count % 8 == 0 {
            println!(
                "polling for barrier transaction confirmation, attempt {}",
                poll_count
            );
        }

        *last_id = barrier_client.get_last_id();
        let signature = barrier_client
            .transfer(0, &id, id.pubkey(), last_id)
            .expect("Unable to send barrier transaction");

        let confirmation = barrier_client.poll_for_signature(&signature);
        let duration_ms = duration_as_ms(&transfer_start.elapsed());
        if confirmation.is_ok() {
            println!("barrier transaction confirmed in {}ms", duration_ms);

            metrics::submit(
                influxdb::Point::new("bench-tps")
                    .add_tag(
                        "op",
                        influxdb::Value::String("send_barrier_transaction".to_string()),
                    )
                    .add_field("poll_count", influxdb::Value::Integer(poll_count))
                    .add_field("duration", influxdb::Value::Integer(duration_ms as i64))
                    .to_owned(),
            );

            // Sanity check that the client balance is still 1
            let balance = barrier_client.poll_get_balance(&id.pubkey()).unwrap_or(-1);
            if balance != 1 {
                panic!("Expected an account balance of 1 (balance: {})", balance);
            }
            break;
        }

        // Timeout after 3 minutes. When running a CPU-only leader+validator+drone+bench-tps on a dev
        // machine, some batches of transactions can take upwards of 1 minute...
        if duration_ms > 1000 * 60 * 3 {
            println!("Error: Couldn't confirm barrier transaction!");
            exit(1);
        }

        let new_last_id = barrier_client.get_last_id();
        if new_last_id == *last_id {
            if poll_count > 0 && poll_count % 8 == 0 {
                println!("last_id is not advancing, still at {:?}", *last_id);
            }
        } else {
            *last_id = new_last_id;
        }

        poll_count += 1;
    }
}

fn generate_txs(
    shared_txs: &Arc<RwLock<VecDeque<Vec<Transaction>>>>,
    id: &Keypair,
    keypairs: &[Keypair],
    last_id: &Hash,
    threads: usize,
    reclaim: bool,
) {
    let tx_count = keypairs.len();
    println!("Signing transactions... {} (reclaim={})", tx_count, reclaim);
    let signing_start = Instant::now();

    let transactions: Vec<_> = keypairs
        .par_iter()
        .map(|keypair| {
            if !reclaim {
                Transaction::new(&id, keypair.pubkey(), 1, *last_id)
            } else {
                Transaction::new(keypair, id.pubkey(), 1, *last_id)
            }
        })
        .collect();

    let duration = signing_start.elapsed();
    let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
    let bsps = (tx_count) as f64 / ns as f64;
    let nsps = ns as f64 / (tx_count) as f64;
    println!(
        "Done. {:.2} thousand signatures per second, {:.2} us per signature, {} ms total time",
        bsps * 1_000_000_f64,
        nsps / 1_000_f64,
        duration_as_ms(&duration),
    );
    metrics::submit(
        influxdb::Point::new("bench-tps")
            .add_tag("op", influxdb::Value::String("generate_txs".to_string()))
            .add_field(
                "duration",
                influxdb::Value::Integer(duration_as_ms(&duration) as i64),
            )
            .to_owned(),
    );

    let sz = transactions.len() / threads;
    let chunks: Vec<_> = transactions.chunks(sz).collect();
    {
        let mut shared_txs_wl = shared_txs.write().unwrap();
        for chunk in chunks {
            shared_txs_wl.push_back(chunk.to_vec());
        }
    }
}

fn do_tx_transfers(
    exit_signal: &Arc<AtomicBool>,
    shared_txs: &Arc<RwLock<VecDeque<Vec<Transaction>>>>,
    leader: &NodeInfo,
    shared_tx_thread_count: &Arc<AtomicIsize>,
) {
    let client = mk_client(&leader);
    loop {
        let txs;
        {
            let mut shared_txs_wl = shared_txs.write().unwrap();
            txs = shared_txs_wl.pop_front();
        }
        if let Some(txs0) = txs {
            shared_tx_thread_count.fetch_add(1, Ordering::Relaxed);
            println!(
                "Transferring 1 unit {} times... to {}",
                txs0.len(),
                leader.contact_info.tpu
            );
            let tx_len = txs0.len();
            let transfer_start = Instant::now();
            for tx in txs0 {
                client.transfer_signed(&tx).unwrap();
            }
            shared_tx_thread_count.fetch_add(-1, Ordering::Relaxed);
            println!(
                "Tx send done. {} ms {} tps",
                duration_as_ms(&transfer_start.elapsed()),
                tx_len as f32 / duration_as_s(&transfer_start.elapsed()),
            );
            metrics::submit(
                influxdb::Point::new("bench-tps")
                    .add_tag("op", influxdb::Value::String("do_tx_transfers".to_string()))
                    .add_field(
                        "duration",
                        influxdb::Value::Integer(duration_as_ms(&transfer_start.elapsed()) as i64),
                    )
                    .add_field("count", influxdb::Value::Integer(tx_len as i64))
                    .to_owned(),
            );
        }
        if exit_signal.load(Ordering::Relaxed) {
            break;
        }
    }
}

fn airdrop_tokens(client: &mut ThinClient, leader: &NodeInfo, id: &Keypair, tx_count: i64) {
    let mut drone_addr = leader.contact_info.tpu;
    drone_addr.set_port(DRONE_PORT);

    let starting_balance = client.poll_get_balance(&id.pubkey()).unwrap();
    metrics_submit_token_balance(starting_balance);

    if starting_balance < tx_count {
        let airdrop_amount = tx_count - starting_balance;
        println!(
            "Airdropping {:?} tokens from {}",
            airdrop_amount, drone_addr
        );

        let previous_balance = starting_balance;
        request_airdrop(&drone_addr, &id.pubkey(), airdrop_amount as u64).unwrap();

        // TODO: return airdrop Result from Drone instead of polling the
        // network
        let mut current_balance = previous_balance;
        for _ in 0..20 {
            sleep(Duration::from_millis(500));
            current_balance = client.poll_get_balance(&id.pubkey()).unwrap();
            if starting_balance != current_balance {
                break;
            }
            println!(".");
        }
        metrics_submit_token_balance(current_balance);
        if current_balance - starting_balance != airdrop_amount {
            println!("Airdrop failed!");
            exit(1);
        }
    }
}

fn compute_and_report_stats(
    maxes: &Arc<RwLock<Vec<(SocketAddr, NodeStats)>>>,
    sample_period: u64,
    tx_send_elapsed: &Duration,
) {
    // Compute/report stats
    let mut max_of_maxes = 0.0;
    let mut total_txs = 0;
    let mut nodes_with_zero_tps = 0;
    let mut total_maxes = 0.0;
    println!(" Node address        | Max TPS       | Total Transactions");
    println!("---------------------+---------------+--------------------");

    for (sock, stats) in maxes.read().unwrap().iter() {
        let maybe_flag = match stats.tx {
            0 => "!!!!!",
            _ => "",
        };

        println!(
            "{:20} | {:13.2} | {} {}",
            (*sock).to_string(),
            stats.tps,
            stats.tx,
            maybe_flag
        );

        if stats.tps == 0.0 {
            nodes_with_zero_tps += 1;
        }
        total_maxes += stats.tps;

        if stats.tps > max_of_maxes {
            max_of_maxes = stats.tps;
        }
        total_txs += stats.tx;
    }

    if total_maxes > 0.0 {
        let num_nodes_with_tps = maxes.read().unwrap().len() - nodes_with_zero_tps;
        let average_max = total_maxes / num_nodes_with_tps as f64;
        println!(
            "\nAverage max TPS: {:.2}, {} nodes had 0 TPS",
            average_max, nodes_with_zero_tps
        );
    }

    println!(
        "\nHighest TPS: {:.2} sampling period {}s total transactions: {} clients: {}",
        max_of_maxes,
        sample_period,
        total_txs,
        maxes.read().unwrap().len()
    );
    println!(
        "\tAverage TPS: {}",
        total_txs as f32 / duration_as_s(tx_send_elapsed)
    );
}

fn main() {
    logger::setup();
    metrics::set_panic_hook("bench-tps");
    let mut threads = 4usize;
    let mut num_nodes = 1usize;
    let mut time_sec = 90;
    let mut sustained = false;
    let mut tx_count = 500_000;

    let matches = App::new("solana-bench-tps")
        .version(crate_version!())
        .arg(
            Arg::with_name("leader")
                .short("l")
                .long("leader")
                .value_name("PATH")
                .takes_value(true)
                .help("/path/to/leader.json"),
        )
        .arg(
            Arg::with_name("keypair")
                .short("k")
                .long("keypair")
                .value_name("PATH")
                .takes_value(true)
                .default_value("~/.config/solana/id.json")
                .help("/path/to/id.json"),
        )
        .arg(
            Arg::with_name("num_nodes")
                .short("n")
                .long("nodes")
                .value_name("NUMBER")
                .takes_value(true)
                .help("number of nodes to converge to"),
        )
        .arg(
            Arg::with_name("threads")
                .short("t")
                .long("threads")
                .value_name("NUMBER")
                .takes_value(true)
                .help("number of threads"),
        )
        .arg(
            Arg::with_name("seconds")
                .short("s")
                .long("sec")
                .value_name("NUMBER")
                .takes_value(true)
                .help("send transactions for this many seconds"),
        )
        .arg(
            Arg::with_name("converge_only")
                .short("c")
                .help("exit immediately after converging"),
        )
        .arg(
            Arg::with_name("addr")
                .short("a")
                .long("addr")
                .value_name("IPADDR")
                .takes_value(true)
                .help("address to advertise to the network"),
        )
        .arg(
            Arg::with_name("sustained")
                .long("sustained")
                .help("Use sustained performance mode vs. peak mode. This overlaps the tx generation with transfers."),
        )
        .arg(
            Arg::with_name("tx_count")
                .long("tx_count")
                .value_name("NUMBER")
                .takes_value(true)
                .help("number of transactions to send in a single batch")
        )
        .get_matches();

    let leader: NodeInfo;
    if let Some(l) = matches.value_of("leader") {
        leader = read_leader(l).node_info;
    } else {
        let server_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 8000);
        leader = NodeInfo::new_leader(&server_addr);
    };

    let id = read_keypair(matches.value_of("keypair").unwrap()).expect("client keypair");

    if let Some(t) = matches.value_of("threads") {
        threads = t.to_string().parse().expect("integer");
    }

    if let Some(n) = matches.value_of("num_nodes") {
        num_nodes = n.to_string().parse().expect("integer");
    }

    if let Some(s) = matches.value_of("seconds") {
        time_sec = s.to_string().parse().expect("integer");
    }

    let addr = if let Some(s) = matches.value_of("addr") {
        s.to_string().parse().unwrap_or_else(|e| {
            eprintln!("failed to parse {} as IP address error: {:?}", s, e);
            exit(1);
        })
    } else {
        get_public_ip_addr().unwrap_or_else(|e| {
            eprintln!("failed to get public IP, try --addr? error: {:?}", e);
            exit(1);
        })
    };

    if let Some(s) = matches.value_of("tx_count") {
        tx_count = s.to_string().parse().expect("integer");
    }

    if matches.is_present("sustained") {
        sustained = true;
    }

    let exit_signal = Arc::new(AtomicBool::new(false));
    let mut c_threads = vec![];
    let validators = converge(&leader, &exit_signal, num_nodes, &mut c_threads, addr);

    println!(" Node address         | Node identifier");
    println!("----------------------+------------------");
    for node in &validators {
        println!(
            " {:20} | {:16x}",
            node.contact_info.tpu.to_string(),
            node.debug_id()
        );
    }
    println!("Nodes: {}", validators.len());

    if validators.len() < num_nodes {
        println!(
            "Error: Insufficient nodes discovered. Expecting {} or more",
            num_nodes
        );
        exit(1);
    }

    if matches.is_present("converge_only") {
        return;
    }

    let mut client = mk_client(&leader);
    let mut barrier_client = mk_client(&leader);

    let mut seed = [0u8; 32];
    seed.copy_from_slice(&id.public_key_bytes()[..32]);
    let mut rnd = GenKeys::new(seed);

    println!("Creating {} keypairs...", tx_count / 2);
    let keypairs = rnd.gen_n_keypairs(tx_count / 2);
    let barrier_id = rnd.gen_n_keypairs(1).pop().unwrap();

    println!("Get tokens...");
    airdrop_tokens(&mut client, &leader, &id, tx_count);
    airdrop_tokens(&mut barrier_client, &leader, &barrier_id, 1);

    println!("Get last ID...");
    let mut last_id = client.get_last_id();
    println!("Got last ID {:?}", last_id);

    let first_tx_count = client.transaction_count();
    println!("Initial transaction count {}", first_tx_count);

    // Setup a thread per validator to sample every period
    // collect the max transaction rate and total tx count seen
    let maxes = Arc::new(RwLock::new(Vec::new()));
    let sample_period = 1; // in seconds
    println!("Sampling TPS every {} second...", sample_period);
    let v_threads: Vec<_> = validators
        .into_iter()
        .map(|v| {
            let exit_signal = exit_signal.clone();
            let maxes = maxes.clone();
            Builder::new()
                .name("solana-client-sample".to_string())
                .spawn(move || {
                    sample_tx_count(&exit_signal, &maxes, first_tx_count, &v, sample_period);
                })
                .unwrap()
        })
        .collect();

    let shared_txs: Arc<RwLock<VecDeque<Vec<Transaction>>>> =
        Arc::new(RwLock::new(VecDeque::new()));

    let shared_tx_active_thread_count = Arc::new(AtomicIsize::new(0));

    let s_threads: Vec<_> = (0..threads)
        .map(|_| {
            let exit_signal = exit_signal.clone();
            let shared_txs = shared_txs.clone();
            let leader = leader.clone();
            let shared_tx_active_thread_count = shared_tx_active_thread_count.clone();
            Builder::new()
                .name("solana-client-sender".to_string())
                .spawn(move || {
                    do_tx_transfers(
                        &exit_signal,
                        &shared_txs,
                        &leader,
                        &shared_tx_active_thread_count,
                    );
                })
                .unwrap()
        })
        .collect();

    // generate and send transactions for the specified duration
    let time = Duration::new(time_sec, 0);
    let now = Instant::now();
    let mut reclaim_tokens_back_to_source_account = false;
    while now.elapsed() < time || reclaim_tokens_back_to_source_account {
        let balance = client.poll_get_balance(&id.pubkey()).unwrap_or(-1);
        metrics_submit_token_balance(balance);

        // ping-pong between source and destination accounts for each loop iteration
        // this seems to be faster than trying to determine the balance of individual
        // accounts
        generate_txs(
            &shared_txs,
            &id,
            &keypairs,
            &last_id,
            threads,
            reclaim_tokens_back_to_source_account,
        );
        reclaim_tokens_back_to_source_account = !reclaim_tokens_back_to_source_account;

        // In sustained mode overlap the transfers with generation
        // this has higher average performance but lower peak performance
        // in tested environments.
        if !sustained {
            while shared_tx_active_thread_count.load(Ordering::Relaxed) > 0 {
                sleep(Duration::from_millis(100));
            }
        }
        // It's not feasible (would take too much time) to confirm each of the `tx_count / 2`
        // transactions sent by `generate_txs()` so instead send and confirm a single transaction
        // to validate the network is still functional.
        send_barrier_transaction(&mut barrier_client, &mut last_id, &barrier_id);
    }

    // Stop the sampling threads so it will collect the stats
    exit_signal.store(true, Ordering::Relaxed);

    println!("Waiting for validator threads...");
    for t in v_threads {
        if let Err(err) = t.join() {
            println!("  join() failed with: {:?}", err);
        }
    }

    // join the tx send threads
    println!("Waiting for transmit threads...");
    for t in s_threads {
        if let Err(err) = t.join() {
            println!("  join() failed with: {:?}", err);
        }
    }

    let balance = client.poll_get_balance(&id.pubkey()).unwrap_or(-1);
    metrics_submit_token_balance(balance);

    compute_and_report_stats(&maxes, sample_period, &now.elapsed());

    // join the crdt client threads
    for t in c_threads {
        t.join().unwrap();
    }
}
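
One design note on the send loop above: rather than tracking per-account
balances, the benchmark flips the `reclaim` flag each iteration, ping-ponging
the same 1-token transfers between the funded client account and the
pre-generated keypairs; per the in-code comments this was observed to be
faster than querying individual balances, and the barrier transaction after
each batch is the only per-iteration confirmation step.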
fn spy_node(addr: IpAddr) -> (NodeInfo, UdpSocket) {
    let gossip_socket = udp_random_bind(8000, 10000, 5).unwrap();

    let gossip_addr = SocketAddr::new(addr, gossip_socket.local_addr().unwrap().port());

    let pubkey = Keypair::new().pubkey();
    let daddr = "0.0.0.0:0".parse().unwrap();
    assert!(!gossip_addr.ip().is_unspecified());
    assert!(!gossip_addr.ip().is_multicast());
    let node = NodeInfo::new(pubkey, gossip_addr, daddr, daddr, daddr, daddr);
    (node, gossip_socket)
}

fn converge(
    leader: &NodeInfo,
    exit_signal: &Arc<AtomicBool>,
    num_nodes: usize,
    threads: &mut Vec<JoinHandle<()>>,
    addr: IpAddr,
) -> Vec<NodeInfo> {
    // let's spy on the network
    let (spy, spy_gossip) = spy_node(addr);
    let mut spy_crdt = Crdt::new(spy).expect("Crdt::new");
    spy_crdt.insert(&leader);
    spy_crdt.set_leader(leader.id);
    let spy_ref = Arc::new(RwLock::new(spy_crdt));
    let window = default_window();
    let gossip_send_socket = udp_random_bind(8000, 10000, 5).unwrap();
    let ncp = Ncp::new(
        &spy_ref,
        window.clone(),
        None,
        spy_gossip,
        gossip_send_socket,
        exit_signal.clone(),
    ).expect("DataReplicator::new");
    let mut v: Vec<NodeInfo> = vec![];
    // wait for the network to converge, 30 seconds should be plenty
    for _ in 0..30 {
        v = spy_ref
            .read()
            .unwrap()
            .table
            .values()
            .filter(|x| Crdt::is_valid_address(x.contact_info.rpu))
            .cloned()
            .collect();
        if v.len() >= num_nodes {
            println!("CONVERGED!");
            break;
        } else {
            println!(
                "{} node(s) discovered (looking for {} or more)",
                v.len(),
                num_nodes
            );
        }
        sleep(Duration::new(1, 0));
    }
    threads.extend(ncp.thread_hdls().into_iter());
    v
}

fn read_leader(path: &str) -> Config {
    let file = File::open(path).unwrap_or_else(|_| panic!("file not found: {}", path));
    serde_json::from_reader(file).unwrap_or_else(|_| panic!("failed to parse {}", path))
}
@@ -1,383 +0,0 @@ (deleted file)

extern crate atty;
extern crate env_logger;
extern crate getopts;
extern crate rayon;
extern crate serde_json;
extern crate solana;

use atty::{is, Stream};
use getopts::Options;
use rayon::prelude::*;
use solana::crdt::{Crdt, ReplicatedData};
use solana::hash::Hash;
use solana::mint::Mint;
use solana::nat::udp_public_bind;
use solana::ncp::Ncp;
use solana::signature::{GenKeys, KeyPair, KeyPairUtil};
use solana::streamer::default_window;
use solana::thin_client::ThinClient;
use solana::timing::{duration_as_ms, duration_as_s};
use solana::transaction::Transaction;
use std::env;
use std::fs::File;
use std::io::{stdin, Read};
use std::net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket};
use std::process::exit;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, RwLock};
use std::thread::sleep;
use std::thread::Builder;
use std::thread::JoinHandle;
use std::time::Duration;
use std::time::Instant;

fn print_usage(program: &str, opts: Options) {
    let mut brief = format!("Usage: cat <mint.json> | {} [options]\n\n", program);
    brief += "  Solana client demo creates a number of transactions and\n";
    brief += "  sends them to a target node.";
    brief += "  Takes json formatted mint file to stdin.";

    print!("{}", opts.usage(&brief));
}

fn sample_tx_count(
    exit: Arc<AtomicBool>,
    maxes: Arc<RwLock<Vec<(f64, u64)>>>,
    first_count: u64,
    v: ReplicatedData,
    sample_period: u64,
) {
    let mut client = mk_client(&v);
    let mut now = Instant::now();
    let mut initial_tx_count = client.transaction_count();
    let mut max_tps = 0.0;
    let mut total;
    loop {
        let tx_count = client.transaction_count();
        let duration = now.elapsed();
        now = Instant::now();
        let sample = tx_count - initial_tx_count;
        initial_tx_count = tx_count;
        println!("{}: Transactions processed {}", v.transactions_addr, sample);
        let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
        let tps = (sample * 1_000_000_000) as f64 / ns as f64;
        if tps > max_tps {
            max_tps = tps;
        }
        println!("{}: {:.2} tps", v.transactions_addr, tps);
        total = tx_count - first_count;
        println!(
            "{}: Total Transactions processed {}",
            v.transactions_addr, total
        );
        sleep(Duration::new(sample_period, 0));

        if exit.load(Ordering::Relaxed) {
            println!("exiting validator thread");
            maxes.write().unwrap().push((max_tps, total));
            break;
        }
    }
}

fn generate_and_send_txs(
    client: &mut ThinClient,
    tx_clients: &Vec<ThinClient>,
    mint: &Mint,
    keypairs: &Vec<KeyPair>,
    leader: &ReplicatedData,
    txs: i64,
    last_id: &mut Hash,
    threads: usize,
) {
    println!("Signing transactions... {}", keypairs.len(),);
    let signing_start = Instant::now();
    let transactions: Vec<_> = keypairs
        .par_iter()
        .map(|keypair| Transaction::new(&mint.keypair(), keypair.pubkey(), 1, *last_id))
        .collect();

    let duration = signing_start.elapsed();
    let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
    let bsps = txs as f64 / ns as f64;
    let nsps = ns as f64 / txs as f64;
    println!(
        "Done. {:.2} thousand signatures per second, {:.2} us per signature, {} ms total time",
        bsps * 1_000_000_f64,
        nsps / 1_000_f64,
        duration_as_ms(&duration),
    );

    println!("Transferring {} transactions in {} batches", txs, threads);
    let transfer_start = Instant::now();
    let sz = transactions.len() / threads;
    let chunks: Vec<_> = transactions.chunks(sz).collect();
    chunks
        .into_par_iter()
        .zip(tx_clients)
        .for_each(|(txs, client)| {
            println!(
                "Transferring 1 unit {} times... to {:?}",
                txs.len(),
                leader.transactions_addr
            );
            for tx in txs {
                client.transfer_signed(tx.clone()).unwrap();
            }
        });
    println!(
        "Transfer done. {:?} ms {} tps",
        duration_as_ms(&transfer_start.elapsed()),
        txs as f32 / (duration_as_s(&transfer_start.elapsed()))
    );

    loop {
        let new_id = client.get_last_id();
        if *last_id != new_id {
            *last_id = new_id;
            break;
        }
        sleep(Duration::from_millis(100));
    }
}

fn main() {
    env_logger::init();
    let mut threads = 4usize;
    let mut num_nodes = 1usize;
    let mut time_sec = 60;

    let mut opts = Options::new();
    opts.optopt("l", "", "leader", "leader.json");
    opts.optopt("t", "", "number of threads", &format!("{}", threads));
    opts.optopt(
        "s",
        "",
        "send transactions for this many seconds",
        &format!("{}", time_sec),
    );
    opts.optopt(
        "n",
        "",
        "number of nodes to converge to",
        &format!("{}", num_nodes),
    );
    opts.optflag("h", "help", "print help");
    let args: Vec<String> = env::args().collect();
    let matches = match opts.parse(&args[1..]) {
        Ok(m) => m,
        Err(e) => {
            eprintln!("{}", e);
            exit(1);
        }
    };

    if matches.opt_present("h") {
        let program = args[0].clone();
        print_usage(&program, opts);
        return;
    }
    if matches.opt_present("t") {
        threads = matches.opt_str("t").unwrap().parse().expect("integer");
    }
    if matches.opt_present("n") {
        num_nodes = matches.opt_str("n").unwrap().parse().expect("integer");
    }
    if matches.opt_present("s") {
        time_sec = matches.opt_str("s").unwrap().parse().expect("integer");
    }

    let leader = if matches.opt_present("l") {
        read_leader(matches.opt_str("l").unwrap())
    } else {
        let server_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 8000);
        ReplicatedData::new_leader(&server_addr)
    };

    let signal = Arc::new(AtomicBool::new(false));
    let mut c_threads = vec![];
    let validators = converge(&leader, signal.clone(), num_nodes, &mut c_threads);
    assert_eq!(validators.len(), num_nodes);

    if is(Stream::Stdin) {
        eprintln!("nothing found on stdin, expected a json file");
        exit(1);
    }

    let mut buffer = String::new();
    let num_bytes = stdin().read_to_string(&mut buffer).unwrap();
    if num_bytes == 0 {
        eprintln!("empty file on stdin, expected a json file");
        exit(1);
    }

    println!("Parsing stdin...");
    let mint: Mint = serde_json::from_str(&buffer).unwrap_or_else(|e| {
        eprintln!("failed to parse json: {}", e);
        exit(1);
    });
    let mut client = mk_client(&leader);

    println!("Get last ID...");
    let mut last_id = client.get_last_id();
    println!("Got last ID {:?}", last_id);

    let mut seed = [0u8; 32];
    seed.copy_from_slice(&mint.keypair().public_key_bytes()[..32]);
    let rnd = GenKeys::new(seed);

    println!("Creating keypairs...");
    let txs = 500_000;
    let keypairs = rnd.gen_n_keypairs(txs);

    let first_count = client.transaction_count();
    println!("initial count {}", first_count);

    println!("Sampling tps every second...",);

    // Setup a thread per validator to sample every period
    // collect the max transaction rate and total tx count seen
    let maxes = Arc::new(RwLock::new(Vec::new()));
    let sample_period = 1; // in seconds
    let v_threads: Vec<_> = validators
        .into_iter()
        .map(|v| {
            let exit = signal.clone();
            let maxes = maxes.clone();
            Builder::new()
                .name("solana-client-sample".to_string())
                .spawn(move || {
                    sample_tx_count(exit, maxes, first_count, v, sample_period);
                })
                .unwrap()
        })
        .collect();

    let clients = (0..threads).map(|_| mk_client(&leader)).collect();

    // generate and send transactions for the specified duration
    let time = Duration::new(time_sec, 0);
    let now = Instant::now();
    while now.elapsed() < time {
        generate_and_send_txs(
            &mut client,
            &clients,
            &mint,
            &keypairs,
            &leader,
            txs,
            &mut last_id,
            threads,
        );
    }

    // Stop the sampling threads so it will collect the stats
    signal.store(true, Ordering::Relaxed);
    for t in v_threads {
        t.join().unwrap();
    }

    // Compute/report stats
    let mut max_of_maxes = 0.0;
    let mut total_txs = 0;
    for (max, txs) in maxes.read().unwrap().iter() {
        if *max > max_of_maxes {
            max_of_maxes = *max;
        }
        total_txs += *txs;
    }
    println!(
        "\nHighest TPS: {:.2} sampling period {}s total transactions: {} clients: {}",
        max_of_maxes,
        sample_period,
        total_txs,
        maxes.read().unwrap().len()
    );

    // join the crdt client threads
    for t in c_threads {
        t.join().unwrap();
    }
}

fn mk_client(r: &ReplicatedData) -> ThinClient {
|
||||
let requests_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
|
||||
let transactions_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
|
||||
|
||||
requests_socket
|
||||
.set_read_timeout(Some(Duration::new(1, 0)))
|
||||
.unwrap();
|
||||
|
||||
ThinClient::new(
|
||||
r.requests_addr,
|
||||
requests_socket,
|
||||
r.transactions_addr,
|
||||
transactions_socket,
|
||||
)
|
||||
}
|
||||
|
||||
fn spy_node() -> (ReplicatedData, UdpSocket) {
|
||||
let gossip_socket_pair = udp_public_bind("gossip");
|
||||
let pubkey = KeyPair::new().pubkey();
|
||||
let daddr = "0.0.0.0:0".parse().unwrap();
|
||||
let node = ReplicatedData::new(
|
||||
pubkey,
|
||||
//gossip.local_addr().unwrap(),
|
||||
gossip_socket_pair.addr,
|
||||
daddr,
|
||||
daddr,
|
||||
daddr,
|
||||
daddr,
|
||||
);
|
||||
(node, gossip_socket_pair.receiver)
|
||||
}
|
||||
|
||||
fn converge(
|
||||
leader: &ReplicatedData,
|
||||
exit: Arc<AtomicBool>,
|
||||
num_nodes: usize,
|
||||
threads: &mut Vec<JoinHandle<()>>,
|
||||
) -> Vec<ReplicatedData> {
|
||||
//lets spy on the network
|
||||
let daddr = "0.0.0.0:0".parse().unwrap();
|
||||
let (spy, spy_gossip) = spy_node();
|
||||
let mut spy_crdt = Crdt::new(spy);
|
||||
spy_crdt.insert(&leader);
|
||||
spy_crdt.set_leader(leader.id);
|
||||
let spy_ref = Arc::new(RwLock::new(spy_crdt));
|
||||
let window = default_window();
|
||||
let gossip_send_socket = UdpSocket::bind("0.0.0.0:0").expect("bind 0");
|
||||
let ncp = Ncp::new(
|
||||
spy_ref.clone(),
|
||||
window.clone(),
|
||||
spy_gossip,
|
||||
gossip_send_socket,
|
||||
exit.clone(),
|
||||
).expect("DataReplicator::new");
|
||||
let mut rv = vec![];
|
||||
//wait for the network to converge, 30 seconds should be plenty
|
||||
for _ in 0..30 {
|
||||
let v: Vec<ReplicatedData> = spy_ref
|
||||
.read()
|
||||
.unwrap()
|
||||
.table
|
||||
.values()
|
||||
.into_iter()
|
||||
.filter(|x| x.requests_addr != daddr)
|
||||
.cloned()
|
||||
.collect();
|
||||
if v.len() >= num_nodes {
|
||||
println!("CONVERGED!");
|
||||
rv.extend(v.into_iter());
|
||||
break;
|
||||
}
|
||||
sleep(Duration::new(1, 0));
|
||||
}
|
||||
threads.extend(ncp.thread_hdls.into_iter());
|
||||
rv
|
||||
}
|
||||
|
||||
fn read_leader(path: String) -> ReplicatedData {
|
||||
let file = File::open(path.clone()).expect(&format!("file not found: {}", path));
|
||||
serde_json::from_reader(file).expect(&format!("failed to parse {}", path))
|
||||
}
|
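The epilogue above folds the per-validator samples into a single highest-TPS figure and a grand total. A minimal standalone sketch of that reduction, using the same (max TPS, tx count) tuple layout as the `maxes` vector (illustrative, not the demo's own code):

// Standalone sketch of the stats fold above: reduce per-validator
// (max_tps, tx_count) samples to the highest TPS and a grand total.
fn reduce_samples(samples: &[(f64, u64)]) -> (f64, u64) {
    let mut max_of_maxes = 0.0;
    let mut total_txs = 0;
    for &(max, txs) in samples {
        if max > max_of_maxes {
            max_of_maxes = max;
        }
        total_txs += txs;
    }
    (max_of_maxes, total_txs)
}

fn main() {
    let samples = vec![(1200.5, 30_000), (980.0, 25_000), (1500.25, 41_000)];
    let (max_tps, total) = reduce_samples(&samples);
    println!("Highest TPS: {:.2} total transactions: {}", max_tps, total);
}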
170 src/bin/drone.rs
@@ -1,115 +1,99 @@
extern crate atty;
extern crate bincode;
extern crate env_logger;
extern crate getopts;
#[macro_use]
extern crate clap;
extern crate serde_json;
extern crate solana;
extern crate tokio;
extern crate tokio_codec;
extern crate tokio_io;

use atty::{is, Stream as atty_stream};
use bincode::deserialize;
use getopts::Options;
use solana::crdt::ReplicatedData;
use solana::drone::{Drone, DroneRequest};
use solana::mint::Mint;
use std::env;
use clap::{App, Arg};
use solana::crdt::NodeInfo;
use solana::drone::{Drone, DroneRequest, DRONE_PORT};
use solana::fullnode::Config;
use solana::logger;
use solana::metrics::set_panic_hook;
use solana::signature::read_keypair;
use std::fs::File;
use std::io::{stdin, Read};
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use std::process::exit;
use std::sync::{Arc, Mutex};
use std::thread;
use tokio::net::TcpListener;
use tokio::prelude::*;
use tokio_codec::{BytesCodec, Decoder};

fn print_usage(program: &str, opts: Options) {
    let mut brief = format!("Usage: cat <mint.json> | {} [options]\n\n", program);
    brief += " Run a Solana Drone to act as the custodian of the mint's remaining tokens\n";

    print!("{}", opts.usage(&brief));
}

fn main() {
    env_logger::init();
    let mut opts = Options::new();
    opts.optopt(
        "t",
        "",
        "time",
        "time slice over which to limit token requests to drone",
    );
    opts.optopt("c", "", "cap", "request limit for time slice");
    opts.optopt("l", "", "leader", "leader.json");
    opts.optflag("h", "help", "print help");
    let args: Vec<String> = env::args().collect();
    let matches = match opts.parse(&args[1..]) {
        Ok(m) => m,
        Err(e) => {
            eprintln!("{}", e);
            exit(1);
        }
    logger::setup();
    set_panic_hook("drone");
    let matches = App::new("drone")
        .version(crate_version!())
        .arg(
            Arg::with_name("leader")
                .short("l")
                .long("leader")
                .value_name("PATH")
                .takes_value(true)
                .help("/path/to/leader.json"),
        )
        .arg(
            Arg::with_name("keypair")
                .short("k")
                .long("keypair")
                .value_name("PATH")
                .takes_value(true)
                .required(true)
                .help("/path/to/mint.json"),
        )
        .arg(
            Arg::with_name("time")
                .short("t")
                .long("time")
                .value_name("SECONDS")
                .takes_value(true)
                .help("time slice over which to limit requests to drone"),
        )
        .arg(
            Arg::with_name("cap")
                .short("c")
                .long("cap")
                .value_name("NUMBER")
                .takes_value(true)
                .help("request limit for time slice"),
        )
        .get_matches();

    let leader: NodeInfo;
    if let Some(l) = matches.value_of("leader") {
        leader = read_leader(l).node_info;
    } else {
        let server_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 8000);
        leader = NodeInfo::new_leader(&server_addr);
    };
    if matches.opt_present("h") {
        let program = args[0].clone();
        print_usage(&program, opts);
        return;
    }

    let mint_keypair =
        read_keypair(matches.value_of("keypair").expect("keypair")).expect("client keypair");

    let time_slice: Option<u64>;
    if matches.opt_present("t") {
        time_slice = matches
            .opt_str("t")
            .expect("unexpected string from input")
            .parse()
            .ok();
    if let Some(t) = matches.value_of("time") {
        time_slice = Some(t.to_string().parse().expect("integer"));
    } else {
        time_slice = None;
    }
    let request_cap: Option<u64>;
    if matches.opt_present("c") {
        request_cap = matches
            .opt_str("c")
            .expect("unexpected string from input")
            .parse()
            .ok();
    if let Some(c) = matches.value_of("cap") {
        request_cap = Some(c.to_string().parse().expect("integer"));
    } else {
        request_cap = None;
    }
    let leader = if matches.opt_present("l") {
        read_leader(matches.opt_str("l").unwrap())
    } else {
        let server_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 8000);
        ReplicatedData::new_leader(&server_addr)
    };

    if is(atty_stream::Stdin) {
        eprintln!("nothing found on stdin, expected a json file");
        exit(1);
    }

    let mut buffer = String::new();
    let num_bytes = stdin().read_to_string(&mut buffer).unwrap();
    if num_bytes == 0 {
        eprintln!("empty file on stdin, expected a json file");
        exit(1);
    }

    let mint: Mint = serde_json::from_str(&buffer).unwrap_or_else(|e| {
        eprintln!("failed to parse json: {}", e);
        exit(1);
    });

    let mint_keypair = mint.keypair();

    let drone_addr: SocketAddr = "0.0.0.0:9900".parse().unwrap();
    let drone_addr: SocketAddr = format!("0.0.0.0:{}", DRONE_PORT).parse().unwrap();

    let drone = Arc::new(Mutex::new(Drone::new(
        mint_keypair,
        drone_addr,
        leader.transactions_addr,
        leader.requests_addr,
        leader.contact_info.tpu,
        leader.contact_info.rpu,
        time_slice,
        request_cap,
    )));
@@ -134,8 +118,14 @@ fn main() {

            let processor = reader
                .for_each(move |bytes| {
                    let req: DroneRequest =
                        deserialize(&bytes).expect("deserialize packet in drone");
                    let req: DroneRequest = deserialize(&bytes).or_else(|err| {
                        use std::io;
                        Err(io::Error::new(
                            io::ErrorKind::Other,
                            format!("deserialize packet in drone: {:?}", err),
                        ))
                    })?;

                    println!("Airdrop requested...");
                    // let res = drone2.lock().unwrap().check_rate_limit(client_ip);
                    let res1 = drone2.lock().unwrap().send_airdrop(req);
@@ -145,14 +135,6 @@ fn main() {
                    }
                    Ok(())
                })
                .and_then(|()| {
                    println!("Socket received FIN packet and closed connection");
                    Ok(())
                })
                .or_else(|err| {
                    println!("Socket closed with error: {:?}", err);
                    Err(err)
                })
                .then(|result| {
                    println!("Socket closed with result: {:?}", result);
                    Ok(())
@@ -161,7 +143,7 @@ fn main() {
        });
    tokio::run(done);
}
fn read_leader(path: String) -> ReplicatedData {
    let file = File::open(path.clone()).expect(&format!("file not found: {}", path));
    serde_json::from_reader(file).expect(&format!("failed to parse {}", path))
fn read_leader(path: &str) -> Config {
    let file = File::open(path).unwrap_or_else(|_| panic!("file not found: {}", path));
    serde_json::from_reader(file).unwrap_or_else(|_| panic!("failed to parse {}", path))
}
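The hunks above interleave the removed getopts parser with its clap replacement. A minimal standalone sketch of the clap 2.x builder pattern these binaries converge on (assuming clap 2.x; the flag name and fallback value here are illustrative):

// Minimal sketch of the clap 2.x builder pattern used across these
// binaries: declare typed, self-documenting flags, then read them back
// with value_of(). Assumes clap = "2" in Cargo.toml.
extern crate clap;

use clap::{App, Arg};

fn main() {
    let matches = App::new("example")
        .version("0.7.0")
        .arg(
            Arg::with_name("leader")
                .short("l")
                .long("leader")
                .value_name("PATH")
                .takes_value(true)
                .help("/path/to/leader.json"),
        )
        .get_matches();

    // value_of() returns Option<&str>; missing optional flags fall back here.
    let leader_path = matches.value_of("leader").unwrap_or("leader.json");
    println!("using leader config: {}", leader_path);
}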
src/bin/fullnode-config.rs
@@ -1,66 +1,83 @@
extern crate getopts;
#[macro_use]
extern crate clap;
extern crate dirs;
extern crate serde_json;
extern crate solana;

use getopts::Options;
use solana::crdt::{get_ip_addr, parse_port_or_addr, ReplicatedData};
use clap::{App, Arg};
use solana::crdt::{get_ip_addr, parse_port_or_addr};
use solana::fullnode::Config;
use solana::nat::get_public_ip_addr;
use std::env;
use solana::signature::read_pkcs8;
use std::io;
use std::net::SocketAddr;
use std::process::exit;

fn print_usage(program: &str, opts: Options) {
    let mut brief = format!("Usage: {} [options]\n\n", program);
    brief += " Create a solana fullnode config file\n";

    print!("{}", opts.usage(&brief));
}

fn main() {
    let mut opts = Options::new();
    opts.optopt("b", "", "bind", "bind to port or address");
    opts.optflag(
        "p",
        "",
        "detect public network address using public servers",
    );
    opts.optflag(
        "l",
        "",
        "detect network address from local machine configuration",
    );
    opts.optflag("h", "help", "print help");
    let args: Vec<String> = env::args().collect();
    let matches = match opts.parse(&args[1..]) {
        Ok(m) => m,
        Err(e) => {
            eprintln!("{}", e);
            exit(1);
        }
    };
    if matches.opt_present("h") {
        let program = args[0].clone();
        print_usage(&program, opts);
        return;
    }
    let matches = App::new("fullnode-config")
        .version(crate_version!())
        .arg(
            Arg::with_name("local")
                .short("l")
                .long("local")
                .takes_value(false)
                .help("detect network address from local machine configuration"),
        )
        .arg(
            Arg::with_name("keypair")
                .short("k")
                .long("keypair")
                .value_name("PATH")
                .takes_value(true)
                .help("/path/to/id.json"),
        )
        .arg(
            Arg::with_name("public")
                .short("p")
                .long("public")
                .takes_value(false)
                .help("detect public network address using public servers"),
        )
        .arg(
            Arg::with_name("bind")
                .short("b")
                .long("bind")
                .value_name("PORT")
                .takes_value(true)
                .help("bind to port or address"),
        )
        .get_matches();

    let bind_addr: SocketAddr = {
        let mut bind_addr = parse_port_or_addr(matches.opt_str("b"));
        if matches.opt_present("l") {
        let mut bind_addr = parse_port_or_addr({
            if let Some(b) = matches.value_of("bind") {
                Some(b.to_string())
            } else {
                None
            }
        });
        if matches.is_present("local") {
            let ip = get_ip_addr().unwrap();
            bind_addr.set_ip(ip);
        }
        if matches.opt_present("p") {
        if matches.is_present("public") {
            let ip = get_public_ip_addr().unwrap();
            bind_addr.set_ip(ip);
        }
        bind_addr
    };

    let mut path = dirs::home_dir().expect("home directory");
    let id_path = if matches.is_present("keypair") {
        matches.value_of("keypair").unwrap()
    } else {
        path.extend(&[".config", "solana", "id.json"]);
        path.to_str().unwrap()
    };
    let pkcs8 = read_pkcs8(id_path).expect("client keypair");

    // we need all the receiving sockets to be bound within the expected
    // port range that we open on aws
    let repl_data = ReplicatedData::new_leader(&bind_addr);
    let config = Config::new(&bind_addr, pkcs8);
    let stdout = io::stdout();
    serde_json::to_writer(stdout, &repl_data).expect("serialize");
    serde_json::to_writer(stdout, &config).expect("serialize");
}
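Both fullnode-config and wallet now fall back to a keypair under the home directory when no --keypair is given. A standalone sketch of that fallback, assuming the dirs crate provides home_dir():

// Sketch of the default-keypair-path fallback used above: prefer an
// explicit --keypair PATH, else ~/.config/solana/id.json.
// Assumes dirs = "1" (or later) in Cargo.toml.
extern crate dirs;

fn id_path(explicit: Option<&str>) -> String {
    match explicit {
        Some(p) => p.to_string(),
        None => {
            let mut path = dirs::home_dir().expect("home directory");
            path.extend(&[".config", "solana", "id.json"]);
            path.to_str().unwrap().to_string()
        }
    }
}

fn main() {
    println!("{}", id_path(None));
    println!("{}", id_path(Some("/tmp/id.json")));
}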
src/bin/fullnode.rs
@@ -1,74 +1,66 @@
extern crate atty;
extern crate env_logger;
#[macro_use]
extern crate clap;
extern crate getopts;
extern crate log;
extern crate serde_json;
extern crate solana;

use atty::{is, Stream};
use getopts::Options;
use solana::crdt::{ReplicatedData, TestNode};
use solana::fullnode::FullNode;
use std::env;
use clap::{App, Arg};
use solana::client::mk_client;
use solana::crdt::{NodeInfo, TestNode};
use solana::drone::DRONE_PORT;
use solana::fullnode::{Config, Fullnode};
use solana::logger;
use solana::metrics::set_panic_hook;
use solana::service::Service;
use solana::signature::{Keypair, KeypairUtil};
use solana::wallet::request_airdrop;
use std::fs::File;
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use std::process::exit;
use std::sync::atomic::AtomicBool;
use std::sync::Arc;
//use std::time::Duration;

fn print_usage(program: &str, opts: Options) {
    let mut brief = format!("Usage: cat <transaction.log> | {} [options]\n\n", program);
    brief += " Run a Solana node to handle transactions and\n";
    brief += " write a new transaction log to stdout.\n";
    brief += " Takes existing transaction log from stdin.";

    print!("{}", opts.usage(&brief));
}

fn main() -> () {
    env_logger::init();
    let mut opts = Options::new();
    opts.optflag("h", "help", "print help");
    opts.optopt("l", "", "run with the identity found in FILE", "FILE");
    opts.optopt(
        "t",
        "",
        "testnet; connect to the network at this gossip entry point",
        "HOST:PORT",
    );
    opts.optopt(
        "o",
        "",
        "output log to FILE, defaults to stdout (ignored by validators)",
        "FILE",
    );

    let args: Vec<String> = env::args().collect();
    let matches = match opts.parse(&args[1..]) {
        Ok(m) => m,
        Err(e) => {
            eprintln!("{}", e);
            exit(1);
        }
    };
    if matches.opt_present("h") {
        let program = args[0].clone();
        print_usage(&program, opts);
        return;
    }
    if is(Stream::Stdin) {
        eprintln!("nothing found on stdin, expected a log file");
        exit(1);
    }
    logger::setup();
    set_panic_hook("fullnode");
    let matches = App::new("fullnode")
        .version(crate_version!())
        .arg(
            Arg::with_name("identity")
                .short("i")
                .long("identity")
                .value_name("FILE")
                .takes_value(true)
                .help("run with the identity found in FILE"),
        )
        .arg(
            Arg::with_name("testnet")
                .short("t")
                .long("testnet")
                .value_name("HOST:PORT")
                .takes_value(true)
                .help("connect to the network at this gossip entry point"),
        )
        .arg(
            Arg::with_name("ledger")
                .short("l")
                .long("ledger")
                .value_name("DIR")
                .takes_value(true)
                .required(true)
                .help("use DIR as persistent ledger location"),
        )
        .get_matches();

    let bind_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 8000);
    let mut repl_data = ReplicatedData::new_leader(&bind_addr);
    if matches.opt_present("l") {
        let path = matches.opt_str("l").unwrap();
    let mut keypair = Keypair::new();
    let mut repl_data = NodeInfo::new_leader_with_pubkey(keypair.pubkey(), &bind_addr);
    if let Some(i) = matches.value_of("identity") {
        let path = i.to_string();
        if let Ok(file) = File::open(path.clone()) {
            if let Ok(data) = serde_json::from_reader(file) {
                repl_data = data;
            let parse: serde_json::Result<Config> = serde_json::from_reader(file);
            if let Ok(data) = parse {
                keypair = data.keypair();
                repl_data = data.node_info;
            } else {
                eprintln!("failed to parse {}", path);
                exit(1);
@@ -78,19 +70,49 @@ fn main() -> () {
            exit(1);
        }
    }
    let mut node = TestNode::new_with_bind_addr(repl_data, bind_addr);
    let exit = Arc::new(AtomicBool::new(false));
    let fullnode = if matches.opt_present("t") {
        let testnet_address_string = matches.opt_str("t").unwrap();
        let testnet_addr = testnet_address_string.parse().unwrap();
        FullNode::new(node, false, None, Some(testnet_addr), None, exit)
    } else {
        node.data.current_leader_id = node.data.id.clone();

        let outfile = matches.opt_str("o");
        FullNode::new(node, true, None, None, outfile, exit)
    let leader_pubkey = keypair.pubkey();
    let repl_clone = repl_data.clone();

    let ledger_path = matches.value_of("ledger").unwrap();

    let mut node = TestNode::new_with_bind_addr(repl_data, bind_addr);
    let mut drone_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), DRONE_PORT);
    let fullnode = if let Some(t) = matches.value_of("testnet") {
        let testnet_address_string = t.to_string();
        let testnet_addr: SocketAddr = testnet_address_string.parse().unwrap();
        drone_addr.set_ip(testnet_addr.ip());

        Fullnode::new(node, false, ledger_path, keypair, Some(testnet_addr))
    } else {
        node.data.leader_id = node.data.id;

        Fullnode::new(node, true, ledger_path, keypair, None)
    };
    for t in fullnode.thread_hdls {
        t.join().expect("join");

        let mut client = mk_client(&repl_clone);
        let previous_balance = client.poll_get_balance(&leader_pubkey).unwrap();
        eprintln!("balance is {}", previous_balance);

        if previous_balance == 0 {
            eprintln!("requesting airdrop from {}", drone_addr);
            request_airdrop(&drone_addr, &leader_pubkey, 50).unwrap_or_else(|_| {
                panic!(
                    "Airdrop failed, is the drone address correct {:?} drone running?",
                    drone_addr
                )
            });

            // Try multiple times to confirm a non-zero balance. |poll_get_balance| currently times
            // out after 1 second, and sometimes this is not enough time while the network is
            // booting
            let balance_ok = (0..30).any(|i| {
                let balance = client.poll_get_balance(&leader_pubkey).unwrap();
                eprintln!("new balance is {} (attempt #{})", balance, i);
                balance > 0
            });
            assert!(balance_ok, "0 balance, airdrop failed?");
        }
    }

    fullnode.join().expect("join");
}
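The leader-bootstrap code above polls the balance repeatedly because a single poll_get_balance call times out after a second, which can be too short on a booting network. A standalone sketch of the same retry shape; get_balance is a stand-in closure, not the ThinClient API:

// Sketch of the (0..30).any(..) retry pattern above: poll until a
// non-zero balance is observed or attempts run out. `get_balance` is a
// stand-in closure, not the ThinClient API.
use std::thread::sleep;
use std::time::Duration;

fn wait_for_balance<F: FnMut() -> u64>(mut get_balance: F, attempts: usize) -> bool {
    (0..attempts).any(|i| {
        let balance = get_balance();
        eprintln!("balance is {} (attempt #{})", balance, i);
        if balance > 0 {
            return true;
        }
        sleep(Duration::from_millis(100));
        false
    })
}

fn main() {
    let mut calls = 0;
    // Simulate a network that reports funds on the third poll.
    let ok = wait_for_balance(
        || {
            calls += 1;
            if calls >= 3 {
                50
            } else {
                0
            }
        },
        30,
    );
    assert!(ok, "0 balance, airdrop failed?");
}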
src/bin/genesis.rs
@@ -1,17 +1,45 @@
//! A command-line executable for generating the chain's genesis block.

extern crate atty;
#[macro_use]
extern crate clap;
extern crate serde_json;
extern crate solana;

use atty::{is, Stream};
use solana::entry_writer::EntryWriter;
use clap::{App, Arg};
use solana::ledger::LedgerWriter;
use solana::mint::Mint;
use std::error;
use std::io::{stdin, stdout, Read};
use std::io::{stdin, Read};
use std::process::exit;

fn main() -> Result<(), Box<error::Error>> {
    let matches = App::new("solana-genesis")
        .version(crate_version!())
        .arg(
            Arg::with_name("tokens")
                .short("t")
                .long("tokens")
                .value_name("NUMBER")
                .takes_value(true)
                .required(true)
                .help("Number of tokens with which to initialize mint"),
        )
        .arg(
            Arg::with_name("ledger")
                .short("l")
                .long("ledger")
                .value_name("DIR")
                .takes_value(true)
                .required(true)
                .help("use DIR as persistent ledger location"),
        )
        .get_matches();

    let tokens = value_t_or_exit!(matches, "tokens", i64);
    let ledger_path = matches.value_of("ledger").unwrap();

    if is(Stream::Stdin) {
        eprintln!("nothing found on stdin, expected a json file");
        exit(1);
@@ -24,8 +52,11 @@ fn main() -> Result<(), Box<error::Error>> {
        exit(1);
    }

    let mint: Mint = serde_json::from_str(&buffer)?;
    let mut writer = stdout();
    EntryWriter::write_entries(&mut writer, mint.create_entries())?;
    let pkcs8: Vec<u8> = serde_json::from_str(&buffer)?;
    let mint = Mint::new_with_pkcs8(tokens, pkcs8);

    let mut ledger_writer = LedgerWriter::open(&ledger_path, true)?;
    ledger_writer.write_entries(mint.create_entries())?;

    Ok(())
}
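genesis now reads keygen's output from stdin: a JSON array of PKCS#8 bytes rather than a serialized Mint. A sketch of parsing that shape with serde_json (the byte string below is illustrative, not a real key):

// Sketch of the new genesis stdin format: keygen emits a JSON array of
// pkcs8 bytes, which deserializes straight into Vec<u8>.
extern crate serde_json;

fn main() -> Result<(), serde_json::Error> {
    let buffer = "[48, 83, 2, 1, 1]"; // illustrative bytes, not a real key
    let pkcs8: Vec<u8> = serde_json::from_str(buffer)?;
    assert_eq!(pkcs8.len(), 5);
    println!("parsed {} pkcs8 bytes", pkcs8.len());
    Ok(())
}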
51 src/bin/keygen.rs (new file)
@@ -0,0 +1,51 @@
#[macro_use]
extern crate clap;
extern crate dirs;
extern crate ring;
extern crate serde_json;

use clap::{App, Arg};
use ring::rand::SystemRandom;
use ring::signature::Ed25519KeyPair;
use std::error;
use std::fs::{self, File};
use std::io::Write;
use std::path::Path;

fn main() -> Result<(), Box<error::Error>> {
    let matches = App::new("solana-keygen")
        .version(crate_version!())
        .arg(
            Arg::with_name("outfile")
                .short("o")
                .long("outfile")
                .value_name("PATH")
                .takes_value(true)
                .help("path to generated file"),
        )
        .get_matches();

    let rnd = SystemRandom::new();
    let pkcs8_bytes = Ed25519KeyPair::generate_pkcs8(&rnd)?;
    let serialized = serde_json::to_string(&pkcs8_bytes.to_vec())?;

    let mut path = dirs::home_dir().expect("home directory");
    let outfile = if matches.is_present("outfile") {
        matches.value_of("outfile").unwrap()
    } else {
        path.extend(&[".config", "solana", "id.json"]);
        path.to_str().unwrap()
    };

    if outfile == "-" {
        println!("{}", serialized);
    } else {
        if let Some(outdir) = Path::new(outfile).parent() {
            fs::create_dir_all(outdir)?;
        }
        let mut f = File::create(outfile)?;
        f.write_all(&serialized.into_bytes())?;
    }

    Ok(())
}
137 src/bin/ledger-tool.rs (new file)
@@ -0,0 +1,137 @@
#[macro_use]
extern crate clap;
extern crate serde_json;
extern crate solana;

use clap::{App, Arg, SubCommand};
use solana::bank::Bank;
use solana::ledger::{read_ledger, verify_ledger};
use solana::logger;
use std::io::{stdout, Write};
use std::process::exit;

fn main() {
    logger::setup();
    let matches = App::new("ledger-tool")
        .version(crate_version!())
        .arg(
            Arg::with_name("ledger")
                .short("l")
                .long("ledger")
                .value_name("DIR")
                .takes_value(true)
                .required(true)
                .help("use DIR for ledger location"),
        )
        .arg(
            Arg::with_name("head")
                .short("n")
                .long("head")
                .value_name("NUM")
                .takes_value(true)
                .help("at most the first NUM entries in ledger\n (only applies to verify, print, json commands)"),
        )
        .arg(
            Arg::with_name("precheck")
                .short("p")
                .long("precheck")
                .help("use ledger_verify() to check internal ledger consistency before proceeding"),
        )
        .subcommand(SubCommand::with_name("print").about("Print the ledger"))
        .subcommand(SubCommand::with_name("json").about("Print the ledger in JSON format"))
        .subcommand(SubCommand::with_name("verify").about("Verify the ledger's PoH"))
        .get_matches();

    let ledger_path = matches.value_of("ledger").unwrap();

    if matches.is_present("precheck") {
        if let Err(e) = verify_ledger(&ledger_path) {
            eprintln!("ledger precheck failed, error: {:?} ", e);
            exit(1);
        }
    }
    let entries = match read_ledger(ledger_path, true) {
        Ok(entries) => entries,
        Err(err) => {
            eprintln!("Failed to open ledger at {}: {}", ledger_path, err);
            exit(1);
        }
    };

    let head = match matches.value_of("head") {
        Some(head) => head.parse().expect("please pass a number for --head"),
        None => <usize>::max_value(),
    };

    match matches.subcommand() {
        ("print", _) => {
            let entries = match read_ledger(ledger_path, true) {
                Ok(entries) => entries,
                Err(err) => {
                    eprintln!("Failed to open ledger at {}: {}", ledger_path, err);
                    exit(1);
                }
            };
            for (i, entry) in entries.enumerate() {
                if i >= head {
                    break;
                }
                let entry = entry.unwrap();
                println!("{:?}", entry);
            }
        }
        ("json", _) => {
            stdout().write_all(b"{\"ledger\":[\n").expect("open array");
            for (i, entry) in entries.enumerate() {
                if i >= head {
                    break;
                }
                let entry = entry.unwrap();
                serde_json::to_writer(stdout(), &entry).expect("serialize");
                stdout().write_all(b",\n").expect("newline");
            }
            stdout().write_all(b"\n]}\n").expect("close array");
        }
        ("verify", _) => {
            if head < 2 {
                eprintln!("verify requires at least 2 entries to run");
                exit(1);
            }
            let bank = Bank::default();

            {
                let genesis = match read_ledger(ledger_path, true) {
                    Ok(entries) => entries,
                    Err(err) => {
                        eprintln!("Failed to open ledger at {}: {}", ledger_path, err);
                        exit(1);
                    }
                };

                let genesis = genesis.take(2).map(|e| e.unwrap());

                if let Err(e) = bank.process_ledger(genesis) {
                    eprintln!("verify failed at genesis err: {:?}", e);
                    exit(1);
                }
            }
            let entries = entries.map(|e| e.unwrap());

            let head = head - 2;
            for (i, entry) in entries.skip(2).enumerate() {
                if i >= head {
                    break;
                }
                if let Err(e) = bank.process_entry(entry) {
                    eprintln!("verify failed at entry[{}], err: {:?}", i + 2, e);
                    exit(1);
                }
            }
        }
        ("", _) => {
            eprintln!("{}", matches.usage());
            exit(1);
        }
        _ => unreachable!(),
    };
}
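The json subcommand above writes a separator after every entry, which leaves a trailing comma before the closing bracket. A standalone sketch of separator-before-item streaming that stays strictly valid JSON (illustrative, not the tool's code):

// Sketch of streaming a JSON array without a trailing comma: emit the
// separator before every item except the first.
use std::io::{stdout, Write};

fn main() {
    let entries = ["e0", "e1", "e2"]; // stand-ins for ledger entries
    let out = stdout();
    let mut out = out.lock();
    out.write_all(b"{\"ledger\":[\n").expect("open array");
    for (i, entry) in entries.iter().enumerate() {
        if i > 0 {
            out.write_all(b",\n").expect("separator");
        }
        write!(out, "{:?}", entry).expect("serialize");
    }
    out.write_all(b"\n]}\n").expect("close array");
}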
src/bin/mint.rs (deleted)
@@ -1,29 +0,0 @@
extern crate atty;
extern crate serde_json;
extern crate solana;

use atty::{is, Stream};
use solana::mint::Mint;
use std::io;
use std::process::exit;

fn main() {
    let mut input_text = String::new();
    if is(Stream::Stdin) {
        eprintln!("nothing found on stdin, expected a token number");
        exit(1);
    }

    io::stdin().read_line(&mut input_text).unwrap();
    let trimmed = input_text.trim();
    let tokens = trimmed.parse::<i64>().unwrap_or_else(|e| {
        eprintln!("{}", e);
        exit(1);
    });
    let mint = Mint::new(tokens);
    let serialized = serde_json::to_string(&mint).unwrap_or_else(|e| {
        eprintln!("failed to serialize: {}", e);
        exit(1);
    });
    println!("{}", serialized);
}
src/bin/wallet.rs
@@ -1,26 +1,25 @@
extern crate atty;
extern crate bincode;
extern crate bs58;
#[macro_use]
extern crate clap;
extern crate env_logger;
extern crate getopts;
extern crate dirs;
extern crate serde_json;
extern crate solana;

use bincode::serialize;
use clap::{App, Arg, SubCommand};
use solana::crdt::ReplicatedData;
use solana::drone::DroneRequest;
use solana::mint::Mint;
use solana::signature::{PublicKey, Signature};
use solana::client::mk_client;
use solana::crdt::NodeInfo;
use solana::drone::DRONE_PORT;
use solana::fullnode::Config;
use solana::logger;
use solana::signature::{read_keypair, Keypair, KeypairUtil, Pubkey, Signature};
use solana::thin_client::ThinClient;
use solana::wallet::request_airdrop;
use std::error;
use std::fmt;
use std::fs::File;
use std::io;
use std::io::prelude::*;
use std::net::{IpAddr, Ipv4Addr, SocketAddr, TcpStream, UdpSocket};
use std::process::exit;
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use std::thread::sleep;
use std::time::Duration;

@@ -28,7 +27,7 @@ enum WalletCommand {
    Address,
    Balance,
    AirDrop(i64),
    Pay(i64, PublicKey),
    Pay(i64, Pubkey),
    Confirm(Signature),
}

@@ -56,8 +55,8 @@ impl error::Error for WalletError {
}

struct WalletConfig {
    leader: ReplicatedData,
    id: Mint,
    leader: NodeInfo,
    id: Keypair,
    drone_addr: SocketAddr,
    command: WalletCommand,
}
@@ -66,9 +65,9 @@ impl Default for WalletConfig {
    fn default() -> WalletConfig {
        let default_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 8000);
        WalletConfig {
            leader: ReplicatedData::new_leader(&default_addr.clone()),
            id: Mint::new(0),
            drone_addr: default_addr.clone(),
            leader: NodeInfo::new_leader(&default_addr),
            id: Keypair::new(),
            drone_addr: default_addr,
            command: WalletCommand::Balance,
        }
    }
@@ -76,6 +75,7 @@ impl Default for WalletConfig {

fn parse_args() -> Result<WalletConfig, Box<error::Error>> {
    let matches = App::new("solana-wallet")
        .version(crate_version!())
        .arg(
            Arg::with_name("leader")
                .short("l")
@@ -85,12 +85,12 @@ fn parse_args() -> Result<WalletConfig, Box<error::Error>> {
                .help("/path/to/leader.json"),
        )
        .arg(
            Arg::with_name("mint")
                .short("m")
                .long("mint")
            Arg::with_name("keypair")
                .short("k")
                .long("keypair")
                .value_name("PATH")
                .takes_value(true)
                .help("/path/to/mint.json"),
                .help("/path/to/id.json"),
        )
        .subcommand(
            SubCommand::with_name("airdrop")
@@ -101,6 +101,7 @@ fn parse_args() -> Result<WalletConfig, Box<error::Error>> {
                        .long("tokens")
                        .value_name("NUMBER")
                        .takes_value(true)
                        .required(true)
                        .help("The number of tokens to request"),
                ),
        )
@@ -122,7 +123,6 @@ fn parse_args() -> Result<WalletConfig, Box<error::Error>> {
                        .long("to")
                        .value_name("PUBKEY")
                        .takes_value(true)
                        .required(true)
                        .help("The pubkey of recipient"),
                ),
        )
@@ -141,71 +141,72 @@ fn parse_args() -> Result<WalletConfig, Box<error::Error>> {
        .subcommand(SubCommand::with_name("address").about("Get your public key"))
        .get_matches();

    let leader: ReplicatedData;
    let leader: NodeInfo;
    if let Some(l) = matches.value_of("leader") {
        leader = read_leader(l.to_string());
        leader = read_leader(l)?.node_info;
    } else {
        let server_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 8000);
        leader = ReplicatedData::new_leader(&server_addr);
        leader = NodeInfo::new_leader(&server_addr);
    };

    let id: Mint;
    if let Some(m) = matches.value_of("mint") {
        id = read_mint(m.to_string())?;
    let mut path = dirs::home_dir().expect("home directory");
    let id_path = if matches.is_present("keypair") {
        matches.value_of("keypair").unwrap()
    } else {
        eprintln!("No mint found!");
        exit(1);
        path.extend(&[".config", "solana", "id.json"]);
        path.to_str().unwrap()
    };
    let id = read_keypair(id_path).or_else(|err| {
        Err(WalletError::BadParameter(format!(
            "{}: Unable to open keypair file: {}",
            err, id_path
        )))
    })?;

    let mut drone_addr = leader.transactions_addr.clone();
    drone_addr.set_port(9900);
    let mut drone_addr = leader.contact_info.tpu;
    drone_addr.set_port(DRONE_PORT);

    let command = match matches.subcommand() {
        ("airdrop", Some(airdrop_matches)) => {
            let mut tokens: i64 = id.tokens;
            if airdrop_matches.is_present("tokens") {
                tokens = airdrop_matches.value_of("tokens").unwrap().parse()?;
            }
            let tokens = airdrop_matches.value_of("tokens").unwrap().parse()?;
            Ok(WalletCommand::AirDrop(tokens))
        }
        ("pay", Some(pay_matches)) => {
            let to: PublicKey;
            if pay_matches.is_present("to") {
            let to = if pay_matches.is_present("to") {
                let pubkey_vec = bs58::decode(pay_matches.value_of("to").unwrap())
                    .into_vec()
                    .expect("base58-encoded public key");

                if pubkey_vec.len() != std::mem::size_of::<PublicKey>() {
                    display_actions();
                if pubkey_vec.len() != std::mem::size_of::<Pubkey>() {
                    eprintln!("{}", pay_matches.usage());
                    Err(WalletError::BadParameter("Invalid public key".to_string()))?;
                }
                to = PublicKey::clone_from_slice(&pubkey_vec);
                Pubkey::new(&pubkey_vec)
            } else {
                to = id.pubkey();
            }
            let mut tokens: i64 = id.tokens;
            if pay_matches.is_present("tokens") {
                tokens = pay_matches.value_of("tokens").unwrap().parse()?;
            }
                id.pubkey()
            };

            let tokens = pay_matches.value_of("tokens").unwrap().parse()?;

            Ok(WalletCommand::Pay(tokens, to))
        }
        ("confirm", Some(confirm_matches)) => {
            let sig_vec = bs58::decode(confirm_matches.value_of("signature").unwrap())
            let signatures = bs58::decode(confirm_matches.value_of("signature").unwrap())
                .into_vec()
                .expect("base58-encoded signature");

            if sig_vec.len() == std::mem::size_of::<Signature>() {
                let sig = Signature::clone_from_slice(&sig_vec);
                Ok(WalletCommand::Confirm(sig))
            if signatures.len() == std::mem::size_of::<Signature>() {
                let signature = Signature::new(&signatures);
                Ok(WalletCommand::Confirm(signature))
            } else {
                display_actions();
                eprintln!("{}", confirm_matches.usage());
                Err(WalletError::BadParameter("Invalid signature".to_string()))
            }
        }
        ("balance", Some(_balance_matches)) => Ok(WalletCommand::Balance),
        ("address", Some(_address_matches)) => Ok(WalletCommand::Address),
        ("", None) => {
            display_actions();
            println!("{}", matches.usage());
            Err(WalletError::CommandNotRecognized(
                "no subcommand given".to_string(),
            ))
@@ -228,7 +229,7 @@ fn process_command(
    match config.command {
        // Check client balance
        WalletCommand::Address => {
            println!("{}", bs58::encode(config.id.pubkey()).into_string());
            println!("{}", config.id.pubkey());
        }
        WalletCommand::Balance => {
            println!("Balance requested...");
@@ -242,31 +243,45 @@ fn process_command(
                }
                Err(error) => {
                    println!("An error occurred: {:?}", error);
                    Err(error)?;
                }
            }
        }
        // Request an airdrop from Solana Drone;
        // Request amount is set in request_airdrop function
        WalletCommand::AirDrop(tokens) => {
            println!("Airdrop requested...");
            println!("Airdropping {:?} tokens", tokens);
            let _airdrop = request_airdrop(&config.drone_addr, &config.id, tokens as u64)?;
            // TODO: return airdrop Result from Drone
            sleep(Duration::from_millis(100));
            println!(
                "Your balance is: {:?}",
                client.poll_get_balance(&config.id.pubkey()).unwrap()
                "Requesting airdrop of {:?} tokens from {}",
                tokens, config.drone_addr
            );
            let previous_balance = client.poll_get_balance(&config.id.pubkey())?;
            request_airdrop(&config.drone_addr, &config.id.pubkey(), tokens as u64)?;

            // TODO: return airdrop Result from Drone instead of polling the
            // network
            let mut current_balance = previous_balance;
            for _ in 0..20 {
                sleep(Duration::from_millis(500));
                current_balance = client.poll_get_balance(&config.id.pubkey())?;
                if previous_balance != current_balance {
                    break;
                }
                println!(".");
            }
            println!("Your balance is: {:?}", current_balance);
            if current_balance - previous_balance != tokens {
                Err("Airdrop failed!")?;
            }
        }
        // If client has positive balance, spend tokens in {balance} number of transactions
        WalletCommand::Pay(tokens, to) => {
            let last_id = client.get_last_id();
            let sig = client.transfer(tokens, &config.id.keypair(), to, &last_id)?;
            println!("{}", bs58::encode(sig).into_string());
            let signature = client.transfer(tokens, &config.id, to, &last_id)?;
            println!("{}", signature);
        }
        // Confirm the last client transaction by signature
        WalletCommand::Confirm(sig) => {
            if client.check_signature(&sig) {
        WalletCommand::Confirm(signature) => {
            if client.check_signature(&signature) {
                println!("Confirmed");
            } else {
                println!("Not found");
@@ -276,62 +291,25 @@ fn process_command(
    Ok(())
}

fn display_actions() {
    println!("");
    println!("Commands:");
    println!("  address   Get your public key");
    println!("  balance   Get your account balance");
    println!("  airdrop   Request a batch of tokens");
    println!("  pay       Send tokens to a public key");
    println!("  confirm   Confirm your last payment by signature");
    println!("");
}
fn read_leader(path: &str) -> Result<Config, WalletError> {
    let file = File::open(path.to_string()).or_else(|err| {
        Err(WalletError::BadParameter(format!(
            "{}: Unable to open leader file: {}",
            err, path
        )))
    })?;

fn read_leader(path: String) -> ReplicatedData {
    let file = File::open(path.clone()).expect(&format!("file not found: {}", path));
    serde_json::from_reader(file).expect(&format!("failed to parse {}", path))
}

fn read_mint(path: String) -> Result<Mint, Box<error::Error>> {
    let file = File::open(path.clone())?;
    let mint = serde_json::from_reader(file)?;
    Ok(mint)
}

fn mk_client(r: &ReplicatedData) -> io::Result<ThinClient> {
    let requests_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
    let transactions_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
    requests_socket
        .set_read_timeout(Some(Duration::new(1, 0)))
        .unwrap();

    Ok(ThinClient::new(
        r.requests_addr,
        requests_socket,
        r.transactions_addr,
        transactions_socket,
    ))
}

fn request_airdrop(
    drone_addr: &SocketAddr,
    id: &Mint,
    tokens: u64,
) -> Result<(), Box<error::Error>> {
    let mut stream = TcpStream::connect(drone_addr)?;
    let req = DroneRequest::GetAirdrop {
        airdrop_request_amount: tokens,
        client_public_key: id.pubkey(),
    };
    let tx = serialize(&req).expect("serialize drone request");
    stream.write_all(&tx).unwrap();
    // TODO: add timeout to this function, in case of unresponsive drone
    Ok(())
    serde_json::from_reader(file).or_else(|err| {
        Err(WalletError::BadParameter(format!(
            "{}: Failed to parse leader file: {}",
            err, path
        )))
    })
}

fn main() -> Result<(), Box<error::Error>> {
    env_logger::init();
    logger::setup();
    let config = parse_args()?;
    let mut client = mk_client(&config.leader)?;
    let mut client = mk_client(&config.leader);
    process_command(&config, &mut client)
}
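The pay and confirm branches validate base58 input by decoded length before constructing a Pubkey or Signature. A standalone sketch of that check, assuming the bs58 crate and a 32-byte key width:

// Sketch of the base58 length check used by the pay subcommand:
// decode, then reject anything that isn't exactly the key width.
// Assumes bs58 in Cargo.toml; 32 is the assumed Pubkey width.
extern crate bs58;

fn parse_pubkey_bytes(input: &str) -> Result<Vec<u8>, String> {
    let bytes = bs58::decode(input)
        .into_vec()
        .map_err(|e| format!("not base58: {:?}", e))?;
    if bytes.len() != 32 {
        return Err("Invalid public key".to_string());
    }
    Ok(bytes)
}

fn main() {
    // A 32-byte value round-tripped through base58 passes the check.
    let encoded = bs58::encode(&[1u8; 32]).into_string();
    assert!(parse_pubkey_bytes(&encoded).is_ok());
    assert!(parse_pubkey_bytes("tooshort").is_err());
}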
src/blob_fetch_stage.rs
@@ -1,29 +1,31 @@
//! The `blob_fetch_stage` pulls blobs from UDP sockets and sends it to a channel.

use packet::BlobRecycler;
use service::Service;
use std::net::UdpSocket;
use std::sync::atomic::AtomicBool;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::channel;
use std::sync::Arc;
use std::thread::JoinHandle;
use std::thread::{self, JoinHandle};
use streamer::{self, BlobReceiver};

pub struct BlobFetchStage {
    pub thread_hdls: Vec<JoinHandle<()>>,
    exit: Arc<AtomicBool>,
    thread_hdls: Vec<JoinHandle<()>>,
}

impl BlobFetchStage {
    pub fn new(
        socket: UdpSocket,
        exit: Arc<AtomicBool>,
        blob_recycler: BlobRecycler,
        blob_recycler: &BlobRecycler,
    ) -> (Self, BlobReceiver) {
        Self::new_multi_socket(vec![socket], exit, blob_recycler)
    }
    pub fn new_multi_socket(
        sockets: Vec<UdpSocket>,
        exit: Arc<AtomicBool>,
        blob_recycler: BlobRecycler,
        blob_recycler: &BlobRecycler,
    ) -> (Self, BlobReceiver) {
        let (blob_sender, blob_receiver) = channel();
        let thread_hdls: Vec<_> = sockets
@@ -38,6 +40,23 @@ impl BlobFetchStage {
            })
            .collect();

        (BlobFetchStage { thread_hdls }, blob_receiver)
        (BlobFetchStage { exit, thread_hdls }, blob_receiver)
    }

    pub fn close(&self) {
        self.exit.store(true, Ordering::Relaxed);
    }
}

impl Service for BlobFetchStage {
    fn thread_hdls(self) -> Vec<JoinHandle<()>> {
        self.thread_hdls
    }

    fn join(self) -> thread::Result<()> {
        for thread_hdl in self.thread_hdls() {
            thread_hdl.join()?;
        }
        Ok(())
    }
}
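BlobFetchStage now keeps its exit flag private and exposes close() plus a Service join. A standalone sketch of that shutdown shape using only std threads (the struct is a simplified stand-in, not the crate's Service trait):

// Simplified stand-in for the Service shutdown pattern above: a shared
// AtomicBool signals exit, and join() drains every worker handle.
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::thread::{self, JoinHandle};
use std::time::Duration;

struct Stage {
    exit: Arc<AtomicBool>,
    thread_hdls: Vec<JoinHandle<()>>,
}

impl Stage {
    fn new(workers: usize) -> Self {
        let exit = Arc::new(AtomicBool::new(false));
        let thread_hdls = (0..workers)
            .map(|_| {
                let exit = exit.clone();
                thread::spawn(move || {
                    // Workers poll the flag and wind down once it flips.
                    while !exit.load(Ordering::Relaxed) {
                        thread::sleep(Duration::from_millis(10));
                    }
                })
            })
            .collect();
        Stage { exit, thread_hdls }
    }

    fn close(&self) {
        self.exit.store(true, Ordering::Relaxed);
    }

    fn join(self) -> thread::Result<()> {
        for hdl in self.thread_hdls {
            hdl.join()?;
        }
        Ok(())
    }
}

fn main() {
    let stage = Stage::new(4);
    stage.close();
    stage.join().expect("join");
}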
203 src/broadcast_stage.rs (new file)
@@ -0,0 +1,203 @@
//! The `broadcast_stage` broadcasts data from a leader node to validators
//!
use counter::Counter;
use crdt::{Crdt, CrdtError, NodeInfo};
#[cfg(feature = "erasure")]
use erasure;
use log::Level;
use packet::BlobRecycler;
use result::{Error, Result};
use service::Service;
use std::mem;
use std::net::UdpSocket;
use std::sync::atomic::AtomicUsize;
use std::sync::mpsc::RecvTimeoutError;
use std::sync::{Arc, RwLock};
use std::thread::{self, Builder, JoinHandle};
use std::time::Duration;
use streamer::BlobReceiver;
use window::{self, SharedWindow, WindowIndex, WINDOW_SIZE};

fn broadcast(
    node_info: &NodeInfo,
    broadcast_table: &[NodeInfo],
    window: &SharedWindow,
    recycler: &BlobRecycler,
    receiver: &BlobReceiver,
    sock: &UdpSocket,
    transmit_index: &mut WindowIndex,
    receive_index: &mut u64,
) -> Result<()> {
    let debug_id = node_info.debug_id();
    let timer = Duration::new(1, 0);
    let mut dq = receiver.recv_timeout(timer)?;
    while let Ok(mut nq) = receiver.try_recv() {
        dq.append(&mut nq);
    }

    // flatten deque to vec
    let blobs_vec: Vec<_> = dq.into_iter().collect();

    // We could receive more blobs than window slots so
    // break them up into window-sized chunks to process
    let blobs_chunked = blobs_vec.chunks(WINDOW_SIZE as usize).map(|x| x.to_vec());

    if log_enabled!(Level::Trace) {
        trace!("{}", window::print_window(debug_id, window, *receive_index));
    }

    for mut blobs in blobs_chunked {
        let blobs_len = blobs.len();
        trace!("{:x}: broadcast blobs.len: {}", debug_id, blobs_len);

        // Index the blobs
        window::index_blobs(node_info, &blobs, receive_index)
            .expect("index blobs for initial window");

        // keep the cache of blobs that are broadcast
        inc_new_counter_info!("streamer-broadcast-sent", blobs.len());
        {
            let mut win = window.write().unwrap();
            assert!(blobs.len() <= win.len());
            for b in &blobs {
                let ix = b.read().unwrap().get_index().expect("blob index");
                let pos = (ix % WINDOW_SIZE) as usize;
                if let Some(x) = mem::replace(&mut win[pos].data, None) {
                    trace!(
                        "{:x} popped {} at {}",
                        debug_id,
                        x.read().unwrap().get_index().unwrap(),
                        pos
                    );
                    recycler.recycle(x);
                }
                if let Some(x) = mem::replace(&mut win[pos].coding, None) {
                    trace!(
                        "{:x} popped {} at {}",
                        debug_id,
                        x.read().unwrap().get_index().unwrap(),
                        pos
                    );
                    recycler.recycle(x);
                }

                trace!("{:x} null {}", debug_id, pos);
            }
            while let Some(b) = blobs.pop() {
                let ix = b.read().unwrap().get_index().expect("blob index");
                let pos = (ix % WINDOW_SIZE) as usize;
                trace!("{:x} caching {} at {}", debug_id, ix, pos);
                assert!(win[pos].data.is_none());
                win[pos].data = Some(b);
            }
        }

        // Fill in the coding blob data from the window data blobs
        #[cfg(feature = "erasure")]
        {
            erasure::generate_coding(
                debug_id,
                &mut window.write().unwrap(),
                recycler,
                *receive_index,
                blobs_len,
                &mut transmit_index.coding,
            )?;
        }

        *receive_index += blobs_len as u64;

        // Send blobs out from the window
        Crdt::broadcast(
            &node_info,
            &broadcast_table,
            &window,
            &sock,
            transmit_index,
            *receive_index,
        )?;
    }
    Ok(())
}

pub struct BroadcastStage {
    thread_hdl: JoinHandle<()>,
}

impl BroadcastStage {
    fn run(
        sock: &UdpSocket,
        crdt: &Arc<RwLock<Crdt>>,
        window: &SharedWindow,
        entry_height: u64,
        recycler: &BlobRecycler,
        receiver: &BlobReceiver,
    ) {
        let mut transmit_index = WindowIndex {
            data: entry_height,
            coding: entry_height,
        };
        let mut receive_index = entry_height;
        let me = crdt.read().unwrap().my_data().clone();
        loop {
            let broadcast_table = crdt.read().unwrap().compute_broadcast_table();
            if let Err(e) = broadcast(
                &me,
                &broadcast_table,
                &window,
                &recycler,
                &receiver,
                &sock,
                &mut transmit_index,
                &mut receive_index,
            ) {
                match e {
                    Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => break,
                    Error::RecvTimeoutError(RecvTimeoutError::Timeout) => (),
                    Error::CrdtError(CrdtError::NoPeers) => (), // TODO: Why are the unit-tests throwing hundreds of these?
                    _ => {
                        inc_new_counter_info!("streamer-broadcaster-error", 1, 1);
                        error!("broadcaster error: {:?}", e);
                    }
                }
            }
        }
    }

    /// Service to broadcast messages from the leader to layer 1 nodes.
    /// See `crdt` for network layer definitions.
    /// # Arguments
    /// * `sock` - Socket to send from.
    /// * `exit` - Boolean to signal system exit.
    /// * `crdt` - CRDT structure
    /// * `window` - Cache of blobs that we have broadcast
    /// * `recycler` - Blob recycler.
    /// * `receiver` - Receive channel for blobs to be retransmitted to all the layer 1 nodes.
    pub fn new(
        sock: UdpSocket,
        crdt: Arc<RwLock<Crdt>>,
        window: SharedWindow,
        entry_height: u64,
        recycler: BlobRecycler,
        receiver: BlobReceiver,
    ) -> Self {
        let thread_hdl = Builder::new()
            .name("solana-broadcaster".to_string())
            .spawn(move || {
                Self::run(&sock, &crdt, &window, entry_height, &recycler, &receiver);
            })
            .unwrap();

        BroadcastStage { thread_hdl }
    }
}

impl Service for BroadcastStage {
    fn thread_hdls(self) -> Vec<JoinHandle<()>> {
        vec![self.thread_hdl]
    }

    fn join(self) -> thread::Result<()> {
        self.thread_hdl.join()
    }
}
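broadcast caches each blob at index % WINDOW_SIZE and recycles whatever previously occupied the slot. A standalone sketch of that ring-buffer placement (WINDOW_SIZE here is an assumed small constant; the real one lives in window.rs):

// Sketch of the window placement above: slot = index % WINDOW_SIZE, and
// an older occupant of the slot is evicted (recycled) before caching.
const WINDOW_SIZE: u64 = 8; // assumed for illustration

fn main() {
    let mut window: Vec<Option<u64>> = vec![None; WINDOW_SIZE as usize];
    for ix in 0..20u64 {
        let pos = (ix % WINDOW_SIZE) as usize;
        if let Some(old) = window[pos].replace(ix) {
            println!("popped {} at {}", old, pos); // would be recycled
        }
    }
    // After 20 inserts, slot 4 holds the most recent index that mapped there.
    assert_eq!(window[4], Some(12));
}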
@ -5,25 +5,27 @@
|
||||
|
||||
use chrono::prelude::*;
|
||||
use payment_plan::{Payment, PaymentPlan, Witness};
|
||||
use signature::PublicKey;
|
||||
use signature::Pubkey;
|
||||
use std::mem;
|
||||
|
||||
/// A data type representing a `Witness` that the payment plan is waiting on.
|
||||
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
|
||||
pub enum Condition {
|
||||
/// Wait for a `Timestamp` `Witness` at or after the given `DateTime`.
|
||||
Timestamp(DateTime<Utc>),
|
||||
Timestamp(DateTime<Utc>, Pubkey),
|
||||
|
||||
/// Wait for a `Signature` `Witness` from `PublicKey`.
|
||||
Signature(PublicKey),
|
||||
/// Wait for a `Signature` `Witness` from `Pubkey`.
|
||||
Signature(Pubkey),
|
||||
}
|
||||
|
||||
impl Condition {
|
||||
/// Return true if the given Witness satisfies this Condition.
|
||||
pub fn is_satisfied(&self, witness: &Witness) -> bool {
|
||||
pub fn is_satisfied(&self, witness: &Witness, from: &Pubkey) -> bool {
|
||||
match (self, witness) {
|
||||
(Condition::Signature(pubkey), Witness::Signature(from)) => pubkey == from,
|
||||
(Condition::Timestamp(dt), Witness::Timestamp(last_time)) => dt <= last_time,
|
||||
(Condition::Signature(pubkey), Witness::Signature) => pubkey == from,
|
||||
(Condition::Timestamp(dt, pubkey), Witness::Timestamp(last_time)) => {
|
||||
pubkey == from && dt <= last_time
|
||||
}
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
@ -45,31 +47,31 @@ pub enum Budget {
|
||||
}
|
||||
|
||||
impl Budget {
|
||||
/// Create the simplest budget - one that pays `tokens` to PublicKey.
|
||||
pub fn new_payment(tokens: i64, to: PublicKey) -> Self {
|
||||
/// Create the simplest budget - one that pays `tokens` to Pubkey.
|
||||
pub fn new_payment(tokens: i64, to: Pubkey) -> Self {
|
||||
Budget::Pay(Payment { tokens, to })
|
||||
}
|
||||
|
||||
/// Create a budget that pays `tokens` to `to` after being witnessed by `from`.
|
||||
pub fn new_authorized_payment(from: PublicKey, tokens: i64, to: PublicKey) -> Self {
|
||||
pub fn new_authorized_payment(from: Pubkey, tokens: i64, to: Pubkey) -> Self {
|
||||
Budget::After(Condition::Signature(from), Payment { tokens, to })
|
||||
}
|
||||
|
||||
/// Create a budget that pays `tokens` to `to` after the given DateTime.
|
||||
pub fn new_future_payment(dt: DateTime<Utc>, tokens: i64, to: PublicKey) -> Self {
|
||||
Budget::After(Condition::Timestamp(dt), Payment { tokens, to })
|
||||
pub fn new_future_payment(dt: DateTime<Utc>, from: Pubkey, tokens: i64, to: Pubkey) -> Self {
|
||||
Budget::After(Condition::Timestamp(dt, from), Payment { tokens, to })
|
||||
}
|
||||
|
||||
/// Create a budget that pays `tokens` to `to` after the given DateTime
|
||||
/// unless cancelled by `from`.
|
||||
pub fn new_cancelable_future_payment(
|
||||
dt: DateTime<Utc>,
|
||||
from: PublicKey,
|
||||
from: Pubkey,
|
||||
tokens: i64,
|
||||
to: PublicKey,
|
||||
to: Pubkey,
|
||||
) -> Self {
|
||||
Budget::Or(
|
||||
(Condition::Timestamp(dt), Payment { tokens, to }),
|
||||
(Condition::Timestamp(dt, from), Payment { tokens, to }),
|
||||
(Condition::Signature(from), Payment { tokens, to: from }),
|
||||
)
}

@@ -94,11 +96,11 @@ impl PaymentPlan for Budget {

     /// Apply a witness to the budget to see if the budget can be reduced.
     /// If so, modify the budget in-place.
-    fn apply_witness(&mut self, witness: &Witness) {
+    fn apply_witness(&mut self, witness: &Witness, from: &Pubkey) {
         let new_payment = match self {
-            Budget::After(cond, payment) if cond.is_satisfied(witness) => Some(payment),
-            Budget::Or((cond, payment), _) if cond.is_satisfied(witness) => Some(payment),
-            Budget::Or(_, (cond, payment)) if cond.is_satisfied(witness) => Some(payment),
+            Budget::After(cond, payment) if cond.is_satisfied(witness, from) => Some(payment),
+            Budget::Or((cond, payment), _) if cond.is_satisfied(witness, from) => Some(payment),
+            Budget::Or(_, (cond, payment)) if cond.is_satisfied(witness, from) => Some(payment),
             _ => None,
         }.cloned();

@@ -111,65 +113,82 @@ impl PaymentPlan for Budget {
 #[cfg(test)]
 mod tests {
     use super::*;
+    use signature::{Keypair, KeypairUtil};

     #[test]
     fn test_signature_satisfied() {
-        let sig = PublicKey::default();
-        assert!(Condition::Signature(sig).is_satisfied(&Witness::Signature(sig)));
+        let from = Pubkey::default();
+        assert!(Condition::Signature(from).is_satisfied(&Witness::Signature, &from));
     }

     #[test]
     fn test_timestamp_satisfied() {
         let dt1 = Utc.ymd(2014, 11, 14).and_hms(8, 9, 10);
         let dt2 = Utc.ymd(2014, 11, 14).and_hms(10, 9, 8);
-        assert!(Condition::Timestamp(dt1).is_satisfied(&Witness::Timestamp(dt1)));
-        assert!(Condition::Timestamp(dt1).is_satisfied(&Witness::Timestamp(dt2)));
-        assert!(!Condition::Timestamp(dt2).is_satisfied(&Witness::Timestamp(dt1)));
+        let from = Pubkey::default();
+        assert!(Condition::Timestamp(dt1, from).is_satisfied(&Witness::Timestamp(dt1), &from));
+        assert!(Condition::Timestamp(dt1, from).is_satisfied(&Witness::Timestamp(dt2), &from));
+        assert!(!Condition::Timestamp(dt2, from).is_satisfied(&Witness::Timestamp(dt1), &from));
     }

     #[test]
     fn test_verify() {
         let dt = Utc.ymd(2014, 11, 14).and_hms(8, 9, 10);
-        let from = PublicKey::default();
-        let to = PublicKey::default();
+        let from = Pubkey::default();
+        let to = Pubkey::default();
         assert!(Budget::new_payment(42, to).verify(42));
         assert!(Budget::new_authorized_payment(from, 42, to).verify(42));
-        assert!(Budget::new_future_payment(dt, 42, to).verify(42));
+        assert!(Budget::new_future_payment(dt, from, 42, to).verify(42));
         assert!(Budget::new_cancelable_future_payment(dt, from, 42, to).verify(42));
     }

     #[test]
     fn test_authorized_payment() {
-        let from = PublicKey::default();
-        let to = PublicKey::default();
+        let from = Pubkey::default();
+        let to = Pubkey::default();

         let mut budget = Budget::new_authorized_payment(from, 42, to);
-        budget.apply_witness(&Witness::Signature(from));
+        budget.apply_witness(&Witness::Signature, &from);
         assert_eq!(budget, Budget::new_payment(42, to));
     }

     #[test]
     fn test_future_payment() {
         let dt = Utc.ymd(2014, 11, 14).and_hms(8, 9, 10);
-        let to = PublicKey::default();
+        let from = Keypair::new().pubkey();
+        let to = Keypair::new().pubkey();

-        let mut budget = Budget::new_future_payment(dt, 42, to);
-        budget.apply_witness(&Witness::Timestamp(dt));
+        let mut budget = Budget::new_future_payment(dt, from, 42, to);
+        budget.apply_witness(&Witness::Timestamp(dt), &from);
         assert_eq!(budget, Budget::new_payment(42, to));
     }

+    #[test]
+    fn test_unauthorized_future_payment() {
+        // Ensure timestamp will only be acknowledged if it came from the
+        // whitelisted public key.
+        let dt = Utc.ymd(2014, 11, 14).and_hms(8, 9, 10);
+        let from = Keypair::new().pubkey();
+        let to = Keypair::new().pubkey();
+
+        let mut budget = Budget::new_future_payment(dt, from, 42, to);
+        let orig_budget = budget.clone();
+        budget.apply_witness(&Witness::Timestamp(dt), &to); // <-- Attack!
+        assert_eq!(budget, orig_budget);
+    }
+
     #[test]
     fn test_cancelable_future_payment() {
         let dt = Utc.ymd(2014, 11, 14).and_hms(8, 9, 10);
-        let from = PublicKey::default();
-        let to = PublicKey::default();
+        let from = Pubkey::default();
+        let to = Pubkey::default();

         let mut budget = Budget::new_cancelable_future_payment(dt, from, 42, to);
-        budget.apply_witness(&Witness::Timestamp(dt));
+        budget.apply_witness(&Witness::Timestamp(dt), &from);
         assert_eq!(budget, Budget::new_payment(42, to));

         let mut budget = Budget::new_cancelable_future_payment(dt, from, 42, to);
-        budget.apply_witness(&Witness::Signature(from));
+        budget.apply_witness(&Witness::Signature, &from);
         assert_eq!(budget, Budget::new_payment(42, from));
     }
 }
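The two-argument `apply_witness` is the substance of this change: a witness no longer carries the authorizing key inside the enum variant; the caller passes the `Pubkey` it verified alongside the witness, and every `Condition` checks both. A minimal usage sketch built from the calls visible in the diff above (the module paths are assumptions; only the types and methods shown in the diff are relied on):

```rust
use budget::Budget; // assumed module paths for the types shown in the diff
use payment_plan::{PaymentPlan, Witness};
use signature::{Keypair, KeypairUtil};

fn main() {
    let from = Keypair::new().pubkey();
    let to = Keypair::new().pubkey();

    // Pays 42 to `to`, but only once `from` has signed off.
    let mut budget = Budget::new_authorized_payment(from, 42, to);

    // The caller verified the signature elsewhere; here it vouches for the
    // signer by passing the pubkey next to the witness.
    budget.apply_witness(&Witness::Signature, &from);
    assert_eq!(budget, Budget::new_payment(42, to));
}
```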
src/choose_gossip_peer_strategy.rs
@@ -1,15 +1,15 @@
-use crdt::ReplicatedData;
+use crdt::{CrdtError, NodeInfo};
 use rand::distributions::{Distribution, Weighted, WeightedChoice};
 use rand::thread_rng;
-use result::{Error, Result};
-use signature::PublicKey;
+use result::Result;
+use signature::Pubkey;
 use std;
 use std::collections::HashMap;

 pub const DEFAULT_WEIGHT: u32 = 1;

 pub trait ChooseGossipPeerStrategy {
-    fn choose_peer<'a>(&self, options: Vec<&'a ReplicatedData>) -> Result<&'a ReplicatedData>;
+    fn choose_peer<'a>(&self, options: Vec<&'a NodeInfo>) -> Result<&'a NodeInfo>;
 }

 pub struct ChooseRandomPeerStrategy<'a> {
@@ -27,9 +27,9 @@ impl<'a, 'b> ChooseRandomPeerStrategy<'a> {
 }

 impl<'a> ChooseGossipPeerStrategy for ChooseRandomPeerStrategy<'a> {
-    fn choose_peer<'b>(&self, options: Vec<&'b ReplicatedData>) -> Result<&'b ReplicatedData> {
+    fn choose_peer<'b>(&self, options: Vec<&'b NodeInfo>) -> Result<&'b NodeInfo> {
         if options.is_empty() {
-            return Err(Error::CrdtTooSmall);
+            Err(CrdtError::NoPeers)?;
         }

         let n = ((self.random)() as usize) % options.len();
@@ -61,21 +61,21 @@ impl<'a> ChooseGossipPeerStrategy for ChooseRandomPeerStrategy<'a> {
 pub struct ChooseWeightedPeerStrategy<'a> {
     // The map of last directly observed update_index for each active validator.
     // This is how we get observed(v) from the formula above.
-    remote: &'a HashMap<PublicKey, u64>,
+    remote: &'a HashMap<Pubkey, u64>,
     // The map of rumored update_index for each active validator. Using the formula above,
     // to find rumor_v(i), we would first look up "v" in the outer map, then look up
     // "i" in the inner map, i.e. look up external_liveness[v][i]
-    external_liveness: &'a HashMap<PublicKey, HashMap<PublicKey, u64>>,
+    external_liveness: &'a HashMap<Pubkey, HashMap<Pubkey, u64>>,
     // A function returning the size of the stake for a particular validator, corresponds
     // to stake(i) in the formula above.
-    get_stake: &'a Fn(PublicKey) -> f64,
+    get_stake: &'a Fn(Pubkey) -> f64,
 }

 impl<'a> ChooseWeightedPeerStrategy<'a> {
     pub fn new(
-        remote: &'a HashMap<PublicKey, u64>,
-        external_liveness: &'a HashMap<PublicKey, HashMap<PublicKey, u64>>,
-        get_stake: &'a Fn(PublicKey) -> f64,
+        remote: &'a HashMap<Pubkey, u64>,
+        external_liveness: &'a HashMap<Pubkey, HashMap<Pubkey, u64>>,
+        get_stake: &'a Fn(Pubkey) -> f64,
     ) -> Self {
         ChooseWeightedPeerStrategy {
             remote,
@@ -84,7 +84,7 @@ impl<'a> ChooseWeightedPeerStrategy<'a> {
         }
     }

-    fn calculate_weighted_remote_index(&self, peer_id: PublicKey) -> u32 {
+    fn calculate_weighted_remote_index(&self, peer_id: Pubkey) -> u32 {
         let mut last_seen_index = 0;
         // If the peer is not in our remote table, then we leave last_seen_index as zero.
         // Only happens when a peer appears in our crdt.table but not in our crdt.remote,
@@ -159,7 +159,7 @@ impl<'a> ChooseWeightedPeerStrategy<'a> {

         // Return u32 b/c the weighted sampling API from rand::distributions
         // only takes u32 for weights
-        if weighted_vote >= std::u32::MAX as f64 {
+        if weighted_vote >= f64::from(std::u32::MAX) {
             return std::u32::MAX;
         }

@@ -172,9 +172,9 @@ impl<'a> ChooseWeightedPeerStrategy<'a> {
 }

 impl<'a> ChooseGossipPeerStrategy for ChooseWeightedPeerStrategy<'a> {
-    fn choose_peer<'b>(&self, options: Vec<&'b ReplicatedData>) -> Result<&'b ReplicatedData> {
-        if options.len() < 1 {
-            return Err(Error::CrdtTooSmall);
+    fn choose_peer<'b>(&self, options: Vec<&'b NodeInfo>) -> Result<&'b NodeInfo> {
+        if options.is_empty() {
+            Err(CrdtError::NoPeers)?;
         }

         let mut weighted_peers = vec![];
@@ -192,11 +192,11 @@ impl<'a> ChooseGossipPeerStrategy for ChooseWeightedPeerStrategy<'a> {
 mod tests {
     use choose_gossip_peer_strategy::{ChooseWeightedPeerStrategy, DEFAULT_WEIGHT};
     use logger;
-    use signature::{KeyPair, KeyPairUtil, PublicKey};
+    use signature::{Keypair, KeypairUtil, Pubkey};
     use std;
     use std::collections::HashMap;

-    fn get_stake(_id: PublicKey) -> f64 {
+    fn get_stake(_id: Pubkey) -> f64 {
         1.0
     }

@@ -205,10 +205,10 @@ mod tests {
         logger::setup();

         // Initialize the filler keys
-        let key1 = KeyPair::new().pubkey();
+        let key1 = Keypair::new().pubkey();

-        let remote: HashMap<PublicKey, u64> = HashMap::new();
-        let external_liveness: HashMap<PublicKey, HashMap<PublicKey, u64>> = HashMap::new();
+        let remote: HashMap<Pubkey, u64> = HashMap::new();
+        let external_liveness: HashMap<Pubkey, HashMap<Pubkey, u64>> = HashMap::new();

         let weighted_strategy =
             ChooseWeightedPeerStrategy::new(&remote, &external_liveness, &get_stake);
@@ -224,16 +224,16 @@ mod tests {
         logger::setup();

         // Initialize the filler keys
-        let key1 = KeyPair::new().pubkey();
-        let key2 = KeyPair::new().pubkey();
+        let key1 = Keypair::new().pubkey();
+        let key2 = Keypair::new().pubkey();

-        let remote: HashMap<PublicKey, u64> = HashMap::new();
-        let mut external_liveness: HashMap<PublicKey, HashMap<PublicKey, u64>> = HashMap::new();
+        let remote: HashMap<Pubkey, u64> = HashMap::new();
+        let mut external_liveness: HashMap<Pubkey, HashMap<Pubkey, u64>> = HashMap::new();

         // If only the liveness table contains the entry, should return the
         // weighted liveness entries
         let test_value: u32 = 5;
-        let mut rumors: HashMap<PublicKey, u64> = HashMap::new();
+        let mut rumors: HashMap<Pubkey, u64> = HashMap::new();
         rumors.insert(key2, test_value as u64);
         external_liveness.insert(key1, rumors);

@@ -249,15 +249,15 @@ mod tests {
         logger::setup();

         // Initialize the filler keys
-        let key1 = KeyPair::new().pubkey();
-        let key2 = KeyPair::new().pubkey();
+        let key1 = Keypair::new().pubkey();
+        let key2 = Keypair::new().pubkey();

-        let remote: HashMap<PublicKey, u64> = HashMap::new();
-        let mut external_liveness: HashMap<PublicKey, HashMap<PublicKey, u64>> = HashMap::new();
+        let remote: HashMap<Pubkey, u64> = HashMap::new();
+        let mut external_liveness: HashMap<Pubkey, HashMap<Pubkey, u64>> = HashMap::new();

         // If the vote index is greater than u32::MAX, default to u32::MAX
         let test_value = (std::u32::MAX as u64) + 10;
-        let mut rumors: HashMap<PublicKey, u64> = HashMap::new();
+        let mut rumors: HashMap<Pubkey, u64> = HashMap::new();
         rumors.insert(key2, test_value);
         external_liveness.insert(key1, rumors);

@@ -273,20 +273,20 @@ mod tests {
         logger::setup();

         // Initialize the filler keys
-        let key1 = KeyPair::new().pubkey();
+        let key1 = Keypair::new().pubkey();

-        let mut remote: HashMap<PublicKey, u64> = HashMap::new();
-        let mut external_liveness: HashMap<PublicKey, HashMap<PublicKey, u64>> = HashMap::new();
+        let mut remote: HashMap<Pubkey, u64> = HashMap::new();
+        let mut external_liveness: HashMap<Pubkey, HashMap<Pubkey, u64>> = HashMap::new();

         // Test many validators' rumors in external_liveness
         let num_peers = 10;
-        let mut rumors: HashMap<PublicKey, u64> = HashMap::new();
+        let mut rumors: HashMap<Pubkey, u64> = HashMap::new();

         remote.insert(key1, 0);

         for i in 0..num_peers {
-            let pk = KeyPair::new().pubkey();
-            rumors.insert(pk, i);
+            let pubkey = Keypair::new().pubkey();
+            rumors.insert(pubkey, i);
         }

         external_liveness.insert(key1, rumors);
@@ -303,21 +303,21 @@ mod tests {
         logger::setup();

         // Initialize the filler keys
-        let key1 = KeyPair::new().pubkey();
+        let key1 = Keypair::new().pubkey();

-        let mut remote: HashMap<PublicKey, u64> = HashMap::new();
-        let mut external_liveness: HashMap<PublicKey, HashMap<PublicKey, u64>> = HashMap::new();
+        let mut remote: HashMap<Pubkey, u64> = HashMap::new();
+        let mut external_liveness: HashMap<Pubkey, HashMap<Pubkey, u64>> = HashMap::new();

         // Test many validators' rumors in external_liveness
         let num_peers = 10;
         let old_index = 20;
-        let mut rumors: HashMap<PublicKey, u64> = HashMap::new();
+        let mut rumors: HashMap<Pubkey, u64> = HashMap::new();

         remote.insert(key1, old_index);

         for _i in 0..num_peers {
-            let pk = KeyPair::new().pubkey();
-            rumors.insert(pk, old_index);
+            let pubkey = Keypair::new().pubkey();
+            rumors.insert(pubkey, old_index);
         }

         external_liveness.insert(key1, rumors);
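Both strategies now fail with `Err(CrdtError::NoPeers)?` instead of the monolithic `Error::CrdtTooSmall`, and operate on `NodeInfo`. A sketch of driving the weighted strategy the way the tests above do (everything here appears in the diff except the peer list and the `id` field access, which are assumptions):

```rust
use choose_gossip_peer_strategy::{ChooseGossipPeerStrategy, ChooseWeightedPeerStrategy};
use crdt::NodeInfo;
use signature::Pubkey;
use std::collections::HashMap;

fn get_stake(_id: Pubkey) -> f64 {
    1.0 // flat stake: every validator weighs the same
}

fn pick_peer(options: Vec<&NodeInfo>) {
    let remote: HashMap<Pubkey, u64> = HashMap::new();
    let external_liveness: HashMap<Pubkey, HashMap<Pubkey, u64>> = HashMap::new();

    let strategy = ChooseWeightedPeerStrategy::new(&remote, &external_liveness, &get_stake);
    // With empty tables every peer falls back to DEFAULT_WEIGHT, so the pick
    // is effectively uniform; an empty `options` yields CrdtError::NoPeers.
    match strategy.choose_peer(options) {
        Ok(peer) => println!("gossiping with {:?}", peer.id),
        Err(e) => println!("no peer: {:?}", e),
    }
}
```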
src/client.rs (new file, 20 lines)
@@ -0,0 +1,20 @@
+use crdt::NodeInfo;
+use nat::udp_random_bind;
+use std::time::Duration;
+use thin_client::ThinClient;
+
+pub fn mk_client(r: &NodeInfo) -> ThinClient {
+    let requests_socket = udp_random_bind(8000, 10000, 5).unwrap();
+    let transactions_socket = udp_random_bind(8000, 10000, 5).unwrap();
+
+    requests_socket
+        .set_read_timeout(Some(Duration::new(1, 0)))
+        .unwrap();
+
+    ThinClient::new(
+        r.contact_info.rpu,
+        requests_socket,
+        r.contact_info.tpu,
+        transactions_socket,
+    )
+}
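`mk_client` packages up the two-socket setup every caller was repeating by hand. A sketch of it in use (`poll_get_balance` appears elsewhere in this compare view; obtaining `leader_info` from gossip or a config file is left to the caller):

```rust
use client::mk_client;
use crdt::NodeInfo;
use signature::Pubkey;

fn check_balance(leader_info: &NodeInfo, pubkey: &Pubkey) {
    // Binds two UDP ports in the 8000..10000 range and points them at the
    // leader's RPU (requests) and TPU (transactions) addresses.
    let mut client = mk_client(leader_info);
    let balance = client.poll_get_balance(pubkey);
    println!("balance: {:?}", balance);
}
```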
src/counter.rs (160 lines changed)
@@ -1,13 +1,19 @@
+use influx_db_client as influxdb;
+use metrics;
+use std::env;
 use std::sync::atomic::{AtomicUsize, Ordering};
-use std::time::Duration;
 use timing;

 const DEFAULT_METRICS_RATE: usize = 100;

 pub struct Counter {
     pub name: &'static str,
+    /// total accumulated value
     pub counts: AtomicUsize,
-    pub nanos: AtomicUsize,
     pub times: AtomicUsize,
-    pub lograte: usize,
+    /// last accumulated value logged
+    pub lastlog: AtomicUsize,
+    pub lograte: AtomicUsize,
 }

 macro_rules! create_counter {
@@ -15,55 +21,169 @@ macro_rules! create_counter {
         Counter {
             name: $name,
             counts: AtomicUsize::new(0),
-            nanos: AtomicUsize::new(0),
             times: AtomicUsize::new(0),
-            lograte: $lograte,
+            lastlog: AtomicUsize::new(0),
+            lograte: AtomicUsize::new($lograte),
         }
     };
 }

 macro_rules! inc_counter {
-    ($name:expr, $count:expr, $start:expr) => {
-        unsafe { $name.inc($count, $start.elapsed()) };
+    ($name:expr, $count:expr) => {
+        unsafe { $name.inc($count) };
     };
 }

+macro_rules! inc_new_counter_info {
+    ($name:expr, $count:expr) => {{
+        inc_new_counter!($name, $count, Level::Info, 0);
+    }};
+    ($name:expr, $count:expr, $lograte:expr) => {{
+        inc_new_counter!($name, $count, Level::Info, $lograte);
+    }};
+}
+
+macro_rules! inc_new_counter {
+    ($name:expr, $count:expr, $level:expr, $lograte:expr) => {{
+        if log_enabled!($level) {
+            static mut INC_NEW_COUNTER: Counter = create_counter!($name, $lograte);
+            inc_counter!(INC_NEW_COUNTER, $count);
+        }
+    }};
+}
+
 impl Counter {
-    pub fn inc(&mut self, events: usize, dur: Duration) {
-        let total = dur.as_secs() * 1_000_000_000 + dur.subsec_nanos() as u64;
+    fn default_log_rate() -> usize {
+        let v = env::var("SOLANA_DEFAULT_METRICS_RATE")
+            .map(|x| x.parse().unwrap_or(DEFAULT_METRICS_RATE))
+            .unwrap_or(DEFAULT_METRICS_RATE);
+        if v == 0 {
+            DEFAULT_METRICS_RATE
+        } else {
+            v
+        }
+    }
+    pub fn inc(&mut self, events: usize) {
         let counts = self.counts.fetch_add(events, Ordering::Relaxed);
-        let nanos = self.nanos.fetch_add(total as usize, Ordering::Relaxed);
         let times = self.times.fetch_add(1, Ordering::Relaxed);
-        if times % self.lograte == 0 && times > 0 {
+        let mut lograte = self.lograte.load(Ordering::Relaxed);
+        if lograte == 0 {
+            lograte = Counter::default_log_rate();
+            self.lograte.store(lograte, Ordering::Relaxed);
+        }
+        if times % lograte == 0 && times > 0 {
+            let lastlog = self.lastlog.load(Ordering::Relaxed);
             info!(
-                "COUNTER:{{\"name\": \"{}\", \"counts\": {}, \"nanos\": {}, \"samples\": {}, \"rate\": {}, \"now\": {}}}",
+                "COUNTER:{{\"name\": \"{}\", \"counts\": {}, \"samples\": {}, \"now\": {}}}",
                 self.name,
                 counts,
-                nanos,
                 times,
-                counts as f64 * 1e9 / nanos as f64,
                 timing::timestamp(),
             );
+            metrics::submit(
+                influxdb::Point::new(&format!("counter-{}", self.name))
+                    .add_field(
+                        "count",
+                        influxdb::Value::Integer(counts as i64 - lastlog as i64),
+                    )
+                    .to_owned(),
+            );
+            self.lastlog
+                .compare_and_swap(lastlog, counts, Ordering::Relaxed);
         }
     }
 }
 #[cfg(test)]
 mod tests {
-    use counter::Counter;
+    use counter::{Counter, DEFAULT_METRICS_RATE};
+    use log::Level;
+    use std::env;
     use std::sync::atomic::{AtomicUsize, Ordering};
-    use std::time::Instant;
+    use std::sync::{Once, RwLock, ONCE_INIT};
+
+    fn get_env_lock() -> &'static RwLock<()> {
+        static mut ENV_LOCK: Option<RwLock<()>> = None;
+        static INIT_HOOK: Once = ONCE_INIT;
+
+        unsafe {
+            INIT_HOOK.call_once(|| {
+                ENV_LOCK = Some(RwLock::new(()));
+            });
+            &ENV_LOCK.as_ref().unwrap()
+        }
+    }

     #[test]
     fn test_counter() {
+        let _readlock = get_env_lock().read();
         static mut COUNTER: Counter = create_counter!("test", 100);
-        let start = Instant::now();
         let count = 1;
-        inc_counter!(COUNTER, count, start);
+        inc_counter!(COUNTER, count);
         unsafe {
             assert_eq!(COUNTER.counts.load(Ordering::Relaxed), 1);
-            assert_ne!(COUNTER.nanos.load(Ordering::Relaxed), 0);
             assert_eq!(COUNTER.times.load(Ordering::Relaxed), 1);
-            assert_eq!(COUNTER.lograte, 100);
+            assert_eq!(COUNTER.lograte.load(Ordering::Relaxed), 100);
+            assert_eq!(COUNTER.lastlog.load(Ordering::Relaxed), 0);
             assert_eq!(COUNTER.name, "test");
         }
+        for _ in 0..199 {
+            inc_counter!(COUNTER, 2);
+        }
+        unsafe {
+            assert_eq!(COUNTER.lastlog.load(Ordering::Relaxed), 199);
+        }
+        inc_counter!(COUNTER, 2);
+        unsafe {
+            assert_eq!(COUNTER.lastlog.load(Ordering::Relaxed), 399);
+        }
     }
+    #[test]
+    fn test_inc_new_counter() {
+        let _readlock = get_env_lock().read();
+        //make sure that macros are syntactically correct
+        //the variable is internal to the macro scope so there is no way to introspect it
+        inc_new_counter_info!("counter-1", 1);
+        inc_new_counter_info!("counter-2", 1, 2);
+    }
+    #[test]
+    fn test_lograte() {
+        let _readlock = get_env_lock().read();
+        assert_eq!(
+            Counter::default_log_rate(),
+            DEFAULT_METRICS_RATE,
+            "default_log_rate() is {}, expected {}, SOLANA_DEFAULT_METRICS_RATE environment variable set?",
+            Counter::default_log_rate(),
+            DEFAULT_METRICS_RATE,
+        );
+        static mut COUNTER: Counter = create_counter!("test_lograte", 0);
+        inc_counter!(COUNTER, 2);
+        unsafe {
+            assert_eq!(
+                COUNTER.lograte.load(Ordering::Relaxed),
+                DEFAULT_METRICS_RATE
+            );
+        }
+    }
+
+    #[test]
+    fn test_lograte_env() {
+        assert_ne!(DEFAULT_METRICS_RATE, 0);
+        let _writelock = get_env_lock().write();
+        static mut COUNTER: Counter = create_counter!("test_lograte_env", 0);
+        env::set_var("SOLANA_DEFAULT_METRICS_RATE", "50");
+        inc_counter!(COUNTER, 2);
+        unsafe {
+            assert_eq!(COUNTER.lograte.load(Ordering::Relaxed), 50);
+        }
+
+        static mut COUNTER2: Counter = create_counter!("test_lograte_env", 0);
+        env::set_var("SOLANA_DEFAULT_METRICS_RATE", "0");
+        inc_counter!(COUNTER2, 2);
+        unsafe {
+            assert_eq!(
+                COUNTER2.lograte.load(Ordering::Relaxed),
+                DEFAULT_METRICS_RATE
+            );
+        }
+    }
 }
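The timing plumbing is gone: `inc()` takes only an event count, the log rate is resolved lazily (so `SOLANA_DEFAULT_METRICS_RATE` can override it at first use), and the delta since `lastlog` flows to InfluxDB. A sketch of a call site using the new macro, mirroring the test module above; it assumes the counter macros are visible at the call site (for example via `#[macro_use]` on the module) and that `log::Level` is in scope, since the macro expands to a `log_enabled!(Level::Info)` check:

```rust
use log::Level; // required by the inc_new_counter_info! expansion

fn on_packets(count: usize) {
    // First use creates a static Counter named "stage-packets" at this call
    // site; every 10th sample logs the running total and submits the delta
    // since the last log to the metrics sink.
    inc_new_counter_info!("stage-packets", count, 10);
}
```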
src/crdt.rs (1599 lines changed): diff suppressed because it is too large.
src/drone.rs (156 lines changed)
@@ -4,7 +4,10 @@
 //! checking requests against a request cap for a given time time_slice
 //! and (to come) an IP rate limit.

-use signature::{KeyPair, PublicKey};
+use influx_db_client as influxdb;
+use metrics;
+use signature::Signature;
+use signature::{Keypair, Pubkey};
 use std::io;
 use std::io::{Error, ErrorKind};
 use std::net::{IpAddr, SocketAddr, UdpSocket};
@@ -13,18 +16,19 @@ use thin_client::ThinClient;
 use transaction::Transaction;

 pub const TIME_SLICE: u64 = 60;
-pub const REQUEST_CAP: u64 = 150_000;
+pub const REQUEST_CAP: u64 = 1_000_000;
 pub const DRONE_PORT: u16 = 9900;

-#[derive(Serialize, Deserialize, Debug)]
+#[derive(Serialize, Deserialize, Debug, Clone, Copy)]
 pub enum DroneRequest {
     GetAirdrop {
         airdrop_request_amount: u64,
-        client_public_key: PublicKey,
+        client_pubkey: Pubkey,
     },
 }

 pub struct Drone {
-    mint_keypair: KeyPair,
+    mint_keypair: Keypair,
     ip_cache: Vec<IpAddr>,
     _airdrop_addr: SocketAddr,
     transactions_addr: SocketAddr,
@@ -36,7 +40,7 @@ pub struct Drone {

 impl Drone {
     pub fn new(
-        mint_keypair: KeyPair,
+        mint_keypair: Keypair,
         _airdrop_addr: SocketAddr,
         transactions_addr: SocketAddr,
         requests_addr: SocketAddr,
@@ -91,8 +95,7 @@ impl Drone {
         }
     }

-    pub fn send_airdrop(&mut self, req: DroneRequest) -> Result<usize, io::Error> {
-        let tx: Transaction;
+    pub fn send_airdrop(&mut self, req: DroneRequest) -> Result<Signature, io::Error> {
         let request_amount: u64;
         let requests_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
         let transactions_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
@@ -105,39 +108,63 @@ impl Drone {
         );
         let last_id = client.get_last_id();

-        match req {
+        let tx = match req {
             DroneRequest::GetAirdrop {
                 airdrop_request_amount,
-                client_public_key,
+                client_pubkey,
             } => {
-                request_amount = airdrop_request_amount.clone();
-                tx = Transaction::new(
+                info!(
+                    "Requesting airdrop of {} to {:?}",
+                    airdrop_request_amount, client_pubkey
+                );
+                request_amount = airdrop_request_amount;
+                Transaction::new(
                     &self.mint_keypair,
-                    client_public_key,
+                    client_pubkey,
                     airdrop_request_amount as i64,
                     last_id,
-                );
+                )
             }
-        }
+        };
         if self.check_request_limit(request_amount) {
             self.request_current += request_amount;
-            client.transfer_signed(tx)
+            metrics::submit(
+                influxdb::Point::new("drone")
+                    .add_tag("op", influxdb::Value::String("airdrop".to_string()))
+                    .add_field(
+                        "request_amount",
+                        influxdb::Value::Integer(request_amount as i64),
+                    )
+                    .add_field(
+                        "request_current",
+                        influxdb::Value::Integer(self.request_current as i64),
+                    )
+                    .to_owned(),
+            );
+            client.transfer_signed(&tx)
         } else {
             Err(Error::new(ErrorKind::Other, "token limit reached"))
         }
     }
 }

+impl Drop for Drone {
+    fn drop(&mut self) {
+        metrics::flush();
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use bank::Bank;
     use crdt::{get_ip_addr, TestNode};
     use drone::{Drone, DroneRequest, REQUEST_CAP, TIME_SLICE};
-    use fullnode::FullNode;
+    use fullnode::Fullnode;
     use logger;
     use mint::Mint;
-    use signature::{KeyPair, KeyPairUtil};
-    use std::io::sink;
+    use service::Service;
+    use signature::{Keypair, KeypairUtil};
+    use std::fs::remove_dir_all;
     use std::net::{SocketAddr, UdpSocket};
     use std::sync::atomic::{AtomicBool, Ordering};
     use std::sync::Arc;
@@ -147,7 +174,7 @@ mod tests {

     #[test]
     fn test_check_request_limit() {
-        let keypair = KeyPair::new();
+        let keypair = Keypair::new();
         let mut addr: SocketAddr = "0.0.0.0:9900".parse().unwrap();
         addr.set_ip(get_ip_addr().unwrap());
         let transactions_addr = "0.0.0.0:0".parse().unwrap();
@@ -167,7 +194,7 @@ mod tests {

     #[test]
     fn test_clear_request_count() {
-        let keypair = KeyPair::new();
+        let keypair = Keypair::new();
         let mut addr: SocketAddr = "0.0.0.0:9900".parse().unwrap();
         addr.set_ip(get_ip_addr().unwrap());
         let transactions_addr = "0.0.0.0:0".parse().unwrap();
@@ -181,7 +208,7 @@ mod tests {

     #[test]
     fn test_add_ip_to_cache() {
-        let keypair = KeyPair::new();
+        let keypair = Keypair::new();
         let mut addr: SocketAddr = "0.0.0.0:9900".parse().unwrap();
         addr.set_ip(get_ip_addr().unwrap());
         let transactions_addr = "0.0.0.0:0".parse().unwrap();
@@ -196,7 +223,7 @@ mod tests {

     #[test]
     fn test_clear_ip_cache() {
-        let keypair = KeyPair::new();
+        let keypair = Keypair::new();
         let mut addr: SocketAddr = "0.0.0.0:9900".parse().unwrap();
         addr.set_ip(get_ip_addr().unwrap());
         let transactions_addr = "0.0.0.0:0".parse().unwrap();
@@ -213,7 +240,7 @@ mod tests {

     #[test]
     fn test_drone_default_init() {
-        let keypair = KeyPair::new();
+        let keypair = Keypair::new();
         let mut addr: SocketAddr = "0.0.0.0:9900".parse().unwrap();
         addr.set_ip(get_ip_addr().unwrap());
         let transactions_addr = "0.0.0.0:0".parse().unwrap();
@@ -232,33 +259,43 @@ mod tests {
         assert_eq!(drone.request_cap, REQUEST_CAP);
     }

+    fn tmp_ledger_path(name: &str) -> String {
+        use std::env;
+        let out_dir = env::var("OUT_DIR").unwrap_or_else(|_| "target".to_string());
+        let keypair = Keypair::new();
+
+        format!("{}/tmp-ledger-{}-{}", out_dir, name, keypair.pubkey())
+    }
+
     #[test]
     #[ignore]
     fn test_send_airdrop() {
         const SMALL_BATCH: i64 = 50;
         const TPS_BATCH: i64 = 5_000_000;

         logger::setup();
-        let leader = TestNode::new();
+        let leader_keypair = Keypair::new();
+        let leader = TestNode::new_localhost_with_pubkey(leader_keypair.pubkey());

         let alice = Mint::new(10_000_000);
         let bank = Bank::new(&alice);
-        let bob_pubkey = KeyPair::new().pubkey();
-        let carlos_pubkey = KeyPair::new().pubkey();
+        let bob_pubkey = Keypair::new().pubkey();
+        let carlos_pubkey = Keypair::new().pubkey();
         let exit = Arc::new(AtomicBool::new(false));
+        let leader_data = leader.data.clone();
+        let ledger_path = tmp_ledger_path("send_airdrop");

-        let server = FullNode::new_leader(
+        let server = Fullnode::new_leader(
+            leader_keypair,
             bank,
             0,
-            Some(Duration::from_millis(30)),
-            leader.data.clone(),
-            leader.sockets.requests,
-            leader.sockets.transaction,
-            leader.sockets.broadcast,
-            leader.sockets.respond,
-            leader.sockets.gossip,
+            &[],
+            leader,
             exit.clone(),
-            sink(),
+            &ledger_path,
+            false,
         );
         //TODO: this seems unstable
         sleep(Duration::from_millis(900));

         let mut addr: SocketAddr = "0.0.0.0:9900".parse().expect("bind to drone socket");
@@ -266,48 +303,47 @@ mod tests {
         let mut drone = Drone::new(
             alice.keypair(),
             addr,
-            leader.data.transactions_addr,
-            leader.data.requests_addr,
+            leader_data.contact_info.tpu,
+            leader_data.contact_info.rpu,
             None,
-            Some(5_000_050),
+            Some(150_000),
         );

-        let bob_req = DroneRequest::GetAirdrop {
-            airdrop_request_amount: 50,
-            client_public_key: bob_pubkey,
-        };
-        let bob_result = drone.send_airdrop(bob_req).expect("send airdrop test");
-        assert!(bob_result > 0);
-
-        let carlos_req = DroneRequest::GetAirdrop {
-            airdrop_request_amount: 5_000_000,
-            client_public_key: carlos_pubkey,
-        };
-        let carlos_result = drone.send_airdrop(carlos_req).expect("send airdrop test");
-        assert!(carlos_result > 0);
-
         let requests_socket = UdpSocket::bind("0.0.0.0:0").expect("drone bind to requests socket");
         let transactions_socket =
             UdpSocket::bind("0.0.0.0:0").expect("drone bind to transactions socket");

         let mut client = ThinClient::new(
-            leader.data.requests_addr,
+            leader_data.contact_info.rpu,
             requests_socket,
-            leader.data.transactions_addr,
+            leader_data.contact_info.tpu,
             transactions_socket,
         );

-        let bob_balance = client.poll_get_balance(&bob_pubkey);
+        let bob_req = DroneRequest::GetAirdrop {
+            airdrop_request_amount: 50,
+            client_pubkey: bob_pubkey,
+        };
+        let bob_sig = drone.send_airdrop(bob_req).unwrap();
+        assert!(client.poll_for_signature(&bob_sig).is_ok());
+
+        let carlos_req = DroneRequest::GetAirdrop {
+            airdrop_request_amount: 5_000_000,
+            client_pubkey: carlos_pubkey,
+        };
+        let carlos_sig = drone.send_airdrop(carlos_req).unwrap();
+        assert!(client.poll_for_signature(&carlos_sig).is_ok());
+
+        let bob_balance = client.get_balance(&bob_pubkey);
         info!("Small request balance: {:?}", bob_balance);
         assert_eq!(bob_balance.unwrap(), SMALL_BATCH);

-        let carlos_balance = client.poll_get_balance(&carlos_pubkey);
+        let carlos_balance = client.get_balance(&carlos_pubkey);
         info!("TPS request balance: {:?}", carlos_balance);
         assert_eq!(carlos_balance.unwrap(), TPS_BATCH);

         exit.store(true, Ordering::Relaxed);
-        for t in server.thread_hdls {
-            t.join().unwrap();
-        }
+        server.join().unwrap();
+        remove_dir_all(ledger_path).unwrap();
     }
 }
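`send_airdrop` now hands back the transfer's `Signature` rather than a byte count, which lets callers confirm the airdrop landed instead of sleeping and hoping. A sketch assembled from the calls in the test above (the `ThinClient` is assumed to point at the same leader the drone transacts with):

```rust
use drone::{Drone, DroneRequest};
use signature::{Keypair, KeypairUtil};
use thin_client::ThinClient;

fn airdrop_to_self(drone: &mut Drone, client: &mut ThinClient) {
    let me = Keypair::new().pubkey();
    let req = DroneRequest::GetAirdrop {
        airdrop_request_amount: 50,
        client_pubkey: me,
    };
    let sig = drone.send_airdrop(req).expect("airdrop");
    // Block until the leader has processed the transfer, then read the balance.
    assert!(client.poll_for_signature(&sig).is_ok());
    println!("balance: {:?}", client.get_balance(&me));
}
```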
src/entry.rs (103 lines changed)
@@ -2,10 +2,13 @@
 //! unique ID that is the hash of the Entry before it, plus the hash of the
 //! transactions within it. Entries cannot be reordered, and its field `num_hashes`
 //! represents an approximate amount of time since the last Entry was created.
-use bincode::serialized_size;
+use bincode::{serialize_into, serialized_size};
 use hash::{extend_and_hash, hash, Hash};
-use packet::BLOB_DATA_SIZE;
+use packet::{BlobRecycler, SharedBlob, BLOB_DATA_SIZE};
 use rayon::prelude::*;
+use signature::Pubkey;
+use std::io::Cursor;
+use std::net::SocketAddr;
 use transaction::Transaction;

 /// Each Entry contains three pieces of data. The `num_hashes` field is the number
@@ -20,7 +23,7 @@ use transaction::Transaction;
 /// world's fastest processor at the time the entry was recorded. Or said another way, it
 /// is physically not possible for a shorter duration to have occurred if one assumes the
 /// hash was computed by the world's fastest processor at that time. The hash chain is both
-/// a Verifiable Delay Function (VDF) and a Proof of Work (not to be confused with Proof or
+/// a Verifiable Delay Function (VDF) and a Proof of Work (not to be confused with Proof of
 /// Work consensus!)

 #[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
@@ -32,7 +35,7 @@ pub struct Entry {
     pub id: Hash,

     /// An unordered list of transactions that were observed before the Entry ID was
-    /// generated. The may have been observed before a previous Entry ID but were
+    /// generated. They may have been observed before a previous Entry ID but were
     /// pushed back into this list to ensure deterministic interpretation of the ledger.
     pub transactions: Vec<Transaction>,

@@ -51,11 +54,11 @@ impl Entry {
     /// Creates the next Entry `num_hashes` after `start_hash`.
     pub fn new(
         start_hash: &Hash,
-        cur_hashes: u64,
+        num_hashes: u64,
         transactions: Vec<Transaction>,
         has_more: bool,
     ) -> Self {
-        let num_hashes = cur_hashes + if transactions.is_empty() { 0 } else { 1 };
+        let num_hashes = num_hashes + if transactions.is_empty() { 0 } else { 1 };
         let id = next_hash(start_hash, 0, &transactions);
         let entry = Entry {
             num_hashes,
@@ -64,10 +67,49 @@ impl Entry {
             has_more,
             pad: [0, 0, 0],
         };
-        assert!(serialized_size(&entry).unwrap() <= BLOB_DATA_SIZE as u64);
+
+        let size = serialized_size(&entry).unwrap();
+        if size > BLOB_DATA_SIZE as u64 {
+            panic!(
+                "Serialized entry size too large: {} ({} transactions):",
+                size,
+                entry.transactions.len()
+            );
+        }
         entry
     }

+    pub fn to_blob(
+        &self,
+        blob_recycler: &BlobRecycler,
+        idx: Option<u64>,
+        id: Option<Pubkey>,
+        addr: Option<&SocketAddr>,
+    ) -> SharedBlob {
+        let blob = blob_recycler.allocate();
+        {
+            let mut blob_w = blob.write().unwrap();
+            let pos = {
+                let mut out = Cursor::new(blob_w.data_mut());
+                serialize_into(&mut out, &self).expect("failed to serialize output");
+                out.position() as usize
+            };
+            blob_w.set_size(pos);
+
+            if let Some(idx) = idx {
+                blob_w.set_index(idx).expect("set_index()");
+            }
+            if let Some(id) = id {
+                blob_w.set_id(id).expect("set_id()");
+            }
+            if let Some(addr) = addr {
+                blob_w.meta.set_addr(addr);
+            }
+            blob_w.set_flags(0).unwrap();
+        }
+        blob
+    }
+
+    pub fn will_fit(transactions: Vec<Transaction>) -> bool {
+        serialized_size(&Entry {
+            num_hashes: 0,
@@ -81,13 +123,13 @@ impl Entry {
     /// Creates the next Tick Entry `num_hashes` after `start_hash`.
     pub fn new_mut(
         start_hash: &mut Hash,
-        cur_hashes: &mut u64,
+        num_hashes: &mut u64,
         transactions: Vec<Transaction>,
         has_more: bool,
     ) -> Self {
-        let entry = Self::new(start_hash, *cur_hashes, transactions, has_more);
+        let entry = Self::new(start_hash, *num_hashes, transactions, has_more);
         *start_hash = entry.id;
-        *cur_hashes = 0;
+        *num_hashes = 0;
         assert!(serialized_size(&entry).unwrap() <= BLOB_DATA_SIZE as u64);
         entry
     }
@@ -107,14 +149,31 @@ impl Entry {
     /// Verifies self.id is the result of hashing a `start_hash` `self.num_hashes` times.
     /// If the transaction is not a Tick, then hash that as well.
     pub fn verify(&self, start_hash: &Hash) -> bool {
-        self.transactions.par_iter().all(|tx| tx.verify_plan())
-            && self.id == next_hash(start_hash, self.num_hashes, &self.transactions)
+        let tx_plans_verified = self.transactions.par_iter().all(|tx| {
+            let r = tx.verify_plan();
+            if !r {
+                warn!("tx plan invalid: {:?}", tx);
+            }
+            r
+        });
+        if !tx_plans_verified {
+            return false;
+        }
+        let ref_hash = next_hash(start_hash, self.num_hashes, &self.transactions);
+        if self.id != ref_hash {
+            warn!(
+                "next_hash is invalid expected: {:?} actual: {:?}",
+                self.id, ref_hash
+            );
+            return false;
+        }
+        true
     }
 }

 fn add_transaction_data(hash_data: &mut Vec<u8>, tx: &Transaction) {
     hash_data.push(0u8);
-    hash_data.extend_from_slice(&tx.sig);
+    hash_data.extend_from_slice(&tx.signature.as_ref());
 }

 /// Creates the hash `num_hashes` after `start_hash`. If the transaction contains
@@ -124,7 +183,7 @@ fn add_transaction_data(hash_data: &mut Vec<u8>, tx: &Transaction) {
 fn next_hash(start_hash: &Hash, num_hashes: u64, transactions: &[Transaction]) -> Hash {
     let mut id = *start_hash;
     for _ in 1..num_hashes {
-        id = hash(&id);
+        id = hash(&id.as_ref());
     }

     // Hash all the transaction data
@@ -136,7 +195,7 @@ fn next_hash(start_hash: &Hash, num_hashes: u64, transactions: &[Transaction]) -
     if !hash_data.is_empty() {
         extend_and_hash(&id, &hash_data)
     } else if num_hashes != 0 {
-        hash(&id)
+        hash(&id.as_ref())
     } else {
         id
     }
@@ -144,7 +203,7 @@ fn next_hash(start_hash: &Hash, num_hashes: u64, transactions: &[Transaction]) -

 /// Creates the next Tick or Transaction Entry `num_hashes` after `start_hash`.
 pub fn next_entry(start_hash: &Hash, num_hashes: u64, transactions: Vec<Transaction>) -> Entry {
-    assert!(num_hashes > 0 || transactions.len() == 0);
+    assert!(num_hashes > 0 || transactions.is_empty());
     Entry {
         num_hashes,
         id: next_hash(start_hash, num_hashes, &transactions),
@@ -160,13 +219,13 @@ mod tests {
     use chrono::prelude::*;
     use entry::Entry;
     use hash::hash;
-    use signature::{KeyPair, KeyPairUtil};
+    use signature::{Keypair, KeypairUtil};
     use transaction::Transaction;

     #[test]
     fn test_entry_verify() {
         let zero = Hash::default();
-        let one = hash(&zero);
+        let one = hash(&zero.as_ref());
         assert!(Entry::new_tick(0, &zero).verify(&zero)); // base case
         assert!(!Entry::new_tick(0, &zero).verify(&one)); // base case, bad
         assert!(next_entry(&zero, 1, vec![]).verify(&zero)); // inductive step
@@ -178,7 +237,7 @@ mod tests {
         let zero = Hash::default();

         // First, verify entries
-        let keypair = KeyPair::new();
+        let keypair = Keypair::new();
         let tx0 = Transaction::new(&keypair, keypair.pubkey(), 0, zero);
         let tx1 = Transaction::new(&keypair, keypair.pubkey(), 1, zero);
         let mut e0 = Entry::new(&zero, 0, vec![tx0.clone(), tx1.clone()], false);
@@ -195,7 +254,7 @@ mod tests {
         let zero = Hash::default();

         // First, verify entries
-        let keypair = KeyPair::new();
+        let keypair = Keypair::new();
         let tx0 = Transaction::new_timestamp(&keypair, Utc::now(), zero);
         let tx1 = Transaction::new_signature(&keypair, Default::default(), zero);
         let mut e0 = Entry::new(&zero, 0, vec![tx0.clone(), tx1.clone()], false);
@@ -218,7 +277,7 @@ mod tests {
         assert_eq!(tick.num_hashes, 0);
         assert_eq!(tick.id, zero);

-        let keypair = KeyPair::new();
+        let keypair = Keypair::new();
         let tx0 = Transaction::new_timestamp(&keypair, Utc::now(), zero);
         let entry0 = next_entry(&zero, 1, vec![tx0.clone()]);
         assert_eq!(entry0.num_hashes, 1);
@@ -229,7 +288,7 @@ mod tests {
     #[should_panic]
     fn test_next_entry_panic() {
         let zero = Hash::default();
-        let keypair = KeyPair::new();
+        let keypair = Keypair::new();
         let tx = Transaction::new(&keypair, keypair.pubkey(), 0, zero);
         next_entry(&zero, 0, vec![tx]);
     }
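`to_blob()` moves entry-to-blob serialization onto `Entry` itself, with the window index, sender id, and destination address all optional. A sketch of the minimal call, using only names from the diff (the surrounding broadcast pipeline is assumed):

```rust
use entry::Entry;
use packet::{BlobRecycler, SharedBlob};

fn entry_to_blob(entry: &Entry, recycler: &BlobRecycler, entry_height: u64) -> SharedBlob {
    // Stamp the blob with its window index; skip the id and address here,
    // since a broadcaster may fill those in later.
    entry.to_blob(recycler, Some(entry_height), None, None)
}
```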
src/entry_writer.rs
@@ -3,9 +3,10 @@
 //! stdout, and then sends the Entry to its output channel.

 use bank::Bank;
+use bincode;
 use entry::Entry;
-use serde_json;
 use std::io::{self, BufRead, Error, ErrorKind, Write};
+use std::mem::size_of;

 pub struct EntryWriter<'a, W> {
     bank: &'a Bank,
@@ -19,8 +20,16 @@ impl<'a, W: Write> EntryWriter<'a, W> {
 }

 fn write_entry(writer: &mut W, entry: &Entry) -> io::Result<()> {
-    let serialized = serde_json::to_string(entry).unwrap();
-    writeln!(writer, "{}", serialized)
+    let entry_bytes =
+        bincode::serialize(&entry).map_err(|e| Error::new(ErrorKind::Other, e.to_string()))?;
+
+    let len = entry_bytes.len();
+    let len_bytes =
+        bincode::serialize(&len).map_err(|e| Error::new(ErrorKind::Other, e.to_string()))?;
+
+    writer.write_all(&len_bytes[..])?;
+    writer.write_all(&entry_bytes[..])?;
+    writer.flush()
 }

 pub fn write_entries<I>(writer: &mut W, entries: I) -> io::Result<()>
@@ -49,15 +58,44 @@ impl<'a, W: Write> EntryWriter<'a, W> {
     }
 }

-pub fn read_entry(s: String) -> io::Result<Entry> {
-    serde_json::from_str(&s).map_err(|e| Error::new(ErrorKind::Other, e.to_string()))
+struct EntryReader<R: BufRead> {
+    reader: R,
+    entry_bytes: Vec<u8>,
 }

-// TODO: How to implement this without attaching the input's lifetime to the output?
-pub fn read_entries<'a, R: BufRead>(
-    reader: &'a mut R,
-) -> impl Iterator<Item = io::Result<Entry>> + 'a {
-    reader.lines().map(|s| read_entry(s?))
+impl<R: BufRead> Iterator for EntryReader<R> {
+    type Item = io::Result<Entry>;
+
+    fn next(&mut self) -> Option<io::Result<Entry>> {
+        let mut entry_len_bytes = [0u8; size_of::<usize>()];
+
+        if self.reader.read_exact(&mut entry_len_bytes[..]).is_ok() {
+            let entry_len = bincode::deserialize(&entry_len_bytes).unwrap();
+
+            if entry_len > self.entry_bytes.len() {
+                self.entry_bytes.resize(entry_len, 0);
+            }
+
+            if let Err(e) = self.reader.read_exact(&mut self.entry_bytes[..entry_len]) {
+                Some(Err(e))
+            } else {
+                Some(
+                    bincode::deserialize(&self.entry_bytes)
+                        .map_err(|e| Error::new(ErrorKind::Other, e.to_string())),
+                )
+            }
+        } else {
+            None // EOF (probably)
+        }
+    }
+}
+
+/// Return an iterator for all the entries in the given file.
+pub fn read_entries<R: BufRead>(reader: R) -> impl Iterator<Item = io::Result<Entry>> {
+    EntryReader {
+        reader,
+        entry_bytes: Vec::new(),
+    }
 }

 #[cfg(test)]
@@ -66,7 +104,8 @@ mod tests {
     use ledger;
     use mint::Mint;
     use packet::BLOB_DATA_SIZE;
-    use signature::{KeyPair, KeyPairUtil};
+    use signature::{Keypair, KeypairUtil};
+    use std::io::Cursor;
     use transaction::Transaction;

     #[test]
@@ -76,7 +115,7 @@ mod tests {

         let writer = io::sink();
         let mut entry_writer = EntryWriter::new(&bank, writer);
-        let keypair = KeyPair::new();
+        let keypair = Keypair::new();
         let tx = Transaction::new(&mint.keypair(), keypair.pubkey(), 1, mint.last_id());

         // NOTE: if Entry grows to larger than a transaction, the code below falls over
@@ -98,4 +137,24 @@ mod tests {
         entry_writer.write_and_register_entry(&entries[1]).unwrap();
         assert_eq!(bank.last_id(), entries[1].id);
     }

+    /// Same as read_entries() but parsing a buffer and returning a vector.
+    fn read_entries_from_buf(s: &[u8]) -> io::Result<Vec<Entry>> {
+        let mut result = vec![];
+        let reader = Cursor::new(s);
+        for x in read_entries(reader) {
+            trace!("entry... {:?}", x);
+            result.push(x?);
+        }
+        Ok(result)
+    }
+
+    #[test]
+    fn test_read_entries_from_buf() {
+        let mint = Mint::new(1);
+        let mut buf = vec![];
+        EntryWriter::write_entries(&mut buf, mint.create_entries()).unwrap();
+        let entries = read_entries_from_buf(&buf).unwrap();
+        assert_eq!(entries, mint.create_entries());
+    }
 }
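The ledger format switches from line-delimited JSON to length-prefixed bincode: each record is a bincode-encoded `usize` length followed by the entry bytes, and `read_entries` now takes the reader by value and yields entries until the prefix read hits EOF. A replay sketch (the path and error handling are illustrative):

```rust
use entry_writer::read_entries;
use std::fs::File;
use std::io::BufReader;

fn replay(path: &str) {
    let reader = BufReader::new(File::open(path).expect("open ledger"));
    for entry in read_entries(reader) {
        let entry = entry.expect("failed to parse entry");
        println!("entry id: {:?}", entry.id);
    }
}
```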
src/erasure.rs (1075 lines changed): diff suppressed because it is too large.
src/fetch_stage.rs
@@ -1,29 +1,31 @@
 //! The `fetch_stage` batches input from a UDP socket and sends it to a channel.

 use packet::PacketRecycler;
+use service::Service;
 use std::net::UdpSocket;
-use std::sync::atomic::AtomicBool;
+use std::sync::atomic::{AtomicBool, Ordering};
 use std::sync::mpsc::channel;
 use std::sync::Arc;
-use std::thread::JoinHandle;
+use std::thread::{self, JoinHandle};
 use streamer::{self, PacketReceiver};

 pub struct FetchStage {
-    pub thread_hdls: Vec<JoinHandle<()>>,
+    exit: Arc<AtomicBool>,
+    thread_hdls: Vec<JoinHandle<()>>,
 }

 impl FetchStage {
     pub fn new(
         socket: UdpSocket,
         exit: Arc<AtomicBool>,
-        packet_recycler: PacketRecycler,
+        packet_recycler: &PacketRecycler,
     ) -> (Self, PacketReceiver) {
         Self::new_multi_socket(vec![socket], exit, packet_recycler)
     }
     pub fn new_multi_socket(
         sockets: Vec<UdpSocket>,
         exit: Arc<AtomicBool>,
-        packet_recycler: PacketRecycler,
+        packet_recycler: &PacketRecycler,
     ) -> (Self, PacketReceiver) {
         let (packet_sender, packet_receiver) = channel();
         let thread_hdls: Vec<_> = sockets
@@ -38,6 +40,23 @@ impl FetchStage {
             })
             .collect();

-        (FetchStage { thread_hdls }, packet_receiver)
+        (FetchStage { exit, thread_hdls }, packet_receiver)
+    }
+
+    pub fn close(&self) {
+        self.exit.store(true, Ordering::Relaxed);
+    }
+}
+
+impl Service for FetchStage {
+    fn thread_hdls(self) -> Vec<JoinHandle<()>> {
+        self.thread_hdls
+    }
+
+    fn join(self) -> thread::Result<()> {
+        for thread_hdl in self.thread_hdls() {
+            thread_hdl.join()?;
+        }
+        Ok(())
     }
 }
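`FetchStage` shows the pattern in miniature: stages now own their thread handles privately, expose `close()` to flip the exit flag, and implement the new `Service` trait for joining. The trait itself lives in src/service.rs, which this compare view does not show; the sketch below is inferred from the method signatures its implementors (`FetchStage`, `Fullnode`, `Rpu`, `Tpu`, `Tvu`, `Ncp`) share in this diff, so the real definition may differ:

```rust
use std::thread::{self, JoinHandle};

// Inferred shape of the Service trait; not taken verbatim from the source.
pub trait Service {
    fn thread_hdls(self) -> Vec<JoinHandle<()>>;
    fn join(self) -> thread::Result<()>;
}
```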
src/fullnode.rs (389 lines changed)
@@ -1,55 +1,69 @@
 //! The `fullnode` module hosts all the fullnode microservices.

 use bank::Bank;
-use crdt::{Crdt, ReplicatedData, TestNode};
-use entry_writer;
+use broadcast_stage::BroadcastStage;
+use crdt::{Crdt, NodeInfo, TestNode};
+use entry::Entry;
+use ledger::read_ledger;
 use ncp::Ncp;
+use packet::BlobRecycler;
 use rpu::Rpu;
-use std::fs::File;
-use std::io::Write;
-use std::io::{stdin, stdout, BufReader};
+use service::Service;
+use signature::{Keypair, KeypairUtil};
 use std::net::SocketAddr;
 use std::net::UdpSocket;
-use std::sync::atomic::AtomicBool;
+use std::sync::atomic::{AtomicBool, Ordering};
 use std::sync::{Arc, RwLock};
-use std::thread::JoinHandle;
-use std::time::Duration;
-use streamer;
+use std::thread::{JoinHandle, Result};
 use tpu::Tpu;
 use tvu::Tvu;
+use untrusted::Input;
+use window;

-pub struct FullNode {
-    pub thread_hdls: Vec<JoinHandle<()>>,
+//use std::time::Duration;
+pub struct Fullnode {
+    exit: Arc<AtomicBool>,
+    thread_hdls: Vec<JoinHandle<()>>,
 }

-impl FullNode {
-    pub fn new(
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
+/// Fullnode configuration to be stored in file
+pub struct Config {
+    pub node_info: NodeInfo,
+    pkcs8: Vec<u8>,
+}
+
+/// Structure to be replicated by the network
+impl Config {
+    pub fn new(bind_addr: &SocketAddr, pkcs8: Vec<u8>) -> Self {
+        let keypair =
+            Keypair::from_pkcs8(Input::from(&pkcs8)).expect("from_pkcs8 in fullnode::Config new");
+        let pubkey = keypair.pubkey();
+        let node_info = NodeInfo::new_leader_with_pubkey(pubkey, bind_addr);
+        Config { node_info, pkcs8 }
+    }
+    pub fn keypair(&self) -> Keypair {
+        Keypair::from_pkcs8(Input::from(&self.pkcs8))
+            .expect("from_pkcs8 in fullnode::Config keypair")
+    }
+}
+
+impl Fullnode {
+    fn new_internal(
         mut node: TestNode,
         leader: bool,
-        infile: Option<String>,
+        ledger_path: &str,
+        keypair: Keypair,
         network_entry_for_validator: Option<SocketAddr>,
-        outfile_for_leader: Option<String>,
-        exit: Arc<AtomicBool>,
-    ) -> FullNode {
+        sigverify_disabled: bool,
+    ) -> Self {
         info!("creating bank...");
-        let bank = Bank::default();
-        let entry_height = if let Some(path) = infile {
-            let f = File::open(path).unwrap();
-            let mut r = BufReader::new(f);
-            let entries =
-                entry_writer::read_entries(&mut r).map(|e| e.expect("failed to parse entry"));
-            info!("processing ledger...");
-            bank.process_ledger(entries).expect("process_ledger")
-        } else {
-            let mut r = BufReader::new(stdin());
-            let entries =
-                entry_writer::read_entries(&mut r).map(|e| e.expect("failed to parse entry"));
-            info!("processing ledger...");
-            bank.process_ledger(entries).expect("process_ledger")
-        };
+        let bank = Bank::new_default(leader);
+
+        let entries = read_ledger(ledger_path, true).expect("opening ledger");
+
+        let entries = entries.map(|e| e.expect("failed to parse entry"));
+
+        info!("processing ledger...");
+        let (entry_height, ledger_tail) = bank.process_ledger(entries).expect("process_ledger");
         // entry_height is the network-wide agreed height of the ledger.
         // initialize it from the input ledger
         info!("processed {} ledger...", entry_height);
@@ -60,69 +74,85 @@ impl Fullnode {
         let local_requests_addr = node.sockets.requests.local_addr().unwrap();
         info!(
             "starting... local gossip address: {} (advertising {})",
-            local_gossip_addr, node.data.gossip_addr
+            local_gossip_addr, node.data.contact_info.ncp
         );
+        let requests_addr = node.data.contact_info.rpu;
         let exit = Arc::new(AtomicBool::new(false));
         if !leader {
             let testnet_addr = network_entry_for_validator.expect("validator requires entry");
-
-            let network_entry_point = ReplicatedData::new_entry_point(testnet_addr);
-            let server = FullNode::new_validator(
+            let network_entry_point = NodeInfo::new_entry_point(testnet_addr);
+            let server = Self::new_validator(
+                keypair,
                 bank,
                 entry_height,
-                node.data.clone(),
-                node.sockets.requests,
-                node.sockets.respond,
-                node.sockets.replicate,
-                node.sockets.gossip,
-                node.sockets.repair,
-                network_entry_point,
+                &ledger_tail,
+                node,
+                &network_entry_point,
                 exit.clone(),
+                Some(ledger_path),
+                sigverify_disabled,
             );
             info!(
                 "validator ready... local request address: {} (advertising {}) connected to: {}",
-                local_requests_addr, node.data.requests_addr, testnet_addr
+                local_requests_addr, requests_addr, testnet_addr
             );
             server
         } else {
-            node.data.current_leader_id = node.data.id.clone();
-            let server = if let Some(file) = outfile_for_leader {
-                FullNode::new_leader(
-                    bank,
-                    entry_height,
-                    //Some(Duration::from_millis(1000)),
-                    None,
-                    node.data.clone(),
-                    node.sockets.requests,
-                    node.sockets.transaction,
-                    node.sockets.broadcast,
-                    node.sockets.respond,
-                    node.sockets.gossip,
-                    exit.clone(),
-                    File::create(file).expect("opening ledger file"),
-                )
-            } else {
-                FullNode::new_leader(
-                    bank,
-                    entry_height,
-                    //Some(Duration::from_millis(1000)),
-                    None,
-                    node.data.clone(),
-                    node.sockets.requests,
-                    node.sockets.transaction,
-                    node.sockets.broadcast,
-                    node.sockets.respond,
-                    node.sockets.gossip,
-                    exit.clone(),
-                    stdout(),
-                )
-            };
+            node.data.leader_id = node.data.id;
+
+            let server = Self::new_leader(
+                keypair,
+                bank,
+                entry_height,
+                &ledger_tail,
+                node,
+                exit.clone(),
+                ledger_path,
+                sigverify_disabled,
+            );
             info!(
                 "leader ready... local request address: {} (advertising {})",
-                local_requests_addr, node.data.requests_addr
+                local_requests_addr, requests_addr
             );
             server
         }
     }

+    pub fn new(
+        node: TestNode,
+        leader: bool,
+        ledger: &str,
+        keypair: Keypair,
+        network_entry_for_validator: Option<SocketAddr>,
+    ) -> Self {
+        Self::new_internal(
+            node,
+            leader,
+            ledger,
+            keypair,
+            network_entry_for_validator,
+            false,
+        )
+    }
+
+    pub fn new_without_sigverify(
+        node: TestNode,
+        leader: bool,
+        ledger_path: &str,
+        keypair: Keypair,
+        network_entry_for_validator: Option<SocketAddr>,
+    ) -> Self {
+        Self::new_internal(
+            node,
+            leader,
+            ledger_path,
+            keypair,
+            network_entry_for_validator,
+            true,
+        )
+    }
+
     /// Create a server instance acting as a leader.
     ///
     /// ```text
@@ -147,59 +177,69 @@ impl FullNode {
     /// | | `------------`
     /// `---------------------`
     /// ```
-    pub fn new_leader<W: Write + Send + 'static>(
+    pub fn new_leader(
+        keypair: Keypair,
         bank: Bank,
         entry_height: u64,
-        tick_duration: Option<Duration>,
-        me: ReplicatedData,
-        requests_socket: UdpSocket,
-        transactions_socket: UdpSocket,
-        broadcast_socket: UdpSocket,
-        respond_socket: UdpSocket,
-        gossip_socket: UdpSocket,
+        ledger_tail: &[Entry],
+        node: TestNode,
         exit: Arc<AtomicBool>,
-        writer: W,
+        ledger_path: &str,
+        sigverify_disabled: bool,
     ) -> Self {
+        let tick_duration = None;
+        // TODO: To light up PoH, uncomment the following line:
+        //let tick_duration = Some(Duration::from_millis(1000));
+
         let bank = Arc::new(bank);
         let mut thread_hdls = vec![];
-        let rpu = Rpu::new(bank.clone(), requests_socket, respond_socket, exit.clone());
-        thread_hdls.extend(rpu.thread_hdls);
+        let rpu = Rpu::new(
+            &bank,
+            node.sockets.requests,
+            node.sockets.respond,
+            exit.clone(),
+        );
+        thread_hdls.extend(rpu.thread_hdls());

         let blob_recycler = BlobRecycler::default();
-        let (tpu, blob_receiver) = Tpu::new(
-            bank.clone(),
-            tick_duration,
-            transactions_socket,
-            blob_recycler.clone(),
-            exit.clone(),
-            writer,
-        );
-        thread_hdls.extend(tpu.thread_hdls);
+        let window =
+            window::new_window_from_entries(ledger_tail, entry_height, &node.data, &blob_recycler);

-        let crdt = Arc::new(RwLock::new(Crdt::new(me)));
-        let window = streamer::default_window();
-        let gossip_send_socket = UdpSocket::bind("0.0.0.0:0").expect("bind 0");
+        let crdt = Arc::new(RwLock::new(Crdt::new(node.data).expect("Crdt::new")));
+
+        let (tpu, blob_receiver) = Tpu::new(
+            keypair,
+            &bank,
+            &crdt,
+            tick_duration,
+            node.sockets.transaction,
+            &blob_recycler,
+            exit.clone(),
+            ledger_path,
+            sigverify_disabled,
+        );
+        thread_hdls.extend(tpu.thread_hdls());
         let ncp = Ncp::new(
-            crdt.clone(),
+            &crdt,
             window.clone(),
-            gossip_socket,
-            gossip_send_socket,
+            Some(ledger_path),
+            node.sockets.gossip,
+            node.sockets.gossip_send,
             exit.clone(),
         ).expect("Ncp::new");
-        thread_hdls.extend(ncp.thread_hdls);
+        thread_hdls.extend(ncp.thread_hdls());

-        let t_broadcast = streamer::broadcaster(
-            broadcast_socket,
-            exit.clone(),
+        let broadcast_stage = BroadcastStage::new(
+            node.sockets.broadcast,
             crdt,
             window,
+            entry_height,
             blob_recycler.clone(),
             blob_receiver,
         );
-        thread_hdls.extend(vec![t_broadcast]);
+        thread_hdls.extend(broadcast_stage.thread_hdls());

-        FullNode { thread_hdls }
+        Fullnode { exit, thread_hdls }
     }
/// Create a server instance acting as a validator.
|
||||
@ -232,81 +272,126 @@ impl FullNode {
|
||||
/// `-------------------------------`
|
||||
/// ```
|
||||
pub fn new_validator(
|
||||
keypair: Keypair,
|
||||
bank: Bank,
|
||||
entry_height: u64,
|
||||
me: ReplicatedData,
|
||||
requests_socket: UdpSocket,
|
||||
respond_socket: UdpSocket,
|
||||
replicate_socket: UdpSocket,
|
||||
gossip_listen_socket: UdpSocket,
|
||||
repair_socket: UdpSocket,
|
||||
entry_point: ReplicatedData,
|
||||
ledger_tail: &[Entry],
|
||||
node: TestNode,
|
||||
entry_point: &NodeInfo,
|
||||
exit: Arc<AtomicBool>,
|
||||
ledger_path: Option<&str>,
|
||||
_sigverify_disabled: bool,
|
||||
) -> Self {
|
||||
let bank = Arc::new(bank);
|
||||
let mut thread_hdls = vec![];
|
||||
let rpu = Rpu::new(bank.clone(), requests_socket, respond_socket, exit.clone());
|
||||
thread_hdls.extend(rpu.thread_hdls);
|
||||
let rpu = Rpu::new(
|
||||
&bank,
|
||||
node.sockets.requests,
|
||||
node.sockets.respond,
|
||||
exit.clone(),
|
||||
);
|
||||
thread_hdls.extend(rpu.thread_hdls());
|
||||
|
||||
let crdt = Arc::new(RwLock::new(Crdt::new(me)));
|
||||
let blob_recycler = BlobRecycler::default();
|
||||
let window =
|
||||
window::new_window_from_entries(ledger_tail, entry_height, &node.data, &blob_recycler);
|
||||
|
||||
let crdt = Arc::new(RwLock::new(Crdt::new(node.data).expect("Crdt::new")));
|
||||
crdt.write()
|
||||
.expect("'crdt' write lock before insert() in pub fn replicate")
|
||||
.insert(&entry_point);
|
||||
let window = streamer::default_window();
|
||||
let gossip_send_socket = UdpSocket::bind("0.0.0.0:0").expect("bind 0");
|
||||
let retransmit_socket = UdpSocket::bind("0.0.0.0:0").expect("bind 0");
|
||||
|
||||
let ncp = Ncp::new(
|
||||
crdt.clone(),
|
||||
&crdt,
|
||||
window.clone(),
|
||||
gossip_listen_socket,
|
||||
gossip_send_socket,
|
||||
ledger_path,
|
||||
node.sockets.gossip,
|
||||
node.sockets.gossip_send,
|
||||
exit.clone(),
|
||||
).expect("Ncp::new");
|
||||
|
||||
let tvu = Tvu::new(
|
||||
bank.clone(),
|
||||
keypair,
|
||||
&bank,
|
||||
entry_height,
|
||||
crdt.clone(),
|
||||
window.clone(),
|
||||
replicate_socket,
|
||||
repair_socket,
|
||||
retransmit_socket,
|
||||
node.sockets.replicate,
|
||||
node.sockets.repair,
|
||||
node.sockets.retransmit,
|
||||
ledger_path,
|
||||
exit.clone(),
|
||||
);
|
||||
thread_hdls.extend(tvu.thread_hdls);
|
||||
thread_hdls.extend(ncp.thread_hdls);
|
||||
FullNode { thread_hdls }
|
||||
thread_hdls.extend(tvu.thread_hdls());
|
||||
thread_hdls.extend(ncp.thread_hdls());
|
||||
Fullnode { exit, thread_hdls }
|
||||
}
|
||||
|
||||
//used for notifying many nodes in parallel to exit
|
||||
pub fn exit(&self) {
|
||||
self.exit.store(true, Ordering::Relaxed);
|
||||
}
|
||||
pub fn close(self) -> Result<()> {
|
||||
self.exit();
|
||||
self.join()
|
||||
}
|
||||
}
|
||||
|
||||
impl Service for Fullnode {
|
||||
fn thread_hdls(self) -> Vec<JoinHandle<()>> {
|
||||
self.thread_hdls
|
||||
}
|
||||
|
||||
fn join(self) -> Result<()> {
|
||||
for thread_hdl in self.thread_hdls() {
|
||||
thread_hdl.join()?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use bank::Bank;
|
||||
use crdt::TestNode;
|
||||
use fullnode::FullNode;
|
||||
use fullnode::Fullnode;
|
||||
use mint::Mint;
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use service::Service;
|
||||
use signature::{Keypair, KeypairUtil};
|
||||
use std::sync::atomic::AtomicBool;
|
||||
use std::sync::Arc;
|
||||
|
||||
#[test]
|
||||
fn validator_exit() {
|
||||
let tn = TestNode::new();
|
||||
let keypair = Keypair::new();
|
||||
let tn = TestNode::new_localhost_with_pubkey(keypair.pubkey());
|
||||
let alice = Mint::new(10_000);
|
||||
let bank = Bank::new(&alice);
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let v = FullNode::new_validator(
|
||||
bank,
|
||||
0,
|
||||
tn.data.clone(),
|
||||
tn.sockets.requests,
|
||||
tn.sockets.respond,
|
||||
tn.sockets.replicate,
|
||||
tn.sockets.gossip,
|
||||
tn.sockets.repair,
|
||||
tn.data,
|
||||
exit.clone(),
|
||||
);
|
||||
exit.store(true, Ordering::Relaxed);
|
||||
for t in v.thread_hdls {
|
||||
t.join().unwrap();
|
||||
}
|
||||
let entry = tn.data.clone();
|
||||
let v = Fullnode::new_validator(keypair, bank, 0, &[], tn, &entry, exit, None, false);
|
||||
v.exit();
|
||||
v.join().unwrap();
|
||||
}
|
||||
#[test]
|
||||
fn validator_parallel_exit() {
|
||||
let vals: Vec<Fullnode> = (0..2)
|
||||
.map(|_| {
|
||||
let keypair = Keypair::new();
|
||||
let tn = TestNode::new_localhost_with_pubkey(keypair.pubkey());
|
||||
let alice = Mint::new(10_000);
|
||||
let bank = Bank::new(&alice);
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let entry = tn.data.clone();
|
||||
Fullnode::new_validator(keypair, bank, 0, &[], tn, &entry, exit, None, false)
|
||||
})
|
||||
.collect();
|
||||
//each validator can exit in parallel to speed many sequential calls to `join`
|
||||
vals.iter().for_each(|v| v.exit());
|
||||
//while join is called sequentially, the above exit call notified all the
|
||||
//validators to exit from all their threads
|
||||
vals.into_iter().for_each(|v| {
|
||||
v.join().unwrap();
|
||||
});
|
||||
}
|
||||
}
|
||||
|
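The new exit/close/join flow above generalizes shutdown across node types. A minimal sketch (ours, not part of the patch) of how a caller drives it, using only the Fullnode and Service items shown in this diff:

use fullnode::Fullnode;
use result::Result;
use service::Service;

// Shut down a node built with Fullnode::new_leader or new_validator:
// exit() flips the shared AtomicBool so every stage loop can wind down,
// then join() (from the Service trait) reaps each stage's JoinHandle.
fn shutdown(node: Fullnode) -> Result<()> {
    node.exit(); // non-blocking notification
    node.join()  // blocking wait for all threads
}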
26  src/hash.rs
@@ -1,11 +1,31 @@
 //! The `hash` module provides functions for creating SHA-256 hashes.

+use bs58;
 use generic_array::typenum::U32;
 use generic_array::GenericArray;
 use sha2::{Digest, Sha256};
+use std::fmt;

-pub type Hash = GenericArray<u8, U32>;
+#[derive(Serialize, Deserialize, Clone, Copy, Default, Eq, PartialEq, Ord, PartialOrd, Hash)]
+pub struct Hash(GenericArray<u8, U32>);
+
+impl AsRef<[u8]> for Hash {
+    fn as_ref(&self) -> &[u8] {
+        &self.0[..]
+    }
+}
+
+impl fmt::Debug for Hash {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", bs58::encode(self.0).into_string())
+    }
+}
+
+impl fmt::Display for Hash {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", bs58::encode(self.0).into_string())
+    }
+}
 /// Return a Sha256 hash for the given data.
 pub fn hash(val: &[u8]) -> Hash {
     let mut hasher = Sha256::default();
@@ -13,12 +33,12 @@ pub fn hash(val: &[u8]) -> Hash {

     // At the time of this writing, the sha2 library is stuck on an old version
     // of generic_array (0.9.0). Decouple ourselves with a clone to our version.
-    GenericArray::clone_from_slice(hasher.result().as_slice())
+    Hash(GenericArray::clone_from_slice(hasher.result().as_slice()))
 }

 /// Return the hash of the given hash extended with the given value.
 pub fn extend_and_hash(id: &Hash, val: &[u8]) -> Hash {
-    let mut hash_data = id.to_vec();
+    let mut hash_data = id.as_ref().to_vec();
     hash_data.extend_from_slice(val);
     hash(&hash_data)
 }
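A short usage sketch of the new Hash wrapper: byte access now goes through AsRef<[u8]>, and Debug/Display render base58 rather than a raw array.

use hash::{extend_and_hash, hash, Hash};

fn hash_demo() {
    let h: Hash = hash(b"some data");
    assert_eq!(h.as_ref().len(), 32);                // raw bytes via AsRef<[u8]>
    let chained = extend_and_hash(&h, b"more data"); // h' = sha256(h || val)
    println!("{} -> {}", h, chained);                // base58 via fmt::Display
}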
843  src/ledger.rs
@@ -1,17 +1,414 @@
 //! The `ledger` module provides functions for parallel verification of the
-//! Proof of History ledger.
+//! Proof of History ledger as well as iterative read, append write, and random
+//! access read to a persistent file-based ledger.

-use bincode::{self, deserialize, serialize_into};
+use bincode::{self, deserialize, deserialize_from, serialize_into, serialized_size};
 use entry::Entry;
 use hash::Hash;
-use packet::{self, SharedBlob, BLOB_SIZE};
+use log::Level::Trace;
+use packet::{self, SharedBlob, BLOB_DATA_SIZE};
 use rayon::prelude::*;
+use result::{Error, Result};
 use std::collections::VecDeque;
-use std::io::Cursor;
+use std::fs::{create_dir_all, remove_dir_all, File, OpenOptions};
+use std::io::prelude::*;
+use std::io::{self, BufReader, BufWriter, Seek, SeekFrom};
+use std::mem::size_of;
+use std::path::Path;
 use transaction::Transaction;
+use window::WINDOW_SIZE;

+//
+// A persistent ledger is 2 files:
+//  ledger_path/ --+
+//                 +-- index <== an array of u64 offsets into data,
+//                 |             each offset points to the first bytes
+//                 |             of a u64 that contains the length of
+//                 |             the entry. To make the code smaller,
+//                 |             index[0] is set to 0, TODO: this field
+//                 |             could later be used for other stuff...
+//                 +-- data  <== concatenated instances of
+//                                   u64 length
+//                                   entry data
+//
+// When opening a ledger, we have the ability to "audit" it, which means we need
+// to pick which file to use as "truth", and correct the other file as
+// necessary, if possible.
+//
+// The protocol for writing the ledger is to append to the data file first, the
+// index file second. If the writing node is interrupted while appending to the
+// ledger, there are some possibilities we need to cover:
+//
+//     1. a partial write of data, which might be a partial write of length
+//        or a partial write of entry data
+//     2. a partial or missing write to index for that entry
+//
+// There is also the possibility of "unsynchronized" reading of the ledger
+// during transfer across nodes via rsync (or whatever). In this case, if the
+// transfer of the data file is done before the transfer of the index file,
+// it's likely that the index file will be far ahead of the data file in time.
+//
+// The quickest and most reliable strategy for recovery is therefore to treat
+// the data file as nearest to the "truth".
+//
+// The logic for "recovery/audit" is to open index and read backwards from the
+// last u64-aligned entry to get to where index and data agree (i.e. where a
+// successful deserialization of an entry can be performed), then truncate
+// both files to this synchronization point.
+//
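To make the layout concrete, here is a std-only sketch (ours, not from the patch) of locating entry n: index holds one little-endian u64 per entry (bincode's default encoding), and each data record is a u64 length prefix followed by the serialized entry.

use std::io::{self, Read, Seek, SeekFrom};

fn raw_entry_bytes<F: Read + Seek>(index: &mut F, data: &mut F, n: u64) -> io::Result<Vec<u8>> {
    let mut word = [0u8; 8];
    index.seek(SeekFrom::Start(n * 8))?; // index[n] lives at byte n * 8
    index.read_exact(&mut word)?;
    let offset = u64::from_le_bytes(word);

    data.seek(SeekFrom::Start(offset))?;
    data.read_exact(&mut word)?; // u64 length prefix
    let len = u64::from_le_bytes(word);

    let mut entry = vec![0u8; len as usize];
    data.read_exact(&mut entry)?; // serialized Entry bytes follow the prefix
    Ok(entry)
}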
+
+// ledger window
+#[derive(Debug)]
+pub struct LedgerWindow {
+    index: BufReader<File>,
+    data: BufReader<File>,
+}
+
+// use a CONST because there's a cast, and we don't want "sizeof::<u64> as u64"...
+const SIZEOF_U64: u64 = size_of::<u64>() as u64;
+
+#[cfg_attr(feature = "cargo-clippy", allow(needless_pass_by_value))]
+fn err_bincode_to_io(e: Box<bincode::ErrorKind>) -> io::Error {
+    io::Error::new(io::ErrorKind::Other, e.to_string())
+}
+
+fn entry_at<A: Read + Seek>(file: &mut A, at: u64) -> io::Result<Entry> {
+    file.seek(SeekFrom::Start(at))?;
+
+    let len = deserialize_from(file.take(SIZEOF_U64)).map_err(err_bincode_to_io)?;
+    trace!("entry_at({}) len: {}", at, len);
+
+    deserialize_from(file.take(len)).map_err(err_bincode_to_io)
+}
+
+fn next_entry<A: Read>(file: &mut A) -> io::Result<Entry> {
+    let len = deserialize_from(file.take(SIZEOF_U64)).map_err(err_bincode_to_io)?;
+    deserialize_from(file.take(len)).map_err(err_bincode_to_io)
+}
+
+fn u64_at<A: Read + Seek>(file: &mut A, at: u64) -> io::Result<u64> {
+    file.seek(SeekFrom::Start(at))?;
+    deserialize_from(file.take(SIZEOF_U64)).map_err(err_bincode_to_io)
+}
+
+impl LedgerWindow {
+    // opens a Ledger in directory, provides "infinite" window
+    //
+    pub fn open(ledger_path: &str) -> io::Result<Self> {
+        let ledger_path = Path::new(&ledger_path);
+
+        let index = File::open(ledger_path.join("index"))?;
+        let index = BufReader::with_capacity((WINDOW_SIZE * SIZEOF_U64) as usize, index);
+        let data = File::open(ledger_path.join("data"))?;
+        let data = BufReader::with_capacity(WINDOW_SIZE as usize * BLOB_DATA_SIZE, data);
+
+        Ok(LedgerWindow { index, data })
+    }
+
+    pub fn get_entry(&mut self, index: u64) -> io::Result<Entry> {
+        let offset = u64_at(&mut self.index, index * SIZEOF_U64)?;
+        entry_at(&mut self.data, offset)
+    }
+}
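Usage sketch for the random-access reader above (the path and index are illustrative):

use ledger::LedgerWindow;

fn read_third_entry(ledger_path: &str) -> std::io::Result<()> {
    let mut window = LedgerWindow::open(ledger_path)?;
    let entry = window.get_entry(3)?; // random access by entry index
    println!("entry 3: {:?}", entry);
    Ok(())
}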
+
+pub fn verify_ledger(ledger_path: &str) -> io::Result<()> {
+    let ledger_path = Path::new(&ledger_path);
+
+    let index = File::open(ledger_path.join("index"))?;
+
+    let index_len = index.metadata()?.len();
+
+    if index_len % SIZEOF_U64 != 0 {
+        Err(io::Error::new(
+            io::ErrorKind::Other,
+            format!("index is not a multiple of {} bytes long", SIZEOF_U64),
+        ))?;
+    }
+    let mut index = BufReader::with_capacity((WINDOW_SIZE * SIZEOF_U64) as usize, index);
+
+    let data = File::open(ledger_path.join("data"))?;
+    let mut data = BufReader::with_capacity(WINDOW_SIZE as usize * BLOB_DATA_SIZE, data);
+
+    let mut last_data_offset = 0;
+    let mut index_offset = 0;
+    let mut data_read = 0;
+    let mut last_len = 0;
+    let mut i = 0;
+
+    while index_offset < index_len {
+        let data_offset = u64_at(&mut index, index_offset)?;
+
+        if last_data_offset + last_len != data_offset {
+            Err(io::Error::new(
+                io::ErrorKind::Other,
+                format!(
+                    "at entry[{}], a gap or an overlap last_offset {} offset {} last_len {}",
+                    i, last_data_offset, data_offset, last_len
+                ),
+            ))?;
+        }
+
+        match entry_at(&mut data, data_offset) {
+            Err(e) => Err(io::Error::new(
+                io::ErrorKind::Other,
+                format!(
+                    "entry[{}] deserialize() failed at offset {}, err: {}",
+                    index_offset / SIZEOF_U64,
+                    data_offset,
+                    e.to_string(),
+                ),
+            ))?,
+            Ok(entry) => {
+                last_len = serialized_size(&entry).map_err(err_bincode_to_io)? + SIZEOF_U64
+            }
+        }
+
+        last_data_offset = data_offset;
+        data_read += last_len;
+        index_offset += SIZEOF_U64;
+        i += 1;
+    }
+    let data = data.into_inner();
+    if data_read != data.metadata()?.len() {
+        Err(io::Error::new(
+            io::ErrorKind::Other,
+            "garbage on end of data file",
+        ))?;
+    }
+    Ok(())
+}
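The gap/overlap test in verify_ledger() reduces to a single invariant; a hedged sketch with illustrative inputs (entry_lens are payload sizes, without the prefix): each index offset must equal the previous offset plus the previous record's length prefix and payload.

const SIZEOF_U64: u64 = 8;

// offsets[i + 1] == offsets[i] + 8 + entry_lens[i], for every i
fn offsets_are_contiguous(offsets: &[u64], entry_lens: &[u64]) -> bool {
    offsets
        .windows(2)
        .zip(entry_lens)
        .all(|(pair, len)| pair[0] + SIZEOF_U64 + len == pair[1])
}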
+
+fn recover_ledger(ledger_path: &str) -> io::Result<()> {
+    let ledger_path = Path::new(ledger_path);
+    let mut index = OpenOptions::new()
+        .write(true)
+        .read(true)
+        .open(ledger_path.join("index"))?;
+
+    let mut data = OpenOptions::new()
+        .write(true)
+        .read(true)
+        .open(ledger_path.join("data"))?;
+
+    // first, truncate to a multiple of SIZEOF_U64
+    let len = index.metadata()?.len();
+
+    if len % SIZEOF_U64 != 0 {
+        trace!("recover: trimming index len to {}", len - len % SIZEOF_U64);
+        index.set_len(len - (len % SIZEOF_U64))?;
+    }
+
+    // next, pull index offsets off one at a time until the last one points
+    // to a valid entry deserialization offset...
+    loop {
+        let len = index.metadata()?.len();
+        trace!("recover: index len:{}", len);
+
+        // should never happen
+        if len < SIZEOF_U64 {
+            trace!("recover: error index len {} too small", len);
+
+            Err(io::Error::new(io::ErrorKind::Other, "empty ledger index"))?;
+        }
+
+        let offset = u64_at(&mut index, len - SIZEOF_U64)?;
+        trace!("recover: offset[{}]: {}", (len / SIZEOF_U64) - 1, offset);
+
+        match entry_at(&mut data, offset) {
+            Ok(entry) => {
+                trace!("recover: entry[{}]: {:?}", (len / SIZEOF_U64) - 1, entry);
+
+                let entry_len = serialized_size(&entry).map_err(err_bincode_to_io)?;
+
+                trace!("recover: entry_len: {}", entry_len);
+
+                // now trim data file to size...
+                data.set_len(offset + SIZEOF_U64 + entry_len)?;
+
+                trace!(
+                    "recover: trimmed data file to {}",
+                    offset + SIZEOF_U64 + entry_len
+                );
+
+                break; // all good
+            }
+            Err(_err) => {
+                trace!(
+                    "recover: no entry recovered at {} {}",
+                    offset,
+                    _err.to_string()
+                );
+                index.set_len(len - SIZEOF_U64)?;
+            }
+        }
+    }
+    if log_enabled!(Trace) {
+        let num_entries = index.metadata()?.len() / SIZEOF_U64;
+        trace!("recover: done. {} entries", num_entries);
+    }
+
+    // flush everything to disk...
+    index.sync_all()?;
+    data.sync_all()
+}
+
+// TODO?? ... we could open the files on demand to support [], but today
+//   LedgerWindow needs "&mut self"
+//
+//impl Index<u64> for LedgerWindow {
+//    type Output = io::Result<Entry>;
+//
+//    fn index(&mut self, index: u64) -> &io::Result<Entry> {
+//        match u64_at(&mut self.index, index * SIZEOF_U64) {
+//            Ok(offset) => &entry_at(&mut self.data, offset),
+//            Err(e) => &Err(e),
+//        }
+//    }
+//}
+
+#[derive(Debug)]
+pub struct LedgerWriter {
+    index: BufWriter<File>,
+    data: BufWriter<File>,
+}
+
+impl LedgerWriter {
+    // recover and open the ledger for writing
+    pub fn recover(ledger_path: &str) -> io::Result<Self> {
+        recover_ledger(ledger_path)?;
+        LedgerWriter::open(ledger_path, false)
+    }
+
+    // opens or creates a LedgerWriter in ledger_path directory
+    pub fn open(ledger_path: &str, create: bool) -> io::Result<Self> {
+        let ledger_path = Path::new(&ledger_path);
+
+        if create {
+            let _ignored = remove_dir_all(ledger_path);
+            create_dir_all(ledger_path)?;
+        }
+
+        let index = OpenOptions::new()
+            .create(create)
+            .append(true)
+            .open(ledger_path.join("index"))?;
+
+        if log_enabled!(Trace) {
+            let len = index.metadata()?.len();
+            trace!("LedgerWriter::new: index fp:{}", len);
+        }
+        let index = BufWriter::new(index);
+
+        let data = OpenOptions::new()
+            .create(create)
+            .append(true)
+            .open(ledger_path.join("data"))?;
+
+        if log_enabled!(Trace) {
+            let len = data.metadata()?.len();
+            trace!("LedgerWriter::new: data fp:{}", len);
+        }
+        let data = BufWriter::new(data);
+
+        Ok(LedgerWriter { index, data })
+    }
+
+    fn write_entry_noflush(&mut self, entry: &Entry) -> io::Result<()> {
+        let len = serialized_size(&entry).map_err(err_bincode_to_io)?;
+
+        serialize_into(&mut self.data, &len).map_err(err_bincode_to_io)?;
+        if log_enabled!(Trace) {
+            let offset = self.data.seek(SeekFrom::Current(0))?;
+            trace!("write_entry: after len data fp:{}", offset);
+        }
+
+        serialize_into(&mut self.data, &entry).map_err(err_bincode_to_io)?;
+        if log_enabled!(Trace) {
+            let offset = self.data.seek(SeekFrom::Current(0))?;
+            trace!("write_entry: after entry data fp:{}", offset);
+        }
+
+        let offset = self.data.seek(SeekFrom::Current(0))? - len - SIZEOF_U64;
+        trace!("write_entry: offset:{} len:{}", offset, len);
+
+        serialize_into(&mut self.index, &offset).map_err(err_bincode_to_io)?;
+
+        if log_enabled!(Trace) {
+            let offset = self.index.seek(SeekFrom::Current(0))?;
+            trace!("write_entry: end index fp:{}", offset);
+        }
+        Ok(())
+    }
+
+    pub fn write_entry(&mut self, entry: &Entry) -> io::Result<()> {
+        self.write_entry_noflush(&entry)?;
+        self.index.flush()?;
+        self.data.flush()?;
+        Ok(())
+    }
+
+    pub fn write_entries<I>(&mut self, entries: I) -> io::Result<()>
+    where
+        I: IntoIterator<Item = Entry>,
+    {
+        for entry in entries {
+            self.write_entry_noflush(&entry)?;
+        }
+        self.index.flush()?;
+        self.data.flush()?;
+        Ok(())
+    }
+}
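Usage sketch for the writer above: create a fresh ledger directory and append entries (write_entries appends data first, then index, then flushes both; the tests below rely on the same pattern). The entries argument is illustrative.

use entry::Entry;
use ledger::LedgerWriter;

fn write_all(ledger_path: &str, entries: Vec<Entry>) -> std::io::Result<()> {
    let mut writer = LedgerWriter::open(ledger_path, true)?; // true: wipe and create
    writer.write_entries(entries) // appends every entry, then flushes index + data
}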
+#[derive(Debug)]
+pub struct LedgerReader {
+    data: BufReader<File>,
+}
+
+impl Iterator for LedgerReader {
+    type Item = io::Result<Entry>;
+
+    fn next(&mut self) -> Option<io::Result<Entry>> {
+        match next_entry(&mut self.data) {
+            Ok(entry) => Some(Ok(entry)),
+            Err(_) => None,
+        }
+    }
+}
+
+/// Return an iterator for all the entries in the given file.
+pub fn read_ledger(
+    ledger_path: &str,
+    recover: bool,
+) -> io::Result<impl Iterator<Item = io::Result<Entry>>> {
+    if recover {
+        recover_ledger(ledger_path)?;
+    }
+
+    let ledger_path = Path::new(&ledger_path);
+    let data = File::open(ledger_path.join("data"))?;
+    let data = BufReader::new(data);
+
+    Ok(LedgerReader { data })
+}
+
+///// copy_ledger doesn't fix up the "from" ledger
+//pub fn copy_ledger(from: &str, to: &str) -> io::Result<()> {
+//    let mut to = LedgerWriter::new(to, true)?;
+//
+//    let from = Path::new(&from);
+//
+//    // for a copy, we read "readonly" from data
+//    let data = File::open(from.join("data"))?;
+//
+//    for entry in (LedgerReader { data }) {
+//        let entry = entry?;
+//        to.write_entry(&entry)?;
+//    }
+//    Ok(())
+//}

 // a Block is a slice of Entries

 pub trait Block {
     /// Verifies the hashes and counts of a slice of transactions are all consistent.
     fn verify(&self, start_hash: &Hash) -> bool;
@@ -22,39 +419,42 @@ impl Block for [Entry] {
     fn verify(&self, start_hash: &Hash) -> bool {
         let genesis = [Entry::new_tick(0, start_hash)];
         let entry_pairs = genesis.par_iter().chain(self).zip(self);
-        entry_pairs.all(|(x0, x1)| x1.verify(&x0.id))
+        entry_pairs.all(|(x0, x1)| {
+            let r = x1.verify(&x0.id);
+            if !r {
+                warn!(
+                    "entry invalid!: {:?} num txs: {}",
+                    x1.id,
+                    x1.transactions.len()
+                );
+            }
+            r
+        })
     }

     fn to_blobs(&self, blob_recycler: &packet::BlobRecycler, q: &mut VecDeque<SharedBlob>) {
         for entry in self {
-            let blob = blob_recycler.allocate();
-            let pos = {
-                let mut bd = blob.write().unwrap();
-                let mut out = Cursor::new(bd.data_mut());
-                serialize_into(&mut out, &entry).expect("failed to serialize output");
-                out.position() as usize
-            };
-            assert!(pos < BLOB_SIZE);
-            blob.write().unwrap().set_size(pos);
+            let blob = entry.to_blob(blob_recycler, None, None, None);
             q.push_back(blob);
         }
     }
 }

-pub fn reconstruct_entries_from_blobs(blobs: VecDeque<SharedBlob>) -> bincode::Result<Vec<Entry>> {
+pub fn reconstruct_entries_from_blobs(blobs: VecDeque<SharedBlob>) -> Result<Vec<Entry>> {
     let mut entries: Vec<Entry> = Vec::with_capacity(blobs.len());

     for blob in blobs {
         let entry = {
             let msg = blob.read().unwrap();
-            deserialize(&msg.data()[..msg.meta.size])
+            let msg_size = msg.get_size()?;
+            deserialize(&msg.data()[..msg_size])
         };

         match entry {
             Ok(entry) => entries.push(entry),
             Err(err) => {
-                trace!("reconstruct_entry_from_blobs: {}", err);
-                return Err(err);
+                trace!("reconstruct_entry_from_blobs: {:?}", err);
+                return Err(Error::Serialize(err));
             }
         }
     }
@@ -62,39 +462,64 @@ pub fn reconstruct_entries_from_blobs(blobs: VecDeque<SharedBlob>) -> bincode::R
 }

 /// Creates the next entries for given transactions, outputs
-/// updates start_hash to id of last Entry, sets cur_hashes to 0
+/// updates start_hash to id of last Entry, sets num_hashes to 0
 pub fn next_entries_mut(
     start_hash: &mut Hash,
-    cur_hashes: &mut u64,
+    num_hashes: &mut u64,
     transactions: Vec<Transaction>,
 ) -> Vec<Entry> {
-    if transactions.is_empty() {
-        vec![Entry::new_mut(start_hash, cur_hashes, transactions, false)]
+    // TODO: find a magic number that works better than |  ?
+    //                                                  V
+    if transactions.is_empty() || transactions.len() == 1 {
+        vec![Entry::new_mut(start_hash, num_hashes, transactions, false)]
     } else {
-        let mut chunk_len = transactions.len();
+        let mut chunk_start = 0;
+        let mut entries = Vec::new();

-        // check for fit, make sure they can be serialized
-        while !Entry::will_fit(transactions[0..chunk_len].to_vec()) {
-            chunk_len /= 2;
-        }
+        while chunk_start < transactions.len() {
+            let mut chunk_end = transactions.len();
+            let mut upper = chunk_end;
+            let mut lower = chunk_start;
+            let mut next = chunk_end; // be optimistic that all will fit

-        let mut num_chunks = if transactions.len() % chunk_len == 0 {
-            transactions.len() / chunk_len
-        } else {
-            transactions.len() / chunk_len + 1
-        };
-
-        let mut entries = Vec::with_capacity(num_chunks);
-
-        for chunk in transactions.chunks(chunk_len) {
-            num_chunks -= 1;
+            // binary search for how many transactions will fit in an Entry (i.e. a BLOB)
+            loop {
+                debug!(
+                    "chunk_end {}, upper {} lower {} next {} transactions.len() {}",
+                    chunk_end,
+                    upper,
+                    lower,
+                    next,
+                    transactions.len()
+                );
+                if Entry::will_fit(transactions[chunk_start..chunk_end].to_vec()) {
+                    next = (upper + chunk_end) / 2;
+                    lower = chunk_end;
+                    debug!(
+                        "chunk_end {} fits, maybe too well? trying {}",
+                        chunk_end, next
+                    );
+                } else {
+                    next = (lower + chunk_end) / 2;
+                    upper = chunk_end;
+                    debug!("chunk_end {} doesn't fit! trying {}", chunk_end, next);
+                }
+                // same as last time
+                if next == chunk_end {
+                    debug!("converged on chunk_end {}", chunk_end);
+                    break;
+                }
+                chunk_end = next;
+            }
             entries.push(Entry::new_mut(
                 start_hash,
-                cur_hashes,
-                chunk.to_vec(),
-                num_chunks > 0,
+                num_hashes,
+                transactions[chunk_start..chunk_end].to_vec(),
+                transactions.len() - chunk_end > 0,
             ));
+            chunk_start = chunk_end;
         }

         entries
     }
 }
@@ -102,28 +527,41 @@ pub fn next_entries_mut(
 /// Creates the next Entries for given transactions
 pub fn next_entries(
     start_hash: &Hash,
-    cur_hashes: u64,
+    num_hashes: u64,
     transactions: Vec<Transaction>,
 ) -> Vec<Entry> {
     let mut id = *start_hash;
-    let mut num_hashes = cur_hashes;
+    let mut num_hashes = num_hashes;
     next_entries_mut(&mut id, &mut num_hashes, transactions)
 }
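A usage sketch of the chunking above: next_entries() packs as many transactions per Entry as will serialize into a single blob and flags every entry but the last with has_more, exactly as the tests below assert.

use hash::Hash;
use ledger::next_entries;
use transaction::Transaction;

fn show_split(start_hash: &Hash, transactions: Vec<Transaction>) {
    let entries = next_entries(start_hash, 0, transactions);
    for entry in &entries {
        println!("txs: {} has_more: {}", entry.transactions.len(), entry.has_more);
    }
}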

 #[cfg(test)]
 mod tests {
     use super::*;
+    use bincode::serialized_size;
+    use chrono::prelude::*;
     use entry::{next_entry, Entry};
     use hash::hash;
-    use packet::{BlobRecycler, BLOB_DATA_SIZE};
-    use signature::{KeyPair, KeyPairUtil};
+    use packet::{BlobRecycler, BLOB_DATA_SIZE, PACKET_DATA_SIZE};
+    use signature::{Keypair, KeypairUtil};
+    use std;
     use std::net::{IpAddr, Ipv4Addr, SocketAddr};
-    use transaction::Transaction;
+    use transaction::{Transaction, Vote};

+    fn tmp_ledger_path(name: &str) -> String {
+        use std::env;
+        let out_dir = env::var("OUT_DIR").unwrap_or_else(|_| "target".to_string());
+        let keypair = Keypair::new();
+
+        format!("{}/tmp-ledger-{}-{}", out_dir, name, keypair.pubkey())
+    }
+
     #[test]
     fn test_verify_slice() {
         use logger;
         logger::setup();
         let zero = Hash::default();
-        let one = hash(&zero);
+        let one = hash(&zero.as_ref());
         assert!(vec![][..].verify(&zero)); // base case
         assert!(vec![Entry::new_tick(0, &zero)][..].verify(&zero)); // singleton case 1
         assert!(!vec![Entry::new_tick(0, &zero)][..].verify(&one)); // singleton case 2, bad
@@ -134,14 +572,59 @@ mod tests {
         assert!(!bad_ticks.verify(&zero)); // inductive step, bad
     }

+    fn make_tiny_test_entries(num: usize) -> Vec<Entry> {
+        let zero = Hash::default();
+        let one = hash(&zero.as_ref());
+        let keypair = Keypair::new();
+
+        let mut id = one;
+        let mut num_hashes = 0;
+        (0..num)
+            .map(|_| {
+                Entry::new_mut(
+                    &mut id,
+                    &mut num_hashes,
+                    vec![Transaction::new_timestamp(&keypair, Utc::now(), one)],
+                    false,
+                )
+            })
+            .collect()
+    }
+
+    fn make_test_entries() -> Vec<Entry> {
+        let zero = Hash::default();
+        let one = hash(&zero.as_ref());
+        let keypair = Keypair::new();
+        let tx0 = Transaction::new_vote(
+            &keypair,
+            Vote {
+                version: 0,
+                contact_info_version: 1,
+            },
+            one,
+            1,
+        );
+        let tx1 = Transaction::new_timestamp(&keypair, Utc::now(), one);
+        //
+        // TODO: this magic number and the mix of transaction types
+        //       is designed to fill up a Blob more or less exactly,
+        //       to get near enough to the threshold that
+        //       deserialization falls over if it uses the wrong size()
+        //       parameter to index into blob.data()
+        //
+        // magic numbers -----------------+
+        //                                |
+        //                                V
+        let mut transactions = vec![tx0; 362];
+        transactions.extend(vec![tx1; 100]);
+        next_entries(&zero, 0, transactions)
+    }
+
     #[test]
     fn test_entries_to_blobs() {
-        let zero = Hash::default();
-        let one = hash(&zero);
-        let keypair = KeyPair::new();
-        let tx0 = Transaction::new(&keypair, keypair.pubkey(), 1, one);
-        let transactions = vec![tx0; 10_000];
-        let entries = next_entries(&zero, 0, transactions);
+        use logger;
+        logger::setup();
+        let entries = make_test_entries();

         let blob_recycler = BlobRecycler::default();
         let mut blob_q = VecDeque::new();
@@ -152,6 +635,8 @@ mod tests {

     #[test]
     fn test_bad_blobs_attack() {
+        use logger;
+        logger::setup();
         let blob_recycler = BlobRecycler::default();
         let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 8000);
         let blobs_q = packet::to_blobs(vec![(0, addr)], &blob_recycler).unwrap(); // <-- attack!
@@ -160,68 +645,230 @@ mod tests {

     #[test]
     fn test_next_entries() {
+        use logger;
+        logger::setup();
         let id = Hash::default();
-        let next_id = hash(&id);
-        let keypair = KeyPair::new();
-        let tx0 = Transaction::new(&keypair, keypair.pubkey(), 1, next_id);
+        let next_id = hash(&id.as_ref());
+        let keypair = Keypair::new();
+        let tx_small = Transaction::new_vote(
+            &keypair,
+            Vote {
+                version: 0,
+                contact_info_version: 2,
+            },
+            next_id,
+            2,
+        );
+        let tx_large = Transaction::new(&keypair, keypair.pubkey(), 1, next_id);
+
+        let tx_small_size = serialized_size(&tx_small).unwrap();
+        let tx_large_size = serialized_size(&tx_large).unwrap();
+        assert!(tx_small_size < tx_large_size);
+        assert!(tx_large_size < PACKET_DATA_SIZE as u64);

         // NOTE: if Entry grows to larger than a transaction, the code below falls over
-        let threshold = (BLOB_DATA_SIZE / 256) - 1; // 256 is transaction size
+        let threshold = (BLOB_DATA_SIZE / PACKET_DATA_SIZE) - 1;

         // verify no split
-        let transactions = vec![tx0.clone(); threshold];
+        let transactions = vec![tx_small.clone(); threshold];
         let entries0 = next_entries(&id, 0, transactions.clone());
         assert_eq!(entries0.len(), 1);
         assert!(entries0.verify(&id));

-        // verify the split
-        let transactions = vec![tx0.clone(); threshold * 2];
+        // verify the split with uniform transactions
+        let transactions = vec![tx_small.clone(); threshold * 2];
         let entries0 = next_entries(&id, 0, transactions.clone());
         assert_eq!(entries0.len(), 2);
+        assert!(entries0[0].has_more);
+        assert!(!entries0[entries0.len() - 1].has_more);

         assert!(entries0.verify(&id));
-        // test hand-construction... brittle, changes if split method changes... ?
-        // let mut entries1 = vec![];
-        // entries1.push(Entry::new(&id, 1, transactions[..threshold].to_vec(), true));
-        // id = entries1[0].id;
-        // entries1.push(Entry::new(
-        //     &id,
-        //     1,
-        //     transactions[threshold..].to_vec(),
-        //     false,
-        // ));
-        //
-        // assert_eq!(entries0, entries1);
-    }
-}

-#[cfg(all(feature = "unstable", test))]
-mod bench {
-    extern crate test;
-    use self::test::Bencher;
-    use hash::hash;
-    use ledger::*;
-    use packet::BlobRecycler;
-    use signature::{KeyPair, KeyPairUtil};
-    use transaction::Transaction;
+        // verify the split with small transactions followed by large
+        // transactions
+        let mut transactions = vec![tx_small.clone(); BLOB_DATA_SIZE / (tx_small_size as usize)];
+        let large_transactions = vec![tx_large.clone(); BLOB_DATA_SIZE / (tx_large_size as usize)];

-    #[bench]
-    fn bench_block_to_blobs_to_block(bencher: &mut Bencher) {
-        let zero = Hash::default();
-        let one = hash(&zero);
-        let keypair = KeyPair::new();
-        let tx0 = Transaction::new(&keypair, keypair.pubkey(), 1, one);
-        let transactions = vec![tx0; 10];
-        let entries = next_entries(&zero, 1, transactions);
+        transactions.extend(large_transactions);

-        let blob_recycler = BlobRecycler::default();
-        bencher.iter(|| {
-            let mut blob_q = VecDeque::new();
-            entries.to_blobs(&blob_recycler, &mut blob_q);
-            assert_eq!(reconstruct_entries_from_blobs(blob_q).unwrap(), entries);
-        });
+        let entries0 = next_entries(&id, 0, transactions.clone());
+        assert!(entries0.len() > 2);
+        assert!(entries0[0].has_more);
+        assert!(!entries0[entries0.len() - 1].has_more);
+        assert!(entries0.verify(&id));
     }

+    #[test]
+    fn test_ledger_reader_writer() {
+        use logger;
+        logger::setup();
+        let ledger_path = tmp_ledger_path("test_ledger_reader_writer");
+        let entries = make_tiny_test_entries(10);
+
+        {
+            let mut writer = LedgerWriter::open(&ledger_path, true).unwrap();
+            writer.write_entries(entries.clone()).unwrap();
+            // drops writer, flushes buffers
+        }
+        verify_ledger(&ledger_path).unwrap();
+
+        let mut read_entries = vec![];
+        for x in read_ledger(&ledger_path, true).unwrap() {
+            let entry = x.unwrap();
+            trace!("entry... {:?}", entry);
+            read_entries.push(entry);
+        }
+        assert_eq!(read_entries, entries);
+
+        let mut window = LedgerWindow::open(&ledger_path).unwrap();
+
+        for (i, entry) in entries.iter().enumerate() {
+            let read_entry = window.get_entry(i as u64).unwrap();
+            assert_eq!(*entry, read_entry);
+        }
+        assert!(window.get_entry(100).is_err());
+
+        std::fs::remove_file(Path::new(&ledger_path).join("data")).unwrap();
+        // empty data file should fall over
+        assert!(LedgerWindow::open(&ledger_path).is_err());
+        assert!(read_ledger(&ledger_path, false).is_err());
+
+        std::fs::remove_dir_all(ledger_path).unwrap();
+    }
+
+    fn truncated_last_entry(ledger_path: &str, entries: Vec<Entry>) {
+        let len = {
+            let mut writer = LedgerWriter::open(&ledger_path, true).unwrap();
+            writer.write_entries(entries).unwrap();
+            writer.data.seek(SeekFrom::Current(0)).unwrap()
+        };
+        verify_ledger(&ledger_path).unwrap();
+
+        let data = OpenOptions::new()
+            .write(true)
+            .open(Path::new(&ledger_path).join("data"))
+            .unwrap();
+        data.set_len(len - 4).unwrap();
+    }
+
+    fn garbage_on_data(ledger_path: &str, entries: Vec<Entry>) {
+        let mut writer = LedgerWriter::open(&ledger_path, true).unwrap();
+        writer.write_entries(entries).unwrap();
+        writer.data.write_all(b"hi there!").unwrap();
+    }
+
+    fn read_ledger_check(ledger_path: &str, entries: Vec<Entry>, len: usize) {
+        let read_entries = read_ledger(&ledger_path, true).unwrap();
+        let mut i = 0;
+
+        for entry in read_entries {
+            assert_eq!(entry.unwrap(), entries[i]);
+            i += 1;
+        }
+        assert_eq!(i, len);
+    }
+
+    fn ledger_window_check(ledger_path: &str, entries: Vec<Entry>, len: usize) {
+        let mut window = LedgerWindow::open(&ledger_path).unwrap();
+        for i in 0..len {
+            let entry = window.get_entry(i as u64);
+            assert_eq!(entry.unwrap(), entries[i]);
+        }
+    }
+
+    #[test]
+    fn test_recover_ledger() {
+        use logger;
+        logger::setup();
+
+        let entries = make_tiny_test_entries(10);
+        let ledger_path = tmp_ledger_path("test_recover_ledger");
+
+        // truncate data file, tests recover inside read_ledger_check()
+        truncated_last_entry(&ledger_path, entries.clone());
+        read_ledger_check(&ledger_path, entries.clone(), entries.len() - 1);
+
+        // truncate data file, tests recover inside LedgerWindow::new()
+        truncated_last_entry(&ledger_path, entries.clone());
+        ledger_window_check(&ledger_path, entries.clone(), entries.len() - 1);
+
+        // restore last entry, tests recover_ledger() inside LedgerWriter::new()
+        truncated_last_entry(&ledger_path, entries.clone());
+        // verify should fail at first
+        assert!(verify_ledger(&ledger_path).is_err());
+        {
+            let mut writer = LedgerWriter::recover(&ledger_path).unwrap();
+            writer.write_entry(&entries[entries.len() - 1]).unwrap();
+        }
+        // and be fine after recover()
+        verify_ledger(&ledger_path).unwrap();
+
+        read_ledger_check(&ledger_path, entries.clone(), entries.len());
+        ledger_window_check(&ledger_path, entries.clone(), entries.len());
+
+        // make it look like data is newer in time, check reader...
+        garbage_on_data(&ledger_path, entries.clone());
+        read_ledger_check(&ledger_path, entries.clone(), entries.len());
+
+        // make it look like data is newer in time, check window...
+        garbage_on_data(&ledger_path, entries.clone());
+        ledger_window_check(&ledger_path, entries.clone(), entries.len());
+
+        // make it look like data is newer in time, check writer...
+        garbage_on_data(&ledger_path, entries[..entries.len() - 1].to_vec());
+        assert!(verify_ledger(&ledger_path).is_err());
+        {
+            let mut writer = LedgerWriter::recover(&ledger_path).unwrap();
+            writer.write_entry(&entries[entries.len() - 1]).unwrap();
+        }
+        verify_ledger(&ledger_path).unwrap();
+        read_ledger_check(&ledger_path, entries.clone(), entries.len());
+        ledger_window_check(&ledger_path, entries.clone(), entries.len());
+        let _ignored = remove_dir_all(&ledger_path);
+    }
+
+    #[test]
+    fn test_verify_ledger() {
+        use logger;
+        logger::setup();
+
+        let entries = make_tiny_test_entries(10);
+        let ledger_path = tmp_ledger_path("test_verify_ledger");
+        {
+            let mut writer = LedgerWriter::open(&ledger_path, true).unwrap();
+            writer.write_entries(entries.clone()).unwrap();
+        }
+        // TODO more cases that make ledger_verify() fail
+        // assert!(verify_ledger(&ledger_path).is_err());
+
+        assert!(verify_ledger(&ledger_path).is_ok());
+        let _ignored = remove_dir_all(&ledger_path);
+    }
+
+    // #[test]
+    // fn test_copy_ledger() {
+    //     use logger;
+    //     logger::setup();
+    //
+    //     let from = tmp_ledger_path("test_ledger_copy_from");
+    //     let entries = make_tiny_test_entries(10);
+    //
+    //     let mut writer = LedgerWriter::new(&from, true).unwrap();
+    //     writer.write_entries(entries.clone()).unwrap();
+    //
+    //     let to = tmp_ledger_path("test_ledger_copy_to");
+    //
+    //     copy_ledger(&from, &to).unwrap();
+    //
+    //     let mut read_entries = vec![];
+    //     for x in read_ledger(&to).unwrap() {
+    //         let entry = x.unwrap();
+    //         trace!("entry... {:?}", entry);
+    //         read_entries.push(entry);
+    //     }
+    //     assert_eq!(read_entries, entries);
+    //
+    //     std::fs::remove_dir_all(from).unwrap();
+    //     std::fs::remove_dir_all(to).unwrap();
+    // }
+
 }
13  src/lib.rs
@@ -12,8 +12,10 @@ pub mod counter;
 pub mod bank;
 pub mod banking_stage;
+pub mod blob_fetch_stage;
+pub mod broadcast_stage;
 pub mod budget;
 pub mod choose_gossip_peer_strategy;
 pub mod client;
 pub mod crdt;
 pub mod drone;
 pub mod entry;
@@ -25,6 +27,7 @@ pub mod fullnode;
 pub mod hash;
 pub mod ledger;
 pub mod logger;
+pub mod metrics;
 pub mod mint;
 pub mod nat;
 pub mod ncp;
@@ -37,7 +40,9 @@ pub mod request;
 pub mod request_processor;
 pub mod request_stage;
 pub mod result;
+pub mod retransmit_stage;
 pub mod rpu;
+pub mod service;
 pub mod signature;
 pub mod sigverify;
 pub mod sigverify_stage;
@@ -47,9 +52,13 @@ pub mod timing;
 pub mod tpu;
 pub mod transaction;
 pub mod tvu;
-pub mod window_stage;
+pub mod vote_stage;
+pub mod voting;
 pub mod wallet;
+pub mod window;
+pub mod window_stage;
 pub mod write_stage;
 extern crate bincode;
+extern crate bs58;
 extern crate byteorder;
 extern crate chrono;
 extern crate generic_array;
@@ -65,10 +74,12 @@ extern crate serde_derive;
 extern crate pnet_datalink;
 extern crate serde_json;
 extern crate sha2;
+extern crate sys_info;
 extern crate untrusted;

 #[cfg(test)]
 #[macro_use]
 extern crate matches;

+extern crate influx_db_client;
 extern crate rand;

src/logger.rs
@@ -9,6 +9,8 @@ static INIT: Once = ONCE_INIT;
 /// Setup function that is only run once, even if called multiple times.
 pub fn setup() {
     INIT.call_once(|| {
-        let _ = env_logger::init();
+        env_logger::Builder::from_default_env()
+            .default_format_timestamp_nanos(true)
+            .init();
     });
 }

354  src/metrics.rs  Normal file
@@ -0,0 +1,354 @@
+//! The `metrics` module enables sending measurements to an InfluxDB instance
+
+use influx_db_client as influxdb;
+use std::env;
+use std::sync::mpsc::{channel, Receiver, RecvTimeoutError, Sender};
+use std::sync::{Arc, Barrier, Mutex, Once, ONCE_INIT};
+use std::thread;
+use std::time::{Duration, Instant};
+use sys_info::hostname;
+use timing;
+
+#[derive(Debug)]
+enum MetricsCommand {
+    Submit(influxdb::Point),
+    Flush(Arc<Barrier>),
+}
+
+struct MetricsAgent {
+    sender: Sender<MetricsCommand>,
+}
+
+trait MetricsWriter {
+    // Write the points and empty the vector. Called on the internal
+    // MetricsAgent worker thread.
+    fn write(&self, points: Vec<influxdb::Point>);
+}
+
+struct InfluxDbMetricsWriter {
+    client: Option<influxdb::Client>,
+}
+
+impl InfluxDbMetricsWriter {
+    fn new() -> Self {
+        InfluxDbMetricsWriter {
+            client: Self::build_client(),
+        }
+    }
+
+    fn build_client() -> Option<influxdb::Client> {
+        let host = env::var("INFLUX_HOST")
+            .unwrap_or_else(|_| "https://metrics.solana.com:8086".to_string());
+        let db = env::var("INFLUX_DATABASE").unwrap_or_else(|_| "scratch".to_string());
+        let username = env::var("INFLUX_USERNAME").unwrap_or_else(|_| "scratch_writer".to_string());
+        let password = env::var("INFLUX_PASSWORD").unwrap_or_else(|_| "topsecret".to_string());
+
+        debug!("InfluxDB host={} db={} username={}", host, db, username);
+        let mut client = influxdb::Client::new_with_option(host, db, None)
+            .set_authentication(username, password);
+
+        client.set_read_timeout(1 /*second*/);
+        client.set_write_timeout(1 /*second*/);
+
+        debug!("InfluxDB version: {:?}", client.get_version());
+        Some(client)
+    }
+}
+
+impl MetricsWriter for InfluxDbMetricsWriter {
+    fn write(&self, points: Vec<influxdb::Point>) {
+        if let Some(ref client) = self.client {
+            debug!("submitting {} points", points.len());
+            if let Err(err) = client.write_points(
+                influxdb::Points { point: points },
+                Some(influxdb::Precision::Milliseconds),
+                None,
+            ) {
+                debug!("InfluxDbMetricsWriter write error: {:?}", err);
+            }
+        }
+    }
+}
+
+impl Default for MetricsAgent {
+    fn default() -> Self {
+        Self::new(
+            Arc::new(InfluxDbMetricsWriter::new()),
+            Duration::from_secs(10),
+        )
+    }
+}
+
+impl MetricsAgent {
+    fn new(writer: Arc<MetricsWriter + Send + Sync>, write_frequency: Duration) -> Self {
+        let (sender, receiver) = channel::<MetricsCommand>();
+        thread::spawn(move || Self::run(&receiver, &writer, write_frequency));
+        MetricsAgent { sender }
+    }
+
+    fn run(
+        receiver: &Receiver<MetricsCommand>,
+        writer: &Arc<MetricsWriter + Send + Sync>,
+        write_frequency: Duration,
+    ) {
+        trace!("run: enter");
+        let mut last_write_time = Instant::now();
+        let mut points = Vec::new();
+
+        loop {
+            match receiver.recv_timeout(write_frequency / 2) {
+                Ok(cmd) => match cmd {
+                    MetricsCommand::Flush(barrier) => {
+                        debug!("metrics_thread: flush");
+                        if !points.is_empty() {
+                            writer.write(points);
+                            points = Vec::new();
+                            last_write_time = Instant::now();
+                        }
+                        barrier.wait();
+                    }
+                    MetricsCommand::Submit(point) => {
+                        debug!("run: submit {:?}", point);
+                        points.push(point);
+                    }
+                },
+                Err(RecvTimeoutError::Timeout) => {
+                    trace!("run: receive timeout");
+                }
+                Err(RecvTimeoutError::Disconnected) => {
+                    debug!("run: sender disconnected");
+                    break;
+                }
+            }
+
+            let now = Instant::now();
+            if now.duration_since(last_write_time) >= write_frequency && !points.is_empty() {
+                debug!("run: writing {} points", points.len());
+                writer.write(points);
+                points = Vec::new();
+                last_write_time = now;
+            }
+        }
+        trace!("run: exit");
+    }
+
+    pub fn submit(&self, mut point: influxdb::Point) {
+        if point.timestamp.is_none() {
+            point.timestamp = Some(timing::timestamp() as i64);
+        }
+        debug!("Submitting point: {:?}", point);
+        self.sender.send(MetricsCommand::Submit(point)).unwrap();
+    }
+
+    pub fn flush(&self) {
+        debug!("Flush");
+        let barrier = Arc::new(Barrier::new(2));
+        self.sender
+            .send(MetricsCommand::Flush(Arc::clone(&barrier)))
+            .unwrap();
+
+        barrier.wait();
+    }
+}
+
+impl Drop for MetricsAgent {
+    fn drop(&mut self) {
+        self.flush();
+    }
+}
+
+fn get_singleton_agent() -> Arc<Mutex<MetricsAgent>> {
+    static INIT: Once = ONCE_INIT;
+    static mut AGENT: Option<Arc<Mutex<MetricsAgent>>> = None;
+    unsafe {
+        INIT.call_once(|| AGENT = Some(Arc::new(Mutex::new(MetricsAgent::default()))));
+        match AGENT {
+            Some(ref agent) => agent.clone(),
+            None => panic!("Failed to initialize metrics agent"),
+        }
+    }
+}
+
+/// Submits a new point from any thread. Note that points are internally queued
+/// and transmitted periodically in batches.
+pub fn submit(point: influxdb::Point) {
+    let agent_mutex = get_singleton_agent();
+    let agent = agent_mutex.lock().unwrap();
+    agent.submit(point);
+}
+
+/// Blocks until all pending points from previous calls to `submit` have been
+/// transmitted.
+pub fn flush() {
+    let agent_mutex = get_singleton_agent();
+    let agent = agent_mutex.lock().unwrap();
+    agent.flush();
+}
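Usage sketch for the module-level API above: points are queued on the agent's worker thread and written in batches; flush() blocks until the queue drains. The measurement and field names are illustrative.

use influx_db_client as influxdb;
use metrics;

fn report(count: i64) {
    metrics::submit(
        influxdb::Point::new("bench")
            .add_field("count", influxdb::Value::Integer(count))
            .to_owned(),
    );
    metrics::flush(); // block until the point reaches the writer
}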
+
+/// Hook the panic handler to generate a data point on each panic
+pub fn set_panic_hook(program: &'static str) {
+    use std::panic;
+    use std::sync::{Once, ONCE_INIT};
+    static SET_HOOK: Once = ONCE_INIT;
+    SET_HOOK.call_once(|| {
+        let default_hook = panic::take_hook();
+        panic::set_hook(Box::new(move |ono| {
+            default_hook(ono);
+            submit(
+                influxdb::Point::new("panic")
+                    .add_tag("program", influxdb::Value::String(program.to_string()))
+                    .add_tag(
+                        "thread",
+                        influxdb::Value::String(
+                            thread::current().name().unwrap_or("?").to_string(),
+                        ),
+                    )
+                    // The 'one' field exists to give Kapacitor Alerts a numerical value
+                    // to filter on
+                    .add_field("one", influxdb::Value::Integer(1))
+                    .add_field(
+                        "message",
+                        influxdb::Value::String(
+                            // TODO: use ono.message() when it becomes stable
+                            ono.to_string(),
+                        ),
+                    )
+                    .add_field(
+                        "location",
+                        influxdb::Value::String(match ono.location() {
+                            Some(location) => location.to_string(),
+                            None => "?".to_string(),
+                        }),
+                    )
+                    .add_field(
+                        "host",
+                        influxdb::Value::String(
+                            hostname().unwrap_or_else(|_| "?".to_string())
+                        ),
+                    )
+                    .to_owned(),
+            );
+            // Flush metrics immediately in case the process exits immediately
+            // upon return
+            flush();
+        }));
+    });
+}
+
+#[cfg(test)]
+mod test {
+    use super::*;
+    use rand::random;
+    use std::sync::atomic::{AtomicUsize, Ordering};
+
+    struct MockMetricsWriter {
+        points_written: AtomicUsize,
+    }
+    impl MockMetricsWriter {
+        fn new() -> Self {
+            MockMetricsWriter {
+                points_written: AtomicUsize::new(0),
+            }
+        }
+
+        fn points_written(&self) -> usize {
+            return self.points_written.load(Ordering::SeqCst);
+        }
+    }
+
+    impl MetricsWriter for MockMetricsWriter {
+        fn write(&self, points: Vec<influxdb::Point>) {
+            assert!(!points.is_empty());
+
+            self.points_written
+                .fetch_add(points.len(), Ordering::SeqCst);
+
+            println!(
+                "Writing {} points ({} total)",
+                points.len(),
+                self.points_written.load(Ordering::SeqCst)
+            );
+        }
+    }
+
+    #[test]
+    fn test_submit() {
+        let writer = Arc::new(MockMetricsWriter::new());
+        let agent = MetricsAgent::new(writer.clone(), Duration::from_secs(10));
+
+        for i in 0..42 {
+            agent.submit(influxdb::Point::new(&format!("measurement {}", i)));
+        }
+
+        agent.flush();
+        assert_eq!(writer.points_written(), 42);
+    }
+
+    #[test]
+    fn test_submit_with_delay() {
+        let writer = Arc::new(MockMetricsWriter::new());
+        let agent = MetricsAgent::new(writer.clone(), Duration::from_millis(100));
+
+        agent.submit(influxdb::Point::new("point 1"));
+        thread::sleep(Duration::from_secs(2));
+        assert_eq!(writer.points_written(), 1);
+    }
+
+    #[test]
+    fn test_multithread_submit() {
+        let writer = Arc::new(MockMetricsWriter::new());
+        let agent = Arc::new(Mutex::new(MetricsAgent::new(
+            writer.clone(),
+            Duration::from_secs(10),
+        )));
+
+        //
+        // Submit measurements from different threads
+        //
+        let mut threads = Vec::new();
+        for i in 0..42 {
+            let point = influxdb::Point::new(&format!("measurement {}", i));
+            let agent = Arc::clone(&agent);
+            threads.push(thread::spawn(move || {
+                agent.lock().unwrap().submit(point);
+            }));
+        }
+
+        for thread in threads {
+            thread.join().unwrap();
+        }
+
+        agent.lock().unwrap().flush();
+        assert_eq!(writer.points_written(), 42);
+    }
+
+    #[test]
+    fn test_flush_before_drop() {
+        let writer = Arc::new(MockMetricsWriter::new());
+        {
+            let agent = MetricsAgent::new(writer.clone(), Duration::from_secs(9999999));
+            agent.submit(influxdb::Point::new("point 1"));
+        }
+
+        assert_eq!(writer.points_written(), 1);
+    }
+
+    #[test]
+    fn test_live_submit() {
+        let agent = MetricsAgent::default();
+
+        let point = influxdb::Point::new("live_submit_test")
+            .add_tag("test", influxdb::Value::Boolean(true))
+            .add_field(
+                "random_bool",
+                influxdb::Value::Boolean(random::<u8>() < 128),
+            )
+            .add_field(
+                "random_int",
+                influxdb::Value::Integer(random::<u8>() as i64),
+            )
+            .to_owned();
+        agent.submit(point);
+    }
+}

26  src/mint.rs
@@ -3,25 +3,21 @@
 use entry::Entry;
 use hash::{hash, Hash};
 use ring::rand::SystemRandom;
-use signature::{KeyPair, KeyPairUtil, PublicKey};
+use signature::{Keypair, KeypairUtil, Pubkey};
 use transaction::Transaction;
 use untrusted::Input;

 #[derive(Serialize, Deserialize, Debug)]
 pub struct Mint {
     pub pkcs8: Vec<u8>,
-    pubkey: PublicKey,
+    pubkey: Pubkey,
     pub tokens: i64,
 }

 impl Mint {
-    pub fn new(tokens: i64) -> Self {
-        let rnd = SystemRandom::new();
-        let pkcs8 = KeyPair::generate_pkcs8(&rnd)
-            .expect("generate_pkcs8 in mint pub fn new")
-            .to_vec();
+    pub fn new_with_pkcs8(tokens: i64, pkcs8: Vec<u8>) -> Self {
         let keypair =
-            KeyPair::from_pkcs8(Input::from(&pkcs8)).expect("from_pkcs8 in mint pub fn new");
+            Keypair::from_pkcs8(Input::from(&pkcs8)).expect("from_pkcs8 in mint pub fn new");
         let pubkey = keypair.pubkey();
         Mint {
             pkcs8,
@@ -30,6 +26,14 @@ impl Mint {
         }
     }

+    pub fn new(tokens: i64) -> Self {
+        let rnd = SystemRandom::new();
+        let pkcs8 = Keypair::generate_pkcs8(&rnd)
+            .expect("generate_pkcs8 in mint pub fn new")
+            .to_vec();
+        Self::new_with_pkcs8(tokens, pkcs8)
+    }
+
     pub fn seed(&self) -> Hash {
         hash(&self.pkcs8)
     }
@@ -38,11 +42,11 @@ impl Mint {
         self.create_entries()[1].id
     }

-    pub fn keypair(&self) -> KeyPair {
-        KeyPair::from_pkcs8(Input::from(&self.pkcs8)).expect("from_pkcs8 in mint pub fn keypair")
+    pub fn keypair(&self) -> Keypair {
+        Keypair::from_pkcs8(Input::from(&self.pkcs8)).expect("from_pkcs8 in mint pub fn keypair")
     }

-    pub fn pubkey(&self) -> PublicKey {
+    pub fn pubkey(&self) -> Pubkey {
         self.pubkey
     }
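Usage sketch for the refactor above: new_with_pkcs8() makes Mint construction deterministic, while new() still draws fresh randomness; both go through the same constructor. Assumes Pubkey compares with ==, which holds for its derived/aliased representation.

use mint::Mint;

fn mint_demo() {
    let mint = Mint::new(10_000); // fresh SystemRandom-backed pkcs8
    let copy = Mint::new_with_pkcs8(10_000, mint.pkcs8.clone());
    // same pkcs8 bytes, so the same keypair and pubkey fall out
    assert_eq!(mint.pubkey(), copy.pubkey());
}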
77  src/nat.rs
@@ -1,16 +1,14 @@
 //! The `nat` module assists with NAT traversal

-extern crate futures;
-extern crate p2p;
+extern crate rand;
 extern crate reqwest;
-extern crate tokio_core;

 use std::net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket};

-use self::futures::Future;
-use self::p2p::UdpSocketExt;
-use std::env;
-use std::str;
+use rand::{thread_rng, Rng};
+use std::io;

 /// A data type representing a public Udp socket
 pub struct UdpSocketPair {
@@ -32,66 +30,19 @@ pub fn get_public_ip_addr() -> Result<IpAddr, String> {
     }
 }

-/// Binds a private Udp address to a public address using UPnP if possible
-pub fn udp_public_bind(label: &str) -> UdpSocketPair {
-    let private_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 0);
+pub fn udp_random_bind(start: u16, end: u16, tries: u32) -> io::Result<UdpSocket> {
+    let mut count = 0;
+    loop {
+        count += 1;

-    let mut core = tokio_core::reactor::Core::new().unwrap();
-    let handle = core.handle();
-    let mc = p2p::P2p::default();
-    let res = core.run({
-        tokio_core::net::UdpSocket::bind_public(&private_addr, &handle, &mc)
-            .map_err(|e| {
-                info!("Failed to bind public socket for {}: {}", label, e);
-            })
-            .and_then(|(socket, public_addr)| Ok((public_addr, socket.local_addr().unwrap())))
-    });
+        let rand_port = thread_rng().gen_range(start, end);
+        let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), rand_port);

-    match res {
-        Ok((public_addr, local_addr)) => {
-            info!(
-                "Using local address {} mapped to UPnP public address {} for {}",
-                local_addr, public_addr, label
-            );
-
-            // NAT should now be forwarding inbound packets directed at
-            // |public_addr| to the local |receiver| socket...
-            let receiver = UdpSocket::bind(local_addr).unwrap();
-
-            // TODO: try to autodetect a broken NAT (issue #496)
-            let sender = if env::var("BROKEN_NAT").is_err() {
-                receiver.try_clone().unwrap()
-            } else {
-                // ... however for outbound packets, some NATs *will not* rewrite the
-                // source port from |receiver.local_addr().port()| to |public_addr.port()|.
-                // This is currently a problem when talking with a fullnode as it
-                // assumes it can send UDP packets back at the source. This hits the
-                // NAT as a datagram for |receiver.local_addr().port()| on the NAT's public
-                // IP, which the NAT promptly discards. As a short term hack, create a
-                // local UDP socket, |sender|, with the same port as |public_addr.port()|.
-                //
-                // TODO: Remove the |sender| socket and deal with the downstream changes to
-                //       the UDP signalling
-                let mut local_addr_sender = local_addr.clone();
-                local_addr_sender.set_port(public_addr.port());
-                UdpSocket::bind(local_addr_sender).unwrap()
-            };
-
-            UdpSocketPair {
-                addr: public_addr,
-                receiver,
-                sender,
-            }
-        }
-        Err(_) => {
-            let sender = UdpSocket::bind(private_addr).unwrap();
-            let local_addr = sender.local_addr().unwrap();
-            info!("Using local address {} for {}", local_addr, label);
-            UdpSocketPair {
-                addr: private_addr,
-                receiver: sender.try_clone().unwrap(),
-                sender,
-            }
+        match UdpSocket::bind(addr) {
+            Result::Ok(val) => break Result::Ok(val),
+            Result::Err(err) => if err.kind() != io::ErrorKind::AddrInUse || count >= tries {
+                return Err(err);
+            },
         }
     }
 }
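Usage sketch for udp_random_bind() above: try up to five random ports in [8000, 10000) and surface the underlying error if none bind. The range and retry count are illustrative.

use nat::udp_random_bind;
use std::net::UdpSocket;

fn bind_service_port() -> std::io::Result<UdpSocket> {
    let socket = udp_random_bind(8000, 10_000, 5)?; // retries only on AddrInUse
    println!("bound {}", socket.local_addr()?);
    Ok(socket)
}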
58
src/ncp.rs
58
src/ncp.rs
@@ -1,23 +1,27 @@
 //! The `ncp` module implements the network control plane.

 use crdt::Crdt;
-use packet::{BlobRecycler, SharedBlob};
+use packet::BlobRecycler;
 use result::Result;
+use service::Service;
 use std::net::UdpSocket;
-use std::sync::atomic::AtomicBool;
+use std::sync::atomic::{AtomicBool, Ordering};
 use std::sync::mpsc::channel;
 use std::sync::{Arc, RwLock};
-use std::thread::JoinHandle;
+use std::thread::{self, JoinHandle};
 use streamer;
+use window::SharedWindow;

 pub struct Ncp {
-    pub thread_hdls: Vec<JoinHandle<()>>,
+    exit: Arc<AtomicBool>,
+    thread_hdls: Vec<JoinHandle<()>>,
 }

 impl Ncp {
     pub fn new(
-        crdt: Arc<RwLock<Crdt>>,
-        window: Arc<RwLock<Vec<Option<SharedBlob>>>>,
+        crdt: &Arc<RwLock<Crdt>>,
+        window: SharedWindow,
+        ledger_path: Option<&str>,
         gossip_listen_socket: UdpSocket,
         gossip_send_socket: UdpSocket,
         exit: Arc<AtomicBool>,
@@ -26,7 +30,7 @@ impl Ncp {
         let (request_sender, request_receiver) = channel();
         trace!(
             "Ncp: id: {:?}, listening on: {:?}",
-            &crdt.read().unwrap().me[..4],
+            &crdt.read().unwrap().me.as_ref()[..4],
             gossip_listen_socket.local_addr().unwrap()
         );
         let t_receiver = streamer::blob_receiver(
@@ -37,22 +41,41 @@ impl Ncp {
         )?;
         let (response_sender, response_receiver) = channel();
         let t_responder = streamer::responder(
+            "ncp",
             gossip_send_socket,
             exit.clone(),
             blob_recycler.clone(),
             response_receiver,
         );
         let t_listen = Crdt::listen(
             crdt.clone(),
             window,
+            ledger_path,
             blob_recycler.clone(),
             request_receiver,
             response_sender.clone(),
             exit.clone(),
         );
-        let t_gossip = Crdt::gossip(crdt.clone(), blob_recycler, response_sender, exit);
+        let t_gossip = Crdt::gossip(crdt.clone(), blob_recycler, response_sender, exit.clone());
         let thread_hdls = vec![t_receiver, t_responder, t_listen, t_gossip];
-        Ok(Ncp { thread_hdls })
+        Ok(Ncp { exit, thread_hdls })
     }
+
+    pub fn close(self) -> thread::Result<()> {
+        self.exit.store(true, Ordering::Relaxed);
+        self.join()
+    }
 }
+
+impl Service for Ncp {
+    fn thread_hdls(self) -> Vec<JoinHandle<()>> {
+        self.thread_hdls
+    }
+
+    fn join(self) -> thread::Result<()> {
+        for thread_hdl in self.thread_hdls() {
+            thread_hdl.join()?;
+        }
+        Ok(())
+    }
+}

@@ -60,27 +83,26 @@ impl Ncp {
 mod tests {
     use crdt::{Crdt, TestNode};
     use ncp::Ncp;
-    use std::sync::atomic::{AtomicBool, Ordering};
+    use std::sync::atomic::AtomicBool;
     use std::sync::{Arc, RwLock};

     #[test]
     #[ignore]
     // test that stage will exit when flag is set
     fn test_exit() {
         let exit = Arc::new(AtomicBool::new(false));
-        let tn = TestNode::new();
-        let crdt = Crdt::new(tn.data.clone());
+        let tn = TestNode::new_localhost();
+        let crdt = Crdt::new(tn.data.clone()).expect("Crdt::new");
         let c = Arc::new(RwLock::new(crdt));
         let w = Arc::new(RwLock::new(vec![]));
         let d = Ncp::new(
-            c.clone(),
+            &c,
             w,
+            None,
             tn.sockets.gossip,
             tn.sockets.gossip_send,
             exit.clone(),
         ).unwrap();
-        exit.store(true, Ordering::Relaxed);
-        for t in d.thread_hdls {
-            t.join().expect("thread join");
-        }
+        d.close().expect("thread join");
     }
 }
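With the `exit` flag moved into the struct, shutting down an `Ncp` becomes one call: `close()` sets the flag and joins the threads. A self-contained sketch of that pattern on a toy `Stage` (not the real `Ncp`, whose threads come from `streamer` and `Crdt`):

```rust
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::thread::{self, JoinHandle};

// Toy stage: one worker thread that spins until the shared exit flag is set.
struct Stage {
    exit: Arc<AtomicBool>,
    thread_hdls: Vec<JoinHandle<()>>,
}

impl Stage {
    fn new() -> Self {
        let exit = Arc::new(AtomicBool::new(false));
        let flag = exit.clone();
        let hdl = thread::spawn(move || {
            while !flag.load(Ordering::Relaxed) {
                thread::yield_now(); // a real stage would do work here
            }
        });
        Stage { exit, thread_hdls: vec![hdl] }
    }

    // Mirrors Service::join: drain every handle.
    fn join(self) -> thread::Result<()> {
        for hdl in self.thread_hdls {
            hdl.join()?;
        }
        Ok(())
    }

    // Mirrors the new Ncp::close: flip the flag, then join.
    fn close(self) -> thread::Result<()> {
        self.exit.store(true, Ordering::Relaxed);
        self.join()
    }
}

fn main() {
    let stage = Stage::new();
    stage.close().expect("clean shutdown");
}
```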
140 src/packet.rs
@@ -2,9 +2,10 @@
 use bincode::{deserialize, serialize};
 use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
 use counter::Counter;
+use log::Level;
 use result::{Error, Result};
 use serde::Serialize;
-use signature::PublicKey;
+use signature::Pubkey;
 use std::collections::VecDeque;
 use std::fmt;
 use std::io;
@@ -12,7 +13,6 @@ use std::mem::size_of;
 use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, UdpSocket};
 use std::sync::atomic::AtomicUsize;
 use std::sync::{Arc, Mutex, RwLock};
-use std::time::Instant;

 pub type SharedPackets = Arc<RwLock<Packets>>;
 pub type SharedBlob = Arc<RwLock<Blob>>;
@@ -21,12 +21,12 @@ pub type PacketRecycler = Recycler<Packets>;
 pub type BlobRecycler = Recycler<Blob>;

 pub const NUM_PACKETS: usize = 1024 * 8;
-pub const BLOB_SIZE: usize = 64 * 1024;
+pub const BLOB_SIZE: usize = (64 * 1024 - 128); // wikipedia says there should be 20b for ipv4 headers
+pub const BLOB_DATA_SIZE: usize = BLOB_SIZE - BLOB_HEADER_SIZE;
 pub const PACKET_DATA_SIZE: usize = 256;
 pub const NUM_BLOBS: usize = (NUM_PACKETS * PACKET_DATA_SIZE) / BLOB_SIZE;

-#[derive(Clone, Default)]
+#[derive(Clone, Default, Debug, PartialEq)]
 #[repr(C)]
 pub struct Meta {
     pub size: usize,
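For reference, the arithmetic behind the new constants: `BLOB_SIZE` now sits 128 bytes under 64 KiB to leave headroom for IP/UDP framing, and `BLOB_DATA_SIZE` subtracts the blob header on top of that. A quick check (the header size here is a made-up stand-in; the real `BLOB_HEADER_SIZE` is defined elsewhere in packet.rs):

```rust
const BLOB_SIZE: usize = 64 * 1024 - 128; // 65_408 bytes on the wire
const BLOB_HEADER_SIZE: usize = 64; // hypothetical value, for illustration only
const BLOB_DATA_SIZE: usize = BLOB_SIZE - BLOB_HEADER_SIZE;

fn main() {
    assert_eq!(BLOB_SIZE, 65_408);
    // Payload capacity left for callers once the header is accounted for.
    println!("BLOB_DATA_SIZE = {}", BLOB_DATA_SIZE);
}
```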
@@ -63,6 +63,19 @@ impl Default for Packet {
     }
 }

+pub trait Reset {
+    // Reset trait is an object that can re-initialize important parts
+    // of itself, similar to Default, but not necessarily a full clear
+    // also, we do it in-place.
+    fn reset(&mut self);
+}
+
+impl Reset for Packet {
+    fn reset(&mut self) {
+        self.meta = Meta::default();
+    }
+}
+
 impl Meta {
     pub fn addr(&self) -> SocketAddr {
         if !self.v6 {
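The point of `Reset` is to scrub a pooled object in place before reuse instead of reallocating it, which `Default` alone cannot express. A toy implementation on a hypothetical `Buffer` type (not from this diff):

```rust
// Same shape as the diff's trait: re-initialize in place.
trait Reset {
    fn reset(&mut self);
}

#[derive(Default)]
struct Buffer {
    len: usize,
    data: Vec<u8>,
}

impl Reset for Buffer {
    fn reset(&mut self) {
        // Keep the heap allocation, drop the contents.
        self.len = 0;
        self.data.clear();
    }
}

fn main() {
    let mut buf = Buffer::default();
    buf.data.extend_from_slice(b"hello");
    buf.len = buf.data.len();
    buf.reset(); // ready for reuse without a fresh allocation
    assert_eq!(buf.len, 0);
    assert!(buf.data.is_empty());
}
```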
@@ -113,6 +126,14 @@ impl Default for Packets {
     }
 }

+impl Reset for Packets {
+    fn reset(&mut self) {
+        for i in 0..self.packets.len() {
+            self.packets[i].reset();
+        }
+    }
+}
+
 #[derive(Clone)]
 pub struct Blob {
     pub data: [u8; BLOB_SIZE],
@@ -140,6 +161,19 @@ impl Default for Blob {
     }
 }

+impl Reset for Blob {
+    fn reset(&mut self) {
+        self.meta = Meta::default();
+        self.data[..BLOB_HEADER_SIZE].copy_from_slice(&[0u8; BLOB_HEADER_SIZE]);
+    }
+}
+
+#[derive(Debug)]
+pub enum BlobError {
+    /// the Blob's meta and data are not self-consistent
+    BadState,
+}
+
 pub struct Recycler<T> {
     gc: Arc<Mutex<Vec<Arc<RwLock<T>>>>>,
 }
@@ -160,24 +194,35 @@ impl<T: Default> Clone for Recycler<T> {
     }
 }

-impl<T: Default> Recycler<T> {
+impl<T: Default + Reset> Recycler<T> {
     pub fn allocate(&self) -> Arc<RwLock<T>> {
         let mut gc = self.gc.lock().expect("recycler lock in pb fn allocate");
-        let x = gc.pop()
-            .unwrap_or_else(|| Arc::new(RwLock::new(Default::default())));
-
-        // Only return the item if this recycler is the last reference to it.
-        // Remove this check once `T` holds a Weak reference back to this
-        // recycler and implements `Drop`. At the time of this writing, Weak can't
-        // be passed across threads ('alloc' is a nightly-only API), and so our
-        // reference-counted recyclables are awkwardly being recycled by hand,
-        // which allows this race condition to exist.
-        if Arc::strong_count(&x) > 1 {
-            warn!("Recycled item still in use. Booting it.");
-            drop(gc);
-            self.allocate()
-        } else {
-            x
+        loop {
+            if let Some(x) = gc.pop() {
+                // Only return the item if this recycler is the last reference to it.
+                // Remove this check once `T` holds a Weak reference back to this
+                // recycler and implements `Drop`. At the time of this writing, Weak can't
+                // be passed across threads ('alloc' is a nightly-only API), and so our
+                // reference-counted recyclables are awkwardly being recycled by hand,
+                // which allows this race condition to exist.
+                if Arc::strong_count(&x) >= 1 {
+                    // Commenting out this message, is annoying for known use case of
+                    // validator hanging onto a blob in the window, but also sending it over
+                    // to retransmmit_request
+                    //
+                    // warn!("Recycled item still in use. Booting it.");
+                    continue;
+                }
+
+                {
+                    let mut w = x.write().unwrap();
+                    w.reset();
+                }
+                return x;
+            } else {
+                return Arc::new(RwLock::new(Default::default()));
+            }
         }
     }
     pub fn recycle(&self, x: Arc<RwLock<T>>) {
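The rewritten `allocate` pops pooled items in a loop and refuses to hand out any that are still referenced elsewhere, resetting the one it returns. Note the committed guard is `Arc::strong_count(&x) >= 1`, which is true for every popped item (the local binding itself counts as one reference), so as written the loop appears to skip everything it pops; the sketch below uses the `> 1` last-reference test that the adjacent comment describes. Types are simplified and `T::default()` stands in for `Reset::reset`:

```rust
use std::sync::{Arc, Mutex, RwLock};

struct Recycler<T> {
    gc: Arc<Mutex<Vec<Arc<RwLock<T>>>>>,
}

impl<T: Default> Recycler<T> {
    fn allocate(&self) -> Arc<RwLock<T>> {
        let mut gc = self.gc.lock().unwrap();
        while let Some(x) = gc.pop() {
            // Someone else still holds this item; leave it out of the pool
            // rather than hand out a shared buffer.
            if Arc::strong_count(&x) > 1 {
                continue;
            }
            *x.write().unwrap() = T::default(); // scrub before reuse
            return x;
        }
        // Pool exhausted: fall back to a fresh allocation.
        Arc::new(RwLock::new(T::default()))
    }

    fn recycle(&self, x: Arc<RwLock<T>>) {
        self.gc.lock().unwrap().push(x);
    }
}

fn main() {
    let r: Recycler<Vec<u8>> = Recycler {
        gc: Arc::new(Mutex::new(vec![])),
    };
    let a = r.allocate();
    r.recycle(a.clone()); // `a` is still live, so the pooled copy is unusable
    let b = r.allocate(); // skips the shared item, allocates fresh
    assert!(!Arc::ptr_eq(&a, &b));
}
```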
@@ -188,7 +233,6 @@ impl<T: Default> Recycler<T> {

 impl Packets {
     fn run_read_from(&mut self, socket: &UdpSocket) -> Result<usize> {
-        static mut COUNTER: Counter = create_counter!("packets", 10);
         self.packets.resize(NUM_PACKETS, Packet::default());
         let mut i = 0;
         //DOCUMENTED SIDE-EFFECT
@@ -198,13 +242,12 @@ impl Packets {
         // * read until it fails
         // * set it back to blocking before returning
         socket.set_nonblocking(false)?;
-        let mut start = Instant::now();
         for p in &mut self.packets {
             p.meta.size = 0;
+            trace!("receiving on {}", socket.local_addr().unwrap());
             match socket.recv_from(&mut p.data) {
                 Err(_) if i > 0 => {
-                    inc_counter!(COUNTER, i, start);
+                    inc_new_counter_info!("packets-recv_count", 1);
                     debug!("got {:?} messages on {}", i, socket.local_addr().unwrap());
                     break;
                 }
@@ -215,8 +258,8 @@ impl Packets {
                 Ok((nrecv, from)) => {
                     p.meta.size = nrecv;
                     p.meta.set_addr(&from);
+                    trace!("got {} bytes from {}", nrecv, from);
                     if i == 0 {
-                        start = Instant::now();
                         socket.set_nonblocking(true)?;
                     }
                 }
@@ -242,7 +285,7 @@ impl Packets {

 pub fn to_packets_chunked<T: Serialize>(
     r: &PacketRecycler,
-    xs: Vec<T>,
+    xs: &[T],
     chunks: usize,
 ) -> Vec<SharedPackets> {
     let mut out = vec![];
@@ -260,10 +303,10 @@ pub fn to_packets_chunked<T: Serialize>(
         }
         out.push(p);
     }
-    return out;
+    out
 }

-pub fn to_packets<T: Serialize>(r: &PacketRecycler, xs: Vec<T>) -> Vec<SharedPackets> {
+pub fn to_packets<T: Serialize>(r: &PacketRecycler, xs: &[T]) -> Vec<SharedPackets> {
     to_packets_chunked(r, xs, NUM_PACKETS)
 }
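Taking `xs: &[T]` instead of `Vec<T>` lets callers keep ownership while `to_packets_chunked` walks the input in fixed-size windows, one batch per `SharedPackets`. The slicing idiom in isolation, with plain `Vec`s standing in for the recycler and packet types:

```rust
// One output batch per `chunks`-sized window of the input slice.
fn to_chunks<T: Clone>(xs: &[T], chunks: usize) -> Vec<Vec<T>> {
    let mut out = vec![];
    for window in xs.chunks(chunks) {
        out.push(window.to_vec());
    }
    out
}

fn main() {
    let xs: Vec<u32> = (0..10).collect();
    let rv = to_chunks(&xs, 4);
    assert_eq!(rv.len(), 3);
    assert_eq!(rv[0].len(), 4);
    assert_eq!(rv[2].len(), 2); // the tail chunk runs short, like rv[1] in test_to_packets
}
```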
@@ -277,7 +320,7 @@ pub fn to_blob<T: Serialize>(
     let mut b = blob.write().unwrap();
     let v = serialize(&resp)?;
     let len = v.len();
-    assert!(len < BLOB_SIZE);
+    assert!(len <= BLOB_SIZE);
     b.data[..len].copy_from_slice(&v);
     b.meta.size = len;
     b.meta.set_addr(&rsp_addr);
@@ -297,7 +340,7 @@ pub fn to_blobs<T: Serialize>(
 }

 const BLOB_INDEX_END: usize = size_of::<u64>();
-const BLOB_ID_END: usize = BLOB_INDEX_END + size_of::<usize>() + size_of::<PublicKey>();
+const BLOB_ID_END: usize = BLOB_INDEX_END + size_of::<usize>() + size_of::<Pubkey>();
 const BLOB_FLAGS_END: usize = BLOB_ID_END + size_of::<u32>();
 const BLOB_SIZE_END: usize = BLOB_FLAGS_END + size_of::<u64>();

@@ -324,12 +367,12 @@ impl Blob {
     }
     /// sender id, we use this for identifying if its a blob from the leader that we should
     /// retransmit. eventually blobs should have a signature that we can use ffor spam filtering
-    pub fn get_id(&self) -> Result<PublicKey> {
+    pub fn get_id(&self) -> Result<Pubkey> {
         let e = deserialize(&self.data[BLOB_INDEX_END..BLOB_ID_END])?;
         Ok(e)
     }

-    pub fn set_id(&mut self, id: PublicKey) -> Result<()> {
+    pub fn set_id(&mut self, id: Pubkey) -> Result<()> {
         let wtr = serialize(&id)?;
         self.data[BLOB_INDEX_END..BLOB_ID_END].clone_from_slice(&wtr);
         Ok(())
@@ -349,7 +392,7 @@ impl Blob {
     }

     pub fn is_coding(&self) -> bool {
-        return (self.get_flags().unwrap() & BLOB_FLAG_IS_CODING) != 0;
+        (self.get_flags().unwrap() & BLOB_FLAG_IS_CODING) != 0
     }

     pub fn set_coding(&mut self) -> Result<()> {
@@ -376,6 +419,14 @@ impl Blob {
     pub fn data_mut(&mut self) -> &mut [u8] {
         &mut self.data[BLOB_HEADER_SIZE..]
     }
+    pub fn get_size(&self) -> Result<usize> {
+        let size = self.get_data_size()? as usize;
+        if self.meta.size == size {
+            Ok(size - BLOB_HEADER_SIZE)
+        } else {
+            Err(Error::BlobError(BlobError::BadState))
+        }
+    }
     pub fn set_size(&mut self, size: usize) {
         let new_size = size + BLOB_HEADER_SIZE;
         self.meta.size = new_size;
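The new `get_size` enforces an invariant: the size recorded inside the blob header must match `meta.size`, and only then is the payload length (total minus header) reported; otherwise the blob is in a `BadState`. A stripped-down sketch with plain fields in place of the real header accessors and a hypothetical header size:

```rust
const BLOB_HEADER_SIZE: usize = 64; // hypothetical, for illustration only

struct Blob {
    meta_size: usize,   // what Meta.size tracks: bytes received on the wire
    header_size: usize, // what the size field inside the header records
}

impl Blob {
    fn set_size(&mut self, payload: usize) {
        let new_size = payload + BLOB_HEADER_SIZE;
        self.meta_size = new_size;
        self.header_size = new_size;
    }

    fn get_size(&self) -> Result<usize, &'static str> {
        if self.meta_size == self.header_size {
            Ok(self.header_size - BLOB_HEADER_SIZE)
        } else {
            Err("BadState: meta and header sizes disagree")
        }
    }
}

fn main() {
    let mut b = Blob { meta_size: 0, header_size: 0 };
    b.set_size(100);
    assert_eq!(b.get_size(), Ok(100));
    b.meta_size = 10; // e.g. a truncated or corrupted receive
    assert!(b.get_size().is_err());
}
```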
@@ -409,6 +460,7 @@ impl Blob {
                 Ok((nrecv, from)) => {
                     p.meta.size = nrecv;
                     p.meta.set_addr(&from);
+                    trace!("got {} bytes from {}", nrecv, from);
                     if i == 0 {
                         socket.set_nonblocking(true)?;
                     }
@@ -424,7 +476,13 @@ impl Blob {
         {
             let p = r.read().expect("'r' read lock in pub fn send_to");
             let a = p.meta.addr();
-            socket.send_to(&p.data[..p.meta.size], &a)?;
+            if let Err(e) = socket.send_to(&p.data[..p.meta.size], &a) {
+                warn!(
+                    "error sending {} byte packet to {:?}: {:?}",
+                    p.meta.size, a, e
+                );
+                Err(e)?;
+            }
         }
         re.recycle(r);
     }
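`send_to` now logs how big the packet was and where it was headed before propagating the failure (`Err(e)?` preserves the old `?` conversion into the crate's error type). The same warn-then-bubble-up idiom, with `eprintln!` standing in for `warn!`:

```rust
use std::io;
use std::net::UdpSocket;

// Log context about a failed send, then propagate the original error.
fn send_logged(socket: &UdpSocket, data: &[u8], addr: &str) -> io::Result<()> {
    if let Err(e) = socket.send_to(data, addr) {
        eprintln!("error sending {} byte packet to {:?}: {:?}", data.len(), addr, e);
        return Err(e);
    }
    Ok(())
}

fn main() -> io::Result<()> {
    let socket = UdpSocket::bind("127.0.0.1:0")?;
    // UDP sends to a parseable local address succeed at the syscall level even
    // if nothing is listening; this just demonstrates the call shape.
    send_logged(&socket, b"ping", "127.0.0.1:9")?;
    Ok(())
}
```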
@@ -435,7 +493,8 @@
 #[cfg(test)]
 mod tests {
     use packet::{
-        to_packets, Blob, BlobRecycler, Packet, PacketRecycler, Packets, Recycler, NUM_PACKETS,
+        to_packets, Blob, BlobRecycler, Meta, Packet, PacketRecycler, Packets, Recycler, Reset,
+        BLOB_HEADER_SIZE, NUM_PACKETS,
     };
     use request::Request;
     use std::collections::VecDeque;
@@ -454,6 +513,12 @@ mod tests {
         assert_eq!(r.gc.lock().unwrap().len(), 0);
     }

+    impl Reset for u8 {
+        fn reset(&mut self) {
+            *self = Default::default();
+        }
+    }
+
     #[test]
     pub fn test_leaked_recyclable() {
         // Ensure that the recycler won't return an item
@@ -520,15 +585,15 @@
     fn test_to_packets() {
         let tx = Request::GetTransactionCount;
         let re = PacketRecycler::default();
-        let rv = to_packets(&re, vec![tx.clone(); 1]);
+        let rv = to_packets(&re, &vec![tx.clone(); 1]);
         assert_eq!(rv.len(), 1);
         assert_eq!(rv[0].read().unwrap().packets.len(), 1);

-        let rv = to_packets(&re, vec![tx.clone(); NUM_PACKETS]);
+        let rv = to_packets(&re, &vec![tx.clone(); NUM_PACKETS]);
         assert_eq!(rv.len(), 1);
         assert_eq!(rv[0].read().unwrap().packets.len(), NUM_PACKETS);

-        let rv = to_packets(&re, vec![tx.clone(); NUM_PACKETS + 1]);
+        let rv = to_packets(&re, &vec![tx.clone(); NUM_PACKETS + 1]);
         assert_eq!(rv.len(), 2);
         assert_eq!(rv[0].read().unwrap().packets.len(), NUM_PACKETS);
         assert_eq!(rv[1].read().unwrap().packets.len(), 1);
@@ -591,6 +656,9 @@
         b.data_mut()[0] = 1;
         assert_eq!(b.data()[0], 1);
         assert_eq!(b.get_index().unwrap(), <u64>::max_value());
+        b.reset();
+        assert!(b.data[..BLOB_HEADER_SIZE].starts_with(&[0u8; BLOB_HEADER_SIZE]));
+        assert_eq!(b.meta, Meta::default());
     }

 }
src/payment_plan.rs
@@ -4,7 +4,7 @@
 //! `Payment`, the payment is executed.

 use chrono::prelude::*;
-use signature::PublicKey;
+use signature::Pubkey;

 /// The types of events a payment plan can process.
 #[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
@@ -12,18 +12,18 @@ pub enum Witness {
     /// The current time.
     Timestamp(DateTime<Utc>),

-    /// A siganture from PublicKey.
-    Signature(PublicKey),
+    /// A siganture from Pubkey.
+    Signature,
 }

-/// Some amount of tokens that should be sent to the `to` `PublicKey`.
+/// Some amount of tokens that should be sent to the `to` `Pubkey`.
 #[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
 pub struct Payment {
     /// Amount to be paid.
     pub tokens: i64,

-    /// The `PublicKey` that `tokens` should be paid to.
-    pub to: PublicKey,
+    /// The `Pubkey` that `tokens` should be paid to.
+    pub to: Pubkey,
 }

 /// Interface to smart contracts.
@@ -36,5 +36,5 @@ pub trait PaymentPlan {

     /// Apply a witness to the payment plan to see if the plan can be reduced.
     /// If so, modify the plan in-place.
-    fn apply_witness(&mut self, witness: &Witness);
+    fn apply_witness(&mut self, witness: &Witness, from: &Pubkey);
 }
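Since `Witness::Signature` no longer carries the signer's key, `apply_witness` has to be told who signed via the new `from` parameter: a conditional plan can only answer "did the right party sign?" if the signer arrives alongside the witness. A compact sketch, with every type below a simplified stand-in for the diff's:

```rust
type Pubkey = [u8; 8]; // hypothetical; the real Pubkey wraps a larger array

#[derive(PartialEq)]
enum Witness {
    Signature,
}

struct Payment {
    tokens: i64,
    to: Pubkey,
}

enum Plan {
    Pay(Payment),
    After(Pubkey, Payment), // pay only once this key has signed
}

impl Plan {
    // Mirrors the new trait signature: the witness plus the signer's key.
    fn apply_witness(&mut self, witness: &Witness, from: &Pubkey) {
        let next = match self {
            Plan::After(key, payment) if *witness == Witness::Signature && *key == *from => {
                Some(Payment { tokens: payment.tokens, to: payment.to })
            }
            _ => None,
        };
        if let Some(payment) = next {
            *self = Plan::Pay(payment); // the plan reduced; execute the payment
        }
    }
}

fn main() {
    let from = [1u8; 8];
    let mut plan = Plan::After(from, Payment { tokens: 5, to: [2u8; 8] });
    plan.apply_witness(&Witness::Signature, &from);
    assert!(matches!(plan, Plan::Pay(_)));
}
```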
src/record_stage.rs
@@ -8,8 +8,9 @@
 use entry::Entry;
 use hash::Hash;
 use recorder::Recorder;
+use service::Service;
 use std::sync::mpsc::{channel, Receiver, RecvError, Sender, TryRecvError};
-use std::thread::{Builder, JoinHandle};
+use std::thread::{self, Builder, JoinHandle};
 use std::time::{Duration, Instant};
 use transaction::Transaction;

@@ -20,7 +21,7 @@ pub enum Signal {
 }

 pub struct RecordStage {
-    pub thread_hdl: JoinHandle<()>,
+    thread_hdl: JoinHandle<()>,
 }

 impl RecordStage {
@@ -31,7 +32,7 @@ impl RecordStage {
         start_hash: &Hash,
     ) -> (Self, Receiver<Vec<Entry>>) {
         let (entry_sender, entry_receiver) = channel();
-        let start_hash = start_hash.clone();
+        let start_hash = *start_hash;

         let thread_hdl = Builder::new()
             .name("solana-record-stage".to_string())
@@ -51,7 +52,7 @@
         tick_duration: Duration,
     ) -> (Self, Receiver<Vec<Entry>>) {
         let (entry_sender, entry_receiver) = channel();
-        let start_hash = start_hash.clone();
+        let start_hash = *start_hash;

         let thread_hdl = Builder::new()
             .name("solana-record-stage".to_string())
@@ -59,13 +60,14 @@
             let mut recorder = Recorder::new(start_hash);
             let start_time = Instant::now();
             loop {
-                if let Err(_) = Self::try_process_signals(
+                if Self::try_process_signals(
                     &mut recorder,
                     start_time,
                     tick_duration,
                     &signal_receiver,
                     &entry_sender,
-                ) {
+                ).is_err()
+                {
                     return;
                 }
                 recorder.hash();
@@ -124,11 +126,21 @@
     }
 }

+impl Service for RecordStage {
+    fn thread_hdls(self) -> Vec<JoinHandle<()>> {
+        vec![self.thread_hdl]
+    }
+
+    fn join(self) -> thread::Result<()> {
+        self.thread_hdl.join()
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;
     use ledger::Block;
-    use signature::{KeyPair, KeyPairUtil};
+    use signature::{Keypair, KeypairUtil};
     use std::sync::mpsc::channel;
     use std::thread::sleep;

@@ -173,8 +185,8 @@
         let (tx_sender, signal_receiver) = channel();
         let zero = Hash::default();
         let (_record_stage, entry_receiver) = RecordStage::new(signal_receiver, &zero);
-        let alice_keypair = KeyPair::new();
-        let bob_pubkey = KeyPair::new().pubkey();
+        let alice_keypair = Keypair::new();
+        let bob_pubkey = Keypair::new().pubkey();
         let tx0 = Transaction::new(&alice_keypair, bob_pubkey, 1, zero);
         let tx1 = Transaction::new(&alice_keypair, bob_pubkey, 2, zero);
         tx_sender
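`RecordStage` now implements `Service` like `Ncp` does, so every stage exposes its `JoinHandle`s and a uniform `join()`. The trait's shape on a toy stage (not the real one):

```rust
use std::thread::{self, Builder, JoinHandle};

// A stage hands over its thread handles and knows how to drain them.
trait Service {
    fn thread_hdls(self) -> Vec<JoinHandle<()>>;
    fn join(self) -> thread::Result<()>;
}

struct RecordStageLike {
    thread_hdl: JoinHandle<()>,
}

impl Service for RecordStageLike {
    fn thread_hdls(self) -> Vec<JoinHandle<()>> {
        vec![self.thread_hdl]
    }

    fn join(self) -> thread::Result<()> {
        self.thread_hdl.join()
    }
}

fn main() {
    let thread_hdl = Builder::new()
        .name("example-stage".to_string())
        .spawn(|| {})
        .unwrap();
    let stage = RecordStageLike { thread_hdl };
    stage.join().expect("join");
}
```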
Some files were not shown because too many files have changed in this diff.