Compare commits
504 Commits
SHA1 |
---|
6f3beb915c |
f399172f51 |
15f3b97492 |
de284466ff |
cb20ebc583 |
ceb5686175 |
55d59b1ac6 |
2b60b4e23a |
9bdc1b9727 |
87efdabcb3 |
7d5bb28128 |
ae433d6a34 |
e3c668acff |
5825501c79 |
7e84bb7a60 |
da1fd96d50 |
141e1e974d |
fc0d7f5982 |
f697632edb |
73797c789b |
036fcced31 |
1d3157fb80 |
0b11c2e119 |
96af892d95 |
c2983f824e |
88d6fea999 |
c23fa289c3 |
db35f220f7 |
982afa87a6 |
dccae18b53 |
53e86f2fa2 |
757dfd36a3 |
708add0e64 |
d8991ae2ca |
5f6cbe0cf8 |
f167b0c2c5 |
f784500fbb |
83df47323a |
c75d4abb0b |
5216a723b1 |
b801ca477d |
c830c604f4 |
0e66606c7f |
8707abe091 |
dc2a840985 |
2727067b94 |
6a8a494f5d |
a09d2e252a |
3e9c463ff1 |
46d50f5bde |
e8da903c6c |
ab10b7676a |
fa44a71d3e |
c86e9e8568 |
9e22e23ce6 |
835f29a178 |
9688f8fb64 |
df5cde74b0 |
231d5e5968 |
c2ba72fe1f |
d93786c86a |
bf15cad36b |
288ed7a8ea |
f07c038266 |
8eed120c38 |
5dbcb43abd |
dd1eefaf62 |
35de159d00 |
546a1e90d5 |
b033e1d904 |
96d6985895 |
58f220a3b7 |
a206f2570d |
2318ffc704 |
d4304eea28 |
06af9de753 |
7f71e1e09f |
bb7eccd542 |
b04c71acd9 |
bbf9ea89c5 |
846ad61941 |
8b41c415b7 |
197ba8b395 |
8d2a61a0c9 |
7512317243 |
bca2294655 |
abd55e4159 |
4a980568ac |
9d436fc5f8 |
ad331e6d56 |
d7e4e57548 |
b2067d2721 |
c2bbe4344e |
8567253833 |
ca7d4c42dd |
8ca514a5ca |
b605552079 |
74f5538bd3 |
ff57c7b7df |
ce8a4fa831 |
8331aab26a |
a6857dbaaa |
054298d957 |
cca240c279 |
89f17ceecf |
fe97857c62 |
75854cc234 |
9783d47fd1 |
38be61bd22 |
c64e2acf8b |
a200cedb4b |
5fec0ac82f |
999534248b |
fbc754ea25 |
ecea41a0ab |
1b6d472cb2 |
f0446c7e88 |
2a0025bb57 |
64d6d3015a |
90550c5b58 |
53cd2cdd9f |
1ac5d300a4 |
642c25bd3b |
df808dedd1 |
02f9cb415b |
e3cf1e6598 |
7681211c02 |
0ee935dd72 |
16772d3d51 |
1c38e40dee |
ceb5a76609 |
db2392a691 |
9c1b6288a4 |
575179be8e |
5b6ffaecc0 |
efc72b9572 |
5dc7177540 |
78a4b1287d |
c5001869f1 |
7c31f217d5 |
1152457691 |
3beb38ac8a |
8cbaa19d2e |
63d2b2eb42 |
e02da9a15a |
ae111a131c |
4402e1128f |
f55bb6d95c |
91741e20fa |
0514f5e573 |
637d403415 |
9fabd34156 |
039ed01abf |
ead0eb2754 |
c3db2df7eb |
ee6c15d2db |
715a3d50fe |
692b125391 |
5193819d8e |
210b9d346f |
4c4b0f551e |
6800ff1882 |
399a3852b1 |
e7d3069f58 |
40ea3e3e61 |
dc9a11bae0 |
906d18a709 |
a13058b6c4 |
98ee4b4672 |
7fd7310b96 |
28fa43d2a9 |
1a9e6ffdd7 |
c998199954 |
19792192a7 |
4aab413154 |
15a6179b97 |
83b308983f |
f2b1a04bca |
3e36e6dcf8 |
6feb6a27be |
c5ceb15e02 |
57e928d1d0 |
e2c68d8775 |
d173e6ef87 |
c230360f4c |
384b486b29 |
b72e91f681 |
46d9ba5ca0 |
a9240a42bf |
a7204d5353 |
f570ef1c66 |
ee0195d588 |
448b8b1c17 |
4d77fa900b |
7ccd771ccc |
e9f8b5b9db |
2366c1ebaf |
c5de237276 |
aa9bc57b4d |
11df477b20 |
7141750668 |
68675bd1ab |
19b3cacd60 |
bcfaf5d994 |
e9499ac5b8 |
7ff721e563 |
fda3b9bbd4 |
cf70e5ff2f |
a86618faf3 |
6693386bc5 |
4a8a0d03a3 |
2c9d288ca9 |
bb0aabae75 |
5cda0ed964 |
0aba74935b |
4eb666d4f9 |
d5e0cf81ff |
3ea784aff7 |
fef93958c8 |
cae88c90b1 |
1a8da769b6 |
2b259aeb41 |
de7e9b4b4c |
0f95031b99 |
d622742b84 |
ff254fbe5f |
05153e4884 |
2ece27ee3a |
a58df52205 |
2ea6f86199 |
7c5172a65e |
821e3bc3ca |
5dd2f737a3 |
c9bb5c1f5b |
5d936e5c8a |
e985c2e7d5 |
308b6c3371 |
ea7fa11b3e |
5a40ea3fd7 |
102510ac0e |
2158329058 |
bc484ffe5f |
6fcf4584d5 |
1adc83d148 |
647053e973 |
95b98b3845 |
f27613754a |
3e351b0b13 |
79ece53e3c |
f341b2ec10 |
167b079e29 |
7ded5a70be |
fc476ff979 |
c3279c8a00 |
e471ea41da |
552d4adff5 |
0c33c9e0d7 |
fae9fff24c |
79924e407c |
18d4da0076 |
416c141775 |
af1a2e83bc |
4cdb9a73f8 |
4433730610 |
71eb5bdecc |
029e2db2cf |
81db333490 |
c68ee0040d |
d96e267624 |
0b47404ba6 |
7f4844f426 |
50e1e0ae47 |
538c3b63e1 |
678b2870ff |
308d8c254d |
f11aa4a57b |
c52d4eca0b |
7672506b45 |
80a02359f7 |
ab3968e3bf |
42ebf9502a |
bd4fcf4ac6 |
4dceb73909 |
dd819cec3d |
5115cd7798 |
cbb8dee360 |
e0cdcb0973 |
a6a2a745ae |
297896bc49 |
f372840354 |
4c4659be13 |
1b79fe73a1 |
5fa072cf16 |
212874e155 |
75212f40e7 |
6fde65577e |
80ecef2832 |
edf2ffaf4e |
6c275ea5ef |
23ed65b339 |
9c7913ac9e |
8b01e6ac0b |
ff5854396a |
f0725b4900 |
327ba5301d |
dcce475f0b |
aa2104a21b |
0206020104 |
33bd1229d9 |
195098ca2b |
9daa7bdbe2 |
6bd18e18ea |
8f046cb1f8 |
735a0ee16d |
537be6a29d |
2b528e2225 |
75505bbd72 |
e1fc7444f9 |
940caf7876 |
fcdb0403ba |
caeb55d066 |
f11e60b801 |
54f2146429 |
f60ee87a52 |
9c06fe25df |
1eec8bf57f |
ddb24ebb61 |
a58c83d999 |
6656ec816c |
8d2bd43100 |
429ea98ace |
3d80926508 |
d713e3c2cf |
5d20d1ddbf |
257acdcda1 |
dab98dcd81 |
99653a4d04 |
dda563a169 |
782aa7b23b |
813e438d18 |
7a71adaa8c |
ce8796bc2e |
c7e1409f7b |
9de9379925 |
7d68b6edc8 |
48b5344586 |
686b7d3737 |
7c65e2fbfc |
96a6e09050 |
b3f823d544 |
ea21c7a43e |
437fb1a8d7 |
166099b9d9 |
c707b3d2e7 |
f7d294de90 |
4ecd0a0e45 |
7ebbaaeb2d |
cdcf59ede0 |
5d065133ef |
d403808564 |
3ffdca193d |
69688a18c7 |
7193bf28b6 |
637f890b91 |
009d5adcba |
52c55a0335 |
23428b0381 |
0e305bd7dd |
c068ca4cb7 |
6a8379109d |
120add0e82 |
b92ee51c2d |
cba3b35ac9 |
313fed375c |
1e63702c36 |
478ee9a1c4 |
eb1e5dcce4 |
84225beeef |
9cf0bd9b88 |
9d25d7611a |
1abefb2c7a |
bcc247f25f |
68ca9b2cb8 |
686e61d50c |
17d927ac74 |
966c55f58e |
d76d3162e5 |
d0a2d46923 |
a67f58e9a5 |
fece91c4d1 |
9d2d9a0189 |
6d3afc774a |
88646bf27d |
0696f9f497 |
b2ea2455e2 |
3f659a69fd |
2c62be951f |
2348733d6c |
cc229b535d |
7f810a29ff |
fc1dfd86d2 |
5deb34e5bd |
39df087902 |
6ff46540b6 |
dbab8792e4 |
4eb676afaa |
a6cb2f1bcf |
28af9a39b4 |
8cf5620b87 |
85d6627ee6 |
611a005ec9 |
90b3b90391 |
fd4f294fd3 |
145274c001 |
df5d6693f6 |
05c5603879 |
c2c48a5c3c |
4af556f70e |
8bad411962 |
5b0418793e |
4423ee6902 |
f0c39cc84d |
3d45b04da8 |
9e2f26a5d2 |
a016f6e82e |
eb3e5fd204 |
72282dc493 |
47a22c66b4 |
fb11d8a909 |
7d872f52f4 |
d882bfe65c |
103584ef27 |
1fb537deb9 |
2bd48b4207 |
f5a6db3dc0 |
dd0c1ac5b2 |
d8c9655128 |
09f2d273c5 |
f6eb85e7a3 |
0d85b43901 |
fdf94a77b4 |
af40ab0c04 |
015b7a1ddb |
ab3e460e64 |
194a84c8dd |
51d932dad1 |
561d31cc13 |
d6a8e437bb |
4631af5011 |
5d28729b2a |
8c08e614b7 |
e76bf1438b |
4e177877c9 |
60848b9d95 |
79b3564a26 |
1e8c36c555 |
94d015b089 |
cfb3736372 |
2b77f62233 |
e8d23c17ca |
a7ed2a304a |
0025b42c26 |
3f7f492cc0 |
490d7875dd |
4240edf710 |
30e50d0f70 |
751c1eba32 |
d349d6aa98 |
1f9152dc72 |
1b9d50172b |
084dbd7f58 |
58c0508f94 |
dcf82c024f |
b253ed0c46 |
61db53fc19 |
b0ead086a1 |
a3b22d0d33 |
28d24497a3 |
05cea4c1da |
260f5edfd6 |
7105136595 |
54db379bf2 |
effbf0b978 |
8e7a2a9587 |
18e6ff4167 |
fa1cdaa91a |
b538b67524 |
2b0f6355af |
11b9a0323d |
710fa822a0 |
aaf6ce5aea |
34ea483736 |
a3ff40476e |
4cca3ff454 |
3d9acdd970 |
428f220b88 |
10add6a8ac |
f06a8dceda |
545f4f1c87 |
77543d83ff |
eb6a30cb7c |
97372b8e63 |
cea29ed772 |
b5006b8f2b |
@@ -40,11 +40,6 @@ else
  point_fields="${point_fields// /\\ }" # Escape spaces

  point="job_stats,$point_tags $point_fields"
  echo "Influx data point: $point"
  if [[ -n $INFLUX_USERNAME && -n $INFLUX_PASSWORD ]]; then
    echo "https://metrics.solana.com:8086/write?db=ci&u=${INFLUX_USERNAME}&p=${INFLUX_PASSWORD}" \
      | xargs curl -XPOST --data-binary "$point"
  else
    echo Influx user credentials not found
  fi

  multinode-demo/metrics_write_datapoint.sh "$point" || true
fi
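The hunk above drops the inline InfluxDB upload in favor of the shared multinode-demo/metrics_write_datapoint.sh helper. A minimal sketch of what such a helper can look like, reusing only the endpoint and credential variables shown above (illustrative only, not the repository's actual implementation):

```bash
#!/bin/bash -e
#
# Sketch: write one InfluxDB line-protocol point; best-effort, never fails CI.
point="$1"  # e.g. "job_stats,pipeline=ci duration=42"

if [[ -z $INFLUX_USERNAME || -z $INFLUX_PASSWORD ]]; then
  echo "Influx user credentials not found" >&2
  exit 0
fi

curl --max-time 5 -XPOST --data-binary "$point" \
  "https://metrics.solana.com:8086/write?db=ci&u=${INFLUX_USERNAME}&p=${INFLUX_PASSWORD}"
```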
.gitignore (vendored, 5 changes)

@@ -1,5 +1,6 @@
Cargo.lock
/target/

**/*.rs.bk
.cargo

@@ -9,3 +10,7 @@ Cargo.lock
/config-drone/
/config-validator/
/config-client/
/multinode-demo/test/config-client/

# test temp files, ledgers, etc.
/farf/
CONTRIBUTING.md (new file, 53 lines)

@@ -0,0 +1,53 @@
Solana Coding Guidelines
===

The goal of these guidelines is to improve developer productivity by allowing developers to
jump into any file in the codebase and not need to adapt to inconsistencies in how the code is
written. The codebase should appear as if it had been authored by a single developer. If you
don't agree with a convention, submit a PR patching this document and let's discuss! Once
the PR is accepted, *all* code should be updated as soon as possible to reflect the new
conventions.

Rust coding conventions
---

* All Rust code is formatted using the latest version of `rustfmt`. Once installed, it will be
  updated automatically when you update the compiler with `rustup`.

* All Rust code is linted with Clippy. If you'd prefer to ignore its advice, do so explicitly:

  ```rust
  #[cfg_attr(feature = "cargo-clippy", allow(too_many_arguments))]
  ```

  Note: Clippy defaults can be overridden in the top-level file `.clippy.toml`.

* For variable names, when in doubt, spell it out. The mapping from type names to variable names
  is to lowercase the type name, putting an underscore before each capital letter. Variable names
  should *not* be abbreviated unless being used as closure arguments and the brevity improves
  readability. When a function has multiple instances of the same type, qualify each with a
  prefix and underscore (e.g. alice_keypair) or a numeric suffix (e.g. tx0).

* For function and method names, use `<verb>_<subject>`. For unit tests, that verb should
  always be `test` and for benchmarks the verb should always be `bench`. Avoid namespacing
  function names with some arbitrary word. Avoid abbreviating words in function names.

* As they say, "When in Rome, do as the Romans do." A good patch should acknowledge the coding
  conventions of the code that surrounds it, even in the case where that code has not yet been
  updated to meet the conventions described here.

Terminology
---

Inventing new terms is allowed, but should only be done when the term is widely used and
understood. Avoid introducing new 3-letter terms, which can be confused with 3-letter acronyms.

Some terms we currently use regularly in the codebase:

* fullnode: n. A fully participating network node.
* hash: n. A SHA-256 hash.
* keypair: n. An Ed25519 key-pair, containing a public and private key.
* pubkey: n. The public key of an Ed25519 key-pair.
* sigverify: v. To verify an Ed25519 digital signature.
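The naming conventions above are easiest to see in a small example (hypothetical code, not taken from the repository): variable names are lowercased type names, functions follow `<verb>_<subject>`, and the unit-test verb is always `test`.

```rust
struct Keypair;
struct Transaction;

// <verb>_<subject>: a Keypair lands in `keypair`; multiple values of one
// type get a prefix (alice_keypair) or a numeric suffix (tx0).
fn sign_transaction(_alice_keypair: &Keypair, _tx0: &Transaction) {}

#[test]
fn test_sign_transaction() {
    // Unit-test names reuse the function name with the `test` verb.
    sign_transaction(&Keypair, &Transaction);
}
```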
Cargo.toml (81 changes)

@@ -1,7 +1,7 @@
[package]
name = "solana"
description = "Blockchain, Rebuilt for Scale"
version = "0.7.0-beta"
version = "0.7.2"
documentation = "https://docs.rs/solana"
homepage = "http://solana.com/"
readme = "README.md"
@@ -10,25 +10,29 @@ authors = [
    "Anatoly Yakovenko <anatoly@solana.com>",
    "Greg Fitzgerald <greg@solana.com>",
    "Stephen Akridge <stephen@solana.com>",
    "Michael Vines <mvines@solana.com>",
    "Rob Walker <rob@solana.com>",
    "Pankaj Garg <pankaj@solana.com>",
    "Tyera Eulberg <tyera@solana.com>",
]
license = "Apache-2.0"

[[bin]]
name = "solana-client-demo"
path = "src/bin/client-demo.rs"
name = "solana-bench-tps"
path = "src/bin/bench-tps.rs"

[[bin]]
name = "solana-wallet"
path = "src/bin/wallet.rs"
name = "solana-bench-streamer"
path = "src/bin/bench-streamer.rs"

[[bin]]
name = "solana-drone"
path = "src/bin/drone.rs"

[[bin]]
name = "solana-fullnode"
path = "src/bin/fullnode.rs"

[[bin]]
name = "solana-keygen"
path = "src/bin/keygen.rs"

[[bin]]
name = "solana-fullnode-config"
path = "src/bin/fullnode-config.rs"
@@ -38,12 +42,16 @@ name = "solana-genesis"
path = "src/bin/genesis.rs"

[[bin]]
name = "solana-mint"
path = "src/bin/mint.rs"
name = "solana-ledger-tool"
path = "src/bin/ledger-tool.rs"

[[bin]]
name = "solana-drone"
path = "src/bin/drone.rs"
name = "solana-keygen"
path = "src/bin/keygen.rs"

[[bin]]
name = "solana-wallet"
path = "src/bin/wallet.rs"

[badges]
codecov = { repository = "solana-labs/solana", branch = "master", service = "github" }
@@ -55,36 +63,37 @@ cuda = []
erasure = []

[dependencies]
rayon = "1.0.0"
sha2 = "0.7.0"
atty = "0.2"
bincode = "1.0.0"
bs58 = "0.2.0"
byteorder = "1.2.1"
chrono = { version = "0.4.0", features = ["serde"] }
clap = "2.31"
dirs = "1.0.2"
env_logger = "0.5.12"
futures = "0.1.21"
generic-array = { version = "0.11.1", default-features = false, features = ["serde"] }
getopts = "0.2"
influx_db_client = "0.3.4"
itertools = "0.7.8"
libc = "0.2.1"
log = "0.4.2"
matches = "0.1.6"
pnet_datalink = "0.21.0"
rand = "0.5.1"
rayon = "1.0.0"
reqwest = "0.8.6"
ring = "0.13.2"
sha2 = "0.7.0"
serde = "1.0.27"
serde_derive = "1.0.27"
serde_json = "1.0.10"
ring = "0.12.1"
untrusted = "0.5.1"
bincode = "1.0.0"
chrono = { version = "0.4.0", features = ["serde"] }
log = "0.4.2"
env_logger = "0.5.10"
matches = "0.1.6"
byteorder = "1.2.1"
libc = "0.2.1"
getopts = "0.2"
atty = "0.2"
rand = "0.5.1"
pnet_datalink = "0.21.0"
sys-info = "0.5.6"
tokio = "0.1"
tokio-codec = "0.1"
tokio-core = "0.1.17"
tokio-io = "0.1"
itertools = "0.7.8"
bs58 = "0.2.0"
p2p = "0.5.2"
futures = "0.1.21"
clap = "2.31"
reqwest = "0.8.6"
influx_db_client = "0.3.4"
untrusted = "0.6.2"

[dev-dependencies]
criterion = "0.2"
@@ -106,5 +115,5 @@ name = "signature"
harness = false

[[bench]]
name = "streamer"
name = "sigverify"
harness = false
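With the `[[bin]]` targets renamed above, the demo binaries build and run under their new names; for example (standard Cargo flags, shown for illustration):

```bash
$ cargo build --release --bin solana-bench-tps
$ cargo build --release --bin solana-bench-streamer
```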
README.md (48 changes)

@@ -47,7 +47,7 @@ $ source $HOME/.cargo/env
Now checkout the code from github:

```bash
$ git clone https://github.com/solana-labs/solana.git
$ git clone https://github.com/solana-labs/solana.git
$ cd solana
```

@@ -71,6 +71,20 @@ These files can be generated by running the following script.
$ ./multinode-demo/setup.sh
```

Drone
---

In order for the leader, client and validators to work, we'll need to
spin up a drone to give out some test tokens. The drone delivers Milton
Friedman-style "air drops" (free tokens to requesting clients) to be used in
test transactions.

Start the drone on the leader node with:

```bash
$ ./multinode-demo/drone.sh
```

Singlenode Testnet
---

@@ -84,16 +98,9 @@ Now start the server:
$ ./multinode-demo/leader.sh
```

To run a performance-enhanced fullnode on Linux,
[CUDA 9.2](https://developer.nvidia.com/cuda-downloads) must be installed on
your system:
```bash
$ ./fetch-perf-libs.sh
$ SOLANA_CUDA=1 ./multinode-demo/leader.sh
```

Wait a few seconds for the server to initialize. It will print "Ready." when it's ready to
receive transactions.
receive transactions. The leader will request some tokens from the drone if it doesn't have any.
The drone does not need to be running for subsequent leader starts.

Multinode Testnet
---

@@ -104,15 +111,18 @@ To run a multinode testnet, after starting a leader node, spin up some validator
$ ./multinode-demo/validator.sh ubuntu@10.0.1.51:~/solana 10.0.1.51
```

To run a performance-enhanced fullnode on Linux,
To run a performance-enhanced leader or validator (on Linux),
[CUDA 9.2](https://developer.nvidia.com/cuda-downloads) must be installed on
your system:
```bash
$ ./fetch-perf-libs.sh
$ SOLANA_CUDA=1 ./multinode-demo/leader.sh ubuntu@10.0.1.51:~/solana 10.0.1.51
$ SOLANA_CUDA=1 ./multinode-demo/leader.sh
$ SOLANA_CUDA=1 ./multinode-demo/validator.sh ubuntu@10.0.1.51:~/solana 10.0.1.51

```

Testnet Client Demo
---

@@ -146,7 +156,7 @@ $ sudo snap install solana --edge --devmode
Once installed the usual Solana programs will be available as `solana.*` instead
of `solana-*`. For example, `solana.fullnode` instead of `solana-fullnode`.

Update to the latest version at any time with
Update to the latest version at any time with:
```bash
$ snap info solana
$ sudo snap refresh solana --devmode
@@ -156,8 +166,14 @@ $ sudo snap refresh solana --devmode
The snap supports running a leader, validator or leader+drone node as a system
daemon.

Run `sudo snap get solana` to view the current daemon configuration, and
`sudo snap logs -f solana` to view the daemon logs.
Run `sudo snap get solana` to view the current daemon configuration. To view
daemon logs:
1. Run `sudo snap logs -n=all solana` to view the daemon initialization log
2. Runtime logging can be found under `/var/snap/solana/current/leader/`,
`/var/snap/solana/current/validator/`, or `/var/snap/solana/current/drone/` depending
on which `mode=` was selected. Within each log directory the file `current`
contains the latest log, and the files `*.s` (if present) contain older rotated
logs.

Disable the daemon at any time by running:
```bash
@@ -276,7 +292,7 @@ to see the debug and info sections for streamer and server respectively. General
we are using debug for infrequent debug messages, trace for potentially frequent messages and
info for performance-related logging.

Attaching to a running process with gdb
Attaching to a running process with gdb:

```
$ sudo gdb

benches/bank.rs

@@ -10,7 +10,7 @@ use rayon::prelude::*;
use solana::bank::*;
use solana::hash::hash;
use solana::mint::Mint;
use solana::signature::{KeyPair, KeyPairUtil};
use solana::signature::{Keypair, KeypairUtil};
use solana::transaction::Transaction;

fn bench_process_transaction(bencher: &mut Bencher) {
@@ -22,7 +22,7 @@ fn bench_process_transaction(bencher: &mut Bencher) {
        .into_par_iter()
        .map(|i| {
            // Seed the 'from' account.
            let rando0 = KeyPair::new();
            let rando0 = Keypair::new();
            let tx = Transaction::new(&mint.keypair(), rando0.pubkey(), 10_000, mint.last_id());
            assert!(bank.process_transaction(&tx).is_ok());

@@ -30,9 +30,9 @@ fn bench_process_transaction(bencher: &mut Bencher) {
            let last_id = hash(&serialize(&i).unwrap()); // Unique hash
            bank.register_entry_id(&last_id);

            let rando1 = KeyPair::new();
            let rando1 = Keypair::new();
            let tx = Transaction::new(&rando0, rando1.pubkey(), 1, last_id);
            assert!(bank.process_transaction(&tx.clone()).is_ok());
            assert!(bank.process_transaction(&tx).is_ok());

            // Finally, return the transaction to the benchmark.
            tx
@@ -58,5 +58,9 @@ fn bench(criterion: &mut Criterion) {
    });
}

criterion_group!(benches, bench);
criterion_group!(
    name = benches;
    config = Criterion::default().sample_size(2);
    targets = bench
);
criterion_main!(benches);

benches/banking_stage.rs

@@ -11,7 +11,7 @@ use solana::banking_stage::BankingStage;
use solana::mint::Mint;
use solana::packet::{to_packets_chunked, PacketRecycler};
use solana::record_stage::Signal;
use solana::signature::{KeyPair, KeyPairUtil};
use solana::signature::{Keypair, KeypairUtil};
use solana::transaction::Transaction;
use std::iter;
use std::sync::mpsc::{channel, Receiver};
@@ -23,7 +23,7 @@ use std::sync::Arc;
// use hash::hash;
// use mint::Mint;
// use rayon::prelude::*;
// use signature::{KeyPair, KeyPairUtil};
// use signature::{Keypair, KeypairUtil};
// use std::collections::HashSet;
// use std::time::Instant;
// use transaction::Transaction;
@@ -49,11 +49,11 @@ use std::sync::Arc;
// }
//
//     // Seed the 'from' account.
//     let rando0 = KeyPair::new();
//     let rando0 = Keypair::new();
//     let tx = Transaction::new(&mint.keypair(), rando0.pubkey(), 1_000, last_id);
//     bank.process_transaction(&tx).unwrap();
//
//     let rando1 = KeyPair::new();
//     let rando1 = Keypair::new();
//     let tx = Transaction::new(&rando0, rando1.pubkey(), 2, last_id);
//     bank.process_transaction(&tx).unwrap();
//
@@ -79,12 +79,15 @@ use std::sync::Arc;
//     println!("{} tps", tps);
// }

fn check_txs(batches: usize, receiver: &Receiver<Signal>, ref_tx_count: usize) {
fn check_txs(receiver: &Receiver<Signal>, ref_tx_count: usize) {
    let mut total = 0;
    for _ in 0..batches {
    loop {
        let signal = receiver.recv().unwrap();
        if let Signal::Transactions(transactions) = signal {
            total += transactions.len();
            if total >= ref_tx_count {
                break;
            }
        } else {
            assert!(false);
        }
@@ -99,9 +102,9 @@ fn bench_banking_stage_multi_accounts(bencher: &mut Bencher) {
    let num_dst_accounts = 8 * 1024;
    let num_src_accounts = 8 * 1024;

    let srckeys: Vec<_> = (0..num_src_accounts).map(|_| KeyPair::new()).collect();
    let srckeys: Vec<_> = (0..num_src_accounts).map(|_| Keypair::new()).collect();
    let dstkeys: Vec<_> = (0..num_dst_accounts)
        .map(|_| KeyPair::new().pubkey())
        .map(|_| Keypair::new().pubkey())
        .collect();

    let transactions: Vec<_> = (0..tx)
@@ -147,7 +150,7 @@ fn bench_banking_stage_multi_accounts(bencher: &mut Bencher) {
        BankingStage::process_packets(&bank, &verified_receiver, &signal_sender, &packet_recycler)
            .unwrap();

        check_txs(verified_setup_len, &signal_receiver, num_src_accounts);
        check_txs(&signal_receiver, num_src_accounts);

        let verified: Vec<_> = to_packets_chunked(&packet_recycler, &transactions.clone(), 192)
            .into_iter()
@@ -162,7 +165,7 @@ fn bench_banking_stage_multi_accounts(bencher: &mut Bencher) {
        BankingStage::process_packets(&bank, &verified_receiver, &signal_sender, &packet_recycler)
            .unwrap();

        check_txs(verified_len, &signal_receiver, tx);
        check_txs(&signal_receiver, tx);
    });
}

@@ -172,7 +175,7 @@ fn bench_banking_stage_single_from(bencher: &mut Bencher) {
    let mut pubkeys = Vec::new();
    let num_keys = 8;
    for _ in 0..num_keys {
        pubkeys.push(KeyPair::new().pubkey());
        pubkeys.push(Keypair::new().pubkey());
    }

    let transactions: Vec<_> = (0..tx)
@@ -205,7 +208,7 @@ fn bench_banking_stage_single_from(bencher: &mut Bencher) {
        BankingStage::process_packets(&bank, &verified_receiver, &signal_sender, &packet_recycler)
            .unwrap();

        check_txs(verified_len, &signal_receiver, tx);
        check_txs(&signal_receiver, tx);
    });
}

benches/ledger.rs

@@ -6,14 +6,14 @@ use criterion::{Bencher, Criterion};
use solana::hash::{hash, Hash};
use solana::ledger::{next_entries, reconstruct_entries_from_blobs, Block};
use solana::packet::BlobRecycler;
use solana::signature::{KeyPair, KeyPairUtil};
use solana::signature::{Keypair, KeypairUtil};
use solana::transaction::Transaction;
use std::collections::VecDeque;

fn bench_block_to_blobs_to_block(bencher: &mut Bencher) {
    let zero = Hash::default();
    let one = hash(&zero);
    let keypair = KeyPair::new();
    let one = hash(&zero.as_ref());
    let keypair = Keypair::new();
    let tx0 = Transaction::new(&keypair, keypair.pubkey(), 1, one);
    let transactions = vec![tx0; 10];
    let entries = next_entries(&zero, 1, transactions);
@@ -32,5 +32,9 @@ fn bench(criterion: &mut Criterion) {
    });
}

criterion_group!(benches, bench);
criterion_group!(
    name = benches;
    config = Criterion::default().sample_size(2);
    targets = bench
);
criterion_main!(benches);

benches/signature.rs

@@ -6,7 +6,7 @@ use criterion::{Bencher, Criterion};
use solana::signature::GenKeys;

fn bench_gen_keys(b: &mut Bencher) {
    let rnd = GenKeys::new([0u8; 32]);
    let mut rnd = GenKeys::new([0u8; 32]);
    b.iter(|| rnd.gen_n_keypairs(1000));
}

@@ -16,5 +16,9 @@ fn bench(criterion: &mut Criterion) {
    });
}

criterion_group!(benches, bench);
criterion_group!(
    name = benches;
    config = Criterion::default().sample_size(2);
    targets = bench
);
criterion_main!(benches);
benches/sigverify.rs (new file, 36 lines)

@@ -0,0 +1,36 @@
#[macro_use]
extern crate criterion;
extern crate bincode;
extern crate rayon;
extern crate solana;

use criterion::{Bencher, Criterion};
use solana::packet::{to_packets, PacketRecycler};
use solana::sigverify;
use solana::transaction::test_tx;

fn bench_sigverify(bencher: &mut Bencher) {
    let tx = test_tx();

    // generate packet vector
    let packet_recycler = PacketRecycler::default();
    let batches = to_packets(&packet_recycler, &vec![tx; 128]);

    // verify packets
    bencher.iter(|| {
        let _ans = sigverify::ed25519_verify(&batches);
    })
}

fn bench(criterion: &mut Criterion) {
    criterion.bench_function("bench_sigverify", |bencher| {
        bench_sigverify(bencher);
    });
}

criterion_group!(
    name = benches;
    config = Criterion::default().sample_size(2);
    targets = bench
);
criterion_main!(benches);
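The new bench is wired up through the `sigverify` `[[bench]]` target declared in Cargo.toml, so it can be run on its own or together with the rest (as `ci/test-bench.sh` below does):

```bash
$ cargo bench --bench sigverify   # just the signature-verification bench
$ cargo bench --verbose           # every bench, as CI runs them
```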
ci/audit.sh (new executable file, 32 lines)

@@ -0,0 +1,32 @@
#!/bin/bash -e
#
# Audits project dependencies for security vulnerabilities
#

cd "$(dirname "$0")/.."

export RUST_BACKTRACE=1
rustc --version
cargo --version

_() {
  echo "--- $*"
  "$@"
}

maybe_cargo_install() {
  for cmd in "$@"; do
    set +e
    cargo "$cmd" --help > /dev/null 2>&1
    declare exitcode=$?
    set -e
    if [[ $exitcode -eq 101 ]]; then
      _ cargo install cargo-"$cmd"
    fi
  done
}

maybe_cargo_install audit tree

_ cargo tree
_ cargo audit

(Buildkite snap pipeline)

@@ -1,4 +1,4 @@
steps:
  - command: "ci/snap.sh"
    timeout_in_minutes: 20
    timeout_in_minutes: 40
    name: "snap [public]"

(Buildkite CI pipeline)

@@ -1,13 +1,18 @@
steps:
  - command: "ci/docker-run.sh rust ci/test-stable.sh"
  - command: "ci/docker-run.sh solanalabs/rust:1.28.0 ci/test-stable.sh"
    name: "stable [public]"
    env:
      CARGO_TARGET_CACHE_NAME: "stable"
    timeout_in_minutes: 20
    timeout_in_minutes: 30
  - command: "ci/docker-run.sh solanalabs/rust:1.28.0 ci/test-bench.sh"
    name: "bench [public]"
    env:
      CARGO_TARGET_CACHE_NAME: "stable"
    timeout_in_minutes: 30
  - command: "ci/shellcheck.sh"
    name: "shellcheck [public]"
    timeout_in_minutes: 20
  - command: "ci/docker-run.sh solanalabs/rust-nightly ci/test-nightly.sh"
  - command: "ci/docker-run.sh solanalabs/rust-nightly:2018-08-14 ci/test-nightly.sh"
    name: "nightly [public]"
    env:
      CARGO_TARGET_CACHE_NAME: "nightly"
@@ -17,12 +22,15 @@ steps:
    env:
      CARGO_TARGET_CACHE_NAME: "stable-perf"
    timeout_in_minutes: 20
    retry:
      automatic:
        - exit_status: "*"
          limit: 2
    agents:
      - "queue=cuda"
  - command: "ci/test-large-network.sh || true"
    name: "large-network [public] [ignored]"
    env:
      CARGO_TARGET_CACHE_NAME: "stable"
    timeout_in_minutes: 20
    agents:
      - "queue=large"
  - command: "ci/pr-snap.sh"
    timeout_in_minutes: 20
    name: "snap [public]"
@@ -30,9 +38,12 @@ steps:
  - command: "ci/publish-crate.sh"
    timeout_in_minutes: 20
    name: "publish crate [public]"
  - command: "ci/hoover.sh"
    timeout_in_minutes: 20
    name: "clean agent [public]"
  - trigger: "solana-snap"
    branches: "!pull/*"
    async: true
    build:
      message: "${BUILDKITE_MESSAGE}"
      commit: "${BUILDKITE_COMMIT}"
      branch: "${BUILDKITE_BRANCH}"
      env:
        TRIGGERED_BUILDKITE_TAG: "${BUILDKITE_TAG}"

ci/docker-run.sh

@@ -22,11 +22,14 @@ shift
ARGS=(
  --workdir /solana
  --volume "$PWD:/solana"
  --volume "$HOME:/home"
  --env "CARGO_HOME=/home/.cargo"
  --rm
)

if [[ -n $CI ]]; then
  ARGS+=(--volume "$HOME:/home")
  ARGS+=(--env "CARGO_HOME=/home/.cargo")
fi

# kcov tries to set the personality of the binary which docker
# doesn't allow by default.
ARGS+=(--security-opt "seccomp=unconfined")
@@ -38,7 +41,10 @@ fi

# Environment variables to propagate into the container
ARGS+=(
  --env BUILDKITE
  --env BUILDKITE_AGENT_ACCESS_TOKEN
  --env BUILDKITE_BRANCH
  --env BUILDKITE_JOB_ID
  --env BUILDKITE_TAG
  --env CODECOV_TOKEN
  --env CRATES_IO_TOKEN

(rust-nightly CI image Dockerfile)

@@ -1,3 +1,9 @@
FROM rustlang/rust:nightly

RUN cargo install --force clippy cargo-cov
RUN rustup component add clippy-preview --toolchain=nightly && \
    echo deb http://ftp.debian.org/debian stretch-backports main >> /etc/apt/sources.list && \
    apt update && \
    apt install -y \
      llvm-6.0 \
      && \
    rm -rf /var/lib/apt/lists/*
ci/docker-rust/Dockerfile (new file, 15 lines)

@@ -0,0 +1,15 @@
FROM rust:1.28

RUN apt update && \
    apt-get install apt-transport-https && \
    echo deb https://apt.buildkite.com/buildkite-agent stable main > /etc/apt/sources.list.d/buildkite-agent.list && \
    apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 32A37959C2FA5C3C99EFBC32A79206696452D198 && \
    apt update && \
    apt install -y \
      buildkite-agent \
      rsync \
      sudo \
      cmake \
      && \
    rustup component add rustfmt-preview && \
    rm -rf /var/lib/apt/lists/*
ci/docker-rust/README.md (new file, 6 lines)

@@ -0,0 +1,6 @@
Docker image containing Rust and some preinstalled packages used in CI.

This image may be manually updated by running `./build.sh` if you are a member
of the [Solana Labs](https://hub.docker.com/u/solanalabs/) Docker Hub
organization, but it is also automatically updated periodically by
[this automation](https://buildkite.com/solana-labs/solana-ci-docker-rust).
ci/docker-rust/build.sh (new executable file, 6 lines)

@@ -0,0 +1,6 @@
#!/bin/bash -ex

cd "$(dirname "$0")"

docker build -t solanalabs/rust .
docker push solanalabs/rust

(snapcraft CI image Dockerfile)

@@ -2,6 +2,6 @@ FROM snapcraft/xenial-amd64

# Update snapcraft to latest version
RUN apt-get update -qq \
  && apt-get install -y snapcraft \
  && apt-get install -y snapcraft daemontools \
  && rm -rf /var/lib/apt/lists/* \
  && snapcraft --version
ci/hoover.sh (24 changes)

@@ -3,6 +3,7 @@
# Regular maintenance performed on a buildkite agent to control disk usage
#


echo --- Delete all exited containers first
(
  set -x
@@ -45,6 +46,29 @@ echo "--- Delete /tmp files older than 1 day owned by $(whoami)"
  find /tmp -maxdepth 1 -user "$(whoami)" -mtime +1 -print0 | xargs -0 rm -rf
)

echo --- Deleting stale buildkite agent build directories
if [[ ! -d ../../../../builds/$BUILDKITE_AGENT_NAME ]]; then
  # We might not be where we think we are, do nothing
  echo Warning: Skipping flush of stale agent build directories
  echo "  PWD=$PWD"
else
  # NOTE: this will be horribly broken if we ever decide to run multiple
  #       agents on the same machine.
  (
    for keepDir in "$BUILDKITE_PIPELINE_SLUG" \
        "$BUILDKITE_ORGANIZATION_SLUG" \
        "$BUILDKITE_AGENT_NAME"; do
      cd .. || exit 1
      for dir in *; do
        if [[ -d $dir && $dir != "$keepDir" ]]; then
          echo "Removing $dir"
          rm -rf "${dir:?}"/
        fi
      done
    done
  )
fi

echo --- System Status
(
  set -x
ci/install-earlyoom.sh (new executable file, 32 lines)

@@ -0,0 +1,32 @@
#!/bin/bash -x
#
# Install EarlyOOM
#

[[ $(uname) = Linux ]] || exit 1

# 64 - enable signalling of processes (term, kill, oom-kill)
# TODO: This setting will not persist across reboots
sysrq=$(( $(cat /proc/sys/kernel/sysrq) | 64 ))
sudo sysctl -w kernel.sysrq=$sysrq

if command -v earlyoom; then
  sudo systemctl status earlyoom
  exit 0
fi

wget http://ftp.us.debian.org/debian/pool/main/e/earlyoom/earlyoom_1.1-2_amd64.deb
sudo apt install --quiet --yes ./earlyoom_1.1-2_amd64.deb

cat > earlyoom <<OOM
# use the kernel OOM killer, trigger at 20% available RAM,
EARLYOOM_ARGS="-k -m 20"
OOM
sudo cp earlyoom /etc/default/
rm earlyoom

sudo systemctl stop earlyoom
sudo systemctl enable earlyoom
sudo systemctl start earlyoom

exit 0
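The `sysrq` line ORs bit 64 ("enable signalling of processes") into the existing bitmask rather than overwriting it. For example (the starting value 176 is illustrative):

```bash
$ current=$(cat /proc/sys/kernel/sysrq)  # e.g. 176
$ echo $(( current | 64 ))               # 176 | 64 = 240, bit 64 now set
```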
ci/localnet-sanity.sh (new executable file, 86 lines)

@@ -0,0 +1,86 @@
#!/bin/bash -e
#
# Perform a quick sanity test on a leader, drone, validator and client running
# locally on the same machine
#

cd "$(dirname "$0")"/..
source ci/upload_ci_artifact.sh
source multinode-demo/common.sh

./multinode-demo/setup.sh

backgroundCommands="drone leader validator validator-x"
pids=()

for cmd in $backgroundCommands; do
  echo "--- Start $cmd"
  rm -f log-"$cmd".txt
  ./multinode-demo/"$cmd".sh > log-"$cmd".txt 2>&1 &
  declare pid=$!
  pids+=("$pid")
  echo "pid: $pid"
done

killBackgroundCommands() {
  set +e
  for pid in "${pids[@]}"; do
    if kill "$pid"; then
      wait "$pid"
    else
      echo -e "^^^ +++\\nWarning: unable to kill $pid"
    fi
  done
  set -e
  pids=()
}

shutdown() {
  exitcode=$?
  killBackgroundCommands

  set +e

  echo "--- Upload artifacts"
  for cmd in $backgroundCommands; do
    declare logfile=log-$cmd.txt
    upload_ci_artifact "$logfile"
    tail "$logfile"
  done

  exit $exitcode
}

trap shutdown EXIT INT

set -e

flag_error() {
  echo Failed
  echo "^^^ +++"
  exit 1
}

echo "--- Wallet sanity"
(
  set -x
  multinode-demo/test/wallet-sanity.sh
) || flag_error

echo "--- Node count"
(
  set -x
  ./multinode-demo/client.sh "$PWD" 3 -c --addr 127.0.0.1
) || flag_error

killBackgroundCommands

echo "--- Ledger verification"
(
  set -x
  $solana_ledger_tool --ledger "$SOLANA_CONFIG_DIR"/ledger verify
) || flag_error

echo +++
echo Ok
exit 0

ci/shellcheck.sh

@@ -5,7 +5,12 @@
cd "$(dirname "$0")/.."

set -x
find . -name "*.sh" -not -regex ".*/.cargo/.*" -not -regex ".*/node_modules/.*" -print0 \
find . -name "*.sh" \
  -not -regex ".*/.cargo/.*" \
  -not -regex ".*/node_modules/.*" \
  -not -regex ".*/target/.*" \
  -print0 \
  | xargs -0 \
      ci/docker-run.sh koalaman/shellcheck --color=always --external-sources --shell=bash

exit 0
ci/snap.sh (18 changes)

@@ -7,7 +7,13 @@ if [[ -z $BUILDKITE_BRANCH ]] || ./ci/is-pr.sh; then
  DRYRUN="echo"
fi

if [[ -z "$BUILDKITE_TAG" ]]; then
# BUILDKITE_TAG is the normal environment variable set by Buildkite. However
# when this script is run from a triggered pipeline, TRIGGERED_BUILDKITE_TAG is
# used instead of BUILDKITE_TAG (due to Buildkite limitations that prevent
# BUILDKITE_TAG from propagating through to triggered pipelines)
if [[ -n "$BUILDKITE_TAG" || -n "$TRIGGERED_BUILDKITE_TAG" ]]; then
  SNAP_CHANNEL=stable
elif [[ $BUILDKITE_BRANCH = master ]]; then
  SNAP_CHANNEL=edge
else
  SNAP_CHANNEL=beta
@@ -33,11 +39,17 @@ fi

set -x

echo --- build
echo --- checking for multilog
if [[ ! -x /usr/bin/multilog ]]; then
  echo "multilog not found, install with: sudo apt-get install -y daemontools"
  exit 1
fi

echo --- build: $SNAP_CHANNEL channel
snapcraft

source ci/upload_ci_artifact.sh
upload_ci_artifact solana_*.snap

echo --- publish
echo --- publish: $SNAP_CHANNEL channel
$DRYRUN snapcraft push solana_*.snap --release $SNAP_CHANNEL
ci/test-bench.sh (new executable file, 13 lines)

@@ -0,0 +1,13 @@
#!/bin/bash -e

cd "$(dirname "$0")/.."

ci/version-check.sh stable
export RUST_BACKTRACE=1

_() {
  echo "--- $*"
  "$@"
}

_ cargo bench --verbose
ci/test-large-network.sh (new executable file, 45 lines)

@@ -0,0 +1,45 @@
#!/bin/bash -e

here=$(dirname "$0")
cd "$here"/..

if ! ci/version-check.sh stable; then
  # This job doesn't run within a container, try once to upgrade tooling on a
  # version check failure
  rustup install stable
  ci/version-check.sh stable
fi
export RUST_BACKTRACE=1

./fetch-perf-libs.sh
export LD_LIBRARY_PATH+=:$PWD

export RUST_LOG=multinode=info

if [[ $(ulimit -n) -lt 65000 ]]; then
  echo 'Error: nofiles too small, run "ulimit -n 65000" to continue'
  exit 1
fi

if [[ $(sysctl -n net.core.rmem_default) -lt 1610612736 ]]; then
  echo 'Error: rmem_default too small, run "sudo sysctl -w net.core.rmem_default=1610612736" to continue'
  exit 1
fi

if [[ $(sysctl -n net.core.rmem_max) -lt 1610612736 ]]; then
  echo 'Error: rmem_max too small, run "sudo sysctl -w net.core.rmem_max=1610612736" to continue'
  exit 1
fi

if [[ $(sysctl -n net.core.wmem_default) -lt 1610612736 ]]; then
  echo 'Error: wmem_default too small, run "sudo sysctl -w net.core.wmem_default=1610612736" to continue'
  exit 1
fi

if [[ $(sysctl -n net.core.wmem_max) -lt 1610612736 ]]; then
  echo 'Error: wmem_max too small, run "sudo sysctl -w net.core.wmem_max=1610612736" to continue'
  exit 1
fi

set -x
exec cargo test --release --features=erasure test_multi_node_dynamic_network -- --ignored
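The four sysctl checks above differ only in the key they test; an equivalent, more compact form (a sketch, not the script's actual code) is:

```bash
for key in net.core.rmem_default net.core.rmem_max \
           net.core.wmem_default net.core.wmem_max; do
  if [[ $(sysctl -n "$key") -lt 1610612736 ]]; then
    echo "Error: $key too small, run \"sudo sysctl -w $key=1610612736\" to continue"
    exit 1
  fi
done
```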
ci/test-nightly.sh

@@ -2,9 +2,8 @@

cd "$(dirname "$0")/.."

ci/version-check.sh nightly
export RUST_BACKTRACE=1
rustc --version
cargo --version

_() {
  echo "--- $*"
@@ -27,6 +26,6 @@ ls -l target/cov/report/index.html
if [[ -z "$CODECOV_TOKEN" ]]; then
  echo CODECOV_TOKEN undefined
else
  bash <(curl -s https://codecov.io/bash) -x 'llvm-cov gcov'
  bash <(curl -s https://codecov.io/bash) -x 'llvm-cov-6.0 gcov'
fi

ci/test-stable-perf.sh

@@ -2,11 +2,29 @@

cd "$(dirname "$0")/.."

./fetch-perf-libs.sh

export LD_LIBRARY_PATH=$PWD:/usr/local/cuda/lib64
export PATH=$PATH:/usr/local/cuda/bin
if ! ci/version-check.sh stable; then
  # This job doesn't run within a container, try once to upgrade tooling on a
  # version check failure
  rustup install stable
  ci/version-check.sh stable
fi
export RUST_BACKTRACE=1

set -x
exec cargo test --features=cuda,erasure
./fetch-perf-libs.sh
export LD_LIBRARY_PATH=$PWD:/usr/local/cuda/lib64
export PATH=$PATH:/usr/local/cuda/bin

_() {
  echo "--- $*"
  "$@"
}

_ cargo test --features=cuda,erasure

echo --- ci/localnet-sanity.sh
(
  set -x
  # Assume |cargo build| has populated target/debug/ successfully.
  export PATH=$PWD/target/debug:$PATH
  USE_INSTALL=1 ci/localnet-sanity.sh
)

ci/test-stable.sh

@@ -2,17 +2,24 @@

cd "$(dirname "$0")/.."

ci/version-check.sh stable
export RUST_BACKTRACE=1
rustc --version
cargo --version

_() {
  echo "--- $*"
  "$@"
}

_ rustup component add rustfmt-preview
_ cargo fmt -- --write-mode=check
_ cargo fmt -- --check
_ cargo build --verbose
_ cargo test --verbose
_ cargo bench --verbose

echo --- ci/localnet-sanity.sh
(
  set -x
  # Assume |cargo build| has populated target/debug/ successfully.
  export PATH=$PWD/target/debug:$PATH
  USE_INSTALL=1 ci/localnet-sanity.sh
)

_ ci/audit.sh
ci/testnet-deploy.sh (new executable file, 519 lines)

@@ -0,0 +1,519 @@
#!/bin/bash -e
#
# Deploys the Solana software running on the testnet full nodes
#
# This script must be run by a user/machine that has successfully authenticated
# with GCP and has sufficient permission.
#
here=$(dirname "$0")
metrics_write_datapoint="$here"/../multinode-demo/metrics_write_datapoint.sh

# TODO: Switch over to rolling updates
ROLLING_UPDATE=false
#ROLLING_UPDATE=true

if [[ -z $SOLANA_METRICS_CONFIG ]]; then
  echo Error: SOLANA_METRICS_CONFIG environment variable is unset
  exit 1
fi


# The SOLANA_METRICS_CONFIG environment variable is formatted as a
# comma-delimited list of parameters. All parameters are optional.
#
# Example:
#   export SOLANA_METRICS_CONFIG="host=<metrics host>,db=<database name>,u=<username>,p=<password>"
#
configure_metrics() {
  [[ -n $SOLANA_METRICS_CONFIG ]] || return 0

  declare metrics_params
  IFS=',' read -r -a metrics_params <<< "$SOLANA_METRICS_CONFIG"
  for param in "${metrics_params[@]}"; do
    IFS='=' read -r -a pair <<< "$param"
    if [[ ${#pair[@]} != 2 ]]; then
      echo Error: invalid metrics parameter: "$param" >&2
    else
      declare name="${pair[0]}"
      declare value="${pair[1]}"
      case "$name" in
      host)
        export INFLUX_HOST="$value"
        echo INFLUX_HOST="$INFLUX_HOST" >&2
        ;;
      db)
        export INFLUX_DATABASE="$value"
        echo INFLUX_DATABASE="$INFLUX_DATABASE" >&2
        ;;
      u)
        export INFLUX_USERNAME="$value"
        echo INFLUX_USERNAME="$INFLUX_USERNAME" >&2
        ;;
      p)
        export INFLUX_PASSWORD="$value"
        echo INFLUX_PASSWORD="********" >&2
        ;;
      *)
        echo Error: Unknown metrics parameter name: "$name" >&2
        ;;
      esac
    fi
  done
}
configure_metrics

# Default to edge channel.  To select the beta channel:
#   export SOLANA_SNAP_CHANNEL=beta
if [[ -z $SOLANA_SNAP_CHANNEL ]]; then
  SOLANA_SNAP_CHANNEL=edge
fi

# Select default network URL based on SOLANA_SNAP_CHANNEL if SOLANA_NET_ENTRYPOINT is
# unspecified
if [[ -z $SOLANA_NET_ENTRYPOINT ]]; then
  case $SOLANA_SNAP_CHANNEL in
  edge)
    SOLANA_NET_ENTRYPOINT=master.testnet.solana.com
    unset SOLANA_NET_NAME
    ;;
  beta)
    SOLANA_NET_ENTRYPOINT=testnet.solana.com
    unset SOLANA_NET_NAME
    ;;
  *)
    echo Error: Unknown SOLANA_SNAP_CHANNEL=$SOLANA_SNAP_CHANNEL
    exit 1
    ;;
  esac
fi

if [[ -z $SOLANA_NET_NAME ]]; then
  SOLANA_NET_NAME=${SOLANA_NET_ENTRYPOINT//./-}
fi

: ${SOLANA_NET_NAME:?$SOLANA_NET_ENTRYPOINT}
netBasename=${SOLANA_NET_NAME/-*/}
if [[ $netBasename != testnet ]]; then
  netBasename="testnet-$netBasename"
fi

# Figure installation command
SNAP_INSTALL_CMD="\
  for i in {1..3}; do \
    sudo snap install solana --$SOLANA_SNAP_CHANNEL --devmode && break;
    sleep 1; \
  done \
"
LOCAL_SNAP=$1
if [[ -n $LOCAL_SNAP ]]; then
  if [[ ! -f $LOCAL_SNAP ]]; then
    echo "Error: $LOCAL_SNAP is not a file"
    exit 1
  fi
  SNAP_INSTALL_CMD="sudo snap install ~/solana_local.snap --devmode --dangerous"
fi
SNAP_INSTALL_CMD="sudo snap remove solana; $SNAP_INSTALL_CMD"

EARLYOOM_INSTALL_CMD="\
  wget --retry-connrefused --waitretry=1 \
    --read-timeout=20 --timeout=15 --tries=5 \
    -O install-earlyoom.sh \
    https://raw.githubusercontent.com/solana-labs/solana/v0.7/ci/install-earlyoom.sh; \
  bash install-earlyoom.sh \
"
SNAP_INSTALL_CMD="$EARLYOOM_INSTALL_CMD; $SNAP_INSTALL_CMD"

# `export SKIP_INSTALL=1` to reset the network without reinstalling the snap
if [[ -n $SKIP_INSTALL ]]; then
  SNAP_INSTALL_CMD="echo Install skipped"
fi

echo "+++ Configuration for $netBasename"
publicUrl="$SOLANA_NET_ENTRYPOINT"
if [[ $publicUrl = testnet.solana.com ]]; then
  publicIp="" # Use default value
else
  publicIp=$(dig +short $publicUrl | head -n1)
fi

echo "Network name: $SOLANA_NET_NAME"
echo "Network entry point URL: $publicUrl ($publicIp)"
echo "Snap channel: $SOLANA_SNAP_CHANNEL"
echo "Install command: $SNAP_INSTALL_CMD"
echo "Setup args: $SOLANA_SETUP_ARGS"
[[ -z $LOCAL_SNAP ]] || echo "Local snap: $LOCAL_SNAP"

vmlist=() # Each array element is formatted as "class:vmName:vmZone:vmPublicIp"

vm_exec() {
  declare vmName=$1
  declare vmZone=$2
  declare vmPublicIp=$3
  declare message=$4
  declare cmd=$5

  echo "--- $message $vmName in zone $vmZone ($vmPublicIp)"
  ssh -o BatchMode=yes -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null \
    testnet-deploy@"$vmPublicIp" "$cmd"
}

#
# vm_foreach [cmd] [extra args to cmd]
# where
#   cmd - the command to execute on each VM
#         The command will receive three fixed arguments, followed by any
#         additional arguments supplied to vm_foreach:
#             vmName - GCP name of the VM
#             vmZone - The GCP zone the VM is located in
#             vmPublicIp - The public IP address of this VM
#             vmClass - The 'class' of this VM
#             count - Monotonically increasing count for each
#                     invocation of cmd, starting at 1
#             ... - Extra args to cmd..
#
#
vm_foreach() {
  declare cmd=$1
  shift

  declare count=1
  for info in "${vmlist[@]}"; do
    declare vmClass vmName vmZone vmPublicIp
    IFS=: read -r vmClass vmName vmZone vmPublicIp < <(echo "$info")

    eval "$cmd" "$vmName" "$vmZone" "$vmPublicIp" "$vmClass" "$count" "$@"
    count=$((count + 1))
  done
}

#
# vm_foreach_in_class [class] [cmd]
# where
#   class - the desired VM class to operate on
#   cmd - the command to execute on each VM in the desired class.
#         The command will receive three arguments:
#             vmName - GCP name of the VM
#             vmZone - The GCP zone the VM is located in
#             vmPublicIp - The public IP address of this VM
#             count - Monotonically increasing count for each
#                     invocation of cmd, starting at 1
#
#
_run_cmd_if_class() {
  declare vmName=$1
  declare vmZone=$2
  declare vmPublicIp=$3
  declare vmClass=$4
  declare count=$5
  declare class=$6
  declare cmd=$7
  if [[ $class = "$vmClass" ]]; then
    eval "$cmd" "$vmName" "$vmZone" "$vmPublicIp" "$count"
  fi
}

vm_foreach_in_class() {
  declare class=$1
  declare cmd=$2
  vm_foreach _run_cmd_if_class "$1" "$2"
}

#
# Load all VMs matching the specified filter and tag them with the specified
# class into the `vmlist` array.
findVms() {
  declare class="$1"
  declare filter="$2"
  gcloud compute instances list --filter="$filter"
  while read -r vmName vmZone vmPublicIp status; do
    if [[ $status != RUNNING ]]; then
      echo "Warning: $vmName is not RUNNING, ignoring it."
      continue
    fi
    vmlist+=("$class:$vmName:$vmZone:$vmPublicIp")
  done < <(gcloud compute instances list \
             --filter="$filter" \
             --format 'value(name,zone,networkInterfaces[0].accessConfigs[0].natIP,status)')
}

wait_for_pids() {
  echo "--- Waiting for $*"
  for pid in "${pids[@]}"; do
    declare ok=true
    wait "$pid" || ok=false
    cat "log-$pid.txt"
    if ! $ok; then
      echo ^^^ +++
      exit 1
    fi
    rm "log-$pid.txt"
  done
}

delete_unreachable_validators() {
  declare vmName=$1
  declare vmZone=$2
  declare vmPublicIp=$3

  touch "log-$vmName.txt"
  (
    SECONDS=0
    if ! vm_exec "$vmName" "$vmZone" "$vmPublicIp" "Checking $vmName" uptime; then
      echo "^^^ +++"

      # Validators are managed by a Compute Engine Instance Group, so deleting
      # one will just cause a new one to be spawned.
      echo "Warning: $vmName is unreachable, deleting it"
      gcloud compute instances delete "$vmName" --zone "$vmZone"
    fi
    echo "validator checked in ${SECONDS} seconds"
  ) >> "log-$vmName.txt" 2>&1 &
  declare pid=$!

  # Rename log file so it can be discovered later by $pid
  mv "log-$vmName.txt" "log-$pid.txt"
  pids+=("$pid")
}


echo "Validator nodes (unverified):"
findVms validator "name~^$SOLANA_NET_NAME-validator-"
pids=()
vm_foreach_in_class validator delete_unreachable_validators
wait_for_pids validator sanity check
vmlist=()

echo "Leader node:"
findVms leader "name=$SOLANA_NET_NAME"
[[ ${#vmlist[@]} = 1 ]] || {
  echo "Unable to find $SOLANA_NET_NAME"
  exit 1
}

echo "Client node(s):"
findVms client "name~^$SOLANA_NET_NAME-client"

echo "Validator nodes:"
findVms validator "name~^$SOLANA_NET_NAME-validator-"

fullnode_count=0
inc_fullnode_count() {
  fullnode_count=$((fullnode_count + 1))
}
vm_foreach_in_class leader inc_fullnode_count
vm_foreach_in_class validator inc_fullnode_count

# Add "network stopping" datapoint
$metrics_write_datapoint "testnet-deploy,name=$netBasename stop=1"

client_start() {
  declare vmName=$1
  declare vmZone=$2
  declare vmPublicIp=$3
  declare count=$4

  vm_exec "$vmName" "$vmZone" "$vmPublicIp" \
    "Starting client $count:" \
    "\
      set -x;
      snap info solana; \
      sudo snap get solana; \
      threadCount=\$(nproc); \
      if [[ \$threadCount -gt 4 ]]; then threadCount=4; fi; \
      tmux kill-session -t solana; \
      tmux new -s solana -d \" \
        set -x; \
        sudo rm /tmp/solana.log; \
        while : ; do \
          /snap/bin/solana.bench-tps $SOLANA_NET_ENTRYPOINT $fullnode_count --loop -s 600 --sustained -t \$threadCount 2>&1 | tee -a /tmp/solana.log; \
          echo 'https://metrics.solana.com:8086/write?db=${INFLUX_DATABASE}&u=${INFLUX_USERNAME}&p=${INFLUX_PASSWORD}' \
            | xargs curl --max-time 5 -XPOST --data-binary 'testnet-deploy,name=$netBasename clientexit=1'; \
          echo Error: bench-tps should never exit | tee -a /tmp/solana.log; \
        done; \
        bash \
      \"; \
      sleep 2; \
      tmux capture-pane -t solana -p -S -100; \
      tail /tmp/solana.log; \
    "
}

client_stop() {
  declare vmName=$1
  declare vmZone=$2
  declare vmPublicIp=$3
  declare count=$4

  touch "log-$vmName.txt"
  (
    SECONDS=0
    vm_exec "$vmName" "$vmZone" "$vmPublicIp" \
      "Stopping client $vmName ($count):" \
      "\
        set -x;
        tmux list-sessions; \
        tmux capture-pane -t solana -p; \
        tmux kill-session -t solana; \
        $SNAP_INSTALL_CMD; \
        sudo snap set solana metrics-config=$SOLANA_METRICS_CONFIG \
          rust-log=$RUST_LOG \
          default-metrics-rate=$SOLANA_DEFAULT_METRICS_RATE \
        ; \
      "
    echo "Client stopped in ${SECONDS} seconds"
  ) >> "log-$vmName.txt" 2>&1 &
  declare pid=$!

  # Rename log file so it can be discovered later by $pid
  mv "log-$vmName.txt" "log-$pid.txt"
  pids+=("$pid")
}

fullnode_start() {
  declare class=$1
  declare vmName=$2
  declare vmZone=$3
  declare vmPublicIp=$4
  declare count=$5

  touch "log-$vmName.txt"
  (
    SECONDS=0
    commonNodeConfig="\
      rust-log=$RUST_LOG \
      default-metrics-rate=$SOLANA_DEFAULT_METRICS_RATE \
      metrics-config=$SOLANA_METRICS_CONFIG \
      setup-args=$SOLANA_SETUP_ARGS \
    "
    if [[ $class = leader ]]; then
      nodeConfig="mode=leader+drone $commonNodeConfig"
      if [[ -n $SOLANA_CUDA ]]; then
        nodeConfig="$nodeConfig enable-cuda=1"
      fi
    else
      nodeConfig="mode=validator leader-address=$publicIp $commonNodeConfig"
    fi

    vm_exec "$vmName" "$vmZone" "$vmPublicIp" "Starting $class $count:" \
      "\
        set -ex; \
        logmarker='solana deploy $(date)/$RANDOM'; \
        logger \"\$logmarker\"; \
        $SNAP_INSTALL_CMD; \
        sudo snap set solana $nodeConfig; \
        snap info solana; \
        sudo snap get solana; \
        echo Slight delay to get more syslog output; \
        sleep 2; \
        sudo grep -Pzo \"\$logmarker(.|\\n)*\" /var/log/syslog \
      "
    echo "Succeeded in ${SECONDS} seconds"
  ) >> "log-$vmName.txt" 2>&1 &
  declare pid=$!

  # Rename log file so it can be discovered later by $pid
  mv "log-$vmName.txt" "log-$pid.txt"

  pids+=("$pid")
}

leader_start() {
  fullnode_start leader "$@"
}

validator_start() {
  fullnode_start validator "$@"
}

fullnode_stop() {
  declare vmName=$1
  declare vmZone=$2
  declare vmPublicIp=$3
  declare count=$4

  touch "log-$vmName.txt"
  (
    SECONDS=0
    # Try to ping the machine first.  When a machine (validator) is restarted,
    # there can be a delay between when the instance is reported as RUNNING and when
    # it's reachable over the network
    timeout 30s bash -c "set -o pipefail; until ping -c 3 $vmPublicIp | tr - _; do echo .; done"
    vm_exec "$vmName" "$vmZone" "$vmPublicIp" "Shutting down" "\
      if snap list solana; then \
        sudo snap set solana mode=; \
      fi"
    echo "Succeeded in ${SECONDS} seconds"
  ) >> "log-$vmName.txt" 2>&1 &
  declare pid=$!

  # Rename log file so it can be discovered later by $pid
  mv "log-$vmName.txt" "log-$pid.txt"

  pids+=("$pid")
}

if [[ -n $LOCAL_SNAP ]]; then
  echo "--- Transferring $LOCAL_SNAP to node(s)"

  transfer_local_snap() {
    declare vmName=$1
    declare vmZone=$2
    declare vmPublicIp=$3
    declare vmClass=$4
    declare count=$5

    echo "--- $vmName in zone $vmZone ($count)"
    SECONDS=0
    scp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null \
      "$LOCAL_SNAP" testnet-deploy@"$vmPublicIp":solana_local.snap
    echo "Succeeded in ${SECONDS} seconds"
  }
  vm_foreach transfer_local_snap
fi

echo "--- Stopping client node(s)"
pids=()
vm_foreach_in_class client client_stop
client_stop_pids=("${pids[@]}")

if ! $ROLLING_UPDATE; then
  pids=()
  echo "--- Shutting down all full nodes"
  vm_foreach_in_class leader fullnode_stop
  vm_foreach_in_class validator fullnode_stop
  wait_for_pids fullnode shutdown
fi

pids=()
echo --- Starting leader node
vm_foreach_in_class leader leader_start
wait_for_pids leader

pids=()
echo --- Starting validator nodes
vm_foreach_in_class validator validator_start
|
||||
wait_for_pids validators
|
||||
|
||||
echo "--- $publicUrl sanity test"
|
||||
if [[ -z $CI ]]; then
|
||||
# TODO: ssh into a node and run testnet-sanity.sh there. It's not safe to
|
||||
# assume the correct Snap is installed on the current non-CI machine
|
||||
echo Skipped for non-CI deploy
|
||||
snapVersion=unknown
|
||||
else
|
||||
(
|
||||
set -x
|
||||
USE_SNAP=1 ci/testnet-sanity.sh $publicUrl $fullnode_count
|
||||
)
|
||||
IFS=\ read -r _ snapVersion _ < <(snap info solana | grep "^installed:")
|
||||
snapVersion=${snapVersion/0+git./}
|
||||
fi
|
||||
|
||||
pids=("${client_stop_pids[@]}")
|
||||
wait_for_pids client shutdown
|
||||
vm_foreach_in_class client client_start
|
||||
|
||||
# Add "network started" datapoint
|
||||
$metrics_write_datapoint "testnet-deploy,name=$netBasename start=1,version=\"$snapVersion\""
|
||||
|
||||
exit 0
|
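For reference, a minimal manual run of this deploy script might look like the sketch below. The environment variable names are taken from the script body above, but the full set of required variables (and their defaults) is not shown in this excerpt, so treat this as illustrative only.

```bash
# Hypothetical manual deploy; SOLANA_NET_NAME selects the VM name prefix,
# SOLANA_NET_ENTRYPOINT is passed to bench-tps on the clients, and
# ROLLING_UPDATE=false forces a full-node shutdown before restart.
SOLANA_NET_NAME=master \
SOLANA_NET_ENTRYPOINT=testnet.solana.com \
ROLLING_UPDATE=false \
  ci/testnet-deploy.sh
```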
ci/testnet-sanity.sh (new executable file, 77 lines)
@@ -0,0 +1,77 @@
#!/bin/bash -e
#
# Perform a quick sanity test on the specific testnet
#

cd "$(dirname "$0")/.."
source multinode-demo/common.sh

NET_URL=$1
if [[ -z $NET_URL ]]; then
  NET_URL=testnet.solana.com
fi

EXPECTED_NODE_COUNT=$2
if [[ -z $EXPECTED_NODE_COUNT ]]; then
  EXPECTED_NODE_COUNT=50
fi

echo "--- $NET_URL: verify ledger"
if [[ -z $NO_LEDGER_VERIFY ]]; then
  if [[ -d /var/snap/solana/current/config/ledger ]]; then
    # Note: here we assume this script is actually running on the leader node...
    (
      set -x
      sudo cp -r /var/snap/solana/current/config/ledger /var/snap/solana/current/config/ledger-verify-$$
      sudo solana.ledger-tool --ledger /var/snap/solana/current/config/ledger-verify-$$ verify
    )
  else
    echo "^^^ +++"
    echo "Ledger verify skipped"
  fi
else
  echo "^^^ +++"
  echo "Ledger verify skipped (NO_LEDGER_VERIFY defined)"
fi

echo "--- $NET_URL: wallet sanity"
(
  set -x
  multinode-demo/test/wallet-sanity.sh $NET_URL
)

echo "--- $NET_URL: node count"
if [[ -n "$USE_SNAP" ]]; then
  # TODO: Merge client.sh functionality into solana-bench-tps proper and
  # remove this USE_SNAP case
  cmd=$solana_bench_tps
else
  cmd=multinode-demo/client.sh
fi

(
  set -x
  $cmd $NET_URL $EXPECTED_NODE_COUNT -c
)

echo "--- $NET_URL: validator sanity"
if [[ -z $NO_VALIDATOR_SANITY ]]; then
  (
    ./multinode-demo/setup.sh -t validator
    set -o pipefail
    timeout 10s ./multinode-demo/validator.sh "$NET_URL" 2>&1 | tee validator.log
  )
  wc -l validator.log
  if grep -C100 panic validator.log; then
    echo "^^^ +++"
    echo "Panic observed"
    exit 1
  else
    echo "Validator log looks ok"
  fi
else
  echo "^^^ +++"
  echo "Validator sanity disabled (NO_VALIDATOR_SANITY defined)"
fi

exit 0
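The deploy script above invokes this check as `USE_SNAP=1 ci/testnet-sanity.sh $publicUrl $fullnode_count`. Run standalone, it falls back to the defaults visible at the top of the script (the hostname in the second example is hypothetical):

```bash
# Defaults: NET_URL=testnet.solana.com, EXPECTED_NODE_COUNT=50
ci/testnet-sanity.sh

# Target a specific net and node count, using the Snap-installed binaries
USE_SNAP=1 ci/testnet-sanity.sh master.testnet.solana.com 10
```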
ci/version-check.sh (new executable file, 35 lines)
@@ -0,0 +1,35 @@
#!/bin/bash -e

require() {
  declare expectedProgram="$1"
  declare expectedVersion="$2"

  read -r program version _ < <($expectedProgram -V)

  declare ok=true
  [[ $program = "$expectedProgram" ]] || ok=false
  [[ $version =~ $expectedVersion ]] || ok=false

  echo "Found $program $version"
  if ! $ok; then
    echo Error: expected "$expectedProgram $expectedVersion"
    exit 1
  fi
}

case ${1:-stable} in
nightly)
  require rustc 1.30.[0-9]+-nightly
  require cargo 1.29.[0-9]+-nightly
  ;;
stable)
  require rustc 1.28.[0-9]+
  require cargo 1.28.[0-9]+
  ;;
*)
  echo Error: unknown argument: "$1"
  exit 1
  ;;
esac

exit 0
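Usage follows directly from the `case` statement: one optional argument selects the toolchain channel, defaulting to `stable`:

```bash
ci/version-check.sh           # expects rustc/cargo 1.28.x
ci/version-check.sh nightly   # expects the pinned 1.30.x/1.29.x nightlies
```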
doc/testnet.md (new file, 35 lines)
@@ -0,0 +1,35 @@
# TestNet debugging info

Currently we have two testnets, 'perf' and 'master', both running the master branch of the
solana repo. Deploys happen at the top of every hour with the latest code. 'perf' has more
cores on the client machine so that it can flood the network with transactions until failure.

## Deploy process

Both testnets are deployed with the `ci/testnet-deploy.sh` script. A scheduled buildkite job
performs the deploy; look at `testnet-deploy` to see the agent that ran it and its logs. There
is also a job for triggering a deploy manually. Validators are selected based on their machine
names, and every node gets its binaries installed from the Snap.

## Where are the testnet logs?

For the client they are put in `/tmp/solana`; for validators and leaders they are in
`/var/snap/solana/current/`. You can also see the backtrace of the client by ssh'ing into the
client node and doing:

```bash
$ sudo -u testnet-deploy -i
$ tmux attach -t solana
```

## How do I reset the testnet?

Through buildkite.

## How can I scale the tx generation rate?

Increase the TX rate by adding cores to the client machine that runs `bench-tps`, or by running
multiple clients. Decrease it by lowering the core count or by capping the thread pool with the
rayon env variable `RAYON_NUM_THREADS=<xx>`.
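For example (a sketch; the URL and node count are illustrative, and `client.sh` wraps `bench-tps` as shown later in this diff):

```bash
# Cap the rayon worker pool at 4 threads for one client run
RAYON_NUM_THREADS=4 ./multinode-demo/client.sh testnet.solana.com 50
```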
## How can I test a change on the testnet?

Currently, a merged PR is the only way to test a change on the testnet.
@@ -13,7 +13,7 @@ fi
(
  set -x
  curl -o solana-perf.tgz \
    https://solana-perf.s3.amazonaws.com/master/x86_64-unknown-linux-gnu/solana-perf.tgz
    https://solana-perf.s3.amazonaws.com/v0.8.0/x86_64-unknown-linux-gnu/solana-perf.tgz
  tar zxvf solana-perf.tgz
)

@@ -1,31 +1,66 @@
#!/bin/bash
#
# usage: $0 <rsync network path to solana repo on leader machine> <number of nodes in the network>"
#!/bin/bash -e
#
USAGE=" usage: $0 [leader_url] [num_nodes] [--loop] [extra args]

 leader_url       URL to the leader (defaults to ..)
 num_nodes        Minimum number of nodes to look for while converging
 --loop           Add this flag to cause the program to loop infinitely
 \"extra args\"     Any additional arguments are passed along to solana-bench-tps
"

here=$(dirname "$0")
# shellcheck source=multinode-demo/common.sh
source "$here"/common.sh

leader=${1:-${here}/..} # Default to local solana repo
count=${2:-1}
leader=$1
if [[ -n $leader ]]; then
  if [[ $leader == "-h" || $leader == "--help" ]]; then
    echo "$USAGE"
    exit 0
  fi
  shift
else
  if [[ -d "$SNAP" ]]; then
    leader=testnet.solana.com # Default to testnet when running as a Snap
  else
    leader=$here/.. # Default to local solana repo
  fi
fi

count=$1
if [[ -n $count ]]; then
  shift
else
  count=1
fi

loop=
if [[ $1 = --loop ]]; then
  loop=1
  shift
fi

rsync_leader_url=$(rsync_url "$leader")
(
  set -x
  mkdir -p "$SOLANA_CONFIG_CLIENT_DIR"
  $rsync -vPz "$rsync_leader_url"/config/leader.json "$SOLANA_CONFIG_CLIENT_DIR"/

  set -ex
  mkdir -p "$SOLANA_CONFIG_CLIENT_DIR"
  if [[ ! -r "$SOLANA_CONFIG_CLIENT_DIR"/leader.json ]]; then
    (
      set -x
      $rsync -vPz "$rsync_leader_url"/config/leader.json "$SOLANA_CONFIG_CLIENT_DIR"/
    )
  fi
  client_json="$SOLANA_CONFIG_CLIENT_DIR"/client.json
  [[ -r $client_json ]] || $solana_keygen -o "$client_json"
)

client_json="$SOLANA_CONFIG_CLIENT_DIR"/client.json
if [[ ! -r $client_json ]]; then
  $solana_mint <<<0 > "$client_json"
fi

# shellcheck disable=SC2086 # $solana_client_demo should not be quoted
exec $solana_client_demo \
  -n "$count" -l "$SOLANA_CONFIG_CLIENT_DIR"/leader.json -m "$SOLANA_CONFIG_CLIENT_DIR"/client.json
iteration=0
set -x
while true; do
  $solana_bench_tps \
    -n "$count" \
    -l "$SOLANA_CONFIG_CLIENT_DIR"/leader.json \
    -k "$SOLANA_CONFIG_CLIENT_DIR"/client.json \
    "$@"
  [[ -n $loop ]] || exit 0
  iteration=$((iteration + 1))
  echo ------------------------------------------------------------------------
  echo "Iteration: $iteration"
  echo ------------------------------------------------------------------------
done

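Per the `USAGE` text above, a hedged example run (leader URL and node count are illustrative):

```bash
# Converge on at least 10 nodes, then run bench-tps in an infinite loop
./multinode-demo/client.sh testnet.solana.com 10 --loop
```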
@@ -4,27 +4,55 @@
# shellcheck disable=2034

rsync=rsync
if [[ -d "$SNAP" ]]; then # Running inside a Linux Snap?
leader_logger="cat"
validator_logger="cat"
drone_logger="cat"

if [[ $(uname) != Linux ]]; then
  # Protect against unsupported configurations to prevent non-obvious errors
  # later. Arguably these should be fatal errors but for now prefer tolerance.
  if [[ -n $USE_SNAP ]]; then
    echo "Warning: Snap is not supported on $(uname)"
    USE_SNAP=
  fi
  if [[ -n $SOLANA_CUDA ]]; then
    echo "Warning: CUDA is not supported on $(uname)"
    SOLANA_CUDA=
  fi
fi

if [[ -d $SNAP ]]; then # Running inside a Linux Snap?
  solana_program() {
    declare program="$1"
    if [[ "$program" = wallet ]]; then
      # TODO: Merge wallet.sh functionality into solana-wallet proper and
      # remove this special case
    if [[ "$program" = wallet || "$program" = bench-tps ]]; then
      # TODO: Merge wallet.sh/client.sh functionality into
      # solana-wallet/solana-demo-client proper and remove this special case
      printf "%s/bin/solana-%s" "$SNAP" "$program"
    else
      printf "%s/command-%s.wrapper" "$SNAP" "$program"
    fi
  }
  rsync="$SNAP"/bin/rsync
  SOLANA_METRICS_CONFIG="$(snapctl get metrics-config)"
  SOLANA_CUDA="$(snapctl get enable-cuda)"
  multilog="$SNAP/bin/multilog t s16777215 n200"
  leader_logger="$multilog $SNAP_DATA/leader"
  validator_logger="$multilog t $SNAP_DATA/validator"
  drone_logger="$multilog $SNAP_DATA/drone"
  # Create log directories manually to prevent multilog from creating them as
  # 0700
  mkdir -p "$SNAP_DATA"/{drone,leader,validator}

elif [[ -n "$USE_SNAP" ]]; then # Use the Linux Snap binaries
  SOLANA_METRICS_CONFIG="$(snapctl get metrics-config)"
  SOLANA_DEFAULT_METRICS_RATE="$(snapctl get default-metrics-rate)"
  export SOLANA_DEFAULT_METRICS_RATE
  SOLANA_CUDA="$(snapctl get enable-cuda)"
  RUST_LOG="$(snapctl get rust-log)"

elif [[ -n $USE_SNAP ]]; then # Use the Linux Snap binaries
  solana_program() {
    declare program="$1"
    printf "solana.%s" "$program"
  }
elif [[ -n "$USE_INSTALL" ]]; then # Assume |cargo install| was run
elif [[ -n $USE_INSTALL ]]; then # Assume |cargo install| was run
  solana_program() {
    declare program="$1"
    printf "solana-%s" "$program"
@@ -37,23 +65,35 @@ else
    declare features=""
    if [[ "$program" =~ ^(.*)-cuda$ ]]; then
      program=${BASH_REMATCH[1]}
      features="--features=cuda,erasure"
      features="--features=cuda"
    fi
    if [[ -z "$DEBUG" ]]; then
    if [[ -z $DEBUG ]]; then
      maybe_release=--release
    fi
    printf "cargo run $maybe_release --bin solana-%s %s -- " "$program" "$features"
  }
  if [[ -n $SOLANA_CUDA ]]; then
    # shellcheck disable=2154 # 'here' is referenced but not assigned
    if [[ -z $here ]]; then
      echo "|here| is not defined"
      exit 1
    fi

    # Locate perf libs downloaded by |./fetch-perf-libs.sh|
    LD_LIBRARY_PATH=$(cd "$here" && dirname "$PWD"):$LD_LIBRARY_PATH
    export LD_LIBRARY_PATH
  fi
fi

solana_client_demo=$(solana_program client-demo)
solana_bench_tps=$(solana_program bench-tps)
solana_wallet=$(solana_program wallet)
solana_drone=$(solana_program drone)
solana_fullnode=$(solana_program fullnode)
solana_fullnode_config=$(solana_program fullnode-config)
solana_fullnode_cuda=$(solana_program fullnode-cuda)
solana_genesis=$(solana_program genesis)
solana_mint=$(solana_program mint)
solana_keygen=$(solana_program keygen)
solana_ledger_tool=$(solana_program ledger-tool)

export RUST_LOG=${RUST_LOG:-solana=info} # if RUST_LOG is unset, default to info
export RUST_BACKTRACE=1
@@ -66,36 +106,36 @@ export RUST_BACKTRACE=1
# export SOLANA_METRICS_CONFIG="host=<metrics host>,db=<database name>,u=<username>,p=<password>"
#
configure_metrics() {
  [[ -n $SOLANA_METRICS_CONFIG ]] || return
  [[ -n $SOLANA_METRICS_CONFIG ]] || return 0

  declare metrics_params
  IFS=',' read -r -a metrics_params <<< "$SOLANA_METRICS_CONFIG"
  for param in "${metrics_params[@]}"; do
    IFS='=' read -r -a pair <<< "$param"
    if [[ "${#pair[@]}" != 2 ]]; then
      echo Error: invalid metrics parameter: "$param"
    if [[ ${#pair[@]} != 2 ]]; then
      echo Error: invalid metrics parameter: "$param" >&2
    else
      declare name="${pair[0]}"
      declare value="${pair[1]}"
      case "$name" in
      host)
        export INFLUX_HOST="$value"
        echo INFLUX_HOST="$INFLUX_HOST"
        echo INFLUX_HOST="$INFLUX_HOST" >&2
        ;;
      db)
        export INFLUX_DATABASE="$value"
        echo INFLUX_DATABASE="$INFLUX_DATABASE"
        echo INFLUX_DATABASE="$INFLUX_DATABASE" >&2
        ;;
      u)
        export INFLUX_USERNAME="$value"
        echo INFLUX_USERNAME="$INFLUX_USERNAME"
        echo INFLUX_USERNAME="$INFLUX_USERNAME" >&2
        ;;
      p)
        export INFLUX_PASSWORD="$value"
        echo INFLUX_PASSWORD="********"
        echo INFLUX_PASSWORD="********" >&2
        ;;
      *)
        echo Error: Unknown metrics parameter name: "$name"
        echo Error: Unknown metrics parameter name: "$name" >&2
        ;;
      esac
    fi
@@ -104,32 +144,68 @@ configure_metrics() {
configure_metrics

tune_networking() {
  # Skip in CI
  [[ -z $CI ]] || return 0

  # Reference: https://medium.com/@CameronSparr/increase-os-udp-buffers-to-improve-performance-51d167bb1360
  [[ $(uname) = Linux ]] && (
    set -x
    # TODO: Check values and warn instead, it's a little rude to set them here.
    sudo sysctl -w net.core.rmem_max=26214400 1>/dev/null 2>/dev/null
    sudo sysctl -w net.core.rmem_default=26214400 1>/dev/null 2>/dev/null
  )
  if [[ $(uname) = Linux ]]; then
    (
      set -x +e
      # test the existence of the sysctls before trying to set them
      # go ahead and return true and don't exit if these calls fail
      sysctl net.core.rmem_max 2>/dev/null 1>/dev/null &&
        sudo sysctl -w net.core.rmem_max=26214400 1>/dev/null 2>/dev/null

      sysctl net.core.rmem_default 2>/dev/null 1>/dev/null &&
        sudo sysctl -w net.core.rmem_default=26214400 1>/dev/null 2>/dev/null
    ) || true
  fi

  if [[ $(uname) = Darwin ]]; then
    (
      if [[ $(sysctl net.inet.udp.maxdgram | cut -d\  -f2) != 65535 ]]; then
        echo "Adjusting maxdgram to allow for large UDP packets, see BLOB_SIZE in src/packet.rs:"
        set -x
        sudo sysctl net.inet.udp.maxdgram=65535
      fi
    )
  fi
}

oom_score_adj() {
  declare pid=$1
  declare score=$2
  if [[ $(uname) != Linux ]]; then
    return
  fi

  echo "$score" > "/proc/$pid/oom_score_adj" || true
  declare currentScore
  currentScore=$(cat "/proc/$pid/oom_score_adj" || true)
  if [[ $score != "$currentScore" ]]; then
    echo "Failed to set oom_score_adj to $score for pid $pid (current score: $currentScore)"
  fi
}

SOLANA_CONFIG_DIR=${SNAP_DATA:-$PWD}/config
SOLANA_CONFIG_PRIVATE_DIR=${SNAP_DATA:-$PWD}/config-private
SOLANA_CONFIG_CLIENT_DIR=${SNAP_USER_DATA:-$PWD}/config-client-client
SOLANA_CONFIG_VALIDATOR_DIR=${SNAP_DATA:-$PWD}/config-validator
SOLANA_CONFIG_CLIENT_DIR=${SNAP_USER_DATA:-$PWD}/config-client

rsync_url() { # adds the 'rsync://' prefix to URLs that need it
  declare url="$1"

  if [[ "$url" =~ ^.*:.*$ ]]; then
  if [[ $url =~ ^.*:.*$ ]]; then
    # assume remote-shell transport when colon is present, use $url unmodified
    echo "$url"
    return
    return 0
  fi

  if [[ -d "$url" ]]; then
  if [[ -d $url ]]; then
    # assume local directory if $url is a valid directory, use $url unmodified
    echo "$url"
    return
    return 0
  fi

  # Default to rsync:// URL
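The branches of `rsync_url` can be illustrated with hypothetical inputs; the first two are echoed unmodified, while anything else falls through to the `rsync://` default (the function's final lines are truncated in this excerpt):

```bash
rsync_url user@10.0.0.1:solana   # colon present: remote-shell transport, unchanged
rsync_url "$PWD"                 # existing local directory, unchanged
rsync_url testnet.solana.com     # neither: presumably becomes rsync://testnet.solana.com
```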
@@ -21,7 +21,7 @@ if [[ -d "$SNAP" ]]; then
  fi
  leader="$leader_address"
else
  leader=${1:-${here}/..} # Default to local solana repo
  leader=${1:-${here}/..} # Default to local tree for data
fi

[[ -f "$SOLANA_CONFIG_PRIVATE_DIR"/mint.json ]] || {
@@ -36,6 +36,11 @@ set -ex
mkdir -p "$SOLANA_CONFIG_DIR"
$rsync -vPz "$rsync_leader_url"/config/leader.json "$SOLANA_CONFIG_DIR"/

# shellcheck disable=SC2086 # $solana_drone should not be quoted
exec $solana_drone \
  -l "$SOLANA_CONFIG_DIR"/leader.json -m "$SOLANA_CONFIG_PRIVATE_DIR"/mint.json

trap 'kill "$pid" && wait "$pid"' INT TERM
$solana_drone \
  -l "$SOLANA_CONFIG_DIR"/leader.json -k "$SOLANA_CONFIG_PRIVATE_DIR"/mint.json \
  > >($drone_logger) 2>&1 &
pid=$!
oom_score_adj "$pid" 1000
wait "$pid"
@@ -13,7 +13,7 @@ fi
[[ -f "$SOLANA_CONFIG_DIR"/leader.json ]] || {
  echo "$SOLANA_CONFIG_DIR/leader.json not found, create it by running:"
  echo
  echo "  ${here}/setup.sh -t leader"
  echo "  ${here}/setup.sh"
  exit 1
}

@@ -25,9 +25,11 @@ fi

tune_networking

# shellcheck disable=SC2086 # $program should not be quoted
exec $program \
  -l "$SOLANA_CONFIG_DIR"/leader.json \
  < <(shopt -s nullglob && cat "$SOLANA_CONFIG_DIR"/genesis.log \
    "$SOLANA_CONFIG_DIR"/tx-*.log) \
  > "$SOLANA_CONFIG_DIR"/tx-"$(date -u +%Y%m%d%H%M%S%N)".log
trap 'kill "$pid" && wait "$pid"' INT TERM
$program \
  --identity "$SOLANA_CONFIG_DIR"/leader.json \
  --ledger "$SOLANA_CONFIG_DIR"/ledger \
  > >($leader_logger) 2>&1 &
pid=$!
oom_score_adj "$pid" 1000
wait "$pid"
multinode-demo/metrics_write_datapoint.sh (new executable file, 16 lines)
@@ -0,0 +1,16 @@
#!/bin/bash -e

point=$1
if [[ -z $point ]]; then
  echo "Data point not specified"
  exit 1
fi

echo "Influx data point: $point"
if [[ -z $INFLUX_DATABASE || -z $INFLUX_USERNAME || -z $INFLUX_PASSWORD ]]; then
  echo Influx user credentials not found
  exit 0
fi

echo "https://metrics.solana.com:8086/write?db=${INFLUX_DATABASE}&u=${INFLUX_USERNAME}&p=${INFLUX_PASSWORD}" \
  | xargs curl --max-time 5 -XPOST --data-binary "$point"
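The deploy script calls this helper through `$metrics_write_datapoint`; invoked directly it looks like the following (the `name` value is illustrative):

```bash
# Requires INFLUX_DATABASE, INFLUX_USERNAME and INFLUX_PASSWORD in the environment
./multinode-demo/metrics_write_datapoint.sh "testnet-deploy,name=master stop=1"
```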
@ -1,59 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
function myip()
|
||||
{
|
||||
# shellcheck disable=SC2207
|
||||
declare ipaddrs=(
|
||||
# query interwebs
|
||||
$(curl -s ifconfig.co)
|
||||
# machine's interfaces
|
||||
$(ifconfig |
|
||||
awk '/inet addr:/ {gsub("addr:","",$2); print $2; next}
|
||||
/inet6 addr:/ {gsub("/.*", "", $3); print $3; next}
|
||||
/inet(6)? / {print $2}'
|
||||
)
|
||||
)
|
||||
|
||||
if (( ! ${#ipaddrs[*]} ))
|
||||
then
|
||||
echo "
|
||||
myip: error: I'm having trouble determining what our IP address is...
|
||||
Are we connected to a network?
|
||||
|
||||
"
|
||||
return 1
|
||||
fi
|
||||
|
||||
|
||||
declare prompt="
|
||||
Please choose the IP address you want to advertise to the network:
|
||||
|
||||
0) ${ipaddrs[0]} <====== this one was returned by the interwebs...
|
||||
"
|
||||
|
||||
for ((i=1; i < ${#ipaddrs[*]}; i++))
|
||||
do
|
||||
prompt+=" $i) ${ipaddrs[i]}
|
||||
"
|
||||
done
|
||||
|
||||
while read -r -p "${prompt}
|
||||
please enter a number [0 for default]: " which
|
||||
do
|
||||
[[ -z ${which} ]] && break;
|
||||
[[ ${which} =~ [0-9]+ ]] && (( which < ${#ipaddrs[*]} )) && break;
|
||||
echo "Ug. invalid entry \"${which}\"...
|
||||
"
|
||||
sleep 1
|
||||
done
|
||||
|
||||
which=${which:-0}
|
||||
|
||||
echo "${ipaddrs[which]}"
|
||||
|
||||
}
|
||||
|
||||
if [[ ${0} == "${BASH_SOURCE[0]}" ]]
|
||||
then
|
||||
myip "$@"
|
||||
fi
|
multinode-demo/oom_monitor.sh (new executable file, 32 lines)
@@ -0,0 +1,32 @@
#!/bin/bash -e
#
# Reports Linux OOM Killer activity
#

here=$(dirname "$0")
# shellcheck source=multinode-demo/common.sh
source "$here"/common.sh

if [[ $(uname) != Linux ]]; then
  exit 0
fi

syslog=/var/log/syslog
if [[ ! -r $syslog ]]; then
  echo Unable to read $syslog
  exit 0
fi

# Adjust OOM score to reduce the chance that this script will be killed
# during an Out of Memory event since the purpose of this script is to
# report such events
oom_score_adj "self" -500

while read -r victim; do
  echo "Out of memory event detected, $victim killed"
  "$here"/metrics_write_datapoint.sh "oom-killer,victim=$victim killed=1"
done < <( \
  tail --follow=name --retry -n0 $syslog \
  | sed --unbuffered -n 's/^.* Out of memory: Kill process [1-9][0-9]* (\([^)]*\)) .*/\1/p' \
)
exit 1
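To see what the `sed` extraction above matches, here is a self-contained check against a synthetic syslog line (the exact kernel log format is an assumption inferred from the pattern, not a captured log):

```bash
echo 'Jul 20 12:00:00 host kernel: Out of memory: Kill process 1234 (solana-fullnode) score 42' \
  | sed -n 's/^.* Out of memory: Kill process [1-9][0-9]* (\([^)]*\)) .*/\1/p'
# prints: solana-fullnode
```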
multinode-demo/remote_leader.sh (new executable file, 14 lines)
@@ -0,0 +1,14 @@
#!/bin/bash -e

[[ -n $FORCE ]] || exit

chmod 600 ~/.ssh/authorized_keys ~/.ssh/id_rsa

PATH="$HOME"/.cargo/bin:"$PATH"

./fetch-perf-libs.sh

# Run setup
USE_INSTALL=1 ./multinode-demo/setup.sh -p
USE_INSTALL=1 ./multinode-demo/drone.sh >drone.log 2>&1 &
USE_INSTALL=1 SOLANA_CUDA=1 ./multinode-demo/leader.sh >leader.log 2>&1 &
multinode-demo/remote_nodes.sh (new executable file, 185 lines)
@@ -0,0 +1,185 @@
#!/bin/bash

command=$1
ip_addr_file=
remote_user=
ssh_keys=

shift

usage() {
  exitcode=0
  if [[ -n "$1" ]]; then
    exitcode=1
    echo "Error: $*"
  fi
  cat <<EOF
usage: $0 <start|stop> <-f IP Addr Array file> <-u username> [-k ssh-keys]

Manage a GCE multinode network

 start|stop   - Create or delete the network
 -f file      - A bash script that exports an array of IP addresses, ip_addr_array.
                Elements of the array are public IP address of remote nodes.
 -u username  - The username for logging into remote nodes.
 -k ssh-keys  - Path to public/private key pair that remote nodes can use to perform
                rsync and ssh among themselves. Must contain pub and priv keys.

EOF
  exit $exitcode
}

while getopts "h?f:u:k:" opt; do
  case $opt in
  h | \?)
    usage
    ;;
  f)
    ip_addr_file=$OPTARG
    ;;
  u)
    remote_user=$OPTARG
    ;;
  k)
    ssh_keys=$OPTARG
    ;;
  *)
    usage "Error: unhandled option: $opt"
    ;;
  esac
done

set -e

# Sample IP Address array file contents
# ip_addr_array=(192.168.1.1 192.168.1.5 192.168.2.2)

[[ -n $command ]] || usage "Need a command (start|stop)"
[[ -n $ip_addr_file ]] || usage "Need a file with IP address array"
[[ -n $remote_user ]] || usage "Need the username for remote nodes"

ip_addr_array=()
# Get IP address array
# shellcheck source=/dev/null
source "$ip_addr_file"

build_project() {
  echo "Build started at $(date)"
  SECONDS=0

  # Build and install locally
  PATH="$HOME"/.cargo/bin:"$PATH"
  cargo install --force

  echo "Build took $SECONDS seconds"
}

common_start_setup() {
  ip_addr=$1

  # Killing sshguard for now. TODO: Find a better solution
  # sshguard is blacklisting IP address after ssh-keyscan and ssh login attempts
  ssh "$remote_user@$ip_addr" " \
    set -ex; \
    sudo service sshguard stop; \
    sudo apt-get --assume-yes install rsync libssl-dev; \
    mkdir -p ~/.ssh ~/solana ~/.cargo/bin; \
  " >log/"$ip_addr".log

  # If provided, deploy SSH keys
  if [[ -n $ssh_keys ]]; then
    {
      rsync -vPrz "$ssh_keys"/id_rsa "$remote_user@$ip_addr":~/.ssh/
      rsync -vPrz "$ssh_keys"/id_rsa.pub "$remote_user@$ip_addr":~/.ssh/
      rsync -vPrz "$ssh_keys"/id_rsa.pub "$remote_user@$ip_addr":~/.ssh/authorized_keys
      rsync -vPrz ./multinode-demo "$remote_user@$ip_addr":~/solana/
    } >>log/"$ip_addr".log
  fi
}

start_leader() {
  common_start_setup "$1"

  {
    rsync -vPrz ~/.cargo/bin/solana* "$remote_user@$ip_addr":~/.cargo/bin/
    rsync -vPrz ./fetch-perf-libs.sh "$remote_user@$ip_addr":~/solana/
    ssh -n -f "$remote_user@$ip_addr" 'cd solana; FORCE=1 ./multinode-demo/remote_leader.sh'
  } >>log/"$1".log

  leader_ip=$1
  leader_time=$SECONDS
  SECONDS=0
}

start_validator() {
  common_start_setup "$1"

  ssh -n -f "$remote_user@$ip_addr" "cd solana; FORCE=1 ./multinode-demo/remote_validator.sh $leader_ip" >>log/"$1".log
}

start_all_nodes() {
  echo "Deployment started at $(date)"
  SECONDS=0
  count=0
  leader_ip=
  leader_time=

  mkdir -p log

  for ip_addr in "${ip_addr_array[@]}"; do
    if ((!count)); then
      # Start the leader on the first node
      echo "Leader node $ip_addr, killing previous instance and restarting"
      start_leader "$ip_addr"
    else
      # Start validator on all other nodes
      echo "Validator[$count] node $ip_addr, killing previous instance and restarting"
      start_validator "$ip_addr" &
      # TBD: Remove the sleep or reduce time once GCP login quota is increased
      sleep 2
    fi

    ((count = count + 1))
  done

  wait

  ((validator_count = count - 1))

  echo "Deployment finished at $(date)"
  echo "Leader deployment took $leader_time seconds"
  echo "$validator_count Validator deployment took $SECONDS seconds"
}

stop_all_nodes() {
  SECONDS=0
  local count=0
  for ip_addr in "${ip_addr_array[@]}"; do
    ssh-keygen -R "$ip_addr" >log/local.log
    ssh-keyscan "$ip_addr" >>~/.ssh/known_hosts 2>/dev/null

    echo "Stopping node[$count] $ip_addr. Remote user $remote_user"

    ssh -n -f "$remote_user@$ip_addr" " \
      set -ex; \
      sudo service sshguard stop; \
      pkill -9 solana-; \
      pkill -9 validator; \
      pkill -9 leader; \
    "
    sleep 2
    ((count = count + 1))
    echo "Stopped node[$count] $ip_addr"
  done
  echo "Stopping $count nodes took $SECONDS seconds"
}

if [[ $command == "start" ]]; then
  build_project
  stop_all_nodes
  start_all_nodes
elif [[ $command == "stop" ]]; then
  stop_all_nodes
else
  usage "Unknown command: $command"
fi
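A hedged invocation, following the built-in usage text (the file name, user, and key path are illustrative):

```bash
# ip_addrs.sh must export ip_addr_array, e.g. ip_addr_array=(192.168.1.1 192.168.1.5)
./multinode-demo/remote_nodes.sh start -f ip_addrs.sh -u ubuntu -k ~/net-keys
./multinode-demo/remote_nodes.sh stop -f ip_addrs.sh -u ubuntu
```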
multinode-demo/remote_validator.sh (new executable file, 17 lines)
@@ -0,0 +1,17 @@
#!/bin/bash -e

[[ -n $FORCE ]] || exit

chmod 600 ~/.ssh/authorized_keys ~/.ssh/id_rsa

PATH="$HOME"/.cargo/bin:"$PATH"

touch ~/.ssh/known_hosts
ssh-keygen -R "$1" 2>/dev/null
ssh-keyscan "$1" >>~/.ssh/known_hosts 2>/dev/null

rsync -vPrz "$1":~/.cargo/bin/solana* ~/.cargo/bin/

# Run setup
USE_INSTALL=1 ./multinode-demo/setup.sh -p
USE_INSTALL=1 ./multinode-demo/validator.sh "$1":~/solana "$1" >validator.log 2>&1
@@ -71,35 +71,40 @@ done

leader_address_args=("$ip_address_arg")
validator_address_args=("$ip_address_arg" -b 9000)
leader_id_path="$SOLANA_CONFIG_PRIVATE_DIR"/leader-id.json
validator_id_path="$SOLANA_CONFIG_PRIVATE_DIR"/validator-id.json
mint_path="$SOLANA_CONFIG_PRIVATE_DIR"/mint.json

set -e

echo "Cleaning $SOLANA_CONFIG_DIR"
rm -rvf "$SOLANA_CONFIG_DIR"
mkdir -p "$SOLANA_CONFIG_DIR"
for i in "$SOLANA_CONFIG_DIR" "$SOLANA_CONFIG_PRIVATE_DIR" "$SOLANA_CONFIG_VALIDATOR_DIR"; do
  echo "Cleaning $i"
  rm -rvf "$i"
  mkdir -p "$i"
done


$solana_keygen -o "$leader_id_path"
$solana_keygen -o "$validator_id_path"

if $node_type_leader; then
  rm -rvf "$SOLANA_CONFIG_PRIVATE_DIR"
  mkdir -p "$SOLANA_CONFIG_PRIVATE_DIR"
  echo "Creating $mint_path with $num_tokens tokens"
  $solana_keygen -o "$mint_path"

  echo "Creating $SOLANA_CONFIG_DIR/mint.json with $num_tokens tokens"
  $solana_mint <<<"$num_tokens" > "$SOLANA_CONFIG_PRIVATE_DIR"/mint.json

  echo "Creating $SOLANA_CONFIG_DIR/genesis.log"
  $solana_genesis < "$SOLANA_CONFIG_PRIVATE_DIR"/mint.json > "$SOLANA_CONFIG_DIR"/genesis.log
  echo "Creating $SOLANA_CONFIG_DIR/ledger"
  $solana_genesis --tokens="$num_tokens" --ledger "$SOLANA_CONFIG_DIR"/ledger < "$mint_path"

  echo "Creating $SOLANA_CONFIG_DIR/leader.json"
  $solana_fullnode_config "${leader_address_args[@]}" > "$SOLANA_CONFIG_DIR"/leader.json
  $solana_fullnode_config --keypair="$leader_id_path" "${leader_address_args[@]}" > "$SOLANA_CONFIG_DIR"/leader.json
fi


if $node_type_validator; then
  echo "Creating $SOLANA_CONFIG_DIR/validator.json"
  $solana_fullnode_config "${validator_address_args[@]}" > "$SOLANA_CONFIG_DIR"/validator.json
  echo "Creating $SOLANA_CONFIG_VALIDATOR_DIR/validator.json"
  $solana_fullnode_config --keypair="$validator_id_path" "${validator_address_args[@]}" > "$SOLANA_CONFIG_VALIDATOR_DIR"/validator.json
fi

ls -lh "$SOLANA_CONFIG_DIR"/
ls -lhR "$SOLANA_CONFIG_DIR"/
if $node_type_leader; then
  ls -lh "$SOLANA_CONFIG_PRIVATE_DIR"
  ls -lhR "$SOLANA_CONFIG_PRIVATE_DIR"
fi

@ -1,107 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
ip_addr_file=$1
|
||||
remote_user=$2
|
||||
ssh_keys=$3
|
||||
|
||||
usage() {
|
||||
echo -e "\\tUsage: $0 <IP Address array> <username> [path to ssh keys]\\n"
|
||||
echo -e "\\t <IP Address array>: A bash script that exports an array of IP addresses, ip_addr_array. Elements of the array are public IP address of remote nodes."
|
||||
echo -e "\\t <username>: The username for logging into remote nodes."
|
||||
echo -e "\\t [path to ssh keys]: The public/private key pair that remote nodes can use to perform rsync and ssh among themselves. Must contain pub, priv and authorized_keys.\\n"
|
||||
exit 1
|
||||
}
|
||||
|
||||
# Sample IP Address array file contents
|
||||
# ip_addr_array=(192.168.1.1 192.168.1.5 192.168.2.2)
|
||||
|
||||
if [[ -z "$ip_addr_file" ]]; then
|
||||
usage
|
||||
fi
|
||||
|
||||
if [[ -z "$remote_user" ]]; then
|
||||
usage
|
||||
fi
|
||||
|
||||
echo "Build started at $(date)"
|
||||
SECONDS=0
|
||||
# Build and install locally
|
||||
PATH="$HOME"/.cargo/bin:"$PATH"
|
||||
cargo install --force
|
||||
|
||||
echo "Build took $SECONDS seconds"
|
||||
|
||||
ip_addr_array=()
|
||||
# Get IP address array
|
||||
# shellcheck source=/dev/null
|
||||
source "$ip_addr_file"
|
||||
|
||||
# shellcheck disable=SC2089,SC2016
|
||||
ssh_command_prefix='export PATH="$HOME/.cargo/bin:$PATH"; cd solana; USE_INSTALL=1'
|
||||
|
||||
echo "Deployment started at $(date)"
|
||||
SECONDS=0
|
||||
count=0
|
||||
leader=
|
||||
for ip_addr in "${ip_addr_array[@]}"; do
|
||||
echo "$ip_addr"
|
||||
|
||||
ssh-keygen -R "$ip_addr"
|
||||
ssh-keyscan "$ip_addr" >>~/.ssh/known_hosts
|
||||
|
||||
ssh -n -f "$remote_user@$ip_addr" 'mkdir -p ~/.ssh ~/solana ~/.cargo/bin'
|
||||
|
||||
# Killing sshguard for now. TODO: Find a better solution
|
||||
# sshguard is blacklisting IP address after ssh-keyscan and ssh login attempts
|
||||
ssh -n -f "$remote_user@$ip_addr" "sudo service sshguard stop"
|
||||
ssh -n -f "$remote_user@$ip_addr" 'sudo apt-get --assume-yes install rsync libssl-dev'
|
||||
|
||||
# If provided, deploy SSH keys
|
||||
if [[ -z $ssh_keys ]]; then
|
||||
echo "skip copying the ssh keys"
|
||||
else
|
||||
rsync -vPrz "$ssh_keys"/id_rsa "$remote_user@$ip_addr":~/.ssh/
|
||||
rsync -vPrz "$ssh_keys"/id_rsa.pub "$remote_user@$ip_addr":~/.ssh/
|
||||
rsync -vPrz "$ssh_keys"/id_rsa.pub "$remote_user@$ip_addr":~/.ssh/authorized_keys
|
||||
ssh -n -f "$remote_user@$ip_addr" 'chmod 600 ~/.ssh/authorized_keys ~/.ssh/id_rsa'
|
||||
fi
|
||||
|
||||
# Stop current nodes
|
||||
ssh "$remote_user@$ip_addr" 'pkill -9 solana-'
|
||||
|
||||
if [[ -n $leader ]]; then
|
||||
echo "Adding known hosts for $ip_addr"
|
||||
ssh -n -f "$remote_user@$ip_addr" "ssh-keygen -R $leader"
|
||||
ssh -n -f "$remote_user@$ip_addr" "ssh-keyscan $leader >> ~/.ssh/known_hosts"
|
||||
|
||||
ssh -n -f "$remote_user@$ip_addr" "rsync -vPrz ""$remote_user@$leader"":~/.cargo/bin/solana* ~/.cargo/bin/"
|
||||
ssh -n -f "$remote_user@$ip_addr" "rsync -vPrz ""$remote_user@$leader"":~/solana/multinode-demo ~/solana/"
|
||||
ssh -n -f "$remote_user@$ip_addr" "rsync -vPrz ""$remote_user@$leader"":~/solana/fetch-perf-libs.sh ~/solana/"
|
||||
else
|
||||
# Deploy build and scripts to remote node
|
||||
rsync -vPrz ~/.cargo/bin/solana* "$remote_user@$ip_addr":~/.cargo/bin/
|
||||
rsync -vPrz ./multinode-demo "$remote_user@$ip_addr":~/solana/
|
||||
rsync -vPrz ./fetch-perf-libs.sh "$remote_user@$ip_addr":~/solana/
|
||||
fi
|
||||
|
||||
# Run setup
|
||||
ssh "$remote_user@$ip_addr" "$ssh_command_prefix"' ./multinode-demo/setup.sh -p "$ip_addr"'
|
||||
|
||||
if ((!count)); then
|
||||
# Start the leader on the first node
|
||||
echo "Starting leader node $ip_addr"
|
||||
ssh -n -f "$remote_user@$ip_addr" 'cd solana; ./fetch-perf-libs.sh'
|
||||
ssh -n -f "$remote_user@$ip_addr" "$ssh_command_prefix"' SOLANA_CUDA=1 ./multinode-demo/leader.sh > leader.log 2>&1'
|
||||
ssh -n -f "$remote_user@$ip_addr" "$ssh_command_prefix"' ./multinode-demo/drone.sh > drone.log 2>&1'
|
||||
leader=${ip_addr_array[0]}
|
||||
else
|
||||
# Start validator on all other nodes
|
||||
echo "Starting validator node $ip_addr"
|
||||
ssh -n -f "$remote_user@$ip_addr" "$ssh_command_prefix"" ./multinode-demo/validator.sh $remote_user@$leader:~/solana $leader > validator.log 2>&1"
|
||||
fi
|
||||
|
||||
((count++))
|
||||
done
|
||||
|
||||
echo "Deployment finished at $(date)"
|
||||
echo "Deployment took $SECONDS seconds"
|
@@ -6,7 +6,13 @@
here=$(dirname "$0")
cd "$here"

wallet="../wallet.sh $1"
if [[ -n "$USE_SNAP" ]]; then
  # TODO: Merge wallet.sh functionality into solana-wallet proper and
  # remove this USE_SNAP case
  wallet="solana.wallet $1"
else
  wallet="../wallet.sh $1"
fi

# Tokens transferred to this address are lost forever...
garbage_address=vS3ngn1TfQmpsW1Z4NkLuqNAQFF3dYQw8UZ6TCx9bmq

multinode-demo/validator-x.sh (new executable file, 4 lines)
@@ -0,0 +1,4 @@
#!/bin/bash
here=$(dirname "$0")

exec "$here"/validator.sh -x "$@"
@@ -4,77 +4,108 @@ here=$(dirname "$0")
source "$here"/common.sh

usage() {
  if [[ -n "$1" ]]; then
  if [[ -n $1 ]]; then
    echo "$*"
    echo
  fi
  echo "usage: $0 [rsync network path to solana repo on leader machine] [network ip address of leader]"
  echo "usage: $0 [-x] [rsync network path to solana repo on leader machine] [network ip address of leader]"
  echo ""
  echo " -x: runs a new, dynamically-configured validator"
  exit 1
}

if [[ "$1" = "-h" || -n "$3" ]]; then
if [[ $1 = -h ]]; then
  usage
fi

if [[ -d "$SNAP" ]]; then
if [[ $1 == -x ]]; then
  self_setup=1
  shift
else
  self_setup=0
fi

if [[ -n $3 ]]; then
  usage
fi

if [[ -d $SNAP ]]; then
  # Exit if mode is not yet configured
  # (typically the case after the Snap is first installed)
  [[ -n "$(snapctl get mode)" ]] || exit 0
  [[ -n $(snapctl get mode) ]] || exit 0

  # Select leader from the Snap configuration
  leader_address="$(snapctl get leader-address)"
  if [[ -z "$leader_address" ]]; then
  leader_address=$(snapctl get leader-address)
  if [[ -z $leader_address ]]; then
    # Assume public testnet by default
    leader_address=35.230.65.68 # testnet.solana.com
    leader_address=35.227.93.37 # testnet.solana.com
  fi
  leader="$leader_address"
  leader=$leader_address
else
  if [[ -n "$3" ]]; then
    usage
  fi

  if [[ -z "$1" ]]; then
    leader=${1:-${here}/..} # Default to local solana repo
  if [[ -z $1 ]]; then
    leader=${1:-${here}/..} # Default to local tree for data
    leader_address=${2:-127.0.0.1} # Default to local leader
  elif [[ -z "$2" ]]; then
    leader="$1"
    leader_address=$(dig +short "$1" | head -n1)
    if [[ -z "$leader_address" ]]; then
  elif [[ -z $2 ]]; then
    leader=$1
    leader_address=$(dig +short "${leader%:*}" | head -n1)
    if [[ -z $leader_address ]]; then
      usage "Error: unable to resolve IP address for $leader"
    fi
  else
    leader="$1"
    leader_address="$2"
    leader=$1
    leader_address=$2
  fi
fi
leader_port=8001

if [[ -n "$SOLANA_CUDA" ]]; then
  program="$solana_fullnode_cuda"
if [[ -n $SOLANA_CUDA ]]; then
  program=$solana_fullnode_cuda
else
  program="$solana_fullnode"
  program=$solana_fullnode
fi

if ((!self_setup)); then
  [[ -f $SOLANA_CONFIG_VALIDATOR_DIR/validator.json ]] || {
    echo "$SOLANA_CONFIG_VALIDATOR_DIR/validator.json not found, create it by running:"
    echo
    echo "  ${here}/setup.sh"
    exit 1
  }
  validator_json_path=$SOLANA_CONFIG_VALIDATOR_DIR/validator.json
  SOLANA_LEADER_CONFIG_DIR=$SOLANA_CONFIG_VALIDATOR_DIR/leader-config
else
  mkdir -p "$SOLANA_CONFIG_PRIVATE_DIR"
  validator_id_path=$SOLANA_CONFIG_PRIVATE_DIR/validator-id-x$$.json
  $solana_keygen -o "$validator_id_path"

  [[ -f "$SOLANA_CONFIG_DIR"/validator.json ]] || {
    echo "$SOLANA_CONFIG_DIR/validator.json not found, create it by running:"
    echo
    echo "  ${here}/setup.sh -t validator"
    exit 1
  }
  mkdir -p "$SOLANA_CONFIG_VALIDATOR_DIR"
  validator_json_path=$SOLANA_CONFIG_VALIDATOR_DIR/validator-x$$.json

  port=9000
  (((port += ($$ % 1000)) && (port == 9000) && port++))

  $solana_fullnode_config --keypair="$validator_id_path" -l -b "$port" > "$validator_json_path"

  SOLANA_LEADER_CONFIG_DIR=$SOLANA_CONFIG_VALIDATOR_DIR/leader-config-x$$
fi

rsync_leader_url=$(rsync_url "$leader")

set -ex
SOLANA_LEADER_CONFIG_DIR="$SOLANA_CONFIG_DIR"/leader-config
rm -rf "$SOLANA_LEADER_CONFIG_DIR"
$rsync -vPrz "$rsync_leader_url"/config/ "$SOLANA_LEADER_CONFIG_DIR"
ls -lh "$SOLANA_LEADER_CONFIG_DIR"

tune_networking

# shellcheck disable=SC2086 # $program should not be quoted
exec $program \
  -l "$SOLANA_CONFIG_DIR"/validator.json -t "$leader_address:$leader_port" \
  < <(shopt -s nullglob && cat "$SOLANA_LEADER_CONFIG_DIR"/genesis.log \
    "$SOLANA_LEADER_CONFIG_DIR"/tx-*.log)
set -ex
$rsync -vPr "$rsync_leader_url"/config/ "$SOLANA_LEADER_CONFIG_DIR"
[[ -d $SOLANA_LEADER_CONFIG_DIR/ledger ]] || {
  echo "Unable to retrieve ledger from $rsync_leader_url"
  exit 1
}

trap 'kill "$pid" && wait "$pid"' INT TERM
$program \
  --identity "$validator_json_path" \
  --testnet "$leader_address:$leader_port" \
  --ledger "$SOLANA_LEADER_CONFIG_DIR"/ledger \
  > >($validator_logger) 2>&1 &
pid=$!
oom_score_adj "$pid" 1000
wait "$pid"
@@ -30,18 +30,16 @@ rsync_leader_url=$(rsync_url "$leader")
set -e
mkdir -p "$SOLANA_CONFIG_CLIENT_DIR"
if [[ ! -r "$SOLANA_CONFIG_CLIENT_DIR"/leader.json ]]; then
  (
    set -x
    $rsync -vPz "$rsync_leader_url"/config/leader.json "$SOLANA_CONFIG_CLIENT_DIR"/
  )
  echo "Fetching leader configuration from $rsync_leader_url"
  $rsync -Pz "$rsync_leader_url"/config/leader.json "$SOLANA_CONFIG_CLIENT_DIR"/
fi

client_json="$SOLANA_CONFIG_CLIENT_DIR"/client.json
if [[ ! -r $client_json ]]; then
  $solana_mint <<<0 > "$client_json"
client_id_path="$SOLANA_CONFIG_CLIENT_DIR"/id.json
if [[ ! -r $client_id_path ]]; then
  echo "Generating client identity: $client_id_path"
  $solana_keygen -o "$client_id_path"
fi

set -x
# shellcheck disable=SC2086 # $solana_wallet should not be quoted
exec $solana_wallet \
  -l "$SOLANA_CONFIG_CLIENT_DIR"/leader.json -m "$client_json" "$@"
  -l "$SOLANA_CONFIG_CLIENT_DIR"/leader.json -k "$client_id_path" "$@"

rfcs/rfc-004-tictactoe-program.md (new file, 77 lines)
@@ -0,0 +1,77 @@
Two players want to play tic-tac-toe with each other on Solana.

The tic-tac-toe program has already been provisioned on the network, and the
program author has advertised the following information to potential gamers:
* `tictactoe_publickey` - the program's public key
* `tictactoe_gamestate_size` - the number of bytes needed to maintain the game state

The game state is a well-documented data structure consisting of:
- Player 1's public key
- Player 2's public key
- Game status. An 8-bit value where:
  * 0 = game uninitialized
  * 1 = Player 1's turn
  * 2 = Player 2's turn
  * 3 = Player 1 won
  * 4 = Player 2 won
- Current board configuration. A 3x3 character array containing the values '\0', 'X' or 'O'

### Game Setup

1. Two players want to start a game. Player 2 sends Player 1 their public key,
`player2_publickey` off-chain (IM, email, etc)

2. Player 1 creates a new keypair to represent the game state, `(gamestate_publickey,
gamestate_privatekey)`.

3. Player 1 issues an allocate_memory transaction, assigning that memory page to the
tic-tac-toe program. The `memory_fee` is used to *rent* the memory page for the
duration of the game and is subtracted from the current account balance of Player
1:
```
allocate_memory(gamestate_publickey, tictactoe_publickey, tictactoe_gamestate_size, memory_fee)
```

4. Game state is then initialized by issuing a *new* call transaction to the
tic-tac-toe program. This transaction is signed by `gamestate_privatekey`, known only
to Player 1.
```
call(tictactoe_publickey, gamestate_publickey, 'new', player1_publickey, player2_publickey)
```

5. Once the game is initialized, Player 1 shares `gamestate_publickey` with
Player 2 off-chain (IM, email, etc)

Note that each player will likely prefer to generate a game-specific keypair
rather than sharing their primary public key (`player1_publickey`,
`player2_publickey`) with each other and the tic-tac-toe program.

### Game Play

Both players poll the network, via a **TBD off-chain RPC API**, to read the
current game state from the `gamestate_publickey` memory page.

When the *Game status* field indicates it's their turn, the player issues a
*move* call transaction passing in the board position (1..9) that they want to
mark as X or O:
```
call(tictactoe_publickey, gamestate_publickey, 'move', position)
```
The program will reject the transaction if it was not signed by the player whose
turn it is.

The outcome of the *move* call is also observed by polling the current game state via
the **TBD off-chain RPC API**.

### Game Cancellation

At any time Player 1 may conclude the game by issuing:
```
call(tictactoe_publickey, gamestate_publickey, 'abort')
```
causing any remaining *rent* tokens assigned to the `gamestate_publickey` page
to be transferred back to Player 1 by the tic-tac-toe program. Lastly, the
network recognizes the empty account and frees the `gamestate_publickey` memory
page.
snap/hooks/configure (vendored, 15 lines changed)
@@ -4,27 +4,30 @@ echo Stopping daemons
snapctl stop --disable solana.daemon-drone
snapctl stop --disable solana.daemon-leader
snapctl stop --disable solana.daemon-validator
snapctl stop --disable solana.daemon-oom-monitor

mode="$(snapctl get mode)"
if [[ -z "$mode" ]]; then
  exit 0
fi

ip_address_arg=-p # Use public IP address (TODO: make this configurable?)
num_tokens="$(snapctl get num-tokens)"
num_tokens="${num_tokens:+-n $num_tokens}"

setup_args="$(snapctl get setup-args)"

case $mode in
leader+drone)
  $SNAP/bin/setup.sh ${num_tokens:+-n $num_tokens} ${ip_address_arg} -t leader
  snapctl start --enable solana.daemon-leader
  "$SNAP"/bin/setup.sh -t leader $num_tokens -p $setup_args
  snapctl start --enable solana.daemon-drone
  snapctl start --enable solana.daemon-leader
  ;;
leader)
  $SNAP/bin/setup.sh ${num_tokens:+-n $num_tokens} ${ip_address_arg} -t leader
  "$SNAP"/bin/setup.sh -t leader $num_tokens -p $setup_args
  snapctl start --enable solana.daemon-leader
  ;;
validator)
  $SNAP/bin/setup.sh ${ip_address_arg} -t validator
  "$SNAP"/bin/setup.sh -t validator -p $setup_args
  snapctl start --enable solana.daemon-validator
  ;;
*)
@@ -32,3 +35,5 @@ validator)
  exit 1
  ;;
esac

snapctl start --enable solana.daemon-oom-monitor
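This hook reacts to configuration set from the host. A hedged example of configuring and starting a validator (the leader address is illustrative, and mirrors what testnet-deploy passes via `sudo snap set solana $nodeConfig`):

```bash
sudo snap set solana mode=validator leader-address=testnet.solana.com
# Clearing the mode runs the hook again, which stops the daemons and exits early
sudo snap set solana mode=
```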
@@ -37,40 +37,73 @@ apps:
    plugs:
      - network
      - network-bind
      - home
  genesis:
    command: solana-genesis
  mint:
    command: solana-mint
  client-demo:
    command: solana-client-demo
  keygen:
    command: solana-keygen
    plugs:
      - home
  ledger-tool:
    command: solana-ledger-tool
    plugs:
      - home
  bench-tps:
    # TODO: Merge client.sh functionality into solana-bench-tps proper
    command: client.sh
    #command: solana-bench-tps
    plugs:
      - network
      - network-bind
      - home
  wallet:
    # TODO: Merge wallet.sh functionality into solana-wallet proper
    command: wallet.sh
    #command: solana-wallet

    plugs:
      - network
      - home
  daemon-validator:
    daemon: simple
    command: validator.sh

    plugs:
      - network
      - network-bind
  daemon-leader:
    daemon: simple
    command: leader.sh

    plugs:
      - network
      - network-bind
  daemon-drone:
    daemon: simple
    command: drone.sh
    plugs:
      - network
      - network-bind
  daemon-oom-monitor:
    daemon: simple
    command: oom_monitor.sh
    plugs:
      - network

parts:
  solana:
    plugin: nil
    prime:
      - bin
      - usr/lib/libgf_complete.so.1
      - usr/lib/libJerasure.so.2
      - usr/lib
    override-build: |
      # Install CUDA 9.2 runtime
      mkdir -p $SNAPCRAFT_PART_INSTALL/usr/lib/nvidia-396/
      mkdir -p $SNAPCRAFT_PART_INSTALL/usr/lib/x86_64-linux-gnu/
      cp -rav /usr/local/cuda-9.2/targets/x86_64-linux/lib/libcudart.so* $SNAPCRAFT_PART_INSTALL/usr/lib
      cp -rav /usr/lib/x86_64-linux-gnu/libcuda.so* $SNAPCRAFT_PART_INSTALL/usr/lib/x86_64-linux-gnu/
      cp -v /usr/lib/nvidia-396/libnvidia-fatbinaryloader.so* $SNAPCRAFT_PART_INSTALL/usr/lib/nvidia-396/

      # Build/install solana-fullnode-cuda
      ./fetch-perf-libs.sh
      cargo install --features=cuda,erasure --root $SNAPCRAFT_PART_INSTALL --bin solana-fullnode
      cargo install --features=cuda --root $SNAPCRAFT_PART_INSTALL --bin solana-fullnode
      mv $SNAPCRAFT_PART_INSTALL/bin/solana-fullnode $SNAPCRAFT_PART_INSTALL
      rm -rf $SNAPCRAFT_PART_INSTALL/bin/*
      mv $SNAPCRAFT_PART_INSTALL/solana-fullnode $SNAPCRAFT_PART_INSTALL/bin/solana-fullnode-cuda
@@ -85,8 +118,10 @@ parts:
      mkdir -p $SNAPCRAFT_PART_INSTALL/bin
      cp -av multinode-demo/* $SNAPCRAFT_PART_INSTALL/bin/

      # TODO: build rsync from source instead of sneaking it in from the host
      # TODO: build curl,rsync/multilog from source instead of sneaking it in from the host
      # system...
      set -x
      mkdir -p $SNAPCRAFT_PART_INSTALL/bin
      cp -av /usr/bin/curl $SNAPCRAFT_PART_INSTALL/bin/
      cp -av /usr/bin/multilog $SNAPCRAFT_PART_INSTALL/bin/
      cp -av /usr/bin/rsync $SNAPCRAFT_PART_INSTALL/bin/
src/bank.rs (normal file → executable file, 344 lines changed)
@@ -6,22 +6,25 @@
|
||||
extern crate libc;
|
||||
|
||||
use chrono::prelude::*;
|
||||
use counter::Counter;
|
||||
use entry::Entry;
|
||||
use hash::Hash;
|
||||
use itertools::Itertools;
|
||||
use ledger::Block;
|
||||
use log::Level;
|
||||
use mint::Mint;
|
||||
use payment_plan::{Payment, PaymentPlan, Witness};
|
||||
use signature::{KeyPair, PublicKey, Signature};
|
||||
use signature::{Keypair, Pubkey, Signature};
|
||||
use std;
|
||||
use std::collections::hash_map::Entry::Occupied;
|
||||
use std::collections::{HashMap, HashSet, VecDeque};
|
||||
use std::result;
|
||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
use std::sync::RwLock;
|
||||
use std::time::Instant;
|
||||
use streamer::WINDOW_SIZE;
|
||||
use timing::duration_as_us;
|
||||
use timing::{duration_as_us, timestamp};
|
||||
use transaction::{Instruction, Plan, Transaction};
|
||||
use window::WINDOW_SIZE;
|
||||
|
||||
/// The number of most recent `last_id` values that the bank will track the signatures
|
||||
/// of. Once the bank discards a `last_id`, it will reject any transactions that use
|
||||
@ -36,13 +39,13 @@ pub const VERIFY_BLOCK_SIZE: usize = 16;
|
||||
/// Reasons a transaction might be rejected.
|
||||
#[derive(Debug, PartialEq, Eq)]
|
||||
pub enum BankError {
|
||||
/// Attempt to debit from `PublicKey`, but no found no record of a prior credit.
|
||||
AccountNotFound(PublicKey),
|
||||
/// Attempt to debit from `Pubkey`, but no found no record of a prior credit.
|
||||
AccountNotFound(Pubkey),
|
||||
|
||||
/// The requested debit from `PublicKey` has the potential to draw the balance
|
||||
/// The requested debit from `Pubkey` has the potential to draw the balance
|
||||
/// below zero. This can occur when a debit and credit are processed in parallel.
|
||||
/// The bank may reject the debit or push it to a future entry.
|
||||
InsufficientFunds(PublicKey),
|
||||
InsufficientFunds(Pubkey),
|
||||
|
||||
/// The bank has seen `Signature` before. This can occur under normal operation
|
||||
/// when a UDP packet is duplicated, as a user error from a client not updating
|
||||
@ -66,7 +69,7 @@ pub type Result<T> = result::Result<T, BankError>;
|
||||
/// The state of all accounts and contracts after processing its entries.
pub struct Bank {
    /// A map of account public keys to the balance in that account.
    balances: RwLock<HashMap<PublicKey, i64>>,
    balances: RwLock<HashMap<Pubkey, i64>>,

    /// A map of smart contract transaction signatures to what remains of its payment
    /// plan. Each transaction that targets the plan should cause it to be reduced.
@@ -78,13 +81,20 @@ pub struct Bank {
    /// values are so old that the `last_id` has been pulled out of the queue.
    last_ids: RwLock<VecDeque<Hash>>,

    // Mapping of hashes to signature sets. The bank uses this data to
    /// Mapping of hashes to signature sets along with timestamp. The bank uses this data to
    /// reject transactions with signatures it has seen before
    last_ids_sigs: RwLock<HashMap<Hash, HashSet<Signature>>>,
    last_ids_sigs: RwLock<HashMap<Hash, (HashSet<Signature>, u64)>>,
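The widened `last_ids_sigs` entry pairs each signature set with the time its `last_id` was registered, which is what lets `count_valid_ids` (below) report how stale an id is. A minimal sketch of the same bookkeeping in isolation, with placeholder `Hash`/`Signature` widths rather than the crate's own types:

use std::collections::{HashMap, HashSet};
use std::time::{SystemTime, UNIX_EPOCH};

type Hash = [u8; 32];
type Signature = [u8; 8]; // placeholder width, not the real 64-byte signature

// Milliseconds since the epoch, mirroring what timing::timestamp() provides.
fn timestamp() -> u64 {
    SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_millis() as u64
}

// Register an id: an empty signature set plus the registration time.
fn register(map: &mut HashMap<Hash, (HashSet<Signature>, u64)>, id: Hash) {
    map.insert(id, (HashSet::new(), timestamp()));
}

// Reserve a signature under an id; false means duplicate or unknown id.
fn reserve(map: &mut HashMap<Hash, (HashSet<Signature>, u64)>, id: &Hash, sig: Signature) -> bool {
    map.get_mut(id).map_or(false, |entry| entry.0.insert(sig))
}

fn main() {
    let mut map = HashMap::new();
    let id = [0u8; 32];
    register(&mut map, id);
    assert!(reserve(&mut map, &id, [1u8; 8])); // first use: accepted
    assert!(!reserve(&mut map, &id, [1u8; 8])); // duplicate: rejected
}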

    /// The number of transactions the bank has processed without error since the
    /// start of the ledger.
    transaction_count: AtomicUsize,

    /// This bool allows us to submit metrics that are specific for leaders or validators
    /// It is set to `true` by fullnode before creating the bank.
    pub is_leader: bool,

    // The latest finality time for the network
    finality_time: AtomicUsize,
}

impl Default for Bank {
@@ -95,11 +105,19 @@ impl Default for Bank {
            last_ids: RwLock::new(VecDeque::new()),
            last_ids_sigs: RwLock::new(HashMap::new()),
            transaction_count: AtomicUsize::new(0),
            is_leader: true,
            finality_time: AtomicUsize::new(std::usize::MAX),
        }
    }
}
impl Bank {
    /// Create a default Bank
    pub fn new_default(is_leader: bool) -> Self {
        let mut bank = Bank::default();
        bank.is_leader = is_leader;
        bank
    }
    /// Create a Bank using a deposit.
    pub fn new_from_deposit(deposit: &Payment) -> Self {
        let bank = Self::default();
@@ -119,7 +137,7 @@ impl Bank {
    }

    /// Commit funds to the `payment.to` party.
    fn apply_payment(&self, payment: &Payment, balances: &mut HashMap<PublicKey, i64>) {
    fn apply_payment(&self, payment: &Payment, balances: &mut HashMap<Pubkey, i64>) {
        *balances.entry(payment.to).or_insert(0) += payment.tokens;
    }

@@ -134,11 +152,11 @@ impl Bank {
    }

    /// Store the given signature. The bank will reject any transaction with the same signature.
    fn reserve_signature(signatures: &mut HashSet<Signature>, sig: &Signature) -> Result<()> {
        if let Some(sig) = signatures.get(sig) {
            return Err(BankError::DuplicateSignature(*sig));
    fn reserve_signature(signatures: &mut HashSet<Signature>, signature: &Signature) -> Result<()> {
        if let Some(signature) = signatures.get(signature) {
            return Err(BankError::DuplicateSignature(*signature));
        }
        signatures.insert(*sig);
        signatures.insert(*signature);
        Ok(())
    }

@@ -149,64 +167,92 @@ impl Bank {

    /// Forget the given `signature` with `last_id` because the transaction was rejected.
    fn forget_signature_with_last_id(&self, signature: &Signature, last_id: &Hash) {
        if let Some(entry) = self.last_ids_sigs
        if let Some(entry) = self
            .last_ids_sigs
            .write()
            .expect("'last_ids' read lock in forget_signature_with_last_id")
            .get_mut(last_id)
        {
            Self::forget_signature(entry, signature);
            Self::forget_signature(&mut entry.0, signature);
        }
    }

    /// Forget all signatures. Useful for benchmarking.
    pub fn clear_signatures(&self) {
        for (_, sigs) in self.last_ids_sigs.write().unwrap().iter_mut() {
            sigs.clear();
            sigs.0.clear();
        }
    }

    fn reserve_signature_with_last_id(&self, signature: &Signature, last_id: &Hash) -> Result<()> {
        if let Some(entry) = self.last_ids_sigs
        if let Some(entry) = self
            .last_ids_sigs
            .write()
            .expect("'last_ids' read lock in reserve_signature_with_last_id")
            .get_mut(last_id)
        {
            return Self::reserve_signature(entry, signature);
            return Self::reserve_signature(&mut entry.0, signature);
        }
        Err(BankError::LastIdNotFound(*last_id))
    }

    /// Look through the last_ids and find all the valid ids
    /// This is batched to avoid holding the lock for a significant amount of time
    ///
    /// Returns a vec of (valid index, timestamp) tuples, where
    /// index is into the passed ids slice to avoid copying hashes
    pub fn count_valid_ids(&self, ids: &[Hash]) -> Vec<(usize, u64)> {
        let last_ids = self.last_ids_sigs.read().unwrap();
        let mut ret = Vec::new();
        for (i, id) in ids.iter().enumerate() {
            if let Some(entry) = last_ids.get(id) {
                ret.push((i, entry.1));
            }
        }
        ret
    }

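Because the returned indices point back into the caller's slice, a consumer can pair each valid id with its registration time without cloning any hashes. A hypothetical caller, assuming the `Bank` and `Hash` types from this file:

fn report_valid_ids(bank: &Bank, ids: &[Hash]) {
    // each tuple is (index into `ids`, registration timestamp in ms)
    for (index, registered_at) in bank.count_valid_ids(ids) {
        println!("{:?} registered at {} ms", ids[index], registered_at);
    }
}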
    /// Tell the bank which Entry IDs exist on the ledger. This function
    /// assumes subsequent calls correspond to later entries, and will boot
    /// the oldest ones once its internal cache is full. Once booted, the
    /// bank will reject transactions using that `last_id`.
    pub fn register_entry_id(&self, last_id: &Hash) {
        let mut last_ids = self.last_ids
        let mut last_ids = self
            .last_ids
            .write()
            .expect("'last_ids' write lock in register_entry_id");
        let mut last_ids_sigs = self.last_ids_sigs
        let mut last_ids_sigs = self
            .last_ids_sigs
            .write()
            .expect("last_ids_sigs write lock");
        if last_ids.len() >= MAX_ENTRY_IDS {
            let id = last_ids.pop_front().unwrap();
            last_ids_sigs.remove(&id);
        }
        last_ids_sigs.insert(*last_id, HashSet::new());
        last_ids_sigs.insert(*last_id, (HashSet::new(), timestamp()));
        last_ids.push_back(*last_id);
    }

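`register_entry_id` keeps a `VecDeque` for insertion order and the `HashMap` for lookup, evicting the oldest id once `MAX_ENTRY_IDS` entries are held. The same bounded-cache shape in isolation (toy types, not the crate's API):

use std::collections::{HashMap, HashSet, VecDeque};

const MAX_IDS: usize = 4; // stand-in for MAX_ENTRY_IDS

fn register(order: &mut VecDeque<u64>, sets: &mut HashMap<u64, HashSet<u64>>, id: u64) {
    if order.len() >= MAX_IDS {
        // boot the oldest id; transactions that still use it are rejected
        let oldest = order.pop_front().unwrap();
        sets.remove(&oldest);
    }
    sets.insert(id, HashSet::new());
    order.push_back(id);
}

fn main() {
    let (mut order, mut sets) = (VecDeque::new(), HashMap::new());
    for id in 0..6 {
        register(&mut order, &mut sets, id);
    }
    assert!(!sets.contains_key(&0)); // ids 0 and 1 were evicted
    assert!(sets.contains_key(&5));
}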
    /// Deduct tokens from the 'from' address if the account has sufficient
    /// funds and isn't a duplicate.
    fn apply_debits(&self, tx: &Transaction, bals: &mut HashMap<PublicKey, i64>) -> Result<()> {
    fn apply_debits(&self, tx: &Transaction, bals: &mut HashMap<Pubkey, i64>) -> Result<()> {
        let mut purge = false;
        {
            let option = bals.get_mut(&tx.from);
            if option.is_none() {
                // TODO: this is gnarly because the counters are static atomics
                if !self.is_leader {
                    inc_new_counter_info!("bank-appy_debits-account_not_found-validator", 1);
                } else if let Instruction::NewVote(_) = &tx.instruction {
                    inc_new_counter_info!("bank-appy_debits-vote_account_not_found", 1);
                } else {
                    inc_new_counter_info!("bank-appy_debits-generic_account_not_found", 1);
                }
                return Err(BankError::AccountNotFound(tx.from));
            }
            let bal = option.unwrap();

            self.reserve_signature_with_last_id(&tx.sig, &tx.last_id)?;
            self.reserve_signature_with_last_id(&tx.signature, &tx.last_id)?;

            if let Instruction::NewContract(contract) = &tx.instruction {
                if contract.tokens < 0 {
@@ -214,7 +260,7 @@ impl Bank {
                }

                if *bal < contract.tokens {
                    self.forget_signature_with_last_id(&tx.sig, &tx.last_id);
                    self.forget_signature_with_last_id(&tx.signature, &tx.last_id);
                    return Err(BankError::InsufficientFunds(tx.from));
                } else if *bal == contract.tokens {
                    purge = true;
@@ -231,29 +277,30 @@ impl Bank {
        Ok(())
    }

    /// Apply only a transaction's credits. Credits from multiple transactions
    /// may safely be applied in parallel.
    fn apply_credits(&self, tx: &Transaction, balances: &mut HashMap<PublicKey, i64>) {
    /// Apply only a transaction's credits.
    /// Note: It is safe to apply credits from multiple transactions in parallel.
    fn apply_credits(&self, tx: &Transaction, balances: &mut HashMap<Pubkey, i64>) {
        match &tx.instruction {
            Instruction::NewContract(contract) => {
                let plan = contract.plan.clone();
                if let Some(payment) = plan.final_payment() {
                    self.apply_payment(&payment, balances);
                } else {
                    let mut pending = self.pending
                    let mut pending = self
                        .pending
                        .write()
                        .expect("'pending' write lock in apply_credits");
                    pending.insert(tx.sig, plan);
                    pending.insert(tx.signature, plan);
                }
            }
            Instruction::ApplyTimestamp(dt) => {
                let _ = self.apply_timestamp(tx.from, *dt);
            }
            Instruction::ApplySignature(tx_sig) => {
                let _ = self.apply_signature(tx.from, *tx_sig);
            Instruction::ApplySignature(signature) => {
                let _ = self.apply_signature(tx.from, *signature);
            }
            Instruction::NewVote(_vote) => {
                info!("GOT VOTE!");
                trace!("GOT VOTE! last_id={:?}", &tx.last_id.as_ref()[..8]);
                // TODO: record the vote in the stake table...
            }
        }
@@ -276,7 +323,8 @@ impl Bank {
        debug!("processing Transactions {}", txs.len());
        let txs_len = txs.len();
        let now = Instant::now();
        let results: Vec<_> = txs.into_iter()
        let results: Vec<_> = txs
            .into_iter()
            .map(|tx| self.apply_debits(&tx, bals).map(|_| tx))
            .collect(); // Calling collect() here forces all debits to complete before moving on.

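The intermediate collect() is what keeps the two phases ordered: every fallible debit finishes before any credit is applied, after which credits are order-independent. A schematic of that two-phase shape, under the simplifying assumption that a transaction is just (from, to, tokens):

use std::collections::HashMap;

struct Tx { from: u8, to: u8, tokens: i64 }

fn process(txs: Vec<Tx>, bals: &mut HashMap<u8, i64>) -> Vec<Result<Tx, ()>> {
    // Phase 1: debits, which can fail (unknown account, insufficient funds).
    let debited: Vec<Result<Tx, ()>> = txs
        .into_iter()
        .map(|tx| -> Result<Tx, ()> {
            let bal = bals.get_mut(&tx.from).ok_or(())?;
            if *bal < tx.tokens {
                return Err(());
            }
            *bal -= tx.tokens;
            Ok(tx)
        })
        .collect(); // forces all debits to finish before any credit runs
    // Phase 2: credits, which cannot fail and are safe in any order.
    debited
        .into_iter()
        .map(|res| {
            res.map(|tx| {
                *bals.entry(tx.to).or_insert(0) += tx.tokens;
                tx
            })
        })
        .collect()
}

fn main() {
    let mut bals = HashMap::from([(1u8, 10i64)]);
    let results = process(vec![Tx { from: 1, to: 2, tokens: 4 }], &mut bals);
    assert!(results[0].is_ok());
    assert_eq!(bals[&2], 4);
}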
@@ -301,19 +349,26 @@ impl Bank {
        );

        let mut tx_count = 0;
        let mut err_count = 0;
        for r in &res {
            if r.is_ok() {
                tx_count += 1;
            } else {
                info!("tx error: {:?}", r);
                if err_count == 0 {
                    info!("tx error: {:?}", r);
                }
                err_count += 1;
            }
        }
        if err_count > 0 {
            info!("{} errors of {} txs", err_count, err_count + tx_count);
        }
        self.transaction_count
            .fetch_add(tx_count, Ordering::Relaxed);
        res
    }

    fn process_entry(&self, entry: Entry) -> Result<()> {
    pub fn process_entry(&self, entry: Entry) -> Result<()> {
        if !entry.transactions.is_empty() {
            for result in self.process_transactions(entry.transactions) {
                result?;
@@ -363,6 +418,7 @@ impl Bank {
    /// Append entry blocks to the ledger, verifying them along the way.
    fn process_blocks<I>(
        &self,
        start_hash: Hash,
        entries: I,
        tail: &mut Vec<Entry>,
        tail_idx: &mut usize,
@@ -372,12 +428,15 @@ impl Bank {
    {
        // Ledger verification needs to be parallelized, but we can't pull the whole
        // thing into memory. We therefore chunk it.
        let mut entry_count = 0;
        let mut entry_count = *tail_idx as u64;
        let mut id = start_hash;
        for block in &entries.into_iter().chunks(VERIFY_BLOCK_SIZE) {
            let block: Vec<_> = block.collect();
            if !block.verify(&self.last_id()) {
            if !block.verify(&id) {
                warn!("Ledger proof of history failed at entry: {}", entry_count);
                return Err(BankError::LedgerVerificationFailed);
            }
            id = block.last().unwrap().id;
            entry_count += self.process_entries_tail(block, tail, tail_idx)?;
        }
        Ok(entry_count)
@@ -412,30 +471,30 @@ impl Bank {
        }
        self.register_entry_id(&entry0.id);
        self.register_entry_id(&entry1.id);
        let entry1_id = entry1.id;

        let mut tail = Vec::with_capacity(WINDOW_SIZE as usize);
        tail.push(entry0);
        tail.push(entry1);
        let mut tail_idx = 2;
        let entry_count = 2 + self.process_blocks(entries, &mut tail, &mut tail_idx)?;
        let entry_count = self.process_blocks(entry1_id, entries, &mut tail, &mut tail_idx)?;

        // check if we need to rotate tail
        let tail = if tail.len() == WINDOW_SIZE as usize {
            rotate_vector(tail, tail_idx)
        } else {
            tail
        };
        if tail.len() == WINDOW_SIZE as usize {
            tail.rotate_left(tail_idx)
        }

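The hand-rolled rotate_vector helper (its removal appears further down) is replaced by the standard library's in-place slice rotation: tail.rotate_left(tail_idx) moves the element at index tail_idx to the front without allocating. A quick equivalence check against one of the removed test cases:

fn main() {
    let mut tail = vec![3, 4, 1, 2];
    // same result as the removed rotate_vector(vec![3, 4, 1, 2], 2),
    // but performed in place with no new Vec
    tail.rotate_left(2);
    assert_eq!(tail, vec![1, 2, 3, 4]);
}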
    /// Process a Witness Signature. Any payment plans waiting on this signature
    /// will progress one step.
    fn apply_signature(&self, from: PublicKey, tx_sig: Signature) -> Result<()> {
        if let Occupied(mut e) = self.pending
    fn apply_signature(&self, from: Pubkey, signature: Signature) -> Result<()> {
        if let Occupied(mut e) = self
            .pending
            .write()
            .expect("write() in apply_signature")
            .entry(tx_sig)
            .entry(signature)
        {
            e.get_mut().apply_witness(&Witness::Signature, &from);
            if let Some(payment) = e.get().final_payment() {
@@ -449,13 +508,14 @@ impl Bank {

    /// Process a Witness Timestamp. Any payment plans waiting on this timestamp
    /// will progress one step.
    fn apply_timestamp(&self, from: PublicKey, dt: DateTime<Utc>) -> Result<()> {
    fn apply_timestamp(&self, from: Pubkey, dt: DateTime<Utc>) -> Result<()> {
        // Check to see if any timelocked transactions can be completed.
        let mut completed = vec![];

        // Hold 'pending' write lock until the end of this function. Otherwise another thread can
        // double-spend if it enters before the modified plan is removed from 'pending'.
        let mut pending = self.pending
        let mut pending = self
            .pending
            .write()
            .expect("'pending' write lock in apply_timestamp");
        for (key, plan) in pending.iter_mut() {
@@ -478,13 +538,13 @@ impl Bank {
    pub fn transfer(
        &self,
        n: i64,
        keypair: &KeyPair,
        to: PublicKey,
        keypair: &Keypair,
        to: Pubkey,
        last_id: Hash,
    ) -> Result<Signature> {
        let tx = Transaction::new(keypair, to, n, last_id);
        let sig = tx.sig;
        self.process_transaction(&tx).map(|_| sig)
        let signature = tx.signature;
        self.process_transaction(&tx).map(|_| signature)
    }

    /// Create, sign, and process a postdated Transaction from `keypair`
@@ -493,18 +553,19 @@ impl Bank {
    pub fn transfer_on_date(
        &self,
        n: i64,
        keypair: &KeyPair,
        to: PublicKey,
        keypair: &Keypair,
        to: Pubkey,
        dt: DateTime<Utc>,
        last_id: Hash,
    ) -> Result<Signature> {
        let tx = Transaction::new_on_date(keypair, to, dt, n, last_id);
        let sig = tx.sig;
        self.process_transaction(&tx).map(|_| sig)
        let signature = tx.signature;
        self.process_transaction(&tx).map(|_| signature)
    }

    pub fn get_balance(&self, pubkey: &PublicKey) -> i64 {
        let bals = self.balances
    pub fn get_balance(&self, pubkey: &Pubkey) -> i64 {
        let bals = self
            .balances
            .read()
            .expect("'balances' read lock in get_balance");
        bals.get(pubkey).cloned().unwrap_or(0)
@@ -515,26 +576,24 @@ impl Bank {
    }

    pub fn has_signature(&self, signature: &Signature) -> bool {
        let last_ids_sigs = self.last_ids_sigs
        let last_ids_sigs = self
            .last_ids_sigs
            .read()
            .expect("'last_ids_sigs' read lock");
        for (_hash, signatures) in last_ids_sigs.iter() {
            if signatures.contains(signature) {
            if signatures.0.contains(signature) {
                return true;
            }
        }
        false
    }
}

fn rotate_vector<T: Clone>(v: Vec<T>, at: usize) -> Vec<T> {
    if at != 0 {
        let mut ret = Vec::with_capacity(v.len());
        ret.extend_from_slice(&v[at..]);
        ret.extend_from_slice(&v[0..at]);
        ret
    } else {
        v
    pub fn finality(&self) -> usize {
        self.finality_time.load(Ordering::Relaxed)
    }

    pub fn set_finality(&self, finality: usize) {
        self.finality_time.store(finality, Ordering::Relaxed);
    }
}

@@ -546,13 +605,17 @@ mod tests {
    use entry::Entry;
    use entry_writer::{self, EntryWriter};
    use hash::hash;
    use signature::KeyPairUtil;
    use ledger;
    use packet::BLOB_DATA_SIZE;
    use signature::KeypairUtil;
    use std;
    use std::io::{BufReader, Cursor, Seek, SeekFrom};
    use std::mem::size_of;

    #[test]
    fn test_two_payments_to_one_party() {
        let mint = Mint::new(10_000);
        let pubkey = KeyPair::new().pubkey();
        let pubkey = Keypair::new().pubkey();
        let bank = Bank::new(&mint);
        assert_eq!(bank.last_id(), mint.last_id());

@@ -569,7 +632,7 @@ mod tests {
    #[test]
    fn test_negative_tokens() {
        let mint = Mint::new(1);
        let pubkey = KeyPair::new().pubkey();
        let pubkey = Keypair::new().pubkey();
        let bank = Bank::new(&mint);
        assert_eq!(
            bank.transfer(-1, &mint.keypair(), pubkey, mint.last_id()),
@@ -582,7 +645,7 @@ mod tests {
    fn test_account_not_found() {
        let mint = Mint::new(1);
        let bank = Bank::new(&mint);
        let keypair = KeyPair::new();
        let keypair = Keypair::new();
        assert_eq!(
            bank.transfer(1, &keypair, mint.pubkey(), mint.last_id()),
            Err(BankError::AccountNotFound(keypair.pubkey()))
@@ -594,7 +657,7 @@ mod tests {
    fn test_insufficient_funds() {
        let mint = Mint::new(11_000);
        let bank = Bank::new(&mint);
        let pubkey = KeyPair::new().pubkey();
        let pubkey = Keypair::new().pubkey();
        bank.transfer(1_000, &mint.keypair(), pubkey, mint.last_id())
            .unwrap();
        assert_eq!(bank.transaction_count(), 1);
@@ -613,7 +676,7 @@ mod tests {
    fn test_transfer_to_newb() {
        let mint = Mint::new(10_000);
        let bank = Bank::new(&mint);
        let pubkey = KeyPair::new().pubkey();
        let pubkey = Keypair::new().pubkey();
        bank.transfer(500, &mint.keypair(), pubkey, mint.last_id())
            .unwrap();
        assert_eq!(bank.get_balance(&pubkey), 500);
@@ -623,7 +686,7 @@ mod tests {
    fn test_transfer_on_date() {
        let mint = Mint::new(1);
        let bank = Bank::new(&mint);
        let pubkey = KeyPair::new().pubkey();
        let pubkey = Keypair::new().pubkey();
        let dt = Utc::now();
        bank.transfer_on_date(1, &mint.keypair(), pubkey, dt, mint.last_id())
            .unwrap();
@@ -655,9 +718,10 @@ mod tests {
    fn test_cancel_transfer() {
        let mint = Mint::new(1);
        let bank = Bank::new(&mint);
        let pubkey = KeyPair::new().pubkey();
        let pubkey = Keypair::new().pubkey();
        let dt = Utc::now();
        let sig = bank.transfer_on_date(1, &mint.keypair(), pubkey, dt, mint.last_id())
        let signature = bank
            .transfer_on_date(1, &mint.keypair(), pubkey, dt, mint.last_id())
            .unwrap();

        // Assert the debit counts as a transaction.
@@ -671,14 +735,14 @@ mod tests {
        assert_eq!(bank.get_balance(&pubkey), 0);

        // Now, cancel the transaction. Mint gets her funds back, pubkey never sees them.
        bank.apply_signature(mint.pubkey(), sig).unwrap();
        bank.apply_signature(mint.pubkey(), signature).unwrap();
        assert_eq!(bank.get_balance(&mint.pubkey()), 1);
        assert_eq!(bank.get_balance(&pubkey), 0);

        // Assert cancel doesn't cause count to go backward.
        assert_eq!(bank.transaction_count(), 1);

        bank.apply_signature(mint.pubkey(), sig).unwrap(); // <-- Attack! Attempt to cancel completed transaction.
        bank.apply_signature(mint.pubkey(), signature).unwrap(); // <-- Attack! Attempt to cancel completed transaction.
        assert_ne!(bank.get_balance(&mint.pubkey()), 2);
    }

@@ -686,14 +750,14 @@ mod tests {
    fn test_duplicate_transaction_signature() {
        let mint = Mint::new(1);
        let bank = Bank::new(&mint);
        let sig = Signature::default();
        let signature = Signature::default();
        assert!(
            bank.reserve_signature_with_last_id(&sig, &mint.last_id())
            bank.reserve_signature_with_last_id(&signature, &mint.last_id())
                .is_ok()
        );
        assert_eq!(
            bank.reserve_signature_with_last_id(&sig, &mint.last_id()),
            Err(BankError::DuplicateSignature(sig))
            bank.reserve_signature_with_last_id(&signature, &mint.last_id()),
            Err(BankError::DuplicateSignature(signature))
        );
    }

@@ -701,12 +765,12 @@ mod tests {
    fn test_forget_signature() {
        let mint = Mint::new(1);
        let bank = Bank::new(&mint);
        let sig = Signature::default();
        bank.reserve_signature_with_last_id(&sig, &mint.last_id())
        let signature = Signature::default();
        bank.reserve_signature_with_last_id(&signature, &mint.last_id())
            .unwrap();
        bank.forget_signature_with_last_id(&sig, &mint.last_id());
        bank.forget_signature_with_last_id(&signature, &mint.last_id());
        assert!(
            bank.reserve_signature_with_last_id(&sig, &mint.last_id())
            bank.reserve_signature_with_last_id(&signature, &mint.last_id())
                .is_ok()
        );
    }
@@ -715,33 +779,51 @@ mod tests {
    fn test_has_signature() {
        let mint = Mint::new(1);
        let bank = Bank::new(&mint);
        let sig = Signature::default();
        bank.reserve_signature_with_last_id(&sig, &mint.last_id())
        let signature = Signature::default();
        bank.reserve_signature_with_last_id(&signature, &mint.last_id())
            .expect("reserve signature");
        assert!(bank.has_signature(&sig));
        assert!(bank.has_signature(&signature));
    }

    #[test]
    fn test_reject_old_last_id() {
        let mint = Mint::new(1);
        let bank = Bank::new(&mint);
        let sig = Signature::default();
        let signature = Signature::default();
        for i in 0..MAX_ENTRY_IDS {
            let last_id = hash(&serialize(&i).unwrap()); // Unique hash
            bank.register_entry_id(&last_id);
        }
        // Assert we're no longer able to use the oldest entry ID.
        assert_eq!(
            bank.reserve_signature_with_last_id(&sig, &mint.last_id()),
            bank.reserve_signature_with_last_id(&signature, &mint.last_id()),
            Err(BankError::LastIdNotFound(mint.last_id()))
        );
    }

    #[test]
    fn test_count_valid_ids() {
        let mint = Mint::new(1);
        let bank = Bank::new(&mint);
        let ids: Vec<_> = (0..MAX_ENTRY_IDS)
            .map(|i| {
                let last_id = hash(&serialize(&i).unwrap()); // Unique hash
                bank.register_entry_id(&last_id);
                last_id
            })
            .collect();
        assert_eq!(bank.count_valid_ids(&[]).len(), 0);
        assert_eq!(bank.count_valid_ids(&[mint.last_id()]).len(), 0);
        for (i, id) in bank.count_valid_ids(&ids).iter().enumerate() {
            assert_eq!(id.0, i);
        }
    }

    #[test]
    fn test_debits_before_credits() {
        let mint = Mint::new(2);
        let bank = Bank::new(&mint);
        let keypair = KeyPair::new();
        let keypair = Keypair::new();
        let tx0 = Transaction::new(&mint.keypair(), keypair.pubkey(), 2, mint.last_id());
        let tx1 = Transaction::new(&keypair, mint.pubkey(), 1, mint.last_id());
        let txs = vec![tx0, tx1];
@@ -756,7 +838,7 @@ mod tests {
    fn test_process_empty_entry_is_registered() {
        let mint = Mint::new(1);
        let bank = Bank::new(&mint);
        let keypair = KeyPair::new();
        let keypair = Keypair::new();
        let entry = next_entry(&mint.last_id(), 1, vec![]);
        let tx = Transaction::new(&mint.keypair(), keypair.pubkey(), 1, entry.id);

@@ -780,19 +862,48 @@ mod tests {
        assert_eq!(bank.get_balance(&mint.pubkey()), 1);
    }

    fn create_sample_block_with_next_entries(
        mint: &Mint,
        length: usize,
    ) -> impl Iterator<Item = Entry> {
        let keypair = Keypair::new();
        let hash = mint.last_id();
        let mut txs = Vec::with_capacity(length);
        for i in 0..length {
            txs.push(Transaction::new(
                &mint.keypair(),
                keypair.pubkey(),
                i as i64,
                hash,
            ));
        }
        let entries = ledger::next_entries(&hash, 0, txs);
        entries.into_iter()
    }

    fn create_sample_block(mint: &Mint, length: usize) -> impl Iterator<Item = Entry> {
        let mut entries = Vec::with_capacity(length);
        let mut hash = mint.last_id();
        let mut cur_hashes = 0;
        let mut num_hashes = 0;
        for _ in 0..length {
            let keypair = KeyPair::new();
            let keypair = Keypair::new();
            let tx = Transaction::new(&mint.keypair(), keypair.pubkey(), 1, hash);
            let entry = Entry::new_mut(&mut hash, &mut cur_hashes, vec![tx], false);
            let entry = Entry::new_mut(&mut hash, &mut num_hashes, vec![tx], false);
            entries.push(entry);
        }
        entries.into_iter()
    }
    fn create_sample_ledger(length: usize) -> (impl Iterator<Item = Entry>, PublicKey) {

    fn create_sample_ledger_with_next_entries(
        length: usize,
    ) -> (impl Iterator<Item = Entry>, Pubkey) {
        let mint = Mint::new((length * length) as i64);
        let genesis = mint.create_entries();
        let block = create_sample_block_with_next_entries(&mint, length);
        (genesis.into_iter().chain(block), mint.pubkey())
    }

    fn create_sample_ledger(length: usize) -> (impl Iterator<Item = Entry>, Pubkey) {
        let mint = Mint::new(1 + length as i64);
        let genesis = mint.create_entries();
        let block = create_sample_block(&mint, length);
@@ -867,13 +978,30 @@ mod tests {
    }

    #[test]
    fn test_rotate_vector() {
        let expect = vec![1, 2, 3, 4];

        assert_eq!(rotate_vector(vec![4, 1, 2, 3], 1), expect);
        assert_eq!(rotate_vector(vec![1, 2, 3, 4], 0), expect);
        assert_eq!(rotate_vector(vec![2, 3, 4, 1], 3), expect);
        assert_eq!(rotate_vector(vec![3, 4, 1, 2], 2), expect);
    fn test_process_ledger_has_more_cross_block() {
        // size_of::<Transaction> is quite large for serialized size, so
        // use 2 * VERIFY_BLOCK_SIZE to ensure we get enough txs to cross that
        // block boundary with has_more set
        let num_txs = (2 * VERIFY_BLOCK_SIZE) * BLOB_DATA_SIZE / size_of::<Transaction>();
        let (ledger, _pubkey) = create_sample_ledger_with_next_entries(num_txs);
        let bank = Bank::default();
        assert!(bank.process_ledger(ledger).is_ok());
    }
    #[test]
    fn test_new_default() {
        let def_bank = Bank::default();
        assert!(def_bank.is_leader);
        let leader_bank = Bank::new_default(true);
        assert!(leader_bank.is_leader);
        let validator_bank = Bank::new_default(false);
        assert!(!validator_bank.is_leader);
    }
    #[test]
    fn test_finality() {
        let def_bank = Bank::default();
        assert_eq!(def_bank.finality(), std::usize::MAX);
        def_bank.set_finality(90);
        assert_eq!(def_bank.finality(), 90);
    }

}
@@ -5,6 +5,7 @@
use bank::Bank;
use bincode::deserialize;
use counter::Counter;
use log::Level;
use packet::{PacketRecycler, Packets, SharedPackets};
use rayon::prelude::*;
use record_stage::Signal;
@@ -40,7 +41,7 @@ impl BankingStage {
            .name("solana-banking-stage".to_string())
            .spawn(move || loop {
                if let Err(e) = Self::process_packets(
                    &bank.clone(),
                    &bank,
                    &verified_receiver,
                    &signal_sender,
                    &packet_recycler,
@@ -88,8 +89,8 @@ impl BankingStage {
            timing::duration_as_ms(&recv_start.elapsed()),
            mms.len(),
        );
        let bank_starting_tx_count = bank.transaction_count();
        let count = mms.iter().map(|x| x.1.len()).sum();
        static mut COUNTER: Counter = create_counter!("banking_stage_process_packets", 1);
        let proc_start = Instant::now();
        for (msgs, vers) in mms {
            let transactions = Self::deserialize_transactions(&msgs.read().unwrap());
@@ -125,7 +126,11 @@ impl BankingStage {
            reqs_len,
            (reqs_len as f32) / (total_time_s)
        );
        inc_counter!(COUNTER, count);
        inc_new_counter_info!("banking_stage-process_packets", count);
        inc_new_counter_info!(
            "banking_stage-process_transactions",
            bank.transaction_count() - bank_starting_tx_count
        );
        Ok(())
    }
}
@@ -1,28 +1,23 @@
#[macro_use]
extern crate log;
extern crate solana;
#[macro_use]
extern crate criterion;

use criterion::{Bencher, Criterion};
use solana::packet::{Packet, PacketRecycler, BLOB_SIZE, PACKET_DATA_SIZE};
use solana::result::Result;
use solana::streamer::{receiver, PacketReceiver};
use std::net::{SocketAddr, UdpSocket};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::sync::mpsc::channel;
use std::sync::{Arc, Mutex};
use std::sync::Arc;
use std::thread::sleep;
use std::thread::{spawn, JoinHandle};
use std::time::Duration;
use std::time::SystemTime;

fn producer(addr: &SocketAddr, recycler: PacketRecycler, exit: Arc<AtomicBool>) -> JoinHandle<()> {
fn producer(addr: &SocketAddr, recycler: &PacketRecycler, exit: Arc<AtomicBool>) -> JoinHandle<()> {
    let send = UdpSocket::bind("0.0.0.0:0").unwrap();
    let msgs = recycler.allocate();
    let msgs_ = msgs.clone();
    msgs.write().unwrap().packets.resize(10, Packet::default());
    for w in msgs.write().unwrap().packets.iter_mut() {
    for w in &mut msgs.write().unwrap().packets {
        w.meta.size = PACKET_DATA_SIZE;
        w.meta.set_addr(&addr);
    }
@@ -31,7 +26,7 @@ fn producer(addr: &SocketAddr, recycler: PacketRecycler, exit: Arc<AtomicBool>)
            return;
        }
        let mut num = 0;
        for p in msgs_.read().unwrap().packets.iter() {
        for p in &msgs_.read().unwrap().packets {
            let a = p.meta.addr();
            assert!(p.meta.size < BLOB_SIZE);
            send.send_to(&p.data[..p.meta.size], &a).unwrap();
@@ -44,7 +39,7 @@ fn producer(addr: &SocketAddr, recycler: PacketRecycler, exit: Arc<AtomicBool>)
fn sink(
    recycler: PacketRecycler,
    exit: Arc<AtomicBool>,
    rvs: Arc<Mutex<usize>>,
    rvs: Arc<AtomicUsize>,
    r: PacketReceiver,
) -> JoinHandle<()> {
    spawn(move || loop {
@@ -52,17 +47,14 @@ fn sink(
            return;
        }
        let timer = Duration::new(1, 0);
        match r.recv_timeout(timer) {
            Ok(msgs) => {
                *rvs.lock().unwrap() += msgs.read().unwrap().packets.len();
                recycler.recycle(msgs);
            }
            _ => (),
        if let Ok(msgs) = r.recv_timeout(timer) {
            rvs.fetch_add(msgs.read().unwrap().packets.len(), Ordering::Relaxed);
            recycler.recycle(msgs);
        }
    })
}

fn bench_streamer_with_result() -> Result<()> {
fn main() -> Result<()> {
    let read = UdpSocket::bind("127.0.0.1:0")?;
    read.set_read_timeout(Some(Duration::new(1, 0)))?;

@@ -72,22 +64,22 @@ fn bench_streamer_with_result() -> Result<()> {

    let (s_reader, r_reader) = channel();
    let t_reader = receiver(read, exit.clone(), pack_recycler.clone(), s_reader);
    let t_producer1 = producer(&addr, pack_recycler.clone(), exit.clone());
    let t_producer2 = producer(&addr, pack_recycler.clone(), exit.clone());
    let t_producer3 = producer(&addr, pack_recycler.clone(), exit.clone());
    let t_producer1 = producer(&addr, &pack_recycler, exit.clone());
    let t_producer2 = producer(&addr, &pack_recycler, exit.clone());
    let t_producer3 = producer(&addr, &pack_recycler, exit.clone());

    let rvs = Arc::new(Mutex::new(0));
    let rvs = Arc::new(AtomicUsize::new(0));
    let t_sink = sink(pack_recycler.clone(), exit.clone(), rvs.clone(), r_reader);

    let start = SystemTime::now();
    let start_val = *rvs.lock().unwrap();
    let start_val = rvs.load(Ordering::Relaxed);
    sleep(Duration::new(5, 0));
    let elapsed = start.elapsed().unwrap();
    let end_val = *rvs.lock().unwrap();
    let time = elapsed.as_secs() * 10000000000 + elapsed.subsec_nanos() as u64;
    let ftime = (time as f64) / 10000000000f64;
    let end_val = rvs.load(Ordering::Relaxed);
    let time = elapsed.as_secs() * 10_000_000_000 + u64::from(elapsed.subsec_nanos());
    let ftime = (time as f64) / 10_000_000_000_f64;
    let fcount = (end_val - start_val) as f64;
    trace!("performance: {:?}", fcount / ftime);
    println!("performance: {:?}", fcount / ftime);
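The benchmark folds seconds and subsecond nanoseconds into one integer, then divides back out to fractional seconds before computing packets per second. (Note the scale factor here is 10^10 rather than the usual 10^9 nanoseconds per second, so the subsecond component is underweighted by a factor of ten; over a 5-second run the effect is small.) A hedged sketch of the intended measurement using a modern Duration, with assumed numbers:

use std::time::Duration;

fn packets_per_second(count: usize, elapsed: Duration) -> f64 {
    // count divided by elapsed time in fractional seconds
    count as f64 / elapsed.as_secs_f64()
}

fn main() {
    let processed = 50_000; // stand-in for end_val - start_val
    let elapsed = Duration::new(5, 250_000_000); // 5.25 s
    println!("performance: {:?}", packets_per_second(processed, elapsed));
}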
    exit.store(true, Ordering::Relaxed);
    t_reader.join()?;
    t_producer1.join()?;
@@ -96,22 +88,3 @@ fn bench_streamer_with_result() -> Result<()> {
    t_sink.join()?;
    Ok(())
}

fn bench_streamer(bencher: &mut Bencher) {
    bencher.iter(|| {
        bench_streamer_with_result().unwrap();
    });
}

fn bench(criterion: &mut Criterion) {
    criterion.bench_function("bench_streamer", |bencher| {
        bench_streamer(bencher);
    });
}

criterion_group!(
    name = benches;
    config = Criterion::default().sample_size(2);
    targets = bench
);
criterion_main!(benches);
714 src/bin/bench-tps.rs Normal file
@@ -0,0 +1,714 @@
extern crate bincode;
#[macro_use]
extern crate clap;
extern crate influx_db_client;
extern crate rayon;
extern crate serde_json;
extern crate solana;

use clap::{App, Arg};
use influx_db_client as influxdb;
use rayon::prelude::*;
use solana::client::mk_client;
use solana::crdt::{Crdt, NodeInfo};
use solana::drone::DRONE_PORT;
use solana::fullnode::Config;
use solana::hash::Hash;
use solana::logger;
use solana::metrics;
use solana::nat::{get_public_ip_addr, udp_random_bind};
use solana::ncp::Ncp;
use solana::service::Service;
use solana::signature::{read_keypair, GenKeys, Keypair, KeypairUtil};
use solana::thin_client::ThinClient;
use solana::timing::{duration_as_ms, duration_as_s};
use solana::transaction::Transaction;
use solana::wallet::request_airdrop;
use solana::window::default_window;
use std::collections::VecDeque;
use std::fs::File;
use std::net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket};
use std::process::exit;
use std::sync::atomic::{AtomicBool, AtomicIsize, Ordering};
use std::sync::{Arc, RwLock};
use std::thread::sleep;
use std::thread::Builder;
use std::thread::JoinHandle;
use std::time::Duration;
use std::time::Instant;

pub struct NodeStats {
    pub tps: f64, // Maximum TPS reported by this node
    pub tx: u64,  // Total transactions reported by this node
}

fn metrics_submit_token_balance(token_balance: i64) {
    println!("Token balance: {}", token_balance);
    metrics::submit(
        influxdb::Point::new("bench-tps")
            .add_tag("op", influxdb::Value::String("token_balance".to_string()))
            .add_field("balance", influxdb::Value::Integer(token_balance as i64))
            .to_owned(),
    );
}

fn sample_tx_count(
    exit_signal: &Arc<AtomicBool>,
    maxes: &Arc<RwLock<Vec<(SocketAddr, NodeStats)>>>,
    first_tx_count: u64,
    v: &NodeInfo,
    sample_period: u64,
) {
    let mut client = mk_client(&v);
    let mut now = Instant::now();
    let mut initial_tx_count = client.transaction_count();
    let mut max_tps = 0.0;
    let mut total;

    let log_prefix = format!("{:21}:", v.contact_info.tpu.to_string());

    loop {
        let tx_count = client.transaction_count();
        assert!(
            tx_count >= initial_tx_count,
            "expected tx_count({}) >= initial_tx_count({})",
            tx_count,
            initial_tx_count
        );
        let duration = now.elapsed();
        now = Instant::now();
        let sample = tx_count - initial_tx_count;
        initial_tx_count = tx_count;

        let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
        let tps = (sample * 1_000_000_000) as f64 / ns as f64;
        if tps > max_tps {
            max_tps = tps;
        }
        if tx_count > first_tx_count {
            total = tx_count - first_tx_count;
        } else {
            total = 0;
        }
        println!(
            "{} {:9.2} TPS, Transactions: {:6}, Total transactions: {}",
            log_prefix, tps, sample, total
        );
        sleep(Duration::new(sample_period, 0));

        if exit_signal.load(Ordering::Relaxed) {
            println!("{} Exiting validator thread", log_prefix);
            let stats = NodeStats {
                tps: max_tps,
                tx: total,
            };
            maxes.write().unwrap().push((v.contact_info.tpu, stats));
            break;
        }
    }
}

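Each sample scales the transaction delta by 1e9 before dividing by the elapsed nanoseconds, which is the same as sample / seconds but defers the float division to the end. Worked through with assumed numbers:

fn sample_tps(sample: u64, elapsed_ns: u64) -> f64 {
    // (sample * 1e9) / ns == sample / seconds
    (sample * 1_000_000_000) as f64 / elapsed_ns as f64
}

fn main() {
    // e.g. 2_500 transactions observed over a 1.02 s sample window
    let tps = sample_tps(2_500, 1_020_000_000);
    assert!((tps - 2_450.98).abs() < 0.01);
    println!("{:9.2} TPS", tps);
}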
/// Send loopback payment of 0 tokens and confirm the network processed it
fn send_barrier_transaction(barrier_client: &mut ThinClient, last_id: &mut Hash, id: &Keypair) {
    let transfer_start = Instant::now();

    let mut poll_count = 0;
    loop {
        if poll_count > 0 && poll_count % 8 == 0 {
            println!(
                "polling for barrier transaction confirmation, attempt {}",
                poll_count
            );
        }

        *last_id = barrier_client.get_last_id();
        let signature = barrier_client
            .transfer(0, &id, id.pubkey(), last_id)
            .expect("Unable to send barrier transaction");

        let confirmation = barrier_client.poll_for_signature(&signature);
        let duration_ms = duration_as_ms(&transfer_start.elapsed());
        if confirmation.is_ok() {
            println!("barrier transaction confirmed in {}ms", duration_ms);

            metrics::submit(
                influxdb::Point::new("bench-tps")
                    .add_tag(
                        "op",
                        influxdb::Value::String("send_barrier_transaction".to_string()),
                    )
                    .add_field("poll_count", influxdb::Value::Integer(poll_count))
                    .add_field("duration", influxdb::Value::Integer(duration_ms as i64))
                    .to_owned(),
            );

            // Sanity check that the client balance is still 1
            let balance = barrier_client.poll_get_balance(&id.pubkey()).unwrap_or(-1);
            if balance != 1 {
                panic!("Expected an account balance of 1 (balance: {})", balance);
            }
            break;
        }

        // Timeout after 3 minutes. When running a CPU-only leader+validator+drone+bench-tps on a dev
        // machine, some batches of transactions can take upwards of 1 minute...
        if duration_ms > 1000 * 60 * 3 {
            println!("Error: Couldn't confirm barrier transaction!");
            exit(1);
        }

        let new_last_id = barrier_client.get_last_id();
        if new_last_id == *last_id {
            if poll_count > 0 && poll_count % 8 == 0 {
                println!("last_id is not advancing, still at {:?}", *last_id);
            }
        } else {
            *last_id = new_last_id;
        }

        poll_count += 1;
    }
}

fn generate_txs(
    shared_txs: &Arc<RwLock<VecDeque<Vec<Transaction>>>>,
    id: &Keypair,
    keypairs: &[Keypair],
    last_id: &Hash,
    threads: usize,
    reclaim: bool,
) {
    let tx_count = keypairs.len();
    println!("Signing transactions... {} (reclaim={})", tx_count, reclaim);
    let signing_start = Instant::now();

    let transactions: Vec<_> = keypairs
        .par_iter()
        .map(|keypair| {
            if !reclaim {
                Transaction::new(&id, keypair.pubkey(), 1, *last_id)
            } else {
                Transaction::new(keypair, id.pubkey(), 1, *last_id)
            }
        })
        .collect();

    let duration = signing_start.elapsed();
    let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
    let bsps = (tx_count) as f64 / ns as f64;
    let nsps = ns as f64 / (tx_count) as f64;
    println!(
        "Done. {:.2} thousand signatures per second, {:.2} us per signature, {} ms total time",
        bsps * 1_000_000_f64,
        nsps / 1_000_f64,
        duration_as_ms(&duration),
    );
    metrics::submit(
        influxdb::Point::new("bench-tps")
            .add_tag("op", influxdb::Value::String("generate_txs".to_string()))
            .add_field(
                "duration",
                influxdb::Value::Integer(duration_as_ms(&duration) as i64),
            )
            .to_owned(),
    );

    let sz = transactions.len() / threads;
    let chunks: Vec<_> = transactions.chunks(sz).collect();
    {
        let mut shared_txs_wl = shared_txs.write().unwrap();
        for chunk in chunks {
            shared_txs_wl.push_back(chunk.to_vec());
        }
    }
}

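The signing-rate report works in nanoseconds: `bsps` is signatures per nanosecond, so multiplying by 1e6 yields thousands of signatures per second, and `nsps / 1e3` converts nanoseconds per signature into microseconds. A worked check of those unit conversions with assumed numbers:

fn main() {
    let tx_count = 250_000u64;
    let ns = 5_000_000_000u64; // 5 s spent signing
    let bsps = tx_count as f64 / ns as f64; // signatures per nanosecond
    let nsps = ns as f64 / tx_count as f64; // nanoseconds per signature
    // 250_000 sigs / 5 s = 50_000 sigs/s = 50 thousand per second
    assert!((bsps * 1_000_000_f64 - 50.0).abs() < 1e-9);
    // 20_000 ns per signature = 20 us
    assert!((nsps / 1_000_f64 - 20.0).abs() < 1e-9);
}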
fn do_tx_transfers(
    exit_signal: &Arc<AtomicBool>,
    shared_txs: &Arc<RwLock<VecDeque<Vec<Transaction>>>>,
    leader: &NodeInfo,
    shared_tx_thread_count: &Arc<AtomicIsize>,
) {
    let client = mk_client(&leader);
    loop {
        let txs;
        {
            let mut shared_txs_wl = shared_txs.write().unwrap();
            txs = shared_txs_wl.pop_front();
        }
        if let Some(txs0) = txs {
            shared_tx_thread_count.fetch_add(1, Ordering::Relaxed);
            println!(
                "Transferring 1 unit {} times... to {}",
                txs0.len(),
                leader.contact_info.tpu
            );
            let tx_len = txs0.len();
            let transfer_start = Instant::now();
            for tx in txs0 {
                client.transfer_signed(&tx).unwrap();
            }
            shared_tx_thread_count.fetch_add(-1, Ordering::Relaxed);
            println!(
                "Tx send done. {} ms {} tps",
                duration_as_ms(&transfer_start.elapsed()),
                tx_len as f32 / duration_as_s(&transfer_start.elapsed()),
            );
            metrics::submit(
                influxdb::Point::new("bench-tps")
                    .add_tag("op", influxdb::Value::String("do_tx_transfers".to_string()))
                    .add_field(
                        "duration",
                        influxdb::Value::Integer(duration_as_ms(&transfer_start.elapsed()) as i64),
                    )
                    .add_field("count", influxdb::Value::Integer(tx_len as i64))
                    .to_owned(),
            );
        }
        if exit_signal.load(Ordering::Relaxed) {
            break;
        }
    }
}

fn airdrop_tokens(client: &mut ThinClient, leader: &NodeInfo, id: &Keypair, tx_count: i64) {
    let mut drone_addr = leader.contact_info.tpu;
    drone_addr.set_port(DRONE_PORT);

    let starting_balance = client.poll_get_balance(&id.pubkey()).unwrap();
    metrics_submit_token_balance(starting_balance);

    if starting_balance < tx_count {
        let airdrop_amount = tx_count - starting_balance;
        println!(
            "Airdropping {:?} tokens from {}",
            airdrop_amount, drone_addr
        );

        let previous_balance = starting_balance;
        request_airdrop(&drone_addr, &id.pubkey(), airdrop_amount as u64).unwrap();

        // TODO: return airdrop Result from Drone instead of polling the
        // network
        let mut current_balance = previous_balance;
        for _ in 0..20 {
            sleep(Duration::from_millis(500));
            current_balance = client.poll_get_balance(&id.pubkey()).unwrap();
            if starting_balance != current_balance {
                break;
            }
            println!(".");
        }
        metrics_submit_token_balance(current_balance);
        if current_balance - starting_balance != airdrop_amount {
            println!("Airdrop failed!");
            exit(1);
        }
    }
}

fn compute_and_report_stats(
    maxes: &Arc<RwLock<Vec<(SocketAddr, NodeStats)>>>,
    sample_period: u64,
    tx_send_elapsed: &Duration,
) {
    // Compute/report stats
    let mut max_of_maxes = 0.0;
    let mut total_txs = 0;
    let mut nodes_with_zero_tps = 0;
    let mut total_maxes = 0.0;
    println!(" Node address        |       Max TPS | Total Transactions");
    println!("---------------------+---------------+--------------------");

    for (sock, stats) in maxes.read().unwrap().iter() {
        let maybe_flag = match stats.tx {
            0 => "!!!!!",
            _ => "",
        };

        println!(
            "{:20} | {:13.2} | {} {}",
            (*sock).to_string(),
            stats.tps,
            stats.tx,
            maybe_flag
        );

        if stats.tps == 0.0 {
            nodes_with_zero_tps += 1;
        }
        total_maxes += stats.tps;

        if stats.tps > max_of_maxes {
            max_of_maxes = stats.tps;
        }
        total_txs += stats.tx;
    }

    if total_maxes > 0.0 {
        let num_nodes_with_tps = maxes.read().unwrap().len() - nodes_with_zero_tps;
        let average_max = total_maxes / num_nodes_with_tps as f64;
        println!(
            "\nAverage max TPS: {:.2}, {} nodes had 0 TPS",
            average_max, nodes_with_zero_tps
        );
    }

    println!(
        "\nHighest TPS: {:.2} sampling period {}s total transactions: {} clients: {}",
        max_of_maxes,
        sample_period,
        total_txs,
        maxes.read().unwrap().len()
    );
    println!(
        "\tAverage TPS: {}",
        total_txs as f32 / duration_as_s(tx_send_elapsed)
    );
}

fn main() {
    logger::setup();
    metrics::set_panic_hook("bench-tps");
    let mut threads = 4usize;
    let mut num_nodes = 1usize;
    let mut time_sec = 90;
    let mut sustained = false;
    let mut tx_count = 500_000;

    let matches = App::new("solana-bench-tps")
        .version(crate_version!())
        .arg(
            Arg::with_name("leader")
                .short("l")
                .long("leader")
                .value_name("PATH")
                .takes_value(true)
                .help("/path/to/leader.json"),
        )
        .arg(
            Arg::with_name("keypair")
                .short("k")
                .long("keypair")
                .value_name("PATH")
                .takes_value(true)
                .default_value("~/.config/solana/id.json")
                .help("/path/to/id.json"),
        )
        .arg(
            Arg::with_name("num_nodes")
                .short("n")
                .long("nodes")
                .value_name("NUMBER")
                .takes_value(true)
                .help("number of nodes to converge to"),
        )
        .arg(
            Arg::with_name("threads")
                .short("t")
                .long("threads")
                .value_name("NUMBER")
                .takes_value(true)
                .help("number of threads"),
        )
        .arg(
            Arg::with_name("seconds")
                .short("s")
                .long("sec")
                .value_name("NUMBER")
                .takes_value(true)
                .help("send transactions for this many seconds"),
        )
        .arg(
            Arg::with_name("converge_only")
                .short("c")
                .help("exit immediately after converging"),
        )
        .arg(
            Arg::with_name("addr")
                .short("a")
                .long("addr")
                .value_name("IPADDR")
                .takes_value(true)
                .help("address to advertise to the network"),
        )
        .arg(
            Arg::with_name("sustained")
                .long("sustained")
                .help("Use sustained performance mode vs. peak mode. This overlaps the tx generation with transfers."),
        )
        .arg(
            Arg::with_name("tx_count")
                .long("tx_count")
                .value_name("NUMBER")
                .takes_value(true)
                .help("number of transactions to send in a single batch")
        )
        .get_matches();

    let leader: NodeInfo;
    if let Some(l) = matches.value_of("leader") {
        leader = read_leader(l).node_info;
    } else {
        let server_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 8000);
        leader = NodeInfo::new_leader(&server_addr);
    };

    let id = read_keypair(matches.value_of("keypair").unwrap()).expect("client keypair");

    if let Some(t) = matches.value_of("threads") {
        threads = t.to_string().parse().expect("integer");
    }

    if let Some(n) = matches.value_of("num_nodes") {
        num_nodes = n.to_string().parse().expect("integer");
    }

    if let Some(s) = matches.value_of("seconds") {
        time_sec = s.to_string().parse().expect("integer");
    }

    let addr = if let Some(s) = matches.value_of("addr") {
        s.to_string().parse().unwrap_or_else(|e| {
            eprintln!("failed to parse {} as IP address error: {:?}", s, e);
            exit(1);
        })
    } else {
        get_public_ip_addr().unwrap_or_else(|e| {
            eprintln!("failed to get public IP, try --addr? error: {:?}", e);
            exit(1);
        })
    };

    if let Some(s) = matches.value_of("tx_count") {
        tx_count = s.to_string().parse().expect("integer");
    }

    if matches.is_present("sustained") {
        sustained = true;
    }

    let exit_signal = Arc::new(AtomicBool::new(false));
    let mut c_threads = vec![];
    let validators = converge(&leader, &exit_signal, num_nodes, &mut c_threads, addr);

    println!(" Node address         | Node identifier");
    println!("----------------------+------------------");
    for node in &validators {
        println!(
            " {:20} | {:16x}",
            node.contact_info.tpu.to_string(),
            node.debug_id()
        );
    }
    println!("Nodes: {}", validators.len());

    if validators.len() < num_nodes {
        println!(
            "Error: Insufficient nodes discovered. Expecting {} or more",
            num_nodes
        );
        exit(1);
    }

    if matches.is_present("converge_only") {
        return;
    }

    let mut client = mk_client(&leader);
    let mut barrier_client = mk_client(&leader);

    let mut seed = [0u8; 32];
    seed.copy_from_slice(&id.public_key_bytes()[..32]);
    let mut rnd = GenKeys::new(seed);

    println!("Creating {} keypairs...", tx_count / 2);
    let keypairs = rnd.gen_n_keypairs(tx_count / 2);
    let barrier_id = rnd.gen_n_keypairs(1).pop().unwrap();

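Seeding GenKeys from the client's own public-key bytes makes the destination keypairs deterministic: rerunning the benchmark with the same identity regenerates the same accounts, which is what lets the reclaim pass send the tokens back. A toy illustration of the seed-determines-sequence property (not the crate's actual RNG):

// xorshift64: a deterministic sequence fully determined by its seed
struct ToyGenKeys {
    state: u64,
}

impl ToyGenKeys {
    fn new(seed: [u8; 32]) -> Self {
        // fold the seed bytes into a 64-bit state (illustrative only)
        let state = seed
            .iter()
            .fold(0xdead_beef_u64, |s, b| s.wrapping_mul(31).wrapping_add(u64::from(*b)));
        ToyGenKeys { state }
    }
    fn gen_key(&mut self) -> u64 {
        self.state ^= self.state << 13;
        self.state ^= self.state >> 7;
        self.state ^= self.state << 17;
        self.state
    }
}

fn main() {
    let seed = [7u8; 32];
    let (mut a, mut b) = (ToyGenKeys::new(seed), ToyGenKeys::new(seed));
    // two generators built from the same seed derive identical key material
    assert_eq!(a.gen_key(), b.gen_key());
    assert_eq!(a.gen_key(), b.gen_key());
}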
println!("Get tokens...");
|
||||
airdrop_tokens(&mut client, &leader, &id, tx_count);
|
||||
airdrop_tokens(&mut barrier_client, &leader, &barrier_id, 1);
|
||||
|
||||
println!("Get last ID...");
|
||||
let mut last_id = client.get_last_id();
|
||||
println!("Got last ID {:?}", last_id);
|
||||
|
||||
let first_tx_count = client.transaction_count();
|
||||
println!("Initial transaction count {}", first_tx_count);
|
||||
|
||||
// Setup a thread per validator to sample every period
|
||||
// collect the max transaction rate and total tx count seen
|
||||
let maxes = Arc::new(RwLock::new(Vec::new()));
|
||||
let sample_period = 1; // in seconds
|
||||
println!("Sampling TPS every {} second...", sample_period);
|
||||
let v_threads: Vec<_> = validators
|
||||
.into_iter()
|
||||
.map(|v| {
|
||||
let exit_signal = exit_signal.clone();
|
||||
let maxes = maxes.clone();
|
||||
Builder::new()
|
||||
.name("solana-client-sample".to_string())
|
||||
.spawn(move || {
|
||||
sample_tx_count(&exit_signal, &maxes, first_tx_count, &v, sample_period);
|
||||
})
|
||||
.unwrap()
|
||||
})
|
||||
.collect();
|
||||
|
||||
let shared_txs: Arc<RwLock<VecDeque<Vec<Transaction>>>> =
|
||||
Arc::new(RwLock::new(VecDeque::new()));
|
||||
|
||||
let shared_tx_active_thread_count = Arc::new(AtomicIsize::new(0));
|
||||
|
||||
let s_threads: Vec<_> = (0..threads)
|
||||
.map(|_| {
|
||||
let exit_signal = exit_signal.clone();
|
||||
let shared_txs = shared_txs.clone();
|
||||
let leader = leader.clone();
|
||||
let shared_tx_active_thread_count = shared_tx_active_thread_count.clone();
|
||||
Builder::new()
|
||||
.name("solana-client-sender".to_string())
|
||||
.spawn(move || {
|
||||
do_tx_transfers(
|
||||
&exit_signal,
|
||||
&shared_txs,
|
||||
&leader,
|
||||
&shared_tx_active_thread_count,
|
||||
);
|
||||
})
|
||||
.unwrap()
|
||||
})
|
||||
.collect();
|
||||
|
||||
// generate and send transactions for the specified duration
|
||||
let time = Duration::new(time_sec, 0);
|
||||
let now = Instant::now();
|
||||
let mut reclaim_tokens_back_to_source_account = false;
|
||||
while now.elapsed() < time || reclaim_tokens_back_to_source_account {
|
||||
let balance = client.poll_get_balance(&id.pubkey()).unwrap_or(-1);
|
||||
metrics_submit_token_balance(balance);
|
||||
|
||||
// ping-pong between source and destination accounts for each loop iteration
|
||||
// this seems to be faster than trying to determine the balance of individual
|
||||
// accounts
|
||||
generate_txs(
|
||||
&shared_txs,
|
||||
&id,
|
||||
&keypairs,
|
||||
&last_id,
|
||||
threads,
|
||||
reclaim_tokens_back_to_source_account,
|
||||
);
|
||||
reclaim_tokens_back_to_source_account = !reclaim_tokens_back_to_source_account;
|
||||
|
||||
// In sustained mode overlap the transfers with generation
|
||||
// this has higher average performance but lower peak performance
|
||||
// in tested environments.
|
||||
if !sustained {
|
||||
while shared_tx_active_thread_count.load(Ordering::Relaxed) > 0 {
|
||||
sleep(Duration::from_millis(100));
|
||||
}
|
||||
}
|
||||
// It's not feasible (would take too much time) to confirm each of the `tx_count / 2`
|
||||
// transactions sent by `generate_txs()` so instead send and confirm a single transaction
|
||||
// to validate the network is still functional.
|
||||
send_barrier_transaction(&mut barrier_client, &mut last_id, &barrier_id);
|
||||
}
|
||||
|
||||
// Stop the sampling threads so it will collect the stats
|
||||
exit_signal.store(true, Ordering::Relaxed);
|
||||
|
||||
println!("Waiting for validator threads...");
|
||||
for t in v_threads {
|
||||
if let Err(err) = t.join() {
|
||||
println!(" join() failed with: {:?}", err);
|
||||
}
|
||||
}
|
||||
|
||||
// join the tx send threads
|
||||
println!("Waiting for transmit threads...");
|
||||
for t in s_threads {
|
||||
if let Err(err) = t.join() {
|
||||
println!(" join() failed with: {:?}", err);
|
||||
}
|
||||
}
|
||||
|
||||
let balance = client.poll_get_balance(&id.pubkey()).unwrap_or(-1);
|
||||
metrics_submit_token_balance(balance);
|
||||
|
||||
compute_and_report_stats(&maxes, sample_period, &now.elapsed());
|
||||
|
||||
// join the crdt client threads
|
||||
for t in c_threads {
|
||||
t.join().unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
fn spy_node(addr: IpAddr) -> (NodeInfo, UdpSocket) {
    let gossip_socket = udp_random_bind(8000, 10000, 5).unwrap();

    let gossip_addr = SocketAddr::new(addr, gossip_socket.local_addr().unwrap().port());

    let pubkey = Keypair::new().pubkey();
    let daddr = "0.0.0.0:0".parse().unwrap();
    assert!(!gossip_addr.ip().is_unspecified());
    assert!(!gossip_addr.ip().is_multicast());
    let node = NodeInfo::new(pubkey, gossip_addr, daddr, daddr, daddr, daddr);
    (node, gossip_socket)
}

fn converge(
    leader: &NodeInfo,
    exit_signal: &Arc<AtomicBool>,
    num_nodes: usize,
    threads: &mut Vec<JoinHandle<()>>,
    addr: IpAddr,
) -> Vec<NodeInfo> {
    // let's spy on the network
    let (spy, spy_gossip) = spy_node(addr);
    let mut spy_crdt = Crdt::new(spy).expect("Crdt::new");
    spy_crdt.insert(&leader);
    spy_crdt.set_leader(leader.id);
    let spy_ref = Arc::new(RwLock::new(spy_crdt));
    let window = default_window();
    let gossip_send_socket = udp_random_bind(8000, 10000, 5).unwrap();
    let ncp = Ncp::new(
        &spy_ref,
        window.clone(),
        None,
        spy_gossip,
        gossip_send_socket,
        exit_signal.clone(),
    ).expect("DataReplicator::new");
    let mut v: Vec<NodeInfo> = vec![];
    // wait for the network to converge, 30 seconds should be plenty
    for _ in 0..30 {
        v = spy_ref
            .read()
            .unwrap()
            .table
            .values()
            .filter(|x| Crdt::is_valid_address(x.contact_info.rpu))
            .cloned()
            .collect();
        if v.len() >= num_nodes {
            println!("CONVERGED!");
            break;
        } else {
            println!(
                "{} node(s) discovered (looking for {} or more)",
                v.len(),
                num_nodes
            );
        }
        sleep(Duration::new(1, 0));
    }
    threads.extend(ncp.thread_hdls().into_iter());
    v
}

fn read_leader(path: &str) -> Config {
|
||||
let file = File::open(path).unwrap_or_else(|_| panic!("file not found: {}", path));
|
||||
serde_json::from_reader(file).unwrap_or_else(|_| panic!("failed to parse {}", path))
|
||||
}
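
The convergence loop in `converge()` is a poll-until-quorum pattern: sample the gossip table once per second and stop as soon as enough peers are visible. As a reference, a minimal standalone sketch of the same idea (the `discover` closure is a hypothetical stand-in for reading the crdt table):

use std::thread::sleep;
use std::time::Duration;

fn poll_until_converged<F>(mut discover: F, num_nodes: usize, max_tries: usize) -> Option<Vec<String>>
where
    F: FnMut() -> Vec<String>,
{
    for _ in 0..max_tries {
        let found = discover();
        // stop as soon as the quorum is visible
        if found.len() >= num_nodes {
            return Some(found);
        }
        sleep(Duration::new(1, 0));
    }
    None
}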
@@ -1,456 +0,0 @@
extern crate bincode;
extern crate clap;
extern crate env_logger;
extern crate rayon;
extern crate serde_json;
extern crate solana;

use bincode::serialize;
use clap::{App, Arg};
use rayon::prelude::*;
use solana::crdt::{Crdt, NodeInfo};
use solana::drone::DroneRequest;
use solana::fullnode::Config;
use solana::hash::Hash;
use solana::mint::Mint;
use solana::nat::{udp_public_bind, udp_random_bind};
use solana::ncp::Ncp;
use solana::service::Service;
use solana::signature::{GenKeys, KeyPair, KeyPairUtil};
use solana::streamer::default_window;
use solana::thin_client::ThinClient;
use solana::timing::{duration_as_ms, duration_as_s};
use solana::transaction::Transaction;
use std::error;
use std::fs::File;
use std::io::Write;
use std::net::{IpAddr, Ipv4Addr, SocketAddr, TcpStream, UdpSocket};
use std::process::exit;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, RwLock};
use std::thread::sleep;
use std::thread::Builder;
use std::thread::JoinHandle;
use std::time::Duration;
use std::time::Instant;

fn sample_tx_count(
    exit: &Arc<AtomicBool>,
    maxes: &Arc<RwLock<Vec<(f64, u64)>>>,
    first_count: u64,
    v: &NodeInfo,
    sample_period: u64,
) {
    let mut client = mk_client(&v);
    let mut now = Instant::now();
    let mut initial_tx_count = client.transaction_count();
    let mut max_tps = 0.0;
    let mut total;
    loop {
        let tx_count = client.transaction_count();
        let duration = now.elapsed();
        now = Instant::now();
        let sample = tx_count - initial_tx_count;
        initial_tx_count = tx_count;
        println!("{}: Transactions processed {}", v.contact_info.tpu, sample);
        let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
        let tps = (sample * 1_000_000_000) as f64 / ns as f64;
        if tps > max_tps {
            max_tps = tps;
        }
        println!("{}: {:.2} tps", v.contact_info.tpu, tps);
        total = tx_count - first_count;
        println!(
            "{}: Total Transactions processed {}",
            v.contact_info.tpu, total
        );
        sleep(Duration::new(sample_period, 0));

        if exit.load(Ordering::Relaxed) {
            println!("exiting validator thread");
            maxes.write().unwrap().push((max_tps, total));
            break;
        }
    }
}

fn generate_and_send_txs(
    client: &mut ThinClient,
    tx_clients: &[ThinClient],
    id: &Mint,
    keypairs: &[KeyPair],
    leader: &NodeInfo,
    txs: i64,
    last_id: &mut Hash,
    threads: usize,
    reclaim: bool,
) {
    println!("Signing transactions... {}", txs / 2,);
    let signing_start = Instant::now();

    let transactions: Vec<_> = if !reclaim {
        keypairs
            .par_iter()
            .map(|keypair| Transaction::new(&id.keypair(), keypair.pubkey(), 1, *last_id))
            .collect()
    } else {
        keypairs
            .par_iter()
            .map(|keypair| Transaction::new(keypair, id.pubkey(), 1, *last_id))
            .collect()
    };

    let duration = signing_start.elapsed();
    let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
    let bsps = txs as f64 / ns as f64;
    let nsps = ns as f64 / txs as f64;
    println!(
        "Done. {:.2} thousand signatures per second, {:.2} us per signature, {} ms total time",
        bsps * 1_000_000_f64,
        nsps / 1_000_f64,
        duration_as_ms(&duration),
    );

    println!(
        "Transfering {} transactions in {} batches",
        txs / 2,
        threads
    );
    let transfer_start = Instant::now();
    let sz = transactions.len() / threads;
    let chunks: Vec<_> = transactions.chunks(sz).collect();
    chunks
        .into_par_iter()
        .zip(tx_clients)
        .for_each(|(txs, client)| {
            println!(
                "Transferring 1 unit {} times... to {:?}",
                txs.len(),
                leader.contact_info.tpu
            );
            for tx in txs {
                client.transfer_signed(tx).unwrap();
            }
        });
    println!(
        "Transfer done. {:?} ms {} tps",
        duration_as_ms(&transfer_start.elapsed()),
        txs as f32 / (duration_as_s(&transfer_start.elapsed()))
    );

    loop {
        let new_id = client.get_last_id();
        if *last_id != new_id {
            *last_id = new_id;
            break;
        }
        sleep(Duration::from_millis(100));
    }
}

fn main() {
    env_logger::init();
    let mut threads = 4usize;
    let mut num_nodes = 1usize;
    let mut time_sec = 90;

    let matches = App::new("solana-client-demo")
        .arg(
            Arg::with_name("leader")
                .short("l")
                .long("leader")
                .value_name("PATH")
                .takes_value(true)
                .help("/path/to/leader.json"),
        )
        .arg(
            Arg::with_name("mint")
                .short("m")
                .long("mint")
                .value_name("PATH")
                .takes_value(true)
                .help("/path/to/mint.json"),
        )
        .arg(
            Arg::with_name("num_nodes")
                .short("n")
                .long("nodes")
                .value_name("NUMBER")
                .takes_value(true)
                .help("number of nodes to converge to"),
        )
        .arg(
            Arg::with_name("threads")
                .short("t")
                .long("threads")
                .value_name("NUMBER")
                .takes_value(true)
                .help("number of threads"),
        )
        .arg(
            Arg::with_name("seconds")
                .short("s")
                .long("sec")
                .value_name("NUMBER")
                .takes_value(true)
                .help("send transactions for this many seconds"),
        )
        .get_matches();

    let leader: NodeInfo;
    if let Some(l) = matches.value_of("leader") {
        leader = read_leader(l).node_info;
    } else {
        let server_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 8000);
        leader = NodeInfo::new_leader(&server_addr);
    };

    let id: Mint;
    if let Some(m) = matches.value_of("mint") {
        id = read_mint(m).expect("client mint");
    } else {
        eprintln!("No mint found!");
        exit(1);
    };

    if let Some(t) = matches.value_of("threads") {
        threads = t.to_string().parse().expect("integer");
    }

    if let Some(n) = matches.value_of("nodes") {
        num_nodes = n.to_string().parse().expect("integer");
    }

    if let Some(s) = matches.value_of("seconds") {
        time_sec = s.to_string().parse().expect("integer");
    }

    let mut drone_addr = leader.contact_info.tpu;
    drone_addr.set_port(9900);

    let signal = Arc::new(AtomicBool::new(false));
    let mut c_threads = vec![];
    let validators = converge(&leader, &signal.clone(), num_nodes, &mut c_threads);
    assert_eq!(validators.len(), num_nodes);

    let mut client = mk_client(&leader);

    let starting_balance = client.poll_get_balance(&id.pubkey()).unwrap();
    let txs: i64 = 500_000;

    if starting_balance < txs {
        let airdrop_amount = txs - starting_balance;
        println!("Airdropping {:?} tokens", airdrop_amount);
        request_airdrop(&drone_addr, &id, airdrop_amount as u64).unwrap();
        // TODO: return airdrop Result from Drone
        sleep(Duration::from_millis(100));

        let balance = client.poll_get_balance(&id.pubkey()).unwrap();
        println!("Your balance is: {:?}", balance);

        if balance < txs || (starting_balance == balance) {
            println!("TPS airdrop limit reached; wait 60sec to retry");
            exit(1);
        }
    }

    println!("Get last ID...");
    let mut last_id = client.get_last_id();
    println!("Got last ID {:?}", last_id);

    let mut seed = [0u8; 32];
    seed.copy_from_slice(&id.keypair().public_key_bytes()[..32]);
    let rnd = GenKeys::new(seed);

    println!("Creating keypairs...");
    let keypairs = rnd.gen_n_keypairs(txs / 2);

    let first_count = client.transaction_count();
    println!("initial count {}", first_count);

    println!("Sampling tps every second...",);

    // Setup a thread per validator to sample every period
    // collect the max transaction rate and total tx count seen
    let maxes = Arc::new(RwLock::new(Vec::new()));
    let sample_period = 1; // in seconds
    let v_threads: Vec<_> = validators
        .into_iter()
        .map(|v| {
            let exit = signal.clone();
            let maxes = maxes.clone();
            Builder::new()
                .name("solana-client-sample".to_string())
                .spawn(move || {
                    sample_tx_count(&exit, &maxes, first_count, &v, sample_period);
                })
                .unwrap()
        })
        .collect();

    let clients: Vec<_> = (0..threads).map(|_| mk_client(&leader)).collect();

    // generate and send transactions for the specified duration
    let time = Duration::new(time_sec / 2, 0);
    let mut now = Instant::now();
    while now.elapsed() < time {
        generate_and_send_txs(
            &mut client,
            &clients,
            &id,
            &keypairs,
            &leader,
            txs,
            &mut last_id,
            threads,
            false,
        );
    }
    last_id = client.get_last_id();
    now = Instant::now();
    while now.elapsed() < time {
        generate_and_send_txs(
            &mut client,
            &clients,
            &id,
            &keypairs,
            &leader,
            txs,
            &mut last_id,
            threads,
            true,
        );
    }

    // Stop the sampling threads so it will collect the stats
    signal.store(true, Ordering::Relaxed);
    for t in v_threads {
        t.join().unwrap();
    }

    // Compute/report stats
    let mut max_of_maxes = 0.0;
    let mut total_txs = 0;
    for (max, txs) in maxes.read().unwrap().iter() {
        if *max > max_of_maxes {
            max_of_maxes = *max;
        }
        total_txs += *txs;
    }
    println!(
        "\nHighest TPS: {:.2} sampling period {}s total transactions: {} clients: {}",
        max_of_maxes,
        sample_period,
        total_txs,
        maxes.read().unwrap().len()
    );

    // join the crdt client threads
    for t in c_threads {
        t.join().unwrap();
    }
}

fn mk_client(r: &NodeInfo) -> ThinClient {
    let requests_socket = udp_random_bind(8000, 10000, 5).unwrap();
    let transactions_socket = udp_random_bind(8000, 10000, 5).unwrap();

    requests_socket
        .set_read_timeout(Some(Duration::new(1, 0)))
        .unwrap();

    ThinClient::new(
        r.contact_info.rpu,
        requests_socket,
        r.contact_info.tpu,
        transactions_socket,
    )
}

fn spy_node() -> (NodeInfo, UdpSocket) {
    let gossip_socket_pair = udp_public_bind("gossip", 8000, 10000);
    let pubkey = KeyPair::new().pubkey();
    let daddr = "0.0.0.0:0".parse().unwrap();
    let node = NodeInfo::new(
        pubkey,
        //gossip.local_addr().unwrap(),
        gossip_socket_pair.addr,
        daddr,
        daddr,
        daddr,
        daddr,
    );
    (node, gossip_socket_pair.receiver)
}

fn converge(
    leader: &NodeInfo,
    exit: &Arc<AtomicBool>,
    num_nodes: usize,
    threads: &mut Vec<JoinHandle<()>>,
) -> Vec<NodeInfo> {
    //lets spy on the network
    let daddr = "0.0.0.0:0".parse().unwrap();
    let (spy, spy_gossip) = spy_node();
    let mut spy_crdt = Crdt::new(spy);
    spy_crdt.insert(&leader);
    spy_crdt.set_leader(leader.id);
    let spy_ref = Arc::new(RwLock::new(spy_crdt));
    let window = default_window();
    let gossip_send_socket = udp_random_bind(8000, 10000, 5).unwrap();
    let ncp = Ncp::new(
        &spy_ref.clone(),
        window.clone(),
        spy_gossip,
        gossip_send_socket,
        exit.clone(),
    ).expect("DataReplicator::new");
    let mut rv = vec![];
    //wait for the network to converge, 30 seconds should be plenty
    for _ in 0..30 {
        let v: Vec<NodeInfo> = spy_ref
            .read()
            .unwrap()
            .table
            .values()
            .into_iter()
            .filter(|x| x.contact_info.rpu != daddr)
            .cloned()
            .collect();
        if v.len() >= num_nodes {
            println!("CONVERGED!");
            rv.extend(v.into_iter());
            break;
        }
        sleep(Duration::new(1, 0));
    }
    threads.extend(ncp.thread_hdls().into_iter());
    rv
}

fn read_leader(path: &str) -> Config {
    let file = File::open(path).unwrap_or_else(|_| panic!("file not found: {}", path));
    serde_json::from_reader(file).unwrap_or_else(|_| panic!("failed to parse {}", path))
}

fn read_mint(path: &str) -> Result<Mint, Box<error::Error>> {
    let file = File::open(path.to_string())?;
    let mint = serde_json::from_reader(file)?;
    Ok(mint)
}

fn request_airdrop(
    drone_addr: &SocketAddr,
    id: &Mint,
    tokens: u64,
) -> Result<(), Box<error::Error>> {
    let mut stream = TcpStream::connect(drone_addr)?;
    let req = DroneRequest::GetAirdrop {
        airdrop_request_amount: tokens,
        client_public_key: id.pubkey(),
    };
    let tx = serialize(&req).expect("serialize drone request");
    stream.write_all(&tx).unwrap();
    // TODO: add timeout to this function, in case of unresponsive drone
    Ok(())
}
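
The TPS math in `sample_tx_count()` converts the elapsed `Duration` to nanoseconds and scales the sampled transaction count back up to a per-second rate; e.g. 5_000 transactions over 0.5 s works out to 10_000 TPS. The same arithmetic as a standalone sketch:

use std::time::Duration;

fn tps(sample: u64, elapsed: Duration) -> f64 {
    // total elapsed nanoseconds
    let ns = elapsed.as_secs() * 1_000_000_000 + u64::from(elapsed.subsec_nanos());
    // scale the count up to a one-second window
    (sample * 1_000_000_000) as f64 / ns as f64
}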
@@ -1,6 +1,6 @@
extern crate bincode;
#[macro_use]
extern crate clap;
extern crate env_logger;
extern crate serde_json;
extern crate solana;
extern crate tokio;
@@ -10,13 +10,13 @@ extern crate tokio_io;
use bincode::deserialize;
use clap::{App, Arg};
use solana::crdt::NodeInfo;
use solana::drone::{Drone, DroneRequest};
use solana::drone::{Drone, DroneRequest, DRONE_PORT};
use solana::fullnode::Config;
use solana::mint::Mint;
use std::error;
use solana::logger;
use solana::metrics::set_panic_hook;
use solana::signature::read_keypair;
use std::fs::File;
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use std::process::exit;
use std::sync::{Arc, Mutex};
use std::thread;
use tokio::net::TcpListener;
@@ -24,8 +24,10 @@ use tokio::prelude::*;
use tokio_codec::{BytesCodec, Decoder};

fn main() {
    env_logger::init();
    let matches = App::new("solana-client-demo")
    logger::setup();
    set_panic_hook("drone");
    let matches = App::new("drone")
        .version(crate_version!())
        .arg(
            Arg::with_name("leader")
                .short("l")
@@ -35,11 +37,12 @@ fn main() {
                .help("/path/to/leader.json"),
        )
        .arg(
            Arg::with_name("mint")
                .short("m")
                .long("mint")
            Arg::with_name("keypair")
                .short("k")
                .long("keypair")
                .value_name("PATH")
                .takes_value(true)
                .required(true)
                .help("/path/to/mint.json"),
        )
        .arg(
@@ -68,13 +71,9 @@ fn main() {
        leader = NodeInfo::new_leader(&server_addr);
    };

    let mint: Mint;
    if let Some(m) = matches.value_of("mint") {
        mint = read_mint(m).expect("client mint");
    } else {
        eprintln!("No mint found!");
        exit(1);
    };
    let mint_keypair =
        read_keypair(matches.value_of("keypair").expect("keypair")).expect("client keypair");

    let time_slice: Option<u64>;
    if let Some(t) = matches.value_of("time") {
        time_slice = Some(t.to_string().parse().expect("integer"));
@@ -88,9 +87,7 @@ fn main() {
        request_cap = None;
    }

    let mint_keypair = mint.keypair();

    let drone_addr: SocketAddr = "0.0.0.0:9900".parse().unwrap();
    let drone_addr: SocketAddr = format!("0.0.0.0:{}", DRONE_PORT).parse().unwrap();

    let drone = Arc::new(Mutex::new(Drone::new(
        mint_keypair,
@@ -121,8 +118,14 @@ fn main() {

            let processor = reader
                .for_each(move |bytes| {
                    let req: DroneRequest =
                        deserialize(&bytes).expect("deserialize packet in drone");
                    let req: DroneRequest = deserialize(&bytes).or_else(|err| {
                        use std::io;
                        Err(io::Error::new(
                            io::ErrorKind::Other,
                            format!("deserialize packet in drone: {:?}", err),
                        ))
                    })?;

                    println!("Airdrop requested...");
                    // let res = drone2.lock().unwrap().check_rate_limit(client_ip);
                    let res1 = drone2.lock().unwrap().send_airdrop(req);
@@ -132,14 +135,6 @@ fn main() {
                    }
                    Ok(())
                })
                .and_then(|()| {
                    println!("Socket received FIN packet and closed connection");
                    Ok(())
                })
                .or_else(|err| {
                    println!("Socket closed with error: {:?}", err);
                    Err(err)
                })
                .then(|result| {
                    println!("Socket closed with result: {:?}", result);
                    Ok(())
@@ -152,9 +147,3 @@ fn read_leader(path: &str) -> Config {
    let file = File::open(path).unwrap_or_else(|_| panic!("file not found: {}", path));
    serde_json::from_reader(file).unwrap_or_else(|_| panic!("failed to parse {}", path))
}

fn read_mint(path: &str) -> Result<Mint, Box<error::Error>> {
    let file = File::open(path.to_string())?;
    let mint = serde_json::from_reader(file)?;
    Ok(mint)
}
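
The `or_else` block added to the drone's `for_each` above maps a bincode deserialize failure into an `io::Error`, so `?` can propagate it through tokio's stream combinators instead of panicking. The same conversion as a standalone sketch (`to_io_error` is a hypothetical helper name):

use std::io;

fn to_io_error<E: std::fmt::Debug>(err: E) -> io::Error {
    // wrap any Debug-printable error as a generic io::Error
    io::Error::new(
        io::ErrorKind::Other,
        format!("deserialize packet in drone: {:?}", err),
    )
}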
@@ -1,4 +1,6 @@
#[macro_use]
extern crate clap;
extern crate dirs;
extern crate serde_json;
extern crate solana;

@@ -6,11 +8,13 @@ use clap::{App, Arg};
use solana::crdt::{get_ip_addr, parse_port_or_addr};
use solana::fullnode::Config;
use solana::nat::get_public_ip_addr;
use solana::signature::read_pkcs8;
use std::io;
use std::net::SocketAddr;

fn main() {
    let matches = App::new("fullnode-config")
        .version(crate_version!())
        .arg(
            Arg::with_name("local")
                .short("l")
@@ -18,6 +22,14 @@ fn main() {
                .takes_value(false)
                .help("detect network address from local machine configuration"),
        )
        .arg(
            Arg::with_name("keypair")
                .short("k")
                .long("keypair")
                .value_name("PATH")
                .takes_value(true)
                .help("/path/to/id.json"),
        )
        .arg(
            Arg::with_name("public")
                .short("p")
@@ -54,9 +66,18 @@ fn main() {
        bind_addr
    };

    let mut path = dirs::home_dir().expect("home directory");
    let id_path = if matches.is_present("keypair") {
        matches.value_of("keypair").unwrap()
    } else {
        path.extend(&[".config", "solana", "id.json"]);
        path.to_str().unwrap()
    };
    let pkcs8 = read_pkcs8(id_path).expect("client keypair");

    // we need all the receiving sockets to be bound within the expected
    // port range that we open on aws
    let config = Config::new(&bind_addr);
    let config = Config::new(&bind_addr, pkcs8);
    let stdout = io::stdout();
    serde_json::to_writer(stdout, &config).expect("serialize");
}
@@ -1,29 +1,33 @@
extern crate atty;
#[macro_use]
extern crate clap;
extern crate env_logger;
extern crate getopts;
extern crate log;
extern crate serde_json;
extern crate solana;

use atty::{is, Stream};
use clap::{App, Arg};
use solana::client::mk_client;
use solana::crdt::{NodeInfo, TestNode};
use solana::fullnode::{Config, FullNode, InFile, OutFile};
use solana::drone::DRONE_PORT;
use solana::fullnode::{Config, Fullnode};
use solana::logger;
use solana::metrics::set_panic_hook;
use solana::service::Service;
use solana::signature::{KeyPair, KeyPairUtil};
use solana::signature::{Keypair, KeypairUtil};
use solana::wallet::request_airdrop;
use std::fs::File;
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use std::process::exit;
//use std::time::Duration;

fn main() -> () {
    env_logger::init();
    logger::setup();
    set_panic_hook("fullnode");
    let matches = App::new("fullnode")
        .version(crate_version!())
        .arg(
            Arg::with_name("identity")
                .short("l")
                .long("local")
                .short("i")
                .long("identity")
                .value_name("FILE")
                .takes_value(true)
                .help("run with the identity found in FILE"),
@@ -34,27 +38,24 @@ fn main() -> () {
                .long("testnet")
                .value_name("HOST:PORT")
                .takes_value(true)
                .help("testnet; connect to the network at this gossip entry point"),
                .help("connect to the network at this gossip entry point"),
        )
        .arg(
            Arg::with_name("output")
                .short("o")
                .long("output")
                .value_name("FILE")
            Arg::with_name("ledger")
                .short("l")
                .long("ledger")
                .value_name("DIR")
                .takes_value(true)
                .help("output log to FILE, defaults to stdout (ignored by validators)"),
                .required(true)
                .help("use DIR as persistent ledger location"),
        )
        .get_matches();
    if is(Stream::Stdin) {
        eprintln!("nothing found on stdin, expected a log file");
        exit(1);
    }

    let bind_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 8000);
    let mut keypair = KeyPair::new();
    let mut keypair = Keypair::new();
    let mut repl_data = NodeInfo::new_leader_with_pubkey(keypair.pubkey(), &bind_addr);
    if let Some(l) = matches.value_of("identity") {
        let path = l.to_string();
    if let Some(i) = matches.value_of("identity") {
        let path = i.to_string();
        if let Ok(file) = File::open(path.clone()) {
            let parse: serde_json::Result<Config> = serde_json::from_reader(file);
            if let Ok(data) = parse {
@@ -69,27 +70,49 @@ fn main() -> () {
            exit(1);
        }
    }

    let leader_pubkey = keypair.pubkey();
    let repl_clone = repl_data.clone();

    let ledger_path = matches.value_of("ledger").unwrap();

    let mut node = TestNode::new_with_bind_addr(repl_data, bind_addr);
    let mut drone_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), DRONE_PORT);
    let fullnode = if let Some(t) = matches.value_of("testnet") {
        let testnet_address_string = t.to_string();
        let testnet_addr = testnet_address_string.parse().unwrap();
        FullNode::new(
            node,
            false,
            InFile::StdIn,
            Some(keypair),
            Some(testnet_addr),
            None,
        )
        let testnet_addr: SocketAddr = testnet_address_string.parse().unwrap();
        drone_addr.set_ip(testnet_addr.ip());

        Fullnode::new(node, false, ledger_path, keypair, Some(testnet_addr))
    } else {
        node.data.leader_id = node.data.id;

        let outfile = if let Some(o) = matches.value_of("output") {
            OutFile::Path(o.to_string())
        } else {
            OutFile::StdOut
        };
        FullNode::new(node, true, InFile::StdIn, None, None, Some(outfile))
        Fullnode::new(node, true, ledger_path, keypair, None)
    };

    let mut client = mk_client(&repl_clone);
    let previous_balance = client.poll_get_balance(&leader_pubkey).unwrap();
    eprintln!("balance is {}", previous_balance);

    if previous_balance == 0 {
        eprintln!("requesting airdrop from {}", drone_addr);
        request_airdrop(&drone_addr, &leader_pubkey, 50).unwrap_or_else(|_| {
            panic!(
                "Airdrop failed, is the drone address correct {:?} drone running?",
                drone_addr
            )
        });

        // Try multiple times to confirm a non-zero balance. |poll_get_balance| currently times
        // out after 1 second, and sometimes this is not enough time while the network is
        // booting
        let balance_ok = (0..30).any(|i| {
            let balance = client.poll_get_balance(&leader_pubkey).unwrap();
            eprintln!("new balance is {} (attempt #{})", balance, i);
            balance > 0
        });
        assert!(balance_ok, "0 balance, airdrop failed?");
    }

    fullnode.join().expect("join");
}
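
The balance confirmation above leans on `(0..30).any(...)`: the range iterator runs the closure up to 30 times and short-circuits on the first `true`, which reads more directly than a mutable success flag in a loop. A minimal sketch of the same retry idiom (the `check` closure is a hypothetical stand-in for the balance poll):

fn retry(mut check: impl FnMut() -> bool, max_attempts: usize) -> bool {
    // any() stops at the first attempt that succeeds
    (0..max_attempts).any(|_| check())
}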
@@ -1,17 +1,45 @@
//! A command-line executable for generating the chain's genesis block.

extern crate atty;
#[macro_use]
extern crate clap;
extern crate serde_json;
extern crate solana;

use atty::{is, Stream};
use solana::entry_writer::EntryWriter;
use clap::{App, Arg};
use solana::ledger::LedgerWriter;
use solana::mint::Mint;
use std::error;
use std::io::{stdin, stdout, Read};
use std::io::{stdin, Read};
use std::process::exit;

fn main() -> Result<(), Box<error::Error>> {
    let matches = App::new("solana-genesis")
        .version(crate_version!())
        .arg(
            Arg::with_name("tokens")
                .short("t")
                .long("tokens")
                .value_name("NUMBER")
                .takes_value(true)
                .required(true)
                .help("Number of tokens with which to initialize mint"),
        )
        .arg(
            Arg::with_name("ledger")
                .short("l")
                .long("ledger")
                .value_name("DIR")
                .takes_value(true)
                .required(true)
                .help("use DIR as persistent ledger location"),
        )
        .get_matches();

    let tokens = value_t_or_exit!(matches, "tokens", i64);
    let ledger_path = matches.value_of("ledger").unwrap();

    if is(Stream::Stdin) {
        eprintln!("nothing found on stdin, expected a json file");
        exit(1);
@@ -24,8 +52,11 @@ fn main() -> Result<(), Box<error::Error>> {
        exit(1);
    }

    let mint: Mint = serde_json::from_str(&buffer)?;
    let mut writer = stdout();
    EntryWriter::write_entries(&mut writer, mint.create_entries())?;
    let pkcs8: Vec<u8> = serde_json::from_str(&buffer)?;
    let mint = Mint::new_with_pkcs8(tokens, pkcs8);

    let mut ledger_writer = LedgerWriter::open(&ledger_path, true)?;
    ledger_writer.write_entries(mint.create_entries())?;

    Ok(())
}
@@ -1,14 +1,51 @@
#[macro_use]
extern crate clap;
extern crate dirs;
extern crate ring;
extern crate serde_json;

use clap::{App, Arg};
use ring::rand::SystemRandom;
use ring::signature::Ed25519KeyPair;
use std::error;
use std::fs::{self, File};
use std::io::Write;
use std::path::Path;

fn main() -> Result<(), Box<error::Error>> {
    let matches = App::new("solana-keygen")
        .version(crate_version!())
        .arg(
            Arg::with_name("outfile")
                .short("o")
                .long("outfile")
                .value_name("PATH")
                .takes_value(true)
                .help("path to generated file"),
        )
        .get_matches();

    let rnd = SystemRandom::new();
    let pkcs8_bytes = Ed25519KeyPair::generate_pkcs8(&rnd)?;
    let serialized = serde_json::to_string(&pkcs8_bytes.to_vec())?;
    println!("{}", serialized);

    let mut path = dirs::home_dir().expect("home directory");
    let outfile = if matches.is_present("outfile") {
        matches.value_of("outfile").unwrap()
    } else {
        path.extend(&[".config", "solana", "id.json"]);
        path.to_str().unwrap()
    };

    if outfile == "-" {
        println!("{}", serialized);
    } else {
        if let Some(outdir) = Path::new(outfile).parent() {
            fs::create_dir_all(outdir)?;
        }
        let mut f = File::create(outfile)?;
        f.write_all(&serialized.into_bytes())?;
    }

    Ok(())
}
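
The generated file is just the PKCS#8 document serialized as a JSON byte array, so reading it back is the mirror image of the write above. A minimal sketch assuming the same serde_json dependency (`load_pkcs8` is a hypothetical name; the in-tree equivalent is `read_pkcs8` in the signature module):

use std::error;
use std::fs::File;

fn load_pkcs8(path: &str) -> Result<Vec<u8>, Box<error::Error>> {
    let file = File::open(path)?;
    // keygen's output is a JSON array of the PKCS#8 bytes
    let pkcs8: Vec<u8> = serde_json::from_reader(file)?;
    Ok(pkcs8)
}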
src/bin/ledger-tool.rs (new file, 137 lines)
@@ -0,0 +1,137 @@
#[macro_use]
extern crate clap;
extern crate serde_json;
extern crate solana;

use clap::{App, Arg, SubCommand};
use solana::bank::Bank;
use solana::ledger::{read_ledger, verify_ledger};
use solana::logger;
use std::io::{stdout, Write};
use std::process::exit;

fn main() {
    logger::setup();
    let matches = App::new("ledger-tool")
        .version(crate_version!())
        .arg(
            Arg::with_name("ledger")
                .short("l")
                .long("ledger")
                .value_name("DIR")
                .takes_value(true)
                .required(true)
                .help("use DIR for ledger location"),
        )
        .arg(
            Arg::with_name("head")
                .short("n")
                .long("head")
                .value_name("NUM")
                .takes_value(true)
                .help("at most the first NUM entries in ledger\n (only applies to verify, print, json commands)"),
        )
        .arg(
            Arg::with_name("precheck")
                .short("p")
                .long("precheck")
                .help("use ledger_verify() to check internal ledger consistency before proceeding"),
        )
        .subcommand(SubCommand::with_name("print").about("Print the ledger"))
        .subcommand(SubCommand::with_name("json").about("Print the ledger in JSON format"))
        .subcommand(SubCommand::with_name("verify").about("Verify the ledger's PoH"))
        .get_matches();

    let ledger_path = matches.value_of("ledger").unwrap();

    if matches.is_present("precheck") {
        if let Err(e) = verify_ledger(&ledger_path) {
            eprintln!("ledger precheck failed, error: {:?} ", e);
            exit(1);
        }
    }
    let entries = match read_ledger(ledger_path, true) {
        Ok(entries) => entries,
        Err(err) => {
            eprintln!("Failed to open ledger at {}: {}", ledger_path, err);
            exit(1);
        }
    };

    let head = match matches.value_of("head") {
        Some(head) => head.parse().expect("please pass a number for --head"),
        None => <usize>::max_value(),
    };

    match matches.subcommand() {
        ("print", _) => {
            let entries = match read_ledger(ledger_path, true) {
                Ok(entries) => entries,
                Err(err) => {
                    eprintln!("Failed to open ledger at {}: {}", ledger_path, err);
                    exit(1);
                }
            };
            for (i, entry) in entries.enumerate() {
                if i >= head {
                    break;
                }
                let entry = entry.unwrap();
                println!("{:?}", entry);
            }
        }
        ("json", _) => {
            stdout().write_all(b"{\"ledger\":[\n").expect("open array");
            for (i, entry) in entries.enumerate() {
                if i >= head {
                    break;
                }
                let entry = entry.unwrap();
                serde_json::to_writer(stdout(), &entry).expect("serialize");
                stdout().write_all(b",\n").expect("newline");
            }
            stdout().write_all(b"\n]}\n").expect("close array");
        }
        ("verify", _) => {
            if head < 2 {
                eprintln!("verify requires at least 2 entries to run");
                exit(1);
            }
            let bank = Bank::default();

            {
                let genesis = match read_ledger(ledger_path, true) {
                    Ok(entries) => entries,
                    Err(err) => {
                        eprintln!("Failed to open ledger at {}: {}", ledger_path, err);
                        exit(1);
                    }
                };

                let genesis = genesis.take(2).map(|e| e.unwrap());

                if let Err(e) = bank.process_ledger(genesis) {
                    eprintln!("verify failed at genesis err: {:?}", e);
                    exit(1);
                }
            }
            let entries = entries.map(|e| e.unwrap());

            let head = head - 2;
            for (i, entry) in entries.skip(2).enumerate() {
                if i >= head {
                    break;
                }
                if let Err(e) = bank.process_entry(entry) {
                    eprintln!("verify failed at entry[{}], err: {:?}", i + 2, e);
                    exit(1);
                }
            }
        }
        ("", _) => {
            eprintln!("{}", matches.usage());
            exit(1);
        }
        _ => unreachable!(),
    };
}
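
The manual `if i >= head { break; }` counters in the subcommands above could equally be written with iterator adapters. A sketch of the `print` arm's logic using `take`, kept generic over the entry type so it stands alone:

fn print_head<T, E, I>(entries: I, head: usize)
where
    T: std::fmt::Debug,
    E: std::fmt::Debug,
    I: Iterator<Item = Result<T, E>>,
{
    // take(head) replaces the enumerate-and-break counter
    for entry in entries.take(head) {
        println!("{:?}", entry.unwrap());
    }
}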
@@ -1,29 +0,0 @@
extern crate atty;
extern crate serde_json;
extern crate solana;

use atty::{is, Stream};
use solana::mint::Mint;
use std::io;
use std::process::exit;

fn main() {
    let mut input_text = String::new();
    if is(Stream::Stdin) {
        eprintln!("nothing found on stdin, expected a token number");
        exit(1);
    }

    io::stdin().read_line(&mut input_text).unwrap();
    let trimmed = input_text.trim();
    let tokens = trimmed.parse::<i64>().unwrap_or_else(|e| {
        eprintln!("{}", e);
        exit(1);
    });
    let mint = Mint::new(tokens);
    let serialized = serde_json::to_string(&mint).unwrap_or_else(|e| {
        eprintln!("failed to serialize: {}", e);
        exit(1);
    });
    println!("{}", serialized);
}
@@ -1,26 +1,25 @@
extern crate atty;
extern crate bincode;
extern crate bs58;
#[macro_use]
extern crate clap;
extern crate env_logger;
extern crate dirs;
extern crate serde_json;
extern crate solana;

use bincode::serialize;
use clap::{App, Arg, SubCommand};
use solana::client::mk_client;
use solana::crdt::NodeInfo;
use solana::drone::DroneRequest;
use solana::drone::DRONE_PORT;
use solana::fullnode::Config;
use solana::mint::Mint;
use solana::signature::{PublicKey, Signature};
use solana::logger;
use solana::signature::{read_keypair, Keypair, KeypairUtil, Pubkey, Signature};
use solana::thin_client::ThinClient;
use solana::wallet::request_airdrop;
use std::error;
use std::fmt;
use std::fs::File;
use std::io;
use std::io::prelude::*;
use std::net::{IpAddr, Ipv4Addr, SocketAddr, TcpStream, UdpSocket};
use std::process::exit;
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use std::thread::sleep;
use std::time::Duration;

@@ -28,7 +27,7 @@ enum WalletCommand {
    Address,
    Balance,
    AirDrop(i64),
    Pay(i64, PublicKey),
    Pay(i64, Pubkey),
    Confirm(Signature),
}

@@ -57,7 +56,7 @@ impl error::Error for WalletError {

struct WalletConfig {
    leader: NodeInfo,
    id: Mint,
    id: Keypair,
    drone_addr: SocketAddr,
    command: WalletCommand,
}
@@ -67,7 +66,7 @@ impl Default for WalletConfig {
        let default_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 8000);
        WalletConfig {
            leader: NodeInfo::new_leader(&default_addr),
            id: Mint::new(0),
            id: Keypair::new(),
            drone_addr: default_addr,
            command: WalletCommand::Balance,
        }
@@ -76,6 +75,7 @@ impl Default for WalletConfig {

fn parse_args() -> Result<WalletConfig, Box<error::Error>> {
    let matches = App::new("solana-wallet")
        .version(crate_version!())
        .arg(
            Arg::with_name("leader")
                .short("l")
@@ -85,12 +85,12 @@ fn parse_args() -> Result<WalletConfig, Box<error::Error>> {
                .help("/path/to/leader.json"),
        )
        .arg(
            Arg::with_name("mint")
                .short("m")
                .long("mint")
            Arg::with_name("keypair")
                .short("k")
                .long("keypair")
                .value_name("PATH")
                .takes_value(true)
                .help("/path/to/mint.json"),
                .help("/path/to/id.json"),
        )
        .subcommand(
            SubCommand::with_name("airdrop")
@@ -101,6 +101,7 @@ fn parse_args() -> Result<WalletConfig, Box<error::Error>> {
                        .long("tokens")
                        .value_name("NUMBER")
                        .takes_value(true)
                        .required(true)
                        .help("The number of tokens to request"),
                ),
        )
@@ -122,7 +123,6 @@ fn parse_args() -> Result<WalletConfig, Box<error::Error>> {
                        .long("to")
                        .value_name("PUBKEY")
                        .takes_value(true)
                        .required(true)
                        .help("The pubkey of recipient"),
                ),
        )
@@ -143,30 +143,32 @@ fn parse_args() -> Result<WalletConfig, Box<error::Error>> {

    let leader: NodeInfo;
    if let Some(l) = matches.value_of("leader") {
        leader = read_leader(l).node_info;
        leader = read_leader(l)?.node_info;
    } else {
        let server_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 8000);
        leader = NodeInfo::new_leader(&server_addr);
    };

    let id: Mint;
    if let Some(m) = matches.value_of("mint") {
        id = read_mint(m)?;
    let mut path = dirs::home_dir().expect("home directory");
    let id_path = if matches.is_present("keypair") {
        matches.value_of("keypair").unwrap()
    } else {
        eprintln!("No mint found!");
        exit(1);
        path.extend(&[".config", "solana", "id.json"]);
        path.to_str().unwrap()
    };
    let id = read_keypair(id_path).or_else(|err| {
        Err(WalletError::BadParameter(format!(
            "{}: Unable to open keypair file: {}",
            err, id_path
        )))
    })?;

    let mut drone_addr = leader.contact_info.tpu;
    drone_addr.set_port(9900);
    drone_addr.set_port(DRONE_PORT);

    let command = match matches.subcommand() {
        ("airdrop", Some(airdrop_matches)) => {
            let tokens = if airdrop_matches.is_present("tokens") {
                airdrop_matches.value_of("tokens").unwrap().parse()?
            } else {
                id.tokens
            };
            let tokens = airdrop_matches.value_of("tokens").unwrap().parse()?;
            Ok(WalletCommand::AirDrop(tokens))
        }
        ("pay", Some(pay_matches)) => {
@@ -175,40 +177,36 @@ fn parse_args() -> Result<WalletConfig, Box<error::Error>> {
                    .into_vec()
                    .expect("base58-encoded public key");

                if pubkey_vec.len() != std::mem::size_of::<PublicKey>() {
                    display_actions();
                if pubkey_vec.len() != std::mem::size_of::<Pubkey>() {
                    eprintln!("{}", pay_matches.usage());
                    Err(WalletError::BadParameter("Invalid public key".to_string()))?;
                }
                PublicKey::clone_from_slice(&pubkey_vec)
                Pubkey::new(&pubkey_vec)
            } else {
                id.pubkey()
            };

            let tokens = if pay_matches.is_present("tokens") {
                pay_matches.value_of("tokens").unwrap().parse()?
            } else {
                id.tokens
            };
            let tokens = pay_matches.value_of("tokens").unwrap().parse()?;

            Ok(WalletCommand::Pay(tokens, to))
        }
        ("confirm", Some(confirm_matches)) => {
            let sig_vec = bs58::decode(confirm_matches.value_of("signature").unwrap())
            let signatures = bs58::decode(confirm_matches.value_of("signature").unwrap())
                .into_vec()
                .expect("base58-encoded signature");

            if sig_vec.len() == std::mem::size_of::<Signature>() {
                let sig = Signature::clone_from_slice(&sig_vec);
                Ok(WalletCommand::Confirm(sig))
            if signatures.len() == std::mem::size_of::<Signature>() {
                let signature = Signature::new(&signatures);
                Ok(WalletCommand::Confirm(signature))
            } else {
                display_actions();
                eprintln!("{}", confirm_matches.usage());
                Err(WalletError::BadParameter("Invalid signature".to_string()))
            }
        }
        ("balance", Some(_balance_matches)) => Ok(WalletCommand::Balance),
        ("address", Some(_address_matches)) => Ok(WalletCommand::Address),
        ("", None) => {
            display_actions();
            println!("{}", matches.usage());
            Err(WalletError::CommandNotRecognized(
                "no subcommand given".to_string(),
            ))
@@ -231,7 +229,7 @@ fn process_command(
    match config.command {
        // Check client balance
        WalletCommand::Address => {
            println!("{}", bs58::encode(config.id.pubkey()).into_string());
            println!("{}", config.id.pubkey());
        }
        WalletCommand::Balance => {
            println!("Balance requested...");
@@ -245,31 +243,45 @@ fn process_command(
                }
                Err(error) => {
                    println!("An error occurred: {:?}", error);
                    Err(error)?;
                }
            }
        }
        // Request an airdrop from Solana Drone;
        // Request amount is set in request_airdrop function
        WalletCommand::AirDrop(tokens) => {
            println!("Airdrop requested...");
            println!("Airdropping {:?} tokens", tokens);
            request_airdrop(&config.drone_addr, &config.id, tokens as u64)?;
            // TODO: return airdrop Result from Drone
            sleep(Duration::from_millis(100));
            println!(
                "Your balance is: {:?}",
                client.poll_get_balance(&config.id.pubkey()).unwrap()
                "Requesting airdrop of {:?} tokens from {}",
                tokens, config.drone_addr
            );
            let previous_balance = client.poll_get_balance(&config.id.pubkey())?;
            request_airdrop(&config.drone_addr, &config.id.pubkey(), tokens as u64)?;

            // TODO: return airdrop Result from Drone instead of polling the
            // network
            let mut current_balance = previous_balance;
            for _ in 0..20 {
                sleep(Duration::from_millis(500));
                current_balance = client.poll_get_balance(&config.id.pubkey())?;
                if previous_balance != current_balance {
                    break;
                }
                println!(".");
            }
            println!("Your balance is: {:?}", current_balance);
            if current_balance - previous_balance != tokens {
                Err("Airdrop failed!")?;
            }
        }
        // If client has positive balance, spend tokens in {balance} number of transactions
        WalletCommand::Pay(tokens, to) => {
            let last_id = client.get_last_id();
            let sig = client.transfer(tokens, &config.id.keypair(), to, &last_id)?;
            println!("{}", bs58::encode(sig).into_string());
            let signature = client.transfer(tokens, &config.id, to, &last_id)?;
            println!("{}", signature);
        }
        // Confirm the last client transaction by signature
        WalletCommand::Confirm(sig) => {
            if client.check_signature(&sig) {
        WalletCommand::Confirm(signature) => {
            if client.check_signature(&signature) {
                println!("Confirmed");
            } else {
                println!("Not found");
@@ -279,62 +291,25 @@ fn process_command(
    Ok(())
}

fn display_actions() {
    println!();
    println!("Commands:");
    println!("  address   Get your public key");
    println!("  balance   Get your account balance");
    println!("  airdrop   Request a batch of tokens");
    println!("  pay       Send tokens to a public key");
    println!("  confirm   Confirm your last payment by signature");
    println!();
}
fn read_leader(path: &str) -> Result<Config, WalletError> {
    let file = File::open(path.to_string()).or_else(|err| {
        Err(WalletError::BadParameter(format!(
            "{}: Unable to open leader file: {}",
            err, path
        )))
    })?;

fn read_leader(path: &str) -> Config {
    let file = File::open(path.to_string()).unwrap_or_else(|_| panic!("file not found: {}", path));
    serde_json::from_reader(file).unwrap_or_else(|_| panic!("failed to parse {}", path))
}

fn read_mint(path: &str) -> Result<Mint, Box<error::Error>> {
    let file = File::open(path.to_string())?;
    let mint = serde_json::from_reader(file)?;
    Ok(mint)
}

fn mk_client(r: &NodeInfo) -> io::Result<ThinClient> {
    let requests_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
    let transactions_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
    requests_socket
        .set_read_timeout(Some(Duration::new(1, 0)))
        .unwrap();

    Ok(ThinClient::new(
        r.contact_info.rpu,
        requests_socket,
        r.contact_info.tpu,
        transactions_socket,
    ))
}

fn request_airdrop(
    drone_addr: &SocketAddr,
    id: &Mint,
    tokens: u64,
) -> Result<(), Box<error::Error>> {
    let mut stream = TcpStream::connect(drone_addr)?;
    let req = DroneRequest::GetAirdrop {
        airdrop_request_amount: tokens,
        client_public_key: id.pubkey(),
    };
    let tx = serialize(&req).expect("serialize drone request");
    stream.write_all(&tx).unwrap();
    // TODO: add timeout to this function, in case of unresponsive drone
    Ok(())
    serde_json::from_reader(file).or_else(|err| {
        Err(WalletError::BadParameter(format!(
            "{}: Failed to parse leader file: {}",
            err, path
        )))
    })
}

fn main() -> Result<(), Box<error::Error>> {
    env_logger::init();
    logger::setup();
    let config = parse_args()?;
    let mut client = mk_client(&config.leader)?;
    let mut client = mk_client(&config.leader);
    process_command(&config, &mut client)
}
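
A detail worth noting in the airdrop arm: `Err("Airdrop failed!")?` compiles because the `?` operator routes the `&str` through `From` into the `Box<error::Error>` that `process_command` returns. The same conversion as a minimal sketch (hypothetical function name):

use std::error;

fn airdrop_check(delta: i64, expected: i64) -> Result<(), Box<error::Error>> {
    if delta != expected {
        // &str converts into Box<error::Error> via the ? operator
        Err("Airdrop failed!")?;
    }
    Ok(())
}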
src/broadcast_stage.rs (new file, 203 lines)
@@ -0,0 +1,203 @@
//! The `broadcast_stage` broadcasts data from a leader node to validators
//!
use counter::Counter;
use crdt::{Crdt, CrdtError, NodeInfo};
#[cfg(feature = "erasure")]
use erasure;
use log::Level;
use packet::BlobRecycler;
use result::{Error, Result};
use service::Service;
use std::mem;
use std::net::UdpSocket;
use std::sync::atomic::AtomicUsize;
use std::sync::mpsc::RecvTimeoutError;
use std::sync::{Arc, RwLock};
use std::thread::{self, Builder, JoinHandle};
use std::time::Duration;
use streamer::BlobReceiver;
use window::{self, SharedWindow, WindowIndex, WINDOW_SIZE};

fn broadcast(
    node_info: &NodeInfo,
    broadcast_table: &[NodeInfo],
    window: &SharedWindow,
    recycler: &BlobRecycler,
    receiver: &BlobReceiver,
    sock: &UdpSocket,
    transmit_index: &mut WindowIndex,
    receive_index: &mut u64,
) -> Result<()> {
    let debug_id = node_info.debug_id();
    let timer = Duration::new(1, 0);
    let mut dq = receiver.recv_timeout(timer)?;
    while let Ok(mut nq) = receiver.try_recv() {
        dq.append(&mut nq);
    }

    // flatten deque to vec
    let blobs_vec: Vec<_> = dq.into_iter().collect();

    // We could receive more blobs than window slots so
    // break them up into window-sized chunks to process
    let blobs_chunked = blobs_vec.chunks(WINDOW_SIZE as usize).map(|x| x.to_vec());

    if log_enabled!(Level::Trace) {
        trace!("{}", window::print_window(debug_id, window, *receive_index));
    }

    for mut blobs in blobs_chunked {
        let blobs_len = blobs.len();
        trace!("{:x}: broadcast blobs.len: {}", debug_id, blobs_len);

        // Index the blobs
        window::index_blobs(node_info, &blobs, receive_index)
            .expect("index blobs for initial window");

        // keep the cache of blobs that are broadcast
        inc_new_counter_info!("streamer-broadcast-sent", blobs.len());
        {
            let mut win = window.write().unwrap();
            assert!(blobs.len() <= win.len());
            for b in &blobs {
                let ix = b.read().unwrap().get_index().expect("blob index");
                let pos = (ix % WINDOW_SIZE) as usize;
                if let Some(x) = mem::replace(&mut win[pos].data, None) {
                    trace!(
                        "{:x} popped {} at {}",
                        debug_id,
                        x.read().unwrap().get_index().unwrap(),
                        pos
                    );
                    recycler.recycle(x);
                }
                if let Some(x) = mem::replace(&mut win[pos].coding, None) {
                    trace!(
                        "{:x} popped {} at {}",
                        debug_id,
                        x.read().unwrap().get_index().unwrap(),
                        pos
                    );
                    recycler.recycle(x);
                }

                trace!("{:x} null {}", debug_id, pos);
            }
            while let Some(b) = blobs.pop() {
                let ix = b.read().unwrap().get_index().expect("blob index");
                let pos = (ix % WINDOW_SIZE) as usize;
                trace!("{:x} caching {} at {}", debug_id, ix, pos);
                assert!(win[pos].data.is_none());
                win[pos].data = Some(b);
            }
        }

        // Fill in the coding blob data from the window data blobs
        #[cfg(feature = "erasure")]
        {
            erasure::generate_coding(
                debug_id,
                &mut window.write().unwrap(),
                recycler,
                *receive_index,
                blobs_len,
                &mut transmit_index.coding,
            )?;
        }

        *receive_index += blobs_len as u64;

        // Send blobs out from the window
        Crdt::broadcast(
            &node_info,
            &broadcast_table,
            &window,
            &sock,
            transmit_index,
            *receive_index,
        )?;
    }
    Ok(())
}

pub struct BroadcastStage {
    thread_hdl: JoinHandle<()>,
}

impl BroadcastStage {
    fn run(
        sock: &UdpSocket,
        crdt: &Arc<RwLock<Crdt>>,
        window: &SharedWindow,
        entry_height: u64,
        recycler: &BlobRecycler,
        receiver: &BlobReceiver,
    ) {
        let mut transmit_index = WindowIndex {
            data: entry_height,
            coding: entry_height,
        };
        let mut receive_index = entry_height;
        let me = crdt.read().unwrap().my_data().clone();
        loop {
            let broadcast_table = crdt.read().unwrap().compute_broadcast_table();
            if let Err(e) = broadcast(
                &me,
                &broadcast_table,
                &window,
                &recycler,
                &receiver,
                &sock,
                &mut transmit_index,
                &mut receive_index,
            ) {
                match e {
                    Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => break,
                    Error::RecvTimeoutError(RecvTimeoutError::Timeout) => (),
                    Error::CrdtError(CrdtError::NoPeers) => (), // TODO: Why are the unit-tests throwing hundreds of these?
                    _ => {
                        inc_new_counter_info!("streamer-broadcaster-error", 1, 1);
                        error!("broadcaster error: {:?}", e);
                    }
                }
            }
        }
    }

    /// Service to broadcast messages from the leader to layer 1 nodes.
    /// See `crdt` for network layer definitions.
    /// # Arguments
    /// * `sock` - Socket to send from.
    /// * `exit` - Boolean to signal system exit.
    /// * `crdt` - CRDT structure
    /// * `window` - Cache of blobs that we have broadcast
    /// * `recycler` - Blob recycler.
    /// * `receiver` - Receive channel for blobs to be retransmitted to all the layer 1 nodes.
    pub fn new(
        sock: UdpSocket,
        crdt: Arc<RwLock<Crdt>>,
        window: SharedWindow,
        entry_height: u64,
        recycler: BlobRecycler,
        receiver: BlobReceiver,
    ) -> Self {
        let thread_hdl = Builder::new()
            .name("solana-broadcaster".to_string())
            .spawn(move || {
                Self::run(&sock, &crdt, &window, entry_height, &recycler, &receiver);
            })
            .unwrap();

        BroadcastStage { thread_hdl }
    }
}

impl Service for BroadcastStage {
    fn thread_hdls(self) -> Vec<JoinHandle<()>> {
        vec![self.thread_hdl]
    }

    fn join(self) -> thread::Result<()> {
        self.thread_hdl.join()
    }
}
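
The window cache used by `broadcast()` is a fixed-size ring buffer: a blob whose ledger index is `ix` always lands in slot `ix % WINDOW_SIZE`, which is why the code recycles whatever previously occupied that slot before caching the new blob. A standalone sketch of the indexing, with a hypothetical window size of 4 (the real constant lives in window.rs): indices 0 through 7 map to slots 0,1,2,3,0,1,2,3, so index 5 evicts whatever index 1 left behind.

const WINDOW_SIZE: u64 = 4; // hypothetical; stands in for window::WINDOW_SIZE

fn slot(ix: u64) -> usize {
    // ring-buffer position for a blob with ledger index ix
    (ix % WINDOW_SIZE) as usize
}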
@@ -5,22 +5,22 @@

use chrono::prelude::*;
use payment_plan::{Payment, PaymentPlan, Witness};
use signature::PublicKey;
use signature::Pubkey;
use std::mem;

/// A data type representing a `Witness` that the payment plan is waiting on.
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub enum Condition {
    /// Wait for a `Timestamp` `Witness` at or after the given `DateTime`.
    Timestamp(DateTime<Utc>, PublicKey),
    Timestamp(DateTime<Utc>, Pubkey),

    /// Wait for a `Signature` `Witness` from `PublicKey`.
    Signature(PublicKey),
    /// Wait for a `Signature` `Witness` from `Pubkey`.
    Signature(Pubkey),
}

impl Condition {
    /// Return true if the given Witness satisfies this Condition.
    pub fn is_satisfied(&self, witness: &Witness, from: &PublicKey) -> bool {
    pub fn is_satisfied(&self, witness: &Witness, from: &Pubkey) -> bool {
        match (self, witness) {
            (Condition::Signature(pubkey), Witness::Signature) => pubkey == from,
            (Condition::Timestamp(dt, pubkey), Witness::Timestamp(last_time)) => {
@@ -47,23 +47,18 @@ pub enum Budget {
}

impl Budget {
    /// Create the simplest budget - one that pays `tokens` to PublicKey.
    pub fn new_payment(tokens: i64, to: PublicKey) -> Self {
    /// Create the simplest budget - one that pays `tokens` to Pubkey.
    pub fn new_payment(tokens: i64, to: Pubkey) -> Self {
        Budget::Pay(Payment { tokens, to })
    }

    /// Create a budget that pays `tokens` to `to` after being witnessed by `from`.
    pub fn new_authorized_payment(from: PublicKey, tokens: i64, to: PublicKey) -> Self {
    pub fn new_authorized_payment(from: Pubkey, tokens: i64, to: Pubkey) -> Self {
        Budget::After(Condition::Signature(from), Payment { tokens, to })
    }

    /// Create a budget that pays `tokens` to `to` after the given DateTime.
    pub fn new_future_payment(
        dt: DateTime<Utc>,
        from: PublicKey,
        tokens: i64,
        to: PublicKey,
    ) -> Self {
    pub fn new_future_payment(dt: DateTime<Utc>, from: Pubkey, tokens: i64, to: Pubkey) -> Self {
        Budget::After(Condition::Timestamp(dt, from), Payment { tokens, to })
    }

@@ -71,9 +66,9 @@ impl Budget {
    /// unless cancelled by `from`.
    pub fn new_cancelable_future_payment(
        dt: DateTime<Utc>,
        from: PublicKey,
        from: Pubkey,
        tokens: i64,
        to: PublicKey,
        to: Pubkey,
    ) -> Self {
        Budget::Or(
            (Condition::Timestamp(dt, from), Payment { tokens, to }),
@@ -101,7 +96,7 @@ impl PaymentPlan for Budget {

    /// Apply a witness to the budget to see if the budget can be reduced.
    /// If so, modify the budget in-place.
    fn apply_witness(&mut self, witness: &Witness, from: &PublicKey) {
    fn apply_witness(&mut self, witness: &Witness, from: &Pubkey) {
        let new_payment = match self {
            Budget::After(cond, payment) if cond.is_satisfied(witness, from) => Some(payment),
            Budget::Or((cond, payment), _) if cond.is_satisfied(witness, from) => Some(payment),
@@ -118,11 +113,11 @@ impl PaymentPlan for Budget {
#[cfg(test)]
mod tests {
    use super::*;
    use signature::{KeyPair, KeyPairUtil};
    use signature::{Keypair, KeypairUtil};

    #[test]
    fn test_signature_satisfied() {
        let from = PublicKey::default();
        let from = Pubkey::default();
        assert!(Condition::Signature(from).is_satisfied(&Witness::Signature, &from));
    }

@@ -130,7 +125,7 @@ mod tests {
    fn test_timestamp_satisfied() {
        let dt1 = Utc.ymd(2014, 11, 14).and_hms(8, 9, 10);
        let dt2 = Utc.ymd(2014, 11, 14).and_hms(10, 9, 8);
        let from = PublicKey::default();
        let from = Pubkey::default();
        assert!(Condition::Timestamp(dt1, from).is_satisfied(&Witness::Timestamp(dt1), &from));
        assert!(Condition::Timestamp(dt1, from).is_satisfied(&Witness::Timestamp(dt2), &from));
        assert!(!Condition::Timestamp(dt2, from).is_satisfied(&Witness::Timestamp(dt1), &from));
@@ -139,8 +134,8 @@ mod tests {
    #[test]
    fn test_verify() {
        let dt = Utc.ymd(2014, 11, 14).and_hms(8, 9, 10);
        let from = PublicKey::default();
        let to = PublicKey::default();
        let from = Pubkey::default();
        let to = Pubkey::default();
        assert!(Budget::new_payment(42, to).verify(42));
        assert!(Budget::new_authorized_payment(from, 42, to).verify(42));
        assert!(Budget::new_future_payment(dt, from, 42, to).verify(42));
@@ -149,8 +144,8 @@ mod tests {

    #[test]
    fn test_authorized_payment() {
        let from = PublicKey::default();
        let to = PublicKey::default();
        let from = Pubkey::default();
        let to = Pubkey::default();

        let mut budget = Budget::new_authorized_payment(from, 42, to);
        budget.apply_witness(&Witness::Signature, &from);
@@ -160,8 +155,8 @@ mod tests {
    #[test]
    fn test_future_payment() {
        let dt = Utc.ymd(2014, 11, 14).and_hms(8, 9, 10);
        let from = KeyPair::new().pubkey();
        let to = KeyPair::new().pubkey();
        let from = Keypair::new().pubkey();
        let to = Keypair::new().pubkey();

        let mut budget = Budget::new_future_payment(dt, from, 42, to);
        budget.apply_witness(&Witness::Timestamp(dt), &from);
@@ -173,8 +168,8 @@ mod tests {
        // Ensure timestamp will only be acknowledged if it came from the
        // whitelisted public key.
        let dt = Utc.ymd(2014, 11, 14).and_hms(8, 9, 10);
        let from = KeyPair::new().pubkey();
        let to = KeyPair::new().pubkey();
        let from = Keypair::new().pubkey();
        let to = Keypair::new().pubkey();

        let mut budget = Budget::new_future_payment(dt, from, 42, to);
        let orig_budget = budget.clone();
@@ -185,8 +180,8 @@ mod tests {
    #[test]
    fn test_cancelable_future_payment() {
        let dt = Utc.ymd(2014, 11, 14).and_hms(8, 9, 10);
        let from = PublicKey::default();
        let to = PublicKey::default();
        let from = Pubkey::default();
        let to = Pubkey::default();

        let mut budget = Budget::new_cancelable_future_payment(dt, from, 42, to);
        budget.apply_witness(&Witness::Timestamp(dt), &from);
|
||||
|
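The rename is mechanical, but it helps to see the constructors compose. A minimal sketch, assuming the `Budget`, `Condition`, `Witness`, and `Payment` types plus the `chrono` imports exactly as the tests above use them:

    use chrono::prelude::*;

    // Hypothetical walkthrough of the renamed API, mirroring the tests above.
    fn budget_walkthrough(from: Pubkey, to: Pubkey) {
        let dt = Utc.ymd(2014, 11, 14).and_hms(8, 9, 10);

        // Every constructor conserves `tokens`, which verify() checks.
        assert!(Budget::new_payment(42, to).verify(42));
        assert!(Budget::new_cancelable_future_payment(dt, from, 42, to).verify(42));

        // A timestamp witness from the whitelisted key reduces the budget
        // in place to the plain payment.
        let mut budget = Budget::new_future_payment(dt, from, 42, to);
        budget.apply_witness(&Witness::Timestamp(dt), &from);
    }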
src/choose_gossip_peer_strategy.rs

@@ -2,7 +2,7 @@ use crdt::{CrdtError, NodeInfo};
 use rand::distributions::{Distribution, Weighted, WeightedChoice};
 use rand::thread_rng;
 use result::Result;
-use signature::PublicKey;
+use signature::Pubkey;
 use std;
 use std::collections::HashMap;
 
@@ -29,7 +29,7 @@ impl<'a, 'b> ChooseRandomPeerStrategy<'a> {
 impl<'a> ChooseGossipPeerStrategy for ChooseRandomPeerStrategy<'a> {
     fn choose_peer<'b>(&self, options: Vec<&'b NodeInfo>) -> Result<&'b NodeInfo> {
         if options.is_empty() {
-            Err(CrdtError::TooSmall)?;
+            Err(CrdtError::NoPeers)?;
         }
 
         let n = ((self.random)() as usize) % options.len();
@@ -61,21 +61,21 @@ impl<'a> ChooseGossipPeerStrategy for ChooseRandomPeerStrategy<'a> {
 pub struct ChooseWeightedPeerStrategy<'a> {
     // The map of last directly observed update_index for each active validator.
     // This is how we get observed(v) from the formula above.
-    remote: &'a HashMap<PublicKey, u64>,
+    remote: &'a HashMap<Pubkey, u64>,
     // The map of rumored update_index for each active validator. Using the formula above,
     // to find rumor_v(i), we would first look up "v" in the outer map, then look up
     // "i" in the inner map, i.e. look up external_liveness[v][i]
-    external_liveness: &'a HashMap<PublicKey, HashMap<PublicKey, u64>>,
+    external_liveness: &'a HashMap<Pubkey, HashMap<Pubkey, u64>>,
     // A function returning the size of the stake for a particular validator, corresponds
     // to stake(i) in the formula above.
-    get_stake: &'a Fn(PublicKey) -> f64,
+    get_stake: &'a Fn(Pubkey) -> f64,
 }
 
 impl<'a> ChooseWeightedPeerStrategy<'a> {
     pub fn new(
-        remote: &'a HashMap<PublicKey, u64>,
-        external_liveness: &'a HashMap<PublicKey, HashMap<PublicKey, u64>>,
-        get_stake: &'a Fn(PublicKey) -> f64,
+        remote: &'a HashMap<Pubkey, u64>,
+        external_liveness: &'a HashMap<Pubkey, HashMap<Pubkey, u64>>,
+        get_stake: &'a Fn(Pubkey) -> f64,
     ) -> Self {
         ChooseWeightedPeerStrategy {
             remote,
@@ -84,7 +84,7 @@ impl<'a> ChooseWeightedPeerStrategy<'a> {
         }
     }
 
-    fn calculate_weighted_remote_index(&self, peer_id: PublicKey) -> u32 {
+    fn calculate_weighted_remote_index(&self, peer_id: Pubkey) -> u32 {
         let mut last_seen_index = 0;
         // If the peer is not in our remote table, then we leave last_seen_index as zero.
         // Only happens when a peer appears in our crdt.table but not in our crdt.remote,
@@ -174,7 +174,7 @@ impl<'a> ChooseWeightedPeerStrategy<'a> {
 impl<'a> ChooseGossipPeerStrategy for ChooseWeightedPeerStrategy<'a> {
     fn choose_peer<'b>(&self, options: Vec<&'b NodeInfo>) -> Result<&'b NodeInfo> {
         if options.is_empty() {
-            Err(CrdtError::TooSmall)?;
+            Err(CrdtError::NoPeers)?;
         }
 
         let mut weighted_peers = vec![];
@@ -192,11 +192,11 @@ impl<'a> ChooseGossipPeerStrategy for ChooseWeightedPeerStrategy<'a> {
 mod tests {
     use choose_gossip_peer_strategy::{ChooseWeightedPeerStrategy, DEFAULT_WEIGHT};
     use logger;
-    use signature::{KeyPair, KeyPairUtil, PublicKey};
+    use signature::{Keypair, KeypairUtil, Pubkey};
     use std;
     use std::collections::HashMap;
 
-    fn get_stake(_id: PublicKey) -> f64 {
+    fn get_stake(_id: Pubkey) -> f64 {
         1.0
     }
 
@@ -205,10 +205,10 @@ mod tests {
         logger::setup();
 
         // Initialize the filler keys
-        let key1 = KeyPair::new().pubkey();
+        let key1 = Keypair::new().pubkey();
 
-        let remote: HashMap<PublicKey, u64> = HashMap::new();
-        let external_liveness: HashMap<PublicKey, HashMap<PublicKey, u64>> = HashMap::new();
+        let remote: HashMap<Pubkey, u64> = HashMap::new();
+        let external_liveness: HashMap<Pubkey, HashMap<Pubkey, u64>> = HashMap::new();
 
         let weighted_strategy =
             ChooseWeightedPeerStrategy::new(&remote, &external_liveness, &get_stake);
@@ -224,16 +224,16 @@ mod tests {
         logger::setup();
 
         // Initialize the filler keys
-        let key1 = KeyPair::new().pubkey();
-        let key2 = KeyPair::new().pubkey();
+        let key1 = Keypair::new().pubkey();
+        let key2 = Keypair::new().pubkey();
 
-        let remote: HashMap<PublicKey, u64> = HashMap::new();
-        let mut external_liveness: HashMap<PublicKey, HashMap<PublicKey, u64>> = HashMap::new();
+        let remote: HashMap<Pubkey, u64> = HashMap::new();
+        let mut external_liveness: HashMap<Pubkey, HashMap<Pubkey, u64>> = HashMap::new();
 
         // If only the liveness table contains the entry, should return the
         // weighted liveness entries
         let test_value: u32 = 5;
-        let mut rumors: HashMap<PublicKey, u64> = HashMap::new();
+        let mut rumors: HashMap<Pubkey, u64> = HashMap::new();
         rumors.insert(key2, test_value as u64);
         external_liveness.insert(key1, rumors);
 
@@ -249,15 +249,15 @@ mod tests {
         logger::setup();
 
         // Initialize the filler keys
-        let key1 = KeyPair::new().pubkey();
-        let key2 = KeyPair::new().pubkey();
+        let key1 = Keypair::new().pubkey();
+        let key2 = Keypair::new().pubkey();
 
-        let remote: HashMap<PublicKey, u64> = HashMap::new();
-        let mut external_liveness: HashMap<PublicKey, HashMap<PublicKey, u64>> = HashMap::new();
+        let remote: HashMap<Pubkey, u64> = HashMap::new();
+        let mut external_liveness: HashMap<Pubkey, HashMap<Pubkey, u64>> = HashMap::new();
 
         // If the vote index is greater than u32::MAX, default to u32::MAX
         let test_value = (std::u32::MAX as u64) + 10;
-        let mut rumors: HashMap<PublicKey, u64> = HashMap::new();
+        let mut rumors: HashMap<Pubkey, u64> = HashMap::new();
         rumors.insert(key2, test_value);
         external_liveness.insert(key1, rumors);
 
@@ -273,20 +273,20 @@ mod tests {
         logger::setup();
 
         // Initialize the filler keys
-        let key1 = KeyPair::new().pubkey();
+        let key1 = Keypair::new().pubkey();
 
-        let mut remote: HashMap<PublicKey, u64> = HashMap::new();
-        let mut external_liveness: HashMap<PublicKey, HashMap<PublicKey, u64>> = HashMap::new();
+        let mut remote: HashMap<Pubkey, u64> = HashMap::new();
+        let mut external_liveness: HashMap<Pubkey, HashMap<Pubkey, u64>> = HashMap::new();
 
         // Test many validators' rumors in external_liveness
         let num_peers = 10;
-        let mut rumors: HashMap<PublicKey, u64> = HashMap::new();
+        let mut rumors: HashMap<Pubkey, u64> = HashMap::new();
 
         remote.insert(key1, 0);
 
         for i in 0..num_peers {
-            let pk = KeyPair::new().pubkey();
-            rumors.insert(pk, i);
+            let pubkey = Keypair::new().pubkey();
+            rumors.insert(pubkey, i);
         }
 
         external_liveness.insert(key1, rumors);
@@ -303,21 +303,21 @@ mod tests {
         logger::setup();
 
         // Initialize the filler keys
-        let key1 = KeyPair::new().pubkey();
+        let key1 = Keypair::new().pubkey();
 
-        let mut remote: HashMap<PublicKey, u64> = HashMap::new();
-        let mut external_liveness: HashMap<PublicKey, HashMap<PublicKey, u64>> = HashMap::new();
+        let mut remote: HashMap<Pubkey, u64> = HashMap::new();
+        let mut external_liveness: HashMap<Pubkey, HashMap<Pubkey, u64>> = HashMap::new();
 
         // Test many validators' rumors in external_liveness
         let num_peers = 10;
        let old_index = 20;
-        let mut rumors: HashMap<PublicKey, u64> = HashMap::new();
+        let mut rumors: HashMap<Pubkey, u64> = HashMap::new();
 
         remote.insert(key1, old_index);
 
         for _i in 0..num_peers {
-            let pk = KeyPair::new().pubkey();
-            rumors.insert(pk, old_index);
+            let pubkey = Keypair::new().pubkey();
+            rumors.insert(pubkey, old_index);
         }
 
         external_liveness.insert(key1, rumors);
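For orientation, the weighted strategy is driven entirely by the three borrowed tables above. A minimal wiring sketch, reusing the tests' uniform `get_stake` (the `options` vector is assumed to hold live `NodeInfo` references, and imports match the module's own):

    use std::collections::HashMap;

    fn get_stake(_id: Pubkey) -> f64 {
        1.0 // uniform stake, as in the tests
    }

    fn pick<'a>(options: Vec<&'a NodeInfo>) -> Result<&'a NodeInfo> {
        let remote: HashMap<Pubkey, u64> = HashMap::new();
        let external_liveness: HashMap<Pubkey, HashMap<Pubkey, u64>> = HashMap::new();
        let strategy = ChooseWeightedPeerStrategy::new(&remote, &external_liveness, &get_stake);
        // Returns Err(CrdtError::NoPeers) when `options` is empty.
        strategy.choose_peer(options)
    }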
src/client.rs | 20 (new file)

@@ -0,0 +1,20 @@
+use crdt::NodeInfo;
+use nat::udp_random_bind;
+use std::time::Duration;
+use thin_client::ThinClient;
+
+pub fn mk_client(r: &NodeInfo) -> ThinClient {
+    let requests_socket = udp_random_bind(8000, 10000, 5).unwrap();
+    let transactions_socket = udp_random_bind(8000, 10000, 5).unwrap();
+
+    requests_socket
+        .set_read_timeout(Some(Duration::new(1, 0)))
+        .unwrap();
+
+    ThinClient::new(
+        r.contact_info.rpu,
+        requests_socket,
+        r.contact_info.tpu,
+        transactions_socket,
+    )
+}
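A hedged usage sketch for the new helper, matching how the drone test below drives a thin client (`leader.data` is assumed to be the `NodeInfo` of a running node):

    use signature::Pubkey;

    fn balance_of(leader: &NodeInfo, pubkey: &Pubkey) -> Option<i64> {
        // mk_client binds two random local UDP ports and points them at the
        // node's request and transaction addresses.
        let mut client = mk_client(leader);
        client.poll_get_balance(pubkey).ok()
    }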
src/counter.rs | 118

@@ -1,9 +1,10 @@
 use influx_db_client as influxdb;
 use metrics;
+use std::env;
 use std::sync::atomic::{AtomicUsize, Ordering};
 use timing;
 
-const INFLUX_RATE: usize = 100;
+const DEFAULT_METRICS_RATE: usize = 100;
 
 pub struct Counter {
     pub name: &'static str,
@@ -12,7 +13,7 @@ pub struct Counter {
     pub times: AtomicUsize,
     /// last accumulated value logged
     pub lastlog: AtomicUsize,
-    pub lograte: usize,
+    pub lograte: AtomicUsize,
 }
 
 macro_rules! create_counter {
@@ -22,7 +23,7 @@ macro_rules! create_counter {
             counts: AtomicUsize::new(0),
             times: AtomicUsize::new(0),
             lastlog: AtomicUsize::new(0),
-            lograte: $lograte,
+            lograte: AtomicUsize::new($lograte),
         }
     };
 }
@@ -33,12 +34,45 @@ macro_rules! inc_counter {
     };
 }
 
+macro_rules! inc_new_counter_info {
+    ($name:expr, $count:expr) => {{
+        inc_new_counter!($name, $count, Level::Info, 0);
+    }};
+    ($name:expr, $count:expr, $lograte:expr) => {{
+        inc_new_counter!($name, $count, Level::Info, $lograte);
+    }};
+}
+
+macro_rules! inc_new_counter {
+    ($name:expr, $count:expr, $level:expr, $lograte:expr) => {{
+        if log_enabled!($level) {
+            static mut INC_NEW_COUNTER: Counter = create_counter!($name, $lograte);
+            inc_counter!(INC_NEW_COUNTER, $count);
+        }
+    }};
+}
+
 impl Counter {
+    fn default_log_rate() -> usize {
+        let v = env::var("SOLANA_DEFAULT_METRICS_RATE")
+            .map(|x| x.parse().unwrap_or(DEFAULT_METRICS_RATE))
+            .unwrap_or(DEFAULT_METRICS_RATE);
+        if v == 0 {
+            DEFAULT_METRICS_RATE
+        } else {
+            v
+        }
+    }
     pub fn inc(&mut self, events: usize) {
         let counts = self.counts.fetch_add(events, Ordering::Relaxed);
         let times = self.times.fetch_add(1, Ordering::Relaxed);
-        let lastlog = self.lastlog.load(Ordering::Relaxed);
-        if times % self.lograte == 0 && times > 0 {
+        let mut lograte = self.lograte.load(Ordering::Relaxed);
+        if lograte == 0 {
+            lograte = Counter::default_log_rate();
+            self.lograte.store(lograte, Ordering::Relaxed);
+        }
+        if times % lograte == 0 && times > 0 {
+            let lastlog = self.lastlog.load(Ordering::Relaxed);
             info!(
                 "COUNTER:{{\"name\": \"{}\", \"counts\": {}, \"samples\": {}, \"now\": {}}}",
                 self.name,
@@ -46,10 +80,8 @@ impl Counter {
                 times,
                 timing::timestamp(),
             );
-        }
-        if times % INFLUX_RATE == 0 && times > 0 {
             metrics::submit(
-                influxdb::Point::new(&format!("counter_{}", self.name))
+                influxdb::Point::new(&format!("counter-{}", self.name))
                     .add_field(
                         "count",
                         influxdb::Value::Integer(counts as i64 - lastlog as i64),
@@ -63,17 +95,34 @@ impl Counter {
 }
 #[cfg(test)]
 mod tests {
-    use counter::Counter;
+    use counter::{Counter, DEFAULT_METRICS_RATE};
     use log::Level;
+    use std::env;
     use std::sync::atomic::{AtomicUsize, Ordering};
+    use std::sync::{Once, RwLock, ONCE_INIT};
+
+    fn get_env_lock() -> &'static RwLock<()> {
+        static mut ENV_LOCK: Option<RwLock<()>> = None;
+        static INIT_HOOK: Once = ONCE_INIT;
+
+        unsafe {
+            INIT_HOOK.call_once(|| {
+                ENV_LOCK = Some(RwLock::new(()));
+            });
+            &ENV_LOCK.as_ref().unwrap()
+        }
+    }
 
     #[test]
     fn test_counter() {
+        let _readlock = get_env_lock().read();
         static mut COUNTER: Counter = create_counter!("test", 100);
         let count = 1;
         inc_counter!(COUNTER, count);
         unsafe {
             assert_eq!(COUNTER.counts.load(Ordering::Relaxed), 1);
             assert_eq!(COUNTER.times.load(Ordering::Relaxed), 1);
-            assert_eq!(COUNTER.lograte, 100);
+            assert_eq!(COUNTER.lograte.load(Ordering::Relaxed), 100);
             assert_eq!(COUNTER.lastlog.load(Ordering::Relaxed), 0);
             assert_eq!(COUNTER.name, "test");
         }
@@ -88,4 +137,53 @@ mod tests {
             assert_eq!(COUNTER.lastlog.load(Ordering::Relaxed), 399);
         }
     }
+    #[test]
+    fn test_inc_new_counter() {
+        let _readlock = get_env_lock().read();
+        //make sure that macros are syntactically correct
+        //the variable is internal to the macro scope so there is no way to introspect it
+        inc_new_counter_info!("counter-1", 1);
+        inc_new_counter_info!("counter-2", 1, 2);
+    }
+    #[test]
+    fn test_lograte() {
+        let _readlock = get_env_lock().read();
+        assert_eq!(
+            Counter::default_log_rate(),
+            DEFAULT_METRICS_RATE,
+            "default_log_rate() is {}, expected {}, SOLANA_DEFAULT_METRICS_RATE environment variable set?",
+            Counter::default_log_rate(),
+            DEFAULT_METRICS_RATE,
+        );
+        static mut COUNTER: Counter = create_counter!("test_lograte", 0);
+        inc_counter!(COUNTER, 2);
+        unsafe {
+            assert_eq!(
+                COUNTER.lograte.load(Ordering::Relaxed),
+                DEFAULT_METRICS_RATE
+            );
+        }
+    }
+
+    #[test]
+    fn test_lograte_env() {
+        assert_ne!(DEFAULT_METRICS_RATE, 0);
+        let _writelock = get_env_lock().write();
+        static mut COUNTER: Counter = create_counter!("test_lograte_env", 0);
+        env::set_var("SOLANA_DEFAULT_METRICS_RATE", "50");
+        inc_counter!(COUNTER, 2);
+        unsafe {
+            assert_eq!(COUNTER.lograte.load(Ordering::Relaxed), 50);
+        }
+
+        static mut COUNTER2: Counter = create_counter!("test_lograte_env", 0);
+        env::set_var("SOLANA_DEFAULT_METRICS_RATE", "0");
+        inc_counter!(COUNTER2, 2);
+        unsafe {
+            assert_eq!(
+                COUNTER2.lograte.load(Ordering::Relaxed),
+                DEFAULT_METRICS_RATE
+            );
+        }
+    }
 }
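The new macros are meant to be dropped into hot paths without pre-declaring a counter. A sketch of the intended call site (the stage name is hypothetical; assumes the counter macros and `log::Level` are in scope, as in the tests above):

    fn process_batch(batch_len: usize) {
        // Declares a static Counter at this call site on first use. With no
        // explicit lograte, 0 is stored and the first inc() resolves it from
        // SOLANA_DEFAULT_METRICS_RATE (falling back to 100).
        inc_new_counter_info!("banking_stage-process_batch", batch_len);
    }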
src/crdt.rs | 983 (file diff suppressed because it is too large)
src/drone.rs | 103

@@ -6,7 +6,8 @@
 
 use influx_db_client as influxdb;
 use metrics;
-use signature::{KeyPair, PublicKey};
+use signature::Signature;
+use signature::{Keypair, Pubkey};
 use std::io;
 use std::io::{Error, ErrorKind};
 use std::net::{IpAddr, SocketAddr, UdpSocket};
@@ -16,17 +17,18 @@ use transaction::Transaction;
 
 pub const TIME_SLICE: u64 = 60;
 pub const REQUEST_CAP: u64 = 1_000_000;
+pub const DRONE_PORT: u16 = 9900;
 
 #[derive(Serialize, Deserialize, Debug, Clone, Copy)]
 pub enum DroneRequest {
     GetAirdrop {
         airdrop_request_amount: u64,
-        client_public_key: PublicKey,
+        client_pubkey: Pubkey,
     },
 }
 
 pub struct Drone {
-    mint_keypair: KeyPair,
+    mint_keypair: Keypair,
     ip_cache: Vec<IpAddr>,
     _airdrop_addr: SocketAddr,
     transactions_addr: SocketAddr,
@@ -38,7 +40,7 @@ pub struct Drone {
 
 impl Drone {
     pub fn new(
-        mint_keypair: KeyPair,
+        mint_keypair: Keypair,
         _airdrop_addr: SocketAddr,
         transactions_addr: SocketAddr,
         requests_addr: SocketAddr,
@@ -93,8 +95,7 @@ impl Drone {
         }
     }
 
-    pub fn send_airdrop(&mut self, req: DroneRequest) -> Result<usize, io::Error> {
-        let tx: Transaction;
+    pub fn send_airdrop(&mut self, req: DroneRequest) -> Result<Signature, io::Error> {
         let request_amount: u64;
         let requests_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
         let transactions_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
@@ -107,20 +108,24 @@ impl Drone {
         );
         let last_id = client.get_last_id();
 
-        match req {
+        let tx = match req {
             DroneRequest::GetAirdrop {
                 airdrop_request_amount,
-                client_public_key,
+                client_pubkey,
             } => {
+                info!(
+                    "Requesting airdrop of {} to {:?}",
+                    airdrop_request_amount, client_pubkey
+                );
                 request_amount = airdrop_request_amount;
-                tx = Transaction::new(
+                Transaction::new(
                     &self.mint_keypair,
-                    client_public_key,
+                    client_pubkey,
                     airdrop_request_amount as i64,
                     last_id,
-                );
+                )
             }
-        }
+        };
         if self.check_request_limit(request_amount) {
             self.request_current += request_amount;
             metrics::submit(
@@ -154,12 +159,12 @@ mod tests {
     use bank::Bank;
     use crdt::{get_ip_addr, TestNode};
     use drone::{Drone, DroneRequest, REQUEST_CAP, TIME_SLICE};
-    use fullnode::FullNode;
+    use fullnode::Fullnode;
     use logger;
     use mint::Mint;
     use service::Service;
-    use signature::{KeyPair, KeyPairUtil};
-    use std::io::sink;
+    use signature::{Keypair, KeypairUtil};
+    use std::fs::remove_dir_all;
     use std::net::{SocketAddr, UdpSocket};
     use std::sync::atomic::{AtomicBool, Ordering};
     use std::sync::Arc;
@@ -169,7 +174,7 @@ mod tests {
 
     #[test]
     fn test_check_request_limit() {
-        let keypair = KeyPair::new();
+        let keypair = Keypair::new();
         let mut addr: SocketAddr = "0.0.0.0:9900".parse().unwrap();
         addr.set_ip(get_ip_addr().unwrap());
         let transactions_addr = "0.0.0.0:0".parse().unwrap();
@@ -189,7 +194,7 @@ mod tests {
 
     #[test]
     fn test_clear_request_count() {
-        let keypair = KeyPair::new();
+        let keypair = Keypair::new();
         let mut addr: SocketAddr = "0.0.0.0:9900".parse().unwrap();
         addr.set_ip(get_ip_addr().unwrap());
         let transactions_addr = "0.0.0.0:0".parse().unwrap();
@@ -203,7 +208,7 @@ mod tests {
 
     #[test]
     fn test_add_ip_to_cache() {
-        let keypair = KeyPair::new();
+        let keypair = Keypair::new();
         let mut addr: SocketAddr = "0.0.0.0:9900".parse().unwrap();
         addr.set_ip(get_ip_addr().unwrap());
         let transactions_addr = "0.0.0.0:0".parse().unwrap();
@@ -218,7 +223,7 @@ mod tests {
 
     #[test]
     fn test_clear_ip_cache() {
-        let keypair = KeyPair::new();
+        let keypair = Keypair::new();
         let mut addr: SocketAddr = "0.0.0.0:9900".parse().unwrap();
         addr.set_ip(get_ip_addr().unwrap());
         let transactions_addr = "0.0.0.0:0".parse().unwrap();
@@ -235,7 +240,7 @@ mod tests {
 
     #[test]
     fn test_drone_default_init() {
-        let keypair = KeyPair::new();
+        let keypair = Keypair::new();
         let mut addr: SocketAddr = "0.0.0.0:9900".parse().unwrap();
         addr.set_ip(get_ip_addr().unwrap());
         let transactions_addr = "0.0.0.0:0".parse().unwrap();
@@ -254,6 +259,14 @@ mod tests {
         assert_eq!(drone.request_cap, REQUEST_CAP);
     }
 
+    fn tmp_ledger_path(name: &str) -> String {
+        use std::env;
+        let out_dir = env::var("OUT_DIR").unwrap_or_else(|_| "target".to_string());
+        let keypair = Keypair::new();
+
+        format!("{}/tmp-ledger-{}-{}", out_dir, name, keypair.pubkey())
+    }
+
     #[test]
     #[ignore]
     fn test_send_airdrop() {
@@ -261,23 +274,26 @@ mod tests {
         const TPS_BATCH: i64 = 5_000_000;
 
         logger::setup();
-        let leader = TestNode::new();
+        let leader_keypair = Keypair::new();
+        let leader = TestNode::new_localhost_with_pubkey(leader_keypair.pubkey());
 
         let alice = Mint::new(10_000_000);
         let bank = Bank::new(&alice);
-        let bob_pubkey = KeyPair::new().pubkey();
-        let carlos_pubkey = KeyPair::new().pubkey();
+        let bob_pubkey = Keypair::new().pubkey();
+        let carlos_pubkey = Keypair::new().pubkey();
         let exit = Arc::new(AtomicBool::new(false));
         let leader_data = leader.data.clone();
+        let ledger_path = tmp_ledger_path("send_airdrop");
 
-        let server = FullNode::new_leader(
+        let server = Fullnode::new_leader(
+            leader_keypair,
             bank,
             0,
-            None,
-            Some(Duration::from_millis(30)),
+            &[],
             leader,
             exit.clone(),
-            sink(),
+            &ledger_path,
+            false,
         );
         //TODO: this seems unstable
         sleep(Duration::from_millis(900));
@@ -293,20 +309,6 @@ mod tests {
             Some(150_000),
         );
 
-        let bob_req = DroneRequest::GetAirdrop {
-            airdrop_request_amount: 50,
-            client_public_key: bob_pubkey,
-        };
-        let bob_result = drone.send_airdrop(bob_req).expect("send airdrop test");
-        assert!(bob_result > 0);
-
-        let carlos_req = DroneRequest::GetAirdrop {
-            airdrop_request_amount: 5_000_000,
-            client_public_key: carlos_pubkey,
-        };
-        let carlos_result = drone.send_airdrop(carlos_req).expect("send airdrop test");
-        assert!(carlos_result > 0);
-
         let requests_socket = UdpSocket::bind("0.0.0.0:0").expect("drone bind to requests socket");
         let transactions_socket =
             UdpSocket::bind("0.0.0.0:0").expect("drone bind to transactions socket");
@@ -318,15 +320,30 @@ mod tests {
             transactions_socket,
         );
 
-        let bob_balance = client.poll_get_balance(&bob_pubkey);
+        let bob_req = DroneRequest::GetAirdrop {
+            airdrop_request_amount: 50,
+            client_pubkey: bob_pubkey,
+        };
+        let bob_sig = drone.send_airdrop(bob_req).unwrap();
+        assert!(client.poll_for_signature(&bob_sig).is_ok());
+
+        let carlos_req = DroneRequest::GetAirdrop {
+            airdrop_request_amount: 5_000_000,
+            client_pubkey: carlos_pubkey,
+        };
+        let carlos_sig = drone.send_airdrop(carlos_req).unwrap();
+        assert!(client.poll_for_signature(&carlos_sig).is_ok());
+
+        let bob_balance = client.get_balance(&bob_pubkey);
         info!("Small request balance: {:?}", bob_balance);
         assert_eq!(bob_balance.unwrap(), SMALL_BATCH);
 
-        let carlos_balance = client.poll_get_balance(&carlos_pubkey);
+        let carlos_balance = client.get_balance(&carlos_pubkey);
         info!("TPS request balance: {:?}", carlos_balance);
         assert_eq!(carlos_balance.unwrap(), TPS_BATCH);
 
         exit.store(true, Ordering::Relaxed);
         server.join().unwrap();
+        remove_dir_all(ledger_path).unwrap();
     }
 }
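The visible payoff of returning a `Signature` instead of a byte count: callers can confirm the airdrop landed rather than sleeping. A sketch built only from calls shown in this diff (the exact return type of `poll_for_signature` is an assumption inferred from its `is_ok()` use in the test):

    fn airdrop_and_confirm(
        drone: &mut Drone,
        client: &mut ThinClient,
        client_pubkey: Pubkey,
    ) -> io::Result<()> {
        let req = DroneRequest::GetAirdrop {
            airdrop_request_amount: 50,
            client_pubkey,
        };
        // send_airdrop now hands back the transaction's Signature...
        let sig = drone.send_airdrop(req)?;
        // ...which the thin client can poll until the leader processes it.
        client.poll_for_signature(&sig)
    }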
src/entry.rs | 101

@@ -2,10 +2,13 @@
 //! unique ID that is the hash of the Entry before it, plus the hash of the
 //! transactions within it. Entries cannot be reordered, and its field `num_hashes`
 //! represents an approximate amount of time since the last Entry was created.
-use bincode::serialized_size;
+use bincode::{serialize_into, serialized_size};
 use hash::{extend_and_hash, hash, Hash};
-use packet::BLOB_DATA_SIZE;
+use packet::{BlobRecycler, SharedBlob, BLOB_DATA_SIZE};
 use rayon::prelude::*;
+use signature::Pubkey;
+use std::io::Cursor;
+use std::net::SocketAddr;
 use transaction::Transaction;
 
 /// Each Entry contains three pieces of data. The `num_hashes` field is the number
@@ -20,7 +23,7 @@ use transaction::Transaction;
 /// world's fastest processor at the time the entry was recorded. Or said another way, it
 /// is physically not possible for a shorter duration to have occurred if one assumes the
 /// hash was computed by the world's fastest processor at that time. The hash chain is both
-/// a Verifiable Delay Function (VDF) and a Proof of Work (not to be confused with Proof or
+/// a Verifiable Delay Function (VDF) and a Proof of Work (not to be confused with Proof of
 /// Work consensus!)
 
 #[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
@@ -32,7 +35,7 @@ pub struct Entry {
     pub id: Hash,
 
     /// An unordered list of transactions that were observed before the Entry ID was
-    /// generated. The may have been observed before a previous Entry ID but were
+    /// generated. They may have been observed before a previous Entry ID but were
     /// pushed back into this list to ensure deterministic interpretation of the ledger.
     pub transactions: Vec<Transaction>,
 
@@ -51,11 +54,11 @@ impl Entry {
     /// Creates the next Entry `num_hashes` after `start_hash`.
     pub fn new(
         start_hash: &Hash,
-        cur_hashes: u64,
+        num_hashes: u64,
         transactions: Vec<Transaction>,
         has_more: bool,
     ) -> Self {
-        let num_hashes = cur_hashes + if transactions.is_empty() { 0 } else { 1 };
+        let num_hashes = num_hashes + if transactions.is_empty() { 0 } else { 1 };
         let id = next_hash(start_hash, 0, &transactions);
         let entry = Entry {
             num_hashes,
@@ -64,10 +67,49 @@ impl Entry {
             has_more,
             pad: [0, 0, 0],
         };
-        assert!(serialized_size(&entry).unwrap() <= BLOB_DATA_SIZE as u64);
+
+        let size = serialized_size(&entry).unwrap();
+        if size > BLOB_DATA_SIZE as u64 {
+            panic!(
+                "Serialized entry size too large: {} ({} transactions):",
+                size,
+                entry.transactions.len()
+            );
+        }
         entry
     }
 
+    pub fn to_blob(
+        &self,
+        blob_recycler: &BlobRecycler,
+        idx: Option<u64>,
+        id: Option<Pubkey>,
+        addr: Option<&SocketAddr>,
+    ) -> SharedBlob {
+        let blob = blob_recycler.allocate();
+        {
+            let mut blob_w = blob.write().unwrap();
+            let pos = {
+                let mut out = Cursor::new(blob_w.data_mut());
+                serialize_into(&mut out, &self).expect("failed to serialize output");
+                out.position() as usize
+            };
+            blob_w.set_size(pos);
+
+            if let Some(idx) = idx {
+                blob_w.set_index(idx).expect("set_index()");
+            }
+            if let Some(id) = id {
+                blob_w.set_id(id).expect("set_id()");
+            }
+            if let Some(addr) = addr {
+                blob_w.meta.set_addr(addr);
+            }
+            blob_w.set_flags(0).unwrap();
+        }
+        blob
+    }
+
     pub fn will_fit(transactions: Vec<Transaction>) -> bool {
         serialized_size(&Entry {
             num_hashes: 0,
@@ -81,13 +123,13 @@ impl Entry {
     /// Creates the next Tick Entry `num_hashes` after `start_hash`.
     pub fn new_mut(
         start_hash: &mut Hash,
-        cur_hashes: &mut u64,
+        num_hashes: &mut u64,
         transactions: Vec<Transaction>,
         has_more: bool,
     ) -> Self {
-        let entry = Self::new(start_hash, *cur_hashes, transactions, has_more);
+        let entry = Self::new(start_hash, *num_hashes, transactions, has_more);
         *start_hash = entry.id;
-        *cur_hashes = 0;
+        *num_hashes = 0;
         assert!(serialized_size(&entry).unwrap() <= BLOB_DATA_SIZE as u64);
         entry
     }
@@ -107,14 +149,31 @@ impl Entry {
     /// Verifies self.id is the result of hashing a `start_hash` `self.num_hashes` times.
     /// If the transaction is not a Tick, then hash that as well.
     pub fn verify(&self, start_hash: &Hash) -> bool {
-        self.transactions.par_iter().all(|tx| tx.verify_plan())
-            && self.id == next_hash(start_hash, self.num_hashes, &self.transactions)
+        let tx_plans_verified = self.transactions.par_iter().all(|tx| {
+            let r = tx.verify_plan();
+            if !r {
+                warn!("tx plan invalid: {:?}", tx);
+            }
+            r
+        });
+        if !tx_plans_verified {
+            return false;
+        }
+        let ref_hash = next_hash(start_hash, self.num_hashes, &self.transactions);
+        if self.id != ref_hash {
+            warn!(
+                "next_hash is invalid expected: {:?} actual: {:?}",
+                self.id, ref_hash
+            );
+            return false;
+        }
+        true
     }
 }
 
 fn add_transaction_data(hash_data: &mut Vec<u8>, tx: &Transaction) {
     hash_data.push(0u8);
-    hash_data.extend_from_slice(&tx.sig);
+    hash_data.extend_from_slice(&tx.signature.as_ref());
 }
 
 /// Creates the hash `num_hashes` after `start_hash`. If the transaction contains
@@ -124,7 +183,7 @@ fn add_transaction_data(hash_data: &mut Vec<u8>, tx: &Transaction) {
 fn next_hash(start_hash: &Hash, num_hashes: u64, transactions: &[Transaction]) -> Hash {
     let mut id = *start_hash;
     for _ in 1..num_hashes {
-        id = hash(&id);
+        id = hash(&id.as_ref());
     }
 
     // Hash all the transaction data
@@ -136,7 +195,7 @@ fn next_hash(start_hash: &Hash, num_hashes: u64, transactions: &[Transaction]) -> Hash {
     if !hash_data.is_empty() {
         extend_and_hash(&id, &hash_data)
     } else if num_hashes != 0 {
-        hash(&id)
+        hash(&id.as_ref())
     } else {
         id
     }
@@ -160,13 +219,13 @@ mod tests {
     use chrono::prelude::*;
     use entry::Entry;
     use hash::hash;
-    use signature::{KeyPair, KeyPairUtil};
+    use signature::{Keypair, KeypairUtil};
     use transaction::Transaction;
 
     #[test]
     fn test_entry_verify() {
         let zero = Hash::default();
-        let one = hash(&zero);
+        let one = hash(&zero.as_ref());
         assert!(Entry::new_tick(0, &zero).verify(&zero)); // base case
         assert!(!Entry::new_tick(0, &zero).verify(&one)); // base case, bad
         assert!(next_entry(&zero, 1, vec![]).verify(&zero)); // inductive step
@@ -178,7 +237,7 @@ mod tests {
         let zero = Hash::default();
 
         // First, verify entries
-        let keypair = KeyPair::new();
+        let keypair = Keypair::new();
         let tx0 = Transaction::new(&keypair, keypair.pubkey(), 0, zero);
         let tx1 = Transaction::new(&keypair, keypair.pubkey(), 1, zero);
         let mut e0 = Entry::new(&zero, 0, vec![tx0.clone(), tx1.clone()], false);
@@ -195,7 +254,7 @@ mod tests {
         let zero = Hash::default();
 
         // First, verify entries
-        let keypair = KeyPair::new();
+        let keypair = Keypair::new();
         let tx0 = Transaction::new_timestamp(&keypair, Utc::now(), zero);
         let tx1 = Transaction::new_signature(&keypair, Default::default(), zero);
         let mut e0 = Entry::new(&zero, 0, vec![tx0.clone(), tx1.clone()], false);
@@ -218,7 +277,7 @@ mod tests {
         assert_eq!(tick.num_hashes, 0);
         assert_eq!(tick.id, zero);
 
-        let keypair = KeyPair::new();
+        let keypair = Keypair::new();
         let tx0 = Transaction::new_timestamp(&keypair, Utc::now(), zero);
         let entry0 = next_entry(&zero, 1, vec![tx0.clone()]);
         assert_eq!(entry0.num_hashes, 1);
@@ -229,7 +288,7 @@ mod tests {
     #[should_panic]
     fn test_next_entry_panic() {
         let zero = Hash::default();
-        let keypair = KeyPair::new();
+        let keypair = Keypair::new();
         let tx = Transaction::new(&keypair, keypair.pubkey(), 0, zero);
         next_entry(&zero, 0, vec![tx]);
     }
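The chain property `verify` now reports on, in miniature (lifted from the base cases in the tests above, plus the new `to_blob` helper):

    let zero = Hash::default();

    // num_hashes == 0 leaves the id unchanged, so verifying against any
    // other start hash must fail.
    assert!(Entry::new_tick(0, &zero).verify(&zero));
    let one = hash(&zero.as_ref());
    assert!(!Entry::new_tick(0, &zero).verify(&one));

    // Packaging an entry for the network; a sketch with defaults.
    let recycler = BlobRecycler::default();
    let blob = Entry::new_tick(0, &zero).to_blob(&recycler, Some(0), None, None);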
src/entry_writer.rs

@@ -3,9 +3,10 @@
 //! stdout, and then sends the Entry to its output channel.
 
 use bank::Bank;
+use bincode;
 use entry::Entry;
-use serde_json;
-use std::io::{self, BufRead, Cursor, Error, ErrorKind, Write};
+use std::io::{self, BufRead, Error, ErrorKind, Write};
+use std::mem::size_of;
 
 pub struct EntryWriter<'a, W> {
     bank: &'a Bank,
@@ -19,8 +20,16 @@ impl<'a, W: Write> EntryWriter<'a, W> {
     }
 
     fn write_entry(writer: &mut W, entry: &Entry) -> io::Result<()> {
-        let serialized = serde_json::to_string(entry).unwrap();
-        writeln!(writer, "{}", serialized)
+        let entry_bytes =
+            bincode::serialize(&entry).map_err(|e| Error::new(ErrorKind::Other, e.to_string()))?;
+
+        let len = entry_bytes.len();
+        let len_bytes =
+            bincode::serialize(&len).map_err(|e| Error::new(ErrorKind::Other, e.to_string()))?;
+
+        writer.write_all(&len_bytes[..])?;
+        writer.write_all(&entry_bytes[..])?;
+        writer.flush()
     }
 
     pub fn write_entries<I>(writer: &mut W, entries: I) -> io::Result<()>
@@ -49,28 +58,44 @@ impl<'a, W: Write> EntryWriter<'a, W> {
     }
 }
 
-/// Parse a string containing an Entry.
-pub fn read_entry(s: &str) -> io::Result<Entry> {
-    serde_json::from_str(s).map_err(|e| Error::new(ErrorKind::Other, e.to_string()))
+struct EntryReader<R: BufRead> {
+    reader: R,
+    entry_bytes: Vec<u8>,
 }
 
+impl<R: BufRead> Iterator for EntryReader<R> {
+    type Item = io::Result<Entry>;
+
+    fn next(&mut self) -> Option<io::Result<Entry>> {
+        let mut entry_len_bytes = [0u8; size_of::<usize>()];
+
+        if self.reader.read_exact(&mut entry_len_bytes[..]).is_ok() {
+            let entry_len = bincode::deserialize(&entry_len_bytes).unwrap();
+
+            if entry_len > self.entry_bytes.len() {
+                self.entry_bytes.resize(entry_len, 0);
+            }
+
+            if let Err(e) = self.reader.read_exact(&mut self.entry_bytes[..entry_len]) {
+                Some(Err(e))
+            } else {
+                Some(
+                    bincode::deserialize(&self.entry_bytes)
+                        .map_err(|e| Error::new(ErrorKind::Other, e.to_string())),
+                )
+            }
+        } else {
+            None // EOF (probably)
+        }
+    }
+}
+
 /// Return an iterator for all the entries in the given file.
 pub fn read_entries<R: BufRead>(reader: R) -> impl Iterator<Item = io::Result<Entry>> {
-    reader.lines().map(|s| read_entry(&s?))
-}
-
-/// Same as read_entries() but returning a vector. Handy for debugging short logs.
-pub fn read_entries_to_vec<R: BufRead>(reader: R) -> io::Result<Vec<Entry>> {
-    let mut result = vec![];
-    for x in read_entries(reader) {
-        result.push(x?);
+    EntryReader {
+        reader,
+        entry_bytes: Vec::new(),
     }
-    Ok(result)
-}
-
-/// Same as read_entries() but parsing a string and returning a vector.
-pub fn read_entries_from_str(s: &str) -> io::Result<Vec<Entry>> {
-    read_entries_to_vec(Cursor::new(s))
 }
 
 #[cfg(test)]
@@ -79,8 +104,8 @@ mod tests {
     use ledger;
     use mint::Mint;
     use packet::BLOB_DATA_SIZE;
-    use signature::{KeyPair, KeyPairUtil};
-    use std::str;
+    use signature::{Keypair, KeypairUtil};
+    use std::io::Cursor;
     use transaction::Transaction;
 
     #[test]
@@ -90,7 +115,7 @@ mod tests {
 
         let writer = io::sink();
         let mut entry_writer = EntryWriter::new(&bank, writer);
-        let keypair = KeyPair::new();
+        let keypair = Keypair::new();
         let tx = Transaction::new(&mint.keypair(), keypair.pubkey(), 1, mint.last_id());
 
         // NOTE: if Entry grows to larger than a transaction, the code below falls over
@@ -113,12 +138,23 @@ mod tests {
         assert_eq!(bank.last_id(), entries[1].id);
     }
 
+    /// Same as read_entries() but parsing a buffer and returning a vector.
+    fn read_entries_from_buf(s: &[u8]) -> io::Result<Vec<Entry>> {
+        let mut result = vec![];
+        let reader = Cursor::new(s);
+        for x in read_entries(reader) {
+            trace!("entry... {:?}", x);
+            result.push(x?);
+        }
+        Ok(result)
+    }
+
     #[test]
-    fn test_read_entries_from_str() {
+    fn test_read_entries_from_buf() {
         let mint = Mint::new(1);
         let mut buf = vec![];
         EntryWriter::write_entries(&mut buf, mint.create_entries()).unwrap();
-        let entries = read_entries_from_str(str::from_utf8(&buf).unwrap()).unwrap();
+        let entries = read_entries_from_buf(&buf).unwrap();
         assert_eq!(entries, mint.create_entries());
     }
 }
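Why the format changed: a bincode-serialized `Entry` is raw bytes and may contain newlines, so the old JSON-lines framing forced everything through UTF-8. The new layout is `[bincode usize length][entry bytes]`, repeated. Decoding one frame by hand, mirroring `EntryReader::next` above (a sketch; `buf` is assumed to hold `write_entries` output and imports match the module's own):

    use std::io::{Cursor, Read};
    use std::mem::size_of;

    fn first_entry(buf: &[u8]) -> io::Result<Entry> {
        let mut cur = Cursor::new(buf);

        // Frame header: a bincode-encoded usize length.
        let mut len_bytes = [0u8; size_of::<usize>()];
        cur.read_exact(&mut len_bytes)?;
        let len: usize = bincode::deserialize(&len_bytes)
            .map_err(|e| Error::new(ErrorKind::Other, e.to_string()))?;

        // Frame body: the bincode-encoded Entry itself.
        let mut entry_bytes = vec![0u8; len];
        cur.read_exact(&mut entry_bytes)?;
        bincode::deserialize(&entry_bytes).map_err(|e| Error::new(ErrorKind::Other, e.to_string()))
    }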
src/erasure.rs | 1071 (file diff suppressed because it is too large)
src/fullnode.rs | 250

@@ -1,46 +1,29 @@
 //! The `fullnode` module hosts all the fullnode microservices.
 
 use bank::Bank;
+use broadcast_stage::BroadcastStage;
 use crdt::{Crdt, NodeInfo, TestNode};
 use entry::Entry;
-use entry_writer;
-use ledger::Block;
+use ledger::read_ledger;
 use ncp::Ncp;
 use packet::BlobRecycler;
 use ring::rand::SystemRandom;
 use rpu::Rpu;
 use service::Service;
-use signature::{KeyPair, KeyPairUtil};
-use std::collections::VecDeque;
-use std::fs::{File, OpenOptions};
-use std::io::{sink, stdin, stdout, BufReader};
-use std::io::{Read, Write};
+use signature::{Keypair, KeypairUtil};
 use std::net::SocketAddr;
 use std::sync::atomic::{AtomicBool, Ordering};
 use std::sync::{Arc, RwLock};
 use std::thread::{JoinHandle, Result};
 use std::time::Duration;
 use streamer;
 use tpu::Tpu;
 use tvu::Tvu;
 use untrusted::Input;
+use window;
 
-//use std::time::Duration;
-pub struct FullNode {
+pub struct Fullnode {
     exit: Arc<AtomicBool>,
     thread_hdls: Vec<JoinHandle<()>>,
 }
 
-pub enum InFile {
-    StdIn,
-    Path(String),
-}
-
-pub enum OutFile {
-    StdOut,
-    Path(String),
-}
-
 #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
 /// Fullnode configuration to be stored in file
 pub struct Config {
@@ -50,40 +33,34 @@ pub struct Config {
 
 /// Structure to be replicated by the network
 impl Config {
-    pub fn new(bind_addr: &SocketAddr) -> Self {
-        let rnd = SystemRandom::new();
-        let pkcs8 = KeyPair::generate_pkcs8(&rnd)
-            .expect("generate_pkcs8 in mint pub fn new")
-            .to_vec();
+    pub fn new(bind_addr: &SocketAddr, pkcs8: Vec<u8>) -> Self {
         let keypair =
-            KeyPair::from_pkcs8(Input::from(&pkcs8)).expect("from_pkcs8 in fullnode::Config new");
+            Keypair::from_pkcs8(Input::from(&pkcs8)).expect("from_pkcs8 in fullnode::Config new");
         let pubkey = keypair.pubkey();
         let node_info = NodeInfo::new_leader_with_pubkey(pubkey, bind_addr);
         Config { node_info, pkcs8 }
     }
-    pub fn keypair(&self) -> KeyPair {
-        KeyPair::from_pkcs8(Input::from(&self.pkcs8))
+    pub fn keypair(&self) -> Keypair {
+        Keypair::from_pkcs8(Input::from(&self.pkcs8))
             .expect("from_pkcs8 in fullnode::Config keypair")
     }
 }
 
-impl FullNode {
-    pub fn new(
+impl Fullnode {
+    fn new_internal(
         mut node: TestNode,
         leader: bool,
-        infile: InFile,
-        keypair_for_validator: Option<KeyPair>,
+        ledger_path: &str,
+        keypair: Keypair,
         network_entry_for_validator: Option<SocketAddr>,
-        outfile_for_leader: Option<OutFile>,
-    ) -> FullNode {
+        sigverify_disabled: bool,
+    ) -> Self {
         info!("creating bank...");
-        let bank = Bank::default();
-        let infile: Box<Read> = match infile {
-            InFile::Path(path) => Box::new(File::open(path).unwrap()),
-            InFile::StdIn => Box::new(stdin()),
-        };
-        let reader = BufReader::new(infile);
-        let entries = entry_writer::read_entries(reader).map(|e| e.expect("failed to parse entry"));
+        let bank = Bank::new_default(leader);
+
+        let entries = read_ledger(ledger_path, true).expect("opening ledger");
+
+        let entries = entries.map(|e| e.expect("failed to parse entry"));
 
         info!("processing ledger...");
         let (entry_height, ledger_tail) = bank.process_ledger(entries).expect("process_ledger");
@@ -105,15 +82,16 @@ impl FullNode {
         let testnet_addr = network_entry_for_validator.expect("validator requires entry");
 
         let network_entry_point = NodeInfo::new_entry_point(testnet_addr);
-            let keypair = keypair_for_validator.expect("validator requires keypair");
-            let server = FullNode::new_validator(
+            let server = Self::new_validator(
                 keypair,
                 bank,
                 entry_height,
-                Some(ledger_tail),
+                &ledger_tail,
                 node,
                 &network_entry_point,
                 exit.clone(),
+                Some(ledger_path),
+                sigverify_disabled,
             );
             info!(
                 "validator ready... local request address: {} (advertising {}) connected to: {}",
@@ -122,27 +100,16 @@ impl FullNode {
             server
         } else {
             node.data.leader_id = node.data.id;
-            let outfile_for_leader: Box<Write + Send> = match outfile_for_leader {
-                Some(OutFile::Path(file)) => Box::new(
-                    OpenOptions::new()
-                        .create(true)
-                        .append(true)
-                        .open(file)
-                        .expect("opening ledger file"),
-                ),
-                Some(OutFile::StdOut) => Box::new(stdout()),
-                None => Box::new(sink()),
-            };
-
-            let server = FullNode::new_leader(
+            let server = Self::new_leader(
+                keypair,
                 bank,
                 entry_height,
-                Some(ledger_tail),
-                //Some(Duration::from_millis(1000)),
-                None,
+                &ledger_tail,
                 node,
                 exit.clone(),
-                outfile_for_leader,
+                ledger_path,
+                sigverify_disabled,
             );
             info!(
                 "leader ready... local request address: {} (advertising {})",
@@ -152,24 +119,38 @@ impl FullNode {
         }
     }
 
-    fn new_window(
-        ledger_tail: Option<Vec<Entry>>,
-        entry_height: u64,
-        crdt: &Arc<RwLock<Crdt>>,
-        blob_recycler: &BlobRecycler,
-    ) -> streamer::Window {
-        match ledger_tail {
-            Some(ledger_tail) => {
-                // convert to blobs
-                let mut blobs = VecDeque::new();
-                ledger_tail.to_blobs(&blob_recycler, &mut blobs);
-
-                // flatten deque to vec
-                let blobs: Vec<_> = blobs.into_iter().collect();
-                streamer::initialized_window(&crdt, blobs, entry_height)
-            }
-            None => streamer::default_window(),
-        }
+    pub fn new(
+        node: TestNode,
+        leader: bool,
+        ledger: &str,
+        keypair: Keypair,
+        network_entry_for_validator: Option<SocketAddr>,
+    ) -> Self {
+        Self::new_internal(
+            node,
+            leader,
+            ledger,
+            keypair,
+            network_entry_for_validator,
+            false,
+        )
+    }
+
+    pub fn new_without_sigverify(
+        node: TestNode,
+        leader: bool,
+        ledger_path: &str,
+        keypair: Keypair,
+        network_entry_for_validator: Option<SocketAddr>,
+    ) -> Self {
+        Self::new_internal(
+            node,
+            leader,
+            ledger_path,
+            keypair,
+            network_entry_for_validator,
+            true,
+        )
     }
 
     /// Create a server instance acting as a leader.
@@ -196,19 +177,24 @@ impl FullNode {
     /// | |    `------------`
     /// `---------------------`
     /// ```
-    pub fn new_leader<W: Write + Send + 'static>(
+    pub fn new_leader(
+        keypair: Keypair,
         bank: Bank,
         entry_height: u64,
-        ledger_tail: Option<Vec<Entry>>,
-        tick_duration: Option<Duration>,
+        ledger_tail: &[Entry],
         node: TestNode,
         exit: Arc<AtomicBool>,
-        writer: W,
+        ledger_path: &str,
+        sigverify_disabled: bool,
     ) -> Self {
+        let tick_duration = None;
+        // TODO: To light up PoH, uncomment the following line:
+        //let tick_duration = Some(Duration::from_millis(1000));
+
         let bank = Arc::new(bank);
         let mut thread_hdls = vec![];
         let rpu = Rpu::new(
-            &bank.clone(),
+            &bank,
             node.sockets.requests,
             node.sockets.respond,
             exit.clone(),
@@ -216,28 +202,34 @@ impl FullNode {
         thread_hdls.extend(rpu.thread_hdls());
 
         let blob_recycler = BlobRecycler::default();
-        let crdt = Arc::new(RwLock::new(Crdt::new(node.data)));
+        let window =
+            window::new_window_from_entries(ledger_tail, entry_height, &node.data, &blob_recycler);
+
+        let crdt = Arc::new(RwLock::new(Crdt::new(node.data).expect("Crdt::new")));
+
         let (tpu, blob_receiver) = Tpu::new(
-            &bank.clone(),
-            &crdt.clone(),
+            keypair,
+            &bank,
+            &crdt,
             tick_duration,
             node.sockets.transaction,
-            &blob_recycler.clone(),
+            &blob_recycler,
             exit.clone(),
-            writer,
+            ledger_path,
+            sigverify_disabled,
         );
         thread_hdls.extend(tpu.thread_hdls());
-        let window = FullNode::new_window(ledger_tail, entry_height, &crdt, &blob_recycler);
         let ncp = Ncp::new(
-            &crdt.clone(),
+            &crdt,
             window.clone(),
+            Some(ledger_path),
             node.sockets.gossip,
             node.sockets.gossip_send,
             exit.clone(),
         ).expect("Ncp::new");
         thread_hdls.extend(ncp.thread_hdls());
 
-        let t_broadcast = streamer::broadcaster(
+        let broadcast_stage = BroadcastStage::new(
             node.sockets.broadcast,
             crdt,
             window,
@@ -245,9 +237,9 @@ impl FullNode {
             blob_recycler.clone(),
             blob_receiver,
         );
-        thread_hdls.extend(vec![t_broadcast]);
+        thread_hdls.extend(broadcast_stage.thread_hdls());
 
-        FullNode { exit, thread_hdls }
+        Fullnode { exit, thread_hdls }
     }
 
     /// Create a server instance acting as a validator.
@@ -280,36 +272,39 @@ impl FullNode {
     /// `-------------------------------`
     /// ```
     pub fn new_validator(
-        keypair: KeyPair,
+        keypair: Keypair,
         bank: Bank,
         entry_height: u64,
-        ledger_tail: Option<Vec<Entry>>,
+        ledger_tail: &[Entry],
         node: TestNode,
         entry_point: &NodeInfo,
         exit: Arc<AtomicBool>,
+        ledger_path: Option<&str>,
+        _sigverify_disabled: bool,
     ) -> Self {
         let bank = Arc::new(bank);
         let mut thread_hdls = vec![];
         let rpu = Rpu::new(
-            &bank.clone(),
+            &bank,
            node.sockets.requests,
            node.sockets.respond,
            exit.clone(),
         );
         thread_hdls.extend(rpu.thread_hdls());
 
-        let crdt = Arc::new(RwLock::new(Crdt::new(node.data)));
+        let blob_recycler = BlobRecycler::default();
+        let window =
+            window::new_window_from_entries(ledger_tail, entry_height, &node.data, &blob_recycler);
+
+        let crdt = Arc::new(RwLock::new(Crdt::new(node.data).expect("Crdt::new")));
         crdt.write()
             .expect("'crdt' write lock before insert() in pub fn replicate")
             .insert(&entry_point);
 
-        let blob_recycler = BlobRecycler::default();
-
-        let window = FullNode::new_window(ledger_tail, entry_height, &crdt, &blob_recycler);
-
         let ncp = Ncp::new(
-            &crdt.clone(),
+            &crdt,
             window.clone(),
+            ledger_path,
             node.sockets.gossip,
             node.sockets.gossip_send,
             exit.clone(),
@@ -317,27 +312,32 @@ impl FullNode {
 
         let tvu = Tvu::new(
             keypair,
-            bank.clone(),
+            &bank,
             entry_height,
             crdt.clone(),
             window.clone(),
             node.sockets.replicate,
             node.sockets.repair,
             node.sockets.retransmit,
+            ledger_path,
             exit.clone(),
         );
         thread_hdls.extend(tvu.thread_hdls());
         thread_hdls.extend(ncp.thread_hdls());
-        FullNode { exit, thread_hdls }
+        Fullnode { exit, thread_hdls }
     }
 
-    pub fn close(self) -> Result<()> {
+    //used for notifying many nodes in parallel to exit
+    pub fn exit(&self) {
         self.exit.store(true, Ordering::Relaxed);
+    }
+    pub fn close(self) -> Result<()> {
+        self.exit();
         self.join()
     }
 }
 
-impl Service for FullNode {
+impl Service for Fullnode {
     fn thread_hdls(self) -> Vec<JoinHandle<()>> {
         self.thread_hdls
     }
@@ -354,20 +354,44 @@ impl Service for FullNode {
 mod tests {
     use bank::Bank;
     use crdt::TestNode;
-    use fullnode::FullNode;
+    use fullnode::Fullnode;
     use mint::Mint;
-    use signature::{KeyPair, KeyPairUtil};
+    use service::Service;
+    use signature::{Keypair, KeypairUtil};
     use std::sync::atomic::AtomicBool;
     use std::sync::Arc;
 
     #[test]
     fn validator_exit() {
-        let kp = KeyPair::new();
-        let tn = TestNode::new_with_pubkey(kp.pubkey());
+        let keypair = Keypair::new();
+        let tn = TestNode::new_localhost_with_pubkey(keypair.pubkey());
         let alice = Mint::new(10_000);
         let bank = Bank::new(&alice);
         let exit = Arc::new(AtomicBool::new(false));
         let entry = tn.data.clone();
-        let v = FullNode::new_validator(kp, bank, 0, None, tn, &entry, exit);
-        v.close().unwrap();
+        let v = Fullnode::new_validator(keypair, bank, 0, &[], tn, &entry, exit, None, false);
+        v.exit();
+        v.join().unwrap();
     }
+    #[test]
+    fn validator_parallel_exit() {
+        let vals: Vec<Fullnode> = (0..2)
+            .map(|_| {
+                let keypair = Keypair::new();
+                let tn = TestNode::new_localhost_with_pubkey(keypair.pubkey());
+                let alice = Mint::new(10_000);
+                let bank = Bank::new(&alice);
+                let exit = Arc::new(AtomicBool::new(false));
+                let entry = tn.data.clone();
+                Fullnode::new_validator(keypair, bank, 0, &[], tn, &entry, exit, None, false)
+            })
+            .collect();
+        //each validator can exit in parallel to speed many sequential calls to `join`
+        vals.iter().for_each(|v| v.exit());
+        //while join is called sequentially, the above exit call notified all the
+        //validators to exit from all their threads
+        vals.into_iter().for_each(|v| {
+            v.join().unwrap();
+        });
+    }
 }
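The public construction surface after this change, in one sketch (argument order taken from the diff; the ledger path is a placeholder and must already contain a ledger created elsewhere):

    let keypair = Keypair::new();
    let node = TestNode::new_localhost_with_pubkey(keypair.pubkey());

    // leader == true wires Rpu + Tpu + Ncp + BroadcastStage; a validator
    // (leader == false) would also need Some(network_entry_point).
    let fullnode = Fullnode::new(node, true, "path/to/ledger", keypair, None);

    // exit() flags every service; close() is exit() followed by join().
    fullnode.close().unwrap();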
src/hash.rs | 26

@@ -1,11 +1,31 @@
 //! The `hash` module provides functions for creating SHA-256 hashes.
 
+use bs58;
 use generic_array::typenum::U32;
 use generic_array::GenericArray;
 use sha2::{Digest, Sha256};
+use std::fmt;
 
-pub type Hash = GenericArray<u8, U32>;
+#[derive(Serialize, Deserialize, Clone, Copy, Default, Eq, PartialEq, Ord, PartialOrd, Hash)]
+pub struct Hash(GenericArray<u8, U32>);
+
+impl AsRef<[u8]> for Hash {
+    fn as_ref(&self) -> &[u8] {
+        &self.0[..]
+    }
+}
+
+impl fmt::Debug for Hash {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", bs58::encode(self.0).into_string())
+    }
+}
+
+impl fmt::Display for Hash {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", bs58::encode(self.0).into_string())
+    }
+}
 /// Return a Sha256 hash for the given data.
 pub fn hash(val: &[u8]) -> Hash {
     let mut hasher = Sha256::default();
@@ -13,12 +33,12 @@ pub fn hash(val: &[u8]) -> Hash {
 
     // At the time of this writing, the sha2 library is stuck on an old version
     // of generic_array (0.9.0). Decouple ourselves with a clone to our version.
-    GenericArray::clone_from_slice(hasher.result().as_slice())
+    Hash(GenericArray::clone_from_slice(hasher.result().as_slice()))
 }
 
 /// Return the hash of the given hash extended with the given value.
 pub fn extend_and_hash(id: &Hash, val: &[u8]) -> Hash {
-    let mut hash_data = id.to_vec();
+    let mut hash_data = id.as_ref().to_vec();
     hash_data.extend_from_slice(val);
     hash(&hash_data)
 }
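What the newtype buys: `Hash` now controls its own base58 `Debug`/`Display`, and byte access is explicit through `AsRef<[u8]>`, which is why call sites across this change become `hash(&id.as_ref())`. A sketch:

    let h = hash(b"hello");
    println!("{}", h);                 // base58, via the new Display impl
    let chained = hash(&h.as_ref());   // the idiom used throughout this diff
    assert_ne!(h, chained);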
822
src/ledger.rs
822
src/ledger.rs
@@ -1,17 +1,414 @@
//! The `ledger` module provides functions for parallel verification of the
//! Proof of History ledger.
//! Proof of History ledger as well as iterative read, append write, and random
//! access read to a persistent file-based ledger.

use bincode::{self, deserialize, serialize_into};
use bincode::{self, deserialize, deserialize_from, serialize_into, serialized_size};
use entry::Entry;
use hash::Hash;
use packet::{self, SharedBlob, BLOB_SIZE};
use log::Level::Trace;
use packet::{self, SharedBlob, BLOB_DATA_SIZE};
use rayon::prelude::*;
use result::{Error, Result};
use std::collections::VecDeque;
use std::io::Cursor;
use std::fs::{create_dir_all, remove_dir_all, File, OpenOptions};
use std::io::prelude::*;
use std::io::{self, BufReader, BufWriter, Seek, SeekFrom};
use std::mem::size_of;
use std::path::Path;
use transaction::Transaction;
use window::WINDOW_SIZE;

//
// A persistent ledger is 2 files:
//  ledger_path/ --+
//                 +-- index <== an array of u64 offsets into data,
//                 |             each offset points to the first bytes
//                 |             of a u64 that contains the length of
//                 |             the entry. To make the code smaller,
//                 |             index[0] is set to 0, TODO: this field
//                 |             could later be used for other stuff...
//                 +-- data  <== concatenated instances of
//                                   u64 length
//                                   entry data
//
// When opening a ledger, we have the ability to "audit" it, which means we need
// to pick which file to use as "truth", and correct the other file as
// necessary, if possible.
//
// The protocol for writing the ledger is to append to the data file first, the
// index file 2nd. If the writing node is interrupted while appending to the
// ledger, there are some possibilities we need to cover:
//
//  1. a partial write of data, which might be a partial write of length
//     or a partial write of entry data
//  2. a partial or missing write to index for that entry
//
// There is also the possibility of "unsynchronized" reading of the ledger
// during transfer across nodes via rsync (or whatever). In this case, if the
// transfer of the data file is done before the transfer of the index file,
// it's likely that the index file will be far ahead of the data file in time.
//
// The quickest and most reliable strategy for recovery is therefore to treat
// the data file as nearest to the "truth".
//
// The logic for "recovery/audit" is to open index and read backwards from the
// last u64-aligned entry to get to where index and data agree (i.e. where a
// successful deserialization of an entry can be performed), then truncate
// both files to this synchronization point.
//

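// Editor's note: a worked illustration of the layout above (not part of the
// diff), assuming 8-byte little-endian u64s as bincode writes them. For a
// ledger holding two entries that serialize to 100 and 250 bytes:
//
//   index: [ 0u64, 108u64 ]                      // 16 bytes total
//   data:  [ 100u64 ][ 100 bytes of entry 0 ]    // entry 0 at offset 0
//          [ 250u64 ][ 250 bytes of entry 1 ]    // entry 1 at offset 8 + 100
//
// index[n] locates the length word of entry n; the entry's bytes follow it.
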
// ledger window
#[derive(Debug)]
pub struct LedgerWindow {
    index: BufReader<File>,
    data: BufReader<File>,
}

// use a CONST because there's a cast, and we don't want "sizeof::<u64> as u64"...
const SIZEOF_U64: u64 = size_of::<u64>() as u64;

#[cfg_attr(feature = "cargo-clippy", allow(needless_pass_by_value))]
fn err_bincode_to_io(e: Box<bincode::ErrorKind>) -> io::Error {
    io::Error::new(io::ErrorKind::Other, e.to_string())
}

fn entry_at<A: Read + Seek>(file: &mut A, at: u64) -> io::Result<Entry> {
    file.seek(SeekFrom::Start(at))?;

    let len = deserialize_from(file.take(SIZEOF_U64)).map_err(err_bincode_to_io)?;
    trace!("entry_at({}) len: {}", at, len);

    deserialize_from(file.take(len)).map_err(err_bincode_to_io)
}

fn next_entry<A: Read>(file: &mut A) -> io::Result<Entry> {
    let len = deserialize_from(file.take(SIZEOF_U64)).map_err(err_bincode_to_io)?;
    deserialize_from(file.take(len)).map_err(err_bincode_to_io)
}

fn u64_at<A: Read + Seek>(file: &mut A, at: u64) -> io::Result<u64> {
    file.seek(SeekFrom::Start(at))?;
    deserialize_from(file.take(SIZEOF_U64)).map_err(err_bincode_to_io)
}

impl LedgerWindow {
    // opens a Ledger in directory, provides "infinite" window
    //
    pub fn open(ledger_path: &str) -> io::Result<Self> {
        let ledger_path = Path::new(&ledger_path);

        let index = File::open(ledger_path.join("index"))?;
        let index = BufReader::with_capacity((WINDOW_SIZE * SIZEOF_U64) as usize, index);
        let data = File::open(ledger_path.join("data"))?;
        let data = BufReader::with_capacity(WINDOW_SIZE as usize * BLOB_DATA_SIZE, data);

        Ok(LedgerWindow { index, data })
    }

    pub fn get_entry(&mut self, index: u64) -> io::Result<Entry> {
        let offset = u64_at(&mut self.index, index * SIZEOF_U64)?;
        entry_at(&mut self.data, offset)
    }
}

pub fn verify_ledger(ledger_path: &str) -> io::Result<()> {
    let ledger_path = Path::new(&ledger_path);

    let index = File::open(ledger_path.join("index"))?;

    let index_len = index.metadata()?.len();

    if index_len % SIZEOF_U64 != 0 {
        Err(io::Error::new(
            io::ErrorKind::Other,
            format!("index is not a multiple of {} bytes long", SIZEOF_U64),
        ))?;
    }
    let mut index = BufReader::with_capacity((WINDOW_SIZE * SIZEOF_U64) as usize, index);

    let data = File::open(ledger_path.join("data"))?;
    let mut data = BufReader::with_capacity(WINDOW_SIZE as usize * BLOB_DATA_SIZE, data);

    let mut last_data_offset = 0;
    let mut index_offset = 0;
    let mut data_read = 0;
    let mut last_len = 0;
    let mut i = 0;

    while index_offset < index_len {
        let data_offset = u64_at(&mut index, index_offset)?;

        if last_data_offset + last_len != data_offset {
            Err(io::Error::new(
                io::ErrorKind::Other,
                format!(
                    "at entry[{}], a gap or an overlap last_offset {} offset {} last_len {}",
                    i, last_data_offset, data_offset, last_len
                ),
            ))?;
        }

        match entry_at(&mut data, data_offset) {
            Err(e) => Err(io::Error::new(
                io::ErrorKind::Other,
                format!(
                    "entry[{}] deserialize() failed at offset {}, err: {}",
                    index_offset / SIZEOF_U64,
                    data_offset,
                    e.to_string(),
                ),
            ))?,
            Ok(entry) => {
                last_len = serialized_size(&entry).map_err(err_bincode_to_io)? + SIZEOF_U64
            }
        }

        last_data_offset = data_offset;
        data_read += last_len;
        index_offset += SIZEOF_U64;
        i += 1;
    }
    let data = data.into_inner();
    if data_read != data.metadata()?.len() {
        Err(io::Error::new(
            io::ErrorKind::Other,
            "garbage on end of data file",
        ))?;
    }
    Ok(())
}

fn recover_ledger(ledger_path: &str) -> io::Result<()> {
    let ledger_path = Path::new(ledger_path);
    let mut index = OpenOptions::new()
        .write(true)
        .read(true)
        .open(ledger_path.join("index"))?;

    let mut data = OpenOptions::new()
        .write(true)
        .read(true)
        .open(ledger_path.join("data"))?;

    // first, truncate to a multiple of SIZEOF_U64
    let len = index.metadata()?.len();

    if len % SIZEOF_U64 != 0 {
        trace!("recover: trimming index len to {}", len - len % SIZEOF_U64);
        index.set_len(len - (len % SIZEOF_U64))?;
    }

    // next, pull index offsets off one at a time until the last one points
    // to a valid entry deserialization offset...
    loop {
        let len = index.metadata()?.len();
        trace!("recover: index len:{}", len);

        // should never happen
        if len < SIZEOF_U64 {
            trace!("recover: error index len {} too small", len);

            Err(io::Error::new(io::ErrorKind::Other, "empty ledger index"))?;
        }

        let offset = u64_at(&mut index, len - SIZEOF_U64)?;
        trace!("recover: offset[{}]: {}", (len / SIZEOF_U64) - 1, offset);

        match entry_at(&mut data, offset) {
            Ok(entry) => {
                trace!("recover: entry[{}]: {:?}", (len / SIZEOF_U64) - 1, entry);

                let entry_len = serialized_size(&entry).map_err(err_bincode_to_io)?;

                trace!("recover: entry_len: {}", entry_len);

                // now trim data file to size...
                data.set_len(offset + SIZEOF_U64 + entry_len)?;

                trace!(
                    "recover: trimmed data file to {}",
                    offset + SIZEOF_U64 + entry_len
                );

                break; // all good
            }
            Err(_err) => {
                trace!(
                    "recover: no entry recovered at {} {}",
                    offset,
                    _err.to_string()
                );
                index.set_len(len - SIZEOF_U64)?;
            }
        }
    }
    if log_enabled!(Trace) {
        let num_entries = index.metadata()?.len() / SIZEOF_U64;
        trace!("recover: done. {} entries", num_entries);
    }

    // flush everything to disk...
    index.sync_all()?;
    data.sync_all()
}

// TODO?? ... we could open the files on demand to support [], but today
// LedgerWindow needs "&mut self"
//
//impl Index<u64> for LedgerWindow {
//    type Output = io::Result<Entry>;
//
//    fn index(&mut self, index: u64) -> &io::Result<Entry> {
//        match u64_at(&mut self.index, index * SIZEOF_U64) {
//            Ok(offset) => &entry_at(&mut self.data, offset),
//            Err(e) => &Err(e),
//        }
//    }
//}

#[derive(Debug)]
pub struct LedgerWriter {
    index: BufWriter<File>,
    data: BufWriter<File>,
}

impl LedgerWriter {
    // recover and open the ledger for writing
    pub fn recover(ledger_path: &str) -> io::Result<Self> {
        recover_ledger(ledger_path)?;
        LedgerWriter::open(ledger_path, false)
    }

    // opens or creates a LedgerWriter in ledger_path directory
    pub fn open(ledger_path: &str, create: bool) -> io::Result<Self> {
        let ledger_path = Path::new(&ledger_path);

        if create {
            let _ignored = remove_dir_all(ledger_path);
            create_dir_all(ledger_path)?;
        }

        let index = OpenOptions::new()
            .create(create)
            .append(true)
            .open(ledger_path.join("index"))?;

        if log_enabled!(Trace) {
            let len = index.metadata()?.len();
            trace!("LedgerWriter::new: index fp:{}", len);
        }
        let index = BufWriter::new(index);

        let data = OpenOptions::new()
            .create(create)
            .append(true)
            .open(ledger_path.join("data"))?;

        if log_enabled!(Trace) {
            let len = data.metadata()?.len();
            trace!("LedgerWriter::new: data fp:{}", len);
        }
        let data = BufWriter::new(data);

        Ok(LedgerWriter { index, data })
    }

    fn write_entry_noflush(&mut self, entry: &Entry) -> io::Result<()> {
        let len = serialized_size(&entry).map_err(err_bincode_to_io)?;

        serialize_into(&mut self.data, &len).map_err(err_bincode_to_io)?;
        if log_enabled!(Trace) {
            let offset = self.data.seek(SeekFrom::Current(0))?;
            trace!("write_entry: after len data fp:{}", offset);
        }

        serialize_into(&mut self.data, &entry).map_err(err_bincode_to_io)?;
        if log_enabled!(Trace) {
            let offset = self.data.seek(SeekFrom::Current(0))?;
            trace!("write_entry: after entry data fp:{}", offset);
        }

        let offset = self.data.seek(SeekFrom::Current(0))? - len - SIZEOF_U64;
        trace!("write_entry: offset:{} len:{}", offset, len);

        serialize_into(&mut self.index, &offset).map_err(err_bincode_to_io)?;

        if log_enabled!(Trace) {
            let offset = self.index.seek(SeekFrom::Current(0))?;
            trace!("write_entry: end index fp:{}", offset);
        }
        Ok(())
    }

    pub fn write_entry(&mut self, entry: &Entry) -> io::Result<()> {
        self.write_entry_noflush(&entry)?;
        self.index.flush()?;
        self.data.flush()?;
        Ok(())
    }

    pub fn write_entries<I>(&mut self, entries: I) -> io::Result<()>
    where
        I: IntoIterator<Item = Entry>,
    {
        for entry in entries {
            self.write_entry_noflush(&entry)?;
        }
        self.index.flush()?;
        self.data.flush()?;
        Ok(())
    }
}

#[derive(Debug)]
pub struct LedgerReader {
    data: BufReader<File>,
}

impl Iterator for LedgerReader {
    type Item = io::Result<Entry>;

    fn next(&mut self) -> Option<io::Result<Entry>> {
        match next_entry(&mut self.data) {
            Ok(entry) => Some(Ok(entry)),
            Err(_) => None,
        }
    }
}

/// Return an iterator for all the entries in the given file.
pub fn read_ledger(
    ledger_path: &str,
    recover: bool,
) -> io::Result<impl Iterator<Item = io::Result<Entry>>> {
    if recover {
        recover_ledger(ledger_path)?;
    }

    let ledger_path = Path::new(&ledger_path);
    let data = File::open(ledger_path.join("data"))?;
    let data = BufReader::new(data);

    Ok(LedgerReader { data })
}

///// copy_ledger doesn't fix up the "from" ledger
//pub fn copy_ledger(from: &str, to: &str) -> io::Result<()> {
//    let mut to = LedgerWriter::new(to, true)?;
//
//    let from = Path::new(&from);
//
//    // for a copy, we read "readonly" from data
//    let data = File::open(from.join("data"))?;
//
//    for entry in (LedgerReader { data }) {
//        let entry = entry?;
//        to.write_entry(&entry)?;
//    }
//    Ok(())
//}

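// Editor's sketch (not part of the diff) tying the three access paths above
// together; `mk_entries()` is a hypothetical source of entries:
//
//     let mut writer = LedgerWriter::open("/tmp/my-ledger", true)?;
//     writer.write_entries(mk_entries())?;          // append write
//
//     for entry in read_ledger("/tmp/my-ledger", true)? {
//         println!("{:?}", entry?);                 // iterative read
//     }
//
//     let mut window = LedgerWindow::open("/tmp/my-ledger")?;
//     let third = window.get_entry(2)?;             // random-access read
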
// a Block is a slice of Entries

pub trait Block {
    /// Verifies the hashes and counts of a slice of transactions are all consistent.
    fn verify(&self, start_hash: &Hash) -> bool;
@@ -22,39 +419,42 @@ impl Block for [Entry] {
    fn verify(&self, start_hash: &Hash) -> bool {
        let genesis = [Entry::new_tick(0, start_hash)];
        let entry_pairs = genesis.par_iter().chain(self).zip(self);
        entry_pairs.all(|(x0, x1)| x1.verify(&x0.id))
        entry_pairs.all(|(x0, x1)| {
            let r = x1.verify(&x0.id);
            if !r {
                warn!(
                    "entry invalid!: {:?} num txs: {}",
                    x1.id,
                    x1.transactions.len()
                );
            }
            r
        })
    }

    fn to_blobs(&self, blob_recycler: &packet::BlobRecycler, q: &mut VecDeque<SharedBlob>) {
        for entry in self {
            let blob = blob_recycler.allocate();
            let pos = {
                let mut bd = blob.write().unwrap();
                let mut out = Cursor::new(bd.data_mut());
                serialize_into(&mut out, &entry).expect("failed to serialize output");
                out.position() as usize
            };
            assert!(pos < BLOB_SIZE);
            blob.write().unwrap().set_size(pos);
            let blob = entry.to_blob(blob_recycler, None, None, None);
            q.push_back(blob);
        }
    }
}

pub fn reconstruct_entries_from_blobs(blobs: VecDeque<SharedBlob>) -> bincode::Result<Vec<Entry>> {
pub fn reconstruct_entries_from_blobs(blobs: VecDeque<SharedBlob>) -> Result<Vec<Entry>> {
    let mut entries: Vec<Entry> = Vec::with_capacity(blobs.len());

    for blob in blobs {
        let entry = {
            let msg = blob.read().unwrap();
            deserialize(&msg.data()[..msg.meta.size])
            let msg_size = msg.get_size()?;
            deserialize(&msg.data()[..msg_size])
        };

        match entry {
            Ok(entry) => entries.push(entry),
            Err(err) => {
                trace!("reconstruct_entry_from_blobs: {}", err);
                return Err(err);
                trace!("reconstruct_entry_from_blobs: {:?}", err);
                return Err(Error::Serialize(err));
            }
        }
    }
@@ -62,39 +462,64 @@ pub fn reconstruct_entries_from_blobs(blobs: VecDeque<SharedBlob>) -> bincode::R
}

/// Creates the next entries for given transactions, outputs
/// updates start_hash to id of last Entry, sets cur_hashes to 0
/// updates start_hash to id of last Entry, sets num_hashes to 0
pub fn next_entries_mut(
    start_hash: &mut Hash,
    cur_hashes: &mut u64,
    num_hashes: &mut u64,
    transactions: Vec<Transaction>,
) -> Vec<Entry> {
    if transactions.is_empty() {
        vec![Entry::new_mut(start_hash, cur_hashes, transactions, false)]
    // TODO: find a magic number that works better than |   ?
    //                                                   V
    if transactions.is_empty() || transactions.len() == 1 {
        vec![Entry::new_mut(start_hash, num_hashes, transactions, false)]
    } else {
        let mut chunk_len = transactions.len();
        let mut chunk_start = 0;
        let mut entries = Vec::new();

        // check for fit, make sure they can be serialized
        while !Entry::will_fit(transactions[0..chunk_len].to_vec()) {
            chunk_len /= 2;
        }
        while chunk_start < transactions.len() {
            let mut chunk_end = transactions.len();
            let mut upper = chunk_end;
            let mut lower = chunk_start;
            let mut next = chunk_end; // be optimistic that all will fit

        let mut num_chunks = if transactions.len() % chunk_len == 0 {
            transactions.len() / chunk_len
        } else {
            transactions.len() / chunk_len + 1
        };

        let mut entries = Vec::with_capacity(num_chunks);

        for chunk in transactions.chunks(chunk_len) {
            num_chunks -= 1;
            // binary search for how many transactions will fit in an Entry (i.e. a BLOB)
            loop {
                debug!(
                    "chunk_end {}, upper {} lower {} next {} transactions.len() {}",
                    chunk_end,
                    upper,
                    lower,
                    next,
                    transactions.len()
                );
                if Entry::will_fit(transactions[chunk_start..chunk_end].to_vec()) {
                    next = (upper + chunk_end) / 2;
                    lower = chunk_end;
                    debug!(
                        "chunk_end {} fits, maybe too well? trying {}",
                        chunk_end, next
                    );
                } else {
                    next = (lower + chunk_end) / 2;
                    upper = chunk_end;
                    debug!("chunk_end {} doesn't fit! trying {}", chunk_end, next);
                }
                // same as last time
                if next == chunk_end {
                    debug!("converged on chunk_end {}", chunk_end);
                    break;
                }
                chunk_end = next;
            }
            entries.push(Entry::new_mut(
                start_hash,
                cur_hashes,
                chunk.to_vec(),
                num_chunks > 0,
                num_hashes,
                transactions[chunk_start..chunk_end].to_vec(),
                transactions.len() - chunk_end > 0,
            ));
            chunk_start = chunk_end;
        }

        entries
    }
}
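
// Editor's note: a worked trace of the fit search above (not part of the
// diff), assuming 10 transactions of which at most 4 fit in one blob:
//
//   chunk_end=10  doesn't fit -> try (0+10)/2 = 5   (upper=10)
//   chunk_end=5   doesn't fit -> try (0+5)/2  = 2   (upper=5)
//   chunk_end=2   fits        -> try (5+2)/2  = 3   (lower=2)
//   chunk_end=3   fits        -> try (5+3)/2  = 4   (lower=3)
//   chunk_end=4   fits        -> (5+4)/2 = 4 == chunk_end, converged
//
// The first Entry takes transactions[0..4] and the outer loop repeats the
// search from chunk_start = 4.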
@@ -102,28 +527,41 @@ pub fn next_entries_mut(
/// Creates the next Entries for given transactions
pub fn next_entries(
    start_hash: &Hash,
    cur_hashes: u64,
    num_hashes: u64,
    transactions: Vec<Transaction>,
) -> Vec<Entry> {
    let mut id = *start_hash;
    let mut num_hashes = cur_hashes;
    let mut num_hashes = num_hashes;
    next_entries_mut(&mut id, &mut num_hashes, transactions)
}

#[cfg(test)]
mod tests {
    use super::*;
    use bincode::serialized_size;
    use chrono::prelude::*;
    use entry::{next_entry, Entry};
    use hash::hash;
    use packet::{BlobRecycler, BLOB_DATA_SIZE};
    use signature::{KeyPair, KeyPairUtil};
    use packet::{BlobRecycler, BLOB_DATA_SIZE, PACKET_DATA_SIZE};
    use signature::{Keypair, KeypairUtil};
    use std;
    use std::net::{IpAddr, Ipv4Addr, SocketAddr};
    use transaction::Transaction;
    use transaction::{Transaction, Vote};

    fn tmp_ledger_path(name: &str) -> String {
        use std::env;
        let out_dir = env::var("OUT_DIR").unwrap_or_else(|_| "target".to_string());
        let keypair = Keypair::new();

        format!("{}/tmp-ledger-{}-{}", out_dir, name, keypair.pubkey())
    }

    #[test]
    fn test_verify_slice() {
        use logger;
        logger::setup();
        let zero = Hash::default();
        let one = hash(&zero);
        let one = hash(&zero.as_ref());
        assert!(vec![][..].verify(&zero)); // base case
        assert!(vec![Entry::new_tick(0, &zero)][..].verify(&zero)); // singleton case 1
        assert!(!vec![Entry::new_tick(0, &zero)][..].verify(&one)); // singleton case 2, bad
@@ -134,14 +572,59 @@ mod tests {
        assert!(!bad_ticks.verify(&zero)); // inductive step, bad
    }

    fn make_tiny_test_entries(num: usize) -> Vec<Entry> {
        let zero = Hash::default();
        let one = hash(&zero.as_ref());
        let keypair = Keypair::new();

        let mut id = one;
        let mut num_hashes = 0;
        (0..num)
            .map(|_| {
                Entry::new_mut(
                    &mut id,
                    &mut num_hashes,
                    vec![Transaction::new_timestamp(&keypair, Utc::now(), one)],
                    false,
                )
            })
            .collect()
    }

    fn make_test_entries() -> Vec<Entry> {
        let zero = Hash::default();
        let one = hash(&zero.as_ref());
        let keypair = Keypair::new();
        let tx0 = Transaction::new_vote(
            &keypair,
            Vote {
                version: 0,
                contact_info_version: 1,
            },
            one,
            1,
        );
        let tx1 = Transaction::new_timestamp(&keypair, Utc::now(), one);
        //
        // TODO: this magic number and the mix of transaction types
        //       is designed to fill up a Blob more or less exactly,
        //       to get near enough to the threshold that
        //       deserialization falls over if it uses the wrong size()
        //       parameter to index into blob.data()
        //
        // magic numbers -----------------+
        //                                |
        //                                V
        let mut transactions = vec![tx0; 362];
        transactions.extend(vec![tx1; 100]);
        next_entries(&zero, 0, transactions)
    }

    #[test]
    fn test_entries_to_blobs() {
        let zero = Hash::default();
        let one = hash(&zero);
        let keypair = KeyPair::new();
        let tx0 = Transaction::new(&keypair, keypair.pubkey(), 1, one);
        let transactions = vec![tx0; 10_000];
        let entries = next_entries(&zero, 0, transactions);
        use logger;
        logger::setup();
        let entries = make_test_entries();

        let blob_recycler = BlobRecycler::default();
        let mut blob_q = VecDeque::new();
@@ -152,6 +635,8 @@ mod tests {

    #[test]
    fn test_bad_blobs_attack() {
        use logger;
        logger::setup();
        let blob_recycler = BlobRecycler::default();
        let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 8000);
        let blobs_q = packet::to_blobs(vec![(0, addr)], &blob_recycler).unwrap(); // <-- attack!
@@ -160,39 +645,230 @@ mod tests {

    #[test]
    fn test_next_entries() {
        use logger;
        logger::setup();
        let id = Hash::default();
        let next_id = hash(&id);
        let keypair = KeyPair::new();
        let tx0 = Transaction::new(&keypair, keypair.pubkey(), 1, next_id);
        let next_id = hash(&id.as_ref());
        let keypair = Keypair::new();
        let tx_small = Transaction::new_vote(
            &keypair,
            Vote {
                version: 0,
                contact_info_version: 2,
            },
            next_id,
            2,
        );
        let tx_large = Transaction::new(&keypair, keypair.pubkey(), 1, next_id);

        let tx_small_size = serialized_size(&tx_small).unwrap();
        let tx_large_size = serialized_size(&tx_large).unwrap();
        assert!(tx_small_size < tx_large_size);
        assert!(tx_large_size < PACKET_DATA_SIZE as u64);

        // NOTE: if Entry grows to larger than a transaction, the code below falls over
        let threshold = (BLOB_DATA_SIZE / 256) - 1; // 256 is transaction size
        let threshold = (BLOB_DATA_SIZE / PACKET_DATA_SIZE) - 1;

        // verify no split
        let transactions = vec![tx0.clone(); threshold];
        let transactions = vec![tx_small.clone(); threshold];
        let entries0 = next_entries(&id, 0, transactions.clone());
        assert_eq!(entries0.len(), 1);
        assert!(entries0.verify(&id));

        // verify the split
        let transactions = vec![tx0.clone(); threshold * 2];
        // verify the split with uniform transactions
        let transactions = vec![tx_small.clone(); threshold * 2];
        let entries0 = next_entries(&id, 0, transactions.clone());
        assert_eq!(entries0.len(), 2);
        assert!(entries0[0].has_more);
        assert!(!entries0[entries0.len() - 1].has_more);

        assert!(entries0.verify(&id));
        // test hand-construction... brittle, changes if split method changes... ?
        // let mut entries1 = vec![];
        // entries1.push(Entry::new(&id, 1, transactions[..threshold].to_vec(), true));
        // id = entries1[0].id;
        // entries1.push(Entry::new(
        //     &id,
        //     1,
        //     transactions[threshold..].to_vec(),
        //     false,
        // ));
        //
        // assert_eq!(entries0, entries1);

        // verify the split with small transactions followed by large
        // transactions
        let mut transactions = vec![tx_small.clone(); BLOB_DATA_SIZE / (tx_small_size as usize)];
        let large_transactions = vec![tx_large.clone(); BLOB_DATA_SIZE / (tx_large_size as usize)];

        transactions.extend(large_transactions);

        let entries0 = next_entries(&id, 0, transactions.clone());
        assert!(entries0.len() > 2);
        assert!(entries0[0].has_more);
        assert!(!entries0[entries0.len() - 1].has_more);
        assert!(entries0.verify(&id));
    }

    #[test]
    fn test_ledger_reader_writer() {
        use logger;
        logger::setup();
        let ledger_path = tmp_ledger_path("test_ledger_reader_writer");
        let entries = make_tiny_test_entries(10);

        {
            let mut writer = LedgerWriter::open(&ledger_path, true).unwrap();
            writer.write_entries(entries.clone()).unwrap();
            // drops writer, flushes buffers
        }
        verify_ledger(&ledger_path).unwrap();

        let mut read_entries = vec![];
        for x in read_ledger(&ledger_path, true).unwrap() {
            let entry = x.unwrap();
            trace!("entry... {:?}", entry);
            read_entries.push(entry);
        }
        assert_eq!(read_entries, entries);

        let mut window = LedgerWindow::open(&ledger_path).unwrap();

        for (i, entry) in entries.iter().enumerate() {
            let read_entry = window.get_entry(i as u64).unwrap();
            assert_eq!(*entry, read_entry);
        }
        assert!(window.get_entry(100).is_err());

        std::fs::remove_file(Path::new(&ledger_path).join("data")).unwrap();
        // empty data file should fall over
        assert!(LedgerWindow::open(&ledger_path).is_err());
        assert!(read_ledger(&ledger_path, false).is_err());

        std::fs::remove_dir_all(ledger_path).unwrap();
    }

    fn truncated_last_entry(ledger_path: &str, entries: Vec<Entry>) {
        let len = {
            let mut writer = LedgerWriter::open(&ledger_path, true).unwrap();
            writer.write_entries(entries).unwrap();
            writer.data.seek(SeekFrom::Current(0)).unwrap()
        };
        verify_ledger(&ledger_path).unwrap();

        let data = OpenOptions::new()
            .write(true)
            .open(Path::new(&ledger_path).join("data"))
            .unwrap();
        data.set_len(len - 4).unwrap();
    }

    fn garbage_on_data(ledger_path: &str, entries: Vec<Entry>) {
        let mut writer = LedgerWriter::open(&ledger_path, true).unwrap();
        writer.write_entries(entries).unwrap();
        writer.data.write_all(b"hi there!").unwrap();
    }

    fn read_ledger_check(ledger_path: &str, entries: Vec<Entry>, len: usize) {
        let read_entries = read_ledger(&ledger_path, true).unwrap();
        let mut i = 0;

        for entry in read_entries {
            assert_eq!(entry.unwrap(), entries[i]);
            i += 1;
        }
        assert_eq!(i, len);
    }

    fn ledger_window_check(ledger_path: &str, entries: Vec<Entry>, len: usize) {
        let mut window = LedgerWindow::open(&ledger_path).unwrap();
        for i in 0..len {
            let entry = window.get_entry(i as u64);
            assert_eq!(entry.unwrap(), entries[i]);
        }
    }

    #[test]
    fn test_recover_ledger() {
        use logger;
        logger::setup();

        let entries = make_tiny_test_entries(10);
        let ledger_path = tmp_ledger_path("test_recover_ledger");

        // truncate data file, tests recover inside read_ledger_check()
        truncated_last_entry(&ledger_path, entries.clone());
        read_ledger_check(&ledger_path, entries.clone(), entries.len() - 1);

        // truncate data file, tests recover inside LedgerWindow::new()
        truncated_last_entry(&ledger_path, entries.clone());
        ledger_window_check(&ledger_path, entries.clone(), entries.len() - 1);

        // restore last entry, tests recover_ledger() inside LedgerWriter::new()
        truncated_last_entry(&ledger_path, entries.clone());
        // verify should fail at first
        assert!(verify_ledger(&ledger_path).is_err());
        {
            let mut writer = LedgerWriter::recover(&ledger_path).unwrap();
            writer.write_entry(&entries[entries.len() - 1]).unwrap();
        }
        // and be fine after recover()
        verify_ledger(&ledger_path).unwrap();

        read_ledger_check(&ledger_path, entries.clone(), entries.len());
        ledger_window_check(&ledger_path, entries.clone(), entries.len());

        // make it look like data is newer in time, check reader...
        garbage_on_data(&ledger_path, entries.clone());
        read_ledger_check(&ledger_path, entries.clone(), entries.len());

        // make it look like data is newer in time, check window...
        garbage_on_data(&ledger_path, entries.clone());
        ledger_window_check(&ledger_path, entries.clone(), entries.len());

        // make it look like data is newer in time, check writer...
        garbage_on_data(&ledger_path, entries[..entries.len() - 1].to_vec());
        assert!(verify_ledger(&ledger_path).is_err());
        {
            let mut writer = LedgerWriter::recover(&ledger_path).unwrap();
            writer.write_entry(&entries[entries.len() - 1]).unwrap();
        }
        verify_ledger(&ledger_path).unwrap();
        read_ledger_check(&ledger_path, entries.clone(), entries.len());
        ledger_window_check(&ledger_path, entries.clone(), entries.len());
        let _ignored = remove_dir_all(&ledger_path);
    }

    #[test]
    fn test_verify_ledger() {
        use logger;
        logger::setup();

        let entries = make_tiny_test_entries(10);
        let ledger_path = tmp_ledger_path("test_verify_ledger");
        {
            let mut writer = LedgerWriter::open(&ledger_path, true).unwrap();
            writer.write_entries(entries.clone()).unwrap();
        }
        // TODO more cases that make ledger_verify() fail
        // assert!(verify_ledger(&ledger_path).is_err());

        assert!(verify_ledger(&ledger_path).is_ok());
        let _ignored = remove_dir_all(&ledger_path);
    }

    // #[test]
    // fn test_copy_ledger() {
    //     use logger;
    //     logger::setup();
    //
    //     let from = tmp_ledger_path("test_ledger_copy_from");
    //     let entries = make_tiny_test_entries(10);
    //
    //     let mut writer = LedgerWriter::new(&from, true).unwrap();
    //     writer.write_entries(entries.clone()).unwrap();
    //
    //     let to = tmp_ledger_path("test_ledger_copy_to");
    //
    //     copy_ledger(&from, &to).unwrap();
    //
    //     let mut read_entries = vec![];
    //     for x in read_ledger(&to).unwrap() {
    //         let entry = x.unwrap();
    //         trace!("entry... {:?}", entry);
    //         read_entries.push(entry);
    //     }
    //     assert_eq!(read_entries, entries);
    //
    //     std::fs::remove_dir_all(from).unwrap();
    //     std::fs::remove_dir_all(to).unwrap();
    // }

}

src/lib.rs

@@ -12,8 +12,10 @@ pub mod counter;
pub mod bank;
pub mod banking_stage;
pub mod blob_fetch_stage;
pub mod broadcast_stage;
pub mod budget;
pub mod choose_gossip_peer_strategy;
pub mod client;
pub mod crdt;
pub mod drone;
pub mod entry;
@@ -38,6 +40,7 @@ pub mod request;
pub mod request_processor;
pub mod request_stage;
pub mod result;
pub mod retransmit_stage;
pub mod rpu;
pub mod service;
pub mod signature;
@@ -49,10 +52,13 @@ pub mod timing;
pub mod tpu;
pub mod transaction;
pub mod tvu;
pub mod vote_stage;
pub mod voting;
pub mod window_stage;
pub mod wallet;
pub mod window;
pub mod write_stage;
extern crate bincode;
extern crate bs58;
extern crate byteorder;
extern crate chrono;
extern crate generic_array;
@@ -68,6 +74,7 @@ extern crate serde_derive;
extern crate pnet_datalink;
extern crate serde_json;
extern crate sha2;
extern crate sys_info;
extern crate untrusted;

#[cfg(test)]

src/logger.rs

@@ -9,6 +9,8 @@ static INIT: Once = ONCE_INIT;
/// Setup function that is only run once, even if called multiple times.
pub fn setup() {
    INIT.call_once(|| {
        env_logger::init();
        env_logger::Builder::from_default_env()
            .default_format_timestamp_nanos(true)
            .init();
    });
}

src/metrics.rs

@@ -6,6 +6,7 @@ use std::sync::mpsc::{channel, Receiver, RecvTimeoutError, Sender};
use std::sync::{Arc, Barrier, Mutex, Once, ONCE_INIT};
use std::thread;
use std::time::{Duration, Instant};
use sys_info::hostname;
use timing;

#[derive(Debug)]
@@ -57,7 +58,7 @@ impl InfluxDbMetricsWriter {
impl MetricsWriter for InfluxDbMetricsWriter {
    fn write(&self, points: Vec<influxdb::Point>) {
        if let Some(ref client) = self.client {
            info!("submitting {} points", points.len());
            debug!("submitting {} points", points.len());
            if let Err(err) = client.write_points(
                influxdb::Points { point: points },
                Some(influxdb::Precision::Milliseconds),
@@ -184,6 +185,56 @@ pub fn flush() {
    agent.flush();
}

/// Hook the panic handler to generate a data point on each panic
pub fn set_panic_hook(program: &'static str) {
    use std::panic;
    use std::sync::{Once, ONCE_INIT};
    static SET_HOOK: Once = ONCE_INIT;
    SET_HOOK.call_once(|| {
        let default_hook = panic::take_hook();
        panic::set_hook(Box::new(move |ono| {
            default_hook(ono);
            submit(
                influxdb::Point::new("panic")
                    .add_tag("program", influxdb::Value::String(program.to_string()))
                    .add_tag(
                        "thread",
                        influxdb::Value::String(
                            thread::current().name().unwrap_or("?").to_string(),
                        ),
                    )
                    // The 'one' field exists to give Kapacitor Alerts a numerical value
                    // to filter on
                    .add_field("one", influxdb::Value::Integer(1))
                    .add_field(
                        "message",
                        influxdb::Value::String(
                            // TODO: use ono.message() when it becomes stable
                            ono.to_string(),
                        ),
                    )
                    .add_field(
                        "location",
                        influxdb::Value::String(match ono.location() {
                            Some(location) => location.to_string(),
                            None => "?".to_string(),
                        }),
                    )
                    .add_field(
                        "host",
                        influxdb::Value::String(
                            hostname().unwrap_or_else(|_| "?".to_string())
                        ),
                    )
                    .to_owned(),
            );
            // Flush metrics immediately in case the process exits immediately
            // upon return
            flush();
        }));
    });
}
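
// Editor's note (not part of the diff): a program opts in once, early in
// main(); the program name shown is a hypothetical example:
//
//     metrics::set_panic_hook("fullnode");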

#[cfg(test)]
mod test {
    use super::*;

27 src/mint.rs
@@ -3,25 +3,21 @@
use entry::Entry;
use hash::{hash, Hash};
use ring::rand::SystemRandom;
use signature::{KeyPair, KeyPairUtil, PublicKey};
use signature::{Keypair, KeypairUtil, Pubkey};
use transaction::Transaction;
use untrusted::Input;

#[derive(Serialize, Deserialize, Debug)]
pub struct Mint {
    pub pkcs8: Vec<u8>,
    pubkey: PublicKey,
    pubkey: Pubkey,
    pub tokens: i64,
}

impl Mint {
    pub fn new(tokens: i64) -> Self {
        let rnd = SystemRandom::new();
        let pkcs8 = KeyPair::generate_pkcs8(&rnd)
            .expect("generate_pkcs8 in mint pub fn new")
            .to_vec();
    pub fn new_with_pkcs8(tokens: i64, pkcs8: Vec<u8>) -> Self {
        let keypair =
            KeyPair::from_pkcs8(Input::from(&pkcs8)).expect("from_pkcs8 in mint pub fn new");
            Keypair::from_pkcs8(Input::from(&pkcs8)).expect("from_pkcs8 in mint pub fn new");
        let pubkey = keypair.pubkey();
        Mint {
            pkcs8,
@@ -29,6 +25,15 @@ impl Mint {
            tokens,
        }
    }

    pub fn new(tokens: i64) -> Self {
        let rnd = SystemRandom::new();
        let pkcs8 = Keypair::generate_pkcs8(&rnd)
            .expect("generate_pkcs8 in mint pub fn new")
            .to_vec();
        Self::new_with_pkcs8(tokens, pkcs8)
    }

    pub fn seed(&self) -> Hash {
        hash(&self.pkcs8)
    }
@@ -37,11 +42,11 @@ impl Mint {
        self.create_entries()[1].id
    }

    pub fn keypair(&self) -> KeyPair {
        KeyPair::from_pkcs8(Input::from(&self.pkcs8)).expect("from_pkcs8 in mint pub fn keypair")
    pub fn keypair(&self) -> Keypair {
        Keypair::from_pkcs8(Input::from(&self.pkcs8)).expect("from_pkcs8 in mint pub fn keypair")
    }

    pub fn pubkey(&self) -> PublicKey {
    pub fn pubkey(&self) -> Pubkey {
        self.pubkey
    }
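
A short usage sketch (editor's illustration, not part of this diff) of the
refactored constructors; the token amount is an arbitrary example:

    let mint = Mint::new(1_000_000);             // generates a fresh pkcs8 key
    let keypair = mint.keypair();                // rebuilt from the stored pkcs8
    assert_eq!(keypair.pubkey(), mint.pubkey());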

69 src/nat.rs
@@ -1,19 +1,14 @@
//! The `nat` module assists with NAT traversal

extern crate futures;
extern crate p2p;
extern crate rand;
extern crate reqwest;
extern crate tokio_core;

use std::net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket};

use self::futures::Future;
use self::p2p::UdpSocketExt;
use rand::{thread_rng, Rng};
use std::env;
use std::io;
use std::str;

/// A data type representing a public Udp socket
pub struct UdpSocketPair {
@@ -51,67 +46,3 @@ pub fn udp_random_bind(start: u16, end: u16, tries: u32) -> io::Result<UdpSocket
    }
}

/// Binds a private Udp address to a public address using UPnP if possible
pub fn udp_public_bind(label: &str, startport: u16, endport: u16) -> UdpSocketPair {
    let private_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 0);

    let mut core = tokio_core::reactor::Core::new().unwrap();
    let handle = core.handle();
    let mc = p2p::P2p::default();
    let res = core.run({
        tokio_core::net::UdpSocket::bind_public(&private_addr, &handle, &mc)
            .map_err(|e| {
                info!("Failed to bind public socket for {}: {}", label, e);
            })
            .and_then(|(socket, public_addr)| Ok((public_addr, socket.local_addr().unwrap())))
    });

    match res {
        Ok((public_addr, local_addr)) => {
            info!(
                "Using local address {} mapped to UPnP public address {} for {}",
                local_addr, public_addr, label
            );

            // NAT should now be forwarding inbound packets directed at
            // |public_addr| to the local |receiver| socket...
            let receiver = UdpSocket::bind(local_addr).unwrap();

            // TODO: try to autodetect a broken NAT (issue #496)
            let sender = if env::var("BROKEN_NAT").is_err() {
                receiver.try_clone().unwrap()
            } else {
                // ... however for outbound packets, some NATs *will not* rewrite the
                // source port from |receiver.local_addr().port()| to |public_addr.port()|.
                // This is currently a problem when talking with a fullnode as it
                // assumes it can send UDP packets back at the source. This hits the
                // NAT as a datagram for |receiver.local_addr().port()| on the NAT's public
                // IP, which the NAT promptly discards. As a short term hack, create a
                // local UDP socket, |sender|, with the same port as |public_addr.port()|.
                //
                // TODO: Remove the |sender| socket and deal with the downstream changes to
                //       the UDP signalling
                let mut local_addr_sender = local_addr;
                local_addr_sender.set_port(public_addr.port());
                UdpSocket::bind(local_addr_sender).unwrap()
            };

            UdpSocketPair {
                addr: public_addr,
                receiver,
                sender,
            }
        }
        Err(_) => {
            let sender = udp_random_bind(startport, endport, 5).unwrap();
            let local_addr = sender.local_addr().unwrap();
            info!("Using local address {} for {}", local_addr, label);
            UdpSocketPair {
                addr: private_addr,
                receiver: sender.try_clone().unwrap(),
                sender,
            }
        }
    }
}

16 src/ncp.rs
@@ -1,7 +1,7 @@
//! The `ncp` module implements the network control plane.

use crdt::Crdt;
use packet::{BlobRecycler, SharedBlob};
use packet::BlobRecycler;
use result::Result;
use service::Service;
use std::net::UdpSocket;
@@ -10,6 +10,7 @@ use std::sync::mpsc::channel;
use std::sync::{Arc, RwLock};
use std::thread::{self, JoinHandle};
use streamer;
use window::SharedWindow;

pub struct Ncp {
    exit: Arc<AtomicBool>,
@@ -19,7 +20,8 @@ pub struct Ncp {
impl Ncp {
    pub fn new(
        crdt: &Arc<RwLock<Crdt>>,
        window: Arc<RwLock<Vec<Option<SharedBlob>>>>,
        window: SharedWindow,
        ledger_path: Option<&str>,
        gossip_listen_socket: UdpSocket,
        gossip_send_socket: UdpSocket,
        exit: Arc<AtomicBool>,
@@ -28,7 +30,7 @@ impl Ncp {
        let (request_sender, request_receiver) = channel();
        trace!(
            "Ncp: id: {:?}, listening on: {:?}",
            &crdt.read().unwrap().me[..4],
            &crdt.read().unwrap().me.as_ref()[..4],
            gossip_listen_socket.local_addr().unwrap()
        );
        let t_receiver = streamer::blob_receiver(
@@ -47,6 +49,7 @@ impl Ncp {
        let t_listen = Crdt::listen(
            crdt.clone(),
            window,
            ledger_path,
            blob_recycler.clone(),
            request_receiver,
            response_sender.clone(),
@@ -88,13 +91,14 @@ mod tests {
    // test that stage will exit when flag is set
    fn test_exit() {
        let exit = Arc::new(AtomicBool::new(false));
        let tn = TestNode::new();
        let crdt = Crdt::new(tn.data.clone());
        let tn = TestNode::new_localhost();
        let crdt = Crdt::new(tn.data.clone()).expect("Crdt::new");
        let c = Arc::new(RwLock::new(crdt));
        let w = Arc::new(RwLock::new(vec![]));
        let d = Ncp::new(
            &c.clone(),
            &c,
            w,
            None,
            tn.sockets.gossip,
            tn.sockets.gossip_send,
            exit.clone(),

118 src/packet.rs
@@ -2,9 +2,10 @@
use bincode::{deserialize, serialize};
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use counter::Counter;
use log::Level;
use result::{Error, Result};
use serde::Serialize;
use signature::PublicKey;
use signature::Pubkey;
use std::collections::VecDeque;
use std::fmt;
use std::io;
@@ -19,14 +20,13 @@ pub type SharedBlobs = VecDeque<SharedBlob>;
pub type PacketRecycler = Recycler<Packets>;
pub type BlobRecycler = Recycler<Blob>;

const LOG_RATE: usize = 10;
pub const NUM_PACKETS: usize = 1024 * 8;
pub const BLOB_SIZE: usize = 64 * 1024;
pub const BLOB_SIZE: usize = (64 * 1024 - 128); // wikipedia says there should be 20b for ipv4 headers
pub const BLOB_DATA_SIZE: usize = BLOB_SIZE - BLOB_HEADER_SIZE;
pub const PACKET_DATA_SIZE: usize = 256;
pub const NUM_BLOBS: usize = (NUM_PACKETS * PACKET_DATA_SIZE) / BLOB_SIZE;

#[derive(Clone, Default)]
#[derive(Clone, Default, Debug, PartialEq)]
#[repr(C)]
pub struct Meta {
    pub size: usize,
@@ -63,6 +63,19 @@ impl Default for Packet {
    }
}

pub trait Reset {
    // Reset trait is an object that can re-initialize important parts
    // of itself, similar to Default, but not necessarily a full clear;
    // also, we do it in-place.
    fn reset(&mut self);
}

impl Reset for Packet {
    fn reset(&mut self) {
        self.meta = Meta::default();
    }
}

impl Meta {
    pub fn addr(&self) -> SocketAddr {
        if !self.v6 {
@@ -113,6 +126,14 @@ impl Default for Packets {
    }
}

impl Reset for Packets {
    fn reset(&mut self) {
        for i in 0..self.packets.len() {
            self.packets[i].reset();
        }
    }
}

#[derive(Clone)]
pub struct Blob {
    pub data: [u8; BLOB_SIZE],
@@ -140,6 +161,19 @@ impl Default for Blob {
    }
}

impl Reset for Blob {
    fn reset(&mut self) {
        self.meta = Meta::default();
        self.data[..BLOB_HEADER_SIZE].copy_from_slice(&[0u8; BLOB_HEADER_SIZE]);
    }
}

#[derive(Debug)]
pub enum BlobError {
    /// the Blob's meta and data are not self-consistent
    BadState,
}

pub struct Recycler<T> {
    gc: Arc<Mutex<Vec<Arc<RwLock<T>>>>>,
}
@@ -160,24 +194,35 @@ impl<T: Default> Clone for Recycler<T> {
    }
}

impl<T: Default> Recycler<T> {
impl<T: Default + Reset> Recycler<T> {
    pub fn allocate(&self) -> Arc<RwLock<T>> {
        let mut gc = self.gc.lock().expect("recycler lock in pub fn allocate");
        let x = gc.pop()
            .unwrap_or_else(|| Arc::new(RwLock::new(Default::default())));

        // Only return the item if this recycler is the last reference to it.
        // Remove this check once `T` holds a Weak reference back to this
        // recycler and implements `Drop`. At the time of this writing, Weak can't
        // be passed across threads ('alloc' is a nightly-only API), and so our
        // reference-counted recyclables are awkwardly being recycled by hand,
        // which allows this race condition to exist.
        if Arc::strong_count(&x) > 1 {
            warn!("Recycled item still in use. Booting it.");
            drop(gc);
            self.allocate()
        } else {
            x
        loop {
            if let Some(x) = gc.pop() {
                // Only return the item if this recycler is the last reference to it.
                // Remove this check once `T` holds a Weak reference back to this
                // recycler and implements `Drop`. At the time of this writing, Weak can't
                // be passed across threads ('alloc' is a nightly-only API), and so our
                // reference-counted recyclables are awkwardly being recycled by hand,
                // which allows this race condition to exist.
                if Arc::strong_count(&x) >= 1 {
                    // This message is commented out because it is annoying for the known
                    // use case of the validator hanging onto a blob in the window while
                    // also sending it over to retransmit_request
                    //
                    // warn!("Recycled item still in use. Booting it.");
                    continue;
                }

                {
                    let mut w = x.write().unwrap();
                    w.reset();
                }
                return x;
            } else {
                return Arc::new(RwLock::new(Default::default()));
            }
        }
    }
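
    // Editor's sketch (not part of the diff) of the intended allocate/recycle
    // lifecycle; the names match this file:
    //
    //     let recycler = BlobRecycler::default();
    //     let shared = recycler.allocate();    // popped from gc and reset, or fresh
    //     shared.write().unwrap().set_size(0);
    //     recycler.recycle(shared);            // return the Arc to the pool
    //
    // The strong_count check above keeps allocate() from handing out a buffer
    // that another holder of the Arc could still be reading or writing.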
    pub fn recycle(&self, x: Arc<RwLock<T>>) {
@@ -188,7 +233,6 @@ impl<T: Default> Recycler<T> {

impl Packets {
    fn run_read_from(&mut self, socket: &UdpSocket) -> Result<usize> {
        static mut COUNTER: Counter = create_counter!("packets", LOG_RATE);
        self.packets.resize(NUM_PACKETS, Packet::default());
        let mut i = 0;
        //DOCUMENTED SIDE-EFFECT
@@ -203,7 +247,7 @@ impl Packets {
            trace!("receiving on {}", socket.local_addr().unwrap());
            match socket.recv_from(&mut p.data) {
                Err(_) if i > 0 => {
                    inc_counter!(COUNTER, i);
                    inc_new_counter_info!("packets-recv_count", 1);
                    debug!("got {:?} messages on {}", i, socket.local_addr().unwrap());
                    break;
                }
@@ -214,6 +258,7 @@ impl Packets {
                Ok((nrecv, from)) => {
                    p.meta.size = nrecv;
                    p.meta.set_addr(&from);
                    trace!("got {} bytes from {}", nrecv, from);
                    if i == 0 {
                        socket.set_nonblocking(true)?;
                    }
@@ -275,7 +320,7 @@ pub fn to_blob<T: Serialize>(
    let mut b = blob.write().unwrap();
    let v = serialize(&resp)?;
    let len = v.len();
    assert!(len < BLOB_SIZE);
    assert!(len <= BLOB_SIZE);
    b.data[..len].copy_from_slice(&v);
    b.meta.size = len;
    b.meta.set_addr(&rsp_addr);
@@ -295,7 +340,7 @@ pub fn to_blobs<T: Serialize>(
}

const BLOB_INDEX_END: usize = size_of::<u64>();
const BLOB_ID_END: usize = BLOB_INDEX_END + size_of::<usize>() + size_of::<PublicKey>();
const BLOB_ID_END: usize = BLOB_INDEX_END + size_of::<usize>() + size_of::<Pubkey>();
const BLOB_FLAGS_END: usize = BLOB_ID_END + size_of::<u32>();
const BLOB_SIZE_END: usize = BLOB_FLAGS_END + size_of::<u64>();

@@ -322,12 +367,12 @@ impl Blob {
    }
    /// sender id, we use this for identifying if it's a blob from the leader that we should
    /// retransmit. eventually blobs should have a signature that we can use for spam filtering
    pub fn get_id(&self) -> Result<PublicKey> {
    pub fn get_id(&self) -> Result<Pubkey> {
        let e = deserialize(&self.data[BLOB_INDEX_END..BLOB_ID_END])?;
        Ok(e)
    }

    pub fn set_id(&mut self, id: PublicKey) -> Result<()> {
    pub fn set_id(&mut self, id: Pubkey) -> Result<()> {
        let wtr = serialize(&id)?;
        self.data[BLOB_INDEX_END..BLOB_ID_END].clone_from_slice(&wtr);
        Ok(())
@@ -374,6 +419,14 @@ impl Blob {
    pub fn data_mut(&mut self) -> &mut [u8] {
        &mut self.data[BLOB_HEADER_SIZE..]
    }
    pub fn get_size(&self) -> Result<usize> {
        let size = self.get_data_size()? as usize;
        if self.meta.size == size {
            Ok(size - BLOB_HEADER_SIZE)
        } else {
            Err(Error::BlobError(BlobError::BadState))
        }
    }
    pub fn set_size(&mut self, size: usize) {
        let new_size = size + BLOB_HEADER_SIZE;
        self.meta.size = new_size;
@@ -407,6 +460,7 @@ impl Blob {
            Ok((nrecv, from)) => {
                p.meta.size = nrecv;
                p.meta.set_addr(&from);
                trace!("got {} bytes from {}", nrecv, from);
                if i == 0 {
                    socket.set_nonblocking(true)?;
                }
@@ -423,7 +477,7 @@ impl Blob {
        let p = r.read().expect("'r' read lock in pub fn send_to");
        let a = p.meta.addr();
        if let Err(e) = socket.send_to(&p.data[..p.meta.size], &a) {
            info!(
            warn!(
                "error sending {} byte packet to {:?}: {:?}",
                p.meta.size, a, e
            );
@@ -439,7 +493,8 @@ impl Blob {
#[cfg(test)]
mod tests {
    use packet::{
        to_packets, Blob, BlobRecycler, Packet, PacketRecycler, Packets, Recycler, NUM_PACKETS,
        to_packets, Blob, BlobRecycler, Meta, Packet, PacketRecycler, Packets, Recycler, Reset,
        BLOB_HEADER_SIZE, NUM_PACKETS,
    };
    use request::Request;
    use std::collections::VecDeque;
@@ -458,6 +513,12 @@ mod tests {
        assert_eq!(r.gc.lock().unwrap().len(), 0);
    }

    impl Reset for u8 {
        fn reset(&mut self) {
            *self = Default::default();
        }
    }

    #[test]
    pub fn test_leaked_recyclable() {
        // Ensure that the recycler won't return an item
@@ -595,6 +656,9 @@ mod tests {
        b.data_mut()[0] = 1;
        assert_eq!(b.data()[0], 1);
        assert_eq!(b.get_index().unwrap(), <u64>::max_value());
        b.reset();
        assert!(b.data[..BLOB_HEADER_SIZE].starts_with(&[0u8; BLOB_HEADER_SIZE]));
        assert_eq!(b.meta, Meta::default());
    }

}

src/payment_plan.rs

@@ -4,7 +4,7 @@
//! `Payment`, the payment is executed.

use chrono::prelude::*;
use signature::PublicKey;
use signature::Pubkey;

/// The types of events a payment plan can process.
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
@@ -12,18 +12,18 @@ pub enum Witness {
    /// The current time.
    Timestamp(DateTime<Utc>),

    /// A signature from PublicKey.
    /// A signature from Pubkey.
    Signature,
}

/// Some amount of tokens that should be sent to the `to` `PublicKey`.
/// Some amount of tokens that should be sent to the `to` `Pubkey`.
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub struct Payment {
    /// Amount to be paid.
    pub tokens: i64,

    /// The `PublicKey` that `tokens` should be paid to.
    pub to: PublicKey,
    /// The `Pubkey` that `tokens` should be paid to.
    pub to: Pubkey,
}

/// Interface to smart contracts.
@@ -36,5 +36,5 @@ pub trait PaymentPlan {

    /// Apply a witness to the payment plan to see if the plan can be reduced.
    /// If so, modify the plan in-place.
    fn apply_witness(&mut self, witness: &Witness, from: &PublicKey);
    fn apply_witness(&mut self, witness: &Witness, from: &Pubkey);
}

src/record_stage.rs

@@ -140,7 +140,7 @@ impl Service for RecordStage {
mod tests {
    use super::*;
    use ledger::Block;
    use signature::{KeyPair, KeyPairUtil};
    use signature::{Keypair, KeypairUtil};
    use std::sync::mpsc::channel;
    use std::thread::sleep;

@@ -185,8 +185,8 @@ mod tests {
        let (tx_sender, signal_receiver) = channel();
        let zero = Hash::default();
        let (_record_stage, entry_receiver) = RecordStage::new(signal_receiver, &zero);
        let alice_keypair = KeyPair::new();
        let bob_pubkey = KeyPair::new().pubkey();
        let alice_keypair = Keypair::new();
        let bob_pubkey = Keypair::new().pubkey();
        let tx0 = Transaction::new(&alice_keypair, bob_pubkey, 1, zero);
        let tx1 = Transaction::new(&alice_keypair, bob_pubkey, 2, zero);
        tx_sender

src/recorder.rs

@@ -23,7 +23,7 @@ impl Recorder {
    }

    pub fn hash(&mut self) {
        self.last_hash = hash(&self.last_hash);
        self.last_hash = hash(&self.last_hash.as_ref());
        self.num_hashes += 1;
    }

@ -1,44 +1,38 @@
|
||||
//! The `replicate_stage` replicates transactions broadcast by the leader.
|
||||
|
||||
use bank::Bank;
|
||||
use bincode::serialize;
|
||||
use counter::Counter;
|
||||
use crdt::Crdt;
use ledger;
use ledger::{reconstruct_entries_from_blobs, LedgerWriter};
use log::Level;
use packet::BlobRecycler;
use result::{Error, Result};
use service::Service;
use signature::KeyPair;
use std::collections::VecDeque;
use signature::Keypair;
use std::net::UdpSocket;
use std::sync::atomic::AtomicBool;
use std::sync::atomic::AtomicUsize;
use std::sync::mpsc::channel;
use std::sync::mpsc::RecvTimeoutError;
use std::sync::{Arc, RwLock};
use std::thread::{self, Builder, JoinHandle};
use std::time::Duration;
use streamer::{responder, BlobReceiver, BlobSender};
use timing;
use transaction::Transaction;
use streamer::{responder, BlobReceiver};
use vote_stage::VoteStage;
use voting::entries_to_votes;

pub struct ReplicateStage {
    thread_hdls: Vec<JoinHandle<()>>,
}

const VOTE_TIMEOUT_MS: u64 = 1000;
const LOG_RATE: usize = 10;

impl ReplicateStage {
    /// Process entry blobs, already in order
    fn replicate_requests(
        keypair: &Arc<KeyPair>,
        bank: &Arc<Bank>,
        crdt: &Arc<RwLock<Crdt>>,
        blob_recycler: &BlobRecycler,
        window_receiver: &BlobReceiver,
        vote_blob_sender: &BlobSender,
        last_vote: &mut u64,
        ledger_writer: Option<&mut LedgerWriter>,
    ) -> Result<()> {
        let timer = Duration::new(1, 0);
        //coalesce all the available blobs into a single vote
@@ -46,54 +40,45 @@ impl ReplicateStage {
        while let Ok(mut more) = window_receiver.try_recv() {
            blobs.append(&mut more);
        }
        let blobs_len = blobs.len();
        let entries = ledger::reconstruct_entries_from_blobs(blobs.clone())?;
        let votes = entries_to_votes(&entries);
        let entries = reconstruct_entries_from_blobs(blobs.clone())?;

        let res = bank.process_entries(entries.clone());

        static mut COUNTER_REPLICATE: Counter = create_counter!("replicate-transactions", LOG_RATE);
        inc_counter!(
            COUNTER_REPLICATE,
            entries.iter().map(|x| x.transactions.len()).sum()
        );
        let res = bank.process_entries(entries);
        if res.is_err() {
            error!("process_entries {} {:?}", blobs_len, res);
        }
        let now = timing::timestamp();
        if now - *last_vote > VOTE_TIMEOUT_MS {
            let height = res?;
            let last_id = bank.last_id();
            let shared_blob = blob_recycler.allocate();
            let (vote, addr) = {
                let mut wcrdt = crdt.write().unwrap();
                wcrdt.insert_votes(&votes);
                //TODO: doesn't seem like there is a synchronous call to get height and id
                info!("replicate_stage {} {:?}", height, &last_id[..8]);
                wcrdt.new_vote(height, last_id)
            }?;
            {
                let mut blob = shared_blob.write().unwrap();
                let tx = Transaction::new_vote(&keypair, vote, last_id, 0);
                let bytes = serialize(&tx)?;
                let len = bytes.len();
                blob.data[..len].copy_from_slice(&bytes);
                blob.meta.set_addr(&addr);
                blob.meta.size = len;
            }
            *last_vote = now;
            vote_blob_sender.send(VecDeque::from(vec![shared_blob]))?;
        }
        while let Some(blob) = blobs.pop_front() {
            blob_recycler.recycle(blob);
        }

        {
            let votes = entries_to_votes(&entries);
            let mut wcrdt = crdt.write().unwrap();
            wcrdt.insert_votes(&votes);
        }

        inc_new_counter_info!(
            "replicate-transactions",
            entries.iter().map(|x| x.transactions.len()).sum()
        );

        // TODO: move this to another stage?
        if let Some(ledger_writer) = ledger_writer {
            ledger_writer.write_entries(entries)?;
        }

        if res.is_err() {
            error!("process_entries {:?}", res);
        }
        let _ = res?;

        Ok(())
    }
    pub fn new(
        keypair: KeyPair,
        keypair: Keypair,
        bank: Arc<Bank>,
        crdt: Arc<RwLock<Crdt>>,
        blob_recycler: BlobRecycler,
        window_receiver: BlobReceiver,
        ledger_path: Option<&str>,
        exit: Arc<AtomicBool>,
    ) -> Self {
        let (vote_blob_sender, vote_blob_receiver) = channel();
        let send = UdpSocket::bind("0.0.0.0:0").expect("bind");
@@ -103,34 +88,41 @@ impl ReplicateStage {
            blob_recycler.clone(),
            vote_blob_receiver,
        );
        let skeypair = Arc::new(keypair);

        let vote_stage = VoteStage::new(
            Arc::new(keypair),
            bank.clone(),
            crdt.clone(),
            blob_recycler.clone(),
            vote_blob_sender,
            exit,
        );

        let mut ledger_writer = ledger_path.map(|p| LedgerWriter::open(p, false).unwrap());

        let t_replicate = Builder::new()
            .name("solana-replicate-stage".to_string())
            .spawn(move || {
                let mut timestamp: u64 = 0;
                loop {
                    if let Err(e) = Self::replicate_requests(
                        &skeypair,
                        &bank,
                        &crdt,
                        &blob_recycler,
                        &window_receiver,
                        &vote_blob_sender,
                        &mut timestamp,
                    ) {
                        match e {
                            Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => break,
                            Error::RecvTimeoutError(RecvTimeoutError::Timeout) => (),
                            _ => error!("{:?}", e),
                        }
            .spawn(move || loop {
                if let Err(e) = Self::replicate_requests(
                    &bank,
                    &crdt,
                    &blob_recycler,
                    &window_receiver,
                    ledger_writer.as_mut(),
                ) {
                    match e {
                        Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => break,
                        Error::RecvTimeoutError(RecvTimeoutError::Timeout) => (),
                        _ => error!("{:?}", e),
                    }
                }
            })
            .unwrap();
        ReplicateStage {
            thread_hdls: vec![t_responder, t_replicate],
        }

        let mut thread_hdls = vec![t_responder, t_replicate];
        thread_hdls.extend(vote_stage.thread_hdls());

        ReplicateStage { thread_hdls }
    }
}
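The vote path in the old `replicate_requests` is throttled by wall-clock time: a vote is only assembled and sent when more than `VOTE_TIMEOUT_MS` (1000 ms) have elapsed since `last_vote`. A minimal sketch of that throttle pattern, with `send_vote` as a hypothetical stand-in for the vote-blob construction above:

    use std::time::{SystemTime, UNIX_EPOCH};

    const VOTE_TIMEOUT_MS: u64 = 1000;

    // Milliseconds since the epoch, mirroring what timing::timestamp() returns.
    fn timestamp() -> u64 {
        SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .unwrap()
            .as_millis() as u64
    }

    fn maybe_vote(last_vote: &mut u64, mut send_vote: impl FnMut()) {
        let now = timestamp();
        // Only act once the timeout has elapsed; otherwise skip this round.
        if now - *last_vote > VOTE_TIMEOUT_MS {
            send_vote();
            *last_vote = now; // restart the throttle window
        }
    }

    fn main() {
        let mut last_vote = 0u64;
        maybe_vote(&mut last_vote, || println!("vote sent"));
    }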
8  src/request.rs  Normal file → Executable file
@@ -1,15 +1,16 @@
//! The `request` module defines the messages for the thin client.

use hash::Hash;
use signature::{PublicKey, Signature};
use signature::{Pubkey, Signature};

#[cfg_attr(feature = "cargo-clippy", allow(large_enum_variant))]
#[derive(Serialize, Deserialize, Debug, Clone, Copy)]
pub enum Request {
    GetBalance { key: PublicKey },
    GetBalance { key: Pubkey },
    GetLastId,
    GetTransactionCount,
    GetSignature { signature: Signature },
    GetFinality,
}

impl Request {
@@ -21,8 +22,9 @@ impl Request {

#[derive(Serialize, Deserialize, Debug)]
pub enum Response {
    Balance { key: PublicKey, val: i64 },
    Balance { key: Pubkey, val: i64 },
    LastId { id: Hash },
    TransactionCount { transaction_count: u64 },
    SignatureStatus { signature_status: bool },
    Finality { time: usize },
}

6  src/request_processor.rs  Normal file → Executable file
@@ -46,6 +46,12 @@ impl RequestProcessor {
                info!("Response::Signature {:?}", rsp);
                Some(rsp)
            }
            Request::GetFinality => {
                let time = self.bank.finality();
                let rsp = (Response::Finality { time }, rsp_addr);
                info!("Response::Finality {:?}", rsp);
                Some(rsp)
            }
        }
    }

@@ -5,10 +5,11 @@ use bincode;
use crdt;
#[cfg(feature = "erasure")]
use erasure;
use packet;
use serde_json;
use std;
use std::any::Any;
use streamer;
use window;

#[derive(Debug)]
pub enum Error {
@@ -21,7 +22,8 @@ pub enum Error {
    Serialize(std::boxed::Box<bincode::ErrorKind>),
    BankError(bank::BankError),
    CrdtError(crdt::CrdtError),
    WindowError(streamer::WindowError),
    WindowError(window::WindowError),
    BlobError(packet::BlobError),
    #[cfg(feature = "erasure")]
    ErasureError(erasure::ErasureError),
    SendError,
@@ -49,8 +51,8 @@ impl std::convert::From<crdt::CrdtError> for Error {
        Error::CrdtError(e)
    }
}
impl std::convert::From<streamer::WindowError> for Error {
    fn from(e: streamer::WindowError) -> Error {
impl std::convert::From<window::WindowError> for Error {
    fn from(e: window::WindowError) -> Error {
        Error::WindowError(e)
    }
}
123  src/retransmit_stage.rs  New file
@@ -0,0 +1,123 @@
//! The `retransmit_stage` retransmits blobs between validators

use counter::Counter;
use crdt::Crdt;
use log::Level;
use packet::BlobRecycler;
use result::{Error, Result};
use service::Service;
use std::net::UdpSocket;
use std::sync::atomic::AtomicUsize;
use std::sync::mpsc::channel;
use std::sync::mpsc::RecvTimeoutError;
use std::sync::{Arc, RwLock};
use std::thread::{self, Builder, JoinHandle};
use std::time::Duration;
use streamer::BlobReceiver;
use window::{self, SharedWindow};

fn retransmit(
    crdt: &Arc<RwLock<Crdt>>,
    recycler: &BlobRecycler,
    r: &BlobReceiver,
    sock: &UdpSocket,
) -> Result<()> {
    let timer = Duration::new(1, 0);
    let mut dq = r.recv_timeout(timer)?;
    while let Ok(mut nq) = r.try_recv() {
        dq.append(&mut nq);
    }
    {
        for b in &dq {
            Crdt::retransmit(&crdt, b, sock)?;
        }
    }
    while let Some(b) = dq.pop_front() {
        recycler.recycle(b);
    }
    Ok(())
}

/// Service to retransmit messages from the leader to layer 1 nodes.
/// See `crdt` for network layer definitions.
/// # Arguments
/// * `sock` - Socket to read from. Read timeout is set to 1 second.
/// * `exit` - Boolean to signal system exit.
/// * `crdt` - This structure needs to be updated and populated by the bank and via gossip.
/// * `recycler` - Blob recycler.
/// * `r` - Receive channel for blobs to be retransmitted to all the layer 1 nodes.
fn retransmitter(
    sock: UdpSocket,
    crdt: Arc<RwLock<Crdt>>,
    recycler: BlobRecycler,
    r: BlobReceiver,
) -> JoinHandle<()> {
    Builder::new()
        .name("solana-retransmitter".to_string())
        .spawn(move || {
            trace!("retransmitter started");
            loop {
                if let Err(e) = retransmit(&crdt, &recycler, &r, &sock) {
                    match e {
                        Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => break,
                        Error::RecvTimeoutError(RecvTimeoutError::Timeout) => (),
                        _ => {
                            inc_new_counter_info!("streamer-retransmit-error", 1, 1);
                        }
                    }
                }
            }
            trace!("exiting retransmitter");
        })
        .unwrap()
}

pub struct RetransmitStage {
    thread_hdls: Vec<JoinHandle<()>>,
}

impl RetransmitStage {
    pub fn new(
        crdt: &Arc<RwLock<Crdt>>,
        window: SharedWindow,
        entry_height: u64,
        retransmit_socket: UdpSocket,
        blob_recycler: &BlobRecycler,
        fetch_stage_receiver: BlobReceiver,
    ) -> (Self, BlobReceiver) {
        let (retransmit_sender, retransmit_receiver) = channel();

        let t_retransmit = retransmitter(
            retransmit_socket,
            crdt.clone(),
            blob_recycler.clone(),
            retransmit_receiver,
        );
        let (blob_sender, blob_receiver) = channel();
        let t_window = window::window(
            crdt.clone(),
            window,
            entry_height,
            blob_recycler.clone(),
            fetch_stage_receiver,
            blob_sender,
            retransmit_sender,
        );
        let thread_hdls = vec![t_retransmit, t_window];

        (RetransmitStage { thread_hdls }, blob_receiver)
    }
}

impl Service for RetransmitStage {
    fn thread_hdls(self) -> Vec<JoinHandle<()>> {
        self.thread_hdls
    }

    fn join(self) -> thread::Result<()> {
        for thread_hdl in self.thread_hdls() {
            thread_hdl.join()?;
        }
        Ok(())
    }
}
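Both `retransmit` above and the services in `streamer.rs` below share a channel-drain idiom: block once with `recv_timeout`, then greedily append whatever else is already queued with `try_recv`, so one pass handles everything currently buffered. A self-contained sketch of the idiom over a plain `mpsc` channel (the `drain` helper is hypothetical):

    use std::sync::mpsc::{channel, Receiver, RecvTimeoutError};
    use std::time::Duration;

    // Block up to `timeout` for the first item, then drain without blocking.
    fn drain(r: &Receiver<Vec<u8>>, timeout: Duration) -> Result<Vec<Vec<u8>>, RecvTimeoutError> {
        let mut batch = vec![r.recv_timeout(timeout)?];
        while let Ok(more) = r.try_recv() {
            batch.push(more);
        }
        Ok(batch)
    }

    fn main() {
        let (tx, rx) = channel::<Vec<u8>>();
        tx.send(vec![1, 2, 3]).unwrap();
        tx.send(vec![4]).unwrap();
        let batch = drain(&rx, Duration::from_millis(200)).unwrap();
        assert_eq!(batch.len(), 2); // both queued blobs arrive in one pass
    }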
156  src/signature.rs
@@ -1,94 +1,136 @@
//! The `signature` module provides functionality for public and private keys.

use bs58;
use generic_array::typenum::{U32, U64};
use generic_array::GenericArray;
use rand::{ChaChaRng, Rng, SeedableRng};
use rayon::prelude::*;
use ring::error::Unspecified;
use ring::rand::SecureRandom;
use ring::signature::Ed25519KeyPair;
use ring::{rand, signature};
use std::cell::RefCell;
use untrusted;
use serde_json;
use std::error;
use std::fmt;
use std::fs::File;
use untrusted::Input;

pub type KeyPair = Ed25519KeyPair;
pub type PublicKey = GenericArray<u8, U32>;
pub type Signature = GenericArray<u8, U64>;
pub type Keypair = Ed25519KeyPair;
#[derive(Serialize, Deserialize, Clone, Copy, Default, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct Pubkey(GenericArray<u8, U32>);

pub trait KeyPairUtil {
    fn new() -> Self;
    fn pubkey(&self) -> PublicKey;
impl Pubkey {
    pub fn new(pubkey_vec: &[u8]) -> Self {
        Pubkey(GenericArray::clone_from_slice(&pubkey_vec))
    }
}

impl KeyPairUtil for Ed25519KeyPair {
impl AsRef<[u8]> for Pubkey {
    fn as_ref(&self) -> &[u8] {
        &self.0[..]
    }
}

impl fmt::Debug for Pubkey {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", bs58::encode(self.0).into_string())
    }
}

impl fmt::Display for Pubkey {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", bs58::encode(self.0).into_string())
    }
}
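The new `Pubkey` newtype renders as base58 through both `Debug` and `Display`, so a key prints identically in logs and user-facing output. A small usage sketch (test-style, the test name hypothetical):

    #[test]
    fn pubkey_prints_base58() {
        let pubkey = Pubkey::new(&[1u8; 32]);
        // Debug and Display share the same base58 rendering.
        assert_eq!(format!("{}", pubkey), format!("{:?}", pubkey));
        assert_eq!(pubkey.as_ref().len(), 32);
    }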

#[derive(Serialize, Deserialize, Clone, Copy, Default, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct Signature(GenericArray<u8, U64>);

impl Signature {
    pub fn new(signature_slice: &[u8]) -> Self {
        Signature(GenericArray::clone_from_slice(&signature_slice))
    }
    pub fn verify(&self, pubkey_bytes: &[u8], message_bytes: &[u8]) -> bool {
        let pubkey = Input::from(pubkey_bytes);
        let message = Input::from(message_bytes);
        let signature = Input::from(self.0.as_slice());
        signature::verify(&signature::ED25519, pubkey, message, signature).is_ok()
    }
}

impl AsRef<[u8]> for Signature {
    fn as_ref(&self) -> &[u8] {
        &self.0[..]
    }
}

impl fmt::Debug for Signature {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", bs58::encode(self.0).into_string())
    }
}

impl fmt::Display for Signature {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", bs58::encode(self.0).into_string())
    }
}

pub trait KeypairUtil {
    fn new() -> Self;
    fn pubkey(&self) -> Pubkey;
}

impl KeypairUtil for Ed25519KeyPair {
    /// Return a new ED25519 keypair
    fn new() -> Self {
        let rng = rand::SystemRandom::new();
        let pkcs8_bytes = signature::Ed25519KeyPair::generate_pkcs8(&rng)
.expect("generate_pkcs8 in signature pb fn new");
|
||||
signature::Ed25519KeyPair::from_pkcs8(untrusted::Input::from(&pkcs8_bytes))
|
||||
.expect("from_pcks8 in signature pb fn new")
|
||||
let pkcs8_bytes = Ed25519KeyPair::generate_pkcs8(&rng).expect("generate_pkcs8");
|
||||
Ed25519KeyPair::from_pkcs8(Input::from(&pkcs8_bytes)).expect("from_pcks8")
|
||||
    }

    /// Return the public key for the given keypair
    fn pubkey(&self) -> PublicKey {
        GenericArray::clone_from_slice(self.public_key_bytes())
    }
}

pub trait SignatureUtil {
    fn verify(&self, peer_public_key_bytes: &[u8], msg_bytes: &[u8]) -> bool;
}

impl SignatureUtil for GenericArray<u8, U64> {
    fn verify(&self, peer_public_key_bytes: &[u8], msg_bytes: &[u8]) -> bool {
        let peer_public_key = untrusted::Input::from(peer_public_key_bytes);
        let msg = untrusted::Input::from(msg_bytes);
        let sig = untrusted::Input::from(self);
        signature::verify(&signature::ED25519, peer_public_key, msg, sig).is_ok()
    fn pubkey(&self) -> Pubkey {
        Pubkey(GenericArray::clone_from_slice(self.public_key_bytes()))
    }
}

pub struct GenKeys {
    // This is necessary because the rng needs to mutate its state to remain
    // deterministic, and the fill trait requires an immutable reference to self
    generator: RefCell<ChaChaRng>,
    generator: ChaChaRng,
}

impl GenKeys {
    pub fn new(seed: [u8; 32]) -> GenKeys {
        let rng = ChaChaRng::from_seed(seed);
        GenKeys {
            generator: RefCell::new(rng),
        }
        let generator = ChaChaRng::from_seed(seed);
        GenKeys { generator }
    }

    pub fn new_key(&self) -> Vec<u8> {
        KeyPair::generate_pkcs8(self).unwrap().to_vec()
    fn gen_seed(&mut self) -> [u8; 32] {
        let mut seed = [0u8; 32];
        self.generator.fill(&mut seed);
        seed
    }

    pub fn gen_n_seeds(&self, n: i64) -> Vec<[u8; 32]> {
        let mut rng = self.generator.borrow_mut();
        (0..n).map(|_| rng.gen()).collect()
    fn gen_n_seeds(&mut self, n: i64) -> Vec<[u8; 32]> {
        (0..n).map(|_| self.gen_seed()).collect()
    }

    pub fn gen_n_keypairs(&self, n: i64) -> Vec<KeyPair> {
    pub fn gen_n_keypairs(&mut self, n: i64) -> Vec<Keypair> {
        self.gen_n_seeds(n)
            .into_par_iter()
            .map(|seed| {
                let pkcs8 = GenKeys::new(seed).new_key();
                KeyPair::from_pkcs8(untrusted::Input::from(&pkcs8)).unwrap()
            })
            .map(|seed| Keypair::from_seed_unchecked(Input::from(&seed)).unwrap())
            .collect()
    }
}
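The reworked `GenKeys` drops the `RefCell` and is just a seeded ChaCha stream: one 32-byte seed deterministically yields per-key seeds, each of which becomes an Ed25519 keypair (in parallel via rayon). A minimal sketch of the determinism, assuming the same `rand` version this crate already imports:

    use rand::{ChaChaRng, Rng, SeedableRng};

    fn main() {
        // Two generators built from the same seed emit identical byte streams,
        // which is what makes gen_n_keypairs reproducible across runs.
        let mut a = ChaChaRng::from_seed([0u8; 32]);
        let mut b = ChaChaRng::from_seed([0u8; 32]);
        let (mut seed_a, mut seed_b) = ([0u8; 32], [0u8; 32]);
        a.fill(&mut seed_a);
        b.fill(&mut seed_b);
        assert_eq!(seed_a, seed_b);
    }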

impl SecureRandom for GenKeys {
    fn fill(&self, dest: &mut [u8]) -> Result<(), Unspecified> {
        let mut rng = self.generator.borrow_mut();
        rng.fill(dest);
        Ok(())
    }
pub fn read_pkcs8(path: &str) -> Result<Vec<u8>, Box<error::Error>> {
    let file = File::open(path.to_string())?;
    let pkcs8: Vec<u8> = serde_json::from_reader(file)?;
    Ok(pkcs8)
}

pub fn read_keypair(path: &str) -> Result<Keypair, Box<error::Error>> {
    let pkcs8 = read_pkcs8(path)?;
    let keypair = Ed25519KeyPair::from_pkcs8(Input::from(&pkcs8))?;
    Ok(keypair)
}

#[cfg(test)]
@@ -99,15 +141,15 @@ mod tests {
    #[test]
    fn test_new_key_is_deterministic() {
        let seed = [0u8; 32];
        let rng0 = GenKeys::new(seed);
        let rng1 = GenKeys::new(seed);
        let mut gen0 = GenKeys::new(seed);
        let mut gen1 = GenKeys::new(seed);

        for _ in 0..100 {
            assert_eq!(rng0.new_key(), rng1.new_key());
            assert_eq!(gen0.gen_seed().to_vec(), gen1.gen_seed().to_vec());
        }
    }

    fn gen_n_pubkeys(seed: [u8; 32], n: i64) -> HashSet<PublicKey> {
    fn gen_n_pubkeys(seed: [u8; 32], n: i64) -> HashSet<Pubkey> {
        GenKeys::new(seed)
            .gen_n_keypairs(n)
            .into_iter()
@@ -5,6 +5,7 @@
//!

use counter::Counter;
use log::Level;
use packet::{Packet, SharedPackets};
use std::mem::size_of;
use std::sync::atomic::AtomicUsize;
@@ -22,11 +23,13 @@ struct Elems {
#[cfg(feature = "cuda")]
#[link(name = "cuda_verify_ed25519")]
extern "C" {
    fn ed25519_init() -> bool;
    fn ed25519_set_verbose(val: bool);
    fn ed25519_verify_many(
        vecs: *const Elems,
        num: u32,          //number of vecs
        message_size: u32, //size of each element inside the elems field of the vec
        public_key_offset: u32,
        pubkey_offset: u32,
        signature_offset: u32,
        signed_message_offset: u32,
        signed_message_len_offset: u32,
@@ -35,16 +38,20 @@ extern "C" {
}

#[cfg(not(feature = "cuda"))]
pub fn init() {
    // stub
}

fn verify_packet(packet: &Packet) -> u8 {
    use ring::signature;
    use signature::{PublicKey, Signature};
    use signature::{Pubkey, Signature};
    use untrusted;

    let msg_start = TX_OFFSET + SIGNED_DATA_OFFSET;
    let sig_start = TX_OFFSET + SIG_OFFSET;
    let sig_end = sig_start + size_of::<Signature>();
    let pub_key_start = TX_OFFSET + PUB_KEY_OFFSET;
    let pub_key_end = pub_key_start + size_of::<PublicKey>();
    let pubkey_start = TX_OFFSET + PUB_KEY_OFFSET;
    let pubkey_end = pubkey_start + size_of::<Pubkey>();

    if packet.meta.size <= msg_start {
        return 0;
@@ -53,12 +60,17 @@ fn verify_packet(packet: &Packet) -> u8 {
    let msg_end = packet.meta.size;
    signature::verify(
        &signature::ED25519,
        untrusted::Input::from(&packet.data[pub_key_start..pub_key_end]),
        untrusted::Input::from(&packet.data[pubkey_start..pubkey_end]),
        untrusted::Input::from(&packet.data[msg_start..msg_end]),
        untrusted::Input::from(&packet.data[sig_start..sig_end]),
    ).is_ok() as u8
}
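`verify_packet` slices the signature, public key, and signed message straight out of the raw packet bytes at fixed offsets; `TX_OFFSET`, `SIG_OFFSET`, `PUB_KEY_OFFSET`, and `SIGNED_DATA_OFFSET` are constants defined elsewhere in this module. A toy sketch of the same slicing with made-up offsets, just to show how the byte ranges are derived:

    // Hypothetical layout: [ 64-byte signature | 32-byte pubkey | message... ]
    const SIG_OFFSET: usize = 0;
    const PUB_KEY_OFFSET: usize = 64;
    const SIGNED_DATA_OFFSET: usize = 96;

    fn slice_packet(data: &[u8]) -> (&[u8], &[u8], &[u8]) {
        let sig = &data[SIG_OFFSET..SIG_OFFSET + 64];
        let pubkey = &data[PUB_KEY_OFFSET..PUB_KEY_OFFSET + 32];
        let msg = &data[SIGNED_DATA_OFFSET..]; // everything after the header is signed
        (sig, pubkey, msg)
    }

    fn main() {
        let data = vec![0u8; 200];
        let (sig, pubkey, msg) = slice_packet(&data);
        assert_eq!((sig.len(), pubkey.len(), msg.len()), (64, 32, 104));
    }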

fn verify_packet_disabled(_packet: &Packet) -> u8 {
    warn!("signature verification is disabled");
    1
}

fn batch_size(batches: &[SharedPackets]) -> usize {
    batches
        .iter()
@@ -66,11 +78,13 @@ fn batch_size(batches: &[SharedPackets]) -> usize {
        .sum()
}

#[cfg_attr(feature = "cargo-clippy", allow(ptr_arg))]
#[cfg(not(feature = "cuda"))]
pub fn ed25519_verify(batches: &Vec<SharedPackets>) -> Vec<Vec<u8>> {
pub fn ed25519_verify(batches: &[SharedPackets]) -> Vec<Vec<u8>> {
    ed25519_verify_cpu(batches)
}

pub fn ed25519_verify_cpu(batches: &[SharedPackets]) -> Vec<Vec<u8>> {
    use rayon::prelude::*;
    static mut COUNTER: Counter = create_counter!("ed25519_verify", 1);
    let count = batch_size(batches);
    info!("CPU ECDSA for {}", batch_size(batches));
    let rv = batches
@@ -84,15 +98,54 @@ pub fn ed25519_verify(batches: &Vec<SharedPackets>) -> Vec<Vec<u8>> {
                .collect()
        })
        .collect();
    inc_counter!(COUNTER, count);
    inc_new_counter_info!("ed25519_verify", count);
    rv
}

pub fn ed25519_verify_disabled(batches: &[SharedPackets]) -> Vec<Vec<u8>> {
    use rayon::prelude::*;
    let count = batch_size(batches);
    info!("CPU ECDSA for {}", batch_size(batches));
    let rv = batches
        .into_par_iter()
        .map(|p| {
            p.read()
                .expect("'p' read lock in ed25519_verify")
                .packets
                .par_iter()
                .map(verify_packet_disabled)
                .collect()
        })
        .collect();
    inc_new_counter_info!("ed25519_verify", count);
    rv
}

#[cfg(feature = "cuda")]
pub fn ed25519_verify(batches: &Vec<SharedPackets>) -> Vec<Vec<u8>> {
pub fn init() {
    unsafe {
        ed25519_set_verbose(true);
        if !ed25519_init() {
            panic!("ed25519_init() failed");
        }
        ed25519_set_verbose(false);
    }
}

#[cfg(feature = "cuda")]
pub fn ed25519_verify(batches: &[SharedPackets]) -> Vec<Vec<u8>> {
    use packet::PACKET_DATA_SIZE;
    static mut COUNTER: Counter = create_counter!("ed25519_verify_cuda", 1);
    let count = batch_size(batches);

    // micro-benchmarks show GPU time for smallest batch around 15-20ms
    // and CPU speed for 64-128 sigverifies around 10-20ms. 64 is a nice
    // power-of-two number around that accounting for the fact that the CPU
    // may be busy doing other things while being a real fullnode
    // TODO: dynamically adjust this crossover
    if count < 64 {
        return ed25519_verify_cpu(batches);
    }
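The check above is a fixed dispatch heuristic: per the comment, GPU launch latency (roughly 15-20 ms) swamps small batches, so anything under 64 signatures takes the CPU path. A sketch of the crossover decision in isolation (threshold copied from the code, the two backends reduced to labels):

    // Dispatch on batch size: small batches go to the CPU, large ones to the GPU.
    const GPU_CROSSOVER: usize = 64; // same threshold as the code above

    fn verify_dispatch(count: usize) -> &'static str {
        if count < GPU_CROSSOVER {
            "cpu" // GPU launch overhead dominates small batches
        } else {
            "gpu" // amortized over many signatures, the GPU wins
        }
    }

    fn main() {
        assert_eq!(verify_dispatch(10), "cpu");
        assert_eq!(verify_dispatch(500), "gpu");
    }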

    info!("CUDA ECDSA for {}", batch_size(batches));
    let mut out = Vec::new();
    let mut elems = Vec::new();
@@ -121,8 +174,8 @@ pub fn ed25519_verify(batches: &Vec<SharedPackets>) -> Vec<Vec<u8>> {
    trace!("Starting verify num packets: {}", num);
    trace!("elem len: {}", elems.len() as u32);
    trace!("packet sizeof: {}", size_of::<Packet>() as u32);
    trace!("pub key: {}", (TX_OFFSET + PUB_KEY_OFFSET) as u32);
    trace!("sig offset: {}", (TX_OFFSET + SIG_OFFSET) as u32);
    trace!("pubkey: {}", (TX_OFFSET + PUB_KEY_OFFSET) as u32);
    trace!("signature offset: {}", (TX_OFFSET + SIG_OFFSET) as u32);
    trace!("sign data: {}", (TX_OFFSET + SIGNED_DATA_OFFSET) as u32);
    trace!("len offset: {}", PACKET_DATA_SIZE as u32);
    unsafe {
@@ -151,7 +204,7 @@ pub fn ed25519_verify(batches: &Vec<SharedPackets>) -> Vec<Vec<u8>> {
            num += 1;
        }
    }
    inc_counter!(COUNTER, count);
    inc_new_counter_info!("ed25519_verify", count);
    rvs
}
@@ -5,6 +5,8 @@
//! transaction. All processing is done on the CPU by default and on a GPU
//! if the `cuda` feature is enabled with `--features=cuda`.

use influx_db_client as influxdb;
use metrics;
use packet::SharedPackets;
use rand::{thread_rng, Rng};
use result::{Error, Result};
@@ -24,20 +26,30 @@ pub struct SigVerifyStage {
}

impl SigVerifyStage {
    pub fn new(packet_receiver: Receiver<SharedPackets>) -> (Self, Receiver<VerifiedPackets>) {
    pub fn new(
        packet_receiver: Receiver<SharedPackets>,
        sigverify_disabled: bool,
    ) -> (Self, Receiver<VerifiedPackets>) {
        sigverify::init();
        let (verified_sender, verified_receiver) = channel();
        let thread_hdls = Self::verifier_services(packet_receiver, verified_sender);
        let thread_hdls =
            Self::verifier_services(packet_receiver, verified_sender, sigverify_disabled);
        (SigVerifyStage { thread_hdls }, verified_receiver)
    }

    fn verify_batch(batch: Vec<SharedPackets>) -> VerifiedPackets {
        let r = sigverify::ed25519_verify(&batch);
    fn verify_batch(batch: Vec<SharedPackets>, sigverify_disabled: bool) -> VerifiedPackets {
        let r = if sigverify_disabled {
            sigverify::ed25519_verify_disabled(&batch)
        } else {
            sigverify::ed25519_verify(&batch)
        };
        batch.into_iter().zip(r).collect()
    }

    fn verifier(
        recvr: &Arc<Mutex<PacketReceiver>>,
        sendr: &Arc<Mutex<Sender<VerifiedPackets>>>,
        sigverify_disabled: bool,
    ) -> Result<()> {
        let (batch, len) =
            streamer::recv_batch(&recvr.lock().expect("'recvr' lock in fn verifier"))?;
@@ -52,7 +64,7 @@ impl SigVerifyStage {
            rand_id
        );

        let verified_batch = Self::verify_batch(batch);
        let verified_batch = Self::verify_batch(batch, sigverify_disabled);
        sendr
            .lock()
            .expect("lock in fn verify_batch in tpu")
@@ -69,15 +81,28 @@ impl SigVerifyStage {
            len,
            (len as f32 / total_time_s)
        );

        metrics::submit(
            influxdb::Point::new("sigverify_stage-total_verify_time")
                .add_field("batch_len", influxdb::Value::Integer(batch_len as i64))
                .add_field("len", influxdb::Value::Integer(len as i64))
                .add_field(
                    "total_time_ms",
                    influxdb::Value::Integer(total_time_ms as i64),
                )
                .to_owned(),
        );

        Ok(())
    }

    fn verifier_service(
        packet_receiver: Arc<Mutex<PacketReceiver>>,
        verified_sender: Arc<Mutex<Sender<VerifiedPackets>>>,
        sigverify_disabled: bool,
    ) -> JoinHandle<()> {
        spawn(move || loop {
            if let Err(e) = Self::verifier(&packet_receiver.clone(), &verified_sender.clone()) {
            if let Err(e) = Self::verifier(&packet_receiver, &verified_sender, sigverify_disabled) {
                match e {
                    Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => break,
                    Error::RecvTimeoutError(RecvTimeoutError::Timeout) => (),
@@ -90,11 +115,12 @@ impl SigVerifyStage {
    fn verifier_services(
        packet_receiver: PacketReceiver,
        verified_sender: Sender<VerifiedPackets>,
        sigverify_disabled: bool,
    ) -> Vec<JoinHandle<()>> {
        let sender = Arc::new(Mutex::new(verified_sender));
        let receiver = Arc::new(Mutex::new(packet_receiver));
        (0..4)
            .map(|_| Self::verifier_service(receiver.clone(), sender.clone()))
            .map(|_| Self::verifier_service(receiver.clone(), sender.clone(), sigverify_disabled))
            .collect()
    }
}
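`verifier_services` fans one packet channel out to four worker threads by wrapping the receiver in `Arc<Mutex<...>>`; each worker holds the lock only while pulling the next batch. A self-contained sketch of that fan-out pattern (`u64` items stand in for packet batches):

    use std::sync::mpsc::{channel, Receiver};
    use std::sync::{Arc, Mutex};
    use std::thread::{spawn, JoinHandle};

    fn workers(rx: Receiver<u64>) -> Vec<JoinHandle<()>> {
        // mpsc receivers aren't Sync, so the workers share one via a Mutex.
        let shared = Arc::new(Mutex::new(rx));
        (0..4)
            .map(|_| {
                let rx = shared.clone();
                spawn(move || loop {
                    let item = rx.lock().unwrap().recv(); // guard dropped after recv
                    match item {
                        Ok(batch) => {
                            let _ = batch; // a real worker would verify the batch here
                        }
                        Err(_) => break, // all senders dropped
                    }
                })
            })
            .collect()
    }

    fn main() {
        let (tx, rx) = channel();
        let handles = workers(rx);
        for i in 0..8u64 {
            tx.send(i).unwrap();
        }
        drop(tx); // closing the channel lets every worker break out
        for h in handles {
            h.join().unwrap();
        }
    }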
772  src/streamer.rs
@@ -1,35 +1,18 @@
//! The `streamer` module defines a set of services for efficiently pulling data from UDP sockets.
//!
use counter::Counter;
use crdt::{Crdt, CrdtError, NodeInfo};
#[cfg(feature = "erasure")]
use erasure;
use packet::{
    Blob, BlobRecycler, PacketRecycler, SharedBlob, SharedBlobs, SharedPackets, BLOB_SIZE,
};
use packet::{Blob, BlobRecycler, PacketRecycler, SharedBlobs, SharedPackets};
use result::{Error, Result};
use std::cmp;
use std::collections::VecDeque;
use std::mem;
use std::net::{SocketAddr, UdpSocket};
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::net::UdpSocket;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::{Receiver, RecvTimeoutError, Sender};
use std::sync::{Arc, RwLock};
use std::sync::Arc;
use std::thread::{Builder, JoinHandle};
use std::time::Duration;

const LOG_RATE: usize = 10;
pub const WINDOW_SIZE: u64 = 2 * 1024;
pub type PacketReceiver = Receiver<SharedPackets>;
pub type PacketSender = Sender<SharedPackets>;
pub type BlobSender = Sender<SharedBlobs>;
pub type BlobReceiver = Receiver<SharedBlobs>;
pub type Window = Arc<RwLock<Vec<Option<SharedBlob>>>>;

#[derive(Debug, PartialEq, Eq)]
pub enum WindowError {
    GenericError,
}

fn recv_loop(
    sock: &UdpSocket,
@@ -40,7 +23,8 @@ fn recv_loop(
    loop {
        let msgs = re.allocate();
        loop {
            let result = msgs.write()
            let result = msgs
                .write()
                .expect("write lock in fn recv_loop")
                .recv_from(sock);
            match result {
@@ -117,7 +101,7 @@ pub fn responder(
                match e {
                    Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => break,
                    Error::RecvTimeoutError(RecvTimeoutError::Timeout) => (),
                    _ => error!("{} responder error: {:?}", name, e),
                    _ => warn!("{} responder error: {:?}", name, e),
                }
            }
        })
@@ -127,7 +111,7 @@ pub fn responder(
//TODO, we would need to stick block authentication before we create the
//window.
fn recv_blobs(recycler: &BlobRecycler, sock: &UdpSocket, s: &BlobSender) -> Result<()> {
    trace!("receiving on {}", sock.local_addr().unwrap());
    trace!("recv_blobs: receiving on {}", sock.local_addr().unwrap());
    let dq = Blob::recv_from(recycler, sock)?;
    if !dq.is_empty() {
        s.send(dq)?;
@@ -157,654 +141,8 @@ pub fn blob_receiver(
    Ok(t)
}

fn find_next_missing(
    locked_window: &Window,
    crdt: &Arc<RwLock<Crdt>>,
    consumed: &mut u64,
    received: &mut u64,
) -> Result<Vec<(SocketAddr, Vec<u8>)>> {
    if *received <= *consumed {
        Err(WindowError::GenericError)?;
    }
    let window = locked_window.read().unwrap();
    let reqs: Vec<_> = (*consumed..*received)
        .filter_map(|pix| {
            let i = (pix % WINDOW_SIZE) as usize;
            if window[i].is_none() {
                let val = crdt.read().unwrap().window_index_request(pix as u64);
                if let Ok((to, req)) = val {
                    return Some((to, req));
                }
            }
            None
        })
        .collect();
    Ok(reqs)
}

fn repair_window(
    debug_id: u64,
    locked_window: &Window,
    crdt: &Arc<RwLock<Crdt>>,
    _recycler: &BlobRecycler,
    last: &mut u64,
    times: &mut usize,
    consumed: &mut u64,
    received: &mut u64,
) -> Result<()> {
    #[cfg(feature = "erasure")]
    {
        if erasure::recover(
            _recycler,
            &mut locked_window.write().unwrap(),
            *consumed as usize,
            *received as usize,
        ).is_err()
        {
            trace!("erasure::recover failed");
        }
    }
    //exponential backoff
    if *last != *consumed {
        *times = 0;
    }
    *last = *consumed;
    *times += 1;
    //retry only when times crosses a power of two (7 -> 8, 15 -> 16, ...); otherwise return Ok
    if *times & (*times - 1) != 0 {
        trace!(
            "repair_window counter {} {} {}",
            *times,
            *consumed,
            *received
        );
        return Ok(());
    }

    let reqs = find_next_missing(locked_window, crdt, consumed, received)?;
    trace!("{:x}: repair_window missing: {}", debug_id, reqs.len());
    if !reqs.is_empty() {
        static mut COUNTER_REPAIR: Counter =
            create_counter!("streamer-repair_window-repair", LOG_RATE);
        inc_counter!(COUNTER_REPAIR, reqs.len());
        debug!(
            "{:x}: repair_window counter times: {} consumed: {} received: {} missing: {}",
            debug_id,
            *times,
            *consumed,
            *received,
            reqs.len()
        );
    }
    let sock = UdpSocket::bind("0.0.0.0:0")?;
    for (to, req) in reqs {
        //todo cache socket
        debug!(
            "{:x} repair_window request {} {} {}",
            debug_id, *consumed, *received, to
        );
        assert!(req.len() < BLOB_SIZE);
        sock.send_to(&req, to)?;
    }
    Ok(())
}
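The guard `*times & (*times - 1) != 0` is the classic power-of-two test: repair requests only go out on calls 1, 2, 4, 8, 16, ..., so a stalled window is re-requested at exponentially growing intervals. A small sketch demonstrating the predicate:

    // `times & (times - 1)` clears the lowest set bit, so the result is zero
    // exactly when `times` is a power of two (for times >= 1).
    fn is_power_of_two(times: usize) -> bool {
        times & (times - 1) == 0
    }

    fn main() {
        let fired: Vec<usize> = (1..=20).filter(|&t| is_power_of_two(t)).collect();
        assert_eq!(fired, vec![1, 2, 4, 8, 16]); // exponentially spaced retries
    }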
|
||||
fn retransmit_all_leader_blocks(
|
||||
maybe_leader: Option<NodeInfo>,
|
||||
dq: &mut SharedBlobs,
|
||||
debug_id: u64,
|
||||
recycler: &BlobRecycler,
|
||||
consumed: &mut u64,
|
||||
received: &mut u64,
|
||||
retransmit: &BlobSender,
|
||||
) -> Result<()> {
|
||||
let mut retransmit_queue = VecDeque::new();
|
||||
if let Some(leader) = maybe_leader {
|
||||
for b in dq {
|
||||
let p = b.read().expect("'b' read lock in fn recv_window");
|
||||
            //TODO this check isn't safe against adversarial packets
            //we need to maintain a sequence window
            let leader_id = leader.id;
            trace!(
                "idx: {} addr: {:?} id: {:?} leader: {:?}",
                p.get_index().expect("get_index in fn recv_window"),
                p.get_id().expect("get_id in trace! fn recv_window"),
                p.meta.addr(),
                leader_id
            );
            if p.get_id().expect("get_id in fn recv_window") == leader_id {
                //TODO
                //need to copy the retransmitted blob
                //otherwise we get into races with which thread
                //should do the recycling
                //
                //a better abstraction would be to recycle when the blob
                //is dropped via a weakref to the recycler
                let nv = recycler.allocate();
                {
                    let mut mnv = nv.write().expect("recycler write lock in fn recv_window");
                    let sz = p.meta.size;
                    mnv.meta.size = sz;
                    mnv.data[..sz].copy_from_slice(&p.data[..sz]);
                }
                retransmit_queue.push_back(nv);
            }
        }
    } else {
        warn!("{:x}: no leader to retransmit from", debug_id);
    }
    if !retransmit_queue.is_empty() {
        debug!(
            "{:x}: RECV_WINDOW {} {}: retransmit {}",
            debug_id,
            *consumed,
            *received,
            retransmit_queue.len(),
        );
        static mut COUNTER_RETRANSMIT: Counter =
            create_counter!("streamer-recv_window-retransmit", LOG_RATE);
        inc_counter!(COUNTER_RETRANSMIT, retransmit_queue.len());
        retransmit.send(retransmit_queue)?;
    }
    Ok(())
}

fn process_blob(
    b: SharedBlob,
    pix: u64,
    w: usize,
    consume_queue: &mut SharedBlobs,
    locked_window: &Window,
    debug_id: u64,
    recycler: &BlobRecycler,
    consumed: &mut u64,
) {
    let mut window = locked_window.write().unwrap();

    // Search the window for old blobs in the window
    // of consumed to received and clear any old ones
    for ix in *consumed..(pix + 1) {
        let k = (ix % WINDOW_SIZE) as usize;
        if let Some(b) = &mut window[k] {
            if b.read().unwrap().get_index().unwrap() >= *consumed as u64 {
                continue;
            }
        }
        if let Some(b) = mem::replace(&mut window[k], None) {
            recycler.recycle(b);
        }
    }

    // Insert the new blob into the window
    // spot should be free because we cleared it above
    if window[w].is_none() {
        window[w] = Some(b);
    } else if let Some(cblob) = &window[w] {
        if cblob.read().unwrap().get_index().unwrap() != pix as u64 {
            warn!("{:x}: overrun blob at index {:}", debug_id, w);
        } else {
            debug!("{:x}: duplicate blob at index {:}", debug_id, w);
        }
    }
    loop {
        let k = (*consumed % WINDOW_SIZE) as usize;
        trace!("k: {} consumed: {}", k, *consumed);

        if window[k].is_none() {
            break;
        }
        let mut is_coding = false;
        if let Some(ref cblob) = window[k] {
            let cblob_r = cblob
                .read()
                .expect("blob read lock for flogs streamer::window");
            if cblob_r.get_index().unwrap() < *consumed {
                break;
            }
            if cblob_r.is_coding() {
                is_coding = true;
            }
        }
        if !is_coding {
            consume_queue.push_back(window[k].clone().expect("clone in fn recv_window"));
            *consumed += 1;
        } else {
            #[cfg(feature = "erasure")]
            {
                let block_start = *consumed - (*consumed % erasure::NUM_CODED as u64);
                let coding_end = block_start + erasure::NUM_CODED as u64;
                // We've received all this block's data blobs, go and null out the window now
                for j in block_start..*consumed {
                    if let Some(b) = mem::replace(&mut window[(j % WINDOW_SIZE) as usize], None) {
                        recycler.recycle(b);
                    }
                }
                for j in *consumed..coding_end {
                    window[(j % WINDOW_SIZE) as usize] = None;
                }

                *consumed += erasure::MAX_MISSING as u64;
                debug!(
                    "skipping processing coding blob k: {} consumed: {}",
                    k, *consumed
                );
            }
        }
    }
}
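Throughout the window code, a blob with ledger index `pix` lands in slot `pix % WINDOW_SIZE`, making the 2048-entry vector a ring buffer over the most recent stretch of the stream; indices one full wrap apart collide, which is why stale slots must be recycled first. A minimal sketch of the mapping:

    const WINDOW_SIZE: u64 = 2 * 1024;

    // Each blob index maps onto a fixed slot; indices that differ by a
    // multiple of WINDOW_SIZE share a slot.
    fn slot(pix: u64) -> usize {
        (pix % WINDOW_SIZE) as usize
    }

    fn main() {
        assert_eq!(slot(5), 5);
        assert_eq!(slot(2048 + 5), 5); // one full wrap later, same slot
    }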

fn recv_window(
    debug_id: u64,
    locked_window: &Window,
    crdt: &Arc<RwLock<Crdt>>,
    recycler: &BlobRecycler,
    consumed: &mut u64,
    received: &mut u64,
    r: &BlobReceiver,
    s: &BlobSender,
    retransmit: &BlobSender,
) -> Result<()> {
    let timer = Duration::from_millis(200);
    let mut dq = r.recv_timeout(timer)?;
    let maybe_leader: Option<NodeInfo> = crdt.read()
        .expect("'crdt' read lock in fn recv_window")
        .leader_data()
        .cloned();
    while let Ok(mut nq) = r.try_recv() {
        dq.append(&mut nq)
    }
    static mut COUNTER_RECV: Counter = create_counter!("streamer-recv_window-recv", LOG_RATE);
    inc_counter!(COUNTER_RECV, dq.len());
    debug!(
        "{:x}: RECV_WINDOW {} {}: got packets {}",
        debug_id,
        *consumed,
        *received,
        dq.len(),
    );

    retransmit_all_leader_blocks(
        maybe_leader,
        &mut dq,
        debug_id,
        recycler,
        consumed,
        received,
        retransmit,
    )?;

    //send a contiguous set of blocks
    let mut consume_queue = VecDeque::new();
    while let Some(b) = dq.pop_front() {
        let (pix, meta_size) = {
            let p = b.write().expect("'b' write lock in fn recv_window");
            (p.get_index()?, p.meta.size)
        };
        if pix > *received {
            *received = pix;
        }
        // Got a blob which has already been consumed, skip it
        // probably from a repair window request
        if pix < *consumed {
            debug!(
                "{:x}: received: {} but older than consumed: {} skipping..",
                debug_id, pix, *consumed
            );
            continue;
        }
        let w = (pix % WINDOW_SIZE) as usize;
        //TODO, after the block are authenticated
        //if we get different blocks at the same index
        //that is a network failure/attack
        trace!("window w: {} size: {}", w, meta_size);

        process_blob(
            b,
            pix,
            w,
            &mut consume_queue,
            locked_window,
            debug_id,
            recycler,
            consumed,
        );
    }
    print_window(debug_id, locked_window, *consumed);
    trace!("sending consume_queue.len: {}", consume_queue.len());
    if !consume_queue.is_empty() {
        debug!(
            "{:x}: RECV_WINDOW {} {}: forwarding consume_queue {}",
            debug_id,
            *consumed,
            *received,
            consume_queue.len(),
        );
        trace!("sending consume_queue.len: {}", consume_queue.len());
        static mut COUNTER_CONSUME: Counter =
            create_counter!("streamer-recv_window-consume", LOG_RATE);
        inc_counter!(COUNTER_CONSUME, consume_queue.len());
        s.send(consume_queue)?;
    }
    Ok(())
}

fn print_window(debug_id: u64, locked_window: &Window, consumed: u64) {
    {
        let buf: Vec<_> = locked_window
            .read()
            .unwrap()
            .iter()
            .enumerate()
            .map(|(i, v)| {
                if i == (consumed % WINDOW_SIZE) as usize {
                    "_"
                } else if v.is_none() {
                    "0"
                } else if let Some(ref cblob) = v {
                    if cblob.read().unwrap().is_coding() {
                        "C"
                    } else {
                        "1"
                    }
                } else {
                    "0"
                }
            })
            .collect();
        trace!("{:x}:WINDOW ({}): {}", debug_id, consumed, buf.join(""));
    }
}

pub fn default_window() -> Window {
    Arc::new(RwLock::new(vec![None; WINDOW_SIZE as usize]))
}

/// Initialize a rebroadcast window with most recent Entry blobs
/// * `crdt` - gossip instance, used to set blob ids
/// * `blobs` - up to WINDOW_SIZE most recent blobs
/// * `entry_height` - current entry height
pub fn initialized_window(
    crdt: &Arc<RwLock<Crdt>>,
    blobs: Vec<SharedBlob>,
    entry_height: u64,
) -> Window {
    let window = default_window();

    {
        let mut win = window.write().unwrap();
        let me = crdt.read().unwrap().my_data().clone();

        debug!(
            "initialized window entry_height:{} blobs_len:{}",
            entry_height,
            blobs.len()
        );

        // Index the blobs
        let mut received = entry_height - blobs.len() as u64;
        Crdt::index_blobs(&me, &blobs, &mut received).expect("index blobs for initial window");

        // populate the window, offset by implied index
        let diff = cmp::max(blobs.len() as isize - win.len() as isize, 0) as usize;
        for b in blobs.into_iter().skip(diff) {
            let ix = b.read().unwrap().get_index().expect("blob index");
            let pos = (ix % WINDOW_SIZE) as usize;
            trace!("caching {} at {}", ix, pos);
            assert!(win[pos].is_none());
            win[pos] = Some(b);
        }
    }

    window
}

pub fn window(
    crdt: Arc<RwLock<Crdt>>,
    window: Window,
    entry_height: u64,
    recycler: BlobRecycler,
    r: BlobReceiver,
    s: BlobSender,
    retransmit: BlobSender,
) -> JoinHandle<()> {
    Builder::new()
        .name("solana-window".to_string())
        .spawn(move || {
            let mut consumed = entry_height;
            let mut received = entry_height;
            let mut last = entry_height;
            let mut times = 0;
            let debug_id = crdt.read().unwrap().debug_id();
            trace!("{:x}: RECV_WINDOW started", debug_id);
            loop {
                if let Err(e) = recv_window(
                    debug_id,
                    &window,
                    &crdt,
                    &recycler,
                    &mut consumed,
                    &mut received,
                    &r,
                    &s,
                    &retransmit,
                ) {
                    match e {
                        Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => break,
                        Error::RecvTimeoutError(RecvTimeoutError::Timeout) => (),
                        _ => error!("window error: {:?}", e),
                    }
                }
                let _ = repair_window(
                    debug_id,
                    &window,
                    &crdt,
                    &recycler,
                    &mut last,
                    &mut times,
                    &mut consumed,
                    &mut received,
                );
                assert!(consumed <= (received + 1));
            }
        })
        .unwrap()
}

fn broadcast(
    me: &NodeInfo,
    broadcast_table: &[NodeInfo],
    window: &Window,
    recycler: &BlobRecycler,
    r: &BlobReceiver,
    sock: &UdpSocket,
    transmit_index: &mut u64,
    receive_index: &mut u64,
) -> Result<()> {
    let debug_id = me.debug_id();
    let timer = Duration::new(1, 0);
    let mut dq = r.recv_timeout(timer)?;
    while let Ok(mut nq) = r.try_recv() {
        dq.append(&mut nq);
    }

    // flatten deque to vec
    let blobs_vec: Vec<_> = dq.into_iter().collect();

    // We could receive more blobs than window slots so
    // break them up into window-sized chunks to process
    let blobs_chunked = blobs_vec.chunks(WINDOW_SIZE as usize).map(|x| x.to_vec());

    print_window(me.debug_id(), window, *receive_index);

    for mut blobs in blobs_chunked {
        // Insert the coding blobs into the blob stream
        #[cfg(feature = "erasure")]
        erasure::add_coding_blobs(recycler, &mut blobs, *receive_index);

        let blobs_len = blobs.len();
        debug!("{:x} broadcast blobs.len: {}", debug_id, blobs_len);

        // Index the blobs
        Crdt::index_blobs(&me, &blobs, receive_index)?;
        // keep the cache of blobs that are broadcast
        static mut COUNTER_BROADCAST: Counter =
            create_counter!("streamer-broadcast-sent", LOG_RATE);
        inc_counter!(COUNTER_BROADCAST, blobs.len());
        {
            let mut win = window.write().unwrap();
            assert!(blobs.len() <= win.len());
            for b in &blobs {
                let ix = b.read().unwrap().get_index().expect("blob index");
                let pos = (ix % WINDOW_SIZE) as usize;
                if let Some(x) = mem::replace(&mut win[pos], None) {
                    trace!(
                        "popped {} at {}",
                        x.read().unwrap().get_index().unwrap(),
                        pos
                    );
                    recycler.recycle(x);
                }
                trace!("null {}", pos);
            }
            while let Some(b) = blobs.pop() {
                let ix = b.read().unwrap().get_index().expect("blob index");
                let pos = (ix % WINDOW_SIZE) as usize;
                trace!("caching {} at {}", ix, pos);
                assert!(win[pos].is_none());
                win[pos] = Some(b);
            }
        }

        // Fill in the coding blob data from the window data blobs
        #[cfg(feature = "erasure")]
        {
            erasure::generate_coding(
                &mut window.write().unwrap(),
                *receive_index as usize,
                blobs_len,
            )?;
        }

        *receive_index += blobs_len as u64;

        // Send blobs out from the window
        Crdt::broadcast(
            &me,
            &broadcast_table,
            &window,
            &sock,
            transmit_index,
            *receive_index,
        )?;
    }
    Ok(())
}

/// Service to broadcast messages from the leader to layer 1 nodes.
/// See `crdt` for network layer definitions.
/// # Arguments
/// * `sock` - Socket to send from.
/// * `exit` - Boolean to signal system exit.
/// * `crdt` - CRDT structure
/// * `window` - Cache of blobs that we have broadcast
/// * `recycler` - Blob recycler.
/// * `r` - Receive channel for blobs to be retransmitted to all the layer 1 nodes.
pub fn broadcaster(
    sock: UdpSocket,
    crdt: Arc<RwLock<Crdt>>,
    window: Window,
    entry_height: u64,
    recycler: BlobRecycler,
    r: BlobReceiver,
) -> JoinHandle<()> {
    Builder::new()
        .name("solana-broadcaster".to_string())
        .spawn(move || {
            let mut transmit_index = entry_height;
            let mut receive_index = entry_height;
            let me = crdt.read().unwrap().my_data().clone();
            loop {
                let broadcast_table = crdt.read().unwrap().compute_broadcast_table();
                if let Err(e) = broadcast(
                    &me,
                    &broadcast_table,
                    &window,
                    &recycler,
                    &r,
                    &sock,
                    &mut transmit_index,
                    &mut receive_index,
                ) {
                    match e {
                        Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => break,
                        Error::RecvTimeoutError(RecvTimeoutError::Timeout) => (),
                        Error::CrdtError(CrdtError::TooSmall) => (), // TODO: Why are the unit-tests throwing hundreds of these?
                        _ => error!("broadcaster error: {:?}", e),
                    }
                }
            }
        })
        .unwrap()
}

fn retransmit(
    crdt: &Arc<RwLock<Crdt>>,
    recycler: &BlobRecycler,
    r: &BlobReceiver,
    sock: &UdpSocket,
) -> Result<()> {
    let timer = Duration::new(1, 0);
    let mut dq = r.recv_timeout(timer)?;
    while let Ok(mut nq) = r.try_recv() {
        dq.append(&mut nq);
    }
    {
        for b in &dq {
            Crdt::retransmit(&crdt, b, sock)?;
        }
    }
    while let Some(b) = dq.pop_front() {
        recycler.recycle(b);
    }
    Ok(())
}

/// Service to retransmit messages from the leader to layer 1 nodes.
/// See `crdt` for network layer definitions.
/// # Arguments
/// * `sock` - Socket to read from. Read timeout is set to 1 second.
/// * `exit` - Boolean to signal system exit.
/// * `crdt` - This structure needs to be updated and populated by the bank and via gossip.
/// * `recycler` - Blob recycler.
/// * `r` - Receive channel for blobs to be retransmitted to all the layer 1 nodes.
pub fn retransmitter(
    sock: UdpSocket,
    crdt: Arc<RwLock<Crdt>>,
    recycler: BlobRecycler,
    r: BlobReceiver,
) -> JoinHandle<()> {
    Builder::new()
        .name("solana-retransmitter".to_string())
        .spawn(move || {
            trace!("retransmitter started");
            loop {
                if let Err(e) = retransmit(&crdt, &recycler, &r, &sock) {
                    match e {
                        Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => break,
                        Error::RecvTimeoutError(RecvTimeoutError::Timeout) => (),
                        _ => error!("retransmitter error: {:?}", e),
                    }
                }
            }
            trace!("exiting retransmitter");
        })
        .unwrap()
}

#[cfg(test)]
mod test {
    use crdt::{Crdt, TestNode};
    use logger;
    use packet::{Blob, BlobRecycler, Packet, PacketRecycler, Packets, PACKET_DATA_SIZE};
    use std::collections::VecDeque;
    use std::io;
@@ -812,10 +150,10 @@ mod test {
    use std::net::UdpSocket;
    use std::sync::atomic::{AtomicBool, Ordering};
    use std::sync::mpsc::channel;
    use std::sync::{Arc, RwLock};
    use std::sync::Arc;
    use std::time::Duration;
    use streamer::{blob_receiver, receiver, responder, window};
    use streamer::{default_window, BlobReceiver, PacketReceiver};
    use streamer::PacketReceiver;
    use streamer::{receiver, responder};

    fn get_msgs(r: PacketReceiver, num: &mut usize) {
        for _t in 0..5 {
@@ -877,92 +215,4 @@ mod test {
        t_receiver.join().expect("join");
        t_responder.join().expect("join");
    }

    fn get_blobs(r: BlobReceiver, num: &mut usize) {
        for _t in 0..5 {
            let timer = Duration::new(1, 0);
            match r.recv_timeout(timer) {
                Ok(m) => {
                    for (i, v) in m.iter().enumerate() {
                        assert_eq!(v.read().unwrap().get_index().unwrap() as usize, *num + i);
                    }
                    *num += m.len();
                }
                e => info!("error {:?}", e),
            }
            if *num == 10 {
                break;
            }
        }
    }

    #[test]
    pub fn window_send_test() {
        logger::setup();
        let tn = TestNode::new();
        let exit = Arc::new(AtomicBool::new(false));
        let mut crdt_me = Crdt::new(tn.data.clone());
        let me_id = crdt_me.my_data().id;
        crdt_me.set_leader(me_id);
        let subs = Arc::new(RwLock::new(crdt_me));

        let resp_recycler = BlobRecycler::default();
        let (s_reader, r_reader) = channel();
        let t_receiver = blob_receiver(
            exit.clone(),
            resp_recycler.clone(),
            tn.sockets.gossip,
            s_reader,
        ).unwrap();
        let (s_window, r_window) = channel();
        let (s_retransmit, r_retransmit) = channel();
        let win = default_window();
        let t_window = window(
            subs,
            win,
            0,
            resp_recycler.clone(),
            r_reader,
            s_window,
            s_retransmit,
        );
        let t_responder = {
            let (s_responder, r_responder) = channel();
            let t_responder = responder(
                "window_send_test",
                tn.sockets.replicate,
                resp_recycler.clone(),
                r_responder,
            );
            let mut msgs = VecDeque::new();
            for v in 0..10 {
                let i = 9 - v;
                let b = resp_recycler.allocate();
                {
                    let mut w = b.write().unwrap();
                    w.set_index(i).unwrap();
                    w.set_id(me_id).unwrap();
                    assert_eq!(i, w.get_index().unwrap());
                    w.meta.size = PACKET_DATA_SIZE;
                    w.meta.set_addr(&tn.data.contact_info.ncp);
                }
                msgs.push_back(b);
            }
            s_responder.send(msgs).expect("send");
            t_responder
        };

        let mut num = 0;
        get_blobs(r_window, &mut num);
        assert_eq!(num, 10);
        let mut q = r_retransmit.recv().unwrap();
        while let Ok(mut nq) = r_retransmit.try_recv() {
            q.append(&mut nq);
        }
        assert_eq!(q.len(), 10);
        exit.store(true, Ordering::Relaxed);
        t_receiver.join().expect("join");
        t_responder.join().expect("join");
        t_window.join().expect("join");
    }
}
181  src/thin_client.rs  Normal file → Executable file
@@ -6,10 +6,12 @@
use bincode::{deserialize, serialize};
use hash::Hash;
use request::{Request, Response};
use signature::{KeyPair, PublicKey, Signature};
use signature::{Keypair, Pubkey, Signature};
use std::collections::HashMap;
use std::io;
use std::net::{SocketAddr, UdpSocket};
use std::thread::sleep;
use std::time::Duration;
use std::time::Instant;
use timing;
use transaction::Transaction;
@@ -25,8 +27,9 @@ pub struct ThinClient {
    transactions_socket: UdpSocket,
    last_id: Option<Hash>,
    transaction_count: u64,
    balances: HashMap<PublicKey, i64>,
    balances: HashMap<Pubkey, i64>,
    signature_status: bool,
    finality: Option<usize>,
}

impl ThinClient {
@@ -48,6 +51,7 @@ impl ThinClient {
            transaction_count: 0,
            balances: HashMap::new(),
            signature_status: false,
            finality: None,
        }
    }

@@ -81,29 +85,33 @@ impl ThinClient {
                    trace!("Response signature not found");
                }
            }
            Response::Finality { time } => {
                trace!("Response finality {:?}", time);
                self.finality = Some(time);
            }
        }
    }

    /// Send a signed Transaction to the server for processing. This method
    /// does not wait for a response.
    pub fn transfer_signed(&self, tx: &Transaction) -> io::Result<usize> {
    pub fn transfer_signed(&self, tx: &Transaction) -> io::Result<Signature> {
        let data = serialize(&tx).expect("serialize Transaction in pub fn transfer_signed");
        self.transactions_socket
            .send_to(&data, &self.transactions_addr)
            .send_to(&data, &self.transactions_addr)?;
        Ok(tx.signature)
    }
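With this change, `transfer_signed` returns the transaction's `Signature` rather than the byte count from `send_to`, so a caller can immediately poll for confirmation. A sketch of the intended call pattern inside a function returning `io::Result<()>` (`client` and `tx` are hypothetical here):

    // Fire-and-forget send, then block until the bank has seen the signature.
    let signature = client.transfer_signed(&tx)?;
    client.poll_for_signature(&signature)?; // defined later in this file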
|
||||
/// Creates, signs, and processes a Transaction. Useful for writing unit-tests.
|
||||
pub fn transfer(
|
||||
&self,
|
||||
n: i64,
|
||||
keypair: &KeyPair,
|
||||
to: PublicKey,
|
||||
keypair: &Keypair,
|
||||
to: Pubkey,
|
||||
last_id: &Hash,
|
||||
) -> io::Result<Signature> {
|
||||
let now = Instant::now();
|
||||
let tx = Transaction::new(keypair, to, n, *last_id);
|
||||
let sig = tx.sig;
|
||||
let result = self.transfer_signed(&tx).map(|_| sig);
|
||||
let result = self.transfer_signed(&tx);
|
||||
metrics::submit(
|
||||
influxdb::Point::new("thinclient")
|
||||
.add_tag("op", influxdb::Value::String("transfer".to_string()))
|
||||
@ -119,7 +127,7 @@ impl ThinClient {
|
||||
/// Request the balance of the user holding `pubkey`. This method blocks
|
||||
/// until the server sends a response. If the response packet is dropped
|
||||
/// by the network, this method will hang indefinitely.
|
||||
pub fn get_balance(&mut self, pubkey: &PublicKey) -> io::Result<i64> {
|
||||
pub fn get_balance(&mut self, pubkey: &Pubkey) -> io::Result<i64> {
|
||||
trace!("get_balance");
|
||||
let req = Request::GetBalance { key: *pubkey };
|
||||
let data = serialize(&req).expect("serialize GetBalance in pub fn get_balance");
|
||||
@ -141,10 +149,37 @@ impl ThinClient {
|
||||
.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "nokey"))
|
||||
}
|
||||
|
||||
/// Request the finality from the leader node
|
||||
pub fn get_finality(&mut self) -> usize {
|
||||
trace!("get_finality");
|
||||
let req = Request::GetFinality;
|
||||
let data = serialize(&req).expect("serialize GetFinality in pub fn get_finality");
|
||||
let mut done = false;
|
||||
while !done {
|
||||
debug!("get_finality send_to {}", &self.requests_addr);
|
||||
self.requests_socket
|
||||
.send_to(&data, &self.requests_addr)
|
||||
.expect("buffer error in pub fn get_finality");
|
||||
|
||||
match self.recv_response() {
|
||||
Ok(resp) => {
|
||||
if let Response::Finality { .. } = resp {
|
||||
done = true;
|
||||
}
|
||||
self.process_response(&resp);
|
||||
}
|
||||
Err(e) => {
|
||||
debug!("thin_client get_finality error: {}", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
self.finality.expect("some finality")
|
||||
}
|
||||
|
||||
/// Request the transaction count. If the response packet is dropped by the network,
|
||||
/// this method will hang.
|
||||
pub fn transaction_count(&mut self) -> u64 {
|
||||
info!("transaction_count");
|
||||
debug!("transaction_count");
|
||||
let req = Request::GetTransactionCount;
|
||||
let data =
|
||||
serialize(&req).expect("serialize GetTransactionCount in pub fn transaction_count");
|
||||
@ -155,7 +190,7 @@ impl ThinClient {
|
||||
.expect("buffer error in pub fn transaction_count");
|
||||
|
||||
if let Ok(resp) = self.recv_response() {
|
||||
info!("recv_response {:?}", resp);
|
||||
debug!("transaction_count recv_response: {:?}", resp);
|
||||
if let Response::TransactionCount { .. } = resp {
|
||||
done = true;
|
||||
}
|
||||
@@ -193,14 +228,19 @@ impl ThinClient {
         self.last_id.expect("some last_id")
     }

-    pub fn poll_get_balance(&mut self, pubkey: &PublicKey) -> io::Result<i64> {
-        let mut balance;
+    pub fn poll_get_balance(&mut self, pubkey: &Pubkey) -> io::Result<i64> {
+        let mut balance_result;
+        let mut balance_value = -1;
         let now = Instant::now();
         loop {
-            balance = self.get_balance(pubkey);
-            if balance.is_ok() && *balance.as_ref().unwrap() != 0 || now.elapsed().as_secs() > 1 {
+            balance_result = self.get_balance(pubkey);
+            if balance_result.is_ok() {
+                balance_value = *balance_result.as_ref().unwrap();
+            }
+            if balance_value > 0 || now.elapsed().as_secs() > 1 {
                 break;
             }
             sleep(Duration::from_millis(100));
         }
         metrics::submit(
             influxdb::Point::new("thinclient")
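Aside (not from the diff): the rewritten loop is an instance of a general "poll until a predicate holds or a deadline passes" pattern. A minimal std-only sketch of that helper, with illustrative names:

use std::thread::sleep;
use std::time::{Duration, Instant};

fn poll_until<T>(mut probe: impl FnMut() -> Option<T>, deadline: Duration) -> Option<T> {
    let start = Instant::now();
    loop {
        if let Some(v) = probe() {
            return Some(v); // predicate satisfied
        }
        if start.elapsed() > deadline {
            return None; // give up; the caller decides how to report the timeout
        }
        sleep(Duration::from_millis(100));
    }
}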
@@ -211,14 +251,34 @@ impl ThinClient {
                 )
                 .to_owned(),
         );
-        balance
+        if balance_value >= 0 {
+            Ok(balance_value)
+        } else {
+            assert!(balance_result.is_err());
+            balance_result
+        }
     }

+    /// Poll the server to confirm a transaction.
+    pub fn poll_for_signature(&mut self, signature: &Signature) -> io::Result<()> {
+        let now = Instant::now();
+        while !self.check_signature(signature) {
+            if now.elapsed().as_secs() > 1 {
+                // TODO: Return a better error.
+                return Err(io::Error::new(io::ErrorKind::Other, "signature not found"));
+            }
+            sleep(Duration::from_millis(100));
+        }
+        Ok(())
+    }
+
     /// Check a signature in the bank. This method blocks
     /// until the server sends a response.
-    pub fn check_signature(&mut self, sig: &Signature) -> bool {
+    pub fn check_signature(&mut self, signature: &Signature) -> bool {
         trace!("check_signature");
-        let req = Request::GetSignature { signature: *sig };
+        let req = Request::GetSignature {
+            signature: *signature,
+        };
         let data = serialize(&req).expect("serialize GetSignature in pub fn check_signature");
         let now = Instant::now();
         let mut done = false;
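Aside (not from the diff): poll_for_signature wraps a boolean probe in an io::Result so callers can chain it with `?`. The same shape, self-contained and with an illustrative name:

use std::io;
use std::thread::sleep;
use std::time::{Duration, Instant};

fn confirm(mut seen: impl FnMut() -> bool) -> io::Result<()> {
    let now = Instant::now();
    while !seen() {
        if now.elapsed().as_secs() > 1 {
            // Mirrors the TODO above: ErrorKind::Other is a placeholder error type.
            return Err(io::Error::new(io::ErrorKind::Other, "signature not found"));
        }
        sleep(Duration::from_millis(100));
    }
    Ok(())
}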
@@ -259,37 +319,52 @@ mod tests {
     use bank::Bank;
     use budget::Budget;
     use crdt::TestNode;
-    use fullnode::FullNode;
+    use fullnode::Fullnode;
+    use ledger::LedgerWriter;
     use logger;
     use mint::Mint;
     use service::Service;
-    use signature::{KeyPair, KeyPairUtil};
-    use std::io::sink;
+    use signature::{Keypair, KeypairUtil};
+    use std::fs::remove_dir_all;
     use std::sync::atomic::{AtomicBool, Ordering};
     use std::sync::Arc;
     use std::thread::sleep;
     use std::time::Duration;
     use transaction::{Instruction, Plan};

+    fn tmp_ledger(name: &str, mint: &Mint) -> String {
+        use std::env;
+        let out_dir = env::var("OUT_DIR").unwrap_or_else(|_| "target".to_string());
+        let keypair = Keypair::new();
+
+        let path = format!("{}/tmp-ledger-{}-{}", out_dir, name, keypair.pubkey());
+
+        let mut writer = LedgerWriter::open(&path, true).unwrap();
+        writer.write_entries(mint.create_entries()).unwrap();
+
+        path
+    }
+
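Aside (not from the diff): tmp_ledger keys each test ledger by a freshly generated pubkey so concurrently running tests never collide on disk. The same idea using only std, with a timestamp nonce standing in for the random pubkey:

use std::time::{SystemTime, UNIX_EPOCH};

fn tmp_path(name: &str) -> String {
    // Fall back to "target" when not running under a build script, as above.
    let out_dir = std::env::var("OUT_DIR").unwrap_or_else(|_| "target".to_string());
    let nonce = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap()
        .as_nanos();
    format!("{}/tmp-ledger-{}-{}", out_dir, name, nonce)
}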
     #[test]
     fn test_thin_client() {
         logger::setup();
-        let leader = TestNode::new();
+        let leader_keypair = Keypair::new();
+        let leader = TestNode::new_localhost_with_pubkey(leader_keypair.pubkey());
         let leader_data = leader.data.clone();

         let alice = Mint::new(10_000);
         let bank = Bank::new(&alice);
-        let bob_pubkey = KeyPair::new().pubkey();
+        let bob_pubkey = Keypair::new().pubkey();
         let exit = Arc::new(AtomicBool::new(false));
+        let ledger_path = tmp_ledger("thin_client", &alice);

-        let server = FullNode::new_leader(
+        let server = Fullnode::new_leader(
+            leader_keypair,
             bank,
             0,
             None,
             Some(Duration::from_millis(30)),
             &[],
             leader,
             exit.clone(),
-            sink(),
+            &ledger_path,
+            false,
         );
         sleep(Duration::from_millis(900));

@@ -303,13 +378,15 @@ mod tests {
             transactions_socket,
         );
         let last_id = client.get_last_id();
-        let _sig = client
+        let signature = client
             .transfer(500, &alice.keypair(), bob_pubkey, &last_id)
             .unwrap();
-        let balance = client.poll_get_balance(&bob_pubkey);
+        client.poll_for_signature(&signature).unwrap();
+        let balance = client.get_balance(&bob_pubkey);
         assert_eq!(balance.unwrap(), 500);
         exit.store(true, Ordering::Relaxed);
         server.join().unwrap();
+        remove_dir_all(ledger_path).unwrap();
     }

     // sleep(Duration::from_millis(300)); is unstable
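Aside (not from the diff): note the shape of the updated test flow. The test now confirms the transaction by its signature with poll_for_signature and only then reads the balance once with get_balance, instead of polling the balance and hoping the transfer has already landed. The signature confirmation becomes the synchronization point, which makes the subsequent balance read deterministic.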
@@ -317,21 +394,24 @@ mod tests {
     #[ignore]
     fn test_bad_sig() {
         logger::setup();
-        let leader = TestNode::new();
+        let leader_keypair = Keypair::new();
+        let leader = TestNode::new_localhost_with_pubkey(leader_keypair.pubkey());
         let alice = Mint::new(10_000);
         let bank = Bank::new(&alice);
-        let bob_pubkey = KeyPair::new().pubkey();
+        let bob_pubkey = Keypair::new().pubkey();
         let exit = Arc::new(AtomicBool::new(false));
         let leader_data = leader.data.clone();
+        let ledger_path = tmp_ledger("bad_sig", &alice);

-        let server = FullNode::new_leader(
+        let server = Fullnode::new_leader(
+            leader_keypair,
             bank,
             0,
             None,
             Some(Duration::from_millis(30)),
             &[],
             leader,
             exit.clone(),
-            sink(),
+            &ledger_path,
+            false,
         );
         //TODO: remove this sleep, or add a retry so CI is stable
         sleep(Duration::from_millis(300));
@@ -360,31 +440,37 @@ mod tests {
             contract.tokens = 502;
             contract.plan = Plan::Budget(Budget::new_payment(502, bob_pubkey));
         }
-        let _sig = client.transfer_signed(&tr2).unwrap();
+        let signature = client.transfer_signed(&tr2).unwrap();
+        client.poll_for_signature(&signature).unwrap();

-        let balance = client.poll_get_balance(&bob_pubkey);
+        let balance = client.get_balance(&bob_pubkey);
         assert_eq!(balance.unwrap(), 500);
         exit.store(true, Ordering::Relaxed);
         server.join().unwrap();
+        remove_dir_all(ledger_path).unwrap();
     }

     #[test]
     fn test_client_check_signature() {
         logger::setup();
-        let leader = TestNode::new();
+        let leader_keypair = Keypair::new();
+        let leader = TestNode::new_localhost_with_pubkey(leader_keypair.pubkey());
         let alice = Mint::new(10_000);
         let bank = Bank::new(&alice);
-        let bob_pubkey = KeyPair::new().pubkey();
+        let bob_pubkey = Keypair::new().pubkey();
         let exit = Arc::new(AtomicBool::new(false));
         let leader_data = leader.data.clone();
-        let server = FullNode::new_leader(
+        let ledger_path = tmp_ledger("client_check_signature", &alice);
+
+        let server = Fullnode::new_leader(
+            leader_keypair,
             bank,
             0,
             None,
             Some(Duration::from_millis(30)),
             &[],
             leader,
             exit.clone(),
-            sink(),
+            &ledger_path,
+            false,
         );
         sleep(Duration::from_millis(300));

@@ -400,14 +486,15 @@ mod tests {
             transactions_socket,
         );
         let last_id = client.get_last_id();
-        let sig = client
+        let signature = client
             .transfer(500, &alice.keypair(), bob_pubkey, &last_id)
             .unwrap();
         sleep(Duration::from_millis(100));

-        assert!(client.check_signature(&sig));
+        assert!(client.check_signature(&signature));

         exit.store(true, Ordering::Relaxed);
         server.join().unwrap();
+        remove_dir_all(ledger_path).unwrap();
     }
 }
src/tpu.rs (16 lines changed)
@@ -32,8 +32,8 @@ use fetch_stage::FetchStage;
 use packet::{BlobRecycler, PacketRecycler};
 use record_stage::RecordStage;
 use service::Service;
+use signature::Keypair;
 use sigverify_stage::SigVerifyStage;
-use std::io::Write;
 use std::net::UdpSocket;
 use std::sync::atomic::AtomicBool;
 use std::sync::{Arc, RwLock};
@@ -51,21 +51,24 @@ pub struct Tpu {
 }

 impl Tpu {
-    pub fn new<W: Write + Send + 'static>(
+    pub fn new(
+        keypair: Keypair,
         bank: &Arc<Bank>,
         crdt: &Arc<RwLock<Crdt>>,
         tick_duration: Option<Duration>,
         transactions_socket: UdpSocket,
         blob_recycler: &BlobRecycler,
         exit: Arc<AtomicBool>,
-        writer: W,
+        ledger_path: &str,
+        sigverify_disabled: bool,
     ) -> (Self, BlobReceiver) {
         let packet_recycler = PacketRecycler::default();

         let (fetch_stage, packet_receiver) =
-            FetchStage::new(transactions_socket, exit, &packet_recycler.clone());
+            FetchStage::new(transactions_socket, exit, &packet_recycler);

-        let (sigverify_stage, verified_receiver) = SigVerifyStage::new(packet_receiver);
+        let (sigverify_stage, verified_receiver) =
+            SigVerifyStage::new(packet_receiver, sigverify_disabled);

         let (banking_stage, signal_receiver) =
             BankingStage::new(bank.clone(), verified_receiver, packet_recycler.clone());
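Aside (not from the diff): Tpu::new wires stages into a pipeline in which each stage consumes the previous stage's receiver and hands back a new one. The same shape with std::sync::mpsc and threads; the names are illustrative, not the real stages:

use std::sync::mpsc::{channel, Receiver};
use std::thread;

fn stage(input: Receiver<u32>) -> Receiver<u32> {
    let (tx, rx) = channel();
    thread::spawn(move || {
        for item in input {
            // ... do this stage's work, then pass the item along ...
            if tx.send(item + 1).is_err() {
                break; // downstream hung up; shut this stage down
            }
        }
    });
    rx
}

fn main() {
    let (tx, source) = channel();
    let verified = stage(source); // like SigVerifyStage::new(packet_receiver, ...)
    let banked = stage(verified); // like BankingStage::new(..., verified_receiver, ...)
    tx.send(1).unwrap();
    drop(tx);
    assert_eq!(banked.recv().unwrap(), 3);
}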
@@ -78,10 +81,11 @@ impl Tpu {
         };

         let (write_stage, blob_receiver) = WriteStage::new(
+            keypair,
             bank.clone(),
             crdt.clone(),
             blob_recycler.clone(),
-            writer,
+            ledger_path,
             entry_receiver,
         );

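Aside (not from the diff): dropping the `W: Write` type parameter in favor of a ledger path is a common simplification. The generic writer forces `W` through every signature that touches Tpu, while a path lets the writing stage open its own file. A std-only sketch of the two shapes, with illustrative names:

use std::fs::File;
use std::io::{self, Write};
use std::path::Path;

// Before-style: the writer's concrete type infects every containing struct
// and constructor signature.
#[allow(dead_code)]
struct GenericSink<W: Write> {
    out: W,
}

// After-style: take a path and open the file internally; no type parameter
// left to thread through the rest of the pipeline.
struct PathSink {
    out: File,
}

impl PathSink {
    fn open(path: &Path) -> io::Result<Self> {
        Ok(PathSink {
            out: File::create(path)?,
        })
    }

    fn write(&mut self, bytes: &[u8]) -> io::Result<()> {
        self.out.write_all(bytes)
    }
}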
Some files were not shown because too many files have changed in this diff.