Compare commits
212 Commits
v0.7.0-beta ... v0.7.0

SHA1
9d2d9a0189
6d3afc774a
88646bf27d
0696f9f497
b2ea2455e2
3f659a69fd
2c62be951f
2348733d6c
cc229b535d
7f810a29ff
fc1dfd86d2
5deb34e5bd
39df087902
6ff46540b6
dbab8792e4
4eb676afaa
a6cb2f1bcf
28af9a39b4
8cf5620b87
85d6627ee6
611a005ec9
90b3b90391
fd4f294fd3
145274c001
df5d6693f6
05c5603879
c2c48a5c3c
4af556f70e
8bad411962
5b0418793e
4423ee6902
f0c39cc84d
3d45b04da8
9e2f26a5d2
a016f6e82e
eb3e5fd204
72282dc493
47a22c66b4
fb11d8a909
7d872f52f4
d882bfe65c
103584ef27
1fb537deb9
2bd48b4207
f5a6db3dc0
dd0c1ac5b2
d8c9655128
09f2d273c5
f6eb85e7a3
0d85b43901
fdf94a77b4
af40ab0c04
015b7a1ddb
ab3e460e64
194a84c8dd
51d932dad1
561d31cc13
d6a8e437bb
4631af5011
5d28729b2a
8c08e614b7
e76bf1438b
4e177877c9
60848b9d95
79b3564a26
1e8c36c555
94d015b089
cfb3736372
2b77f62233
e8d23c17ca
a7ed2a304a
0025b42c26
3f7f492cc0
490d7875dd
4240edf710
30e50d0f70
751c1eba32
d349d6aa98
1f9152dc72
1b9d50172b
084dbd7f58
58c0508f94
dcf82c024f
b253ed0c46
61db53fc19
b0ead086a1
a3b22d0d33
28d24497a3
05cea4c1da
260f5edfd6
7105136595
54db379bf2
effbf0b978
8e7a2a9587
18e6ff4167
fa1cdaa91a
b538b67524
2b0f6355af
11b9a0323d
710fa822a0
aaf6ce5aea
34ea483736
a3ff40476e
4cca3ff454
3d9acdd970
428f220b88
10add6a8ac
f06a8dceda
545f4f1c87
77543d83ff
eb6a30cb7c
97372b8e63
cea29ed772
b5006b8f2b
81c44c605b
0b66a6626a
e8be4d7eae
30f0c25b65
73ae3c3301
f98e9aba48
84c28a077a
350cf62b90
aa4f30c491
3de979aa7c
5bc133985b
87156e1364
45ff142871
2710ff271e
468ac9facd
705720f086
a219e78f00
7a41868173
e16acec901
de44d7475e
c2dd009e0b
5a8da75d06
848c6e2371
e3882950cf
28f6fbee23
3144a70b18
bed5438831
6f991b3c11
03a8a5ed55
0c6d2ef1f4
d2be79f38c
cc89801b12
dfa05a8742
d7d985365b
0d4e4b18c2
7687436bef
d531b9645d
6a1b5a222a
be2bf69c93
0672794692
c65c0d9b23
0ee86ff313
3b1aa846b5
0a34cb8023
227aa38c8a
1dd467ed7d
922dffb122
63985d4595
97dd1834d7
2ea030be48
606cfbfe1e
90a4ab7e57
412e15fbdc
ed0a590549
71f05cb23e
5f99657523
587ae1bf3c
461dea69d9
22c0e3cd54
3ed9567f96
c4fa841aa9
f284af1c3d
46602ba9c3
81477246be
9bd63867aa
d1c317fd5f
cbd664ba4b
4bb7cefa15
82c86daa78
b95db62be3
0f7fdd71cc
af1a7da0d5
d698b3da3a
6d275d571c
63acb82c87
4d05b74314
37dd511356
96c321da76
4701540cc9
f54615b4e3
9c456b2fb0
77bf17064a
44150b2e85
8ec2fe15f3
687af3e3a4
72ab83cd45
4b07772e22
22d2c962b2
e771d36278
800c2dd370
f38842822f
88a6fb86bf
f6fe998ed4
16337d7c1e
ae309f80f7
fa70b3bf70
3a90f138b2
033f6dcbcb
.buildkite/hooks/post-checkout (new Normal file, 2 lines)
@@ -0,0 +1,2 @@
CI_BUILD_START=$(date +%s)
export CI_BUILD_START
.buildkite/hooks/post-checkout.sh (new Symbolic link)
@@ -0,0 +1 @@
post-checkout
.buildkite/hooks/post-command (54 changes, Executable file → Normal file)
@@ -1,14 +1,50 @@
 #!/bin/bash -e

-[[ -n "$CARGO_TARGET_CACHE_NAME" ]] || exit 0
-
 #
 # Save target/ for the next CI build on this machine
 #
-(
-  d=$HOME/cargo-target-cache/"$CARGO_TARGET_CACHE_NAME"
-  mkdir -p $d
-  set -x
-  rsync -a --delete --link-dest=$PWD target $d
-  du -hs $d
-)
+if [[ -n $CARGO_TARGET_CACHE_NAME ]]; then
+  (
+    d=$HOME/cargo-target-cache/"$CARGO_TARGET_CACHE_NAME"
+    mkdir -p "$d"
+    set -x
+    rsync -a --delete --link-dest="$PWD" target "$d"
+    du -hs "$d"
+  )
+fi
+
+#
+# Add job_stats data point
+#
+if [[ -z $CI_BUILD_START ]]; then
+  echo Error: CI_BUILD_START empty
+else
+  CI_BUILD_DURATION=$(( $(date +%s) - CI_BUILD_START + 1 ))
+
+  CI_LABEL=${BUILDKITE_LABEL:-build label missing}
+
+  PR=false
+  if [[ $BUILDKITE_BRANCH =~ pull/* ]]; then
+    PR=true
+  fi
+
+  SUCCESS=true
+  if [[ $BUILDKITE_COMMAND_EXIT_STATUS != 0 ]]; then
+    SUCCESS=false
+  fi
+
+  point_tags="pipeline=$BUILDKITE_PIPELINE_SLUG,job=$CI_LABEL,pr=$PR,success=$SUCCESS"
+  point_tags="${point_tags// /\\ }"  # Escape spaces
+
+  point_fields="duration=$CI_BUILD_DURATION"
+  point_fields="${point_fields// /\\ }"  # Escape spaces
+
+  point="job_stats,$point_tags $point_fields"
+  echo "Influx data point: $point"
+  if [[ -n $INFLUX_USERNAME && -n $INFLUX_PASSWORD ]]; then
+    echo "https://metrics.solana.com:8086/write?db=ci&u=${INFLUX_USERNAME}&p=${INFLUX_PASSWORD}" \
+      | xargs curl -XPOST --data-binary "$point"
+  else
+    echo Influx user credentials not found
+  fi
+fi
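The `point` assembled by the hook follows InfluxDB's line protocol: measurement name, comma-separated tags, a space, then fields (with spaces in tag values backslash-escaped, as the script does). As a sketch (all values hypothetical), the write performed for a successful non-PR `stable [public]` build is equivalent to:

```bash
$ curl -XPOST "https://metrics.solana.com:8086/write?db=ci&u=$INFLUX_USERNAME&p=$INFLUX_PASSWORD" \
    --data-binary 'job_stats,pipeline=solana,job=stable\ [public],pr=false,success=true duration=512'
```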
.buildkite/hooks/post-command.sh (new Symbolic link)
@@ -0,0 +1 @@
post-command
.buildkite/hooks/pre-command (4 changes, Executable file → Normal file)
@@ -7,7 +7,7 @@
 #
 (
   d=$HOME/cargo-target-cache/"$CARGO_TARGET_CACHE_NAME"
-  mkdir -p $d/target
+  mkdir -p "$d"/target
   set -x
-  rsync -a --delete --link-dest=$d $d/target .
+  rsync -a --delete --link-dest="$d" "$d"/target .
 )
.buildkite/hooks/pre-command.sh (new Symbolic link)
@@ -0,0 +1 @@
pre-command
.clippy.toml (new Normal file, 1 line)
@@ -0,0 +1 @@
too-many-arguments-threshold = 9
Cargo.toml (39 changes)
@@ -1,7 +1,7 @@
 [package]
 name = "solana"
 description = "Blockchain, Rebuilt for Scale"
-version = "0.7.0-beta"
+version = "0.7.0"
 documentation = "https://docs.rs/solana"
 homepage = "http://solana.com/"
 readme = "README.md"
@@ -10,6 +10,10 @@ authors = [
     "Anatoly Yakovenko <anatoly@solana.com>",
     "Greg Fitzgerald <greg@solana.com>",
     "Stephen Akridge <stephen@solana.com>",
+    "Michael Vines <mvines@solana.com>",
+    "Rob Walker <rob@solana.com>",
+    "Pankaj Garg <pankaj@solana.com>",
+    "Tyera Eulberg <tyera@solana.com>",
 ]
 license = "Apache-2.0"

@@ -25,6 +29,10 @@ path = "src/bin/wallet.rs"
 name = "solana-fullnode"
 path = "src/bin/fullnode.rs"

+[[bin]]
+name = "solana-keygen"
+path = "src/bin/keygen.rs"
+
 [[bin]]
 name = "solana-fullnode-config"
 path = "src/bin/fullnode-config.rs"
@@ -33,10 +41,6 @@ path = "src/bin/fullnode-config.rs"
 name = "solana-genesis"
 path = "src/bin/genesis.rs"

-[[bin]]
-name = "solana-mint"
-path = "src/bin/mint.rs"
-
 [[bin]]
 name = "solana-drone"
 path = "src/bin/drone.rs"
@@ -80,3 +84,28 @@ p2p = "0.5.2"
 futures = "0.1.21"
 clap = "2.31"
 reqwest = "0.8.6"
+influx_db_client = "0.3.4"
+dirs = "1.0.2"
+
+[dev-dependencies]
+criterion = "0.2"
+
+[[bench]]
+name = "bank"
+harness = false
+
+[[bench]]
+name = "banking_stage"
+harness = false
+
+[[bench]]
+name = "ledger"
+harness = false
+
+[[bench]]
+name = "signature"
+harness = false
+
+[[bench]]
+name = "streamer"
+harness = false
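Each `[[bench]]` entry above sets `harness = false` so that Criterion, not the built-in libtest harness, drives the benchmark; that is what lets these benches run via standard Cargo commands (bench names taken from the targets above):

```bash
$ cargo bench               # run every bench target
$ cargo bench --bench bank  # run only benches/bank.rs
```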
README.md (32 changes)
@@ -47,7 +47,7 @@ $ source $HOME/.cargo/env
 Now checkout the code from github:

 ```bash
-$ git clone https://github.com/solana-labs/solana.git
+$ git clone https://github.com/solana-labs/solana.git
 $ cd solana
 ```

@@ -84,17 +84,24 @@ Now start the server:
 $ ./multinode-demo/leader.sh
 ```

-To run a performance-enhanced fullnode on Linux,
-[CUDA 9.2](https://developer.nvidia.com/cuda-downloads) must be installed on
-your system:
-```bash
-$ ./fetch-perf-libs.sh
-$ SOLANA_CUDA=1 ./multinode-demo/leader.sh
-```
-
 Wait a few seconds for the server to initialize. It will print "Ready." when it's ready to
 receive transactions.

+Drone
+---
+
+In order for the below test client and validators to work, we'll also need to
+spin up a drone to give out some test tokens. The drone delivers Milton
+Friedman-style "air drops" (free tokens to requesting clients) to be used in
+test transactions.
+
+Start the drone on the leader node with:
+
+```bash
+$ ./multinode-demo/drone.sh
+```
+
+
 Multinode Testnet
 ---

@@ -104,15 +111,18 @@ To run a multinode testnet, after starting a leader node, spin up some validator
 $ ./multinode-demo/validator.sh ubuntu@10.0.1.51:~/solana 10.0.1.51
 ```

-To run a performance-enhanced fullnode on Linux,
+To run a performance-enhanced leader or validator (on Linux),
 [CUDA 9.2](https://developer.nvidia.com/cuda-downloads) must be installed on
 your system:
 ```bash
 $ ./fetch-perf-libs.sh
-$ SOLANA_CUDA=1 ./multinode-demo/leader.sh ubuntu@10.0.1.51:~/solana 10.0.1.51
+$ SOLANA_CUDA=1 ./multinode-demo/leader.sh
+$ SOLANA_CUDA=1 ./multinode-demo/validator.sh ubuntu@10.0.1.51:~/solana 10.0.1.51
 ```


 Testnet Client Demo
 ---
benches/bank.rs (new Normal file, 66 lines)
@@ -0,0 +1,66 @@
#[macro_use]
extern crate criterion;
extern crate bincode;
extern crate rayon;
extern crate solana;

use bincode::serialize;
use criterion::{Bencher, Criterion};
use rayon::prelude::*;
use solana::bank::*;
use solana::hash::hash;
use solana::mint::Mint;
use solana::signature::{KeyPair, KeyPairUtil};
use solana::transaction::Transaction;

fn bench_process_transaction(bencher: &mut Bencher) {
    let mint = Mint::new(100_000_000);
    let bank = Bank::new(&mint);

    // Create transactions between unrelated parties.
    let transactions: Vec<_> = (0..4096)
        .into_par_iter()
        .map(|i| {
            // Seed the 'from' account.
            let rando0 = KeyPair::new();
            let tx = Transaction::new(&mint.keypair(), rando0.pubkey(), 10_000, mint.last_id());
            assert!(bank.process_transaction(&tx).is_ok());

            // Seed the 'to' account and a cell for its signature.
            let last_id = hash(&serialize(&i).unwrap()); // Unique hash
            bank.register_entry_id(&last_id);

            let rando1 = KeyPair::new();
            let tx = Transaction::new(&rando0, rando1.pubkey(), 1, last_id);
            assert!(bank.process_transaction(&tx).is_ok());

            // Finally, return the transaction to the benchmark.
            tx
        })
        .collect();

    bencher.iter_with_setup(
        || {
            // Since benchmarker runs this multiple times, we need to clear the signatures.
            bank.clear_signatures();
            transactions.clone()
        },
        |transactions| {
            let results = bank.process_transactions(transactions);
            assert!(results.iter().all(Result::is_ok));
        },
    )
}

fn bench(criterion: &mut Criterion) {
    criterion.bench_function("bench_process_transaction", |bencher| {
        bench_process_transaction(bencher);
    });
}

criterion_group!(
    name = benches;
    config = Criterion::default().sample_size(2);
    targets = bench
);
criterion_main!(benches);
benches/banking_stage.rs (new Normal file, 226 lines)
@@ -0,0 +1,226 @@
extern crate bincode;
#[macro_use]
extern crate criterion;
extern crate rayon;
extern crate solana;

use criterion::{Bencher, Criterion};
use rayon::prelude::*;
use solana::bank::Bank;
use solana::banking_stage::BankingStage;
use solana::mint::Mint;
use solana::packet::{to_packets_chunked, PacketRecycler};
use solana::record_stage::Signal;
use solana::signature::{KeyPair, KeyPairUtil};
use solana::transaction::Transaction;
use std::iter;
use std::sync::mpsc::{channel, Receiver};
use std::sync::Arc;

// use self::test::Bencher;
// use bank::{Bank, MAX_ENTRY_IDS};
// use bincode::serialize;
// use hash::hash;
// use mint::Mint;
// use rayon::prelude::*;
// use signature::{KeyPair, KeyPairUtil};
// use std::collections::HashSet;
// use std::time::Instant;
// use transaction::Transaction;
//
// fn bench_process_transactions(_bencher: &mut Bencher) {
//     let mint = Mint::new(100_000_000);
//     let bank = Bank::new(&mint);
//     // Create transactions between unrelated parties.
//     let txs = 100_000;
//     let last_ids: Mutex<HashSet<Hash>> = Mutex::new(HashSet::new());
//     let transactions: Vec<_> = (0..txs)
//         .into_par_iter()
//         .map(|i| {
//             // Seed the 'to' account and a cell for its signature.
//             let dummy_id = i % (MAX_ENTRY_IDS as i32);
//             let last_id = hash(&serialize(&dummy_id).unwrap()); // Semi-unique hash
//             {
//                 let mut last_ids = last_ids.lock().unwrap();
//                 if !last_ids.contains(&last_id) {
//                     last_ids.insert(last_id);
//                     bank.register_entry_id(&last_id);
//                 }
//             }
//
//             // Seed the 'from' account.
//             let rando0 = KeyPair::new();
//             let tx = Transaction::new(&mint.keypair(), rando0.pubkey(), 1_000, last_id);
//             bank.process_transaction(&tx).unwrap();
//
//             let rando1 = KeyPair::new();
//             let tx = Transaction::new(&rando0, rando1.pubkey(), 2, last_id);
//             bank.process_transaction(&tx).unwrap();
//
//             // Finally, return a transaction that's unique
//             Transaction::new(&rando0, rando1.pubkey(), 1, last_id)
//         })
//         .collect();
//
//     let banking_stage = EventProcessor::new(bank, &mint.last_id(), None);
//
//     let now = Instant::now();
//     assert!(banking_stage.process_transactions(transactions).is_ok());
//     let duration = now.elapsed();
//     let sec = duration.as_secs() as f64 + duration.subsec_nanos() as f64 / 1_000_000_000.0;
//     let tps = txs as f64 / sec;
//
//     // Ensure that all transactions were successfully logged.
//     drop(banking_stage.historian_input);
//     let entries: Vec<Entry> = banking_stage.output.lock().unwrap().iter().collect();
//     assert_eq!(entries.len(), 1);
//     assert_eq!(entries[0].transactions.len(), txs as usize);
//
//     println!("{} tps", tps);
// }

fn check_txs(batches: usize, receiver: &Receiver<Signal>, ref_tx_count: usize) {
    let mut total = 0;
    for _ in 0..batches {
        let signal = receiver.recv().unwrap();
        if let Signal::Transactions(transactions) = signal {
            total += transactions.len();
        } else {
            assert!(false);
        }
    }
    assert_eq!(total, ref_tx_count);
}

fn bench_banking_stage_multi_accounts(bencher: &mut Bencher) {
    let tx = 10_000_usize;
    let mint_total = 1_000_000_000_000;
    let mint = Mint::new(mint_total);
    let num_dst_accounts = 8 * 1024;
    let num_src_accounts = 8 * 1024;

    let srckeys: Vec<_> = (0..num_src_accounts).map(|_| KeyPair::new()).collect();
    let dstkeys: Vec<_> = (0..num_dst_accounts)
        .map(|_| KeyPair::new().pubkey())
        .collect();

    let transactions: Vec<_> = (0..tx)
        .map(|i| {
            Transaction::new(
                &srckeys[i % num_src_accounts],
                dstkeys[i % num_dst_accounts],
                i as i64,
                mint.last_id(),
            )
        })
        .collect();

    let (verified_sender, verified_receiver) = channel();
    let (signal_sender, signal_receiver) = channel();
    let packet_recycler = PacketRecycler::default();

    let setup_transactions: Vec<_> = (0..num_src_accounts)
        .map(|i| {
            Transaction::new(
                &mint.keypair(),
                srckeys[i].pubkey(),
                mint_total / num_src_accounts as i64,
                mint.last_id(),
            )
        })
        .collect();

    bencher.iter(move || {
        let bank = Arc::new(Bank::new(&mint));

        let verified_setup: Vec<_> =
            to_packets_chunked(&packet_recycler, &setup_transactions.clone(), tx)
                .into_iter()
                .map(|x| {
                    let len = (*x).read().unwrap().packets.len();
                    (x, iter::repeat(1).take(len).collect())
                })
                .collect();

        let verified_setup_len = verified_setup.len();
        verified_sender.send(verified_setup).unwrap();
        BankingStage::process_packets(&bank, &verified_receiver, &signal_sender, &packet_recycler)
            .unwrap();

        check_txs(verified_setup_len, &signal_receiver, num_src_accounts);

        let verified: Vec<_> = to_packets_chunked(&packet_recycler, &transactions.clone(), 192)
            .into_iter()
            .map(|x| {
                let len = (*x).read().unwrap().packets.len();
                (x, iter::repeat(1).take(len).collect())
            })
            .collect();

        let verified_len = verified.len();
        verified_sender.send(verified).unwrap();
        BankingStage::process_packets(&bank, &verified_receiver, &signal_sender, &packet_recycler)
            .unwrap();

        check_txs(verified_len, &signal_receiver, tx);
    });
}

fn bench_banking_stage_single_from(bencher: &mut Bencher) {
    let tx = 10_000_usize;
    let mint = Mint::new(1_000_000_000_000);
    let mut pubkeys = Vec::new();
    let num_keys = 8;
    for _ in 0..num_keys {
        pubkeys.push(KeyPair::new().pubkey());
    }

    let transactions: Vec<_> = (0..tx)
        .into_par_iter()
        .map(|i| {
            Transaction::new(
                &mint.keypair(),
                pubkeys[i % num_keys],
                i as i64,
                mint.last_id(),
            )
        })
        .collect();

    let (verified_sender, verified_receiver) = channel();
    let (signal_sender, signal_receiver) = channel();
    let packet_recycler = PacketRecycler::default();

    bencher.iter(move || {
        let bank = Arc::new(Bank::new(&mint));
        let verified: Vec<_> = to_packets_chunked(&packet_recycler, &transactions.clone(), tx)
            .into_iter()
            .map(|x| {
                let len = (*x).read().unwrap().packets.len();
                (x, iter::repeat(1).take(len).collect())
            })
            .collect();
        let verified_len = verified.len();
        verified_sender.send(verified).unwrap();
        BankingStage::process_packets(&bank, &verified_receiver, &signal_sender, &packet_recycler)
            .unwrap();

        check_txs(verified_len, &signal_receiver, tx);
    });
}

fn bench(criterion: &mut Criterion) {
    criterion.bench_function("bench_banking_stage_multi_accounts", |bencher| {
        bench_banking_stage_multi_accounts(bencher);
    });
    criterion.bench_function("bench_process_stage_single_from", |bencher| {
        bench_banking_stage_single_from(bencher);
    });
}

criterion_group!(
    name = benches;
    config = Criterion::default().sample_size(2);
    targets = bench
);
criterion_main!(benches);
benches/ledger.rs (new Normal file, 40 lines)
@@ -0,0 +1,40 @@
#[macro_use]
extern crate criterion;
extern crate solana;

use criterion::{Bencher, Criterion};
use solana::hash::{hash, Hash};
use solana::ledger::{next_entries, reconstruct_entries_from_blobs, Block};
use solana::packet::BlobRecycler;
use solana::signature::{KeyPair, KeyPairUtil};
use solana::transaction::Transaction;
use std::collections::VecDeque;

fn bench_block_to_blobs_to_block(bencher: &mut Bencher) {
    let zero = Hash::default();
    let one = hash(&zero);
    let keypair = KeyPair::new();
    let tx0 = Transaction::new(&keypair, keypair.pubkey(), 1, one);
    let transactions = vec![tx0; 10];
    let entries = next_entries(&zero, 1, transactions);

    let blob_recycler = BlobRecycler::default();
    bencher.iter(|| {
        let mut blob_q = VecDeque::new();
        entries.to_blobs(&blob_recycler, &mut blob_q);
        assert_eq!(reconstruct_entries_from_blobs(blob_q).unwrap(), entries);
    });
}

fn bench(criterion: &mut Criterion) {
    criterion.bench_function("bench_block_to_blobs_to_block", |bencher| {
        bench_block_to_blobs_to_block(bencher);
    });
}

criterion_group!(
    name = benches;
    config = Criterion::default().sample_size(2);
    targets = bench
);
criterion_main!(benches);
benches/signature.rs (new Normal file, 24 lines)
@@ -0,0 +1,24 @@
#[macro_use]
extern crate criterion;
extern crate solana;

use criterion::{Bencher, Criterion};
use solana::signature::GenKeys;

fn bench_gen_keys(b: &mut Bencher) {
    let rnd = GenKeys::new([0u8; 32]);
    b.iter(|| rnd.gen_n_keypairs(1000));
}

fn bench(criterion: &mut Criterion) {
    criterion.bench_function("bench_gen_keys", |bencher| {
        bench_gen_keys(bencher);
    });
}

criterion_group!(
    name = benches;
    config = Criterion::default().sample_size(2);
    targets = bench
);
criterion_main!(benches);
benches/streamer.rs (new Normal file, 117 lines)
@@ -0,0 +1,117 @@
#[macro_use]
extern crate log;
extern crate solana;
#[macro_use]
extern crate criterion;

use criterion::{Bencher, Criterion};
use solana::packet::{Packet, PacketRecycler, BLOB_SIZE, PACKET_DATA_SIZE};
use solana::result::Result;
use solana::streamer::{receiver, PacketReceiver};
use std::net::{SocketAddr, UdpSocket};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::channel;
use std::sync::{Arc, Mutex};
use std::thread::sleep;
use std::thread::{spawn, JoinHandle};
use std::time::Duration;
use std::time::SystemTime;

fn producer(addr: &SocketAddr, recycler: PacketRecycler, exit: Arc<AtomicBool>) -> JoinHandle<()> {
    let send = UdpSocket::bind("0.0.0.0:0").unwrap();
    let msgs = recycler.allocate();
    let msgs_ = msgs.clone();
    msgs.write().unwrap().packets.resize(10, Packet::default());
    for w in msgs.write().unwrap().packets.iter_mut() {
        w.meta.size = PACKET_DATA_SIZE;
        w.meta.set_addr(&addr);
    }
    spawn(move || loop {
        if exit.load(Ordering::Relaxed) {
            return;
        }
        let mut num = 0;
        for p in msgs_.read().unwrap().packets.iter() {
            let a = p.meta.addr();
            assert!(p.meta.size < BLOB_SIZE);
            send.send_to(&p.data[..p.meta.size], &a).unwrap();
            num += 1;
        }
        assert_eq!(num, 10);
    })
}

fn sink(
    recycler: PacketRecycler,
    exit: Arc<AtomicBool>,
    rvs: Arc<Mutex<usize>>,
    r: PacketReceiver,
) -> JoinHandle<()> {
    spawn(move || loop {
        if exit.load(Ordering::Relaxed) {
            return;
        }
        let timer = Duration::new(1, 0);
        match r.recv_timeout(timer) {
            Ok(msgs) => {
                *rvs.lock().unwrap() += msgs.read().unwrap().packets.len();
                recycler.recycle(msgs);
            }
            _ => (),
        }
    })
}

fn bench_streamer_with_result() -> Result<()> {
    let read = UdpSocket::bind("127.0.0.1:0")?;
    read.set_read_timeout(Some(Duration::new(1, 0)))?;

    let addr = read.local_addr()?;
    let exit = Arc::new(AtomicBool::new(false));
    let pack_recycler = PacketRecycler::default();

    let (s_reader, r_reader) = channel();
    let t_reader = receiver(read, exit.clone(), pack_recycler.clone(), s_reader);
    let t_producer1 = producer(&addr, pack_recycler.clone(), exit.clone());
    let t_producer2 = producer(&addr, pack_recycler.clone(), exit.clone());
    let t_producer3 = producer(&addr, pack_recycler.clone(), exit.clone());

    let rvs = Arc::new(Mutex::new(0));
    let t_sink = sink(pack_recycler.clone(), exit.clone(), rvs.clone(), r_reader);

    let start = SystemTime::now();
    let start_val = *rvs.lock().unwrap();
    sleep(Duration::new(5, 0));
    let elapsed = start.elapsed().unwrap();
    let end_val = *rvs.lock().unwrap();
    let time = elapsed.as_secs() * 10000000000 + elapsed.subsec_nanos() as u64;
    let ftime = (time as f64) / 10000000000f64;
    let fcount = (end_val - start_val) as f64;
    trace!("performance: {:?}", fcount / ftime);
    exit.store(true, Ordering::Relaxed);
    t_reader.join()?;
    t_producer1.join()?;
    t_producer2.join()?;
    t_producer3.join()?;
    t_sink.join()?;
    Ok(())
}

fn bench_streamer(bencher: &mut Bencher) {
    bencher.iter(|| {
        bench_streamer_with_result().unwrap();
    });
}

fn bench(criterion: &mut Criterion) {
    criterion.bench_function("bench_streamer", |bencher| {
        bench_streamer(bencher);
    });
}

criterion_group!(
    name = benches;
    config = Criterion::default().sample_size(2);
    targets = bench
);
criterion_main!(benches);
@@ -33,11 +33,12 @@ The process to update a disk image is as follows (TODO: make this less manual):
 4. From another machine, `gcloud auth login`, then create a new Disk Image based
    off the modified VM Instance:
    ```
-   $ gcloud compute images create ci-default-v5 --source-disk xxx --source-disk-zone us-east1-b
+   $ gcloud compute images create ci-default-$(date +%Y%m%d%H%M) --source-disk xxx --source-disk-zone us-east1-b --family ci-default
+
    ```
    or
    ```
-   $ gcloud compute images create ci-cuda-v5 --source-disk xxx --source-disk-zone us-east1-b
+   $ gcloud compute images create ci-cuda-$(date +%Y%m%d%H%M) --source-disk xxx --source-disk-zone us-east1-b --family ci-cuda
    ```
 5. Delete the new VM instance.
 6. Go to the Instance templates tab, find the existing template named
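Since the images are now created with `--family`, downstream tooling can resolve the newest image without knowing its date-stamped name. For example (instance name hypothetical, standard gcloud usage):

```bash
$ gcloud compute instances create ci-worker-1 \
    --image-family ci-default --zone us-east1-b
```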
@@ -1,4 +1,4 @@
 steps:
   - command: "ci/snap.sh"
-    timeout_in_minutes: 20
+    timeout_in_minutes: 40
     name: "snap [public]"
@@ -3,11 +3,11 @@ steps:
     name: "stable [public]"
     env:
       CARGO_TARGET_CACHE_NAME: "stable"
-    timeout_in_minutes: 20
+    timeout_in_minutes: 30
   - command: "ci/shellcheck.sh"
     name: "shellcheck [public]"
     timeout_in_minutes: 20
-  - command: "ci/docker-run.sh rustlang/rust:nightly ci/test-nightly.sh"
+  - command: "ci/docker-run.sh solanalabs/rust-nightly ci/test-nightly.sh"
     name: "nightly [public]"
     env:
       CARGO_TARGET_CACHE_NAME: "nightly"
@@ -36,3 +36,9 @@ steps:
   - trigger: "solana-snap"
     branches: "!pull/*"
     async: true
+    build:
+      message: "${BUILDKITE_MESSAGE}"
+      commit: "${BUILDKITE_COMMIT}"
+      branch: "${BUILDKITE_BRANCH}"
+      env:
+        TRIGGERED_BUILDKITE_TAG: "${BUILDKITE_TAG}"
ci/docker-rust-nightly/Dockerfile (new Normal file, 6 lines)
@@ -0,0 +1,6 @@
FROM rustlang/rust:nightly

RUN cargo install --force clippy cargo-cov && \
    echo deb http://ftp.debian.org/debian stretch-backports main >> /etc/apt/sources.list && \
    apt update && \
    apt install -y llvm-6.0
ci/docker-rust-nightly/README.md (new Normal file, 6 lines)
@@ -0,0 +1,6 @@
Docker image containing rust nightly and some preinstalled crates used in CI.

This image may be manually updated by running `./build.sh` if you are a member
of the [Solana Labs](https://hub.docker.com/u/solanalabs/) Docker Hub
organization, but it is also automatically updated periodically by
[this automation](https://buildkite.com/solana-labs/solana-ci-docker-rust-nightly).
ci/docker-rust-nightly/build.sh (new Executable file, 6 lines)
@@ -0,0 +1,6 @@
#!/bin/bash -ex

cd "$(dirname "$0")"

docker build -t solanalabs/rust-nightly .
docker push solanalabs/rust-nightly
@@ -2,6 +2,6 @@ FROM snapcraft/xenial-amd64

 # Update snapcraft to latest version
 RUN apt-get update -qq \
-    && apt-get install -y snapcraft \
+    && apt-get install -y snapcraft daemontools \
     && rm -rf /var/lib/apt/lists/* \
     && snapcraft --version
ci/snap.sh (12 changes)
@@ -7,7 +7,11 @@ if [[ -z $BUILDKITE_BRANCH ]] || ./ci/is-pr.sh; then
   DRYRUN="echo"
 fi

-if [[ -z "$BUILDKITE_TAG" ]]; then
+# BUILDKITE_TAG is the normal environment variable set by Buildkite. However
+# when this script is run from a triggered pipeline, TRIGGERED_BUILDKITE_TAG is
+# used instead of BUILDKITE_TAG (due to Buildkite limitations that prevents
+# BUILDKITE_TAG from propagating through to triggered pipelines)
+if [[ -z "$BUILDKITE_TAG" && -z "$TRIGGERED_BUILDKITE_TAG" ]]; then
   SNAP_CHANNEL=edge
 else
   SNAP_CHANNEL=beta
@@ -33,6 +37,12 @@ fi

 set -x

+echo --- checking for multilog
+if [[ ! -x /usr/bin/multilog ]]; then
+  echo "multilog not found, install with: sudo apt-get install -y daemontools"
+  exit 1
+fi
+
 echo --- build
 snapcraft
@@ -13,13 +13,11 @@ _() {

 _ cargo build --verbose --features unstable
 _ cargo test --verbose --features unstable
-_ cargo bench --verbose --features unstable
-
 _ cargo clippy -- --deny=warnings

 exit 0

 # Coverage disabled (see issue #433)
 _ cargo install --force cargo-cov
 _ cargo cov test
 _ cargo cov report
@@ -29,6 +27,6 @@ ls -l target/cov/report/index.html
 if [[ -z "$CODECOV_TOKEN" ]]; then
   echo CODECOV_TOKEN undefined
 else
-  bash <(curl -s https://codecov.io/bash) -x 'llvm-cov gcov'
+  bash <(curl -s https://codecov.io/bash) -x 'llvm-cov-6.0 gcov'
 fi
@@ -15,4 +15,4 @@ _ rustup component add rustfmt-preview
 _ cargo fmt -- --write-mode=check
 _ cargo build --verbose
 _ cargo test --verbose
-_ cargo test -- --ignored
+_ cargo bench --verbose
ci/testnet-deploy.sh (new Executable file, 165 lines)
@@ -0,0 +1,165 @@
#!/bin/bash -e
#
# Deploys the Solana software running on the testnet full nodes
#
# This script must be run by a user/machine that has successfully authenticated
# with GCP and has sufficient permission.
#
cd "$(dirname "$0")/.."

# TODO: Switch over to rolling updates
ROLLING_UPDATE=false
#ROLLING_UPDATE=true

if [[ -z $SOLANA_METRICS_CONFIG ]]; then
  echo Error: SOLANA_METRICS_CONFIG environment variable is unset
  exit 1
fi

# Default to edge channel.  To select the beta channel:
#   export SOLANA_SNAP_CHANNEL=beta
if [[ -z $SOLANA_SNAP_CHANNEL ]]; then
  SOLANA_SNAP_CHANNEL=edge
fi

case $SOLANA_SNAP_CHANNEL in
edge)
  publicUrl=master.testnet.solana.com
  publicIp=$(dig +short $publicUrl | head -n1)
  ;;
beta)
  publicUrl=testnet.solana.com
  publicIp="" # Use default value
  ;;
*)
  echo Error: Unknown SOLANA_SNAP_CHANNEL=$SOLANA_SNAP_CHANNEL
  exit 1
  ;;
esac

resourcePrefix=${publicUrl//./-}
vmlist=("$resourcePrefix":us-west1-b) # Leader is hard coded as the first entry
validatorNamePrefix=$resourcePrefix-validator-

echo "--- Available validators for $publicUrl"
filter="name~^$validatorNamePrefix"
gcloud compute instances list --filter="$filter"
while read -r vmName vmZone status; do
  if [[ $status != RUNNING ]]; then
    echo "Warning: $vmName is not RUNNING, ignoring it."
    continue
  fi
  vmlist+=("$vmName:$vmZone")
done < <(gcloud compute instances list --filter="$filter" --format 'value(name,zone,status)')

wait_for_node() {
  declare pid=$1

  declare ok=true
  wait "$pid" || ok=false
  cat "log-$pid.txt"
  if ! $ok; then
    echo ^^^ +++
    exit 1
  fi
}

if ! $ROLLING_UPDATE; then
  count=1
  for info in "${vmlist[@]}"; do
    nodePosition="($count/${#vmlist[*]})"
    vmName=${info%:*}
    vmZone=${info#*:}
    echo "--- Shutting down $vmName in zone $vmZone $nodePosition"
    gcloud compute ssh "$vmName" --zone "$vmZone" \
      --ssh-flag="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" \
      --command="echo sudo snap remove solana" &

    if [[ $((count % 10)) = 0 ]]; then
      # Slow down deployment to avoid triggering GCP login
      # quota limits (each |ssh| counts as a login)
      sleep 3
    fi

    count=$((count + 1))
  done

  wait
fi

echo "--- Refreshing leader for $publicUrl"
leader=true
pids=()
count=1
for info in "${vmlist[@]}"; do
  nodePosition="($count/${#vmlist[*]})"

  vmName=${info%:*}
  vmZone=${info#*:}
  echo "Starting refresh for $vmName $nodePosition"

  (
    SECONDS=0
    echo "--- $vmName in zone $vmZone $nodePosition"
    commonNodeConfig="\
      rust-log=$RUST_LOG \
      default-metrics-rate=$SOLANA_DEFAULT_METRICS_RATE \
      metrics-config=$SOLANA_METRICS_CONFIG \
    "
    if $leader; then
      nodeConfig="mode=leader+drone $commonNodeConfig"
      if [[ -n $SOLANA_CUDA ]]; then
        nodeConfig="$nodeConfig enable-cuda=1"
      fi
    else
      nodeConfig="mode=validator leader-address=$publicIp $commonNodeConfig"
    fi

    set -x
    gcloud compute ssh "$vmName" --zone "$vmZone" \
      --ssh-flag="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -t" \
      --command="\
        set -ex; \
        logmarker='solana deploy $(date)/$RANDOM'; \
        sudo snap remove solana; \
        logger \$logmarker; \
        sudo snap install solana --$SOLANA_SNAP_CHANNEL --devmode; \
        sudo snap set solana $nodeConfig; \
        snap info solana; \
        echo Slight delay to get more syslog output; \
        sleep 2; \
        sudo grep -Pzo \"\$logmarker(.|\\n)*\" /var/log/syslog \
      "
    echo "Succeeded in ${SECONDS} seconds"
  ) > "log-$vmName.txt" 2>&1 &
  pid=$!
  # Rename log file so it can be discovered later by $pid
  mv "log-$vmName.txt" "log-$pid.txt"

  if $leader; then
    echo Waiting for leader...
    # Wait for the leader to initialize before starting the validators
    # TODO: Remove this limitation eventually.
    wait_for_node "$pid"

    echo "--- Refreshing validators"
  else
    # Slow down deployment to ~20 machines a minute to avoid triggering GCP login
    # quota limits (each |ssh| counts as a login)
    sleep 3

    pids+=("$pid")
  fi
  leader=false
  count=$((count + 1))
done

echo --- Waiting for validators
for pid in "${pids[@]}"; do
  wait_for_node "$pid"
done

echo "--- $publicUrl sanity test"
USE_SNAP=1 ci/testnet-sanity.sh $publicUrl

exit 0
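As the comments in the script note, the snap channel (and with it the target testnet) is selected through the environment; a hypothetical beta deploy would look like:

```bash
$ export SOLANA_METRICS_CONFIG="host=...,db=...,u=...,p=..."  # required; the script exits without it
$ SOLANA_SNAP_CHANNEL=beta ci/testnet-deploy.sh               # targets testnet.solana.com
```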
ci/testnet-sanity.sh (new Executable file, 17 lines)
@@ -0,0 +1,17 @@
#!/bin/bash -e
#
# Perform a quick sanity test on the specific testnet
#

cd "$(dirname "$0")/.."

TESTNET=$1
if [[ -z $TESTNET ]]; then
  TESTNET=testnet.solana.com
fi

echo "--- $TESTNET: wallet sanity"
multinode-demo/test/wallet-sanity.sh $TESTNET

echo --- fin
exit 0
@@ -7,7 +7,14 @@ here=$(dirname "$0")
 # shellcheck source=multinode-demo/common.sh
 source "$here"/common.sh

-leader=${1:-${here}/..} # Default to local solana repo
+leader=$1
+if [[ -z $leader ]]; then
+  if [[ -d "$SNAP" ]]; then
+    leader=testnet.solana.com # Default to testnet when running as a Snap
+  else
+    leader=$here/.. # Default to local solana repo
+  fi
+fi
 count=${2:-1}

 rsync_leader_url=$(rsync_url "$leader")
@@ -15,9 +22,12 @@ rsync_leader_url=$(rsync_url "$leader")
 set -ex
 mkdir -p "$SOLANA_CONFIG_CLIENT_DIR"
 $rsync -vPz "$rsync_leader_url"/config/leader.json "$SOLANA_CONFIG_CLIENT_DIR"/
-$rsync -vPz "$rsync_leader_url"/config-private/mint.json "$SOLANA_CONFIG_CLIENT_DIR"/

-# shellcheck disable=SC2086 # $solana_client_demo should not be quoted
-exec $solana_client_demo \
-    -n "$count" -l "$SOLANA_CONFIG_CLIENT_DIR"/leader.json \
-    < "$SOLANA_CONFIG_CLIENT_DIR"/mint.json
+client_json="$SOLANA_CONFIG_CLIENT_DIR"/client.json
+[[ -r $client_json ]] || $solana_keygen -o "$client_json"
+
+$solana_client_demo \
+  -n "$count" \
+  -l "$SOLANA_CONFIG_CLIENT_DIR"/leader.json \
+  -k "$SOLANA_CONFIG_CLIENT_DIR"/client.json \
@@ -3,20 +3,41 @@
 # Disable complaints about unused variables in this file:
 # shellcheck disable=2034

+# shellcheck disable=2154 # 'here' is referenced but not assigned
+if [[ -z $here ]]; then
+  echo "|here| is not defined"
+  exit 1
+fi
+
 rsync=rsync
+leader_logger="cat"
+validator_logger="cat"
+drone_logger="cat"
+
 if [[ -d "$SNAP" ]]; then # Running inside a Linux Snap?
   solana_program() {
     declare program="$1"
-    if [[ "$program" = wallet ]]; then
-      # TODO: Merge wallet.sh functionality into solana-wallet proper and
-      # remove this special case
+    if [[ "$program" = wallet || "$program" = client-demo ]]; then
+      # TODO: Merge wallet.sh/client.sh functionality into
+      # solana-wallet/solana-demo-client proper and remove this special case
       printf "%s/bin/solana-%s" "$SNAP" "$program"
     else
      printf "%s/command-%s.wrapper" "$SNAP" "$program"
     fi
   }
   rsync="$SNAP"/bin/rsync
+  multilog="$SNAP/bin/multilog t s16777215"
+  leader_logger="$multilog $SNAP_DATA/leader"
+  validator_logger="$multilog t $SNAP_DATA/validator"
+  drone_logger="$multilog $SNAP_DATA/drone"
+  # Create log directories manually to prevent multilog from creating them as
+  # 0700
+  mkdir -p "$SNAP_DATA"/{drone,leader,validator}
+
   SOLANA_METRICS_CONFIG="$(snapctl get metrics-config)"
+  SOLANA_DEFAULT_METRICS_RATE="$(snapctl get default-metrics-rate)"
+  SOLANA_CUDA="$(snapctl get enable-cuda)"
+  RUST_LOG="$(snapctl get rust-log)"

 elif [[ -n "$USE_SNAP" ]]; then # Use the Linux Snap binaries
   solana_program() {
@@ -36,13 +57,18 @@ else
     declare features=""
     if [[ "$program" =~ ^(.*)-cuda$ ]]; then
       program=${BASH_REMATCH[1]}
-      features="--features=cuda,erasure"
+      features="--features=cuda"
     fi
     if [[ -z "$DEBUG" ]]; then
       maybe_release=--release
     fi
     printf "cargo run $maybe_release --bin solana-%s %s -- " "$program" "$features"
   }
+  if [[ -n $SOLANA_CUDA ]]; then
+    # Locate perf libs downloaded by |./fetch-perf-libs.sh|
+    LD_LIBRARY_PATH=$(cd "$here" && dirname "$PWD"):$LD_LIBRARY_PATH
+    export LD_LIBRARY_PATH
+  fi
 fi

 solana_client_demo=$(solana_program client-demo)
@@ -52,18 +78,75 @@ solana_fullnode=$(solana_program fullnode)
 solana_fullnode_config=$(solana_program fullnode-config)
 solana_fullnode_cuda=$(solana_program fullnode-cuda)
 solana_genesis=$(solana_program genesis)
-solana_mint=$(solana_program mint)
+solana_keygen=$(solana_program keygen)

 export RUST_LOG=${RUST_LOG:-solana=info} # if RUST_LOG is unset, default to info
 export RUST_BACKTRACE=1

+
+# The SOLANA_METRICS_CONFIG environment variable is formatted as a
+# comma-delimited list of parameters. All parameters are optional.
+#
+# Example:
+#   export SOLANA_METRICS_CONFIG="host=<metrics host>,db=<database name>,u=<username>,p=<password>"
+#
+configure_metrics() {
+  [[ -n $SOLANA_METRICS_CONFIG ]] || return
+
+  declare metrics_params
+  IFS=',' read -r -a metrics_params <<< "$SOLANA_METRICS_CONFIG"
+  for param in "${metrics_params[@]}"; do
+    IFS='=' read -r -a pair <<< "$param"
+    if [[ "${#pair[@]}" != 2 ]]; then
+      echo Error: invalid metrics parameter: "$param" >&2
+    else
+      declare name="${pair[0]}"
+      declare value="${pair[1]}"
+      case "$name" in
+      host)
+        export INFLUX_HOST="$value"
+        echo INFLUX_HOST="$INFLUX_HOST" >&2
+        ;;
+      db)
+        export INFLUX_DATABASE="$value"
+        echo INFLUX_DATABASE="$INFLUX_DATABASE" >&2
+        ;;
+      u)
+        export INFLUX_USERNAME="$value"
+        echo INFLUX_USERNAME="$INFLUX_USERNAME" >&2
+        ;;
+      p)
+        export INFLUX_PASSWORD="$value"
+        echo INFLUX_PASSWORD="********" >&2
+        ;;
+      *)
+        echo Error: Unknown metrics parameter name: "$name" >&2
+        ;;
+      esac
+    fi
+  done
+}
+configure_metrics
+
 tune_networking() {
-  [[ $(uname) = Linux ]] && (set -x; sudo sysctl -w net.core.rmem_max=26214400 1>/dev/null 2>/dev/null)
+  # Reference: https://medium.com/@CameronSparr/increase-os-udp-buffers-to-improve-performance-51d167bb1360
+  if [[ $(uname) = Linux ]]; then
+    (
+      set -x +e
+      # test the existence of the sysctls before trying to set them
+      # go ahead and return true and don't exit if these calls fail
+      sysctl net.core.rmem_max 2>/dev/null 1>/dev/null &&
+          sudo sysctl -w net.core.rmem_max=26214400 1>/dev/null 2>/dev/null
+
+      sysctl net.core.rmem_default 2>/dev/null 1>/dev/null &&
+          sudo sysctl -w net.core.rmem_default=26214400 1>/dev/null 2>/dev/null
+    ) || true
+  fi
 }

 SOLANA_CONFIG_DIR=${SNAP_DATA:-$PWD}/config
 SOLANA_CONFIG_PRIVATE_DIR=${SNAP_DATA:-$PWD}/config-private
-SOLANA_CONFIG_CLIENT_DIR=${SNAP_USER_DATA:-$PWD}/config-client-client
+SOLANA_CONFIG_CLIENT_DIR=${SNAP_USER_DATA:-$PWD}/config-client

 rsync_url() { # adds the 'rsync://` prefix to URLs that need it
   declare url="$1"
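To make `configure_metrics` concrete: given the comma-delimited value below (all credentials hypothetical), the function exports one `INFLUX_*` variable per `name=value` pair and echoes each to stderr, masking the password:

```bash
export SOLANA_METRICS_CONFIG="host=http://localhost:8086,db=testnet,u=scribe,p=topsecret"
# After configure_metrics runs:
#   INFLUX_HOST=http://localhost:8086
#   INFLUX_DATABASE=testnet
#   INFLUX_USERNAME=scribe
#   INFLUX_PASSWORD=topsecret  (echoed as ********)
```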
@@ -36,6 +36,7 @@ set -ex
 mkdir -p "$SOLANA_CONFIG_DIR"
 $rsync -vPz "$rsync_leader_url"/config/leader.json "$SOLANA_CONFIG_DIR"/

-# shellcheck disable=SC2086 # $solana_drone should not be quoted
-exec $solana_drone \
-    -l "$SOLANA_CONFIG_DIR"/leader.json < "$SOLANA_CONFIG_PRIVATE_DIR"/mint.json
+set -o pipefail
+$solana_drone \
+  -l "$SOLANA_CONFIG_DIR"/leader.json -k "$SOLANA_CONFIG_PRIVATE_DIR"/mint.json \
+  2>&1 | $drone_logger
multinode-demo/gce_multinode.sh (new Executable file, 80 lines)
@@ -0,0 +1,80 @@
#!/bin/bash

command=$1
prefix=
num_nodes=
out_file=
image_name="ubuntu-16-04-cuda-9-2-new"

shift

usage() {
  exitcode=0
  if [[ -n "$1" ]]; then
    exitcode=1
    echo "Error: $*"
  fi
  cat <<EOF
usage: $0 <create|delete> <-p prefix> <-n num_nodes> <-o file> [-i image-name]

Manage a GCE multinode network

 create|delete  - Create or delete the network
 -p prefix      - A common prefix for node names, to avoid collision
 -n num_nodes   - Number of nodes
 -o out_file    - Used for create option. Outputs an array of IP addresses
                  of new nodes to the file
 -i image_name  - Existing image on GCE (default $image_name)

EOF
  exit $exitcode
}

while getopts "h?p:i:n:o:" opt; do
  case $opt in
  h | \?)
    usage
    ;;
  p)
    prefix=$OPTARG
    ;;
  i)
    image_name=$OPTARG
    ;;
  o)
    out_file=$OPTARG
    ;;
  n)
    num_nodes=$OPTARG
    ;;
  *)
    usage "Error: unhandled option: $opt"
    ;;
  esac
done

set -e

[[ -n $command ]] || usage "Need a command (create|delete)"

[[ -n $prefix ]] || usage "Need a prefix for GCE instance names"

[[ -n $num_nodes ]] || usage "Need number of nodes"

nodes=()
for i in $(seq 1 "$num_nodes"); do
  nodes+=("$prefix$i")
done

if [[ $command == "create" ]]; then
  [[ -n $out_file ]] || usage "Need an outfile to store IP Addresses"

  ip_addr_list=$(gcloud beta compute instances create "${nodes[@]}" --zone=us-west1-b --tags=testnet \
    --image="$image_name" | awk '/RUNNING/ {print $5}')

  echo "ip_addr_array=($ip_addr_list)" >"$out_file"
elif [[ $command == "delete" ]]; then
  gcloud beta compute instances delete "${nodes[@]}"
else
  usage "Unknown command: $command"
fi
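A sketch of the intended create/delete flow (prefix, node count, and output file name hypothetical):

```bash
$ ./multinode-demo/gce_multinode.sh create -p mynet -n 3 -o ips.sh
$ source ips.sh && echo "${ip_addr_array[@]}"  # IPs of the new RUNNING instances
$ ./multinode-demo/gce_multinode.sh delete -p mynet -n 3
```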
@@ -25,9 +25,8 @@ fi

 tune_networking

-# shellcheck disable=SC2086 # $program should not be quoted
-exec $program \
-    -l "$SOLANA_CONFIG_DIR"/leader.json \
-    < <(shopt -s nullglob && cat "$SOLANA_CONFIG_DIR"/genesis.log \
-         "$SOLANA_CONFIG_DIR"/tx-*.log) \
-    > "$SOLANA_CONFIG_DIR"/tx-"$(date -u +%Y%m%d%H%M%S%N)".log
+set -xo pipefail
+$program \
+  --identity "$SOLANA_CONFIG_DIR"/leader.json \
+  --ledger "$SOLANA_CONFIG_DIR"/ledger.log \
+  2>&1 | $leader_logger
@@ -1,59 +0,0 @@ (file deleted)
#!/bin/bash

function myip()
{
  # shellcheck disable=SC2207
  declare ipaddrs=(
    # query interwebs
    $(curl -s ifconfig.co)
    # machine's interfaces
    $(ifconfig |
      awk '/inet addr:/ {gsub("addr:","",$2); print $2; next}
           /inet6 addr:/ {gsub("/.*", "", $3); print $3; next}
           /inet(6)? / {print $2}'
    )
  )

  if (( ! ${#ipaddrs[*]} ))
  then
    echo "
myip: error: I'm having trouble determining what our IP address is...
  Are we connected to a network?
"
    return 1
  fi

  declare prompt="
Please choose the IP address you want to advertise to the network:

 0) ${ipaddrs[0]} <====== this one was returned by the interwebs...
"

  for ((i=1; i < ${#ipaddrs[*]}; i++))
  do
    prompt+=" $i) ${ipaddrs[i]}
"
  done

  while read -r -p "${prompt}
please enter a number [0 for default]: " which
  do
    [[ -z ${which} ]] && break;
    [[ ${which} =~ [0-9]+ ]] && (( which < ${#ipaddrs[*]} )) && break;
    echo "Ug. invalid entry \"${which}\"...
"
    sleep 1
  done

  which=${which:-0}

  echo "${ipaddrs[which]}"
}

if [[ ${0} == "${BASH_SOURCE[0]}" ]]
then
  myip "$@"
fi
multinode-demo/remote_leader.sh (new Executable file, 14 lines)
@@ -0,0 +1,14 @@
#!/bin/bash -e

[[ -n $FORCE ]] || exit

chmod 600 ~/.ssh/authorized_keys ~/.ssh/id_rsa

PATH="$HOME"/.cargo/bin:"$PATH"

./fetch-perf-libs.sh

# Run setup
USE_INSTALL=1 ./multinode-demo/setup.sh -p
USE_INSTALL=1 SOLANA_CUDA=1 ./multinode-demo/leader.sh >leader.log 2>&1 &
USE_INSTALL=1 ./multinode-demo/drone.sh >drone.log 2>&1 &
multinode-demo/remote_nodes.sh (new Executable file, 185 lines)
@@ -0,0 +1,185 @@
#!/bin/bash

command=$1
ip_addr_file=
remote_user=
ssh_keys=

shift

usage() {
  exitcode=0
  if [[ -n "$1" ]]; then
    exitcode=1
    echo "Error: $*"
  fi
  cat <<EOF
usage: $0 <start|stop> <-f IP Addr Array file> <-u username> [-k ssh-keys]

Manage a GCE multinode network

 start|stop   - Create or delete the network
 -f file      - A bash script that exports an array of IP addresses, ip_addr_array.
                Elements of the array are public IP address of remote nodes.
 -u username  - The username for logging into remote nodes.
 -k ssh-keys  - Path to public/private key pair that remote nodes can use to perform
                rsync and ssh among themselves. Must contain pub, and priv keys.

EOF
  exit $exitcode
}

while getopts "h?f:u:k:" opt; do
  case $opt in
  h | \?)
    usage
    ;;
  f)
    ip_addr_file=$OPTARG
    ;;
  u)
    remote_user=$OPTARG
    ;;
  k)
    ssh_keys=$OPTARG
    ;;
  *)
    usage "Error: unhandled option: $opt"
    ;;
  esac
done

set -e

# Sample IP Address array file contents
# ip_addr_array=(192.168.1.1 192.168.1.5 192.168.2.2)

[[ -n $command ]] || usage "Need a command (start|stop)"
[[ -n $ip_addr_file ]] || usage "Need a file with IP address array"
[[ -n $remote_user ]] || usage "Need the username for remote nodes"

ip_addr_array=()
# Get IP address array
# shellcheck source=/dev/null
source "$ip_addr_file"

build_project() {
  echo "Build started at $(date)"
  SECONDS=0

  # Build and install locally
  PATH="$HOME"/.cargo/bin:"$PATH"
  cargo install --force

  echo "Build took $SECONDS seconds"
}

common_start_setup() {
  ip_addr=$1

  # Killing sshguard for now. TODO: Find a better solution
  # sshguard is blacklisting IP address after ssh-keyscan and ssh login attempts
  ssh "$remote_user@$ip_addr" " \
    set -ex; \
    sudo service sshguard stop; \
    sudo apt-get --assume-yes install rsync libssl-dev; \
    mkdir -p ~/.ssh ~/solana ~/.cargo/bin; \
  " >log/"$ip_addr".log

  # If provided, deploy SSH keys
  if [[ -n $ssh_keys ]]; then
    {
      rsync -vPrz "$ssh_keys"/id_rsa "$remote_user@$ip_addr":~/.ssh/
      rsync -vPrz "$ssh_keys"/id_rsa.pub "$remote_user@$ip_addr":~/.ssh/
      rsync -vPrz "$ssh_keys"/id_rsa.pub "$remote_user@$ip_addr":~/.ssh/authorized_keys
      rsync -vPrz ./multinode-demo "$remote_user@$ip_addr":~/solana/
    } >>log/"$ip_addr".log
  fi
}

start_leader() {
  common_start_setup "$1"

  {
    rsync -vPrz ~/.cargo/bin/solana* "$remote_user@$ip_addr":~/.cargo/bin/
    rsync -vPrz ./fetch-perf-libs.sh "$remote_user@$ip_addr":~/solana/
    ssh -n -f "$remote_user@$ip_addr" 'cd solana; FORCE=1 ./multinode-demo/remote_leader.sh'
  } >>log/"$1".log

  leader_ip=$1
  leader_time=$SECONDS
  SECONDS=0
}

start_validator() {
  common_start_setup "$1"

  ssh -n -f "$remote_user@$ip_addr" "cd solana; FORCE=1 ./multinode-demo/remote_validator.sh $leader_ip" >>log/"$1".log
}

start_all_nodes() {
  echo "Deployment started at $(date)"
  SECONDS=0
  count=0
  leader_ip=
  leader_time=

  mkdir -p log

  for ip_addr in "${ip_addr_array[@]}"; do
    if ((!count)); then
      # Start the leader on the first node
      echo "Leader node $ip_addr, killing previous instance and restarting"
      start_leader "$ip_addr"
    else
      # Start validator on all other nodes
      echo "Validator[$count] node $ip_addr, killing previous instance and restarting"
      start_validator "$ip_addr" &
      # TBD: Remove the sleep or reduce time once GCP login quota is increased
      sleep 2
    fi

    ((count = count + 1))
  done

  wait

  ((validator_count = count - 1))

  echo "Deployment finished at $(date)"
  echo "Leader deployment took $leader_time seconds"
  echo "$validator_count Validator deployment took $SECONDS seconds"
}

stop_all_nodes() {
  SECONDS=0
  local count=0
  for ip_addr in "${ip_addr_array[@]}"; do
    ssh-keygen -R "$ip_addr" >log/local.log
    ssh-keyscan "$ip_addr" >>~/.ssh/known_hosts 2>/dev/null

    echo "Stopping node[$count] $ip_addr. Remote user $remote_user"

    ssh -n -f "$remote_user@$ip_addr" " \
      set -ex; \
      sudo service sshguard stop; \
      pkill -9 solana-; \
      pkill -9 validator; \
      pkill -9 leader; \
    "
    sleep 2
    ((count = count + 1))
    echo "Stopped node[$count] $ip_addr"
  done
  echo "Stopping $count nodes took $SECONDS seconds"
}

if [[ $command == "start" ]]; then
  build_project
  stop_all_nodes
  start_all_nodes
elif [[ $command == "stop" ]]; then
  stop_all_nodes
else
  usage "Unknown command: $command"
fi
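Tying the pieces together, a plausible invocation consumes the IP file produced by `gce_multinode.sh` above (username and key path hypothetical):

```bash
$ ./multinode-demo/remote_nodes.sh start -f ips.sh -u ubuntu -k ~/testnet-keys
$ ./multinode-demo/remote_nodes.sh stop -f ips.sh -u ubuntu
```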
multinode-demo/remote_validator.sh (new Executable file, 17 lines)
@@ -0,0 +1,17 @@
#!/bin/bash -e

[[ -n $FORCE ]] || exit

chmod 600 ~/.ssh/authorized_keys ~/.ssh/id_rsa

PATH="$HOME"/.cargo/bin:"$PATH"

touch ~/.ssh/known_hosts
ssh-keygen -R "$1" 2>/dev/null
ssh-keyscan "$1" >>~/.ssh/known_hosts 2>/dev/null

rsync -vPrz "$1":~/.cargo/bin/solana* ~/.cargo/bin/

# Run setup
USE_INSTALL=1 ./multinode-demo/setup.sh -p
USE_INSTALL=1 ./multinode-demo/validator.sh "$1":~/solana "$1" >validator.log 2>&1
@@ -71,6 +71,9 @@ done

 leader_address_args=("$ip_address_arg")
 validator_address_args=("$ip_address_arg" -b 9000)
+leader_id_path="$SOLANA_CONFIG_PRIVATE_DIR"/leader-id.json
+validator_id_path="$SOLANA_CONFIG_PRIVATE_DIR"/validator-id.json
+mint_path="$SOLANA_CONFIG_PRIVATE_DIR"/mint.json

 set -e

@@ -78,25 +81,27 @@ echo "Cleaning $SOLANA_CONFIG_DIR"
 rm -rvf "$SOLANA_CONFIG_DIR"
 mkdir -p "$SOLANA_CONFIG_DIR"

-rm -rvf "$SOLANA_CONFIG_PRIVATE_DIR"
-mkdir -p "$SOLANA_CONFIG_PRIVATE_DIR"
+$solana_keygen -o "$leader_id_path"
+$solana_keygen -o "$validator_id_path"

 if $node_type_leader; then
+  rm -rvf "$SOLANA_CONFIG_PRIVATE_DIR"
+  mkdir -p "$SOLANA_CONFIG_PRIVATE_DIR"
+  echo "Creating $mint_path with $num_tokens tokens"
+  $solana_keygen -o "$mint_path"

-  echo "Creating $SOLANA_CONFIG_DIR/mint.json with $num_tokens tokens"
-  $solana_mint <<<"$num_tokens" > "$SOLANA_CONFIG_PRIVATE_DIR"/mint.json
-
-  echo "Creating $SOLANA_CONFIG_DIR/genesis.log"
-  $solana_genesis < "$SOLANA_CONFIG_PRIVATE_DIR"/mint.json > "$SOLANA_CONFIG_DIR"/genesis.log
+  echo "Creating $SOLANA_CONFIG_DIR/ledger.log"
+  $solana_genesis --tokens="$num_tokens" < "$mint_path" > "$SOLANA_CONFIG_DIR"/ledger.log

   echo "Creating $SOLANA_CONFIG_DIR/leader.json"
-  $solana_fullnode_config "${leader_address_args[@]}" > "$SOLANA_CONFIG_DIR"/leader.json
+  $solana_fullnode_config --keypair="$leader_id_path" "${leader_address_args[@]}" > "$SOLANA_CONFIG_DIR"/leader.json
 fi


 if $node_type_validator; then
   echo "Creating $SOLANA_CONFIG_DIR/validator.json"
-  $solana_fullnode_config "${validator_address_args[@]}" > "$SOLANA_CONFIG_DIR"/validator.json
+  $solana_fullnode_config --keypair="$validator_id_path" "${validator_address_args[@]}" > "$SOLANA_CONFIG_DIR"/validator.json
 fi

 ls -lh "$SOLANA_CONFIG_DIR"/
@@ -1,80 +0,0 @@
#!/bin/bash

ip_addr_file=$1
remote_user=$2
ssh_keys=$3

usage() {
  echo -e "\\tUsage: $0 <IP Address array> <username> [path to ssh keys]\\n"
  echo -e "\\t <IP Address array>: A bash script that exports an array of IP addresses, ip_addr_array. Elements of the array are public IP addresses of remote nodes."
  echo -e "\\t <username>: The username for logging into remote nodes."
  echo -e "\\t [path to ssh keys]: The public/private key pair that remote nodes can use to perform rsync and ssh among themselves. Must contain pub, priv and authorized_keys.\\n"
}

# Sample IP Address array file contents
# ip_addr_array=(192.168.1.1 192.168.1.5 192.168.2.2)

if [[ -z "$ip_addr_file" ]]; then
  usage
  exit 1
fi

if [[ -z "$remote_user" ]]; then
  usage
  exit 1
fi

# Build and install locally
PATH="$HOME"/.cargo/bin:"$PATH"
cargo install --force

ip_addr_array=()
# Get IP address array
# shellcheck source=/dev/null
source "$ip_addr_file"

# shellcheck disable=SC2089,SC2016
ssh_command_prefix='export PATH="$HOME/.cargo/bin:$PATH"; cd solana; USE_INSTALL=1 ./multinode-demo/'

count=0
leader=
for ip_addr in "${ip_addr_array[@]}"; do
  echo "$ip_addr"

  # Deploy build and scripts to remote node
  rsync -r -av ~/.cargo/bin "$remote_user"@"$ip_addr":~/.cargo
  rsync -r -av ./multinode-demo "$remote_user"@"$ip_addr":~/solana/

  # If provided, deploy SSH keys
  if [[ -z $ssh_keys ]]; then
    echo "skip copying the ssh keys"
  else
    rsync -r -av "$ssh_keys"/* "$remote_user"@"$ip_addr":~/.ssh/
  fi

  # Stop current nodes
  ssh "$remote_user"@"$ip_addr" 'pkill -9 solana-fullnode'
  ssh "$remote_user"@"$ip_addr" 'pkill -9 solana-client-demo'

  # Run setup
  ssh "$remote_user"@"$ip_addr" "$ssh_command_prefix"'setup.sh -p "$ip_addr"'

  if (( !count )); then
    # Start the leader on the first node
    echo "Starting leader node $ip_addr"
    ssh -n -f "$remote_user"@"$ip_addr" "$ssh_command_prefix"'leader.sh > leader.log 2>&1'
    leader=${ip_addr_array[0]}
  else
    # Start validator on all other nodes
    echo "Starting validator node $ip_addr"
    ssh -n -f "$remote_user"@"$ip_addr" "$ssh_command_prefix""validator.sh $remote_user@$leader:~/solana $leader > validator.log 2>&1"
  fi

  (( count++ ))

  if (( count == ${#ip_addr_array[@]} )); then
    # Launch client demo on the last node
    echo "Starting client demo on $ip_addr"
    ssh -n -f "$remote_user"@"$ip_addr" "$ssh_command_prefix""client.sh $remote_user@$leader:~/solana $count > client.log 2>&1"
  fi
done
@@ -6,7 +6,13 @@
here=$(dirname "$0")
cd "$here"

wallet="../wallet.sh $1"
if [[ -n "$USE_SNAP" ]]; then
  # TODO: Merge wallet.sh functionality into solana-wallet proper and
  # remove this USE_SNAP case
  wallet="solana.wallet $1"
else
  wallet="../wallet.sh $1"
fi

# Tokens transferred to this address are lost forever...
garbage_address=vS3ngn1TfQmpsW1Z4NkLuqNAQFF3dYQw8UZ6TCx9bmq

@@ -65,16 +65,32 @@ fi

rsync_leader_url=$(rsync_url "$leader")

set -ex
SOLANA_LEADER_CONFIG_DIR="$SOLANA_CONFIG_DIR"/leader-config
rm -rf "$SOLANA_LEADER_CONFIG_DIR"
$rsync -vPrz "$rsync_leader_url"/config/ "$SOLANA_LEADER_CONFIG_DIR"
ls -lh "$SOLANA_LEADER_CONFIG_DIR"

tune_networking

# shellcheck disable=SC2086 # $program should not be quoted
exec $program \
  -l "$SOLANA_CONFIG_DIR"/validator.json -t "$leader_address:$leader_port" \
  < <(shopt -s nullglob && cat "$SOLANA_LEADER_CONFIG_DIR"/genesis.log \
    "$SOLANA_LEADER_CONFIG_DIR"/tx-*.log)
SOLANA_LEADER_CONFIG_DIR="$SOLANA_CONFIG_DIR"/leader-config
rm -rf "$SOLANA_LEADER_CONFIG_DIR"
set -ex
$rsync -vPrz "$rsync_leader_url"/config/ "$SOLANA_LEADER_CONFIG_DIR"

# migrate from old ledger format? why not...
if [[ ! -f "$SOLANA_LEADER_CONFIG_DIR"/ledger.log &&
      -f "$SOLANA_LEADER_CONFIG_DIR"/genesis.log ]]; then
  (shopt -s nullglob &&
    cat "$SOLANA_LEADER_CONFIG_DIR"/genesis.log \
      "$SOLANA_LEADER_CONFIG_DIR"/tx-*.log) > "$SOLANA_LEADER_CONFIG_DIR"/ledger.log
fi

# Ensure the validator has at least 1 token before connecting to the network
# TODO: Remove this workaround
while ! $solana_wallet \
  -l "$SOLANA_LEADER_CONFIG_DIR"/leader.json \
  -k "$SOLANA_CONFIG_PRIVATE_DIR"/validator-id.json airdrop --tokens 1; do
  sleep 1
done

set -o pipefail
$program \
  --identity "$SOLANA_CONFIG_DIR"/validator.json \
  --testnet "$leader_address:$leader_port" \
  --ledger "$SOLANA_LEADER_CONFIG_DIR"/ledger.log \
  2>&1 | $validator_logger

@@ -30,18 +30,16 @@ rsync_leader_url=$(rsync_url "$leader")
set -e
mkdir -p "$SOLANA_CONFIG_CLIENT_DIR"
if [[ ! -r "$SOLANA_CONFIG_CLIENT_DIR"/leader.json ]]; then
  (
    set -x
    $rsync -vPz "$rsync_leader_url"/config/leader.json "$SOLANA_CONFIG_CLIENT_DIR"/
  )
  echo "Fetching leader configuration from $rsync_leader_url"
  $rsync -Pz "$rsync_leader_url"/config/leader.json "$SOLANA_CONFIG_CLIENT_DIR"/
fi

client_json="$SOLANA_CONFIG_CLIENT_DIR"/client.json
if [[ ! -r $client_json ]]; then
  $solana_mint <<<0 > "$client_json"
client_id_path="$SOLANA_CONFIG_CLIENT_DIR"/id.json
if [[ ! -r $client_id_path ]]; then
  echo "Generating client identity: $client_id_path"
  $solana_keygen -o "$client_id_path"
fi

set -x
# shellcheck disable=SC2086 # $solana_wallet should not be quoted
exec $solana_wallet \
  -l "$SOLANA_CONFIG_CLIENT_DIR"/leader.json -m "$client_json" "$@"
  -l "$SOLANA_CONFIG_CLIENT_DIR"/leader.json -k "$client_id_path" "$@"

@@ -37,40 +37,65 @@ apps:
    plugs:
      - network
      - network-bind
      - home
  genesis:
    command: solana-genesis
  mint:
    command: solana-mint
  keygen:
    command: solana-keygen
    plugs:
      - home
  client-demo:
    command: solana-client-demo
    # TODO: Merge client.sh functionality into solana-client-demo proper
    command: client.sh
    #command: solana-client-demo
    plugs:
      - network
      - network-bind
      - home
  wallet:
    # TODO: Merge wallet.sh functionality into solana-wallet proper
    command: wallet.sh
    #command: solana-wallet

    plugs:
      - network
      - home
  daemon-validator:
    daemon: simple
    command: validator.sh

    plugs:
      - network
      - network-bind
  daemon-leader:
    daemon: simple
    command: leader.sh

    plugs:
      - network
      - network-bind
  daemon-drone:
    daemon: simple
    command: drone.sh
    plugs:
      - network
      - network-bind

parts:
  solana:
    plugin: nil
    prime:
      - bin
      - usr/lib/libgf_complete.so.1
      - usr/lib/libJerasure.so.2
      - usr/lib
    override-build: |
      # Install CUDA 9.2 runtime
      mkdir -p $SNAPCRAFT_PART_INSTALL/usr/
      cp -rav /usr/local/cuda-9.2/targets/x86_64-linux/lib/ $SNAPCRAFT_PART_INSTALL/usr/lib
      mkdir -p $SNAPCRAFT_PART_INSTALL/usr/lib/x86_64-linux-gnu/
      cp -rav /usr/lib/x86_64-linux-gnu/libcuda.* $SNAPCRAFT_PART_INSTALL/usr/lib/x86_64-linux-gnu/
      mkdir -p $SNAPCRAFT_PART_INSTALL/usr/lib/nvidia-396/
      cp -v /usr/lib/nvidia-396/libnvidia-fatbinaryloader.so* $SNAPCRAFT_PART_INSTALL/usr/lib/nvidia-396/

      # Build/install solana-fullnode-cuda
      ./fetch-perf-libs.sh
      cargo install --features=cuda,erasure --root $SNAPCRAFT_PART_INSTALL --bin solana-fullnode
      cargo install --features=cuda --root $SNAPCRAFT_PART_INSTALL --bin solana-fullnode
      mv $SNAPCRAFT_PART_INSTALL/bin/solana-fullnode $SNAPCRAFT_PART_INSTALL
      rm -rf $SNAPCRAFT_PART_INSTALL/bin/*
      mv $SNAPCRAFT_PART_INSTALL/solana-fullnode $SNAPCRAFT_PART_INSTALL/bin/solana-fullnode-cuda
@@ -85,8 +110,9 @@ parts:
      mkdir -p $SNAPCRAFT_PART_INSTALL/bin
      cp -av multinode-demo/* $SNAPCRAFT_PART_INSTALL/bin/

      # TODO: build rsync from source instead of sneaking it in from the host
      # TODO: build rsync/multilog from source instead of sneaking it in from the host
      # system...
      set -x
      mkdir -p $SNAPCRAFT_PART_INSTALL/bin
      cp -av /usr/bin/rsync $SNAPCRAFT_PART_INSTALL/bin/
      cp -av /usr/bin/multilog $SNAPCRAFT_PART_INSTALL/bin/

350 src/bank.rs
@@ -6,6 +6,7 @@
extern crate libc;

use chrono::prelude::*;
use counter::Counter;
use entry::Entry;
use hash::Hash;
use itertools::Itertools;
@@ -19,6 +20,7 @@ use std::result;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::RwLock;
use std::time::Instant;
use streamer::WINDOW_SIZE;
use timing::duration_as_us;
use transaction::{Instruction, Plan, Transaction};

@@ -81,16 +83,6 @@ pub struct Bank {
    /// reject transactions with signatures it's seen before
    last_ids_sigs: RwLock<HashMap<Hash, HashSet<Signature>>>,

    /// The set of trusted timekeepers. A Timestamp transaction from a `PublicKey`
    /// outside this set will be discarded. Note that if validators do not have the
    /// same set as leaders, they may interpret the ledger differently.
    time_sources: RwLock<HashSet<PublicKey>>,

    /// The most recent timestamp from a trusted timekeeper. This timestamp is applied
    /// to every smart contract when it enters the system. If it is waiting on a
    /// timestamp witness before that timestamp, the bank will execute it immediately.
    last_time: RwLock<DateTime<Utc>>,

    /// The number of transactions the bank has processed without error since the
    /// start of the ledger.
    transaction_count: AtomicUsize,
@@ -103,8 +95,6 @@ impl Default for Bank {
            pending: RwLock::new(HashMap::new()),
            last_ids: RwLock::new(VecDeque::new()),
            last_ids_sigs: RwLock::new(HashMap::new()),
            time_sources: RwLock::new(HashSet::new()),
            last_time: RwLock::new(Utc.timestamp(0, 0)),
            transaction_count: AtomicUsize::new(0),
        }
    }
@@ -114,7 +104,7 @@ impl Bank {
    /// Create a Bank using a deposit.
    pub fn new_from_deposit(deposit: &Payment) -> Self {
        let bank = Self::default();
        bank.apply_payment(deposit);
        bank.apply_payment(deposit, &mut bank.balances.write().unwrap());
        bank
    }

@@ -130,13 +120,8 @@ impl Bank {
    }

    /// Commit funds to the `payment.to` party.
    fn apply_payment(&self, payment: &Payment) {
        let mut balances = self.balances.write().unwrap();
        if balances.contains_key(&payment.to) {
            *balances.get_mut(&payment.to).unwrap() += payment.tokens;
        } else {
            balances.insert(payment.to, payment.tokens);
        }
    fn apply_payment(&self, payment: &Payment, balances: &mut HashMap<PublicKey, i64>) {
        *balances.entry(payment.to).or_insert(0) += payment.tokens;
    }

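The rewritten apply_payment leans on HashMap's entry API, which inserts a zero the first time a key is seen and then accumulates in place, collapsing the old contains_key/get_mut/insert branch into one line. A minimal, self-contained sketch of that pattern (the u64 keys and i64 token values here are stand-ins, not Solana's types):

use std::collections::HashMap;

// Credit `tokens` to account `to`, creating the balance entry on first use.
fn credit(balances: &mut HashMap<u64, i64>, to: u64, tokens: i64) {
    *balances.entry(to).or_insert(0) += tokens;
}

fn main() {
    let mut balances = HashMap::new();
    credit(&mut balances, 42, 10);
    credit(&mut balances, 42, 5);
    assert_eq!(balances[&42], 15); // both credits accumulated into one entry
}
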
    /// Return the last entry ID registered.
@@ -174,6 +159,13 @@
        }
    }

    /// Forget all signatures. Useful for benchmarking.
    pub fn clear_signatures(&self) {
        for (_, sigs) in self.last_ids_sigs.write().unwrap().iter_mut() {
            sigs.clear();
        }
    }

    fn reserve_signature_with_last_id(&self, signature: &Signature, last_id: &Hash) -> Result<()> {
        if let Some(entry) = self.last_ids_sigs
            .write()
@@ -206,12 +198,16 @@ impl Bank {

    /// Deduct tokens from the 'from' address if the account has sufficient
    /// funds and isn't a duplicate.
    fn apply_debits(&self, tx: &Transaction) -> Result<()> {
        let mut bals = self.balances.write().unwrap();
    fn apply_debits(&self, tx: &Transaction, bals: &mut HashMap<PublicKey, i64>) -> Result<()> {
        let mut purge = false;
        {
            let option = bals.get_mut(&tx.from);
            if option.is_none() {
                if let Instruction::NewVote(_) = &tx.instruction {
                    inc_new_counter!("bank-appy_debits-vote_account_not_found", 1);
                } else {
                    inc_new_counter!("bank-appy_debits-generic_account_not_found", 1);
                }
                return Err(BankError::AccountNotFound(tx.from));
            }
            let bal = option.unwrap();
@@ -243,16 +239,12 @@ impl Bank {

    /// Apply only a transaction's credits. Credits from multiple transactions
    /// may safely be applied in parallel.
    fn apply_credits(&self, tx: &Transaction) {
    fn apply_credits(&self, tx: &Transaction, balances: &mut HashMap<PublicKey, i64>) {
        match &tx.instruction {
            Instruction::NewContract(contract) => {
                let mut plan = contract.plan.clone();
                plan.apply_witness(&Witness::Timestamp(*self.last_time
                    .read()
                    .expect("timestamp creation in apply_credits")));

                let plan = contract.plan.clone();
                if let Some(payment) = plan.final_payment() {
                    self.apply_payment(&payment);
                    self.apply_payment(&payment, balances);
                } else {
                    let mut pending = self.pending
                        .write()
@@ -266,14 +258,19 @@ impl Bank {
            Instruction::ApplySignature(tx_sig) => {
                let _ = self.apply_signature(tx.from, *tx_sig);
            }
            Instruction::NewVote(_vote) => {
                info!("GOT VOTE!");
                // TODO: record the vote in the stake table...
            }
        }
    }

    /// Process a Transaction. If it contains a payment plan that requires a witness
    /// to progress, the payment plan will be stored in the bank.
    fn process_transaction(&self, tx: &Transaction) -> Result<()> {
        self.apply_debits(tx)?;
        self.apply_credits(tx);
    pub fn process_transaction(&self, tx: &Transaction) -> Result<()> {
        let bals = &mut self.balances.write().unwrap();
        self.apply_debits(tx, bals)?;
        self.apply_credits(tx, bals);
        self.transaction_count.fetch_add(1, Ordering::Relaxed);
        Ok(())
    }
@@ -281,11 +278,12 @@ impl Bank {
    /// Process a batch of transactions.
    #[must_use]
    pub fn process_transactions(&self, txs: Vec<Transaction>) -> Vec<Result<Transaction>> {
        let bals = &mut self.balances.write().unwrap();
        debug!("processing Transactions {}", txs.len());
        let txs_len = txs.len();
        let now = Instant::now();
        let results: Vec<_> = txs.into_iter()
            .map(|tx| self.apply_debits(&tx).map(|_| tx))
            .map(|tx| self.apply_debits(&tx, bals).map(|_| tx))
            .collect(); // Calling collect() here forces all debits to complete before moving on.

        let debits = now.elapsed();
@@ -295,7 +293,7 @@ impl Bank {
            .into_iter()
            .map(|result| {
                result.map(|tx| {
                    self.apply_credits(&tx);
                    self.apply_credits(&tx, bals);
                    tx
                })
            })
@@ -321,32 +319,60 @@ impl Bank {
        res
    }

    fn process_entry(&self, entry: Entry) -> Result<()> {
        if !entry.transactions.is_empty() {
            for result in self.process_transactions(entry.transactions) {
                result?;
            }
        }
        if !entry.has_more {
            self.register_entry_id(&entry.id);
        }
        Ok(())
    }

    /// Process an ordered list of entries, populating a circular buffer "tail"
    /// as we go.
    fn process_entries_tail(
        &self,
        entries: Vec<Entry>,
        tail: &mut Vec<Entry>,
        tail_idx: &mut usize,
    ) -> Result<u64> {
        let mut entry_count = 0;

        for entry in entries {
            if tail.len() > *tail_idx {
                tail[*tail_idx] = entry.clone();
            } else {
                tail.push(entry.clone());
            }
            *tail_idx = (*tail_idx + 1) % WINDOW_SIZE as usize;

            entry_count += 1;
            self.process_entry(entry)?;
        }

        Ok(entry_count)
    }

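process_entries_tail keeps only the most recent WINDOW_SIZE entries by writing into a fixed-capacity vector with a wrapping index: it appends while the vector is still filling, then overwrites the oldest slot once it is full. A stripped-down sketch of the same bookkeeping, with a small hypothetical WINDOW constant and u64 payloads standing in for entries:

// Push into a ring buffer: append while filling, overwrite once full.
const WINDOW: usize = 4;

fn push_ring(tail: &mut Vec<u64>, tail_idx: &mut usize, item: u64) {
    if tail.len() > *tail_idx {
        tail[*tail_idx] = item; // overwrite the oldest slot
    } else {
        tail.push(item); // still filling up
    }
    *tail_idx = (*tail_idx + 1) % WINDOW;
}

fn main() {
    let (mut tail, mut idx) = (Vec::with_capacity(WINDOW), 0);
    for i in 0..6 {
        push_ring(&mut tail, &mut idx, i);
    }
    assert_eq!(tail, vec![4, 5, 2, 3]); // slots 0 and 1 were overwritten
    assert_eq!(idx, 2); // next slot to overwrite
}

The rotate_vector helper added further down exists to undo exactly this wrap: it restores chronological order before process_ledger hands the tail back.
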
    /// Process an ordered list of entries.
    pub fn process_entries<I>(&self, entries: I) -> Result<u64>
    where
        I: IntoIterator<Item = Entry>,
    {
    pub fn process_entries(&self, entries: Vec<Entry>) -> Result<u64> {
        let mut entry_count = 0;
        for entry in entries {
            entry_count += 1;

            if !entry.transactions.is_empty() {
                for result in self.process_transactions(entry.transactions) {
                    result?;
                }
            }
            // TODO: verify this is ok in cases like:
            //  1. an untrusted genesis or tx-<DATE>.log
            //  2. a crazy leader..
            if !entry.has_more {
                self.register_entry_id(&entry.id);
            }
            self.process_entry(entry)?;
        }
        Ok(entry_count)
    }

    /// Append entry blocks to the ledger, verifying them along the way.
    pub fn process_blocks<I>(&self, entries: I) -> Result<u64>
    fn process_blocks<I>(
        &self,
        entries: I,
        tail: &mut Vec<Entry>,
        tail_idx: &mut usize,
    ) -> Result<u64>
    where
        I: IntoIterator<Item = Entry>,
    {
@@ -358,13 +384,13 @@ impl Bank {
            if !block.verify(&self.last_id()) {
                return Err(BankError::LedgerVerificationFailed);
            }
            entry_count += self.process_entries(block)?;
            entry_count += self.process_entries_tail(block, tail, tail_idx)?;
        }
        Ok(entry_count)
    }

    /// Process a full ledger.
    pub fn process_ledger<I>(&self, entries: I) -> Result<u64>
    pub fn process_ledger<I>(&self, entries: I) -> Result<(u64, Vec<Entry>)>
    where
        I: IntoIterator<Item = Entry>,
    {
@@ -380,20 +406,33 @@ impl Bank {
        let entry1 = entries
            .next()
            .expect("invalid ledger: need at least 2 entries");
        let tx = &entry1.transactions[0];
        let deposit = if let Instruction::NewContract(contract) = &tx.instruction {
            contract.plan.final_payment()
        } else {
            None
        }.expect("invalid ledger, needs to start with a contract");
        {
            let tx = &entry1.transactions[0];
            let deposit = if let Instruction::NewContract(contract) = &tx.instruction {
                contract.plan.final_payment()
            } else {
                None
            }.expect("invalid ledger, needs to start with a contract");

            self.apply_payment(&deposit);
            self.apply_payment(&deposit, &mut self.balances.write().unwrap());
        }
        self.register_entry_id(&entry0.id);
        self.register_entry_id(&entry1.id);

        let mut entry_count = 2;
        entry_count += self.process_blocks(entries)?;
        Ok(entry_count)
        let mut tail = Vec::with_capacity(WINDOW_SIZE as usize);
        tail.push(entry0);
        tail.push(entry1);
        let mut tail_idx = 2;
        let entry_count = 2 + self.process_blocks(entries, &mut tail, &mut tail_idx)?;

        // check if we need to rotate tail
        let tail = if tail.len() == WINDOW_SIZE as usize {
            rotate_vector(tail, tail_idx)
        } else {
            tail
        };

        Ok((entry_count, tail))
    }

    /// Process a Witness Signature. Any payment plans waiting on this signature
@@ -404,9 +443,9 @@ impl Bank {
            .expect("write() in apply_signature")
            .entry(tx_sig)
        {
            e.get_mut().apply_witness(&Witness::Signature(from));
            e.get_mut().apply_witness(&Witness::Signature, &from);
            if let Some(payment) = e.get().final_payment() {
                self.apply_payment(&payment);
                self.apply_payment(&payment, &mut self.balances.write().unwrap());
                e.remove_entry();
            }
        };
@@ -417,31 +456,6 @@ impl Bank {
    /// Process a Witness Timestamp. Any payment plans waiting on this timestamp
    /// will progress one step.
    fn apply_timestamp(&self, from: PublicKey, dt: DateTime<Utc>) -> Result<()> {
        // If this is the first timestamp we've seen, it probably came from the genesis block,
        // so we'll trust it.
        if *self.last_time
            .read()
            .expect("'last_time' read lock on first timestamp check")
            == Utc.timestamp(0, 0)
        {
            self.time_sources
                .write()
                .expect("'time_sources' write lock on first timestamp")
                .insert(from);
        }

        if self.time_sources
            .read()
            .expect("'time_sources' read lock")
            .contains(&from)
        {
            if dt > *self.last_time.read().expect("'last_time' read lock") {
                *self.last_time.write().expect("'last_time' write lock") = dt;
            }
        } else {
            return Ok(());
        }

        // Check to see if any timelocked transactions can be completed.
        let mut completed = vec![];

@@ -451,11 +465,9 @@ impl Bank {
            .write()
            .expect("'pending' write lock in apply_timestamp");
        for (key, plan) in pending.iter_mut() {
            plan.apply_witness(&Witness::Timestamp(*self.last_time
                .read()
                .expect("'last_time' read lock when creating timestamp")));
            plan.apply_witness(&Witness::Timestamp(dt), &from);
            if let Some(payment) = plan.final_payment() {
                self.apply_payment(&payment);
                self.apply_payment(&payment, &mut self.balances.write().unwrap());
                completed.push(key.clone());
            }
        }
@@ -501,7 +513,7 @@ impl Bank {
        let bals = self.balances
            .read()
            .expect("'balances' read lock in get_balance");
        bals.get(pubkey).map(|x| *x).unwrap_or(0)
        bals.get(pubkey).cloned().unwrap_or(0)
    }

    pub fn transaction_count(&self) -> usize {
@@ -521,16 +533,27 @@ impl Bank {
    }
}

fn rotate_vector<T: Clone>(v: Vec<T>, at: usize) -> Vec<T> {
    if at != 0 {
        let mut ret = Vec::with_capacity(v.len());
        ret.extend_from_slice(&v[at..]);
        ret.extend_from_slice(&v[0..at]);
        ret
    } else {
        v
    }
}

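rotate_vector rebuilds the buffer by copying the two halves into a fresh allocation. For what it's worth, the standard library's slice::rotate_left performs the same reordering in place; a hedged alternative sketch (not what the commit does):

// In-place equivalent of rotate_vector, using std's slice rotation.
fn rotate_in_place<T>(mut v: Vec<T>, at: usize) -> Vec<T> {
    v.rotate_left(at); // the element at index `at` becomes the first
    v
}

fn main() {
    assert_eq!(rotate_in_place(vec![4, 1, 2, 3], 1), vec![1, 2, 3, 4]);
    assert_eq!(rotate_in_place(vec![1, 2, 3, 4], 0), vec![1, 2, 3, 4]);
}

This variant also drops the T: Clone bound, since nothing is copied.
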
#[cfg(test)]
mod tests {
    use super::*;
    use bincode::serialize;
    use entry::next_entry;
    use entry::Entry;
    use entry_writer::{self, EntryWriter};
    use hash::hash;
    use ledger::next_entries;
    use signature::KeyPairUtil;
    use std::io::{BufRead, BufReader, Cursor, Seek, SeekFrom};
    use std::io::{BufReader, Cursor, Seek, SeekFrom};

    #[test]
    fn test_two_payments_to_one_party() {
@@ -634,22 +657,6 @@ mod tests {
        assert_ne!(bank.get_balance(&pubkey), 2);
    }

    #[test]
    fn test_transfer_after_date() {
        let mint = Mint::new(1);
        let bank = Bank::new(&mint);
        let pubkey = KeyPair::new().pubkey();
        let dt = Utc::now();
        bank.apply_timestamp(mint.pubkey(), dt).unwrap();

        // It's now past now, so this transfer should be processed immediately.
        bank.transfer_on_date(1, &mint.keypair(), pubkey, dt, mint.last_id())
            .unwrap();

        assert_eq!(bank.get_balance(&mint.pubkey()), 0);
        assert_eq!(bank.get_balance(&pubkey), 1);
    }

    #[test]
    fn test_cancel_transfer() {
        let mint = Mint::new(1);
@@ -779,25 +786,59 @@ mod tests {
        assert_eq!(bank.get_balance(&mint.pubkey()), 1);
    }

    fn create_sample_block(mint: &Mint) -> impl Iterator<Item = Entry> {
        let keypair = KeyPair::new();
        let tx = Transaction::new(&mint.keypair(), keypair.pubkey(), 1, mint.last_id());
        next_entries(&mint.last_id(), 0, vec![tx]).into_iter()
    fn create_sample_block(mint: &Mint, length: usize) -> impl Iterator<Item = Entry> {
        let mut entries = Vec::with_capacity(length);
        let mut hash = mint.last_id();
        let mut cur_hashes = 0;
        for _ in 0..length {
            let keypair = KeyPair::new();
            let tx = Transaction::new(&mint.keypair(), keypair.pubkey(), 1, hash);
            let entry = Entry::new_mut(&mut hash, &mut cur_hashes, vec![tx], false);
            entries.push(entry);
        }
        entries.into_iter()
    }

    fn create_sample_ledger() -> (impl Iterator<Item = Entry>, PublicKey) {
        let mint = Mint::new(2);
    fn create_sample_ledger(length: usize) -> (impl Iterator<Item = Entry>, PublicKey) {
        let mint = Mint::new(1 + length as i64);
        let genesis = mint.create_entries();
        let block = create_sample_block(&mint);
        let block = create_sample_block(&mint, length);
        (genesis.into_iter().chain(block), mint.pubkey())
    }

    #[test]
    fn test_process_ledger() {
        let (ledger, pubkey) = create_sample_ledger();
        let (ledger, pubkey) = create_sample_ledger(1);
        let (ledger, dup) = ledger.tee();
        let bank = Bank::default();
        bank.process_ledger(ledger).unwrap();
        let (ledger_height, tail) = bank.process_ledger(ledger).unwrap();
        assert_eq!(bank.get_balance(&pubkey), 1);
        assert_eq!(ledger_height, 3);
        assert_eq!(tail.len(), 3);
        assert_eq!(tail, dup.collect_vec());
        let last_entry = &tail[tail.len() - 1];
        assert_eq!(bank.last_id(), last_entry.id);
    }

    #[test]
    fn test_process_ledger_around_window_size() {
        // TODO: put me back in when Criterion is up
        // for _ in 0..10 {
        //     let (ledger, _) = create_sample_ledger(WINDOW_SIZE as usize);
        //     let bank = Bank::default();
        //     let (_, _) = bank.process_ledger(ledger).unwrap();
        // }

        let window_size = WINDOW_SIZE as usize;
        for entry_count in window_size - 3..window_size + 2 {
            let (ledger, pubkey) = create_sample_ledger(entry_count);
            let bank = Bank::default();
            let (ledger_height, tail) = bank.process_ledger(ledger).unwrap();
            assert_eq!(bank.get_balance(&pubkey), 1);
            assert_eq!(ledger_height, entry_count as u64 + 2);
            assert!(tail.len() <= window_size);
            let last_entry = &tail[tail.len() - 1];
            assert_eq!(bank.last_id(), last_entry.id);
        }
    }

    // Write the given entries to a file and then return a file iterator to them.
@@ -807,14 +848,12 @@ mod tests {
        file.seek(SeekFrom::Start(0)).unwrap();

        let reader = BufReader::new(file);
        reader
            .lines()
            .map(|line| entry_writer::read_entry(line.unwrap()).unwrap())
        entry_writer::read_entries(reader).map(|x| x.unwrap())
    }

    #[test]
    fn test_process_ledger_from_file() {
        let (ledger, pubkey) = create_sample_ledger();
        let (ledger, pubkey) = create_sample_ledger(1);
        let ledger = to_file_iter(ledger);

        let bank = Bank::default();
@@ -826,60 +865,21 @@ mod tests {
    fn test_process_ledger_from_files() {
        let mint = Mint::new(2);
        let genesis = to_file_iter(mint.create_entries().into_iter());
        let block = to_file_iter(create_sample_block(&mint));
        let block = to_file_iter(create_sample_block(&mint, 1));

        let bank = Bank::default();
        bank.process_ledger(genesis.chain(block)).unwrap();
        assert_eq!(bank.get_balance(&mint.pubkey()), 1);
    }
}

#[cfg(all(feature = "unstable", test))]
mod bench {
    extern crate test;
    use self::test::Bencher;
    use bank::*;
    use bincode::serialize;
    use hash::hash;
    use rayon::prelude::*;
    use signature::KeyPairUtil;
    #[test]
    fn test_rotate_vector() {
        let expect = vec![1, 2, 3, 4];

    #[bench]
    fn bench_process_transaction(bencher: &mut Bencher) {
        let mint = Mint::new(100_000_000);
        let bank = Bank::new(&mint);
        // Create transactions between unrelated parties.
        let transactions: Vec<_> = (0..4096)
            .into_par_iter()
            .map(|i| {
                // Seed the 'from' account.
                let rando0 = KeyPair::new();
                let tx = Transaction::new(&mint.keypair(), rando0.pubkey(), 1_000, mint.last_id());
                bank.process_transaction(&tx).unwrap();

                // Seed the 'to' account and a cell for its signature.
                let last_id = hash(&serialize(&i).unwrap()); // Unique hash
                bank.register_entry_id(&last_id);

                let rando1 = KeyPair::new();
                let tx = Transaction::new(&rando0, rando1.pubkey(), 1, last_id);
                bank.process_transaction(&tx).unwrap();

                // Finally, return a transaction that's unique
                Transaction::new(&rando0, rando1.pubkey(), 1, last_id)
            })
            .collect();
        bencher.iter(|| {
            // Since benchmarker runs this multiple times, we need to clear the signatures.
            for (_, sigs) in bank.last_ids_sigs.write().unwrap().iter_mut() {
                sigs.clear();
            }

            assert!(
                bank.process_transactions(transactions.clone())
                    .iter()
                    .all(|x| x.is_ok())
            );
        });
        assert_eq!(rotate_vector(vec![4, 1, 2, 3], 1), expect);
        assert_eq!(rotate_vector(vec![1, 2, 3, 4], 0), expect);
        assert_eq!(rotate_vector(vec![2, 3, 4, 1], 3), expect);
        assert_eq!(rotate_vector(vec![3, 4, 1, 2], 2), expect);
    }

}

@@ -8,12 +8,13 @@ use counter::Counter;
use packet::{PacketRecycler, Packets, SharedPackets};
use rayon::prelude::*;
use record_stage::Signal;
use result::Result;
use result::{Error, Result};
use service::Service;
use std::net::SocketAddr;
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::sync::mpsc::{channel, Receiver, Sender};
use std::sync::atomic::AtomicUsize;
use std::sync::mpsc::{channel, Receiver, RecvTimeoutError, Sender};
use std::sync::Arc;
use std::thread::{Builder, JoinHandle};
use std::thread::{self, Builder, JoinHandle};
use std::time::Duration;
use std::time::Instant;
use timing;
@@ -22,17 +23,15 @@ use transaction::Transaction;
/// Stores the stage's thread handle and output receiver.
pub struct BankingStage {
    /// Handle to the stage's thread.
    pub thread_hdl: JoinHandle<()>,
    thread_hdl: JoinHandle<()>,
}

impl BankingStage {
    /// Create the stage using `bank`. Exit when either `exit` is set or
    /// when `verified_receiver` or the stage's output receiver is dropped.
    /// Create the stage using `bank`. Exit when `verified_receiver` is dropped.
    /// Discard input packets using `packet_recycler` to minimize memory
    /// allocations in a previous stage such as the `fetch_stage`.
    pub fn new(
        bank: Arc<Bank>,
        exit: Arc<AtomicBool>,
        verified_receiver: Receiver<Vec<(SharedPackets, Vec<u8>)>>,
        packet_recycler: PacketRecycler,
    ) -> (Self, Receiver<Signal>) {
@@ -40,15 +39,16 @@ impl BankingStage {
        let thread_hdl = Builder::new()
            .name("solana-banking-stage".to_string())
            .spawn(move || loop {
                let e = Self::process_packets(
                    bank.clone(),
                if let Err(e) = Self::process_packets(
                    &bank,
                    &verified_receiver,
                    &signal_sender,
                    &packet_recycler,
                );
                if e.is_err() {
                    if exit.load(Ordering::Relaxed) {
                        break;
                ) {
                    match e {
                        Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => break,
                        Error::RecvTimeoutError(RecvTimeoutError::Timeout) => (),
                        _ => error!("{:?}", e),
                    }
                }
            })
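The reworked spawn loop distinguishes the two RecvTimeoutError variants: Timeout just means no verified packets arrived within the window, while Disconnected means every sender is gone and the thread can exit, which replaces the old exit flag. A self-contained sketch of that shutdown idiom with std's mpsc channels (the u32 items and 100 ms window are arbitrary):

use std::sync::mpsc::{channel, Receiver, RecvTimeoutError};
use std::thread;
use std::time::Duration;

// Drain a channel until all senders hang up; timeouts are not fatal.
fn worker(receiver: Receiver<u32>) {
    loop {
        match receiver.recv_timeout(Duration::from_millis(100)) {
            Ok(item) => println!("processing {}", item),
            Err(RecvTimeoutError::Timeout) => (), // nothing yet, keep polling
            Err(RecvTimeoutError::Disconnected) => break, // senders dropped
        }
    }
}

fn main() {
    let (sender, receiver) = channel();
    let handle = thread::spawn(move || worker(receiver));
    for i in 0..3 {
        sender.send(i).unwrap();
    }
    drop(sender); // disconnect; the worker loop exits on its own
    handle.join().unwrap();
}
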
@@ -71,8 +71,8 @@ impl BankingStage {

    /// Process the incoming packets and send output `Signal` messages to `signal_sender`.
    /// Discard packets via `packet_recycler`.
    fn process_packets(
        bank: Arc<Bank>,
    pub fn process_packets(
        bank: &Arc<Bank>,
        verified_receiver: &Receiver<Vec<(SharedPackets, Vec<u8>)>>,
        signal_sender: &Sender<Signal>,
        packet_recycler: &PacketRecycler,
@@ -89,7 +89,6 @@ impl BankingStage {
            mms.len(),
        );
        let count = mms.iter().map(|x| x.1.len()).sum();
        static mut COUNTER: Counter = create_counter!("banking_stage_process_packets", 1);
        let proc_start = Instant::now();
        for (msgs, vers) in mms {
            let transactions = Self::deserialize_transactions(&msgs.read().unwrap());
@@ -125,11 +124,21 @@ impl BankingStage {
            reqs_len,
            (reqs_len as f32) / (total_time_s)
        );
        inc_counter!(COUNTER, count, proc_start);
        inc_new_counter!("banking_stage-process_packets", count);
        Ok(())
    }
}

impl Service for BankingStage {
    fn thread_hdls(self) -> Vec<JoinHandle<()>> {
        vec![self.thread_hdl]
    }

    fn join(self) -> thread::Result<()> {
        self.thread_hdl.join()
    }
}

// TODO: When banking is pulled out of RequestStage, add this test back in.

//use bank::Bank;
@@ -190,239 +199,3 @@ impl BankingStage {
//    assert_eq!(bank.get_balance(&alice.pubkey()), Some(1));
//  }
//}
//
//#[cfg(all(feature = "unstable", test))]
//mod bench {
//  extern crate test;
//  use self::test::Bencher;
//  use bank::{Bank, MAX_ENTRY_IDS};
//  use bincode::serialize;
//  use hash::hash;
//  use mint::Mint;
//  use rayon::prelude::*;
//  use signature::{KeyPair, KeyPairUtil};
//  use std::collections::HashSet;
//  use std::time::Instant;
//  use transaction::Transaction;
//
//  #[bench]
//  fn bench_process_transactions(_bencher: &mut Bencher) {
//    let mint = Mint::new(100_000_000);
//    let bank = Bank::new(&mint);
//    // Create transactions between unrelated parties.
//    let txs = 100_000;
//    let last_ids: Mutex<HashSet<Hash>> = Mutex::new(HashSet::new());
//    let transactions: Vec<_> = (0..txs)
//      .into_par_iter()
//      .map(|i| {
//        // Seed the 'to' account and a cell for its signature.
//        let dummy_id = i % (MAX_ENTRY_IDS as i32);
//        let last_id = hash(&serialize(&dummy_id).unwrap()); // Semi-unique hash
//        {
//          let mut last_ids = last_ids.lock().unwrap();
//          if !last_ids.contains(&last_id) {
//            last_ids.insert(last_id);
//            bank.register_entry_id(&last_id);
//          }
//        }
//
//        // Seed the 'from' account.
//        let rando0 = KeyPair::new();
//        let tx = Transaction::new(&mint.keypair(), rando0.pubkey(), 1_000, last_id);
//        bank.process_transaction(&tx).unwrap();
//
//        let rando1 = KeyPair::new();
//        let tx = Transaction::new(&rando0, rando1.pubkey(), 2, last_id);
//        bank.process_transaction(&tx).unwrap();
//
//        // Finally, return a transaction that's unique
//        Transaction::new(&rando0, rando1.pubkey(), 1, last_id)
//      })
//      .collect();
//
//    let banking_stage = EventProcessor::new(bank, &mint.last_id(), None);
//
//    let now = Instant::now();
//    assert!(banking_stage.process_transactions(transactions).is_ok());
//    let duration = now.elapsed();
//    let sec = duration.as_secs() as f64 + duration.subsec_nanos() as f64 / 1_000_000_000.0;
//    let tps = txs as f64 / sec;
//
//    // Ensure that all transactions were successfully logged.
//    drop(banking_stage.historian_input);
//    let entries: Vec<Entry> = banking_stage.output.lock().unwrap().iter().collect();
//    assert_eq!(entries.len(), 1);
//    assert_eq!(entries[0].transactions.len(), txs as usize);
//
//    println!("{} tps", tps);
//  }
//}

#[cfg(all(feature = "unstable", test))]
mod bench {
    extern crate test;
    use self::test::Bencher;
    use bank::*;
    use banking_stage::BankingStage;
    use logger;
    use mint::Mint;
    use packet::{to_packets_chunked, PacketRecycler};
    use rayon::prelude::*;
    use record_stage::Signal;
    use signature::{KeyPair, KeyPairUtil};
    use std::iter;
    use std::sync::mpsc::{channel, Receiver};
    use std::sync::Arc;
    use transaction::Transaction;

    fn check_txs(batches: usize, receiver: &Receiver<Signal>, ref_tx_count: usize) {
        let mut total = 0;
        for _ in 0..batches {
            let signal = receiver.recv().unwrap();
            if let Signal::Transactions(transactions) = signal {
                total += transactions.len();
            } else {
                assert!(false);
            }
        }
        assert_eq!(total, ref_tx_count);
    }

    #[bench]
    fn bench_banking_stage_multi_accounts(bencher: &mut Bencher) {
        logger::setup();
        let tx = 10_000_usize;
        let mint_total = 1_000_000_000_000;
        let mint = Mint::new(mint_total);
        let num_dst_accounts = 8 * 1024;
        let num_src_accounts = 8 * 1024;

        let srckeys: Vec<_> = (0..num_src_accounts).map(|_| KeyPair::new()).collect();
        let dstkeys: Vec<_> = (0..num_dst_accounts)
            .map(|_| KeyPair::new().pubkey())
            .collect();

        info!("created keys src: {} dst: {}", srckeys.len(), dstkeys.len());

        let transactions: Vec<_> = (0..tx)
            .map(|i| {
                Transaction::new(
                    &srckeys[i % num_src_accounts],
                    dstkeys[i % num_dst_accounts],
                    i as i64,
                    mint.last_id(),
                )
            })
            .collect();

        info!("created transactions");

        let (verified_sender, verified_receiver) = channel();
        let (signal_sender, signal_receiver) = channel();
        let packet_recycler = PacketRecycler::default();

        let setup_transactions: Vec<_> = (0..num_src_accounts)
            .map(|i| {
                Transaction::new(
                    &mint.keypair(),
                    srckeys[i].pubkey(),
                    mint_total / num_src_accounts as i64,
                    mint.last_id(),
                )
            })
            .collect();

        bencher.iter(move || {
            let bank = Arc::new(Bank::new(&mint));

            let verified_setup: Vec<_> =
                to_packets_chunked(&packet_recycler, setup_transactions.clone(), tx)
                    .into_iter()
                    .map(|x| {
                        let len = (*x).read().unwrap().packets.len();
                        (x, iter::repeat(1).take(len).collect())
                    })
                    .collect();

            let verified_setup_len = verified_setup.len();
            verified_sender.send(verified_setup).unwrap();
            BankingStage::process_packets(
                bank.clone(),
                &verified_receiver,
                &signal_sender,
                &packet_recycler,
            ).unwrap();

            check_txs(verified_setup_len, &signal_receiver, num_src_accounts);

            let verified: Vec<_> = to_packets_chunked(&packet_recycler, transactions.clone(), 192)
                .into_iter()
                .map(|x| {
                    let len = (*x).read().unwrap().packets.len();
                    (x, iter::repeat(1).take(len).collect())
                })
                .collect();

            let verified_len = verified.len();
            verified_sender.send(verified).unwrap();
            BankingStage::process_packets(
                bank.clone(),
                &verified_receiver,
                &signal_sender,
                &packet_recycler,
            ).unwrap();

            check_txs(verified_len, &signal_receiver, tx);
        });
    }

    #[bench]
    fn bench_banking_stage_single_from(bencher: &mut Bencher) {
        logger::setup();
        let tx = 10_000_usize;
        let mint = Mint::new(1_000_000_000_000);
        let mut pubkeys = Vec::new();
        let num_keys = 8;
        for _ in 0..num_keys {
            pubkeys.push(KeyPair::new().pubkey());
        }

        let transactions: Vec<_> = (0..tx)
            .into_par_iter()
            .map(|i| {
                Transaction::new(
                    &mint.keypair(),
                    pubkeys[i % num_keys],
                    i as i64,
                    mint.last_id(),
                )
            })
            .collect();

        let (verified_sender, verified_receiver) = channel();
        let (signal_sender, signal_receiver) = channel();
        let packet_recycler = PacketRecycler::default();

        bencher.iter(move || {
            let bank = Arc::new(Bank::new(&mint));
            let verified: Vec<_> = to_packets_chunked(&packet_recycler, transactions.clone(), tx)
                .into_iter()
                .map(|x| {
                    let len = (*x).read().unwrap().packets.len();
                    (x, iter::repeat(1).take(len).collect())
                })
                .collect();
            let verified_len = verified.len();
            verified_sender.send(verified).unwrap();
            BankingStage::process_packets(
                bank.clone(),
                &verified_receiver,
                &signal_sender,
                &packet_recycler,
            ).unwrap();

            check_txs(verified_len, &signal_receiver, tx);
        });
    }
}

326 src/bin/client-demo.rs Normal file → Executable file
@@ -1,27 +1,29 @@
extern crate atty;
extern crate bincode;
extern crate clap;
extern crate env_logger;
extern crate getopts;
extern crate rayon;
extern crate serde_json;
extern crate solana;

use atty::{is, Stream};
use getopts::Options;
use bincode::serialize;
use clap::{App, Arg};
use rayon::prelude::*;
use solana::crdt::{Crdt, ReplicatedData};
use solana::crdt::{Crdt, NodeInfo};
use solana::drone::{DroneRequest, DRONE_PORT};
use solana::fullnode::Config;
use solana::hash::Hash;
use solana::mint::Mint;
use solana::nat::udp_public_bind;
use solana::nat::{udp_public_bind, udp_random_bind};
use solana::ncp::Ncp;
use solana::signature::{GenKeys, KeyPair, KeyPairUtil};
use solana::service::Service;
use solana::signature::{read_keypair, GenKeys, KeyPair, KeyPairUtil};
use solana::streamer::default_window;
use solana::thin_client::ThinClient;
use solana::timing::{duration_as_ms, duration_as_s};
use solana::transaction::Transaction;
use std::env;
use std::error;
use std::fs::File;
use std::io::{stdin, Read};
use std::net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket};
use std::io::Write;
use std::net::{IpAddr, Ipv4Addr, SocketAddr, TcpStream, UdpSocket};
use std::process::exit;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, RwLock};
@@ -31,20 +33,11 @@ use std::thread::JoinHandle;
use std::time::Duration;
use std::time::Instant;

fn print_usage(program: &str, opts: Options) {
    let mut brief = format!("Usage: cat <mint.json> | {} [options]\n\n", program);
    brief += " Solana client demo creates a number of transactions and\n";
    brief += " sends them to a target node.";
    brief += " Takes json formatted mint file to stdin.";

    print!("{}", opts.usage(&brief));
}

fn sample_tx_count(
    exit: Arc<AtomicBool>,
    maxes: Arc<RwLock<Vec<(f64, u64)>>>,
    exit: &Arc<AtomicBool>,
    maxes: &Arc<RwLock<Vec<(f64, u64)>>>,
    first_count: u64,
    v: ReplicatedData,
    v: &NodeInfo,
    sample_period: u64,
) {
    let mut client = mk_client(&v);
@@ -58,17 +51,17 @@ fn sample_tx_count(
        now = Instant::now();
        let sample = tx_count - initial_tx_count;
        initial_tx_count = tx_count;
        println!("{}: Transactions processed {}", v.transactions_addr, sample);
        println!("{}: Transactions processed {}", v.contact_info.tpu, sample);
        let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
        let tps = (sample * 1_000_000_000) as f64 / ns as f64;
        if tps > max_tps {
            max_tps = tps;
        }
        println!("{}: {:.2} tps", v.transactions_addr, tps);
        println!("{}: {:.2} tps", v.contact_info.tpu, tps);
        total = tx_count - first_count;
        println!(
            "{}: Total Transactions processed {}",
            v.transactions_addr, total
            v.contact_info.tpu, total
        );
        sleep(Duration::new(sample_period, 0));

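The sampler derives TPS by widening the elapsed Duration to nanoseconds so the division stays exact until the final cast. The same arithmetic in isolation, with made-up sample values:

use std::time::Duration;

// Transactions per second from a sample count and its elapsed time.
fn tps(sample: u64, duration: Duration) -> f64 {
    let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
    (sample * 1_000_000_000) as f64 / ns as f64
}

fn main() {
    // 5_000 transactions observed over 2.5 seconds is 2_000 TPS.
    assert_eq!(tps(5_000, Duration::new(2, 500_000_000)), 2_000.0);
}
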
||||
@ -82,20 +75,29 @@ fn sample_tx_count(
|
||||
|
||||
fn generate_and_send_txs(
|
||||
client: &mut ThinClient,
|
||||
tx_clients: &Vec<ThinClient>,
|
||||
mint: &Mint,
|
||||
keypairs: &Vec<KeyPair>,
|
||||
leader: &ReplicatedData,
|
||||
tx_clients: &[ThinClient],
|
||||
id: &KeyPair,
|
||||
keypairs: &[KeyPair],
|
||||
leader: &NodeInfo,
|
||||
txs: i64,
|
||||
last_id: &mut Hash,
|
||||
threads: usize,
|
||||
reclaim: bool,
|
||||
) {
|
||||
println!("Signing transactions... {}", keypairs.len(),);
|
||||
println!("Signing transactions... {}", txs / 2,);
|
||||
let signing_start = Instant::now();
|
||||
let transactions: Vec<_> = keypairs
|
||||
.par_iter()
|
||||
.map(|keypair| Transaction::new(&mint.keypair(), keypair.pubkey(), 1, *last_id))
|
||||
.collect();
|
||||
|
||||
let transactions: Vec<_> = if !reclaim {
|
||||
keypairs
|
||||
.par_iter()
|
||||
.map(|keypair| Transaction::new(&id, keypair.pubkey(), 1, *last_id))
|
||||
.collect()
|
||||
} else {
|
||||
keypairs
|
||||
.par_iter()
|
||||
.map(|keypair| Transaction::new(keypair, id.pubkey(), 1, *last_id))
|
||||
.collect()
|
||||
};
|
||||
|
||||
let duration = signing_start.elapsed();
|
||||
let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
|
||||
@ -108,7 +110,11 @@ fn generate_and_send_txs(
|
||||
duration_as_ms(&duration),
|
||||
);
|
||||
|
||||
println!("Transfering {} transactions in {} batches", txs, threads);
|
||||
println!(
|
||||
"Transfering {} transactions in {} batches",
|
||||
txs / 2,
|
||||
threads
|
||||
);
|
||||
let transfer_start = Instant::now();
|
||||
let sz = transactions.len() / threads;
|
||||
let chunks: Vec<_> = transactions.chunks(sz).collect();
|
||||
@ -119,10 +125,10 @@ fn generate_and_send_txs(
|
||||
println!(
|
||||
"Transferring 1 unit {} times... to {:?}",
|
||||
txs.len(),
|
||||
leader.transactions_addr
|
||||
leader.contact_info.tpu
|
||||
);
|
||||
for tx in txs {
|
||||
client.transfer_signed(tx.clone()).unwrap();
|
||||
client.transfer_signed(tx).unwrap();
|
||||
}
|
||||
});
|
||||
println!(
|
||||
@ -145,90 +151,114 @@ fn main() {
|
||||
env_logger::init();
|
||||
let mut threads = 4usize;
|
||||
let mut num_nodes = 1usize;
|
||||
let mut time_sec = 60;
|
||||
let mut time_sec = 90;
|
||||
|
||||
let mut opts = Options::new();
|
||||
opts.optopt("l", "", "leader", "leader.json");
|
||||
opts.optopt("t", "", "number of threads", &format!("{}", threads));
|
||||
opts.optopt(
|
||||
"s",
|
||||
"",
|
||||
"send transactions for this many seconds",
|
||||
&format!("{}", time_sec),
|
||||
);
|
||||
opts.optopt(
|
||||
"n",
|
||||
"",
|
||||
"number of nodes to converge to",
|
||||
&format!("{}", num_nodes),
|
||||
);
|
||||
opts.optflag("h", "help", "print help");
|
||||
let args: Vec<String> = env::args().collect();
|
||||
let matches = match opts.parse(&args[1..]) {
|
||||
Ok(m) => m,
|
||||
Err(e) => {
|
||||
eprintln!("{}", e);
|
||||
exit(1);
|
||||
}
|
||||
};
|
||||
let matches = App::new("solana-client-demo")
|
||||
.arg(
|
||||
Arg::with_name("leader")
|
||||
.short("l")
|
||||
.long("leader")
|
||||
.value_name("PATH")
|
||||
.takes_value(true)
|
||||
.help("/path/to/leader.json"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("keypair")
|
||||
.short("k")
|
||||
.long("keypair")
|
||||
.value_name("PATH")
|
||||
.takes_value(true)
|
||||
.default_value("~/.config/solana/id.json")
|
||||
.help("/path/to/id.json"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("num_nodes")
|
||||
.short("n")
|
||||
.long("nodes")
|
||||
.value_name("NUMBER")
|
||||
.takes_value(true)
|
||||
.help("number of nodes to converge to"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("threads")
|
||||
.short("t")
|
||||
.long("threads")
|
||||
.value_name("NUMBER")
|
||||
.takes_value(true)
|
||||
.help("number of threads"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("seconds")
|
||||
.short("s")
|
||||
.long("sec")
|
||||
.value_name("NUMBER")
|
||||
.takes_value(true)
|
||||
.help("send transactions for this many seconds"),
|
||||
)
|
||||
.get_matches();
|
||||
|
||||
if matches.opt_present("h") {
|
||||
let program = args[0].clone();
|
||||
print_usage(&program, opts);
|
||||
return;
|
||||
}
|
||||
if matches.opt_present("t") {
|
||||
threads = matches.opt_str("t").unwrap().parse().expect("integer");
|
||||
}
|
||||
if matches.opt_present("n") {
|
||||
num_nodes = matches.opt_str("n").unwrap().parse().expect("integer");
|
||||
}
|
||||
if matches.opt_present("s") {
|
||||
time_sec = matches.opt_str("s").unwrap().parse().expect("integer");
|
||||
}
|
||||
|
||||
let leader = if matches.opt_present("l") {
|
||||
read_leader(matches.opt_str("l").unwrap())
|
||||
let leader: NodeInfo;
|
||||
if let Some(l) = matches.value_of("leader") {
|
||||
leader = read_leader(l).node_info;
|
||||
} else {
|
||||
let server_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 8000);
|
||||
ReplicatedData::new_leader(&server_addr)
|
||||
leader = NodeInfo::new_leader(&server_addr);
|
||||
};
|
||||
|
||||
let id = read_keypair(matches.value_of("keypair").unwrap()).expect("client keypair");
|
||||
|
||||
if let Some(t) = matches.value_of("threads") {
|
||||
threads = t.to_string().parse().expect("integer");
|
||||
}
|
||||
|
||||
if let Some(n) = matches.value_of("num_nodes") {
|
||||
num_nodes = n.to_string().parse().expect("integer");
|
||||
}
|
||||
|
||||
if let Some(s) = matches.value_of("seconds") {
|
||||
time_sec = s.to_string().parse().expect("integer");
|
||||
}
|
||||
|
||||
let mut drone_addr = leader.contact_info.tpu;
|
||||
drone_addr.set_port(DRONE_PORT);
|
||||
|
||||
let signal = Arc::new(AtomicBool::new(false));
|
||||
let mut c_threads = vec![];
|
||||
let validators = converge(&leader, signal.clone(), num_nodes, &mut c_threads);
|
||||
assert_eq!(validators.len(), num_nodes);
|
||||
let validators = converge(&leader, &signal, num_nodes, &mut c_threads);
|
||||
println!("Network has {} node(s)", validators.len());
|
||||
assert!(validators.len() >= num_nodes);
|
||||
|
||||
if is(Stream::Stdin) {
|
||||
eprintln!("nothing found on stdin, expected a json file");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
let mut buffer = String::new();
|
||||
let num_bytes = stdin().read_to_string(&mut buffer).unwrap();
|
||||
if num_bytes == 0 {
|
||||
eprintln!("empty file on stdin, expected a json file");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
println!("Parsing stdin...");
|
||||
let mint: Mint = serde_json::from_str(&buffer).unwrap_or_else(|e| {
|
||||
eprintln!("failed to parse json: {}", e);
|
||||
exit(1);
|
||||
});
|
||||
let mut client = mk_client(&leader);
|
||||
|
||||
let starting_balance = client.poll_get_balance(&id.pubkey()).unwrap();
|
||||
let txs: i64 = 500_000;
|
||||
|
||||
if starting_balance < txs {
|
||||
let airdrop_amount = txs - starting_balance;
|
||||
println!("Airdropping {:?} tokens", airdrop_amount);
|
||||
request_airdrop(&drone_addr, &id, airdrop_amount as u64).unwrap();
|
||||
// TODO: return airdrop Result from Drone
sleep(Duration::from_millis(100));
let balance = client.poll_get_balance(&id.pubkey()).unwrap();
println!("Your balance is: {:?}", balance);
if balance < txs || (starting_balance == balance) {
println!("TPS airdrop limit reached; wait 60sec to retry");
exit(1);
}
}
println!("Get last ID...");
let mut last_id = client.get_last_id();
println!("Got last ID {:?}", last_id);
let mut seed = [0u8; 32];
seed.copy_from_slice(&mint.keypair().public_key_bytes()[..32]);
seed.copy_from_slice(&id.public_key_bytes()[..32]);
let rnd = GenKeys::new(seed);
println!("Creating keypairs...");
let txs = 500_000;
let keypairs = rnd.gen_n_keypairs(txs);
let keypairs = rnd.gen_n_keypairs(txs / 2);
let first_count = client.transaction_count();
println!("initial count {}", first_count);
@@ -247,27 +277,43 @@ fn main() {
Builder::new()
.name("solana-client-sample".to_string())
.spawn(move || {
sample_tx_count(exit, maxes, first_count, v, sample_period);
sample_tx_count(&exit, &maxes, first_count, &v, sample_period);
})
.unwrap()
})
.collect();
let clients = (0..threads).map(|_| mk_client(&leader)).collect();
let clients: Vec<_> = (0..threads).map(|_| mk_client(&leader)).collect();
// generate and send transactions for the specified duration
let time = Duration::new(time_sec, 0);
let now = Instant::now();
let time = Duration::new(time_sec / 2, 0);
let mut now = Instant::now();
while now.elapsed() < time {
generate_and_send_txs(
&mut client,
&clients,
&mint,
&id,
&keypairs,
&leader,
txs,
&mut last_id,
threads,
false,
);
}
last_id = client.get_last_id();
now = Instant::now();
while now.elapsed() < time {
generate_and_send_txs(
&mut client,
&clients,
&id,
&keypairs,
&leader,
txs,
&mut last_id,
threads,
true,
);
}
@@ -300,27 +346,29 @@ fn main() {
}
}
fn mk_client(r: &ReplicatedData) -> ThinClient {
let requests_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
let transactions_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
fn mk_client(r: &NodeInfo) -> ThinClient {
let requests_socket = udp_random_bind(8000, 10000, 5).unwrap();
let transactions_socket = udp_random_bind(8000, 10000, 5).unwrap();
requests_socket
.set_read_timeout(Some(Duration::new(1, 0)))
.unwrap();
ThinClient::new(
r.requests_addr,
r.contact_info.rpu,
requests_socket,
r.transactions_addr,
r.contact_info.tpu,
transactions_socket,
)
}
fn spy_node() -> (ReplicatedData, UdpSocket) {
let gossip_socket_pair = udp_public_bind("gossip");
fn spy_node() -> (NodeInfo, UdpSocket) {
let gossip_socket_pair = udp_public_bind("gossip", 8000, 10000);
let pubkey = KeyPair::new().pubkey();
let daddr = "0.0.0.0:0".parse().unwrap();
let node = ReplicatedData::new(
assert!(!gossip_socket_pair.addr.ip().is_unspecified());
assert!(!gossip_socket_pair.addr.ip().is_multicast());
let node = NodeInfo::new(
pubkey,
//gossip.local_addr().unwrap(),
gossip_socket_pair.addr,
@@ -333,22 +381,22 @@ fn spy_node() -> (ReplicatedData, UdpSocket) {
}
fn converge(
leader: &ReplicatedData,
exit: Arc<AtomicBool>,
leader: &NodeInfo,
exit: &Arc<AtomicBool>,
num_nodes: usize,
threads: &mut Vec<JoinHandle<()>>,
) -> Vec<ReplicatedData> {
) -> Vec<NodeInfo> {
//lets spy on the network
let daddr = "0.0.0.0:0".parse().unwrap();
let (spy, spy_gossip) = spy_node();
let mut spy_crdt = Crdt::new(spy);
let mut spy_crdt = Crdt::new(spy).expect("Crdt::new");
spy_crdt.insert(&leader);
spy_crdt.set_leader(leader.id);
let spy_ref = Arc::new(RwLock::new(spy_crdt));
let window = default_window();
let gossip_send_socket = UdpSocket::bind("0.0.0.0:0").expect("bind 0");
let gossip_send_socket = udp_random_bind(8000, 10000, 5).unwrap();
let ncp = Ncp::new(
spy_ref.clone(),
&spy_ref,
window.clone(),
spy_gossip,
gossip_send_socket,
@@ -357,27 +405,49 @@ fn converge(
let mut rv = vec![];
//wait for the network to converge, 30 seconds should be plenty
for _ in 0..30 {
let v: Vec<ReplicatedData> = spy_ref
let v: Vec<NodeInfo> = spy_ref
.read()
.unwrap()
.table
.values()
.into_iter()
.filter(|x| x.requests_addr != daddr)
.filter(|x| x.contact_info.rpu != daddr)
.cloned()
.collect();
if v.len() >= num_nodes {
println!("CONVERGED!");
rv.extend(v.into_iter());
break;
} else {
println!(
"{} node(s) discovered (looking for {} or more)",
v.len(),
num_nodes
);
}
sleep(Duration::new(1, 0));
}
threads.extend(ncp.thread_hdls.into_iter());
threads.extend(ncp.thread_hdls().into_iter());
rv
}
fn read_leader(path: String) -> ReplicatedData {
let file = File::open(path.clone()).expect(&format!("file not found: {}", path));
serde_json::from_reader(file).expect(&format!("failed to parse {}", path))
fn read_leader(path: &str) -> Config {
let file = File::open(path).unwrap_or_else(|_| panic!("file not found: {}", path));
serde_json::from_reader(file).unwrap_or_else(|_| panic!("failed to parse {}", path))
}
fn request_airdrop(
drone_addr: &SocketAddr,
id: &KeyPair,
tokens: u64,
) -> Result<(), Box<error::Error>> {
let mut stream = TcpStream::connect(drone_addr)?;
let req = DroneRequest::GetAirdrop {
airdrop_request_amount: tokens,
client_public_key: id.pubkey(),
};
let tx = serialize(&req).expect("serialize drone request");
stream.write_all(&tx).unwrap();
// TODO: add timeout to this function, in case of unresponsive drone
Ok(())
}
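Note: the TODO above asks for a timeout so an unresponsive drone cannot hang the client. A minimal sketch of one way to do that with only the standard library; the function name and the three-second figure are illustrative, not part of this diff:

use std::io::{self, Write};
use std::net::{SocketAddr, TcpStream};
use std::time::Duration;

// Hypothetical variant of request_airdrop's transport: bound both the
// connect and the write so a hung drone cannot stall the caller.
fn send_to_drone(drone_addr: &SocketAddr, payload: &[u8]) -> io::Result<()> {
    let mut stream = TcpStream::connect_timeout(drone_addr, Duration::from_secs(3))?;
    stream.set_write_timeout(Some(Duration::from_secs(3)))?;
    // `payload` is the same bincode-serialized DroneRequest built above.
    stream.write_all(payload)
}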
146 src/bin/drone.rs
@@ -1,115 +1,97 @@
extern crate atty;
extern crate bincode;
extern crate clap;
extern crate env_logger;
extern crate getopts;
extern crate serde_json;
extern crate solana;
extern crate tokio;
extern crate tokio_codec;
extern crate tokio_io;
use atty::{is, Stream as atty_stream};
use bincode::deserialize;
use getopts::Options;
use solana::crdt::ReplicatedData;
use solana::drone::{Drone, DroneRequest};
use solana::mint::Mint;
use std::env;
use clap::{App, Arg};
use solana::crdt::NodeInfo;
use solana::drone::{Drone, DroneRequest, DRONE_PORT};
use solana::fullnode::Config;
use solana::metrics::set_panic_hook;
use solana::signature::read_keypair;
use std::fs::File;
use std::io::{stdin, Read};
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use std::process::exit;
use std::sync::{Arc, Mutex};
use std::thread;
use tokio::net::TcpListener;
use tokio::prelude::*;
use tokio_codec::{BytesCodec, Decoder};
fn print_usage(program: &str, opts: Options) {
let mut brief = format!("Usage: cat <mint.json> | {} [options]\n\n", program);
brief += " Run a Solana Drone to act as the custodian of the mint's remaining tokens\n";
print!("{}", opts.usage(&brief));
}
fn main() {
env_logger::init();
let mut opts = Options::new();
opts.optopt(
"t",
"",
"time",
"time slice over which to limit token requests to drone",
);
opts.optopt("c", "", "cap", "request limit for time slice");
opts.optopt("l", "", "leader", "leader.json");
opts.optflag("h", "help", "print help");
let args: Vec<String> = env::args().collect();
let matches = match opts.parse(&args[1..]) {
Ok(m) => m,
Err(e) => {
eprintln!("{}", e);
exit(1);
}
set_panic_hook("drone");
let matches = App::new("drone")
.arg(
Arg::with_name("leader")
.short("l")
.long("leader")
.value_name("PATH")
.takes_value(true)
.help("/path/to/leader.json"),
)
.arg(
Arg::with_name("keypair")
.short("k")
.long("keypair")
.value_name("PATH")
.takes_value(true)
.required(true)
.help("/path/to/mint.json"),
)
.arg(
Arg::with_name("time")
.short("t")
.long("time")
.value_name("SECONDS")
.takes_value(true)
.help("time slice over which to limit requests to drone"),
)
.arg(
Arg::with_name("cap")
.short("c")
.long("cap")
.value_name("NUMBER")
.takes_value(true)
.help("request limit for time slice"),
)
.get_matches();
let leader: NodeInfo;
if let Some(l) = matches.value_of("leader") {
leader = read_leader(l).node_info;
} else {
let server_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 8000);
leader = NodeInfo::new_leader(&server_addr);
};
if matches.opt_present("h") {
let program = args[0].clone();
print_usage(&program, opts);
return;
}
let mint_keypair =
read_keypair(matches.value_of("keypair").expect("keypair")).expect("client keypair");
let time_slice: Option<u64>;
if matches.opt_present("t") {
time_slice = matches
.opt_str("t")
.expect("unexpected string from input")
.parse()
.ok();
if let Some(t) = matches.value_of("time") {
time_slice = Some(t.to_string().parse().expect("integer"));
} else {
time_slice = None;
}
let request_cap: Option<u64>;
if matches.opt_present("c") {
request_cap = matches
.opt_str("c")
.expect("unexpected string from input")
.parse()
.ok();
if let Some(c) = matches.value_of("cap") {
request_cap = Some(c.to_string().parse().expect("integer"));
} else {
request_cap = None;
}
let leader = if matches.opt_present("l") {
read_leader(matches.opt_str("l").unwrap())
} else {
let server_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 8000);
ReplicatedData::new_leader(&server_addr)
};
if is(atty_stream::Stdin) {
eprintln!("nothing found on stdin, expected a json file");
exit(1);
}
let mut buffer = String::new();
let num_bytes = stdin().read_to_string(&mut buffer).unwrap();
if num_bytes == 0 {
eprintln!("empty file on stdin, expected a json file");
exit(1);
}
let mint: Mint = serde_json::from_str(&buffer).unwrap_or_else(|e| {
eprintln!("failed to parse json: {}", e);
exit(1);
});
let mint_keypair = mint.keypair();
let drone_addr: SocketAddr = "0.0.0.0:9900".parse().unwrap();
let drone_addr: SocketAddr = format!("0.0.0.0:{}", DRONE_PORT).parse().unwrap();
let drone = Arc::new(Mutex::new(Drone::new(
mint_keypair,
drone_addr,
leader.transactions_addr,
leader.requests_addr,
leader.contact_info.tpu,
leader.contact_info.rpu,
time_slice,
request_cap,
)));
@@ -161,7 +143,7 @@ fn main() {
});
tokio::run(done);
}
fn read_leader(path: String) -> ReplicatedData {
let file = File::open(path.clone()).expect(&format!("file not found: {}", path));
serde_json::from_reader(file).expect(&format!("failed to parse {}", path))
fn read_leader(path: &str) -> Config {
let file = File::open(path).unwrap_or_else(|_| panic!("file not found: {}", path));
serde_json::from_reader(file).unwrap_or_else(|_| panic!("failed to parse {}", path))
}
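Note: the two Option<u64> blocks above (time_slice, request_cap) follow the same shape; a compact equivalent using Option::map, shown as a hypothetical helper against the clap 2.x ArgMatches used in this diff:

use clap::ArgMatches;

// Parse an optional numeric flag, panicking with "integer" on bad input
// to match the expect("integer") behavior in the diff above.
fn optional_u64(matches: &ArgMatches, name: &str) -> Option<u64> {
    matches.value_of(name).map(|v| v.parse().expect("integer"))
}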
@@ -1,66 +1,81 @@
extern crate getopts;
extern crate clap;
extern crate dirs;
extern crate serde_json;
extern crate solana;
use getopts::Options;
use solana::crdt::{get_ip_addr, parse_port_or_addr, ReplicatedData};
use clap::{App, Arg};
use solana::crdt::{get_ip_addr, parse_port_or_addr};
use solana::fullnode::Config;
use solana::nat::get_public_ip_addr;
use std::env;
use solana::signature::read_pkcs8;
use std::io;
use std::net::SocketAddr;
use std::process::exit;
fn print_usage(program: &str, opts: Options) {
let mut brief = format!("Usage: {} [options]\n\n", program);
brief += " Create a solana fullnode config file\n";
print!("{}", opts.usage(&brief));
}
fn main() {
let mut opts = Options::new();
opts.optopt("b", "", "bind", "bind to port or address");
opts.optflag(
"p",
"",
"detect public network address using public servers",
);
opts.optflag(
"l",
"",
"detect network address from local machine configuration",
);
opts.optflag("h", "help", "print help");
let args: Vec<String> = env::args().collect();
let matches = match opts.parse(&args[1..]) {
Ok(m) => m,
Err(e) => {
eprintln!("{}", e);
exit(1);
}
};
if matches.opt_present("h") {
let program = args[0].clone();
print_usage(&program, opts);
return;
}
let matches = App::new("fullnode-config")
.arg(
Arg::with_name("local")
.short("l")
.long("local")
.takes_value(false)
.help("detect network address from local machine configuration"),
)
.arg(
Arg::with_name("keypair")
.short("k")
.long("keypair")
.value_name("PATH")
.takes_value(true)
.help("/path/to/id.json"),
)
.arg(
Arg::with_name("public")
.short("p")
.long("public")
.takes_value(false)
.help("detect public network address using public servers"),
)
.arg(
Arg::with_name("bind")
.short("b")
.long("bind")
.value_name("PORT")
.takes_value(true)
.help("bind to port or address"),
)
.get_matches();
let bind_addr: SocketAddr = {
let mut bind_addr = parse_port_or_addr(matches.opt_str("b"));
if matches.opt_present("l") {
let mut bind_addr = parse_port_or_addr({
if let Some(b) = matches.value_of("bind") {
Some(b.to_string())
} else {
None
}
});
if matches.is_present("local") {
let ip = get_ip_addr().unwrap();
bind_addr.set_ip(ip);
}
if matches.opt_present("p") {
if matches.is_present("public") {
let ip = get_public_ip_addr().unwrap();
bind_addr.set_ip(ip);
}
bind_addr
};
let mut path = dirs::home_dir().expect("home directory");
let id_path = if matches.is_present("keypair") {
matches.value_of("keypair").unwrap()
} else {
path.extend(&[".config", "solana", "id.json"]);
path.to_str().unwrap()
};
let pkcs8 = read_pkcs8(id_path).expect("client keypair");
// we need all the receiving sockets to be bound within the expected
// port range that we open on aws
let repl_data = ReplicatedData::new_leader(&bind_addr);
let config = Config::new(&bind_addr, pkcs8);
let stdout = io::stdout();
serde_json::to_writer(stdout, &repl_data).expect("serialize");
serde_json::to_writer(stdout, &config).expect("serialize");
}
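Note: the block passed to parse_port_or_addr above is an open-coded Option::map; the same conversion in a fragment, assuming the clap matches binding in scope:

// Convert clap's Option<&str> into the Option<String> parse_port_or_addr expects.
let bind_arg = matches.value_of("bind").map(|b| b.to_string());
let mut bind_addr = parse_port_or_addr(bind_arg);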
@@ -1,74 +1,61 @@
extern crate atty;
extern crate clap;
extern crate env_logger;
extern crate getopts;
extern crate log;
extern crate serde_json;
extern crate solana;
use atty::{is, Stream};
use getopts::Options;
use solana::crdt::{ReplicatedData, TestNode};
use solana::fullnode::FullNode;
use std::env;
use clap::{App, Arg};
use solana::crdt::{NodeInfo, TestNode};
use solana::fullnode::{Config, FullNode, LedgerFile};
use solana::metrics::set_panic_hook;
use solana::service::Service;
use solana::signature::{KeyPair, KeyPairUtil};
use std::fs::File;
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use std::process::exit;
use std::sync::atomic::AtomicBool;
use std::sync::Arc;
//use std::time::Duration;
fn print_usage(program: &str, opts: Options) {
let mut brief = format!("Usage: cat <transaction.log> | {} [options]\n\n", program);
brief += " Run a Solana node to handle transactions and\n";
brief += " write a new transaction log to stdout.\n";
brief += " Takes existing transaction log from stdin.";
print!("{}", opts.usage(&brief));
}
fn main() -> () {
env_logger::init();
let mut opts = Options::new();
opts.optflag("h", "help", "print help");
opts.optopt("l", "", "run with the identity found in FILE", "FILE");
opts.optopt(
"t",
"",
"testnet; connect to the network at this gossip entry point",
"HOST:PORT",
);
opts.optopt(
"o",
"",
"output log to FILE, defaults to stdout (ignored by validators)",
"FILE",
);
let args: Vec<String> = env::args().collect();
let matches = match opts.parse(&args[1..]) {
Ok(m) => m,
Err(e) => {
eprintln!("{}", e);
exit(1);
}
};
if matches.opt_present("h") {
let program = args[0].clone();
print_usage(&program, opts);
return;
}
if is(Stream::Stdin) {
eprintln!("nothing found on stdin, expected a log file");
exit(1);
}
set_panic_hook("fullnode");
let matches = App::new("fullnode")
.arg(
Arg::with_name("identity")
.short("i")
.long("identity")
.value_name("FILE")
.takes_value(true)
.help("run with the identity found in FILE"),
)
.arg(
Arg::with_name("testnet")
.short("t")
.long("testnet")
.value_name("HOST:PORT")
.takes_value(true)
.help("connect to the network at this gossip entry point"),
)
.arg(
Arg::with_name("ledger")
.short("L")
.long("ledger")
.value_name("FILE")
.takes_value(true)
.help("use FILE as persistent ledger (defaults to stdin/stdout)"),
)
.get_matches();
let bind_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 8000);
let mut repl_data = ReplicatedData::new_leader(&bind_addr);
if matches.opt_present("l") {
let path = matches.opt_str("l").unwrap();
let mut keypair = KeyPair::new();
let mut repl_data = NodeInfo::new_leader_with_pubkey(keypair.pubkey(), &bind_addr);
if let Some(i) = matches.value_of("identity") {
let path = i.to_string();
if let Ok(file) = File::open(path.clone()) {
if let Ok(data) = serde_json::from_reader(file) {
repl_data = data;
let parse: serde_json::Result<Config> = serde_json::from_reader(file);
if let Ok(data) = parse {
keypair = data.keypair();
repl_data = data.node_info;
} else {
eprintln!("failed to parse {}", path);
exit(1);
@@ -78,19 +65,22 @@ fn main() -> () {
exit(1);
}
}
let mut node = TestNode::new_with_bind_addr(repl_data, bind_addr);
let exit = Arc::new(AtomicBool::new(false));
let fullnode = if matches.opt_present("t") {
let testnet_address_string = matches.opt_str("t").unwrap();
let testnet_addr = testnet_address_string.parse().unwrap();
FullNode::new(node, false, None, Some(testnet_addr), None, exit)
let ledger = if let Some(l) = matches.value_of("ledger") {
LedgerFile::Path(l.to_string())
} else {
node.data.current_leader_id = node.data.id.clone();
let outfile = matches.opt_str("o");
FullNode::new(node, true, None, None, outfile, exit)
LedgerFile::StdInOut
};
for t in fullnode.thread_hdls {
t.join().expect("join");
}
let mut node = TestNode::new_with_bind_addr(repl_data, bind_addr);
let fullnode = if let Some(t) = matches.value_of("testnet") {
let testnet_address_string = t.to_string();
let testnet_addr = testnet_address_string.parse().unwrap();
FullNode::new(node, false, ledger, Some(keypair), Some(testnet_addr))
} else {
node.data.leader_id = node.data.id;
FullNode::new(node, true, ledger, None, None)
};
fullnode.join().expect("join");
}
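Note: the explicit serde_json::Result<Config> binding above exists because from_reader's target type is no longer fixed by the assignment; a self-contained sketch of the same pattern, where the Config struct is a stand-in rather than the one from src/fullnode.rs:

extern crate serde;
extern crate serde_json;
#[macro_use]
extern crate serde_derive;

use std::fs::File;

#[derive(Deserialize)]
struct Config {} // fields elided; any Deserialize type works the same way

fn load(path: &str) -> Option<Config> {
    let file = File::open(path).ok()?;
    // Annotating the Result is what pins from_reader's output type.
    let parse: serde_json::Result<Config> = serde_json::from_reader(file);
    parse.ok()
}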
@@ -1,10 +1,13 @@
//! A command-line executable for generating the chain's genesis block.
extern crate atty;
#[macro_use]
extern crate clap;
extern crate serde_json;
extern crate solana;
use atty::{is, Stream};
use clap::{App, Arg};
use solana::entry_writer::EntryWriter;
use solana::mint::Mint;
use std::error;
@@ -12,6 +15,20 @@ use std::io::{stdin, stdout, Read};
use std::process::exit;
fn main() -> Result<(), Box<error::Error>> {
let matches = App::new("solana-genesis")
.arg(
Arg::with_name("tokens")
.short("t")
.long("tokens")
.value_name("NUMBER")
.takes_value(true)
.required(true)
.help("Number of tokens with which to initialize mint"),
)
.get_matches();
let tokens = value_t_or_exit!(matches, "tokens", i64);
if is(Stream::Stdin) {
eprintln!("nothing found on stdin, expected a json file");
exit(1);
@@ -24,7 +41,9 @@ fn main() -> Result<(), Box<error::Error>> {
exit(1);
}
let mint: Mint = serde_json::from_str(&buffer)?;
let pkcs8: Vec<u8> = serde_json::from_str(&buffer)?;
let mint = Mint::new_with_pkcs8(tokens, pkcs8);
let mut writer = stdout();
EntryWriter::write_entries(&mut writer, mint.create_entries())?;
Ok(())
49 src/bin/keygen.rs (new file)
@@ -0,0 +1,49 @@
extern crate clap;
extern crate dirs;
extern crate ring;
extern crate serde_json;
use clap::{App, Arg};
use ring::rand::SystemRandom;
use ring::signature::Ed25519KeyPair;
use std::error;
use std::fs::{self, File};
use std::io::Write;
use std::path::Path;
fn main() -> Result<(), Box<error::Error>> {
let matches = App::new("solana-keygen")
.arg(
Arg::with_name("outfile")
.short("o")
.long("outfile")
.value_name("PATH")
.takes_value(true)
.help("path to generated file"),
)
.get_matches();
let rnd = SystemRandom::new();
let pkcs8_bytes = Ed25519KeyPair::generate_pkcs8(&rnd)?;
let serialized = serde_json::to_string(&pkcs8_bytes.to_vec())?;
let mut path = dirs::home_dir().expect("home directory");
let outfile = if matches.is_present("outfile") {
matches.value_of("outfile").unwrap()
} else {
path.extend(&[".config", "solana", "id.json"]);
path.to_str().unwrap()
};
if outfile == "-" {
println!("{}", serialized);
} else {
if let Some(outdir) = Path::new(outfile).parent() {
fs::create_dir_all(outdir)?;
}
let mut f = File::create(outfile)?;
f.write_all(&serialized.into_bytes())?;
}
Ok(())
}
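Note: genesis.rs above consumes this output with serde_json::from_str::<Vec<u8>>; a small round-trip check of the keygen file format, where the path is illustrative:

extern crate serde_json;

use std::error;
use std::fs;

fn main() -> Result<(), Box<error::Error>> {
    // Read the JSON byte array written by solana-keygen and recover the
    // raw PKCS#8 bytes, exactly as genesis.rs does with its stdin buffer.
    let serialized = fs::read_to_string("id.json")?;
    let pkcs8: Vec<u8> = serde_json::from_str(&serialized)?;
    assert!(!pkcs8.is_empty());
    Ok(())
}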
@@ -1,29 +0,0 @@
extern crate atty;
extern crate serde_json;
extern crate solana;
use atty::{is, Stream};
use solana::mint::Mint;
use std::io;
use std::process::exit;
fn main() {
let mut input_text = String::new();
if is(Stream::Stdin) {
eprintln!("nothing found on stdin, expected a token number");
exit(1);
}
io::stdin().read_line(&mut input_text).unwrap();
let trimmed = input_text.trim();
let tokens = trimmed.parse::<i64>().unwrap_or_else(|e| {
eprintln!("{}", e);
exit(1);
});
let mint = Mint::new(tokens);
let serialized = serde_json::to_string(&mint).unwrap_or_else(|e| {
eprintln!("failed to serialize: {}", e);
exit(1);
});
println!("{}", serialized);
}
@@ -2,17 +2,17 @@ extern crate atty;
extern crate bincode;
extern crate bs58;
extern crate clap;
extern crate dirs;
extern crate env_logger;
extern crate getopts;
extern crate serde_json;
extern crate solana;
use bincode::serialize;
use clap::{App, Arg, SubCommand};
use solana::crdt::ReplicatedData;
use solana::drone::DroneRequest;
use solana::mint::Mint;
use solana::signature::{PublicKey, Signature};
use solana::crdt::NodeInfo;
use solana::drone::{DroneRequest, DRONE_PORT};
use solana::fullnode::Config;
use solana::signature::{read_keypair, KeyPair, KeyPairUtil, PublicKey, Signature};
use solana::thin_client::ThinClient;
use std::error;
use std::fmt;
@@ -20,7 +20,6 @@ use std::fs::File;
use std::io;
use std::io::prelude::*;
use std::net::{IpAddr, Ipv4Addr, SocketAddr, TcpStream, UdpSocket};
use std::process::exit;
use std::thread::sleep;
use std::time::Duration;
@@ -56,8 +55,8 @@ impl error::Error for WalletError {
}
struct WalletConfig {
leader: ReplicatedData,
id: Mint,
leader: NodeInfo,
id: KeyPair,
drone_addr: SocketAddr,
command: WalletCommand,
}
@@ -66,9 +65,9 @@ impl Default for WalletConfig {
fn default() -> WalletConfig {
let default_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 8000);
WalletConfig {
leader: ReplicatedData::new_leader(&default_addr.clone()),
id: Mint::new(0),
drone_addr: default_addr.clone(),
leader: NodeInfo::new_leader(&default_addr),
id: KeyPair::new(),
drone_addr: default_addr,
command: WalletCommand::Balance,
}
}
@@ -85,12 +84,12 @@ fn parse_args() -> Result<WalletConfig, Box<error::Error>> {
.help("/path/to/leader.json"),
)
.arg(
Arg::with_name("mint")
.short("m")
.long("mint")
Arg::with_name("keypair")
.short("k")
.long("keypair")
.value_name("PATH")
.takes_value(true)
.help("/path/to/mint.json"),
.help("/path/to/id.json"),
)
.subcommand(
SubCommand::with_name("airdrop")
@@ -101,6 +100,7 @@ fn parse_args() -> Result<WalletConfig, Box<error::Error>> {
.long("tokens")
.value_name("NUMBER")
.takes_value(true)
.required(true)
.help("The number of tokens to request"),
),
)
@@ -122,7 +122,6 @@ fn parse_args() -> Result<WalletConfig, Box<error::Error>> {
.long("to")
.value_name("PUBKEY")
.takes_value(true)
.required(true)
.help("The pubkey of recipient"),
),
)
@@ -141,36 +140,39 @@ fn parse_args() -> Result<WalletConfig, Box<error::Error>> {
.subcommand(SubCommand::with_name("address").about("Get your public key"))
.get_matches();
let leader: ReplicatedData;
let leader: NodeInfo;
if let Some(l) = matches.value_of("leader") {
leader = read_leader(l.to_string());
leader = read_leader(l)?.node_info;
} else {
let server_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 8000);
leader = ReplicatedData::new_leader(&server_addr);
leader = NodeInfo::new_leader(&server_addr);
};
let id: Mint;
if let Some(m) = matches.value_of("mint") {
id = read_mint(m.to_string())?;
let mut path = dirs::home_dir().expect("home directory");
let id_path = if matches.is_present("keypair") {
matches.value_of("keypair").unwrap()
} else {
eprintln!("No mint found!");
exit(1);
path.extend(&[".config", "solana", "id.json"]);
path.to_str().unwrap()
};
let id = read_keypair(id_path).or_else(|err| {
display_actions();
Err(WalletError::BadParameter(format!(
"{}: Unable to open keypair file: {}",
err, id_path
)))
})?;
let mut drone_addr = leader.transactions_addr.clone();
drone_addr.set_port(9900);
let mut drone_addr = leader.contact_info.tpu;
drone_addr.set_port(DRONE_PORT);
let command = match matches.subcommand() {
("airdrop", Some(airdrop_matches)) => {
let mut tokens: i64 = id.tokens;
if airdrop_matches.is_present("tokens") {
tokens = airdrop_matches.value_of("tokens").unwrap().parse()?;
}
let tokens = airdrop_matches.value_of("tokens").unwrap().parse()?;
Ok(WalletCommand::AirDrop(tokens))
}
("pay", Some(pay_matches)) => {
let to: PublicKey;
if pay_matches.is_present("to") {
let to = if pay_matches.is_present("to") {
let pubkey_vec = bs58::decode(pay_matches.value_of("to").unwrap())
.into_vec()
.expect("base58-encoded public key");
@@ -179,14 +181,13 @@ fn parse_args() -> Result<WalletConfig, Box<error::Error>> {
display_actions();
Err(WalletError::BadParameter("Invalid public key".to_string()))?;
}
to = PublicKey::clone_from_slice(&pubkey_vec);
PublicKey::clone_from_slice(&pubkey_vec)
} else {
to = id.pubkey();
}
let mut tokens: i64 = id.tokens;
if pay_matches.is_present("tokens") {
tokens = pay_matches.value_of("tokens").unwrap().parse()?;
}
id.pubkey()
};
let tokens = pay_matches.value_of("tokens").unwrap().parse()?;
Ok(WalletCommand::Pay(tokens, to))
}
("confirm", Some(confirm_matches)) => {
@@ -248,20 +249,33 @@ fn process_command(
// Request an airdrop from Solana Drone;
// Request amount is set in request_airdrop function
WalletCommand::AirDrop(tokens) => {
println!("Airdrop requested...");
println!("Airdropping {:?} tokens", tokens);
let _airdrop = request_airdrop(&config.drone_addr, &config.id, tokens as u64)?;
// TODO: return airdrop Result from Drone
sleep(Duration::from_millis(100));
println!(
"Your balance is: {:?}",
client.poll_get_balance(&config.id.pubkey()).unwrap()
"Requesting airdrop of {:?} tokens from {}",
tokens, config.drone_addr
);
let previous_balance = client.poll_get_balance(&config.id.pubkey())?;
request_airdrop(&config.drone_addr, &config.id, tokens as u64)?;
// TODO: return airdrop Result from Drone instead of polling the
// network
let mut current_balance = previous_balance;
for _ in 0..20 {
sleep(Duration::from_millis(500));
current_balance = client.poll_get_balance(&config.id.pubkey())?;
if previous_balance != current_balance {
break;
}
println!(".");
}
println!("Your balance is: {:?}", current_balance);
if current_balance - previous_balance != tokens {
Err("Airdrop failed!")?;
}
}
// If client has positive balance, spend tokens in {balance} number of transactions
WalletCommand::Pay(tokens, to) => {
let last_id = client.get_last_id();
let sig = client.transfer(tokens, &config.id.keypair(), to, &last_id)?;
let sig = client.transfer(tokens, &config.id, to, &last_id)?;
println!("{}", bs58::encode(sig).into_string());
}
// Confirm the last client transaction by signature
@@ -277,28 +291,33 @@ fn process_command(
}
fn display_actions() {
println!("");
println!();
println!("Commands:");
println!(" address Get your public key");
println!(" balance Get your account balance");
println!(" airdrop Request a batch of tokens");
println!(" pay Send tokens to a public key");
println!(" confirm Confirm your last payment by signature");
println!("");
println!();
}
fn read_leader(path: String) -> ReplicatedData {
let file = File::open(path.clone()).expect(&format!("file not found: {}", path));
serde_json::from_reader(file).expect(&format!("failed to parse {}", path))
fn read_leader(path: &str) -> Result<Config, WalletError> {
let file = File::open(path.to_string()).or_else(|err| {
Err(WalletError::BadParameter(format!(
"{}: Unable to open leader file: {}",
err, path
)))
})?;
serde_json::from_reader(file).or_else(|err| {
Err(WalletError::BadParameter(format!(
"{}: Failed to parse leader file: {}",
err, path
)))
})
}
fn read_mint(path: String) -> Result<Mint, Box<error::Error>> {
let file = File::open(path.clone())?;
let mint = serde_json::from_reader(file)?;
Ok(mint)
}
fn mk_client(r: &ReplicatedData) -> io::Result<ThinClient> {
fn mk_client(r: &NodeInfo) -> io::Result<ThinClient> {
let requests_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
let transactions_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
requests_socket
@@ -306,16 +325,16 @@ fn mk_client(r: &ReplicatedData) -> io::Result<ThinClient> {
.unwrap();
Ok(ThinClient::new(
r.requests_addr,
r.contact_info.rpu,
requests_socket,
r.transactions_addr,
r.contact_info.tpu,
transactions_socket,
))
}
fn request_airdrop(
drone_addr: &SocketAddr,
id: &Mint,
id: &KeyPair,
tokens: u64,
) -> Result<(), Box<error::Error>> {
let mut stream = TcpStream::connect(drone_addr)?;
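Note: the airdrop branch above waits for the balance to move by polling up to 20 times at 500ms; the same wait extracted into a standalone helper, where the helper name and signature are illustrative:

use std::thread::sleep;
use std::time::Duration;

// Retry a balance probe until it differs from `previous`, up to `tries`
// attempts, mirroring the poll_get_balance loop in the diff above.
fn wait_for_balance_change<F>(mut probe: F, previous: i64, tries: usize) -> i64
where
    F: FnMut() -> i64,
{
    let mut current = previous;
    for _ in 0..tries {
        sleep(Duration::from_millis(500));
        current = probe();
        if current != previous {
            break;
        }
    }
    current
}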
@@ -1,29 +1,31 @@
//! The `blob_fetch_stage` pulls blobs from UDP sockets and sends it to a channel.
use packet::BlobRecycler;
use service::Service;
use std::net::UdpSocket;
use std::sync::atomic::AtomicBool;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::channel;
use std::sync::Arc;
use std::thread::JoinHandle;
use std::thread::{self, JoinHandle};
use streamer::{self, BlobReceiver};
pub struct BlobFetchStage {
pub thread_hdls: Vec<JoinHandle<()>>,
exit: Arc<AtomicBool>,
thread_hdls: Vec<JoinHandle<()>>,
}
impl BlobFetchStage {
pub fn new(
socket: UdpSocket,
exit: Arc<AtomicBool>,
blob_recycler: BlobRecycler,
blob_recycler: &BlobRecycler,
) -> (Self, BlobReceiver) {
Self::new_multi_socket(vec![socket], exit, blob_recycler)
}
pub fn new_multi_socket(
sockets: Vec<UdpSocket>,
exit: Arc<AtomicBool>,
blob_recycler: BlobRecycler,
blob_recycler: &BlobRecycler,
) -> (Self, BlobReceiver) {
let (blob_sender, blob_receiver) = channel();
let thread_hdls: Vec<_> = sockets
@@ -38,6 +40,23 @@ impl BlobFetchStage {
})
.collect();
(BlobFetchStage { thread_hdls }, blob_receiver)
(BlobFetchStage { exit, thread_hdls }, blob_receiver)
}
pub fn close(&self) {
self.exit.store(true, Ordering::Relaxed);
}
}
impl Service for BlobFetchStage {
fn thread_hdls(self) -> Vec<JoinHandle<()>> {
self.thread_hdls
}
fn join(self) -> thread::Result<()> {
for thread_hdl in self.thread_hdls() {
thread_hdl.join()?;
}
Ok(())
}
}
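Note: the Service trait implemented here (and by FetchStage below) is introduced in src/service.rs, which this compare view does not show; judging from the two method signatures every stage implements, it is approximately the following, an inference rather than the exact definition:

use std::thread::{self, JoinHandle};

// Inferred from usage in these diffs: a stage surrenders its worker
// threads, and join() blocks on all of them.
pub trait Service {
    fn thread_hdls(self) -> Vec<JoinHandle<()>>;
    fn join(self) -> thread::Result<()>;
}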
@@ -12,7 +12,7 @@ use std::mem;
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub enum Condition {
/// Wait for a `Timestamp` `Witness` at or after the given `DateTime`.
Timestamp(DateTime<Utc>),
Timestamp(DateTime<Utc>, PublicKey),
/// Wait for a `Signature` `Witness` from `PublicKey`.
Signature(PublicKey),
@@ -20,10 +20,12 @@ pub enum Condition {
impl Condition {
/// Return true if the given Witness satisfies this Condition.
pub fn is_satisfied(&self, witness: &Witness) -> bool {
pub fn is_satisfied(&self, witness: &Witness, from: &PublicKey) -> bool {
match (self, witness) {
(Condition::Signature(pubkey), Witness::Signature(from)) => pubkey == from,
(Condition::Timestamp(dt), Witness::Timestamp(last_time)) => dt <= last_time,
(Condition::Signature(pubkey), Witness::Signature) => pubkey == from,
(Condition::Timestamp(dt, pubkey), Witness::Timestamp(last_time)) => {
pubkey == from && dt <= last_time
}
_ => false,
}
}
@@ -56,8 +58,13 @@ impl Budget {
}
/// Create a budget that pays `tokens` to `to` after the given DateTime.
pub fn new_future_payment(dt: DateTime<Utc>, tokens: i64, to: PublicKey) -> Self {
Budget::After(Condition::Timestamp(dt), Payment { tokens, to })
pub fn new_future_payment(
dt: DateTime<Utc>,
from: PublicKey,
tokens: i64,
to: PublicKey,
) -> Self {
Budget::After(Condition::Timestamp(dt, from), Payment { tokens, to })
}
/// Create a budget that pays `tokens` to `to` after the given DateTime
@@ -69,7 +76,7 @@ impl Budget {
to: PublicKey,
) -> Self {
Budget::Or(
(Condition::Timestamp(dt), Payment { tokens, to }),
(Condition::Timestamp(dt, from), Payment { tokens, to }),
(Condition::Signature(from), Payment { tokens, to: from }),
)
}
@@ -94,11 +101,11 @@ impl PaymentPlan for Budget {
/// Apply a witness to the budget to see if the budget can be reduced.
/// If so, modify the budget in-place.
fn apply_witness(&mut self, witness: &Witness) {
fn apply_witness(&mut self, witness: &Witness, from: &PublicKey) {
let new_payment = match self {
Budget::After(cond, payment) if cond.is_satisfied(witness) => Some(payment),
Budget::Or((cond, payment), _) if cond.is_satisfied(witness) => Some(payment),
Budget::Or(_, (cond, payment)) if cond.is_satisfied(witness) => Some(payment),
Budget::After(cond, payment) if cond.is_satisfied(witness, from) => Some(payment),
Budget::Or((cond, payment), _) if cond.is_satisfied(witness, from) => Some(payment),
Budget::Or(_, (cond, payment)) if cond.is_satisfied(witness, from) => Some(payment),
_ => None,
}.cloned();
@@ -111,20 +118,22 @@ impl PaymentPlan for Budget {
#[cfg(test)]
mod tests {
use super::*;
use signature::{KeyPair, KeyPairUtil};
#[test]
fn test_signature_satisfied() {
let sig = PublicKey::default();
assert!(Condition::Signature(sig).is_satisfied(&Witness::Signature(sig)));
let from = PublicKey::default();
assert!(Condition::Signature(from).is_satisfied(&Witness::Signature, &from));
}
#[test]
fn test_timestamp_satisfied() {
let dt1 = Utc.ymd(2014, 11, 14).and_hms(8, 9, 10);
let dt2 = Utc.ymd(2014, 11, 14).and_hms(10, 9, 8);
assert!(Condition::Timestamp(dt1).is_satisfied(&Witness::Timestamp(dt1)));
assert!(Condition::Timestamp(dt1).is_satisfied(&Witness::Timestamp(dt2)));
assert!(!Condition::Timestamp(dt2).is_satisfied(&Witness::Timestamp(dt1)));
let from = PublicKey::default();
assert!(Condition::Timestamp(dt1, from).is_satisfied(&Witness::Timestamp(dt1), &from));
assert!(Condition::Timestamp(dt1, from).is_satisfied(&Witness::Timestamp(dt2), &from));
assert!(!Condition::Timestamp(dt2, from).is_satisfied(&Witness::Timestamp(dt1), &from));
}
#[test]
@@ -134,7 +143,7 @@ mod tests {
let to = PublicKey::default();
assert!(Budget::new_payment(42, to).verify(42));
assert!(Budget::new_authorized_payment(from, 42, to).verify(42));
assert!(Budget::new_future_payment(dt, 42, to).verify(42));
assert!(Budget::new_future_payment(dt, from, 42, to).verify(42));
assert!(Budget::new_cancelable_future_payment(dt, from, 42, to).verify(42));
}
@@ -144,20 +153,35 @@ mod tests {
let to = PublicKey::default();
let mut budget = Budget::new_authorized_payment(from, 42, to);
budget.apply_witness(&Witness::Signature(from));
budget.apply_witness(&Witness::Signature, &from);
assert_eq!(budget, Budget::new_payment(42, to));
}
#[test]
fn test_future_payment() {
let dt = Utc.ymd(2014, 11, 14).and_hms(8, 9, 10);
let to = PublicKey::default();
let from = KeyPair::new().pubkey();
let to = KeyPair::new().pubkey();
let mut budget = Budget::new_future_payment(dt, 42, to);
budget.apply_witness(&Witness::Timestamp(dt));
let mut budget = Budget::new_future_payment(dt, from, 42, to);
budget.apply_witness(&Witness::Timestamp(dt), &from);
assert_eq!(budget, Budget::new_payment(42, to));
}
#[test]
fn test_unauthorized_future_payment() {
// Ensure timestamp will only be acknowledged if it came from the
// whitelisted public key.
let dt = Utc.ymd(2014, 11, 14).and_hms(8, 9, 10);
let from = KeyPair::new().pubkey();
let to = KeyPair::new().pubkey();
let mut budget = Budget::new_future_payment(dt, from, 42, to);
let orig_budget = budget.clone();
budget.apply_witness(&Witness::Timestamp(dt), &to); // <-- Attack!
assert_eq!(budget, orig_budget);
}
#[test]
fn test_cancelable_future_payment() {
let dt = Utc.ymd(2014, 11, 14).and_hms(8, 9, 10);
@@ -165,11 +189,11 @@ mod tests {
let to = PublicKey::default();
let mut budget = Budget::new_cancelable_future_payment(dt, from, 42, to);
budget.apply_witness(&Witness::Timestamp(dt));
budget.apply_witness(&Witness::Timestamp(dt), &from);
assert_eq!(budget, Budget::new_payment(42, to));
let mut budget = Budget::new_cancelable_future_payment(dt, from, 42, to);
budget.apply_witness(&Witness::Signature(from));
budget.apply_witness(&Witness::Signature, &from);
assert_eq!(budget, Budget::new_payment(42, from));
}
}
@@ -1,7 +1,7 @@
use crdt::ReplicatedData;
use crdt::{CrdtError, NodeInfo};
use rand::distributions::{Distribution, Weighted, WeightedChoice};
use rand::thread_rng;
use result::{Error, Result};
use result::Result;
use signature::PublicKey;
use std;
use std::collections::HashMap;
@@ -9,7 +9,7 @@ use std::collections::HashMap;
pub const DEFAULT_WEIGHT: u32 = 1;
pub trait ChooseGossipPeerStrategy {
fn choose_peer<'a>(&self, options: Vec<&'a ReplicatedData>) -> Result<&'a ReplicatedData>;
fn choose_peer<'a>(&self, options: Vec<&'a NodeInfo>) -> Result<&'a NodeInfo>;
}
pub struct ChooseRandomPeerStrategy<'a> {
@@ -27,9 +27,9 @@ impl<'a, 'b> ChooseRandomPeerStrategy<'a> {
}
impl<'a> ChooseGossipPeerStrategy for ChooseRandomPeerStrategy<'a> {
fn choose_peer<'b>(&self, options: Vec<&'b ReplicatedData>) -> Result<&'b ReplicatedData> {
fn choose_peer<'b>(&self, options: Vec<&'b NodeInfo>) -> Result<&'b NodeInfo> {
if options.is_empty() {
return Err(Error::CrdtTooSmall);
Err(CrdtError::NoPeers)?;
}
let n = ((self.random)() as usize) % options.len();
@@ -159,7 +159,7 @@ impl<'a> ChooseWeightedPeerStrategy<'a> {
// Return u32 b/c the weighted sampling API from rand::distributions
// only takes u32 for weights
if weighted_vote >= std::u32::MAX as f64 {
if weighted_vote >= f64::from(std::u32::MAX) {
return std::u32::MAX;
}
@@ -172,9 +172,9 @@ impl<'a> ChooseWeightedPeerStrategy<'a> {
}
impl<'a> ChooseGossipPeerStrategy for ChooseWeightedPeerStrategy<'a> {
fn choose_peer<'b>(&self, options: Vec<&'b ReplicatedData>) -> Result<&'b ReplicatedData> {
if options.len() < 1 {
return Err(Error::CrdtTooSmall);
fn choose_peer<'b>(&self, options: Vec<&'b NodeInfo>) -> Result<&'b NodeInfo> {
if options.is_empty() {
Err(CrdtError::NoPeers)?;
}
let mut weighted_peers = vec![];
126 src/counter.rs
@@ -1,13 +1,19 @@
use influx_db_client as influxdb;
use metrics;
use std::env;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::time::Duration;
use timing;
const DEFAULT_METRICS_RATE: usize = 100;
pub struct Counter {
pub name: &'static str,
/// total accumulated value
pub counts: AtomicUsize,
pub nanos: AtomicUsize,
pub times: AtomicUsize,
pub lograte: usize,
/// last accumulated value logged
pub lastlog: AtomicUsize,
pub lograte: AtomicUsize,
}
macro_rules! create_counter {
@@ -15,55 +21,135 @@ macro_rules! create_counter {
Counter {
name: $name,
counts: AtomicUsize::new(0),
nanos: AtomicUsize::new(0),
times: AtomicUsize::new(0),
lograte: $lograte,
lastlog: AtomicUsize::new(0),
lograte: AtomicUsize::new($lograte),
}
};
}
macro_rules! inc_counter {
($name:expr, $count:expr, $start:expr) => {
unsafe { $name.inc($count, $start.elapsed()) };
($name:expr, $count:expr) => {
unsafe { $name.inc($count) };
};
}
macro_rules! inc_new_counter {
($name:expr, $count:expr) => {{
static mut INC_NEW_COUNTER: Counter = create_counter!($name, 0);
inc_counter!(INC_NEW_COUNTER, $count);
}};
($name:expr, $count:expr, $lograte:expr) => {{
static mut INC_NEW_COUNTER: Counter = create_counter!($name, $lograte);
inc_counter!(INC_NEW_COUNTER, $count);
}};
}
impl Counter {
pub fn inc(&mut self, events: usize, dur: Duration) {
let total = dur.as_secs() * 1_000_000_000 + dur.subsec_nanos() as u64;
fn default_log_rate() -> usize {
let v = env::var("SOLANA_DEFAULT_METRICS_RATE")
.map(|x| x.parse().unwrap_or(DEFAULT_METRICS_RATE))
.unwrap_or(DEFAULT_METRICS_RATE);
if v == 0 {
DEFAULT_METRICS_RATE
} else {
v
}
}
pub fn inc(&mut self, events: usize) {
let counts = self.counts.fetch_add(events, Ordering::Relaxed);
let nanos = self.nanos.fetch_add(total as usize, Ordering::Relaxed);
let times = self.times.fetch_add(1, Ordering::Relaxed);
if times % self.lograte == 0 && times > 0 {
let mut lograte = self.lograte.load(Ordering::Relaxed);
if lograte == 0 {
lograte = Counter::default_log_rate();
self.lograte.store(lograte, Ordering::Relaxed);
}
if times % lograte == 0 && times > 0 {
let lastlog = self.lastlog.load(Ordering::Relaxed);
info!(
"COUNTER:{{\"name\": \"{}\", \"counts\": {}, \"nanos\": {}, \"samples\": {}, \"rate\": {}, \"now\": {}}}",
"COUNTER:{{\"name\": \"{}\", \"counts\": {}, \"samples\": {}, \"now\": {}}}",
self.name,
counts,
nanos,
times,
counts as f64 * 1e9 / nanos as f64,
timing::timestamp(),
);
metrics::submit(
influxdb::Point::new(&format!("counter-{}", self.name))
.add_field(
"count",
influxdb::Value::Integer(counts as i64 - lastlog as i64),
)
.to_owned(),
);
self.lastlog
.compare_and_swap(lastlog, counts, Ordering::Relaxed);
}
}
}
#[cfg(test)]
mod tests {
use counter::Counter;
use counter::{Counter, DEFAULT_METRICS_RATE};
use std::env;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::time::Instant;
#[test]
fn test_counter() {
static mut COUNTER: Counter = create_counter!("test", 100);
let start = Instant::now();
let count = 1;
inc_counter!(COUNTER, count, start);
inc_counter!(COUNTER, count);
unsafe {
assert_eq!(COUNTER.counts.load(Ordering::Relaxed), 1);
assert_ne!(COUNTER.nanos.load(Ordering::Relaxed), 0);
assert_eq!(COUNTER.times.load(Ordering::Relaxed), 1);
assert_eq!(COUNTER.lograte, 100);
assert_eq!(COUNTER.lograte.load(Ordering::Relaxed), 100);
assert_eq!(COUNTER.lastlog.load(Ordering::Relaxed), 0);
assert_eq!(COUNTER.name, "test");
}
for _ in 0..199 {
inc_counter!(COUNTER, 2);
}
unsafe {
assert_eq!(COUNTER.lastlog.load(Ordering::Relaxed), 199);
}
inc_counter!(COUNTER, 2);
unsafe {
assert_eq!(COUNTER.lastlog.load(Ordering::Relaxed), 399);
}
}
#[test]
fn test_inc_new_counter() {
//make sure that macros are syntactically correct
//the variable is internal to the macro scope so there is no way to introspect it
inc_new_counter!("counter-1", 1);
inc_new_counter!("counter-2", 1, 2);
}
#[test]
fn test_lograte() {
static mut COUNTER: Counter = create_counter!("test_lograte", 0);
inc_counter!(COUNTER, 2);
unsafe {
assert_eq!(
COUNTER.lograte.load(Ordering::Relaxed),
DEFAULT_METRICS_RATE
);
}
}
#[test]
fn test_lograte_env() {
assert_ne!(DEFAULT_METRICS_RATE, 0);
static mut COUNTER: Counter = create_counter!("test_lograte_env", 0);
env::set_var("SOLANA_DEFAULT_METRICS_RATE", "50");
inc_counter!(COUNTER, 2);
unsafe {
assert_eq!(COUNTER.lograte.load(Ordering::Relaxed), 50);
}
static mut COUNTER2: Counter = create_counter!("test_lograte_env", 0);
env::set_var("SOLANA_DEFAULT_METRICS_RATE", "0");
inc_counter!(COUNTER2, 2);
unsafe {
assert_eq!(
COUNTER2.lograte.load(Ordering::Relaxed),
DEFAULT_METRICS_RATE
);
}
}
}
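Note: a fragment showing the two inc_new_counter! arities in use, assuming #[macro_use] imports of the counter macros; the counter name is illustrative:

// Logs every default_log_rate() events (SOLANA_DEFAULT_METRICS_RATE, or 100):
inc_new_counter!("stage-events", 1);
// Logs every 10 events instead:
inc_new_counter!("stage-events", 1, 10);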
1190 src/crdt.rs (file diff suppressed because it is too large)
62 src/drone.rs
@@ -4,6 +4,8 @@
//! checking requests against a request cap for a given time time_slice
//! and (to come) an IP rate limit.
use influx_db_client as influxdb;
use metrics;
use signature::{KeyPair, PublicKey};
use std::io;
use std::io::{Error, ErrorKind};
@@ -13,9 +15,10 @@ use thin_client::ThinClient;
use transaction::Transaction;
pub const TIME_SLICE: u64 = 60;
pub const REQUEST_CAP: u64 = 150_000;
pub const REQUEST_CAP: u64 = 1_000_000;
pub const DRONE_PORT: u16 = 9900;
#[derive(Serialize, Deserialize, Debug)]
#[derive(Serialize, Deserialize, Debug, Clone, Copy)]
pub enum DroneRequest {
GetAirdrop {
airdrop_request_amount: u64,
@@ -110,7 +113,11 @@ impl Drone {
airdrop_request_amount,
client_public_key,
} => {
request_amount = airdrop_request_amount.clone();
info!(
"Requesting airdrop of {} to {:?}",
airdrop_request_amount, client_public_key
);
request_amount = airdrop_request_amount;
tx = Transaction::new(
&self.mint_keypair,
client_public_key,
@@ -121,13 +128,32 @@ impl Drone {
}
if self.check_request_limit(request_amount) {
self.request_current += request_amount;
client.transfer_signed(tx)
metrics::submit(
influxdb::Point::new("drone")
.add_tag("op", influxdb::Value::String("airdrop".to_string()))
.add_field(
"request_amount",
influxdb::Value::Integer(request_amount as i64),
)
.add_field(
"request_current",
influxdb::Value::Integer(self.request_current as i64),
)
.to_owned(),
);
client.transfer_signed(&tx)
} else {
Err(Error::new(ErrorKind::Other, "token limit reached"))
}
}
}
impl Drop for Drone {
fn drop(&mut self) {
metrics::flush();
}
}
#[cfg(test)]
mod tests {
use bank::Bank;
@@ -136,6 +162,7 @@ mod tests {
use fullnode::FullNode;
use logger;
use mint::Mint;
use service::Service;
use signature::{KeyPair, KeyPairUtil};
use std::io::sink;
use std::net::{SocketAddr, UdpSocket};
@@ -233,32 +260,31 @@ mod tests {
}
#[test]
#[ignore]
fn test_send_airdrop() {
const SMALL_BATCH: i64 = 50;
const TPS_BATCH: i64 = 5_000_000;
logger::setup();
let leader = TestNode::new();
let leader = TestNode::new_localhost();
let alice = Mint::new(10_000_000);
let bank = Bank::new(&alice);
let bob_pubkey = KeyPair::new().pubkey();
let carlos_pubkey = KeyPair::new().pubkey();
let exit = Arc::new(AtomicBool::new(false));
let leader_data = leader.data.clone();
let server = FullNode::new_leader(
bank,
0,
None,
Some(Duration::from_millis(30)),
leader.data.clone(),
leader.sockets.requests,
leader.sockets.transaction,
leader.sockets.broadcast,
leader.sockets.respond,
leader.sockets.gossip,
leader,
exit.clone(),
sink(),
);
//TODO: this seems unstable
sleep(Duration::from_millis(900));
let mut addr: SocketAddr = "0.0.0.0:9900".parse().expect("bind to drone socket");
@@ -266,10 +292,10 @@ mod tests {
let mut drone = Drone::new(
alice.keypair(),
addr,
leader.data.transactions_addr,
leader.data.requests_addr,
leader_data.contact_info.tpu,
leader_data.contact_info.rpu,
None,
Some(5_000_050),
Some(150_000),
);
let bob_req = DroneRequest::GetAirdrop {
@@ -291,9 +317,9 @@ mod tests {
UdpSocket::bind("0.0.0.0:0").expect("drone bind to transactions socket");
let mut client = ThinClient::new(
leader.data.requests_addr,
leader_data.contact_info.rpu,
requests_socket,
leader.data.transactions_addr,
leader_data.contact_info.tpu,
transactions_socket,
);
@@ -306,8 +332,6 @@ mod tests {
assert_eq!(carlos_balance.unwrap(), TPS_BATCH);
exit.store(true, Ordering::Relaxed);
for t in server.thread_hdls {
t.join().unwrap();
}
server.join().unwrap();
}
}
@@ -144,7 +144,7 @@ fn next_hash(start_hash: &Hash, num_hashes: u64, transactions: &[Transaction]) -
/// Creates the next Tick or Transaction Entry `num_hashes` after `start_hash`.
pub fn next_entry(start_hash: &Hash, num_hashes: u64, transactions: Vec<Transaction>) -> Entry {
assert!(num_hashes > 0 || transactions.len() == 0);
assert!(num_hashes > 0 || transactions.is_empty());
Entry {
num_hashes,
id: next_hash(start_hash, num_hashes, &transactions),
@@ -5,7 +5,7 @@
use bank::Bank;
use entry::Entry;
use serde_json;
use std::io::{self, BufRead, Error, ErrorKind, Write};
use std::io::{self, BufRead, Cursor, Error, ErrorKind, Write};
pub struct EntryWriter<'a, W> {
bank: &'a Bank,
@@ -20,7 +20,8 @@ impl<'a, W: Write> EntryWriter<'a, W> {
fn write_entry(writer: &mut W, entry: &Entry) -> io::Result<()> {
let serialized = serde_json::to_string(entry).unwrap();
writeln!(writer, "{}", serialized)
writeln!(writer, "{}", serialized)?;
writer.flush()
}
pub fn write_entries<I>(writer: &mut W, entries: I) -> io::Result<()>
@@ -49,15 +50,28 @@ impl<'a, W: Write> EntryWriter<'a, W> {
}
}
pub fn read_entry(s: String) -> io::Result<Entry> {
serde_json::from_str(&s).map_err(|e| Error::new(ErrorKind::Other, e.to_string()))
/// Parse a string containing an Entry.
pub fn read_entry(s: &str) -> io::Result<Entry> {
serde_json::from_str(s).map_err(|e| Error::new(ErrorKind::Other, e.to_string()))
}
// TODO: How to implement this without attaching the input's lifetime to the output?
pub fn read_entries<'a, R: BufRead>(
reader: &'a mut R,
) -> impl Iterator<Item = io::Result<Entry>> + 'a {
reader.lines().map(|s| read_entry(s?))
/// Return an iterator for all the entries in the given file.
pub fn read_entries<R: BufRead>(reader: R) -> impl Iterator<Item = io::Result<Entry>> {
reader.lines().map(|s| read_entry(&s?))
}
/// Same as read_entries() but returning a vector. Handy for debugging short logs.
pub fn read_entries_to_vec<R: BufRead>(reader: R) -> io::Result<Vec<Entry>> {
let mut result = vec![];
for x in read_entries(reader) {
result.push(x?);
}
Ok(result)
}
/// Same as read_entries() but parsing a string and returning a vector.
pub fn read_entries_from_str(s: &str) -> io::Result<Vec<Entry>> {
read_entries_to_vec(Cursor::new(s))
}
#[cfg(test)]
@@ -67,6 +81,7 @@ mod tests {
use mint::Mint;
use packet::BLOB_DATA_SIZE;
use signature::{KeyPair, KeyPairUtil};
use std::str;
use transaction::Transaction;
#[test]
@@ -98,4 +113,13 @@ mod tests {
entry_writer.write_and_register_entry(&entries[1]).unwrap();
assert_eq!(bank.last_id(), entries[1].id);
}
#[test]
fn test_read_entries_from_str() {
let mint = Mint::new(1);
let mut buf = vec![];
EntryWriter::write_entries(&mut buf, mint.create_entries()).unwrap();
let entries = read_entries_from_str(str::from_utf8(&buf).unwrap()).unwrap();
assert_eq!(entries, mint.create_entries());
}
}
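Note: with read_entries now taking the reader by value, iterating a ledger file no longer borrows a &mut R; a sketch where the module path and file name are assumptions:

use solana::entry_writer::read_entries;
use std::fs::File;
use std::io::{self, BufReader};

fn main() -> io::Result<()> {
    // BufReader satisfies the BufRead bound; each item is io::Result<Entry>.
    let reader = BufReader::new(File::open("ledger.json")?);
    let mut count = 0;
    for entry in read_entries(reader) {
        let _entry = entry?;
        count += 1;
    }
    println!("{} entries", count);
    Ok(())
}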
@@ -523,7 +523,7 @@ mod test {
erasure::add_coding_blobs(blob_recycler, &mut blobs, offset as u64);
let blobs_len = blobs.len();
let d = crdt::ReplicatedData::new(
let d = crdt::NodeInfo::new(
KeyPair::new().pubkey(),
"127.0.0.1:1234".parse().unwrap(),
"127.0.0.1:1235".parse().unwrap(),
@@ -531,9 +531,7 @@ mod test {
"127.0.0.1:1237".parse().unwrap(),
"127.0.0.1:1238".parse().unwrap(),
);
let crdt = Arc::new(RwLock::new(crdt::Crdt::new(d.clone())));
assert!(crdt::Crdt::index_blobs(&crdt, &blobs, &mut (offset as u64)).is_ok());
assert!(crdt::Crdt::index_blobs(&d, &blobs, &mut (offset as u64)).is_ok());
for b in blobs {
let idx = b.read().unwrap().get_index().unwrap() as usize;
window[idx] = Some(b);
src/fetch_stage.rs

@@ -1,29 +1,31 @@
 //! The `fetch_stage` batches input from a UDP socket and sends it to a channel.

 use packet::PacketRecycler;
+use service::Service;
 use std::net::UdpSocket;
-use std::sync::atomic::AtomicBool;
+use std::sync::atomic::{AtomicBool, Ordering};
 use std::sync::mpsc::channel;
 use std::sync::Arc;
-use std::thread::JoinHandle;
+use std::thread::{self, JoinHandle};
 use streamer::{self, PacketReceiver};

 pub struct FetchStage {
-    pub thread_hdls: Vec<JoinHandle<()>>,
+    exit: Arc<AtomicBool>,
+    thread_hdls: Vec<JoinHandle<()>>,
 }

 impl FetchStage {
     pub fn new(
         socket: UdpSocket,
         exit: Arc<AtomicBool>,
-        packet_recycler: PacketRecycler,
+        packet_recycler: &PacketRecycler,
     ) -> (Self, PacketReceiver) {
         Self::new_multi_socket(vec![socket], exit, packet_recycler)
     }
     pub fn new_multi_socket(
         sockets: Vec<UdpSocket>,
         exit: Arc<AtomicBool>,
-        packet_recycler: PacketRecycler,
+        packet_recycler: &PacketRecycler,
     ) -> (Self, PacketReceiver) {
         let (packet_sender, packet_receiver) = channel();
         let thread_hdls: Vec<_> = sockets
@@ -38,6 +40,23 @@ impl FetchStage {
             })
             .collect();

-        (FetchStage { thread_hdls }, packet_receiver)
+        (FetchStage { exit, thread_hdls }, packet_receiver)
     }
+
+    pub fn close(&self) {
+        self.exit.store(true, Ordering::Relaxed);
+    }
+}
+
+impl Service for FetchStage {
+    fn thread_hdls(self) -> Vec<JoinHandle<()>> {
+        self.thread_hdls
+    }
+
+    fn join(self) -> thread::Result<()> {
+        for thread_hdl in self.thread_hdls() {
+            thread_hdl.join()?;
+        }
+        Ok(())
+    }
+}
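A minimal sketch of driving the revised constructor and the new close() hook (the socket address and setup here are illustrative):

    extern crate solana;

    use std::net::UdpSocket;
    use std::sync::atomic::AtomicBool;
    use std::sync::Arc;
    use solana::fetch_stage::FetchStage;
    use solana::packet::PacketRecycler;
    use solana::service::Service;

    fn main() {
        let socket = UdpSocket::bind("0.0.0.0:0").unwrap();
        let exit = Arc::new(AtomicBool::new(false));
        let recycler = PacketRecycler::default();
        // The recycler is now passed by reference instead of by value.
        let (stage, _packet_receiver) = FetchStage::new(socket, exit, &recycler);
        stage.close();          // sets the shared exit flag
        stage.join().unwrap();  // Service::join reaps the reader threads
    }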
src/fullnode.rs

@@ -1,55 +1,90 @@
 //! The `fullnode` module hosts all the fullnode microservices.

 use bank::Bank;
-use crdt::{Crdt, ReplicatedData, TestNode};
+use crdt::{Crdt, NodeInfo, TestNode};
+use entry::Entry;
 use entry_writer;
+use ledger::Block;
 use ncp::Ncp;
 use packet::BlobRecycler;
 use rpu::Rpu;
-use std::fs::File;
-use std::io::Write;
+use service::Service;
+use signature::{KeyPair, KeyPairUtil};
+use std::collections::VecDeque;
+use std::fs::{File, OpenOptions};
 use std::io::{stdin, stdout, BufReader};
+use std::io::{Read, Write};
 use std::net::SocketAddr;
 use std::net::UdpSocket;
-use std::sync::atomic::AtomicBool;
+use std::sync::atomic::{AtomicBool, Ordering};
 use std::sync::{Arc, RwLock};
-use std::thread::JoinHandle;
+use std::thread::{JoinHandle, Result};
 use std::time::Duration;
 use streamer;
 use tpu::Tpu;
 use tvu::Tvu;
+use untrusted::Input;

-//use std::time::Duration;
 pub struct FullNode {
-    pub thread_hdls: Vec<JoinHandle<()>>,
+    exit: Arc<AtomicBool>,
+    thread_hdls: Vec<JoinHandle<()>>,
 }

+pub enum LedgerFile {
+    StdInOut,
+    Path(String),
+}
+
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
+/// Fullnode configuration to be stored in file
+pub struct Config {
+    pub node_info: NodeInfo,
+    pkcs8: Vec<u8>,
+}
+
+/// Structure to be replicated by the network
+impl Config {
+    pub fn new(bind_addr: &SocketAddr, pkcs8: Vec<u8>) -> Self {
+        let keypair =
+            KeyPair::from_pkcs8(Input::from(&pkcs8)).expect("from_pkcs8 in fullnode::Config new");
+        let pubkey = keypair.pubkey();
+        let node_info = NodeInfo::new_leader_with_pubkey(pubkey, bind_addr);
+        Config { node_info, pkcs8 }
+    }
+    pub fn keypair(&self) -> KeyPair {
+        KeyPair::from_pkcs8(Input::from(&self.pkcs8))
+            .expect("from_pkcs8 in fullnode::Config keypair")
+    }
+}
+
 impl FullNode {
     pub fn new(
         mut node: TestNode,
         leader: bool,
-        infile: Option<String>,
+        ledger: LedgerFile,
         keypair_for_validator: Option<KeyPair>,
         network_entry_for_validator: Option<SocketAddr>,
-        outfile_for_leader: Option<String>,
-        exit: Arc<AtomicBool>,
     ) -> FullNode {
         info!("creating bank...");
         let bank = Bank::default();
-        let entry_height = if let Some(path) = infile {
-            let f = File::open(path).unwrap();
-            let mut r = BufReader::new(f);
-            let entries =
-                entry_writer::read_entries(&mut r).map(|e| e.expect("failed to parse entry"));
-            info!("processing ledger...");
-            bank.process_ledger(entries).expect("process_ledger")
-        } else {
-            let mut r = BufReader::new(stdin());
-            let entries =
-                entry_writer::read_entries(&mut r).map(|e| e.expect("failed to parse entry"));
-            info!("processing ledger...");
-            bank.process_ledger(entries).expect("process_ledger")
-        };
+        let (infile, outfile): (Box<Read>, Box<Write + Send>) = match ledger {
+            LedgerFile::Path(path) => (
+                Box::new(File::open(path.clone()).expect("opening ledger file")),
+                Box::new(
+                    OpenOptions::new()
+                        .create(true)
+                        .append(true)
+                        .open(path)
+                        .expect("opening ledger file"),
+                ),
+            ),
+            LedgerFile::StdInOut => (Box::new(stdin()), Box::new(stdout())),
+        };
+        let reader = BufReader::new(infile);
+        let entries = entry_writer::read_entries(reader).map(|e| e.expect("failed to parse entry"));
+
+        info!("processing ledger...");
+        let (entry_height, ledger_tail) = bank.process_ledger(entries).expect("process_ledger");
         // entry_height is the network-wide agreed height of the ledger.
         // initialize it from the input ledger
         info!("processed {} ledger...", entry_height);
@@ -60,69 +95,70 @@ impl FullNode {
         let local_requests_addr = node.sockets.requests.local_addr().unwrap();
         info!(
             "starting... local gossip address: {} (advertising {})",
-            local_gossip_addr, node.data.gossip_addr
+            local_gossip_addr, node.data.contact_info.ncp
         );
+        let requests_addr = node.data.contact_info.rpu;
+        let exit = Arc::new(AtomicBool::new(false));
         if !leader {
             let testnet_addr = network_entry_for_validator.expect("validator requires entry");
-            let network_entry_point = ReplicatedData::new_entry_point(testnet_addr);
+            let network_entry_point = NodeInfo::new_entry_point(testnet_addr);
             let keypair = keypair_for_validator.expect("validator requires keypair");
             let server = FullNode::new_validator(
                 keypair,
                 bank,
                 entry_height,
-                node.data.clone(),
-                node.sockets.requests,
-                node.sockets.respond,
-                node.sockets.replicate,
-                node.sockets.gossip,
-                node.sockets.repair,
-                network_entry_point,
+                Some(ledger_tail),
+                node,
+                &network_entry_point,
                 exit.clone(),
             );
             info!(
                 "validator ready... local request address: {} (advertising {}) connected to: {}",
-                local_requests_addr, node.data.requests_addr, testnet_addr
+                local_requests_addr, requests_addr, testnet_addr
             );
             server
         } else {
-            node.data.current_leader_id = node.data.id.clone();
-            let server = if let Some(file) = outfile_for_leader {
-                FullNode::new_leader(
-                    bank,
-                    entry_height,
-                    //Some(Duration::from_millis(1000)),
-                    None,
-                    node.data.clone(),
-                    node.sockets.requests,
-                    node.sockets.transaction,
-                    node.sockets.broadcast,
-                    node.sockets.respond,
-                    node.sockets.gossip,
-                    exit.clone(),
-                    File::create(file).expect("opening ledger file"),
-                )
-            } else {
-                FullNode::new_leader(
-                    bank,
-                    entry_height,
-                    //Some(Duration::from_millis(1000)),
-                    None,
-                    node.data.clone(),
-                    node.sockets.requests,
-                    node.sockets.transaction,
-                    node.sockets.broadcast,
-                    node.sockets.respond,
-                    node.sockets.gossip,
-                    exit.clone(),
-                    stdout(),
-                )
-            };
+            node.data.leader_id = node.data.id;
+
+            let server = FullNode::new_leader(
+                bank,
+                entry_height,
+                Some(ledger_tail),
+                //Some(Duration::from_millis(1000)),
+                None,
+                node,
+                exit.clone(),
+                outfile,
+            );
             info!(
                 "leader ready... local request address: {} (advertising {})",
-                local_requests_addr, node.data.requests_addr
+                local_requests_addr, requests_addr
             );
             server
         }
     }

+    fn new_window(
+        ledger_tail: Option<Vec<Entry>>,
+        entry_height: u64,
+        crdt: &Arc<RwLock<Crdt>>,
+        blob_recycler: &BlobRecycler,
+    ) -> streamer::Window {
+        match ledger_tail {
+            Some(ledger_tail) => {
+                // convert to blobs
+                let mut blobs = VecDeque::new();
+                ledger_tail.to_blobs(&blob_recycler, &mut blobs);
+
+                // flatten deque to vec
+                let blobs: Vec<_> = blobs.into_iter().collect();
+                streamer::initialized_window(&crdt, blobs, entry_height)
+            }
+            None => streamer::default_window(),
+        }
+    }
+
     /// Create a server instance acting as a leader.
     ///
     /// ```text
@@ -150,47 +186,46 @@ impl FullNode {
     pub fn new_leader<W: Write + Send + 'static>(
         bank: Bank,
         entry_height: u64,
+        ledger_tail: Option<Vec<Entry>>,
         tick_duration: Option<Duration>,
-        me: ReplicatedData,
-        requests_socket: UdpSocket,
-        transactions_socket: UdpSocket,
-        broadcast_socket: UdpSocket,
-        respond_socket: UdpSocket,
-        gossip_socket: UdpSocket,
+        node: TestNode,
         exit: Arc<AtomicBool>,
         writer: W,
     ) -> Self {
         let bank = Arc::new(bank);
         let mut thread_hdls = vec![];
-        let rpu = Rpu::new(bank.clone(), requests_socket, respond_socket, exit.clone());
-        thread_hdls.extend(rpu.thread_hdls);
+        let rpu = Rpu::new(
+            &bank,
+            node.sockets.requests,
+            node.sockets.respond,
+            exit.clone(),
+        );
+        thread_hdls.extend(rpu.thread_hdls());

         let blob_recycler = BlobRecycler::default();
+        let crdt = Arc::new(RwLock::new(Crdt::new(node.data).expect("Crdt::new")));
         let (tpu, blob_receiver) = Tpu::new(
-            bank.clone(),
+            &bank,
+            &crdt,
             tick_duration,
-            transactions_socket,
-            blob_recycler.clone(),
+            node.sockets.transaction,
+            &blob_recycler,
             exit.clone(),
             writer,
         );
-        thread_hdls.extend(tpu.thread_hdls);
-
-        let crdt = Arc::new(RwLock::new(Crdt::new(me)));
-        let window = streamer::default_window();
-        let gossip_send_socket = UdpSocket::bind("0.0.0.0:0").expect("bind 0");
+        thread_hdls.extend(tpu.thread_hdls());
+        let window = FullNode::new_window(ledger_tail, entry_height, &crdt, &blob_recycler);
         let ncp = Ncp::new(
-            crdt.clone(),
+            &crdt,
             window.clone(),
-            gossip_socket,
-            gossip_send_socket,
+            node.sockets.gossip,
+            node.sockets.gossip_send,
             exit.clone(),
         ).expect("Ncp::new");
-        thread_hdls.extend(ncp.thread_hdls);
+        thread_hdls.extend(ncp.thread_hdls());

         let t_broadcast = streamer::broadcaster(
-            broadcast_socket,
-            exit.clone(),
+            node.sockets.broadcast,
             crdt,
             window,
             entry_height,
@@ -199,7 +234,7 @@ impl FullNode {
         );
         thread_hdls.extend(vec![t_broadcast]);

-        FullNode { thread_hdls }
+        FullNode { exit, thread_hdls }
     }

     /// Create a server instance acting as a validator.
@@ -232,81 +267,119 @@ impl FullNode {
     /// `-------------------------------`
     /// ```
     pub fn new_validator(
         keypair: KeyPair,
         bank: Bank,
         entry_height: u64,
-        me: ReplicatedData,
-        requests_socket: UdpSocket,
-        respond_socket: UdpSocket,
-        replicate_socket: UdpSocket,
-        gossip_listen_socket: UdpSocket,
-        repair_socket: UdpSocket,
-        entry_point: ReplicatedData,
+        ledger_tail: Option<Vec<Entry>>,
+        node: TestNode,
+        entry_point: &NodeInfo,
         exit: Arc<AtomicBool>,
     ) -> Self {
         let bank = Arc::new(bank);
         let mut thread_hdls = vec![];
-        let rpu = Rpu::new(bank.clone(), requests_socket, respond_socket, exit.clone());
-        thread_hdls.extend(rpu.thread_hdls);
+        let rpu = Rpu::new(
+            &bank,
+            node.sockets.requests,
+            node.sockets.respond,
+            exit.clone(),
+        );
+        thread_hdls.extend(rpu.thread_hdls());

-        let crdt = Arc::new(RwLock::new(Crdt::new(me)));
+        let crdt = Arc::new(RwLock::new(Crdt::new(node.data).expect("Crdt::new")));
         crdt.write()
             .expect("'crdt' write lock before insert() in pub fn replicate")
             .insert(&entry_point);
-        let window = streamer::default_window();
-        let gossip_send_socket = UdpSocket::bind("0.0.0.0:0").expect("bind 0");
-        let retransmit_socket = UdpSocket::bind("0.0.0.0:0").expect("bind 0");

         let blob_recycler = BlobRecycler::default();
+
+        let window = FullNode::new_window(ledger_tail, entry_height, &crdt, &blob_recycler);
+
         let ncp = Ncp::new(
-            crdt.clone(),
+            &crdt,
             window.clone(),
-            gossip_listen_socket,
-            gossip_send_socket,
+            node.sockets.gossip,
+            node.sockets.gossip_send,
             exit.clone(),
         ).expect("Ncp::new");

         let tvu = Tvu::new(
-            bank.clone(),
+            keypair,
+            &bank,
             entry_height,
             crdt.clone(),
             window.clone(),
-            replicate_socket,
-            repair_socket,
-            retransmit_socket,
+            node.sockets.replicate,
+            node.sockets.repair,
+            node.sockets.retransmit,
             exit.clone(),
         );
-        thread_hdls.extend(tvu.thread_hdls);
-        thread_hdls.extend(ncp.thread_hdls);
-        FullNode { thread_hdls }
+        thread_hdls.extend(tvu.thread_hdls());
+        thread_hdls.extend(ncp.thread_hdls());
+        FullNode { exit, thread_hdls }
     }
+
+    //used for notifying many nodes in parallel to exit
+    pub fn exit(&self) {
+        self.exit.store(true, Ordering::Relaxed);
+    }
+    pub fn close(self) -> Result<()> {
+        self.exit();
+        self.join()
+    }
+}
+
+impl Service for FullNode {
+    fn thread_hdls(self) -> Vec<JoinHandle<()>> {
+        self.thread_hdls
+    }
+
+    fn join(self) -> Result<()> {
+        for thread_hdl in self.thread_hdls() {
+            thread_hdl.join()?;
+        }
+        Ok(())
+    }
+}

 #[cfg(test)]
 mod tests {
     use bank::Bank;
     use crdt::TestNode;
     use fullnode::FullNode;
     use mint::Mint;
-    use std::sync::atomic::{AtomicBool, Ordering};
+    use service::Service;
+    use signature::{KeyPair, KeyPairUtil};
+    use std::sync::atomic::AtomicBool;
     use std::sync::Arc;
     #[test]
     fn validator_exit() {
-        let tn = TestNode::new();
+        let kp = KeyPair::new();
+        let tn = TestNode::new_localhost_with_pubkey(kp.pubkey());
         let alice = Mint::new(10_000);
         let bank = Bank::new(&alice);
         let exit = Arc::new(AtomicBool::new(false));
-        let v = FullNode::new_validator(
-            bank,
-            0,
-            tn.data.clone(),
-            tn.sockets.requests,
-            tn.sockets.respond,
-            tn.sockets.replicate,
-            tn.sockets.gossip,
-            tn.sockets.repair,
-            tn.data,
-            exit.clone(),
-        );
-        exit.store(true, Ordering::Relaxed);
-        for t in v.thread_hdls {
-            t.join().unwrap();
-        }
+        let entry = tn.data.clone();
+        let v = FullNode::new_validator(kp, bank, 0, None, tn, &entry, exit);
+        v.exit();
+        v.join().unwrap();
     }
+    #[test]
+    fn validator_parallel_exit() {
+        let vals: Vec<FullNode> = (0..2)
+            .map(|_| {
+                let kp = KeyPair::new();
+                let tn = TestNode::new_localhost_with_pubkey(kp.pubkey());
+                let alice = Mint::new(10_000);
+                let bank = Bank::new(&alice);
+                let exit = Arc::new(AtomicBool::new(false));
+                let entry = tn.data.clone();
+                FullNode::new_validator(kp, bank, 0, None, tn, &entry, exit)
+            })
+            .collect();
+        //each validator can exit in parallel to speed many sequential calls to `join`
+        vals.iter().for_each(|v| v.exit());
+        //while join is called sequentially, the above exit call notified all the
+        //validators to exit from all their threads
+        vals.into_iter().for_each(|v| v.join().unwrap());
+    }
 }
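Putting the new pieces together, a leader might be started like this sketch (the path and node setup are illustrative, and assume the reconstructed signatures above):

    extern crate solana;

    use solana::crdt::TestNode;
    use solana::fullnode::{FullNode, LedgerFile};
    use solana::service::Service;

    fn main() {
        let node = TestNode::new_localhost();
        // A leader reads and appends its ledger at the given (illustrative) path;
        // a real deployment needs a populated ledger file here.
        let fullnode = FullNode::new(
            node,
            true, // leader
            LedgerFile::Path("ledger.log".to_string()),
            None, // keypair only needed for validators
            None, // network entry point only needed for validators
        );
        fullnode.close().unwrap(); // signal exit, then join all microservices
    }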
src/ledger.rs

@@ -196,32 +196,3 @@ mod tests {
         // assert_eq!(entries0, entries1);
     }
 }
-
-#[cfg(all(feature = "unstable", test))]
-mod bench {
-    extern crate test;
-    use self::test::Bencher;
-    use hash::hash;
-    use ledger::*;
-    use packet::BlobRecycler;
-    use signature::{KeyPair, KeyPairUtil};
-    use transaction::Transaction;
-
-    #[bench]
-    fn bench_block_to_blobs_to_block(bencher: &mut Bencher) {
-        let zero = Hash::default();
-        let one = hash(&zero);
-        let keypair = KeyPair::new();
-        let tx0 = Transaction::new(&keypair, keypair.pubkey(), 1, one);
-        let transactions = vec![tx0; 10];
-        let entries = next_entries(&zero, 1, transactions);
-
-        let blob_recycler = BlobRecycler::default();
-        bencher.iter(|| {
-            let mut blob_q = VecDeque::new();
-            entries.to_blobs(&blob_recycler, &mut blob_q);
-            assert_eq!(reconstruct_entries_from_blobs(blob_q).unwrap(), entries);
-        });
-    }
-}
src/lib.rs

@@ -25,6 +25,7 @@ pub mod fullnode;
 pub mod hash;
 pub mod ledger;
 pub mod logger;
+pub mod metrics;
 pub mod mint;
 pub mod nat;
 pub mod ncp;
@@ -38,6 +39,7 @@ pub mod request_processor;
 pub mod request_stage;
 pub mod result;
 pub mod rpu;
+pub mod service;
 pub mod signature;
 pub mod sigverify;
 pub mod sigverify_stage;
@@ -47,6 +49,7 @@ pub mod timing;
 pub mod tpu;
 pub mod transaction;
 pub mod tvu;
+pub mod voting;
 pub mod window_stage;
 pub mod write_stage;
 extern crate bincode;
@@ -71,4 +74,5 @@ extern crate untrusted;
 #[macro_use]
 extern crate matches;

+extern crate influx_db_client;
 extern crate rand;
src/logger.rs

@@ -9,6 +9,6 @@ static INIT: Once = ONCE_INIT;
 /// Setup function that is only run once, even if called multiple times.
 pub fn setup() {
     INIT.call_once(|| {
-        let _ = env_logger::init();
+        env_logger::init();
     });
 }
src/metrics.rs (new file)

@@ -0,0 +1,347 @@
//! The `metrics` module enables sending measurements to an InfluxDB instance

use influx_db_client as influxdb;
use std::env;
use std::sync::mpsc::{channel, Receiver, RecvTimeoutError, Sender};
use std::sync::{Arc, Barrier, Mutex, Once, ONCE_INIT};
use std::thread;
use std::time::{Duration, Instant};
use timing;

#[derive(Debug)]
enum MetricsCommand {
    Submit(influxdb::Point),
    Flush(Arc<Barrier>),
}

struct MetricsAgent {
    sender: Sender<MetricsCommand>,
}

trait MetricsWriter {
    // Write the points and empty the vector.  Called on the internal
    // MetricsAgent worker thread.
    fn write(&self, points: Vec<influxdb::Point>);
}

struct InfluxDbMetricsWriter {
    client: Option<influxdb::Client>,
}

impl InfluxDbMetricsWriter {
    fn new() -> Self {
        InfluxDbMetricsWriter {
            client: Self::build_client(),
        }
    }

    fn build_client() -> Option<influxdb::Client> {
        let host = env::var("INFLUX_HOST")
            .unwrap_or_else(|_| "https://metrics.solana.com:8086".to_string());
        let db = env::var("INFLUX_DATABASE").unwrap_or_else(|_| "scratch".to_string());
        let username = env::var("INFLUX_USERNAME").unwrap_or_else(|_| "scratch_writer".to_string());
        let password = env::var("INFLUX_PASSWORD").unwrap_or_else(|_| "topsecret".to_string());

        debug!("InfluxDB host={} db={} username={}", host, db, username);
        let mut client = influxdb::Client::new_with_option(host, db, None)
            .set_authentication(username, password);

        client.set_read_timeout(1 /*second*/);
        client.set_write_timeout(1 /*second*/);

        debug!("InfluxDB version: {:?}", client.get_version());
        Some(client)
    }
}

impl MetricsWriter for InfluxDbMetricsWriter {
    fn write(&self, points: Vec<influxdb::Point>) {
        if let Some(ref client) = self.client {
            debug!("submitting {} points", points.len());
            if let Err(err) = client.write_points(
                influxdb::Points { point: points },
                Some(influxdb::Precision::Milliseconds),
                None,
            ) {
                debug!("InfluxDbMetricsWriter write error: {:?}", err);
            }
        }
    }
}

impl Default for MetricsAgent {
    fn default() -> Self {
        Self::new(
            Arc::new(InfluxDbMetricsWriter::new()),
            Duration::from_secs(10),
        )
    }
}

impl MetricsAgent {
    fn new(writer: Arc<MetricsWriter + Send + Sync>, write_frequency: Duration) -> Self {
        let (sender, receiver) = channel::<MetricsCommand>();
        thread::spawn(move || Self::run(&receiver, &writer, write_frequency));
        MetricsAgent { sender }
    }

    fn run(
        receiver: &Receiver<MetricsCommand>,
        writer: &Arc<MetricsWriter + Send + Sync>,
        write_frequency: Duration,
    ) {
        trace!("run: enter");
        let mut last_write_time = Instant::now();
        let mut points = Vec::new();

        loop {
            match receiver.recv_timeout(write_frequency / 2) {
                Ok(cmd) => match cmd {
                    MetricsCommand::Flush(barrier) => {
                        debug!("metrics_thread: flush");
                        if !points.is_empty() {
                            writer.write(points);
                            points = Vec::new();
                            last_write_time = Instant::now();
                        }
                        barrier.wait();
                    }
                    MetricsCommand::Submit(point) => {
                        debug!("run: submit {:?}", point);
                        points.push(point);
                    }
                },
                Err(RecvTimeoutError::Timeout) => {
                    trace!("run: receive timeout");
                }
                Err(RecvTimeoutError::Disconnected) => {
                    debug!("run: sender disconnected");
                    break;
                }
            }

            let now = Instant::now();
            if now.duration_since(last_write_time) >= write_frequency && !points.is_empty() {
                debug!("run: writing {} points", points.len());
                writer.write(points);
                points = Vec::new();
                last_write_time = now;
            }
        }
        trace!("run: exit");
    }

    pub fn submit(&self, mut point: influxdb::Point) {
        if point.timestamp.is_none() {
            point.timestamp = Some(timing::timestamp() as i64);
        }
        debug!("Submitting point: {:?}", point);
        self.sender.send(MetricsCommand::Submit(point)).unwrap();
    }

    pub fn flush(&self) {
        debug!("Flush");
        let barrier = Arc::new(Barrier::new(2));
        self.sender
            .send(MetricsCommand::Flush(Arc::clone(&barrier)))
            .unwrap();

        barrier.wait();
    }
}

impl Drop for MetricsAgent {
    fn drop(&mut self) {
        self.flush();
    }
}

fn get_singleton_agent() -> Arc<Mutex<MetricsAgent>> {
    static INIT: Once = ONCE_INIT;
    static mut AGENT: Option<Arc<Mutex<MetricsAgent>>> = None;
    unsafe {
        INIT.call_once(|| AGENT = Some(Arc::new(Mutex::new(MetricsAgent::default()))));
        match AGENT {
            Some(ref agent) => agent.clone(),
            None => panic!("Failed to initialize metrics agent"),
        }
    }
}

/// Submits a new point from any thread.  Note that points are internally queued
/// and transmitted periodically in batches.
pub fn submit(point: influxdb::Point) {
    let agent_mutex = get_singleton_agent();
    let agent = agent_mutex.lock().unwrap();
    agent.submit(point);
}

/// Blocks until all pending points from previous calls to `submit` have been
/// transmitted.
pub fn flush() {
    let agent_mutex = get_singleton_agent();
    let agent = agent_mutex.lock().unwrap();
    agent.flush();
}

/// Hook the panic handler to generate a data point on each panic
pub fn set_panic_hook(program: &'static str) {
    use std::panic;
    use std::sync::{Once, ONCE_INIT};
    static SET_HOOK: Once = ONCE_INIT;
    SET_HOOK.call_once(|| {
        let default_hook = panic::take_hook();
        panic::set_hook(Box::new(move |ono| {
            default_hook(ono);
            submit(
                influxdb::Point::new("panic")
                    .add_tag("program", influxdb::Value::String(program.to_string()))
                    .add_tag(
                        "thread",
                        influxdb::Value::String(
                            thread::current().name().unwrap_or("?").to_string(),
                        ),
                    )
                    // The 'one' field exists to give Kapacitor Alerts a numerical value
                    // to filter on
                    .add_field("one", influxdb::Value::Integer(1))
                    .add_field(
                        "message",
                        influxdb::Value::String(
                            // TODO: use ono.message() when it becomes stable
                            ono.to_string(),
                        ),
                    )
                    .add_field(
                        "location",
                        influxdb::Value::String(match ono.location() {
                            Some(location) => location.to_string(),
                            None => "?".to_string(),
                        }),
                    )
                    .to_owned(),
            );
            // Flush metrics immediately in case the process exits immediately
            // upon return
            flush();
        }));
    });
}

#[cfg(test)]
mod test {
    use super::*;
    use rand::random;
    use std::sync::atomic::{AtomicUsize, Ordering};

    struct MockMetricsWriter {
        points_written: AtomicUsize,
    }
    impl MockMetricsWriter {
        fn new() -> Self {
            MockMetricsWriter {
                points_written: AtomicUsize::new(0),
            }
        }

        fn points_written(&self) -> usize {
            return self.points_written.load(Ordering::SeqCst);
        }
    }

    impl MetricsWriter for MockMetricsWriter {
        fn write(&self, points: Vec<influxdb::Point>) {
            assert!(!points.is_empty());

            self.points_written
                .fetch_add(points.len(), Ordering::SeqCst);

            println!(
                "Writing {} points ({} total)",
                points.len(),
                self.points_written.load(Ordering::SeqCst)
            );
        }
    }

    #[test]
    fn test_submit() {
        let writer = Arc::new(MockMetricsWriter::new());
        let agent = MetricsAgent::new(writer.clone(), Duration::from_secs(10));

        for i in 0..42 {
            agent.submit(influxdb::Point::new(&format!("measurement {}", i)));
        }

        agent.flush();
        assert_eq!(writer.points_written(), 42);
    }

    #[test]
    fn test_submit_with_delay() {
        let writer = Arc::new(MockMetricsWriter::new());
        let agent = MetricsAgent::new(writer.clone(), Duration::from_millis(100));

        agent.submit(influxdb::Point::new("point 1"));
        thread::sleep(Duration::from_secs(2));
        assert_eq!(writer.points_written(), 1);
    }

    #[test]
    fn test_multithread_submit() {
        let writer = Arc::new(MockMetricsWriter::new());
        let agent = Arc::new(Mutex::new(MetricsAgent::new(
            writer.clone(),
            Duration::from_secs(10),
        )));

        //
        // Submit measurements from different threads
        //
        let mut threads = Vec::new();
        for i in 0..42 {
            let point = influxdb::Point::new(&format!("measurement {}", i));
            let agent = Arc::clone(&agent);
            threads.push(thread::spawn(move || {
                agent.lock().unwrap().submit(point);
            }));
        }

        for thread in threads {
            thread.join().unwrap();
        }

        agent.lock().unwrap().flush();
        assert_eq!(writer.points_written(), 42);
    }

    #[test]
    fn test_flush_before_drop() {
        let writer = Arc::new(MockMetricsWriter::new());
        {
            let agent = MetricsAgent::new(writer.clone(), Duration::from_secs(9999999));
            agent.submit(influxdb::Point::new("point 1"));
        }

        assert_eq!(writer.points_written(), 1);
    }

    #[test]
    fn test_live_submit() {
        let agent = MetricsAgent::default();

        let point = influxdb::Point::new("live_submit_test")
            .add_tag("test", influxdb::Value::Boolean(true))
            .add_field(
                "random_bool",
                influxdb::Value::Boolean(random::<u8>() < 128),
            )
            .add_field(
                "random_int",
                influxdb::Value::Integer(random::<u8>() as i64),
            )
            .to_owned();
        agent.submit(point);
    }
}
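For orientation, here is a minimal sketch of a caller of the new module (the crate and measurement names are illustrative assumptions, not part of this change):

    extern crate influx_db_client;
    extern crate solana;

    use influx_db_client as influxdb;
    use solana::metrics;

    fn main() {
        metrics::set_panic_hook("example"); // panics become data points too
        metrics::submit(
            influxdb::Point::new("example-event")
                .add_field("count", influxdb::Value::Integer(1))
                .to_owned(),
        );
        metrics::flush(); // block until the queued point is transmitted
    }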
src/mint.rs

@@ -15,11 +15,7 @@ pub struct Mint {
 }

 impl Mint {
-    pub fn new(tokens: i64) -> Self {
-        let rnd = SystemRandom::new();
-        let pkcs8 = KeyPair::generate_pkcs8(&rnd)
-            .expect("generate_pkcs8 in mint pub fn new")
-            .to_vec();
+    pub fn new_with_pkcs8(tokens: i64, pkcs8: Vec<u8>) -> Self {
         let keypair =
             KeyPair::from_pkcs8(Input::from(&pkcs8)).expect("from_pkcs8 in mint pub fn new");
         let pubkey = keypair.pubkey();
@@ -30,6 +26,14 @@ impl Mint {
         }
     }

+    pub fn new(tokens: i64) -> Self {
+        let rnd = SystemRandom::new();
+        let pkcs8 = KeyPair::generate_pkcs8(&rnd)
+            .expect("generate_pkcs8 in mint pub fn new")
+            .to_vec();
+        Self::new_with_pkcs8(tokens, pkcs8)
+    }
+
     pub fn seed(&self) -> Hash {
         hash(&self.pkcs8)
     }
src/nat.rs (mode changed: normal file → executable file)

@@ -2,6 +2,7 @@

 extern crate futures;
 extern crate p2p;
+extern crate rand;
 extern crate reqwest;
 extern crate tokio_core;

@@ -9,7 +10,9 @@ use std::net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket};

 use self::futures::Future;
 use self::p2p::UdpSocketExt;
+use rand::{thread_rng, Rng};
 use std::env;
+use std::io;
 use std::str;

 /// A data type representing a public Udp socket
@@ -32,8 +35,25 @@ pub fn get_public_ip_addr() -> Result<IpAddr, String> {
     }
 }

+pub fn udp_random_bind(start: u16, end: u16, tries: u32) -> io::Result<UdpSocket> {
+    let mut count = 0;
+    loop {
+        count += 1;
+
+        let rand_port = thread_rng().gen_range(start, end);
+        let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), rand_port);
+
+        match UdpSocket::bind(addr) {
+            Result::Ok(val) => break Result::Ok(val),
+            Result::Err(err) => if err.kind() != io::ErrorKind::AddrInUse || count >= tries {
+                return Err(err);
+            },
+        }
+    }
+}
+
 /// Binds a private Udp address to a public address using UPnP if possible
-pub fn udp_public_bind(label: &str) -> UdpSocketPair {
+pub fn udp_public_bind(label: &str, startport: u16, endport: u16) -> UdpSocketPair {
     let private_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 0);

     let mut core = tokio_core::reactor::Core::new().unwrap();
@@ -72,7 +92,7 @@ pub fn udp_public_bind(label: &str) -> UdpSocketPair {
             //
             // TODO: Remove the |sender| socket and deal with the downstream changes to
             //       the UDP signalling
-            let mut local_addr_sender = local_addr.clone();
+            let mut local_addr_sender = local_addr;
             local_addr_sender.set_port(public_addr.port());
             UdpSocket::bind(local_addr_sender).unwrap()
         };
@@ -84,11 +104,15 @@ pub fn udp_public_bind(label: &str) -> UdpSocketPair {
             }
         }
         Err(_) => {
-            let sender = UdpSocket::bind(private_addr).unwrap();
+            let sender = udp_random_bind(startport, endport, 5).unwrap();
             let local_addr = sender.local_addr().unwrap();
             info!("Using local address {} for {}", local_addr, label);

+            let pub_ip = get_public_ip_addr().unwrap();
+            let pub_addr = SocketAddr::new(pub_ip, local_addr.port());
+
+            info!("Using source address {} for {}", pub_addr, label);
             UdpSocketPair {
-                addr: private_addr,
+                addr: pub_addr,
                 receiver: sender.try_clone().unwrap(),
                 sender,
             }
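A sketch of exercising the new fallback binder directly (the port range and retry count here are arbitrary):

    extern crate solana;

    use solana::nat::udp_random_bind;

    fn main() {
        // Try up to 5 random ports in [8000, 10000) until one binds.
        let socket = udp_random_bind(8000, 10_000, 5).expect("no free port in range");
        println!("bound {}", socket.local_addr().unwrap());
    }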
src/ncp.rs

@@ -3,20 +3,22 @@
 use crdt::Crdt;
 use packet::{BlobRecycler, SharedBlob};
 use result::Result;
+use service::Service;
 use std::net::UdpSocket;
-use std::sync::atomic::AtomicBool;
+use std::sync::atomic::{AtomicBool, Ordering};
 use std::sync::mpsc::channel;
 use std::sync::{Arc, RwLock};
-use std::thread::JoinHandle;
+use std::thread::{self, JoinHandle};
 use streamer;

 pub struct Ncp {
-    pub thread_hdls: Vec<JoinHandle<()>>,
+    exit: Arc<AtomicBool>,
+    thread_hdls: Vec<JoinHandle<()>>,
 }

 impl Ncp {
     pub fn new(
-        crdt: Arc<RwLock<Crdt>>,
+        crdt: &Arc<RwLock<Crdt>>,
         window: Arc<RwLock<Vec<Option<SharedBlob>>>>,
         gossip_listen_socket: UdpSocket,
         gossip_send_socket: UdpSocket,
@@ -37,8 +39,8 @@ impl Ncp {
         )?;
         let (response_sender, response_receiver) = channel();
         let t_responder = streamer::responder(
+            "ncp",
             gossip_send_socket,
-            exit.clone(),
             blob_recycler.clone(),
             response_receiver,
         );
@@ -50,9 +52,27 @@ impl Ncp {
             response_sender.clone(),
             exit.clone(),
         );
-        let t_gossip = Crdt::gossip(crdt.clone(), blob_recycler, response_sender, exit);
+        let t_gossip = Crdt::gossip(crdt.clone(), blob_recycler, response_sender, exit.clone());
         let thread_hdls = vec![t_receiver, t_responder, t_listen, t_gossip];
-        Ok(Ncp { thread_hdls })
+        Ok(Ncp { exit, thread_hdls })
+    }
+
+    pub fn close(self) -> thread::Result<()> {
+        self.exit.store(true, Ordering::Relaxed);
+        self.join()
+    }
+}
+
+impl Service for Ncp {
+    fn thread_hdls(self) -> Vec<JoinHandle<()>> {
+        self.thread_hdls
+    }
+
+    fn join(self) -> thread::Result<()> {
+        for thread_hdl in self.thread_hdls() {
+            thread_hdl.join()?;
+        }
+        Ok(())
+    }
 }

@@ -60,27 +80,25 @@ impl Ncp {
 mod tests {
     use crdt::{Crdt, TestNode};
     use ncp::Ncp;
-    use std::sync::atomic::{AtomicBool, Ordering};
+    use std::sync::atomic::AtomicBool;
     use std::sync::{Arc, RwLock};

     #[test]
+    #[ignore]
     // test that stage will exit when flag is set
     fn test_exit() {
         let exit = Arc::new(AtomicBool::new(false));
-        let tn = TestNode::new();
-        let crdt = Crdt::new(tn.data.clone());
+        let tn = TestNode::new_localhost();
+        let crdt = Crdt::new(tn.data.clone()).expect("Crdt::new");
         let c = Arc::new(RwLock::new(crdt));
         let w = Arc::new(RwLock::new(vec![]));
         let d = Ncp::new(
-            c.clone(),
+            &c,
             w,
             tn.sockets.gossip,
             tn.sockets.gossip_send,
             exit.clone(),
         ).unwrap();
-        exit.store(true, Ordering::Relaxed);
-        for t in d.thread_hdls {
-            t.join().expect("thread join");
-        }
+        d.close().expect("thread join");
     }
 }
src/packet.rs

@@ -12,7 +12,6 @@ use std::mem::size_of;
 use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, UdpSocket};
 use std::sync::atomic::AtomicUsize;
 use std::sync::{Arc, Mutex, RwLock};
-use std::time::Instant;

 pub type SharedPackets = Arc<RwLock<Packets>>;
 pub type SharedBlob = Arc<RwLock<Blob>>;
@@ -188,7 +187,6 @@ impl<T: Default> Recycler<T> {

 impl Packets {
     fn run_read_from(&mut self, socket: &UdpSocket) -> Result<usize> {
-        static mut COUNTER: Counter = create_counter!("packets", 10);
         self.packets.resize(NUM_PACKETS, Packet::default());
         let mut i = 0;
         //DOCUMENTED SIDE-EFFECT
@@ -198,13 +196,12 @@ impl Packets {
         //   * read until it fails
         //   * set it back to blocking before returning
         socket.set_nonblocking(false)?;
-        let mut start = Instant::now();
         for p in &mut self.packets {
             p.meta.size = 0;
             trace!("receiving on {}", socket.local_addr().unwrap());
             match socket.recv_from(&mut p.data) {
                 Err(_) if i > 0 => {
-                    inc_counter!(COUNTER, i, start);
+                    inc_new_counter!("packets-recv_count", 1);
                     debug!("got {:?} messages on {}", i, socket.local_addr().unwrap());
                     break;
                 }
@@ -215,8 +212,8 @@ impl Packets {
                 Ok((nrecv, from)) => {
                     p.meta.size = nrecv;
                     p.meta.set_addr(&from);
+                    trace!("got {} bytes from {}", nrecv, from);
                     if i == 0 {
-                        start = Instant::now();
                         socket.set_nonblocking(true)?;
                     }
                 }
@@ -242,7 +239,7 @@ impl Packets {

 pub fn to_packets_chunked<T: Serialize>(
     r: &PacketRecycler,
-    xs: Vec<T>,
+    xs: &[T],
     chunks: usize,
 ) -> Vec<SharedPackets> {
     let mut out = vec![];
@@ -260,10 +257,10 @@ pub fn to_packets_chunked<T: Serialize>(
         }
         out.push(p);
     }
-    return out;
+    out
 }

-pub fn to_packets<T: Serialize>(r: &PacketRecycler, xs: Vec<T>) -> Vec<SharedPackets> {
+pub fn to_packets<T: Serialize>(r: &PacketRecycler, xs: &[T]) -> Vec<SharedPackets> {
     to_packets_chunked(r, xs, NUM_PACKETS)
 }

@@ -349,7 +346,7 @@ impl Blob {
     }

     pub fn is_coding(&self) -> bool {
-        return (self.get_flags().unwrap() & BLOB_FLAG_IS_CODING) != 0;
+        (self.get_flags().unwrap() & BLOB_FLAG_IS_CODING) != 0
     }

     pub fn set_coding(&mut self) -> Result<()> {
@@ -409,6 +406,7 @@ impl Blob {
                 Ok((nrecv, from)) => {
                     p.meta.size = nrecv;
                     p.meta.set_addr(&from);
+                    trace!("got {} bytes from {}", nrecv, from);
                     if i == 0 {
                         socket.set_nonblocking(true)?;
                     }
@@ -424,7 +422,13 @@ impl Blob {
         {
             let p = r.read().expect("'r' read lock in pub fn send_to");
             let a = p.meta.addr();
-            socket.send_to(&p.data[..p.meta.size], &a)?;
+            if let Err(e) = socket.send_to(&p.data[..p.meta.size], &a) {
+                warn!(
+                    "error sending {} byte packet to {:?}: {:?}",
+                    p.meta.size, a, e
+                );
+                Err(e)?;
+            }
         }
         re.recycle(r);
     }
@@ -520,15 +524,15 @@ mod tests {
     fn test_to_packets() {
         let tx = Request::GetTransactionCount;
         let re = PacketRecycler::default();
-        let rv = to_packets(&re, vec![tx.clone(); 1]);
+        let rv = to_packets(&re, &vec![tx.clone(); 1]);
         assert_eq!(rv.len(), 1);
         assert_eq!(rv[0].read().unwrap().packets.len(), 1);

-        let rv = to_packets(&re, vec![tx.clone(); NUM_PACKETS]);
+        let rv = to_packets(&re, &vec![tx.clone(); NUM_PACKETS]);
         assert_eq!(rv.len(), 1);
         assert_eq!(rv[0].read().unwrap().packets.len(), NUM_PACKETS);

-        let rv = to_packets(&re, vec![tx.clone(); NUM_PACKETS + 1]);
+        let rv = to_packets(&re, &vec![tx.clone(); NUM_PACKETS + 1]);
         assert_eq!(rv.len(), 2);
         assert_eq!(rv[0].read().unwrap().packets.len(), NUM_PACKETS);
         assert_eq!(rv[1].read().unwrap().packets.len(), 1);
src/payment_plan.rs

@@ -13,7 +13,7 @@ pub enum Witness {
     Timestamp(DateTime<Utc>),

     /// A siganture from PublicKey.
-    Signature(PublicKey),
+    Signature,
 }

 /// Some amount of tokens that should be sent to the `to` `PublicKey`.
@@ -36,5 +36,5 @@ pub trait PaymentPlan {

     /// Apply a witness to the payment plan to see if the plan can be reduced.
     /// If so, modify the plan in-place.
-    fn apply_witness(&mut self, witness: &Witness);
+    fn apply_witness(&mut self, witness: &Witness, from: &PublicKey);
 }
src/record_stage.rs

@@ -8,8 +8,9 @@
 use entry::Entry;
 use hash::Hash;
 use recorder::Recorder;
+use service::Service;
 use std::sync::mpsc::{channel, Receiver, RecvError, Sender, TryRecvError};
-use std::thread::{Builder, JoinHandle};
+use std::thread::{self, Builder, JoinHandle};
 use std::time::{Duration, Instant};
 use transaction::Transaction;

@@ -20,7 +21,7 @@ pub enum Signal {
 }

 pub struct RecordStage {
-    pub thread_hdl: JoinHandle<()>,
+    thread_hdl: JoinHandle<()>,
 }

 impl RecordStage {
@@ -31,7 +32,7 @@ impl RecordStage {
         start_hash: &Hash,
     ) -> (Self, Receiver<Vec<Entry>>) {
         let (entry_sender, entry_receiver) = channel();
-        let start_hash = start_hash.clone();
+        let start_hash = *start_hash;

         let thread_hdl = Builder::new()
             .name("solana-record-stage".to_string())
@@ -51,7 +52,7 @@ impl RecordStage {
         tick_duration: Duration,
     ) -> (Self, Receiver<Vec<Entry>>) {
         let (entry_sender, entry_receiver) = channel();
-        let start_hash = start_hash.clone();
+        let start_hash = *start_hash;

         let thread_hdl = Builder::new()
             .name("solana-record-stage".to_string())
@@ -59,13 +60,14 @@ impl RecordStage {
                 let mut recorder = Recorder::new(start_hash);
                 let start_time = Instant::now();
                 loop {
-                    if let Err(_) = Self::try_process_signals(
+                    if Self::try_process_signals(
                         &mut recorder,
                         start_time,
                         tick_duration,
                         &signal_receiver,
                         &entry_sender,
-                    ) {
+                    ).is_err()
+                    {
                         return;
                     }
                     recorder.hash();
@@ -124,6 +126,16 @@ impl RecordStage {
     }
 }

+impl Service for RecordStage {
+    fn thread_hdls(self) -> Vec<JoinHandle<()>> {
+        vec![self.thread_hdl]
+    }
+
+    fn join(self) -> thread::Result<()> {
+        self.thread_hdl.join()
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;
src/replicate_stage.rs

@@ -1,43 +1,149 @@
 //! The `replicate_stage` replicates transactions broadcast by the leader.

 use bank::Bank;
+use bincode::serialize;
+use counter::Counter;
+use crdt::Crdt;
 use ledger;
-use result::Result;
-use std::sync::atomic::{AtomicBool, Ordering};
-use std::sync::Arc;
-use std::thread::{Builder, JoinHandle};
+use packet::BlobRecycler;
+use result::{Error, Result};
+use service::Service;
+use signature::KeyPair;
+use std::collections::VecDeque;
+use std::net::UdpSocket;
+use std::sync::atomic::AtomicUsize;
+use std::sync::mpsc::channel;
+use std::sync::mpsc::RecvTimeoutError;
+use std::sync::{Arc, RwLock};
+use std::thread::{self, Builder, JoinHandle};
 use std::time::Duration;
-use streamer::BlobReceiver;
+use streamer::{responder, BlobReceiver, BlobSender};
+use timing;
+use transaction::Transaction;
+use voting::entries_to_votes;

 pub struct ReplicateStage {
-    pub thread_hdl: JoinHandle<()>,
+    thread_hdls: Vec<JoinHandle<()>>,
 }

+const VOTE_TIMEOUT_MS: u64 = 1000;
+
 impl ReplicateStage {
     /// Process entry blobs, already in order
-    fn replicate_requests(bank: &Arc<Bank>, blob_receiver: &BlobReceiver) -> Result<()> {
+    fn replicate_requests(
+        keypair: &Arc<KeyPair>,
+        bank: &Arc<Bank>,
+        crdt: &Arc<RwLock<Crdt>>,
+        blob_recycler: &BlobRecycler,
+        window_receiver: &BlobReceiver,
+        vote_blob_sender: &BlobSender,
+        last_vote: &mut u64,
+    ) -> Result<()> {
         let timer = Duration::new(1, 0);
-        let blobs = blob_receiver.recv_timeout(timer)?;
+        //coalesce all the available blobs into a single vote
+        let mut blobs = window_receiver.recv_timeout(timer)?;
+        while let Ok(mut more) = window_receiver.try_recv() {
+            blobs.append(&mut more);
+        }
         let blobs_len = blobs.len();
-        let entries = ledger::reconstruct_entries_from_blobs(blobs)?;
+        let entries = ledger::reconstruct_entries_from_blobs(blobs.clone())?;
+        {
+            let votes = entries_to_votes(&entries);
+            let mut wcrdt = crdt.write().unwrap();
+            wcrdt.insert_votes(&votes);
+        };
+        inc_new_counter!(
+            "replicate-transactions",
+            entries.iter().map(|x| x.transactions.len()).sum()
+        );
         let res = bank.process_entries(entries);
         if res.is_err() {
             error!("process_entries {} {:?}", blobs_len, res);
         }
-        res?;
+        let _ = res?;
+        let now = timing::timestamp();
+        if now - *last_vote > VOTE_TIMEOUT_MS {
+            let last_id = bank.last_id();
+            let shared_blob = blob_recycler.allocate();
+            let (vote, addr) = {
+                let mut wcrdt = crdt.write().unwrap();
+                //TODO: doesn't seem like there is a synchronous call to get height and id
+                info!("replicate_stage {:?}", &last_id[..8]);
+                wcrdt.new_vote(last_id)
+            }?;
+            {
+                let mut blob = shared_blob.write().unwrap();
+                let tx = Transaction::new_vote(&keypair, vote, last_id, 0);
+                let bytes = serialize(&tx)?;
+                let len = bytes.len();
+                blob.data[..len].copy_from_slice(&bytes);
+                blob.meta.set_addr(&addr);
+                blob.meta.size = len;
+            }
+            inc_new_counter!("replicate-vote_sent", 1);
+            *last_vote = now;
+
+            vote_blob_sender.send(VecDeque::from(vec![shared_blob]))?;
+        }
+        while let Some(blob) = blobs.pop_front() {
+            blob_recycler.recycle(blob);
+        }
         Ok(())
     }
-
-    pub fn new(bank: Arc<Bank>, exit: Arc<AtomicBool>, window_receiver: BlobReceiver) -> Self {
-        let thread_hdl = Builder::new()
+    pub fn new(
+        keypair: KeyPair,
+        bank: Arc<Bank>,
+        crdt: Arc<RwLock<Crdt>>,
+        blob_recycler: BlobRecycler,
+        window_receiver: BlobReceiver,
+    ) -> Self {
+        let (vote_blob_sender, vote_blob_receiver) = channel();
+        let send = UdpSocket::bind("0.0.0.0:0").expect("bind");
+        let t_responder = responder(
+            "replicate_stage",
+            send,
+            blob_recycler.clone(),
+            vote_blob_receiver,
+        );
+        let skeypair = Arc::new(keypair);
+
+        let t_replicate = Builder::new()
             .name("solana-replicate-stage".to_string())
-            .spawn(move || loop {
-                let e = Self::replicate_requests(&bank, &window_receiver);
-                if e.is_err() && exit.load(Ordering::Relaxed) {
-                    break;
+            .spawn(move || {
+                let mut timestamp: u64 = 0;
+                loop {
+                    if let Err(e) = Self::replicate_requests(
+                        &skeypair,
+                        &bank,
+                        &crdt,
+                        &blob_recycler,
+                        &window_receiver,
+                        &vote_blob_sender,
+                        &mut timestamp,
+                    ) {
+                        match e {
+                            Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => break,
+                            Error::RecvTimeoutError(RecvTimeoutError::Timeout) => (),
+                            _ => error!("{:?}", e),
+                        }
+                    }
                 }
             })
             .unwrap();
-        ReplicateStage { thread_hdl }
+        ReplicateStage {
+            thread_hdls: vec![t_responder, t_replicate],
+        }
     }
 }
+
+impl Service for ReplicateStage {
+    fn thread_hdls(self) -> Vec<JoinHandle<()>> {
+        self.thread_hdls
+    }
+    fn join(self) -> thread::Result<()> {
+        for thread_hdl in self.thread_hdls() {
+            thread_hdl.join()?;
+        }
+        Ok(())
+    }
+}
src/request.rs

@@ -4,7 +4,7 @@ use hash::Hash;
 use signature::{PublicKey, Signature};

 #[cfg_attr(feature = "cargo-clippy", allow(large_enum_variant))]
-#[derive(Serialize, Deserialize, Debug, Clone)]
+#[derive(Serialize, Deserialize, Debug, Clone, Copy)]
 pub enum Request {
     GetBalance { key: PublicKey },
     GetLastId,
src/request_stage.rs

@@ -5,18 +5,18 @@ use packet::{to_blobs, BlobRecycler, PacketRecycler, Packets, SharedPackets};
 use rayon::prelude::*;
 use request::Request;
 use request_processor::RequestProcessor;
-use result::Result;
+use result::{Error, Result};
+use service::Service;
 use std::net::SocketAddr;
-use std::sync::atomic::{AtomicBool, Ordering};
-use std::sync::mpsc::{channel, Receiver};
+use std::sync::mpsc::{channel, Receiver, RecvTimeoutError};
 use std::sync::Arc;
-use std::thread::{Builder, JoinHandle};
+use std::thread::{self, Builder, JoinHandle};
 use std::time::Instant;
 use streamer::{self, BlobReceiver, BlobSender};
 use timing;

 pub struct RequestStage {
-    pub thread_hdl: JoinHandle<()>,
+    thread_hdl: JoinHandle<()>,
     pub request_processor: Arc<RequestProcessor>,
 }

@@ -80,7 +80,6 @@ impl RequestStage {
     }
     pub fn new(
         request_processor: RequestProcessor,
-        exit: Arc<AtomicBool>,
         packet_receiver: Receiver<SharedPackets>,
         packet_recycler: PacketRecycler,
         blob_recycler: BlobRecycler,
@@ -91,16 +90,17 @@ impl RequestStage {
         let thread_hdl = Builder::new()
             .name("solana-request-stage".to_string())
             .spawn(move || loop {
-                let e = Self::process_request_packets(
+                if let Err(e) = Self::process_request_packets(
                     &request_processor_,
                     &packet_receiver,
                     &blob_sender,
                     &packet_recycler,
                     &blob_recycler,
-                );
-                if e.is_err() {
-                    if exit.load(Ordering::Relaxed) {
-                        break;
+                ) {
+                    match e {
+                        Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => break,
+                        Error::RecvTimeoutError(RecvTimeoutError::Timeout) => (),
+                        _ => error!("{:?}", e),
                     }
                 }
             })
@@ -114,3 +114,13 @@ impl RequestStage {
         )
     }
 }
+
+impl Service for RequestStage {
+    fn thread_hdls(self) -> Vec<JoinHandle<()>> {
+        vec![self.thread_hdl]
+    }
+
+    fn join(self) -> thread::Result<()> {
+        self.thread_hdl.join()
+    }
+}
src/result.rs

@@ -2,9 +2,13 @@

 use bank;
 use bincode;
+use crdt;
 #[cfg(feature = "erasure")]
 use erasure;
 use serde_json;
 use std;
 use std::any::Any;
+use streamer;

 #[derive(Debug)]
 pub enum Error {
@@ -16,10 +20,11 @@ pub enum Error {
     RecvTimeoutError(std::sync::mpsc::RecvTimeoutError),
     Serialize(std::boxed::Box<bincode::ErrorKind>),
     BankError(bank::BankError),
+    CrdtError(crdt::CrdtError),
+    WindowError(streamer::WindowError),
     #[cfg(feature = "erasure")]
     ErasureError(erasure::ErasureError),
     SendError,
     Services,
-    CrdtTooSmall,
     GenericError,
 }

 pub type Result<T> = std::result::Result<T, Error>;
@@ -39,6 +44,22 @@ impl std::convert::From<bank::BankError> for Error {
         Error::BankError(e)
     }
 }
+impl std::convert::From<crdt::CrdtError> for Error {
+    fn from(e: crdt::CrdtError) -> Error {
+        Error::CrdtError(e)
+    }
+}
+impl std::convert::From<streamer::WindowError> for Error {
+    fn from(e: streamer::WindowError) -> Error {
+        Error::WindowError(e)
+    }
+}
 #[cfg(feature = "erasure")]
 impl std::convert::From<erasure::ErasureError> for Error {
     fn from(e: erasure::ErasureError) -> Error {
         Error::ErasureError(e)
     }
 }
 impl<T> std::convert::From<std::sync::mpsc::SendError<T>> for Error {
     fn from(_e: std::sync::mpsc::SendError<T>) -> Error {
         Error::SendError
src/rpu.rs

@@ -27,20 +27,21 @@ use bank::Bank;
 use packet::{BlobRecycler, PacketRecycler};
 use request_processor::RequestProcessor;
 use request_stage::RequestStage;
+use service::Service;
 use std::net::UdpSocket;
 use std::sync::atomic::AtomicBool;
 use std::sync::mpsc::channel;
 use std::sync::Arc;
-use std::thread::JoinHandle;
+use std::thread::{self, JoinHandle};
 use streamer;

 pub struct Rpu {
-    pub thread_hdls: Vec<JoinHandle<()>>,
+    thread_hdls: Vec<JoinHandle<()>>,
 }

 impl Rpu {
     pub fn new(
-        bank: Arc<Bank>,
+        bank: &Arc<Bank>,
         requests_socket: UdpSocket,
         respond_socket: UdpSocket,
         exit: Arc<AtomicBool>,
@@ -49,7 +50,7 @@ impl Rpu {
         let (packet_sender, packet_receiver) = channel();
         let t_receiver = streamer::receiver(
             requests_socket,
-            exit.clone(),
+            exit,
             packet_recycler.clone(),
             packet_sender,
         );
@@ -58,20 +59,29 @@ impl Rpu {
         let request_processor = RequestProcessor::new(bank.clone());
         let (request_stage, blob_receiver) = RequestStage::new(
             request_processor,
-            exit.clone(),
             packet_receiver,
             packet_recycler.clone(),
             blob_recycler.clone(),
         );

-        let t_responder = streamer::responder(
-            respond_socket,
-            exit.clone(),
-            blob_recycler.clone(),
-            blob_receiver,
-        );
+        let t_responder =
+            streamer::responder("rpu", respond_socket, blob_recycler.clone(), blob_receiver);

-        let thread_hdls = vec![t_receiver, t_responder, request_stage.thread_hdl];
+        let mut thread_hdls = vec![t_receiver, t_responder];
+        thread_hdls.extend(request_stage.thread_hdls().into_iter());
         Rpu { thread_hdls }
     }
 }
+
+impl Service for Rpu {
+    fn thread_hdls(self) -> Vec<JoinHandle<()>> {
+        self.thread_hdls
+    }
+
+    fn join(self) -> thread::Result<()> {
+        for thread_hdl in self.thread_hdls() {
+            thread_hdl.join()?;
+        }
+        Ok(())
+    }
+}
src/service.rs (new file)

@@ -0,0 +1,6 @@
use std::thread::{JoinHandle, Result};

pub trait Service {
    fn thread_hdls(self) -> Vec<JoinHandle<()>>;
    fn join(self) -> Result<()>;
}
@@ -8,8 +8,11 @@ use ring::error::Unspecified;
use ring::rand::SecureRandom;
use ring::signature::Ed25519KeyPair;
use ring::{rand, signature};
use serde_json;
use std::cell::RefCell;
use untrusted;
use std::error;
use std::fs::File;
use untrusted::Input;

pub type KeyPair = Ed25519KeyPair;
pub type PublicKey = GenericArray<u8, U32>;
@@ -24,10 +27,8 @@ impl KeyPairUtil for Ed25519KeyPair {
    /// Return a new ED25519 keypair
    fn new() -> Self {
        let rng = rand::SystemRandom::new();
        let pkcs8_bytes = signature::Ed25519KeyPair::generate_pkcs8(&rng)
            .expect("generate_pkcs8 in signature pb fn new");
        signature::Ed25519KeyPair::from_pkcs8(untrusted::Input::from(&pkcs8_bytes))
            .expect("from_pcks8 in signature pb fn new")
        let pkcs8_bytes = Ed25519KeyPair::generate_pkcs8(&rng).expect("generate_pkcs8");
        Ed25519KeyPair::from_pkcs8(Input::from(&pkcs8_bytes)).expect("from_pcks8")
    }

    /// Return the public key for the given keypair
@@ -42,9 +43,9 @@ pub trait SignatureUtil {

impl SignatureUtil for GenericArray<u8, U64> {
    fn verify(&self, peer_public_key_bytes: &[u8], msg_bytes: &[u8]) -> bool {
        let peer_public_key = untrusted::Input::from(peer_public_key_bytes);
        let msg = untrusted::Input::from(msg_bytes);
        let sig = untrusted::Input::from(self);
        let peer_public_key = Input::from(peer_public_key_bytes);
        let msg = Input::from(msg_bytes);
        let sig = Input::from(self);
        signature::verify(&signature::ED25519, peer_public_key, msg, sig).is_ok()
    }
}
@@ -77,7 +78,7 @@ impl GenKeys {
            .into_par_iter()
            .map(|seed| {
                let pkcs8 = GenKeys::new(seed).new_key();
                KeyPair::from_pkcs8(untrusted::Input::from(&pkcs8)).unwrap()
                KeyPair::from_pkcs8(Input::from(&pkcs8)).unwrap()
            })
            .collect()
    }
@@ -91,6 +92,18 @@ impl SecureRandom for GenKeys {
    }
}

pub fn read_pkcs8(path: &str) -> Result<Vec<u8>, Box<error::Error>> {
    let file = File::open(path.to_string())?;
    let pkcs8: Vec<u8> = serde_json::from_reader(file)?;
    Ok(pkcs8)
}

pub fn read_keypair(path: &str) -> Result<KeyPair, Box<error::Error>> {
    let pkcs8 = read_pkcs8(path)?;
    let keypair = Ed25519KeyPair::from_pkcs8(Input::from(&pkcs8))?;
    Ok(keypair)
}

#[cfg(test)]
mod tests {
    use super::*;
@@ -121,17 +134,3 @@ mod tests {
        assert_eq!(gen_n_pubkeys(seed, 50), gen_n_pubkeys(seed, 50));
    }
}

#[cfg(all(feature = "unstable", test))]
mod bench {
    extern crate test;

    use self::test::Bencher;
    use super::*;

    #[bench]
    fn bench_gen_keys(b: &mut Bencher) {
        let rnd = GenKeys::new([0u8; 32]);
        b.iter(|| rnd.gen_n_keypairs(1000));
    }
}
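The new `read_pkcs8`/`read_keypair` helpers load a key file that is simply a JSON array of PKCS#8 bytes. A standalone sketch of the same file-reading pattern, assuming the serde_json crate; the `client.json` path is hypothetical:

use std::error::Error;
use std::fs::File;

// Same idea as read_pkcs8: the key file is a JSON array of bytes, so
// serde_json can deserialize it straight into a Vec<u8>.
fn read_bytes(path: &str) -> Result<Vec<u8>, Box<dyn Error>> {
    let file = File::open(path)?;
    let bytes: Vec<u8> = serde_json::from_reader(file)?;
    Ok(bytes)
}

fn main() -> Result<(), Box<dyn Error>> {
    let pkcs8 = read_bytes("client.json")?;
    println!("loaded {} key bytes", pkcs8.len());
    Ok(())
}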
@@ -8,7 +8,6 @@ use counter::Counter;
use packet::{Packet, SharedPackets};
use std::mem::size_of;
use std::sync::atomic::AtomicUsize;
use std::time::Instant;
use transaction::{PUB_KEY_OFFSET, SIGNED_DATA_OFFSET, SIG_OFFSET};

pub const TX_OFFSET: usize = 0;
@@ -23,6 +22,8 @@ struct Elems {
#[cfg(feature = "cuda")]
#[link(name = "cuda_verify_ed25519")]
extern "C" {
    fn ed25519_init() -> bool;
    fn ed25519_set_verbose(val: bool);
    fn ed25519_verify_many(
        vecs: *const Elems,
        num: u32, //number of vecs
@@ -35,6 +36,11 @@ extern "C" {
    ) -> u32;
}

#[cfg(not(feature = "cuda"))]
pub fn init() {
    // stub
}

#[cfg(not(feature = "cuda"))]
fn verify_packet(packet: &Packet) -> u8 {
    use ring::signature;
@@ -60,18 +66,17 @@ fn verify_packet(packet: &Packet) -> u8 {
    ).is_ok() as u8
}

fn batch_size(batches: &Vec<SharedPackets>) -> usize {
fn batch_size(batches: &[SharedPackets]) -> usize {
    batches
        .iter()
        .map(|p| p.read().unwrap().packets.len())
        .sum()
}

#[cfg_attr(feature = "cargo-clippy", allow(ptr_arg))]
#[cfg(not(feature = "cuda"))]
pub fn ed25519_verify(batches: &Vec<SharedPackets>) -> Vec<Vec<u8>> {
    use rayon::prelude::*;
    static mut COUNTER: Counter = create_counter!("ed25519_verify", 1);
    let start = Instant::now();
    let count = batch_size(batches);
    info!("CPU ECDSA for {}", batch_size(batches));
    let rv = batches
@@ -85,15 +90,24 @@ pub fn ed25519_verify(batches: &Vec<SharedPackets>) -> Vec<Vec<u8>> {
                .collect()
        })
        .collect();
    inc_counter!(COUNTER, count, start);
    inc_new_counter!("ed25519_verify", count);
    rv
}

#[cfg(feature = "cuda")]
pub fn init() {
    unsafe {
        ed25519_set_verbose(true);
        if !ed25519_init() {
            panic!("ed25519_init() failed");
        }
        ed25519_set_verbose(false);
    }
}

#[cfg(feature = "cuda")]
pub fn ed25519_verify(batches: &Vec<SharedPackets>) -> Vec<Vec<u8>> {
    use packet::PACKET_DATA_SIZE;
    static mut COUNTER: Counter = create_counter!("ed25519_verify_cuda", 1);
    let start = Instant::now();
    let count = batch_size(batches);
    info!("CUDA ECDSA for {}", batch_size(batches));
    let mut out = Vec::new();
@@ -153,7 +167,7 @@ pub fn ed25519_verify(batches: &Vec<SharedPackets>) -> Vec<Vec<u8>> {
            num += 1;
        }
    }
    inc_counter!(COUNTER, count, start);
    inc_new_counter!("ed25519_verify", count);
    rvs
}

@@ -189,7 +203,7 @@ mod tests {

        // jumble some data to test failure
        if modify_data {
            packet.data[20] = 10;
            packet.data[20] = packet.data[20].wrapping_add(10);
        }

        // generate packet vector
@@ -7,38 +7,38 @@

use packet::SharedPackets;
use rand::{thread_rng, Rng};
use result::Result;
use result::{Error, Result};
use service::Service;
use sigverify;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::{channel, Receiver, Sender};
use std::sync::mpsc::{channel, Receiver, RecvTimeoutError, Sender};
use std::sync::{Arc, Mutex};
use std::thread::{spawn, JoinHandle};
use std::thread::{self, spawn, JoinHandle};
use std::time::Instant;
use streamer::{self, PacketReceiver};
use timing;

pub type VerifiedPackets = Vec<(SharedPackets, Vec<u8>)>;

pub struct SigVerifyStage {
    pub thread_hdls: Vec<JoinHandle<()>>,
    thread_hdls: Vec<JoinHandle<()>>,
}

impl SigVerifyStage {
    pub fn new(
        exit: Arc<AtomicBool>,
        packet_receiver: Receiver<SharedPackets>,
    ) -> (Self, Receiver<Vec<(SharedPackets, Vec<u8>)>>) {
    pub fn new(packet_receiver: Receiver<SharedPackets>) -> (Self, Receiver<VerifiedPackets>) {
        sigverify::init();
        let (verified_sender, verified_receiver) = channel();
        let thread_hdls = Self::verifier_services(exit, packet_receiver, verified_sender);
        let thread_hdls = Self::verifier_services(packet_receiver, verified_sender);
        (SigVerifyStage { thread_hdls }, verified_receiver)
    }

    fn verify_batch(batch: Vec<SharedPackets>) -> Vec<(SharedPackets, Vec<u8>)> {
    fn verify_batch(batch: Vec<SharedPackets>) -> VerifiedPackets {
        let r = sigverify::ed25519_verify(&batch);
        batch.into_iter().zip(r).collect()
    }

    fn verifier(
        recvr: &Arc<Mutex<PacketReceiver>>,
        sendr: &Arc<Mutex<Sender<Vec<(SharedPackets, Vec<u8>)>>>>,
        sendr: &Arc<Mutex<Sender<VerifiedPackets>>>,
    ) -> Result<()> {
        let (batch, len) =
            streamer::recv_batch(&recvr.lock().expect("'recvr' lock in fn verifier"))?;
@@ -74,27 +74,41 @@ impl SigVerifyStage {
    }

    fn verifier_service(
        exit: Arc<AtomicBool>,
        packet_receiver: Arc<Mutex<PacketReceiver>>,
        verified_sender: Arc<Mutex<Sender<Vec<(SharedPackets, Vec<u8>)>>>>,
        verified_sender: Arc<Mutex<Sender<VerifiedPackets>>>,
    ) -> JoinHandle<()> {
        spawn(move || loop {
            let e = Self::verifier(&packet_receiver.clone(), &verified_sender.clone());
            if e.is_err() && exit.load(Ordering::Relaxed) {
                break;
            if let Err(e) = Self::verifier(&packet_receiver, &verified_sender) {
                match e {
                    Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => break,
                    Error::RecvTimeoutError(RecvTimeoutError::Timeout) => (),
                    _ => error!("{:?}", e),
                }
            }
        })
    }

    fn verifier_services(
        exit: Arc<AtomicBool>,
        packet_receiver: PacketReceiver,
        verified_sender: Sender<Vec<(SharedPackets, Vec<u8>)>>,
        verified_sender: Sender<VerifiedPackets>,
    ) -> Vec<JoinHandle<()>> {
        let sender = Arc::new(Mutex::new(verified_sender));
        let receiver = Arc::new(Mutex::new(packet_receiver));
        (0..4)
            .map(|_| Self::verifier_service(exit.clone(), receiver.clone(), sender.clone()))
            .map(|_| Self::verifier_service(receiver.clone(), sender.clone()))
            .collect()
    }
}

impl Service for SigVerifyStage {
    fn thread_hdls(self) -> Vec<JoinHandle<()>> {
        self.thread_hdls
    }

    fn join(self) -> thread::Result<()> {
        for thread_hdl in self.thread_hdls() {
            thread_hdl.join()?;
        }
        Ok(())
    }
}
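The recurring pattern in this release: worker loops no longer poll an `exit` flag; they stop when their input channel disconnects (`RecvTimeoutError::Disconnected`), treat timeouts as benign, and log everything else. A minimal standalone sketch of the same idiom using only std channels; the names here are illustrative, not from the diff:

use std::sync::mpsc::{channel, Receiver, RecvTimeoutError};
use std::thread::{spawn, JoinHandle};
use std::time::Duration;

fn worker(receiver: Receiver<u64>) -> JoinHandle<()> {
    spawn(move || loop {
        match receiver.recv_timeout(Duration::from_millis(200)) {
            // Sender dropped: the pipeline is shutting down.
            Err(RecvTimeoutError::Disconnected) => break,
            // No data yet: keep waiting.
            Err(RecvTimeoutError::Timeout) => (),
            Ok(item) => println!("got {}", item),
        }
    })
}

fn main() {
    let (sender, receiver) = channel();
    let handle = worker(receiver);
    sender.send(42).unwrap();
    drop(sender); // closing the channel is the shutdown signal
    handle.join().unwrap();
}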
src/streamer.rs (668 changes)
@@ -1,17 +1,19 @@
//! The `streamer` module defines a set of services for efficiently pulling data from UDP sockets.
//!
use crdt::Crdt;
use counter::Counter;
use crdt::{Crdt, CrdtError, NodeInfo};
#[cfg(feature = "erasure")]
use erasure;
use packet::{
    Blob, BlobRecycler, PacketRecycler, SharedBlob, SharedBlobs, SharedPackets, BLOB_SIZE,
};
use result::{Error, Result};
use std::cmp;
use std::collections::VecDeque;
use std::mem;
use std::net::{SocketAddr, UdpSocket};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::{Receiver, Sender};
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::sync::mpsc::{Receiver, RecvTimeoutError, Sender};
use std::sync::{Arc, RwLock};
use std::thread::{Builder, JoinHandle};
use std::time::Duration;
@@ -23,6 +25,11 @@ pub type BlobSender = Sender<SharedBlobs>;
pub type BlobReceiver = Receiver<SharedBlobs>;
pub type Window = Arc<RwLock<Vec<Option<SharedBlob>>>>;

#[derive(Debug, PartialEq, Eq)]
pub enum WindowError {
    GenericError,
}

fn recv_loop(
    sock: &UdpSocket,
    exit: &Arc<AtomicBool>,
@@ -92,21 +99,25 @@ pub fn recv_batch(recvr: &PacketReceiver) -> Result<(Vec<SharedPackets>, usize)>
            break;
        }
    }
    debug!("batch len {}", batch.len());
    trace!("batch len {}", batch.len());
    Ok((batch, len))
}

pub fn responder(
    name: &'static str,
    sock: UdpSocket,
    exit: Arc<AtomicBool>,
    recycler: BlobRecycler,
    r: BlobReceiver,
) -> JoinHandle<()> {
    Builder::new()
        .name("solana-responder".to_string())
        .name(format!("solana-responder-{}", name))
        .spawn(move || loop {
            if recv_send(&sock, &recycler, &r).is_err() && exit.load(Ordering::Relaxed) {
                break;
            if let Err(e) = recv_send(&sock, &recycler, &r) {
                match e {
                    Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => break,
                    Error::RecvTimeoutError(RecvTimeoutError::Timeout) => (),
                    _ => warn!("{} responder error: {:?}", name, e),
                }
            }
        })
        .unwrap()
@@ -115,7 +126,7 @@ pub fn responder(
//TODO, we would need to stick block authentication before we create the
//window.
fn recv_blobs(recycler: &BlobRecycler, sock: &UdpSocket, s: &BlobSender) -> Result<()> {
    trace!("receiving on {}", sock.local_addr().unwrap());
    trace!("recv_blobs: receiving on {}", sock.local_addr().unwrap());
    let dq = Blob::recv_from(recycler, sock)?;
    if !dq.is_empty() {
        s.send(dq)?;
@@ -152,13 +163,13 @@ fn find_next_missing(
    received: &mut u64,
) -> Result<Vec<(SocketAddr, Vec<u8>)>> {
    if *received <= *consumed {
        return Err(Error::GenericError);
        Err(WindowError::GenericError)?;
    }
    let window = locked_window.read().unwrap();
    let reqs: Vec<_> = (*consumed..*received)
        .filter_map(|pix| {
            let i = (pix % WINDOW_SIZE) as usize;
            if let &None = &window[i] {
            if window[i].is_none() {
                let val = crdt.read().unwrap().window_index_request(pix as u64);
                if let Ok((to, req)) = val {
                    return Some((to, req));
@@ -171,6 +182,7 @@ fn find_next_missing(
}

fn repair_window(
    debug_id: u64,
    locked_window: &Window,
    crdt: &Arc<RwLock<Crdt>>,
    _recycler: &BlobRecycler,
@@ -199,47 +211,57 @@ fn repair_window(
    *times += 1;
    //if times flips from all 1s 7 -> 8, 15 -> 16, we retry otherwise return Ok
    if *times & (*times - 1) != 0 {
        trace!("repair_window counter {} {}", *times, *consumed);
        trace!(
            "repair_window counter {} {} {}",
            *times,
            *consumed,
            *received
        );
        return Ok(());
    }

    let reqs = find_next_missing(locked_window, crdt, consumed, received)?;
    trace!("{:x}: repair_window missing: {}", debug_id, reqs.len());
    if !reqs.is_empty() {
        inc_new_counter!("streamer-repair_window-repair", reqs.len());
        debug!(
            "{:x}: repair_window counter times: {} consumed: {} received: {} missing: {}",
            debug_id,
            *times,
            *consumed,
            *received,
            reqs.len()
        );
    }
    let sock = UdpSocket::bind("0.0.0.0:0")?;
    for (to, req) in reqs {
        //todo cache socket
        info!("repair_window request {} {} {}", *consumed, *received, to);
        debug!(
            "{:x} repair_window request {} {} {}",
            debug_id, *consumed, *received, to
        );
        assert!(req.len() < BLOB_SIZE);
        sock.send_to(&req, to)?;
    }
    Ok(())
}

fn recv_window(
    locked_window: &Window,
    crdt: &Arc<RwLock<Crdt>>,
fn retransmit_all_leader_blocks(
    maybe_leader: Option<NodeInfo>,
    dq: &mut SharedBlobs,
    debug_id: u64,
    recycler: &BlobRecycler,
    consumed: &mut u64,
    received: &mut u64,
    r: &BlobReceiver,
    s: &BlobSender,
    retransmit: &BlobSender,
) -> Result<()> {
    let timer = Duration::from_millis(200);
    let mut dq = r.recv_timeout(timer)?;
    let leader_id = crdt.read()
        .expect("'crdt' read lock in fn recv_window")
        .leader_data()
        .expect("leader not ready")
        .id;
    while let Ok(mut nq) = r.try_recv() {
        dq.append(&mut nq)
    }
    {
        //retransmit all leader blocks
        let mut retransmitq = VecDeque::new();
        for b in &dq {
    let mut retransmit_queue = VecDeque::new();
    if let Some(leader) = maybe_leader {
        for b in dq {
            let p = b.read().expect("'b' read lock in fn recv_window");
            //TODO this check isn't safe against adversarial packets
            //we need to maintain a sequence window
            let leader_id = leader.id;
            trace!(
                "idx: {} addr: {:?} id: {:?} leader: {:?}",
                p.get_index().expect("get_index in fn recv_window"),
@@ -262,15 +284,151 @@ fn recv_window(
                mnv.meta.size = sz;
                mnv.data[..sz].copy_from_slice(&p.data[..sz]);
            }
            retransmitq.push_back(nv);
            retransmit_queue.push_back(nv);
        }
    }
    if !retransmitq.is_empty() {
        retransmit.send(retransmitq)?;
    } else {
        warn!("{:x}: no leader to retransmit from", debug_id);
    }
    if !retransmit_queue.is_empty() {
        debug!(
            "{:x}: RECV_WINDOW {} {}: retransmit {}",
            debug_id,
            *consumed,
            *received,
            retransmit_queue.len(),
        );
        inc_new_counter!("streamer-recv_window-retransmit", retransmit_queue.len());
        retransmit.send(retransmit_queue)?;
    }
    Ok(())
}

fn process_blob(
    b: SharedBlob,
    pix: u64,
    w: usize,
    consume_queue: &mut SharedBlobs,
    locked_window: &Window,
    debug_id: u64,
    recycler: &BlobRecycler,
    consumed: &mut u64,
) {
    let mut window = locked_window.write().unwrap();

    // Search the window for old blobs in the window
    // of consumed to received and clear any old ones
    for ix in *consumed..(pix + 1) {
        let k = (ix % WINDOW_SIZE) as usize;
        if let Some(b) = &mut window[k] {
            if b.read().unwrap().get_index().unwrap() >= *consumed as u64 {
                continue;
            }
        }
        if let Some(b) = mem::replace(&mut window[k], None) {
            recycler.recycle(b);
        }
    }

    // Insert the new blob into the window
    // spot should be free because we cleared it above
    if window[w].is_none() {
        window[w] = Some(b);
    } else if let Some(cblob) = &window[w] {
        if cblob.read().unwrap().get_index().unwrap() != pix as u64 {
            warn!("{:x}: overrun blob at index {:}", debug_id, w);
        } else {
            debug!("{:x}: duplicate blob at index {:}", debug_id, w);
        }
    }
    loop {
        let k = (*consumed % WINDOW_SIZE) as usize;
        trace!("k: {} consumed: {}", k, *consumed);

        if window[k].is_none() {
            break;
        }
        let mut is_coding = false;
        if let Some(ref cblob) = window[k] {
            let cblob_r = cblob
                .read()
                .expect("blob read lock for flogs streamer::window");
            if cblob_r.get_index().unwrap() < *consumed {
                break;
            }
            if cblob_r.is_coding() {
                is_coding = true;
            }
        }
        if !is_coding {
            consume_queue.push_back(window[k].clone().expect("clone in fn recv_window"));
            *consumed += 1;
        } else {
            #[cfg(feature = "erasure")]
            {
                let block_start = *consumed - (*consumed % erasure::NUM_CODED as u64);
                let coding_end = block_start + erasure::NUM_CODED as u64;
                // We've received all this block's data blobs, go and null out the window now
                for j in block_start..*consumed {
                    if let Some(b) = mem::replace(&mut window[(j % WINDOW_SIZE) as usize], None) {
                        recycler.recycle(b);
                    }
                }
                for j in *consumed..coding_end {
                    window[(j % WINDOW_SIZE) as usize] = None;
                }

                *consumed += erasure::MAX_MISSING as u64;
                debug!(
                    "skipping processing coding blob k: {} consumed: {}",
                    k, *consumed
                );
            }
        }
    }
}
fn recv_window(
    debug_id: u64,
    locked_window: &Window,
    crdt: &Arc<RwLock<Crdt>>,
    recycler: &BlobRecycler,
    consumed: &mut u64,
    received: &mut u64,
    r: &BlobReceiver,
    s: &BlobSender,
    retransmit: &BlobSender,
) -> Result<()> {
    let timer = Duration::from_millis(200);
    let mut dq = r.recv_timeout(timer)?;
    let maybe_leader: Option<NodeInfo> = crdt.read()
        .expect("'crdt' read lock in fn recv_window")
        .leader_data()
        .cloned();
    while let Ok(mut nq) = r.try_recv() {
        dq.append(&mut nq)
    }
    inc_new_counter!("streamer-recv_window-recv", dq.len());
    debug!(
        "{:x}: RECV_WINDOW {} {}: got packets {}",
        debug_id,
        *consumed,
        *received,
        dq.len(),
    );

    retransmit_all_leader_blocks(
        maybe_leader,
        &mut dq,
        debug_id,
        recycler,
        consumed,
        received,
        retransmit,
    )?;

    //send a contiguous set of blocks
    let mut contq = VecDeque::new();
    let mut consume_queue = VecDeque::new();
    while let Some(b) = dq.pop_front() {
        let (pix, meta_size) = {
            let p = b.write().expect("'b' write lock in fn recv_window");
@@ -283,8 +441,8 @@ fn recv_window(
        // probably from a repair window request
        if pix < *consumed {
            debug!(
                "received: {} but older than consumed: {} skipping..",
                pix, *consumed
                "{:x}: received: {} but older than consumed: {} skipping..",
                debug_id, pix, *consumed
            );
            continue;
        }
@@ -293,93 +451,36 @@ fn recv_window(
        //if we get different blocks at the same index
        //that is a network failure/attack
        trace!("window w: {} size: {}", w, meta_size);
        {
            let mut window = locked_window.write().unwrap();

            // Search the window for old blobs in the window
            // of consumed to received and clear any old ones
            for ix in *consumed..(pix + 1) {
                let k = (ix % WINDOW_SIZE) as usize;
                if let Some(b) = &mut window[k] {
                    if b.read().unwrap().get_index().unwrap() >= *consumed as u64 {
                        continue;
                    }
                }
                if let Some(b) = mem::replace(&mut window[k], None) {
                    recycler.recycle(b);
                }
            }

            // Insert the new blob into the window
            // spot should be free because we cleared it above
            if window[w].is_none() {
                window[w] = Some(b);
            } else if let Some(cblob) = &window[w] {
                if cblob.read().unwrap().get_index().unwrap() != pix as u64 {
                    warn!("overrun blob at index {:}", w);
                } else {
                    debug!("duplicate blob at index {:}", w);
                }
            }
            loop {
                let k = (*consumed % WINDOW_SIZE) as usize;
                trace!("k: {} consumed: {}", k, *consumed);

                if window[k].is_none() {
                    break;
                }
                let mut is_coding = false;
                if let &Some(ref cblob) = &window[k] {
                    let cblob_r = cblob
                        .read()
                        .expect("blob read lock for flogs streamer::window");
                    if cblob_r.get_index().unwrap() < *consumed {
                        break;
                    }
                    if cblob_r.is_coding() {
                        is_coding = true;
                    }
                }
                if !is_coding {
                    contq.push_back(window[k].clone().expect("clone in fn recv_window"));
                    *consumed += 1;
                } else {
                    #[cfg(feature = "erasure")]
                    {
                        let block_start = *consumed - (*consumed % erasure::NUM_CODED as u64);
                        let coding_end = block_start + erasure::NUM_CODED as u64;
                        // We've received all this block's data blobs, go and null out the window now
                        for j in block_start..*consumed {
                            if let Some(b) =
                                mem::replace(&mut window[(j % WINDOW_SIZE) as usize], None)
                            {
                                recycler.recycle(b);
                            }
                        }
                        for j in *consumed..coding_end {
                            window[(j % WINDOW_SIZE) as usize] = None;
                        }

                        *consumed += erasure::MAX_MISSING as u64;
                        debug!(
                            "skipping processing coding blob k: {} consumed: {}",
                            k, *consumed
                        );
                    }
                }
            }
        }
        process_blob(
            b,
            pix,
            w,
            &mut consume_queue,
            locked_window,
            debug_id,
            recycler,
            consumed,
        );
    }
    print_window(locked_window, *consumed);
    trace!("sending contq.len: {}", contq.len());
    if !contq.is_empty() {
        trace!("sending contq.len: {}", contq.len());
        s.send(contq)?;
    print_window(debug_id, locked_window, *consumed);
    trace!("sending consume_queue.len: {}", consume_queue.len());
    if !consume_queue.is_empty() {
        debug!(
            "{:x}: RECV_WINDOW {} {}: forwarding consume_queue {}",
            debug_id,
            *consumed,
            *received,
            consume_queue.len(),
        );
        trace!("sending consume_queue.len: {}", consume_queue.len());
        inc_new_counter!("streamer-recv_window-consume", consume_queue.len());
        s.send(consume_queue)?;
    }
    Ok(())
}

fn print_window(locked_window: &Window, consumed: u64) {
fn print_window(debug_id: u64, locked_window: &Window, consumed: u64) {
    {
        let buf: Vec<_> = locked_window
            .read()
@@ -391,20 +492,18 @@ fn print_window(locked_window: &Window, consumed: u64) {
                "_"
            } else if v.is_none() {
                "0"
            } else {
                if let &Some(ref cblob) = &v {
                    if cblob.read().unwrap().is_coding() {
                        "C"
                    } else {
                        "1"
                    }
            } else if let Some(ref cblob) = v {
                if cblob.read().unwrap().is_coding() {
                    "C"
                } else {
                    "0"
                    "1"
                }
            } else {
                "0"
            }
            })
            .collect();
        debug!("WINDOW ({}): {}", consumed, buf.join(""));
        trace!("{:x}:WINDOW ({}): {}", debug_id, consumed, buf.join(""));
    }
}

@@ -412,8 +511,46 @@ pub fn default_window() -> Window {
    Arc::new(RwLock::new(vec![None; WINDOW_SIZE as usize]))
}

/// Initialize a rebroadcast window with most recent Entry blobs
/// * `crdt` - gossip instance, used to set blob ids
/// * `blobs` - up to WINDOW_SIZE most recent blobs
/// * `entry_height` - current entry height
pub fn initialized_window(
    crdt: &Arc<RwLock<Crdt>>,
    blobs: Vec<SharedBlob>,
    entry_height: u64,
) -> Window {
    let window = default_window();

    {
        let mut win = window.write().unwrap();
        let me = crdt.read().unwrap().my_data().clone();

        debug!(
            "initialized window entry_height:{} blobs_len:{}",
            entry_height,
            blobs.len()
        );

        // Index the blobs
        let mut received = entry_height - blobs.len() as u64;
        Crdt::index_blobs(&me, &blobs, &mut received).expect("index blobs for initial window");

        // populate the window, offset by implied index
        let diff = cmp::max(blobs.len() as isize - win.len() as isize, 0) as usize;
        for b in blobs.into_iter().skip(diff) {
            let ix = b.read().unwrap().get_index().expect("blob index");
            let pos = (ix % WINDOW_SIZE) as usize;
            trace!("caching {} at {}", ix, pos);
            assert!(win[pos].is_none());
            win[pos] = Some(b);
        }
    }

    window
}
pub fn window(
    exit: Arc<AtomicBool>,
    crdt: Arc<RwLock<Crdt>>,
    window: Window,
    entry_height: u64,
@@ -429,11 +566,11 @@ pub fn window(
            let mut received = entry_height;
            let mut last = entry_height;
            let mut times = 0;
            let debug_id = crdt.read().unwrap().debug_id();
            trace!("{:x}: RECV_WINDOW started", debug_id);
            loop {
                if exit.load(Ordering::Relaxed) {
                    break;
                }
                let _ = recv_window(
                if let Err(e) = recv_window(
                    debug_id,
                    &window,
                    &crdt,
                    &recycler,
@@ -442,8 +579,18 @@ pub fn window(
                    &r,
                    &s,
                    &retransmit,
                );
                ) {
                    match e {
                        Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => break,
                        Error::RecvTimeoutError(RecvTimeoutError::Timeout) => (),
                        _ => {
                            inc_new_counter!("streamer-window-error", 1, 1);
                            error!("window error: {:?}", e);
                        }
                    }
                }
                let _ = repair_window(
                    debug_id,
                    &window,
                    &crdt,
                    &recycler,
@@ -452,13 +599,15 @@ pub fn window(
                    &mut consumed,
                    &mut received,
                );
                assert!(consumed <= (received + 1));
            }
        })
        .unwrap()
}

fn broadcast(
    crdt: &Arc<RwLock<Crdt>>,
    me: &NodeInfo,
    broadcast_table: &[NodeInfo],
    window: &Window,
    recycler: &BlobRecycler,
    r: &BlobReceiver,
@@ -466,6 +615,7 @@ fn broadcast(
    transmit_index: &mut u64,
    receive_index: &mut u64,
) -> Result<()> {
    let debug_id = me.debug_id();
    let timer = Duration::new(1, 0);
    let mut dq = r.recv_timeout(timer)?;
    while let Ok(mut nq) = r.try_recv() {
@@ -479,7 +629,7 @@ fn broadcast(
    // break them up into window-sized chunks to process
    let blobs_chunked = blobs_vec.chunks(WINDOW_SIZE as usize).map(|x| x.to_vec());

    print_window(window, *receive_index);
    print_window(me.debug_id(), window, *receive_index);

    for mut blobs in blobs_chunked {
        // Insert the coding blobs into the blob stream
@@ -487,11 +637,12 @@ fn broadcast(
        erasure::add_coding_blobs(recycler, &mut blobs, *receive_index);

        let blobs_len = blobs.len();
        debug!("broadcast blobs.len: {}", blobs_len);
        debug!("{:x} broadcast blobs.len: {}", debug_id, blobs_len);

        // Index the blobs
        Crdt::index_blobs(crdt, &blobs, receive_index)?;
        Crdt::index_blobs(&me, &blobs, receive_index)?;
        // keep the cache of blobs that are broadcast
        inc_new_counter!("streamer-broadcast-sent", blobs.len());
        {
            let mut win = window.write().unwrap();
            assert!(blobs.len() <= win.len());
@@ -524,13 +675,20 @@ fn broadcast(
                &mut window.write().unwrap(),
                *receive_index as usize,
                blobs_len,
            ).map_err(|_| Error::GenericError)?;
            )?;
        }

        *receive_index += blobs_len as u64;

        // Send blobs out from the window
        Crdt::broadcast(crdt, &window, &sock, transmit_index, *receive_index)?;
        Crdt::broadcast(
            &me,
            &broadcast_table,
            &window,
            &sock,
            transmit_index,
            *receive_index,
        )?;
    }
    Ok(())
}
@@ -546,7 +704,6 @@ fn broadcast(
/// * `r` - Receive channel for blobs to be retransmitted to all the layer 1 nodes.
pub fn broadcaster(
    sock: UdpSocket,
    exit: Arc<AtomicBool>,
    crdt: Arc<RwLock<Crdt>>,
    window: Window,
    entry_height: u64,
@@ -558,19 +715,29 @@ pub fn broadcaster(
        .spawn(move || {
            let mut transmit_index = entry_height;
            let mut receive_index = entry_height;
            let me = crdt.read().unwrap().my_data().clone();
            loop {
                if exit.load(Ordering::Relaxed) {
                    break;
                }
                let _ = broadcast(
                    &crdt,
                let broadcast_table = crdt.read().unwrap().compute_broadcast_table();
                if let Err(e) = broadcast(
                    &me,
                    &broadcast_table,
                    &window,
                    &recycler,
                    &r,
                    &sock,
                    &mut transmit_index,
                    &mut receive_index,
                );
                ) {
                    match e {
                        Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => break,
                        Error::RecvTimeoutError(RecvTimeoutError::Timeout) => (),
                        Error::CrdtError(CrdtError::NoPeers) => (), // TODO: Why are the unit-tests throwing hundreds of these?
                        _ => {
                            inc_new_counter!("streamer-broadcaster-error", 1, 1);
                            error!("broadcaster error: {:?}", e);
                        }
                    }
                }
            }
        })
        .unwrap()
@@ -608,7 +775,6 @@ fn retransmit(
/// * `r` - Receive channel for blobs to be retransmitted to all the layer 1 nodes.
pub fn retransmitter(
    sock: UdpSocket,
    exit: Arc<AtomicBool>,
    crdt: Arc<RwLock<Crdt>>,
    recycler: BlobRecycler,
    r: BlobReceiver,
@@ -618,125 +784,26 @@ pub fn retransmitter(
        .spawn(move || {
            trace!("retransmitter started");
            loop {
                if exit.load(Ordering::Relaxed) {
                    break;
                if let Err(e) = retransmit(&crdt, &recycler, &r, &sock) {
                    match e {
                        Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => break,
                        Error::RecvTimeoutError(RecvTimeoutError::Timeout) => (),
                        _ => {
                            inc_new_counter!("streamer-retransmit-error", 1, 1);
                            error!("retransmitter error: {:?}", e);
                        }
                    }
                }
                // TODO: handle this error
                let _ = retransmit(&crdt, &recycler, &r, &sock);
            }
            trace!("exiting retransmitter");
        })
        .unwrap()
}
#[cfg(all(feature = "unstable", test))]
mod bench {
    extern crate test;
    use self::test::Bencher;
    use packet::{Packet, PacketRecycler, BLOB_SIZE, PACKET_DATA_SIZE};
    use result::Result;
    use std::net::{SocketAddr, UdpSocket};
    use std::sync::atomic::{AtomicBool, Ordering};
    use std::sync::mpsc::channel;
    use std::sync::{Arc, Mutex};
    use std::thread::sleep;
    use std::thread::{spawn, JoinHandle};
    use std::time::Duration;
    use std::time::SystemTime;
    use streamer::{receiver, PacketReceiver};

    fn producer(
        addr: &SocketAddr,
        recycler: PacketRecycler,
        exit: Arc<AtomicBool>,
    ) -> JoinHandle<()> {
        let send = UdpSocket::bind("0.0.0.0:0").unwrap();
        let msgs = recycler.allocate();
        let msgs_ = msgs.clone();
        msgs.write().unwrap().packets.resize(10, Packet::default());
        for w in msgs.write().unwrap().packets.iter_mut() {
            w.meta.size = PACKET_DATA_SIZE;
            w.meta.set_addr(&addr);
        }
        spawn(move || loop {
            if exit.load(Ordering::Relaxed) {
                return;
            }
            let mut num = 0;
            for p in msgs_.read().unwrap().packets.iter() {
                let a = p.meta.addr();
                assert!(p.meta.size < BLOB_SIZE);
                send.send_to(&p.data[..p.meta.size], &a).unwrap();
                num += 1;
            }
            assert_eq!(num, 10);
        })
    }

    fn sink(
        recycler: PacketRecycler,
        exit: Arc<AtomicBool>,
        rvs: Arc<Mutex<usize>>,
        r: PacketReceiver,
    ) -> JoinHandle<()> {
        spawn(move || loop {
            if exit.load(Ordering::Relaxed) {
                return;
            }
            let timer = Duration::new(1, 0);
            match r.recv_timeout(timer) {
                Ok(msgs) => {
                    *rvs.lock().unwrap() += msgs.read().unwrap().packets.len();
                    recycler.recycle(msgs);
                }
                _ => (),
            }
        })
    }

    fn bench_streamer_with_result() -> Result<()> {
        let read = UdpSocket::bind("127.0.0.1:0")?;
        read.set_read_timeout(Some(Duration::new(1, 0)))?;

        let addr = read.local_addr()?;
        let exit = Arc::new(AtomicBool::new(false));
        let pack_recycler = PacketRecycler::default();

        let (s_reader, r_reader) = channel();
        let t_reader = receiver(read, exit.clone(), pack_recycler.clone(), s_reader);
        let t_producer1 = producer(&addr, pack_recycler.clone(), exit.clone());
        let t_producer2 = producer(&addr, pack_recycler.clone(), exit.clone());
        let t_producer3 = producer(&addr, pack_recycler.clone(), exit.clone());

        let rvs = Arc::new(Mutex::new(0));
        let t_sink = sink(pack_recycler.clone(), exit.clone(), rvs.clone(), r_reader);

        let start = SystemTime::now();
        let start_val = *rvs.lock().unwrap();
        sleep(Duration::new(5, 0));
        let elapsed = start.elapsed().unwrap();
        let end_val = *rvs.lock().unwrap();
        let time = elapsed.as_secs() * 10000000000 + elapsed.subsec_nanos() as u64;
        let ftime = (time as f64) / 10000000000f64;
        let fcount = (end_val - start_val) as f64;
        trace!("performance: {:?}", fcount / ftime);
        exit.store(true, Ordering::Relaxed);
        t_reader.join()?;
        t_producer1.join()?;
        t_producer2.join()?;
        t_producer3.join()?;
        t_sink.join()?;
        Ok(())
    }

    #[bench]
    pub fn bench_streamer(_bench: &mut Bencher) {
        bench_streamer_with_result().unwrap();
    }
}
#[cfg(test)]
mod test {
    use crdt::{Crdt, TestNode};
    use logger;
    use packet::{Blob, BlobRecycler, Packet, PacketRecycler, Packets, PACKET_DATA_SIZE};
    use std::collections::VecDeque;
    use std::io;
@@ -779,20 +846,29 @@ mod test {
        let resp_recycler = BlobRecycler::default();
        let (s_reader, r_reader) = channel();
        let t_receiver = receiver(read, exit.clone(), pack_recycler.clone(), s_reader);
        let (s_responder, r_responder) = channel();
        let t_responder = responder(send, exit.clone(), resp_recycler.clone(), r_responder);
        let mut msgs = VecDeque::new();
        for i in 0..10 {
            let b = resp_recycler.allocate();
            {
                let mut w = b.write().unwrap();
                w.data[0] = i as u8;
                w.meta.size = PACKET_DATA_SIZE;
                w.meta.set_addr(&addr);
        let t_responder = {
            let (s_responder, r_responder) = channel();
            let t_responder = responder(
                "streamer_send_test",
                send,
                resp_recycler.clone(),
                r_responder,
            );
            let mut msgs = VecDeque::new();
            for i in 0..10 {
                let b = resp_recycler.allocate();
                {
                    let mut w = b.write().unwrap();
                    w.data[0] = i as u8;
                    w.meta.size = PACKET_DATA_SIZE;
                    w.meta.set_addr(&addr);
                }
                msgs.push_back(b);
            }
            msgs.push_back(b);
        }
        s_responder.send(msgs).expect("send");
            s_responder.send(msgs).expect("send");
            t_responder
        };

        let mut num = 0;
        get_msgs(r_reader, &mut num);
        assert_eq!(num, 10);
@@ -821,9 +897,10 @@ mod test {

    #[test]
    pub fn window_send_test() {
        let tn = TestNode::new();
        logger::setup();
        let tn = TestNode::new_localhost();
        let exit = Arc::new(AtomicBool::new(false));
        let mut crdt_me = Crdt::new(tn.data.clone());
        let mut crdt_me = Crdt::new(tn.data.clone()).expect("Crdt::new");
        let me_id = crdt_me.my_data().id;
        crdt_me.set_leader(me_id);
        let subs = Arc::new(RwLock::new(crdt_me));
@@ -840,7 +917,6 @@ mod test {
        let (s_retransmit, r_retransmit) = channel();
        let win = default_window();
        let t_window = window(
            exit.clone(),
            subs,
            win,
            0,
@@ -849,28 +925,32 @@ mod test {
            s_window,
            s_retransmit,
        );
        let (s_responder, r_responder) = channel();
        let t_responder = responder(
            tn.sockets.replicate,
            exit.clone(),
            resp_recycler.clone(),
            r_responder,
        );
        let mut msgs = VecDeque::new();
        for v in 0..10 {
            let i = 9 - v;
            let b = resp_recycler.allocate();
            {
                let mut w = b.write().unwrap();
                w.set_index(i).unwrap();
                w.set_id(me_id).unwrap();
                assert_eq!(i, w.get_index().unwrap());
                w.meta.size = PACKET_DATA_SIZE;
                w.meta.set_addr(&tn.data.gossip_addr);
        let t_responder = {
            let (s_responder, r_responder) = channel();
            let t_responder = responder(
                "window_send_test",
                tn.sockets.replicate,
                resp_recycler.clone(),
                r_responder,
            );
            let mut msgs = VecDeque::new();
            for v in 0..10 {
                let i = 9 - v;
                let b = resp_recycler.allocate();
                {
                    let mut w = b.write().unwrap();
                    w.set_index(i).unwrap();
                    w.set_id(me_id).unwrap();
                    assert_eq!(i, w.get_index().unwrap());
                    w.meta.size = PACKET_DATA_SIZE;
                    w.meta.set_addr(&tn.data.contact_info.ncp);
                }
                msgs.push_back(b);
            }
            msgs.push_back(b);
        }
        s_responder.send(msgs).expect("send");
            s_responder.send(msgs).expect("send");
            t_responder
        };

        let mut num = 0;
        get_blobs(r_window, &mut num);
        assert_eq!(num, 10);
@@ -10,8 +10,13 @@ use signature::{KeyPair, PublicKey, Signature};
use std::collections::HashMap;
use std::io;
use std::net::{SocketAddr, UdpSocket};
use std::time::Instant;
use timing;
use transaction::Transaction;

use influx_db_client as influxdb;
use metrics;

/// An object for querying and sending transactions to the network.
pub struct ThinClient {
    requests_addr: SocketAddr,
@@ -34,7 +39,7 @@ impl ThinClient {
        transactions_addr: SocketAddr,
        transactions_socket: UdpSocket,
    ) -> Self {
        let client = ThinClient {
        ThinClient {
            requests_addr,
            requests_socket,
            transactions_addr,
@@ -43,8 +48,7 @@ impl ThinClient {
            transaction_count: 0,
            balances: HashMap::new(),
            signature_status: false,
        };
        client
        }
    }

    pub fn recv_response(&self) -> io::Result<Response> {
@@ -52,12 +56,11 @@ impl ThinClient {
        trace!("start recv_from");
        self.requests_socket.recv_from(&mut buf)?;
        trace!("end recv_from");
        let resp = deserialize(&buf).expect("deserialize balance in thin_client");
        Ok(resp)
        deserialize(&buf).or_else(|_| Err(io::Error::new(io::ErrorKind::Other, "deserialize")))
    }

    pub fn process_response(&mut self, resp: Response) {
        match resp {
    pub fn process_response(&mut self, resp: &Response) {
        match *resp {
            Response::Balance { key, val } => {
                trace!("Response balance {:?} {:?}", key, val);
                self.balances.insert(key, val);
@@ -72,13 +75,10 @@ impl ThinClient {
            }
            Response::SignatureStatus { signature_status } => {
                self.signature_status = signature_status;
                match signature_status {
                    true => {
                        trace!("Response found signature");
                    }
                    false => {
                        trace!("Response signature not found");
                    }
                if signature_status {
                    trace!("Response found signature");
                } else {
                    trace!("Response signature not found");
                }
            }
        }
@@ -86,7 +86,7 @@ impl ThinClient {

    /// Send a signed Transaction to the server for processing. This method
    /// does not wait for a response.
    pub fn transfer_signed(&self, tx: Transaction) -> io::Result<usize> {
    pub fn transfer_signed(&self, tx: &Transaction) -> io::Result<usize> {
        let data = serialize(&tx).expect("serialize Transaction in pub fn transfer_signed");
        self.transactions_socket
            .send_to(&data, &self.transactions_addr)
@@ -100,9 +100,20 @@ impl ThinClient {
        to: PublicKey,
        last_id: &Hash,
    ) -> io::Result<Signature> {
        let now = Instant::now();
        let tx = Transaction::new(keypair, to, n, *last_id);
        let sig = tx.sig;
        self.transfer_signed(tx).map(|_| sig)
        let result = self.transfer_signed(&tx).map(|_| sig);
        metrics::submit(
            influxdb::Point::new("thinclient")
                .add_tag("op", influxdb::Value::String("transfer".to_string()))
                .add_field(
                    "duration_ms",
                    influxdb::Value::Integer(timing::duration_as_ms(&now.elapsed()) as i64),
                )
                .to_owned(),
        );
        result
    }
    /// Request the balance of the user holding `pubkey`. This method blocks
@@ -122,12 +133,12 @@ impl ThinClient {
            if let Response::Balance { key, .. } = &resp {
                done = key == pubkey;
            }
            self.process_response(resp);
            self.process_response(&resp);
        }
        self.balances
            .get(pubkey)
            .map(|x| *x)
            .ok_or(io::Error::new(io::ErrorKind::Other, "nokey"))
            .cloned()
            .ok_or_else(|| io::Error::new(io::ErrorKind::Other, "nokey"))
    }

    /// Request the transaction count. If the response packet is dropped by the network,
@@ -145,10 +156,10 @@ impl ThinClient {

            if let Ok(resp) = self.recv_response() {
                info!("recv_response {:?}", resp);
                if let &Response::TransactionCount { .. } = &resp {
                if let Response::TransactionCount { .. } = resp {
                    done = true;
                }
                self.process_response(resp);
                self.process_response(&resp);
            }
        }
        self.transaction_count
@@ -169,10 +180,10 @@ impl ThinClient {

            match self.recv_response() {
                Ok(resp) => {
                    if let &Response::LastId { .. } = &resp {
                    if let Response::LastId { .. } = resp {
                        done = true;
                    }
                    self.process_response(resp);
                    self.process_response(&resp);
                }
                Err(e) => {
                    debug!("thin_client get_last_id error: {}", e);
@@ -183,8 +194,6 @@ impl ThinClient {
    }

    pub fn poll_get_balance(&mut self, pubkey: &PublicKey) -> io::Result<i64> {
        use std::time::Instant;

        let mut balance;
        let now = Instant::now();
        loop {
@@ -193,7 +202,15 @@ impl ThinClient {
                break;
            }
        }

        metrics::submit(
            influxdb::Point::new("thinclient")
                .add_tag("op", influxdb::Value::String("get_balance".to_string()))
                .add_field(
                    "duration_ms",
                    influxdb::Value::Integer(timing::duration_as_ms(&now.elapsed()) as i64),
                )
                .to_owned(),
        );
        balance
    }

@@ -203,6 +220,7 @@ impl ThinClient {
        trace!("check_signature");
        let req = Request::GetSignature { signature: *sig };
        let data = serialize(&req).expect("serialize GetSignature in pub fn check_signature");
        let now = Instant::now();
        let mut done = false;
        while !done {
            self.requests_socket
@@ -210,16 +228,31 @@ impl ThinClient {
                .expect("buffer error in pub fn get_last_id");

            if let Ok(resp) = self.recv_response() {
                if let &Response::SignatureStatus { .. } = &resp {
                if let Response::SignatureStatus { .. } = resp {
                    done = true;
                }
                self.process_response(resp);
                self.process_response(&resp);
            }
        }
        metrics::submit(
            influxdb::Point::new("thinclient")
                .add_tag("op", influxdb::Value::String("check_signature".to_string()))
                .add_field(
                    "duration_ms",
                    influxdb::Value::Integer(timing::duration_as_ms(&now.elapsed()) as i64),
                )
                .to_owned(),
        );
        self.signature_status
    }
}

impl Drop for ThinClient {
    fn drop(&mut self) {
        metrics::flush();
    }
}
#[cfg(test)]
mod tests {
    use super::*;
@@ -229,6 +262,7 @@ mod tests {
    use fullnode::FullNode;
    use logger;
    use mint::Mint;
    use service::Service;
    use signature::{KeyPair, KeyPairUtil};
    use std::io::sink;
    use std::sync::atomic::{AtomicBool, Ordering};
@@ -240,7 +274,8 @@ mod tests {
    #[test]
    fn test_thin_client() {
        logger::setup();
        let leader = TestNode::new();
        let leader = TestNode::new_localhost();
        let leader_data = leader.data.clone();

        let alice = Mint::new(10_000);
        let bank = Bank::new(&alice);
@@ -250,13 +285,9 @@ mod tests {
        let server = FullNode::new_leader(
            bank,
            0,
            None,
            Some(Duration::from_millis(30)),
            leader.data.clone(),
            leader.sockets.requests,
            leader.sockets.transaction,
            leader.sockets.broadcast,
            leader.sockets.respond,
            leader.sockets.gossip,
            leader,
            exit.clone(),
            sink(),
        );
@@ -266,9 +297,9 @@ mod tests {
        let transactions_socket = UdpSocket::bind("0.0.0.0:0").unwrap();

        let mut client = ThinClient::new(
            leader.data.requests_addr,
            leader_data.contact_info.rpu,
            requests_socket,
            leader.data.transactions_addr,
            leader_data.contact_info.tpu,
            transactions_socket,
        );
        let last_id = client.get_last_id();
@@ -278,33 +309,31 @@ mod tests {
        let balance = client.poll_get_balance(&bob_pubkey);
        assert_eq!(balance.unwrap(), 500);
        exit.store(true, Ordering::Relaxed);
        for t in server.thread_hdls {
            t.join().unwrap();
        }
        server.join().unwrap();
    }

    // sleep(Duration::from_millis(300)); is unstable
    #[test]
    #[ignore]
    fn test_bad_sig() {
        logger::setup();
        let leader = TestNode::new();
        let leader = TestNode::new_localhost();
        let alice = Mint::new(10_000);
        let bank = Bank::new(&alice);
        let bob_pubkey = KeyPair::new().pubkey();
        let exit = Arc::new(AtomicBool::new(false));
        let leader_data = leader.data.clone();

        let server = FullNode::new_leader(
            bank,
            0,
            None,
            Some(Duration::from_millis(30)),
            leader.data.clone(),
            leader.sockets.requests,
            leader.sockets.transaction,
            leader.sockets.broadcast,
            leader.sockets.respond,
            leader.sockets.gossip,
            leader,
            exit.clone(),
            sink(),
        );
        //TODO: remove this sleep, or add a retry so CI is stable
        sleep(Duration::from_millis(300));

        let requests_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
@@ -313,16 +342,16 @@ mod tests {
            .unwrap();
        let transactions_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
        let mut client = ThinClient::new(
            leader.data.requests_addr,
            leader_data.contact_info.rpu,
            requests_socket,
            leader.data.transactions_addr,
            leader_data.contact_info.tpu,
            transactions_socket,
        );
        let last_id = client.get_last_id();

        let tx = Transaction::new(&alice.keypair(), bob_pubkey, 500, last_id);

        let _sig = client.transfer_signed(tx).unwrap();
        let _sig = client.transfer_signed(&tx).unwrap();

        let last_id = client.get_last_id();

@@ -331,35 +360,29 @@ mod tests {
            contract.tokens = 502;
            contract.plan = Plan::Budget(Budget::new_payment(502, bob_pubkey));
        }
        let _sig = client.transfer_signed(tr2).unwrap();
        let _sig = client.transfer_signed(&tr2).unwrap();

        let balance = client.poll_get_balance(&bob_pubkey);
        assert_eq!(balance.unwrap(), 500);
        exit.store(true, Ordering::Relaxed);
        for t in server.thread_hdls {
            t.join().unwrap();
        }
        server.join().unwrap();
    }

    #[test]
    fn test_client_check_signature() {
        logger::setup();
        let leader = TestNode::new();
        let leader = TestNode::new_localhost();
        let alice = Mint::new(10_000);
        let bank = Bank::new(&alice);
        let bob_pubkey = KeyPair::new().pubkey();
        let exit = Arc::new(AtomicBool::new(false));

        let leader_data = leader.data.clone();
        let server = FullNode::new_leader(
            bank,
            0,
            None,
            Some(Duration::from_millis(30)),
            leader.data.clone(),
            leader.sockets.requests,
            leader.sockets.transaction,
            leader.sockets.broadcast,
            leader.sockets.respond,
            leader.sockets.gossip,
            leader,
            exit.clone(),
            sink(),
        );
@@ -371,9 +394,9 @@ mod tests {
            .unwrap();
        let transactions_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
        let mut client = ThinClient::new(
            leader.data.requests_addr,
            leader_data.contact_info.rpu,
            requests_socket,
            leader.data.transactions_addr,
            leader_data.contact_info.tpu,
            transactions_socket,
        );
        let last_id = client.get_last_id();
@@ -385,8 +408,6 @@ mod tests {
        assert!(client.check_signature(&sig));

        exit.store(true, Ordering::Relaxed);
        for t in server.thread_hdls {
            t.join().unwrap();
        }
        server.join().unwrap();
    }
}
@@ -3,20 +3,20 @@ use std::time::Duration;
use std::time::{SystemTime, UNIX_EPOCH};

pub fn duration_as_us(d: &Duration) -> u64 {
    return (d.as_secs() * 1000 * 1000) + (d.subsec_nanos() as u64 / 1_000);
    (d.as_secs() * 1000 * 1000) + (u64::from(d.subsec_nanos()) / 1_000)
}

pub fn duration_as_ms(d: &Duration) -> u64 {
    return (d.as_secs() * 1000) + (d.subsec_nanos() as u64 / 1_000_000);
    (d.as_secs() * 1000) + (u64::from(d.subsec_nanos()) / 1_000_000)
}

pub fn duration_as_s(d: &Duration) -> f32 {
    return d.as_secs() as f32 + (d.subsec_nanos() as f32 / 1_000_000_000.0);
    d.as_secs() as f32 + (d.subsec_nanos() as f32 / 1_000_000_000.0)
}

pub fn timestamp() -> u64 {
    let now = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("create timestamp in timing");
    return duration_as_ms(&now);
    duration_as_ms(&now)
}
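The refactor keeps the arithmetic identical; `u64::from` merely replaces the `as u64` cast and the trailing `return`s become tail expressions. A standalone check of the unit math, using local copies of the two integer helpers above:

use std::time::Duration;

fn duration_as_us(d: &Duration) -> u64 {
    (d.as_secs() * 1000 * 1000) + (u64::from(d.subsec_nanos()) / 1_000)
}

fn duration_as_ms(d: &Duration) -> u64 {
    (d.as_secs() * 1000) + (u64::from(d.subsec_nanos()) / 1_000_000)
}

fn main() {
    // 1.5 seconds -> 1_500_000 us -> 1_500 ms
    let d = Duration::new(1, 500_000_000);
    assert_eq!(duration_as_us(&d), 1_500_000);
    assert_eq!(duration_as_ms(&d), 1_500);
}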
src/tpu.rs (73 changes)
@@ -27,46 +27,48 @@
use bank::Bank;
use banking_stage::BankingStage;
use crdt::Crdt;
use fetch_stage::FetchStage;
use packet::{BlobRecycler, PacketRecycler};
use record_stage::RecordStage;
use service::Service;
use sigverify_stage::SigVerifyStage;
use std::io::Write;
use std::net::UdpSocket;
use std::sync::atomic::AtomicBool;
use std::sync::Arc;
use std::thread::JoinHandle;
use std::sync::{Arc, RwLock};
use std::thread::{self, JoinHandle};
use std::time::Duration;
use streamer::BlobReceiver;
use write_stage::WriteStage;

pub struct Tpu {
    pub thread_hdls: Vec<JoinHandle<()>>,
    fetch_stage: FetchStage,
    sigverify_stage: SigVerifyStage,
    banking_stage: BankingStage,
    record_stage: RecordStage,
    write_stage: WriteStage,
}

impl Tpu {
    pub fn new<W: Write + Send + 'static>(
        bank: Arc<Bank>,
        bank: &Arc<Bank>,
        crdt: &Arc<RwLock<Crdt>>,
        tick_duration: Option<Duration>,
        transactions_socket: UdpSocket,
        blob_recycler: BlobRecycler,
        blob_recycler: &BlobRecycler,
        exit: Arc<AtomicBool>,
        writer: W,
    ) -> (Self, BlobReceiver) {
        let packet_recycler = PacketRecycler::default();

        let (fetch_stage, packet_receiver) =
            FetchStage::new(transactions_socket, exit.clone(), packet_recycler.clone());
            FetchStage::new(transactions_socket, exit, &packet_recycler);

        let (sigverify_stage, verified_receiver) =
            SigVerifyStage::new(exit.clone(), packet_receiver);
        let (sigverify_stage, verified_receiver) = SigVerifyStage::new(packet_receiver);

        let (banking_stage, signal_receiver) = BankingStage::new(
            bank.clone(),
            exit.clone(),
            verified_receiver,
            packet_recycler.clone(),
        );
        let (banking_stage, signal_receiver) =
            BankingStage::new(bank.clone(), verified_receiver, packet_recycler.clone());

        let (record_stage, entry_receiver) = match tick_duration {
            Some(tick_duration) => {
@@ -77,18 +79,43 @@ impl Tpu {

        let (write_stage, blob_receiver) = WriteStage::new(
            bank.clone(),
            exit.clone(),
            crdt.clone(),
            blob_recycler.clone(),
            writer,
            entry_receiver,
        );
        let mut thread_hdls = vec![
            banking_stage.thread_hdl,
            record_stage.thread_hdl,
            write_stage.thread_hdl,
        ];
        thread_hdls.extend(fetch_stage.thread_hdls.into_iter());
        thread_hdls.extend(sigverify_stage.thread_hdls.into_iter());
        (Tpu { thread_hdls }, blob_receiver)

        let tpu = Tpu {
            fetch_stage,
            sigverify_stage,
            banking_stage,
            record_stage,
            write_stage,
        };
        (tpu, blob_receiver)
    }

    pub fn close(self) -> thread::Result<()> {
        self.fetch_stage.close();
        self.join()
    }
}

impl Service for Tpu {
    fn thread_hdls(self) -> Vec<JoinHandle<()>> {
        let mut thread_hdls = vec![];
        thread_hdls.extend(self.fetch_stage.thread_hdls().into_iter());
        thread_hdls.extend(self.sigverify_stage.thread_hdls().into_iter());
        thread_hdls.extend(self.banking_stage.thread_hdls().into_iter());
        thread_hdls.extend(self.record_stage.thread_hdls().into_iter());
        thread_hdls.extend(self.write_stage.thread_hdls().into_iter());
        thread_hdls
    }

    fn join(self) -> thread::Result<()> {
        for thread_hdl in self.thread_hdls() {
            thread_hdl.join()?;
        }
        Ok(())
    }
}
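`Tpu::close` works by closing the fetch stage's input; every downstream stage then sees its receiver disconnect and exits in turn, which is exactly what `join` waits for. A standalone sketch of that cascade with two toy stages, using only std channels (all names here are illustrative, not from the diff):

use std::sync::mpsc::{channel, Receiver, Sender};
use std::thread::{spawn, JoinHandle};

// Stage 1: forwards input to stage 2 until its own input disconnects.
fn stage1(input: Receiver<u64>, output: Sender<u64>) -> JoinHandle<()> {
    spawn(move || {
        for item in input.iter() {
            if output.send(item * 2).is_err() {
                break;
            }
        }
        // Dropping `output` here disconnects stage 2 in turn.
    })
}

// Stage 2: consumes until stage 1 hangs up.
fn stage2(input: Receiver<u64>) -> JoinHandle<()> {
    spawn(move || {
        for item in input.iter() {
            println!("processed {}", item);
        }
    })
}

fn main() {
    let (head, r1) = channel();
    let (s2, r2) = channel();
    let h1 = stage1(r1, s2);
    let h2 = stage2(r2);
    head.send(21).unwrap();
    drop(head); // the "close": the disconnect cascades through both stages
    h1.join().unwrap();
    h2.join().unwrap();
}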
@@ -32,9 +32,9 @@ impl PaymentPlan for Plan {
        }
    }

    fn apply_witness(&mut self, witness: &Witness) {
    fn apply_witness(&mut self, witness: &Witness, from: &PublicKey) {
        match self {
            Plan::Budget(budget) => budget.apply_witness(witness),
            Plan::Budget(budget) => budget.apply_witness(witness, from),
        }
    }
}
@@ -47,6 +47,17 @@ pub struct Contract {
    pub plan: Plan,
}

#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub struct Vote {
    /// We send some gossip-specific membership information through the vote to shortcut
    /// liveness voting.
    /// The version of the CRDT struct that the last_id of this network voted with.
    pub version: u64,
    /// The version of the CRDT struct that has the same network configuration as this one.
    pub contact_info_version: u64,
    // TODO: add signature of the state here as well
}

/// An instruction to progress the smart contract.
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub enum Instruction {
@@ -59,6 +70,9 @@ pub enum Instruction {
    /// Tell the payment plan that the `NewContract` with `Signature` has been
    /// signed by the containing transaction's `PublicKey`.
    ApplySignature(Signature),

    /// Vote for a PoH that is equal to the last_id of this transaction.
    NewVote(Vote),
}

/// An instruction signed by a client with `PublicKey`.
@@ -135,6 +149,10 @@ impl Transaction {
        Self::new_from_instruction(from_keypair, instruction, last_id, 0)
    }

    pub fn new_vote(from_keypair: &KeyPair, vote: Vote, last_id: Hash, fee: i64) -> Self {
        Transaction::new_from_instruction(&from_keypair, Instruction::NewVote(vote), last_id, fee)
    }

    /// Create and sign a postdated Transaction. Used for unit-testing.
    pub fn new_on_date(
        from_keypair: &KeyPair,
@@ -145,7 +163,7 @@ impl Transaction {
    ) -> Self {
        let from = from_keypair.pubkey();
        let budget = Budget::Or(
            (Condition::Timestamp(dt), Payment { tokens, to }),
            (Condition::Timestamp(dt, from), Payment { tokens, to }),
            (Condition::Signature(from), Payment { tokens, to: from }),
        );
        let plan = Plan::Budget(budget);
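A minimal sketch (not part of the diff) of building a vote transaction with the new `Vote` struct and `Transaction::new_vote` API shown above. The version numbers are illustrative placeholders, not values from a real CRDT:

```rust
// Sketch only: construct and sign a NewVote transaction. The fee of 0
// and the version values here are assumptions for illustration.
use hash::Hash;
use signature::{KeyPair, KeyPairUtil};
use transaction::{Transaction, Vote};

fn example_vote(last_id: Hash) -> Transaction {
    let keypair = KeyPair::new();
    let vote = Vote {
        version: 1,              // CRDT version this node last voted with
        contact_info_version: 1, // CRDT version of this network configuration
    };
    Transaction::new_vote(&keypair, vote, last_id, 0)
}
```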
src/tvu.rs (122 changed lines)
@@ -2,13 +2,15 @@
//! 3-stage transaction validation pipeline in software.
//!
//! ```text
//! .------------------------------------------.
//! | TVU |
//! | |
//! | | .------------.
//! | .------------------------>| Validators |
//! | .-------. | | `------------`
//! .--------. | | | .----+---. .-----------. |
//! .--------------------------------------------.
//! | |
//! | .--------------------------------+---------.
//! | | TVU | |
//! | | | |
//! | | | | .------------.
//! | | .------------+----------->| Validators |
//! v | .-------. | | | `------------`
//! .----+---. | | | .----+---. .----+------. |
//! | Leader |--------->| Blob | | Window | | Replicate | |
//! `--------` | | Fetch |-->| Stage |-->| Stage | |
//! .------------. | | Stage | | | | | |
@@ -39,15 +41,19 @@ use blob_fetch_stage::BlobFetchStage;
use crdt::Crdt;
use packet::BlobRecycler;
use replicate_stage::ReplicateStage;
use service::Service;
use signature::KeyPair;
use std::net::UdpSocket;
use std::sync::atomic::AtomicBool;
use std::sync::{Arc, RwLock};
use std::thread::JoinHandle;
use std::thread::{self, JoinHandle};
use streamer::Window;
use window_stage::WindowStage;

pub struct Tvu {
    pub thread_hdls: Vec<JoinHandle<()>>,
    replicate_stage: ReplicateStage,
    fetch_stage: BlobFetchStage,
    window_stage: WindowStage,
}

impl Tvu {
@@ -63,7 +69,8 @@ impl Tvu {
    /// * `retransmit_socket` - my retransmit socket
    /// * `exit` - The exit signal.
    pub fn new(
        bank: Arc<Bank>,
        keypair: KeyPair,
        bank: &Arc<Bank>,
        entry_height: u64,
        crdt: Arc<RwLock<Crdt>>,
        window: Window,
@@ -73,33 +80,59 @@ impl Tvu {
        exit: Arc<AtomicBool>,
    ) -> Self {
        let blob_recycler = BlobRecycler::default();
        let (fetch_stage, blob_receiver) = BlobFetchStage::new_multi_socket(
        let (fetch_stage, blob_fetch_receiver) = BlobFetchStage::new_multi_socket(
            vec![replicate_socket, repair_socket],
            exit.clone(),
            blob_recycler.clone(),
            exit,
            &blob_recycler,
        );
        //TODO
        //the packets coming out of blob_receiver need to be sent to the GPU and verified,
        //then sent to the window, which does the erasure coding reconstruction
        let (window_stage, blob_receiver) = WindowStage::new(
            crdt,
        let (window_stage, blob_window_receiver) = WindowStage::new(
            &crdt,
            window,
            entry_height,
            retransmit_socket,
            exit.clone(),
            blob_recycler.clone(),
            blob_receiver,
            &blob_recycler,
            blob_fetch_receiver,
        );

        let replicate_stage = ReplicateStage::new(bank, exit, blob_receiver);
        let replicate_stage = ReplicateStage::new(
            keypair,
            bank.clone(),
            crdt,
            blob_recycler,
            blob_window_receiver,
        );

        let mut threads = vec![replicate_stage.thread_hdl];
        threads.extend(fetch_stage.thread_hdls.into_iter());
        threads.extend(window_stage.thread_hdls.into_iter());
        Tvu {
            thread_hdls: threads,
            replicate_stage,
            fetch_stage,
            window_stage,
        }
    }

    pub fn close(self) -> thread::Result<()> {
        self.fetch_stage.close();
        self.join()
    }
}

impl Service for Tvu {
    fn thread_hdls(self) -> Vec<JoinHandle<()>> {
        let mut thread_hdls = vec![];
        thread_hdls.extend(self.replicate_stage.thread_hdls().into_iter());
        thread_hdls.extend(self.fetch_stage.thread_hdls().into_iter());
        thread_hdls.extend(self.window_stage.thread_hdls().into_iter());
        thread_hdls
    }

    fn join(self) -> thread::Result<()> {
        for thread_hdl in self.thread_hdls() {
            thread_hdl.join()?;
        }
        Ok(())
    }
}

#[cfg(test)]
@@ -114,10 +147,11 @@ pub mod tests {
    use ncp::Ncp;
    use packet::BlobRecycler;
    use result::Result;
    use service::Service;
    use signature::{KeyPair, KeyPairUtil};
    use std::collections::VecDeque;
    use std::net::UdpSocket;
    use std::sync::atomic::{AtomicBool, Ordering};
    use std::sync::atomic::AtomicBool;
    use std::sync::mpsc::channel;
    use std::sync::{Arc, RwLock};
    use std::time::Duration;
@@ -132,27 +166,28 @@ pub mod tests {
    ) -> Result<(Ncp, Window)> {
        let window = streamer::default_window();
        let send_sock = UdpSocket::bind("0.0.0.0:0").expect("bind 0");
        let ncp = Ncp::new(crdt, window.clone(), listen, send_sock, exit)?;
        let ncp = Ncp::new(&crdt, window.clone(), listen, send_sock, exit)?;
        Ok((ncp, window))
    }
    /// Test that a message sent from the leader to target1 gets replicated to target2.
    #[test]
    fn test_replicate() {
        logger::setup();
        let leader = TestNode::new();
        let target1 = TestNode::new();
        let target2 = TestNode::new();
        let leader = TestNode::new_localhost();
        let target1_kp = KeyPair::new();
        let target1 = TestNode::new_localhost_with_pubkey(target1_kp.pubkey());
        let target2 = TestNode::new_localhost();
        let exit = Arc::new(AtomicBool::new(false));

        //start crdt_leader
        let mut crdt_l = Crdt::new(leader.data.clone());
        let mut crdt_l = Crdt::new(leader.data.clone()).expect("Crdt::new");
        crdt_l.set_leader(leader.data.id);

        let cref_l = Arc::new(RwLock::new(crdt_l));
        let dr_l = new_ncp(cref_l, leader.sockets.gossip, exit.clone()).unwrap();

        //start crdt2
        let mut crdt2 = Crdt::new(target2.data.clone());
        let mut crdt2 = Crdt::new(target2.data.clone()).expect("Crdt::new");
        crdt2.insert(&leader.data);
        crdt2.set_leader(leader.data.id);
        let leader_id = leader.data.id;
@@ -175,26 +210,27 @@ pub mod tests {
        // simulate leader sending messages
        let (s_responder, r_responder) = channel();
        let t_responder = streamer::responder(
            "test_replicate",
            leader.sockets.requests,
            exit.clone(),
            resp_recycler.clone(),
            r_responder,
        );

        let starting_balance = 10_000;
        let mint = Mint::new(starting_balance);
        let replicate_addr = target1.data.replicate_addr;
        let replicate_addr = target1.data.contact_info.tvu;
        let bank = Arc::new(Bank::new(&mint));

        //start crdt1
        let mut crdt1 = Crdt::new(target1.data.clone());
        let mut crdt1 = Crdt::new(target1.data.clone()).expect("Crdt::new");
        crdt1.insert(&leader.data);
        crdt1.set_leader(leader.data.id);
        let cref1 = Arc::new(RwLock::new(crdt1));
        let dr_1 = new_ncp(cref1.clone(), target1.sockets.gossip, exit.clone()).unwrap();

        let tvu = Tvu::new(
            bank.clone(),
            target1_kp,
            &bank,
            0,
            cref1,
            dr_1.1,
@@ -250,6 +286,7 @@ pub mod tests {

        // send the blobs into the socket
        s_responder.send(msgs).expect("send");
        drop(s_responder);

        // receive retransmitted messages
        let timer = Duration::new(1, 0);
@@ -263,19 +300,10 @@ pub mod tests {
        let bob_balance = bank.get_balance(&bob_keypair.pubkey());
        assert_eq!(bob_balance, starting_balance - alice_ref_balance);

        exit.store(true, Ordering::Relaxed);
        for t in tvu.thread_hdls {
            t.join().expect("join");
        }
        for t in dr_l.0.thread_hdls {
            t.join().expect("join");
        }
        for t in dr_2.0.thread_hdls {
            t.join().expect("join");
        }
        for t in dr_1.0.thread_hdls {
            t.join().expect("join");
        }
        tvu.close().expect("close");
        dr_l.0.join().expect("join");
        dr_2.0.join().expect("join");
        dr_1.0.join().expect("join");
        t_receiver.join().expect("join");
        t_responder.join().expect("join");
    }
src/voting.rs (new file, 21 lines)
@@ -0,0 +1,21 @@
use entry::Entry;
use hash::Hash;
use signature::PublicKey;
use transaction::{Instruction, Vote};

pub fn entries_to_votes(entries: &[Entry]) -> Vec<(PublicKey, Vote, Hash)> {
    entries
        .iter()
        .flat_map(|entry| {
            let vs: Vec<(PublicKey, Vote, Hash)> = entry
                .transactions
                .iter()
                .filter_map(|tx| match tx.instruction {
                    Instruction::NewVote(ref vote) => Some((tx.from, vote.clone(), tx.last_id)),
                    _ => None,
                })
                .collect();
            vs
        })
        .collect()
}
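A minimal usage sketch (not part of the diff): pull the votes out of a batch of entries and feed them to the gossip table, mirroring what `WriteStage::write_and_send_entries` does later in this changeset. The helper `record_votes` is hypothetical:

```rust
// Sketch only: each returned item is (voter pubkey, vote, the last_id
// the vote refers to); insert_votes is the Crdt method used by WriteStage.
use crdt::Crdt;
use entry::Entry;
use std::sync::{Arc, RwLock};
use voting::entries_to_votes;

fn record_votes(crdt: &Arc<RwLock<Crdt>>, entries: &[Entry]) {
    let votes = entries_to_votes(entries);
    crdt.write().unwrap().insert_votes(&votes);
}
```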
@@ -2,39 +2,36 @@

use crdt::Crdt;
use packet::BlobRecycler;
use service::Service;
use std::net::UdpSocket;
use std::sync::atomic::AtomicBool;
use std::sync::mpsc::channel;
use std::sync::{Arc, RwLock};
use std::thread::JoinHandle;
use std::thread::{self, JoinHandle};
use streamer::{self, BlobReceiver, Window};

pub struct WindowStage {
    pub thread_hdls: Vec<JoinHandle<()>>,
    thread_hdls: Vec<JoinHandle<()>>,
}

impl WindowStage {
    pub fn new(
        crdt: Arc<RwLock<Crdt>>,
        crdt: &Arc<RwLock<Crdt>>,
        window: Window,
        entry_height: u64,
        retransmit_socket: UdpSocket,
        exit: Arc<AtomicBool>,
        blob_recycler: BlobRecycler,
        blob_recycler: &BlobRecycler,
        fetch_stage_receiver: BlobReceiver,
    ) -> (Self, BlobReceiver) {
        let (retransmit_sender, retransmit_receiver) = channel();

        let t_retransmit = streamer::retransmitter(
            retransmit_socket,
            exit.clone(),
            crdt.clone(),
            blob_recycler.clone(),
            retransmit_receiver,
        );
        let (blob_sender, blob_receiver) = channel();
        let t_window = streamer::window(
            exit.clone(),
            crdt.clone(),
            window,
            entry_height,
@@ -48,3 +45,16 @@ impl WindowStage {
        (WindowStage { thread_hdls }, blob_receiver)
    }
}

impl Service for WindowStage {
    fn thread_hdls(self) -> Vec<JoinHandle<()>> {
        self.thread_hdls
    }

    fn join(self) -> thread::Result<()> {
        for thread_hdl in self.thread_hdls() {
            thread_hdl.join()?;
        }
        Ok(())
    }
}
@@ -3,49 +3,58 @@
//! stdout, and then sends the Entry to its output channel.

use bank::Bank;
use counter::Counter;
use crdt::Crdt;
use entry::Entry;
use entry_writer::EntryWriter;
use ledger::Block;
use packet::BlobRecycler;
use result::Result;
use result::{Error, Result};
use service::Service;
use std::collections::VecDeque;
use std::io::Write;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::{channel, Receiver};
use std::sync::Arc;
use std::thread::{Builder, JoinHandle};
use std::sync::atomic::AtomicUsize;
use std::sync::mpsc::{channel, Receiver, RecvTimeoutError};
use std::sync::{Arc, RwLock};
use std::thread::{self, Builder, JoinHandle};
use std::time::Duration;
use streamer::{BlobReceiver, BlobSender};
use voting::entries_to_votes;

pub struct WriteStage {
    pub thread_hdl: JoinHandle<()>,
    thread_hdl: JoinHandle<()>,
}

impl WriteStage {
    /// Process any Entry items that have been published by the Historian.
    /// Process any Entry items that have been published by the RecordStage.
    /// Continuously broadcast blobs of entries out.
    pub fn write_and_send_entries<W: Write>(
        crdt: &Arc<RwLock<Crdt>>,
        entry_writer: &mut EntryWriter<W>,
        blob_sender: &BlobSender,
        blob_recycler: &BlobRecycler,
        entry_receiver: &Receiver<Vec<Entry>>,
    ) -> Result<()> {
        let entries = entry_receiver.recv_timeout(Duration::new(1, 0))?;
        let votes = entries_to_votes(&entries);
        crdt.write().unwrap().insert_votes(&votes);
        entry_writer.write_and_register_entries(&entries)?;
        trace!("New blobs? {}", entries.len());
        let mut blobs = VecDeque::new();
        entries.to_blobs(blob_recycler, &mut blobs);
        if !blobs.is_empty() {
            inc_new_counter!("write_stage-broadcast_vote-count", votes.len());
            inc_new_counter!("write_stage-broadcast_blobs-count", blobs.len());
            trace!("broadcasting {}", blobs.len());
            blob_sender.send(blobs)?;
        }
        Ok(())
    }

    /// Create a new Rpu that wraps the given Bank.
    /// Create a new WriteStage for writing and broadcasting entries.
    pub fn new<W: Write + Send + 'static>(
        bank: Arc<Bank>,
        exit: Arc<AtomicBool>,
        crdt: Arc<RwLock<Crdt>>,
        blob_recycler: BlobRecycler,
        writer: W,
        entry_receiver: Receiver<Vec<Entry>>,
@@ -56,16 +65,22 @@ impl WriteStage {
            .spawn(move || {
                let mut entry_writer = EntryWriter::new(&bank, writer);
                loop {
                    let _ = Self::write_and_send_entries(
                    if let Err(e) = Self::write_and_send_entries(
                        &crdt,
                        &mut entry_writer,
                        &blob_sender,
                        &blob_recycler,
                        &entry_receiver,
                    );
                    if exit.load(Ordering::Relaxed) {
                        info!("broadcat_service exiting");
                        break;
                    }
                    ) {
                        match e {
                            Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => break,
                            Error::RecvTimeoutError(RecvTimeoutError::Timeout) => (),
                            _ => {
                                inc_new_counter!("write_stage-error", 1);
                                error!("{:?}", e);
                            }
                        }
                    };
                }
            })
            .unwrap();
@@ -73,3 +88,13 @@ impl WriteStage {
        (WriteStage { thread_hdl }, blob_receiver)
    }
}

impl Service for WriteStage {
    fn thread_hdls(self) -> Vec<JoinHandle<()>> {
        vec![self.thread_hdl]
    }

    fn join(self) -> thread::Result<()> {
        self.thread_hdl.join()
    }
}
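The new loop exits when its input channel disconnects rather than polling an exit flag each pass. A distilled sketch (not part of the diff) of that shutdown pattern, using only the standard library:

```rust
// Sketch only: a stage exits when its input channel disconnects, so
// dropping the sender upstream replaces polling an AtomicBool.
use std::sync::mpsc::{Receiver, RecvTimeoutError};
use std::time::Duration;

fn run<T>(receiver: Receiver<T>) {
    loop {
        match receiver.recv_timeout(Duration::new(1, 0)) {
            Ok(_batch) => { /* process the batch */ }
            Err(RecvTimeoutError::Timeout) => (), // nothing arrived; try again
            Err(RecvTimeoutError::Disconnected) => break, // sender dropped: shut down
        }
    }
}
```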
@@ -8,6 +8,7 @@ use solana::crdt::{Crdt, TestNode};
use solana::logger;
use solana::ncp::Ncp;
use solana::packet::Blob;
use solana::service::Service;
use std::net::UdpSocket;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, RwLock};
@@ -15,12 +16,12 @@ use std::thread::sleep;
use std::time::Duration;

fn test_node(exit: Arc<AtomicBool>) -> (Arc<RwLock<Crdt>>, Ncp, UdpSocket) {
    let tn = TestNode::new();
    let crdt = Crdt::new(tn.data.clone());
    let tn = TestNode::new_localhost();
    let crdt = Crdt::new(tn.data.clone()).expect("Crdt::new");
    let c = Arc::new(RwLock::new(crdt));
    let w = Arc::new(RwLock::new(vec![]));
    let d = Ncp::new(
        c.clone(),
        &c.clone(),
        w,
        tn.sockets.gossip,
        tn.sockets.gossip_send,
@@ -30,8 +31,8 @@ fn test_node(exit: Arc<AtomicBool>) -> (Arc<RwLock<Crdt>>, Ncp, UdpSocket) {
}

/// Test that the network converges.
/// Run until every node in the network has a full ReplicatedData set.
/// Check that nodes stop sending updates after all the ReplicatedData has been shared.
/// Run until every node in the network has a full NodeInfo set.
/// Check that nodes stop sending updates after all the NodeInfo has been shared.
/// Tests that actually use this function are below.
fn run_gossip_topo<F>(topo: F)
where
@@ -58,10 +59,8 @@ where
        sleep(Duration::new(1, 0));
    }
    exit.store(true, Ordering::Relaxed);
    for (c, dr, _) in listen.into_iter() {
        for j in dr.thread_hdls.into_iter() {
            j.join().unwrap();
        }
    for (c, dr, _) in listen {
        dr.join().unwrap();
        // make it clear what failed
        // protocol is too chatty; updates should stop after everyone receives `num`
        assert!(c.read().unwrap().update_index <= num as u64);
@@ -175,16 +174,13 @@ pub fn crdt_retransmit() {
    //r1 was the sender, so it should fail to receive the packet
    assert_eq!(res, [true, false, false]);
    exit.store(true, Ordering::Relaxed);
    let mut threads = vec![];
    threads.extend(dr1.thread_hdls.into_iter());
    threads.extend(dr2.thread_hdls.into_iter());
    threads.extend(dr3.thread_hdls.into_iter());
    for t in threads.into_iter() {
        t.join().unwrap();
    }
    dr1.join().unwrap();
    dr2.join().unwrap();
    dr3.join().unwrap();
}

#[test]
#[ignore]
fn test_external_liveness_table() {
    logger::setup();
    let c1_c4_exit = Arc::new(AtomicBool::new(false));
@@ -255,13 +251,8 @@ fn test_external_liveness_table() {

    // Shutdown validators c2 and c3
    c2_c3_exit.store(true, Ordering::Relaxed);
    let mut threads = vec![];
    threads.extend(dr2.thread_hdls.into_iter());
    threads.extend(dr3.thread_hdls.into_iter());

    for t in threads {
        t.join().unwrap();
    }
    dr2.join().unwrap();
    dr3.join().unwrap();

    // Allow communication between c1 and c4, make sure that c1's external_liveness table
    // entry for c4 gets cleared
@@ -281,11 +272,6 @@ fn test_external_liveness_table() {

    // Shutdown validators c1 and c4
    c1_c4_exit.store(true, Ordering::Relaxed);
    let mut threads = vec![];
    threads.extend(dr1.thread_hdls.into_iter());
    threads.extend(dr4.thread_hdls.into_iter());

    for t in threads {
        t.join().unwrap();
    }
    dr1.join().unwrap();
    dr4.join().unwrap();
}
@@ -1,96 +1,85 @@
#[macro_use]
extern crate log;
extern crate bincode;
extern crate serde_json;
extern crate solana;

use solana::bank::Bank;
use solana::crdt::TestNode;
use solana::crdt::{Crdt, ReplicatedData};
use solana::fullnode::FullNode;
use solana::crdt::{Crdt, NodeInfo};
use solana::entry_writer::EntryWriter;
use solana::fullnode::{FullNode, LedgerFile};
use solana::logger;
use solana::mint::Mint;
use solana::ncp::Ncp;
use solana::service::Service;
use solana::signature::{KeyPair, KeyPairUtil, PublicKey};
use solana::streamer::default_window;
use solana::thin_client::ThinClient;
use std::io;
use std::io::sink;
use std::fs::File;
use std::net::UdpSocket;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::atomic::AtomicBool;
use std::sync::{Arc, RwLock};
use std::thread::sleep;
use std::thread::JoinHandle;
use std::time::Duration;

fn validator(
    leader: &ReplicatedData,
    exit: Arc<AtomicBool>,
    alice: &Mint,
    threads: &mut Vec<JoinHandle<()>>,
) {
    let validator = TestNode::new();
    let replicant_bank = Bank::new(&alice);
    let mut ts = FullNode::new_validator(
        replicant_bank,
        0,
        validator.data.clone(),
        validator.sockets.requests,
        validator.sockets.respond,
        validator.sockets.replicate,
        validator.sockets.gossip,
        validator.sockets.repair,
        leader.clone(),
        exit.clone(),
    );
    threads.append(&mut ts.thread_hdls);
}

fn converge(
    leader: &ReplicatedData,
    exit: Arc<AtomicBool>,
    num_nodes: usize,
    threads: &mut Vec<JoinHandle<()>>,
) -> (Vec<ReplicatedData>, PublicKey) {
fn converge(leader: &NodeInfo, num_nodes: usize) -> Vec<NodeInfo> {
    //let's spy on the network
    let mut spy = TestNode::new();
    let exit = Arc::new(AtomicBool::new(false));
    let mut spy = TestNode::new_localhost();
    let daddr = "0.0.0.0:0".parse().unwrap();
    let me = spy.data.id.clone();
    spy.data.replicate_addr = daddr;
    spy.data.requests_addr = daddr;
    let mut spy_crdt = Crdt::new(spy.data);
    spy.data.contact_info.tvu = daddr;
    spy.data.contact_info.rpu = daddr;
    let mut spy_crdt = Crdt::new(spy.data).expect("Crdt::new");
    spy_crdt.insert(&leader);
    spy_crdt.set_leader(leader.id);
    let spy_ref = Arc::new(RwLock::new(spy_crdt));
    let spy_window = default_window();
    let dr = Ncp::new(
        spy_ref.clone(),
    let ncp = Ncp::new(
        &spy_ref,
        spy_window,
        spy.sockets.gossip,
        spy.sockets.gossip_send,
        exit,
        exit.clone(),
    ).unwrap();
    //wait for the network to converge
    let mut converged = false;
    let mut rv = vec![];
    for _ in 0..30 {
        let num = spy_ref.read().unwrap().convergence();
        if num == num_nodes as u64 {
        let mut v: Vec<NodeInfo> = spy_ref
            .read()
            .unwrap()
            .table
            .values()
            .into_iter()
            .filter(|x| x.id != me)
            .filter(|x| x.contact_info.rpu != daddr)
            .cloned()
            .collect();
        if num >= num_nodes as u64 && v.len() >= num_nodes {
            rv.append(&mut v);
            converged = true;
            break;
        }
        sleep(Duration::new(1, 0));
    }
    assert!(converged);
    threads.extend(dr.thread_hdls.into_iter());
    let v: Vec<ReplicatedData> = spy_ref
        .read()
        .unwrap()
        .table
        .values()
        .into_iter()
        .filter(|x| x.id != me)
        .map(|x| x.clone())
        .collect();
    (v.clone(), me)
    ncp.close().unwrap();
    rv
}

fn genesis(num: i64) -> (Mint, String) {
    let mint = Mint::new(num);
    let id = {
        let ids: Vec<_> = mint.pubkey().iter().map(|id| format!("{}", id)).collect();
        ids.join("")
    };
    let path = format!("target/test_multi_node_dynamic_network-{}.log", id);
    let mut writer = File::create(path.clone()).unwrap();

    EntryWriter::write_entries(&mut writer, mint.create_entries()).unwrap();
    (mint, path.to_string())
}
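A minimal sketch (not part of the diff) of the boot sequence the tests below share: `genesis()` writes the mint's entries to a ledger file, and `FullNode::new` boots a leader from it, with the signature exactly as shown in this changeset. The bindings are illustrative:

```rust
// Sketch only: boot a leader from a freshly written ledger. Validators
// instead pass `false`, Some(keypair), and the leader's ncp address.
let (alice, ledger_path) = genesis(10_000);
let leader = TestNode::new_localhost();
let leader_data = leader.data.clone();
let server = FullNode::new(
    leader,
    true,                                  // this node is the leader
    LedgerFile::Path(ledger_path.clone()), // replay/append this ledger
    None,                                  // leader keeps a default keypair
    None,                                  // no entrypoint to join
);
```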
#[test]
@@ -98,40 +87,42 @@ fn test_multi_node_validator_catchup_from_zero() {
    logger::setup();
    const N: usize = 5;
    trace!("test_multi_node_validator_catchup_from_zero");
    let leader = TestNode::new();
    let alice = Mint::new(10_000);
    let leader = TestNode::new_localhost();
    let leader_data = leader.data.clone();
    let bob_pubkey = KeyPair::new().pubkey();
    let exit = Arc::new(AtomicBool::new(false));

    let leader_bank = Bank::new(&alice);
    let server = FullNode::new_leader(
        leader_bank,
        0,
    let (alice, ledger_path) = genesis(10_000);
    let server = FullNode::new(
        leader,
        true,
        LedgerFile::Path(ledger_path.clone()),
        None,
        None,
        leader.data.clone(),
        leader.sockets.requests,
        leader.sockets.transaction,
        leader.sockets.broadcast,
        leader.sockets.respond,
        leader.sockets.gossip,
        exit.clone(),
        sink(),
    );

    let mut threads = server.thread_hdls;
    let mut nodes = vec![server];
    for _ in 0..N {
        validator(&leader.data, exit.clone(), &alice, &mut threads);
        let keypair = KeyPair::new();
        let validator = TestNode::new_localhost_with_pubkey(keypair.pubkey());
        let mut val = FullNode::new(
            validator,
            false,
            LedgerFile::Path(ledger_path.clone()),
            Some(keypair),
            Some(leader_data.contact_info.ncp),
        );
        nodes.push(val);
    }
    let (servers, spy_id0) = converge(&leader.data, exit.clone(), N + 2, &mut threads);
    let servers = converge(&leader_data, N + 1);
    //contains the leader addr as well
    assert_eq!(servers.len(), N + 1);
    //verify leader can do transfer
    let leader_balance = tx_and_retry_get_balance(&leader.data, &alice, &bob_pubkey).unwrap();
    let leader_balance =
        send_tx_and_retry_get_balance(&leader_data, &alice, &bob_pubkey, None).unwrap();
    assert_eq!(leader_balance, 500);
    //verify validator has the same balance
    let mut success = 0usize;
    for server in servers.iter() {
        info!("0server: {:?}", server.id[0]);
        info!("0server: {:x}", server.debug_id());
        let mut client = mk_client(server);
        if let Ok(bal) = client.poll_get_balance(&bob_pubkey) {
            info!("validator balance {}", bal);
@@ -144,13 +135,24 @@ fn test_multi_node_validator_catchup_from_zero() {

    success = 0;
    // start up another validator, converge and then check everyone's balances
    validator(&leader.data, exit.clone(), &alice, &mut threads);
    let (servers, _) = converge(&leader.data, exit.clone(), N + 4, &mut threads);
    let keypair = KeyPair::new();
    let validator = TestNode::new_localhost_with_pubkey(keypair.pubkey());
    let val = FullNode::new(
        validator,
        false,
        LedgerFile::Path(ledger_path.clone()),
        Some(keypair),
        Some(leader_data.contact_info.ncp),
    );
    nodes.push(val);
    //contains the leader and new node
    let servers = converge(&leader_data, N + 2);

    let mut leader_balance = tx_and_retry_get_balance(&leader.data, &alice, &bob_pubkey).unwrap();
    let mut leader_balance =
        send_tx_and_retry_get_balance(&leader_data, &alice, &bob_pubkey, None).unwrap();
    info!("leader balance {}", leader_balance);
    loop {
        let mut client = mk_client(&leader.data);
        let mut client = mk_client(&leader_data);
        leader_balance = client.poll_get_balance(&bob_pubkey).unwrap();
        if leader_balance == 1000 {
            break;
@@ -160,26 +162,23 @@ fn test_multi_node_validator_catchup_from_zero() {
    assert_eq!(leader_balance, 1000);

    for server in servers.iter() {
        if server.id != spy_id0 {
            let mut client = mk_client(server);
            info!("1server: {:?}", server.id[0]);
            for _ in 0..10 {
                if let Ok(bal) = client.poll_get_balance(&bob_pubkey) {
                    info!("validator balance {}", bal);
                    if bal == leader_balance {
                        success += 1;
                        break;
                    }
        let mut client = mk_client(server);
        info!("1server: {:x}", server.debug_id());
        for _ in 0..15 {
            if let Ok(bal) = client.poll_get_balance(&bob_pubkey) {
                info!("validator balance {}", bal);
                if bal == leader_balance {
                    success += 1;
                    break;
                }
                sleep(Duration::from_millis(500));
            }
            sleep(Duration::from_millis(500));
        }
    }
    assert_eq!(success, (servers.len() - 1));
    assert_eq!(success, servers.len());

    exit.store(true, Ordering::Relaxed);
    for t in threads {
        t.join().unwrap();
    for node in nodes {
        node.close().unwrap();
    }
}
@@ -188,35 +187,36 @@ fn test_multi_node_basic() {
    logger::setup();
    const N: usize = 5;
    trace!("test_multi_node_basic");
    let leader = TestNode::new();
    let alice = Mint::new(10_000);
    let leader = TestNode::new_localhost();
    let leader_data = leader.data.clone();
    let bob_pubkey = KeyPair::new().pubkey();
    let exit = Arc::new(AtomicBool::new(false));

    let leader_bank = Bank::new(&alice);
    let server = FullNode::new_leader(
        leader_bank,
        0,
    let (alice, ledger_path) = genesis(10_000);
    let server = FullNode::new(
        leader,
        true,
        LedgerFile::Path(ledger_path.clone()),
        None,
        None,
        leader.data.clone(),
        leader.sockets.requests,
        leader.sockets.transaction,
        leader.sockets.broadcast,
        leader.sockets.respond,
        leader.sockets.gossip,
        exit.clone(),
        sink(),
    );

    let mut threads = server.thread_hdls;
    let mut nodes = vec![server];
    for _ in 0..N {
        validator(&leader.data, exit.clone(), &alice, &mut threads);
        let keypair = KeyPair::new();
        let validator = TestNode::new_localhost_with_pubkey(keypair.pubkey());
        let val = FullNode::new(
            validator,
            false,
            LedgerFile::Path(ledger_path.clone()),
            Some(keypair),
            Some(leader_data.contact_info.ncp),
        );
        nodes.push(val);
    }
    let (servers, _) = converge(&leader.data, exit.clone(), N + 2, &mut threads);
    let servers = converge(&leader_data, N + 1);
    //contains the leader addr as well
    assert_eq!(servers.len(), N + 1);
    //verify leader can do transfer
    let leader_balance = tx_and_retry_get_balance(&leader.data, &alice, &bob_pubkey).unwrap();
    let leader_balance =
        send_tx_and_retry_get_balance(&leader_data, &alice, &bob_pubkey, None).unwrap();
    assert_eq!(leader_balance, 500);
    //verify validator has the same balance
    let mut success = 0usize;
@@ -231,32 +231,296 @@ fn test_multi_node_basic() {
    }
    assert_eq!(success, servers.len());

    exit.store(true, Ordering::Relaxed);
    for t in threads {
        t.join().unwrap();
    for node in nodes {
        node.close().unwrap();
    }
    std::fs::remove_file(ledger_path).unwrap();
}

fn mk_client(leader: &ReplicatedData) -> ThinClient {
#[test]
fn test_boot_validator_from_file() {
    logger::setup();
    let leader = TestNode::new_localhost();
    let bob_pubkey = KeyPair::new().pubkey();
    let (alice, ledger_path) = genesis(100_000);
    let leader_data = leader.data.clone();
    let leader_fullnode = FullNode::new(
        leader,
        true,
        LedgerFile::Path(ledger_path.clone()),
        None,
        None,
    );
    let leader_balance =
        send_tx_and_retry_get_balance(&leader_data, &alice, &bob_pubkey, Some(500)).unwrap();
    assert_eq!(leader_balance, 500);
    let leader_balance =
        send_tx_and_retry_get_balance(&leader_data, &alice, &bob_pubkey, Some(1000)).unwrap();
    assert_eq!(leader_balance, 1000);

    let keypair = KeyPair::new();
    let validator = TestNode::new_localhost_with_pubkey(keypair.pubkey());
    let validator_data = validator.data.clone();
    let val_fullnode = FullNode::new(
        validator,
        false,
        LedgerFile::Path(ledger_path.clone()),
        Some(keypair),
        Some(leader_data.contact_info.ncp),
    );
    let mut client = mk_client(&validator_data);
    let getbal = retry_get_balance(&mut client, &bob_pubkey, Some(leader_balance));
    assert!(getbal == Some(leader_balance));

    leader_fullnode.close().unwrap();
    val_fullnode.close().unwrap();
    std::fs::remove_file(ledger_path).unwrap();
}

fn create_leader(ledger_path: &str) -> (NodeInfo, FullNode) {
    let leader = TestNode::new_localhost();
    let leader_data = leader.data.clone();
    let leader_fullnode = FullNode::new(
        leader,
        true,
        LedgerFile::Path(ledger_path.to_string()),
        None,
        None,
    );
    (leader_data, leader_fullnode)
}

#[test]
fn test_leader_restart_validator_start_from_old_ledger() {
    // this test verifies that a freshly started leader makes its ledger available
    // in the repair window to validators that are started with an older
    // ledger (currently up to WINDOW_SIZE entries)
    logger::setup();

    let (alice, ledger_path) = genesis(100_000);
    let bob_pubkey = KeyPair::new().pubkey();

    let (leader_data, leader_fullnode) = create_leader(&ledger_path);

    // lengthen the ledger
    let leader_balance =
        send_tx_and_retry_get_balance(&leader_data, &alice, &bob_pubkey, Some(500)).unwrap();
    assert_eq!(leader_balance, 500);

    // create a "stale" ledger by copying the current ledger
    let mut stale_ledger_path = ledger_path.clone();
    stale_ledger_path.insert_str(ledger_path.rfind("/").unwrap() + 1, "stale_");

    std::fs::copy(&ledger_path, &stale_ledger_path)
        .expect(format!("copy {} to {}", &ledger_path, &stale_ledger_path,).as_str());

    // restart the leader
    leader_fullnode.close().unwrap();
    let (leader_data, leader_fullnode) = create_leader(&ledger_path);

    // lengthen the ledger
    let leader_balance =
        send_tx_and_retry_get_balance(&leader_data, &alice, &bob_pubkey, Some(1000)).unwrap();
    assert_eq!(leader_balance, 1000);

    // restart the leader
    leader_fullnode.close().unwrap();
    let (leader_data, leader_fullnode) = create_leader(&ledger_path);

    // start a validator from the old ledger
    let keypair = KeyPair::new();
    let validator = TestNode::new_localhost_with_pubkey(keypair.pubkey());
    let validator_data = validator.data.clone();
    let val_fullnode = FullNode::new(
        validator,
        false,
        LedgerFile::Path(stale_ledger_path.clone()),
        Some(keypair),
        Some(leader_data.contact_info.ncp),
    );

    // trigger broadcast; the validator should catch up from the leader, whose window contains
    // the entries missing from the stale ledger
    // send requests so the validator eventually sees a gap and requests a repair
    let mut expected = 1500;
    let mut client = mk_client(&validator_data);
    for _ in 0..10 {
        let leader_balance =
            send_tx_and_retry_get_balance(&leader_data, &alice, &bob_pubkey, Some(expected))
                .unwrap();
        assert_eq!(leader_balance, expected);
        let getbal = retry_get_balance(&mut client, &bob_pubkey, Some(leader_balance));
        if getbal == Some(leader_balance) {
            break;
        }
        expected += 500;
    }
    let getbal = retry_get_balance(&mut client, &bob_pubkey, Some(expected));
    assert_eq!(getbal, Some(expected));

    leader_fullnode.close().unwrap();
    val_fullnode.close().unwrap();
    std::fs::remove_file(ledger_path).unwrap();
    std::fs::remove_file(stale_ledger_path).unwrap();
}
//TODO: this test will run a long time so it's disabled for CI
#[test]
#[ignore]
fn test_multi_node_dynamic_network() {
    logger::setup();
    const N: usize = 60;
    let leader = TestNode::new_localhost();
    let bob_pubkey = KeyPair::new().pubkey();
    let (alice, ledger_path) = genesis(100_000);
    let leader_data = leader.data.clone();
    let server = FullNode::new(
        leader,
        true,
        LedgerFile::Path(ledger_path.clone()),
        None,
        None,
    );
    info!("{:x} LEADER", leader_data.debug_id());
    let leader_balance =
        send_tx_and_retry_get_balance(&leader_data, &alice, &bob_pubkey, Some(500)).unwrap();
    assert_eq!(leader_balance, 500);
    let leader_balance =
        send_tx_and_retry_get_balance(&leader_data, &alice, &bob_pubkey, Some(1000)).unwrap();
    assert_eq!(leader_balance, 1000);

    let validators: Vec<(NodeInfo, FullNode)> = (0..N)
        .into_iter()
        .map(|n| {
            let keypair = KeyPair::new();
            let validator = TestNode::new_localhost_with_pubkey(keypair.pubkey());
            let rd = validator.data.clone();
            //send some tokens to the new validator
            let bal =
                send_tx_and_retry_get_balance(&leader_data, &alice, &keypair.pubkey(), Some(500));
            assert_eq!(bal, Some(500));
            let val = FullNode::new(
                validator,
                false,
                LedgerFile::Path(ledger_path.clone()),
                Some(keypair),
                Some(leader_data.contact_info.ncp),
            );
            info!("started[{}/{}] {:x}", n, N, rd.debug_id());
            (rd, val)
        })
        .collect();

    let mut consecutive_success = 0;
    for i in 0..N {
        //verify leader can do transfer
        let expected = ((i + 3) * 500) as i64;
        let leader_balance =
            send_tx_and_retry_get_balance(&leader_data, &alice, &bob_pubkey, Some(expected))
                .unwrap();
        if leader_balance != expected {
            info!(
                "leader dropped transaction {} {:?} {:?}",
                i, leader_balance, expected
            );
        }
        //verify all validators have the same balance
        {
            let mut success = 0usize;
            let mut distance = 0i64;
            for server in validators.iter() {
                let mut client = mk_client(&server.0);
                trace!("{:x} {} get_balance start", server.0.debug_id(), i);
                let getbal = retry_get_balance(&mut client, &bob_pubkey, Some(leader_balance));
                trace!(
                    "{:x} {} get_balance: {:?} leader_balance: {}",
                    server.0.debug_id(),
                    i,
                    getbal,
                    leader_balance
                );
                let bal = getbal.unwrap_or(0);
                distance += (leader_balance - bal) / 500;
                if let Some(bal) = getbal {
                    if bal == leader_balance {
                        success += 1;
                    }
                }
            }
            info!(
                "SUCCESS[{}] {} out of {} distance: {}",
                i,
                success,
                validators.len(),
                distance
            );
            if success == validators.len() && distance == 0 {
                consecutive_success += 1;
            } else {
                consecutive_success = 0;
            }
            if consecutive_success == 10 {
                break;
            }
        }
    }
    assert_eq!(consecutive_success, 10);
    for (_, node) in &validators {
        node.exit();
    }
    server.exit();
    for (_, node) in validators {
        node.join().unwrap();
    }
    server.join().unwrap();

    std::fs::remove_file(ledger_path).unwrap();
}
fn mk_client(leader: &NodeInfo) -> ThinClient {
    let requests_socket = UdpSocket::bind("0.0.0.0:0").unwrap();
    requests_socket
        .set_read_timeout(Some(Duration::new(1, 0)))
        .unwrap();
    let transactions_socket = UdpSocket::bind("0.0.0.0:0").unwrap();

    let daddr = "0.0.0.0:0".parse().unwrap();
    assert!(leader.contact_info.rpu != daddr);
    assert!(leader.contact_info.tpu != daddr);
    ThinClient::new(
        leader.requests_addr,
        leader.contact_info.rpu,
        requests_socket,
        leader.transactions_addr,
        leader.contact_info.tpu,
        transactions_socket,
    )
}

fn tx_and_retry_get_balance(
    leader: &ReplicatedData,
fn retry_get_balance(
    client: &mut ThinClient,
    bob_pubkey: &PublicKey,
    expected: Option<i64>,
) -> Option<i64> {
    const LAST: usize = 20;
    for run in 0..(LAST + 1) {
        let out = client.poll_get_balance(bob_pubkey);
        if expected.is_none() || run == LAST {
            return out.ok().clone();
        }
        trace!("retry_get_balance[{}] {:?} {:?}", run, out, expected);
        if let (Some(e), Ok(o)) = (expected, out) {
            if o == e {
                return Some(o);
            }
        }
    }
    None
}

fn send_tx_and_retry_get_balance(
    leader: &NodeInfo,
    alice: &Mint,
    bob_pubkey: &PublicKey,
) -> io::Result<i64> {
    expected: Option<i64>,
) -> Option<i64> {
    let mut client = mk_client(leader);
    trace!("getting leader last_id");
    let last_id = client.get_last_id();
@@ -264,5 +528,5 @@ fn tx_and_retry_get_balance(
    let _sig = client
        .transfer(500, &alice.keypair(), *bob_pubkey, &last_id)
        .unwrap();
    client.poll_get_balance(bob_pubkey)
    retry_get_balance(&mut client, bob_pubkey, expected)
}
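A minimal usage sketch (not part of the diff) of how the two helpers compose in the tests above, reusing the `leader_data`, `alice`, `bob_pubkey`, and `validator_data` bindings from those tests. The transfer amount is fixed at 500 inside `send_tx_and_retry_get_balance`, so `expected` tracks the running total:

```rust
// Sketch only: send one transfer via the leader, then poll another node
// until it reports the same balance (retry_get_balance gives up after
// 20 attempts and returns whatever it last saw).
let leader_balance =
    send_tx_and_retry_get_balance(&leader_data, &alice, &bob_pubkey, Some(500)).unwrap();
assert_eq!(leader_balance, 500);

let mut client = mk_client(&validator_data);
let caught_up = retry_get_balance(&mut client, &bob_pubkey, Some(leader_balance));
assert_eq!(caught_up, Some(leader_balance));
```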