Compare commits
152 Commits
SHA1
e6c3c215ab, 5c66bbde01, 6268d540a8, 3cfb571356, f6e5f2439d, edf6272374, 7f6a4b0ce3, 3be5f25f2f,
1b6cdd5637, f752e55929, ebb089b3f1, ad6303f031, 828b9d6717, 444adcd1ca, 69ac305883, 2ff57df2a0,
7077f4cbe2, 266f85f607, d90ab90145, 48018b3f5b, 15584e7062, d415b17146, 9ed953e8c3, b60a98bd6e,
a15e30d4b3, d5d133353f, 6badc98510, ea8bfb46ce, 58860ed19f, 583f652197, 3215dcff78, 38fdd17067,
807ccd15ba, 1c923d2f9e, 2676b21400, fd5ef94b5a, 02c7eea236, 34d1805b54, 753eaa8266, 0b39c6f98e,
55b8d0db4d, 3d7969d8a2, 041de8082a, 3da1fa4d88, 39df21de30, 8cbb7d7362, 10a0c47210, 89bf3765f3,
8181bc591b, ca877e689c, c6048e2bab, 60015aee04, 43e6741071, b91f6bcbff, 64e2f1b949, 13a2f05776,
903374ae9b, d366a07403, e94921174a, dea5ab2f79, 5e11078f34, d7670cd4ff, 29f3230089, d003efb522,
97e772e87a, 0b33615979, 249cead13e, 7c96dea359, 374c9921fd, fb55ab8c33, 13485074ac, 4944c965e4,
83c5b3bc38, 7fc42de758, 0a30bd74c1, 9b12a79c8d, 0dcde23b05, 8dc15b88eb, d20c952f92, c2eeeb27fd,
180d8b67e4, 9c989c46ee, 51633f509d, 705228ecc2, 740f6d2258, 3b9ef5ccab, ab74e7f24f, be9a670fb7,
6e43e7a146, ab2093926a, 916b90f415, 2ef3db9fab, 6987b6fd58, 078179e9b8, 50ccecdff5, e838a8c28a,
e5f7eeedbf, d1948b5a00, c07f700c53, c934a30f66, 310d01d8a2, f330739bc7, 58626721ad, 584c8c07b8,
a93ec03d2c, 7bd3a8e004, 912a5f951e, 6869089111, 6fd32fe850, 81e2b36d38, 7d811afab1, 39f5aaab8b,
5fc81dd6c8, 491a530d90, c12da50f9b, 41e8500fc5, a7f59ef3c1, f4466c8c0a, bc6d6b20fa, 01326936e6,
c960e8d351, fc69d31914, 8d425e127b, 3cfb07ea38, 76679ffb92, dc2ec925d7, 81d6ba3ec5, 014bdaa355,
0c60fdd2ce, 43d986d14e, 123d7c6a37, 5ac7df17f9, bc0dde696a, c323bd3c87, 5c672adc21, 2f80747dc7,
95749ed0e3, 94eea3abec, fe32159673, 07aa2e1260, 6fec8fad57, 84df487f7d, 49708e92d3, daadae7987,
2b788d06b7, 90cd9bd533, d63506f98c, 17de6876bb, fc540395f9, da2b4962a9, 3abe305a21, 46e8c09bd8
.gitattributes — vendored, new file (1 line)

```diff
@@ -0,0 +1 @@
+*.a filter=lfs diff=lfs merge=lfs -text
```
Cargo.toml (21 lines changed)

```diff
@@ -1,13 +1,14 @@
 [package]
 name = "solana"
 description = "High Performance Blockchain"
-version = "0.4.0"
+version = "0.5.0-beta"
 documentation = "https://docs.rs/solana"
-homepage = "http://loomprotocol.com/"
+homepage = "http://solana.io/"
 repository = "https://github.com/solana-labs/solana"
 authors = [
-    "Anatoly Yakovenko <anatoly@solana.co>",
-    "Greg Fitzgerald <greg@solana.co>",
+    "Anatoly Yakovenko <anatoly@solana.io>",
+    "Greg Fitzgerald <greg@solana.io>",
+    "Stephen Akridge <stephen@solana.io>",
 ]
 license = "Apache-2.0"

@@ -35,12 +36,18 @@ path = "src/bin/genesis-demo.rs"
 name = "solana-mint"
 path = "src/bin/mint.rs"

+[[bin]]
+name = "solana-mint-demo"
+path = "src/bin/mint-demo.rs"
+
 [badges]
 codecov = { repository = "solana-labs/solana", branch = "master", service = "github" }

 [features]
 unstable = []
 ipv6 = []
+cuda = []
+erasure = []

 [dependencies]
 rayon = "1.0.0"
@@ -54,4 +61,10 @@ untrusted = "0.5.1"
 bincode = "1.0.0"
 chrono = { version = "0.4.0", features = ["serde"] }
 log = "^0.4.1"
+env_logger = "^0.4.1"
 matches = "^0.1.6"
+byteorder = "^1.2.1"
+libc = "^0.2.1"
+getopts = "^0.2"
+isatty = "0.1"
+futures = "0.1"
```
LICENSE (2 lines changed)

```diff
@@ -1,4 +1,4 @@
-Copyright 2018 Anatoly Yakovenko <anatoly@loomprotocol.com> and Greg Fitzgerald <garious@gmail.com>
+Copyright 2018 Anatoly Yakovenko, Greg Fitzgerald and Stephen Akridge

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
```
README.md (60 lines changed)

```diff
@@ -14,6 +14,11 @@ Solana: High Performance Blockchain
 Solana™ is a new architecture for a high performance blockchain. It aims to support
 over 700 thousand transactions per second on a gigabit network.

+Introduction
+===
+
+It's possible for a centralized database to process 710,000 transactions per second on a standard gigabit network if the transactions are, on average, no more than 178 bytes. A centralized database can also replicate itself and maintain high availability without significantly compromising that transaction rate using the distributed system technique known as Optimistic Concurrency Control [H.T.Kung, J.T.Robinson (1981)]. At Solana, we're demonstrating that these same theoretical limits apply just as well to blockchain on an adversarial network. The key ingredient? Finding a way to share time when nodes can't trust one another. Once nodes can trust time, suddenly ~40 years of distributed systems research becomes applicable to blockchain! Furthermore, and much to our surprise, it can be implemented using a mechanism that has existed in Bitcoin since day one. The Bitcoin feature is called nLocktime and it can be used to postdate transactions using block height instead of a timestamp. As a Bitcoin client, you'd use block height instead of a timestamp if you don't trust the network. Block height turns out to be an instance of what's being called a Verifiable Delay Function in cryptography circles. It's a cryptographically secure way to say time has passed. In Solana, we use a far more granular verifiable delay function, a SHA 256 hash chain, to checkpoint the ledger and coordinate consensus. With it, we implement Optimistic Concurrency Control and are now well en route towards that theoretical limit of 710,000 transactions per second.
+
 Running the demo
 ===

```
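The hash chain that new paragraph describes is small enough to sketch in code. The following is an illustration only, not code from this changeset; it assumes the `sha2` crate (0.10-style `Digest` API), which this repository does not depend on:

```rust
// Illustrative sketch of a SHA-256 hash chain: each step's input is the
// previous output, so producing `num_hashes` links takes sequential work,
// while anyone holding (seed, num_hashes) can verify by replaying the chain.
use sha2::{Digest, Sha256}; // assumed dependency, not part of this changeset

fn extend_chain(seed: &[u8; 32], num_hashes: u64) -> [u8; 32] {
    let mut id = *seed;
    for _ in 0..num_hashes {
        id = Sha256::digest(&id).into();
    }
    id
}

fn main() {
    let seed = [0u8; 32];
    let checkpoint = extend_chain(&seed, 1_000);
    // Verification is the same sequential replay, so it "proves" time passed.
    assert_eq!(checkpoint, extend_chain(&seed, 1_000));
}
```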
````diff
@@ -24,28 +29,42 @@ $ curl https://sh.rustup.rs -sSf | sh
 $ source $HOME/.cargo/env
 ```

+If you plan to run with GPU optimizations enabled (not recommended), you'll need a CUDA library stored in git LFS. Install git-lfs here:
+
+https://git-lfs.github.com/
+
+Now checkout the code from github:
+
+```bash
+$ git clone https://github.com/solana-labs/solana.git
+$ cd solana
+```
+
 The testnode server is initialized with a ledger from stdin and
 generates new ledger entries on stdout. To create the input ledger, we'll need
 to create *the mint* and use it to generate a *genesis ledger*. It's done in
-two steps because the mint.json file contains a private key that will be
+two steps because the mint-demo.json file contains private keys that will be
 used later in this demo.

 ```bash
-$ echo 1000000000 | cargo run --release --bin solana-mint | tee mint.json
-$ cat mint.json | cargo run --release --bin solana-genesis | tee genesis.log
+$ echo 1000000000 | cargo run --release --bin solana-mint-demo > mint-demo.json
+$ cat mint-demo.json | cargo run --release --bin solana-genesis-demo > genesis.log
 ```

 Now you can start the server:

 ```bash
-$ cat genesis.log | cargo run --release --bin solana-testnode | tee transactions0.log
+$ cat genesis.log | cargo run --release --bin solana-testnode > transactions0.log
 ```

+Wait a few seconds for the server to initialize. It will print "Ready." when it's safe
+to start sending it transactions.
+
 Then, in a separate shell, let's execute some transactions. Note we pass in
 the JSON configuration file here, not the genesis ledger.

 ```bash
-$ cat mint.json | cargo run --release --bin solana-client-demo
+$ cat mint-demo.json | cargo run --release --bin solana-client-demo
 ```

 Now kill the server with Ctrl-C, and take a look at the ledger. You should
@@ -61,14 +80,14 @@ Now restart the server from where we left off. Pass it both the genesis ledger,
 the transaction ledger.

 ```bash
-$ cat genesis.log transactions0.log | cargo run --release --bin solana-testnode | tee transactions1.log
+$ cat genesis.log transactions0.log | cargo run --release --bin solana-testnode > transactions1.log
 ```

 Lastly, run the client demo again, and verify that all funds were spent in the
 previous round, and so no additional transactions are added.

 ```bash
-$ cat mint.json | cargo run --release --bin solana-client-demo
+$ cat mint-demo.json | cargo run --release --bin solana-client-demo
 ```

 Stop the server again, and verify there are only Tick entries, and no Transaction entries.
@@ -117,3 +136,30 @@ Run the benchmarks:
 ```bash
 $ cargo +nightly bench --features="unstable"
 ```
+
+To run the benchmarks on Linux with GPU optimizations enabled:
+
+```bash
+$ cargo +nightly bench --features="unstable,cuda"
+```
+
+Code coverage
+---
+
+To generate code coverage statistics, run kcov via Docker:
+
+```bash
+$ docker run -it --rm --security-opt seccomp=unconfined --volume "$PWD:/volume" elmtai/docker-rust-kcov
+```
+
+Why coverage? While most see coverage as a code quality metric, we see it primarily as a developer
+productivity metric. When a developer makes a change to the codebase, presumably it's a *solution* to
+some problem. Our unit-test suite is how we encode the set of *problems* the codebase solves. Running
+the test suite should indicate that your change didn't *infringe* on anyone else's solutions. Adding a
+test *protects* your solution from future changes. Say you don't understand why a line of code exists;
+try deleting it and running the unit-tests. The nearest test failure should tell you what problem
+was solved by that code. If no test fails, go ahead and submit a Pull Request that asks, "what
+problem is solved by this code?" On the other hand, if a test does fail and you can think of a
+better way to solve the same problem, a Pull Request with your solution would most certainly be
+welcome! Likewise, if rewriting a test can better communicate what code it's protecting, please
+send us that patch!
````
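The demo asks the reader to eyeball the ledger for Tick versus Transaction entries. A hedged sketch of doing that check programmatically; it assumes each log line is one JSON-serialized entry whose `events` array matches the `Entry` fields (`id`, `num_hashes`, `events`) visible in the diffs below — the exact serialized field names are an assumption, not documented by this changeset:

```rust
// Tally ledger lines by whether they carry events. An entry with an empty
// "events" array is treated as a Tick; anything else carries transactions.
use std::fs::File;
use std::io::{BufRead, BufReader};

fn main() -> std::io::Result<()> {
    let reader = BufReader::new(File::open("transactions0.log")?);
    let (mut ticks, mut with_events) = (0u64, 0u64);
    for line in reader.lines() {
        let v: serde_json::Value = serde_json::from_str(&line?).expect("valid JSON entry");
        let n = v["events"].as_array().map_or(0, |a| a.len());
        if n == 0 { ticks += 1 } else { with_events += 1 }
    }
    println!("{} tick entries, {} entries with events", ticks, with_events);
    Ok(())
}
```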
build.rs — new file (15 lines)

```rust
use std::env;

fn main() {
    println!("cargo:rustc-link-search=native=.");
    if !env::var("CARGO_FEATURE_CUDA").is_err() {
        println!("cargo:rustc-link-lib=static=cuda_verify_ed25519");
        println!("cargo:rustc-link-search=native=/usr/local/cuda/lib64");
        println!("cargo:rustc-link-lib=dylib=cudart");
        println!("cargo:rustc-link-lib=dylib=cuda");
        println!("cargo:rustc-link-lib=dylib=cudadevrt");
    }
    if !env::var("CARGO_FEATURE_ERASURE").is_err() {
        println!("cargo:rustc-link-lib=dylib=Jerasure");
    }
}
```
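Both guards above use `!env::var(...).is_err()`, a double negative for `.is_ok()`. Cargo exports a `CARGO_FEATURE_<NAME>` environment variable to the build script for every enabled feature, which is what makes `--features cuda` flip these branches. A condensed restatement (not part of the changeset) of the same check in its more direct form:

```rust
// Equivalent feature probes: env::var(...).is_ok() is true exactly when
// Cargo set the variable, i.e. when the corresponding feature is enabled.
use std::env;

fn main() {
    println!("cargo:rustc-link-search=native=.");
    if env::var("CARGO_FEATURE_CUDA").is_ok() {
        println!("cargo:rustc-link-lib=static=cuda_verify_ed25519");
    }
    if env::var("CARGO_FEATURE_ERASURE").is_ok() {
        println!("cargo:rustc-link-lib=dylib=Jerasure");
    }
}
```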
````diff
@@ -11,7 +11,7 @@ with by verifying each entry's hash can be generated from the hash in the previo
 extern crate solana;

 use solana::historian::Historian;
-use solana::ledger::{verify_slice, Entry, Hash};
+use solana::ledger::{Block, Entry, Hash};
 use solana::event::{generate_keypair, get_pubkey, sign_claim_data, Event};
 use std::thread::sleep;
 use std::time::Duration;
@@ -38,7 +38,7 @@ fn main() {
     }
     // Proof-of-History: Verify the historian learned about the events
     // in the same order they appear in the vector.
-    assert!(verify_slice(&entries, &seed));
+    assert!(entries[..].verify(&seed));
 }
 ```

@@ -56,7 +56,7 @@ Proof-of-History
 Take note of the last line:

 ```rust
-assert!(verify_slice(&entries, &seed));
+assert!(entries[..].verify(&seed));
 ```

 [It's a proof!](https://en.wikipedia.org/wiki/Curry–Howard_correspondence) For each entry returned by the
````
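The `verify_slice(&entries, &seed)` to `entries[..].verify(&seed)` rename suggests `verify` moved from a free function onto a trait (imported above as `Block`) implemented for entry slices. A self-contained sketch of that shape, with stand-in types and a placeholder check rather than the real hash verification:

```rust
// Stand-in types: the real Entry carries a hash and the real check replays
// the hash chain. Only the trait-on-slice pattern is the point here.
type Hash = u64;

struct Entry { id: Hash }

trait Block {
    fn verify(&self, start_hash: &Hash) -> bool;
}

impl Block for [Entry] {
    fn verify(&self, start_hash: &Hash) -> bool {
        let mut last = *start_hash;
        self.iter().all(|e| {
            let ok = e.id >= last; // placeholder for the real hash check
            last = e.id;
            ok
        })
    }
}

fn main() {
    let entries = vec![Entry { id: 1 }, Entry { id: 2 }];
    // Same call shape as the docs above: a method on the slice itself.
    assert!(entries[..].verify(&0));
}
```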
```diff
@@ -14,5 +14,5 @@ msc {
   recorder=>historian [ label = "e2 = Entry{id: h6, n: 3, event: Tick}" ] ;
   client=>historian [ label = "collect()" ] ;
   historian=>client [ label = "entries = [e0, e1, e2]" ] ;
-  client=>client [ label = "verify_slice(entries, h0)" ] ;
+  client=>client [ label = "entries.verify(h0)" ] ;
 }
```
```diff
@@ -3,157 +3,218 @@
 //! on behalf of the caller, and a private low-level API for when they have
 //! already been signed and verified.

+extern crate libc;
+
 use chrono::prelude::*;
-use entry::Entry;
 use event::Event;
 use hash::Hash;
-use historian::Historian;
 use mint::Mint;
-use plan::{Plan, Witness};
+use plan::{Payment, Plan, Witness};
-use recorder::Signal;
+use rayon::prelude::*;
 use signature::{KeyPair, PublicKey, Signature};
 use std::collections::hash_map::Entry::Occupied;
-use std::collections::{HashMap, HashSet};
+use std::collections::{HashMap, HashSet, VecDeque};
 use std::result;
-use std::sync::mpsc::SendError;
+use std::sync::RwLock;
 use transaction::Transaction;

+pub const MAX_ENTRY_IDS: usize = 1024 * 4;
+
 #[derive(Debug, PartialEq, Eq)]
 pub enum AccountingError {
+    AccountNotFound,
     InsufficientFunds,
-    InvalidTransfer,
     InvalidTransferSignature,
-    SendError,
 }

 pub type Result<T> = result::Result<T, AccountingError>;

 /// Commit funds to the 'to' party.
-fn complete_transaction(balances: &mut HashMap<PublicKey, i64>, plan: &Plan) {
-    if let Plan::Pay(ref payment) = *plan {
-        *balances.entry(payment.to).or_insert(0) += payment.tokens;
+fn apply_payment(balances: &RwLock<HashMap<PublicKey, RwLock<i64>>>, payment: &Payment) {
+    if balances.read().unwrap().contains_key(&payment.to) {
+        let bals = balances.read().unwrap();
+        *bals[&payment.to].write().unwrap() += payment.tokens;
+    } else {
+        let mut bals = balances.write().unwrap();
+        bals.insert(payment.to, RwLock::new(payment.tokens));
     }
 }

 pub struct Accountant {
-    pub historian: Historian,
-    pub balances: HashMap<PublicKey, i64>,
-    pub first_id: Hash,
-    pending: HashMap<Signature, Plan>,
-    time_sources: HashSet<PublicKey>,
-    last_time: DateTime<Utc>,
+    balances: RwLock<HashMap<PublicKey, RwLock<i64>>>,
+    pending: RwLock<HashMap<Signature, Plan>>,
+    last_ids: RwLock<VecDeque<(Hash, RwLock<HashSet<Signature>>)>>,
+    time_sources: RwLock<HashSet<PublicKey>>,
+    last_time: RwLock<DateTime<Utc>>,
 }

 impl Accountant {
-    /// Create an Accountant using an existing ledger.
-    pub fn new_from_entries<I>(entries: I, ms_per_tick: Option<u64>) -> Self
-    where
-        I: IntoIterator<Item = Entry>,
-    {
-        let mut entries = entries.into_iter();
-
-        // The first item in the ledger is required to be an entry with zero num_hashes,
-        // which implies its id can be used as the ledger's seed.
-        let entry0 = entries.next().unwrap();
-        let start_hash = entry0.id;
-
-        let hist = Historian::new(&start_hash, ms_per_tick);
-        let mut acc = Accountant {
-            historian: hist,
-            balances: HashMap::new(),
-            first_id: start_hash,
-            pending: HashMap::new(),
-            time_sources: HashSet::new(),
-            last_time: Utc.timestamp(0, 0),
-        };
-
-        // The second item in the ledger is a special transaction where the to and from
-        // fields are the same. That entry should be treated as a deposit, not a
-        // transfer to oneself.
-        let entry1 = entries.next().unwrap();
-        acc.process_verified_event(&entry1.events[0], true).unwrap();
-
-        for entry in entries {
-            for event in entry.events {
-                acc.process_verified_event(&event, false).unwrap();
-            }
-        }
-        acc
-    }
+    /// Create an Accountant using a deposit.
+    pub fn new_from_deposit(deposit: &Payment) -> Self {
+        let balances = RwLock::new(HashMap::new());
+        apply_payment(&balances, deposit);
+        Accountant {
+            balances,
+            pending: RwLock::new(HashMap::new()),
+            last_ids: RwLock::new(VecDeque::new()),
+            time_sources: RwLock::new(HashSet::new()),
+            last_time: RwLock::new(Utc.timestamp(0, 0)),
+        }
+    }

     /// Create an Accountant with only a Mint. Typically used by unit tests.
-    pub fn new(mint: &Mint, ms_per_tick: Option<u64>) -> Self {
-        Self::new_from_entries(mint.create_entries(), ms_per_tick)
+    pub fn new(mint: &Mint) -> Self {
+        let deposit = Payment {
+            to: mint.pubkey(),
+            tokens: mint.tokens,
+        };
+        let acc = Self::new_from_deposit(&deposit);
+        acc.register_entry_id(&mint.last_id());
+        acc
     }

-    fn is_deposit(allow_deposits: bool, from: &PublicKey, plan: &Plan) -> bool {
-        if let Plan::Pay(ref payment) = *plan {
-            allow_deposits && *from == payment.to
-        } else {
+    fn reserve_signature(signatures: &RwLock<HashSet<Signature>>, sig: &Signature) -> bool {
+        if signatures.read().unwrap().contains(sig) {
+            return false;
+        }
+        signatures.write().unwrap().insert(*sig);
+        true
+    }
+
+    fn forget_signature(signatures: &RwLock<HashSet<Signature>>, sig: &Signature) -> bool {
+        signatures.write().unwrap().remove(sig)
+    }
+
+    fn forget_signature_with_last_id(&self, sig: &Signature, last_id: &Hash) -> bool {
+        if let Some(entry) = self.last_ids
+            .read()
+            .unwrap()
+            .iter()
+            .rev()
+            .find(|x| x.0 == *last_id)
+        {
+            return Self::forget_signature(&entry.1, sig);
+        }
+        return false;
+    }
+
+    fn reserve_signature_with_last_id(&self, sig: &Signature, last_id: &Hash) -> bool {
+        if let Some(entry) = self.last_ids
+            .read()
+            .unwrap()
+            .iter()
+            .rev()
+            .find(|x| x.0 == *last_id)
+        {
+            return Self::reserve_signature(&entry.1, sig);
+        }
         false
     }

+    /// Tell the accountant which Entry IDs exist on the ledger. This function
+    /// assumes subsequent calls correspond to later entries, and will boot
+    /// the oldest ones once its internal cache is full. Once booted, the
+    /// accountant will reject transactions using that `last_id`.
+    pub fn register_entry_id(&self, last_id: &Hash) {
+        let mut last_ids = self.last_ids.write().unwrap();
+        if last_ids.len() >= MAX_ENTRY_IDS {
+            last_ids.pop_front();
+        }
+        last_ids.push_back((*last_id, RwLock::new(HashSet::new())));
+    }

-    /// Process and log the given Transaction.
-    pub fn log_verified_transaction(&mut self, tr: Transaction) -> Result<()> {
-        if self.get_balance(&tr.from).unwrap_or(0) < tr.tokens {
+    /// Deduct tokens from the 'from' address if the account has sufficient
+    /// funds and isn't a duplicate.
+    pub fn process_verified_transaction_debits(&self, tr: &Transaction) -> Result<()> {
+        let bals = self.balances.read().unwrap();
+
+        // Hold a write lock before the condition check, so that a debit can't occur
+        // between checking the balance and the withdraw.
+        let option = bals.get(&tr.from);
+        if option.is_none() {
+            return Err(AccountingError::AccountNotFound);
+        }
+        let mut bal = option.unwrap().write().unwrap();
+
+        if !self.reserve_signature_with_last_id(&tr.sig, &tr.data.last_id) {
+            return Err(AccountingError::InvalidTransferSignature);
+        }
+
+        if *bal < tr.data.tokens {
+            self.forget_signature_with_last_id(&tr.sig, &tr.data.last_id);
             return Err(AccountingError::InsufficientFunds);
         }
-        self.process_verified_transaction(&tr, false)?;
-        if let Err(SendError(_)) = self.historian
-            .sender
-            .send(Signal::Event(Event::Transaction(tr)))
-        {
-            return Err(AccountingError::SendError);
-        }
+
+        *bal -= tr.data.tokens;
+
         Ok(())
     }

-    /// Verify and process the given Transaction.
-    pub fn log_transaction(&mut self, tr: Transaction) -> Result<()> {
-        if !tr.verify() {
-            return Err(AccountingError::InvalidTransfer);
-        }
-        self.log_verified_transaction(tr)
+    pub fn process_verified_transaction_credits(&self, tr: &Transaction) {
+        let mut plan = tr.data.plan.clone();
+        plan.apply_witness(&Witness::Timestamp(*self.last_time.read().unwrap()));
+
+        if let Some(ref payment) = plan.final_payment() {
+            apply_payment(&self.balances, payment);
+        } else {
+            let mut pending = self.pending.write().unwrap();
+            pending.insert(tr.sig, plan);
+        }
     }

     /// Process a Transaction that has already been verified.
-    fn process_verified_transaction(
-        self: &mut Self,
-        tr: &Transaction,
-        allow_deposits: bool,
-    ) -> Result<()> {
-        if !self.historian.reserve_signature(&tr.sig) {
-            return Err(AccountingError::InvalidTransferSignature);
-        }
-
-        if !Self::is_deposit(allow_deposits, &tr.from, &tr.plan) {
-            if let Some(x) = self.balances.get_mut(&tr.from) {
-                *x -= tr.tokens;
-            }
-        }
-
-        let mut plan = tr.plan.clone();
-        plan.apply_witness(&Witness::Timestamp(self.last_time));
-
-        if plan.is_complete() {
-            complete_transaction(&mut self.balances, &plan);
-        } else {
-            self.pending.insert(tr.sig, plan);
-        }
+    pub fn process_verified_transaction(&self, tr: &Transaction) -> Result<()> {
+        self.process_verified_transaction_debits(tr)?;
+        self.process_verified_transaction_credits(tr);
+        Ok(())
+    }
+
+    /// Process a batch of verified transactions.
+    pub fn process_verified_transactions(&self, trs: Vec<Transaction>) -> Vec<Result<Transaction>> {
+        // Run all debits first to filter out any transactions that can't be processed
+        // in parallel deterministically.
+        let results: Vec<_> = trs.into_par_iter()
+            .map(|tr| self.process_verified_transaction_debits(&tr).map(|_| tr))
+            .collect(); // Calling collect() here forces all debits to complete before moving on.
+
+        results
+            .into_par_iter()
+            .map(|result| {
+                result.map(|tr| {
+                    self.process_verified_transaction_credits(&tr);
+                    tr
+                })
+            })
+            .collect()
+    }
+
+    fn partition_events(events: Vec<Event>) -> (Vec<Transaction>, Vec<Event>) {
+        let mut trs = vec![];
+        let mut rest = vec![];
+        for event in events {
+            match event {
+                Event::Transaction(tr) => trs.push(tr),
+                _ => rest.push(event),
+            }
+        }
+        (trs, rest)
+    }
+
+    pub fn process_verified_events(&self, events: Vec<Event>) -> Result<()> {
+        let (trs, rest) = Self::partition_events(events);
+        self.process_verified_transactions(trs);
+        for event in rest {
+            self.process_verified_event(&event)?;
+        }
         Ok(())
     }

     /// Process a Witness Signature that has already been verified.
-    fn process_verified_sig(&mut self, from: PublicKey, tx_sig: Signature) -> Result<()> {
-        if let Occupied(mut e) = self.pending.entry(tx_sig) {
+    fn process_verified_sig(&self, from: PublicKey, tx_sig: Signature) -> Result<()> {
+        if let Occupied(mut e) = self.pending.write().unwrap().entry(tx_sig) {
             e.get_mut().apply_witness(&Witness::Signature(from));
-            if e.get().is_complete() {
-                complete_transaction(&mut self.balances, e.get());
+            if let Some(ref payment) = e.get().final_payment() {
+                apply_payment(&self.balances, payment);
                 e.remove_entry();
             }
         };
```
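The new `apply_payment` and the `balances` field introduce two-level locking: an outer `RwLock` around the map, taken briefly for lookup or insert, plus an inner `RwLock` per balance so that payments to *different* accounts can proceed concurrently. A minimal runnable sketch of the same pattern with stub types (not the changeset's code):

```rust
use std::collections::HashMap;
use std::sync::RwLock;

type PublicKey = u64; // stand-in for the real key type

fn credit(balances: &RwLock<HashMap<PublicKey, RwLock<i64>>>, to: PublicKey, tokens: i64) {
    if balances.read().unwrap().contains_key(&to) {
        // Account exists: only the per-account lock is taken for writing,
        // so credits to other accounts are not blocked.
        let bals = balances.read().unwrap();
        *bals[&to].write().unwrap() += tokens;
    } else {
        // New account: the map itself must be locked for writing.
        balances.write().unwrap().insert(to, RwLock::new(tokens));
    }
}

fn main() {
    let balances = RwLock::new(HashMap::new());
    credit(&balances, 42, 100);
    credit(&balances, 42, 50);
    assert_eq!(*balances.read().unwrap()[&42].read().unwrap(), 150);
}
```

One caveat of the pattern, here and in the hunk above: the `contains_key` check and the subsequent insert happen under separate lock acquisitions, so two concurrent first-time credits to the same new key can race to insert.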
```diff
@@ -162,16 +223,16 @@ impl Accountant {
     }

     /// Process a Witness Timestamp that has already been verified.
-    fn process_verified_timestamp(&mut self, from: PublicKey, dt: DateTime<Utc>) -> Result<()> {
+    fn process_verified_timestamp(&self, from: PublicKey, dt: DateTime<Utc>) -> Result<()> {
         // If this is the first timestamp we've seen, it probably came from the genesis block,
         // so we'll trust it.
-        if self.last_time == Utc.timestamp(0, 0) {
-            self.time_sources.insert(from);
+        if *self.last_time.read().unwrap() == Utc.timestamp(0, 0) {
+            self.time_sources.write().unwrap().insert(from);
         }

-        if self.time_sources.contains(&from) {
-            if dt > self.last_time {
-                self.last_time = dt;
+        if self.time_sources.read().unwrap().contains(&from) {
+            if dt > *self.last_time.read().unwrap() {
+                *self.last_time.write().unwrap() = dt;
             }
         } else {
             return Ok(());
@@ -179,25 +240,29 @@ impl Accountant {

         // Check to see if any timelocked transactions can be completed.
         let mut completed = vec![];
-        for (key, plan) in &mut self.pending {
-            plan.apply_witness(&Witness::Timestamp(self.last_time));
-            if plan.is_complete() {
-                complete_transaction(&mut self.balances, plan);
+
+        // Hold 'pending' write lock until the end of this function. Otherwise another thread can
+        // double-spend if it enters before the modified plan is removed from 'pending'.
+        let mut pending = self.pending.write().unwrap();
+        for (key, plan) in pending.iter_mut() {
+            plan.apply_witness(&Witness::Timestamp(*self.last_time.read().unwrap()));
+            if let Some(ref payment) = plan.final_payment() {
+                apply_payment(&self.balances, payment);
                 completed.push(key.clone());
             }
         }

         for key in completed {
-            self.pending.remove(&key);
+            pending.remove(&key);
         }

         Ok(())
     }

     /// Process a Transaction or Witness that has already been verified.
-    fn process_verified_event(self: &mut Self, event: &Event, allow_deposits: bool) -> Result<()> {
+    pub fn process_verified_event(&self, event: &Event) -> Result<()> {
         match *event {
-            Event::Transaction(ref tr) => self.process_verified_transaction(tr, allow_deposits),
+            Event::Transaction(ref tr) => self.process_verified_transaction(tr),
             Event::Signature { from, tx_sig, .. } => self.process_verified_sig(from, tx_sig),
             Event::Timestamp { from, dt, .. } => self.process_verified_timestamp(from, dt),
         }
```
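The "hold 'pending' write lock until the end of this function" comment is the crux of that hunk: if the lock were released between paying out a completed plan and removing it from `pending`, a second thread could observe and pay the same plan again. A toy sketch of the guarded check-pay-remove sequence, with stub types standing in for the real `HashMap<Signature, Plan>`:

```rust
use std::collections::HashMap;
use std::sync::{Mutex, RwLock};

fn settle(pending: &RwLock<HashMap<u64, i64>>, payouts: &Mutex<i64>) {
    // One write-lock acquisition covers both the payout and the removal,
    // so no other thread can interleave between them.
    let mut pending = pending.write().unwrap();
    let done: Vec<u64> = pending
        .iter()
        .filter(|&(_, amount)| *amount > 0)
        .map(|(k, amount)| {
            *payouts.lock().unwrap() += *amount; // pay exactly once
            *k
        })
        .collect();
    for k in done {
        pending.remove(&k);
    }
}

fn main() {
    let pending = RwLock::new(vec![(1u64, 5i64)].into_iter().collect());
    let payouts = Mutex::new(0);
    settle(&pending, &payouts);
    settle(&pending, &payouts); // second pass finds nothing left to pay
    assert_eq!(*payouts.lock().unwrap(), 5);
}
```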
```diff
@@ -206,7 +271,7 @@ impl Accountant {
     /// Create, sign, and process a Transaction from `keypair` to `to` of
     /// `n` tokens where `last_id` is the last Entry ID observed by the client.
     pub fn transfer(
-        self: &mut Self,
+        &self,
         n: i64,
         keypair: &KeyPair,
         to: PublicKey,
@@ -214,14 +279,14 @@ impl Accountant {
     ) -> Result<Signature> {
         let tr = Transaction::new(keypair, to, n, last_id);
         let sig = tr.sig;
-        self.log_transaction(tr).map(|_| sig)
+        self.process_verified_transaction(&tr).map(|_| sig)
     }

     /// Create, sign, and process a postdated Transaction from `keypair`
     /// to `to` of `n` tokens on `dt` where `last_id` is the last Entry ID
     /// observed by the client.
     pub fn transfer_on_date(
-        self: &mut Self,
+        &self,
         n: i64,
         keypair: &KeyPair,
         to: PublicKey,
```
|
|||||||
) -> Result<Signature> {
|
) -> Result<Signature> {
|
||||||
let tr = Transaction::new_on_date(keypair, to, dt, n, last_id);
|
let tr = Transaction::new_on_date(keypair, to, dt, n, last_id);
|
||||||
let sig = tr.sig;
|
let sig = tr.sig;
|
||||||
self.log_transaction(tr).map(|_| sig)
|
self.process_verified_transaction(&tr).map(|_| sig)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn get_balance(self: &Self, pubkey: &PublicKey) -> Option<i64> {
|
pub fn get_balance(&self, pubkey: &PublicKey) -> Option<i64> {
|
||||||
self.balances.get(pubkey).cloned()
|
let bals = self.balances.read().unwrap();
|
||||||
|
bals.get(pubkey).map(|x| *x.read().unwrap())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod tests {
|
mod tests {
|
||||||
use super::*;
|
use super::*;
|
||||||
use recorder::ExitReason;
|
use bincode::serialize;
|
||||||
|
use hash::hash;
|
||||||
use signature::KeyPairUtil;
|
use signature::KeyPairUtil;
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_accountant() {
|
fn test_accountant() {
|
||||||
let alice = Mint::new(10_000);
|
let alice = Mint::new(10_000);
|
||||||
let bob_pubkey = KeyPair::new().pubkey();
|
let bob_pubkey = KeyPair::new().pubkey();
|
||||||
let mut acc = Accountant::new(&alice, Some(2));
|
let acc = Accountant::new(&alice);
|
||||||
acc.transfer(1_000, &alice.keypair(), bob_pubkey, alice.seed())
|
acc.transfer(1_000, &alice.keypair(), bob_pubkey, alice.last_id())
|
||||||
.unwrap();
|
.unwrap();
|
||||||
assert_eq!(acc.get_balance(&bob_pubkey).unwrap(), 1_000);
|
assert_eq!(acc.get_balance(&bob_pubkey).unwrap(), 1_000);
|
||||||
|
|
||||||
acc.transfer(500, &alice.keypair(), bob_pubkey, alice.seed())
|
acc.transfer(500, &alice.keypair(), bob_pubkey, alice.last_id())
|
||||||
.unwrap();
|
.unwrap();
|
||||||
assert_eq!(acc.get_balance(&bob_pubkey).unwrap(), 1_500);
|
assert_eq!(acc.get_balance(&bob_pubkey).unwrap(), 1_500);
|
||||||
|
}
|
||||||
|
|
||||||
drop(acc.historian.sender);
|
#[test]
|
||||||
|
fn test_account_not_found() {
|
||||||
|
let mint = Mint::new(1);
|
||||||
|
let acc = Accountant::new(&mint);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
acc.historian.thread_hdl.join().unwrap(),
|
acc.transfer(1, &KeyPair::new(), mint.pubkey(), mint.last_id()),
|
||||||
ExitReason::RecvDisconnected
|
Err(AccountingError::AccountNotFound)
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_invalid_transfer() {
|
fn test_invalid_transfer() {
|
||||||
let alice = Mint::new(11_000);
|
let alice = Mint::new(11_000);
|
||||||
let mut acc = Accountant::new(&alice, Some(2));
|
let acc = Accountant::new(&alice);
|
||||||
let bob_pubkey = KeyPair::new().pubkey();
|
let bob_pubkey = KeyPair::new().pubkey();
|
||||||
acc.transfer(1_000, &alice.keypair(), bob_pubkey, alice.seed())
|
acc.transfer(1_000, &alice.keypair(), bob_pubkey, alice.last_id())
|
||||||
.unwrap();
|
.unwrap();
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
acc.transfer(10_001, &alice.keypair(), bob_pubkey, alice.seed()),
|
acc.transfer(10_001, &alice.keypair(), bob_pubkey, alice.last_id()),
|
||||||
Err(AccountingError::InsufficientFunds)
|
Err(AccountingError::InsufficientFunds)
|
||||||
);
|
);
|
||||||
|
|
||||||
let alice_pubkey = alice.keypair().pubkey();
|
let alice_pubkey = alice.keypair().pubkey();
|
||||||
assert_eq!(acc.get_balance(&alice_pubkey).unwrap(), 10_000);
|
assert_eq!(acc.get_balance(&alice_pubkey).unwrap(), 10_000);
|
||||||
assert_eq!(acc.get_balance(&bob_pubkey).unwrap(), 1_000);
|
assert_eq!(acc.get_balance(&bob_pubkey).unwrap(), 1_000);
|
||||||
|
|
||||||
drop(acc.historian.sender);
|
|
||||||
assert_eq!(
|
|
||||||
acc.historian.thread_hdl.join().unwrap(),
|
|
||||||
ExitReason::RecvDisconnected
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_overspend_attack() {
|
|
||||||
let alice = Mint::new(1);
|
|
||||||
let mut acc = Accountant::new(&alice, None);
|
|
||||||
let bob_pubkey = KeyPair::new().pubkey();
|
|
||||||
let mut tr = Transaction::new(&alice.keypair(), bob_pubkey, 1, alice.seed());
|
|
||||||
if let Plan::Pay(ref mut payment) = tr.plan {
|
|
||||||
payment.tokens = 2; // <-- attack!
|
|
||||||
}
|
|
||||||
assert_eq!(
|
|
||||||
acc.log_transaction(tr.clone()),
|
|
||||||
Err(AccountingError::InvalidTransfer)
|
|
||||||
);
|
|
||||||
|
|
||||||
// Also, ensure all branchs of the plan spend all tokens
|
|
||||||
if let Plan::Pay(ref mut payment) = tr.plan {
|
|
||||||
payment.tokens = 0; // <-- whoops!
|
|
||||||
}
|
|
||||||
assert_eq!(
|
|
||||||
acc.log_transaction(tr.clone()),
|
|
||||||
Err(AccountingError::InvalidTransfer)
|
|
||||||
);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_transfer_to_newb() {
|
fn test_transfer_to_newb() {
|
||||||
let alice = Mint::new(10_000);
|
let alice = Mint::new(10_000);
|
||||||
let mut acc = Accountant::new(&alice, Some(2));
|
let acc = Accountant::new(&alice);
|
||||||
let alice_keypair = alice.keypair();
|
let alice_keypair = alice.keypair();
|
||||||
let bob_pubkey = KeyPair::new().pubkey();
|
let bob_pubkey = KeyPair::new().pubkey();
|
||||||
acc.transfer(500, &alice_keypair, bob_pubkey, alice.seed())
|
acc.transfer(500, &alice_keypair, bob_pubkey, alice.last_id())
|
||||||
.unwrap();
|
.unwrap();
|
||||||
assert_eq!(acc.get_balance(&bob_pubkey).unwrap(), 500);
|
assert_eq!(acc.get_balance(&bob_pubkey).unwrap(), 500);
|
||||||
|
|
||||||
drop(acc.historian.sender);
|
|
||||||
assert_eq!(
|
|
||||||
acc.historian.thread_hdl.join().unwrap(),
|
|
||||||
ExitReason::RecvDisconnected
|
|
||||||
);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_transfer_on_date() {
|
fn test_transfer_on_date() {
|
||||||
let alice = Mint::new(1);
|
let alice = Mint::new(1);
|
||||||
let mut acc = Accountant::new(&alice, Some(2));
|
let acc = Accountant::new(&alice);
|
||||||
let alice_keypair = alice.keypair();
|
let alice_keypair = alice.keypair();
|
||||||
let bob_pubkey = KeyPair::new().pubkey();
|
let bob_pubkey = KeyPair::new().pubkey();
|
||||||
let dt = Utc::now();
|
let dt = Utc::now();
|
||||||
acc.transfer_on_date(1, &alice_keypair, bob_pubkey, dt, alice.seed())
|
acc.transfer_on_date(1, &alice_keypair, bob_pubkey, dt, alice.last_id())
|
||||||
.unwrap();
|
.unwrap();
|
||||||
|
|
||||||
// Alice's balance will be zero because all funds are locked up.
|
// Alice's balance will be zero because all funds are locked up.
|
||||||
@ -357,14 +392,14 @@ mod tests {
|
|||||||
#[test]
|
#[test]
|
||||||
fn test_transfer_after_date() {
|
fn test_transfer_after_date() {
|
||||||
let alice = Mint::new(1);
|
let alice = Mint::new(1);
|
||||||
let mut acc = Accountant::new(&alice, Some(2));
|
let acc = Accountant::new(&alice);
|
||||||
let alice_keypair = alice.keypair();
|
let alice_keypair = alice.keypair();
|
||||||
let bob_pubkey = KeyPair::new().pubkey();
|
let bob_pubkey = KeyPair::new().pubkey();
|
||||||
let dt = Utc::now();
|
let dt = Utc::now();
|
||||||
acc.process_verified_timestamp(alice.pubkey(), dt).unwrap();
|
acc.process_verified_timestamp(alice.pubkey(), dt).unwrap();
|
||||||
|
|
||||||
// It's now past now, so this transfer should be processed immediately.
|
// It's now past now, so this transfer should be processed immediately.
|
||||||
acc.transfer_on_date(1, &alice_keypair, bob_pubkey, dt, alice.seed())
|
acc.transfer_on_date(1, &alice_keypair, bob_pubkey, dt, alice.last_id())
|
||||||
.unwrap();
|
.unwrap();
|
||||||
|
|
||||||
assert_eq!(acc.get_balance(&alice.pubkey()), Some(0));
|
assert_eq!(acc.get_balance(&alice.pubkey()), Some(0));
|
||||||
@ -374,11 +409,11 @@ mod tests {
|
|||||||
#[test]
|
#[test]
|
||||||
fn test_cancel_transfer() {
|
fn test_cancel_transfer() {
|
||||||
let alice = Mint::new(1);
|
let alice = Mint::new(1);
|
||||||
let mut acc = Accountant::new(&alice, Some(2));
|
let acc = Accountant::new(&alice);
|
||||||
let alice_keypair = alice.keypair();
|
let alice_keypair = alice.keypair();
|
||||||
let bob_pubkey = KeyPair::new().pubkey();
|
let bob_pubkey = KeyPair::new().pubkey();
|
||||||
let dt = Utc::now();
|
let dt = Utc::now();
|
||||||
let sig = acc.transfer_on_date(1, &alice_keypair, bob_pubkey, dt, alice.seed())
|
let sig = acc.transfer_on_date(1, &alice_keypair, bob_pubkey, dt, alice.last_id())
|
||||||
.unwrap();
|
.unwrap();
|
||||||
|
|
||||||
// Alice's balance will be zero because all funds are locked up.
|
// Alice's balance will be zero because all funds are locked up.
|
||||||
@ -396,4 +431,96 @@ mod tests {
|
|||||||
acc.process_verified_sig(alice.pubkey(), sig).unwrap(); // <-- Attack! Attempt to cancel completed transaction.
|
acc.process_verified_sig(alice.pubkey(), sig).unwrap(); // <-- Attack! Attempt to cancel completed transaction.
|
||||||
assert_ne!(acc.get_balance(&alice.pubkey()), Some(2));
|
assert_ne!(acc.get_balance(&alice.pubkey()), Some(2));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_duplicate_event_signature() {
|
||||||
|
let alice = Mint::new(1);
|
||||||
|
let acc = Accountant::new(&alice);
|
||||||
|
let sig = Signature::default();
|
||||||
|
assert!(acc.reserve_signature_with_last_id(&sig, &alice.last_id()));
|
||||||
|
assert!(!acc.reserve_signature_with_last_id(&sig, &alice.last_id()));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_forget_signature() {
|
||||||
|
let alice = Mint::new(1);
|
||||||
|
let acc = Accountant::new(&alice);
|
||||||
|
let sig = Signature::default();
|
||||||
|
acc.reserve_signature_with_last_id(&sig, &alice.last_id());
|
||||||
|
assert!(acc.forget_signature_with_last_id(&sig, &alice.last_id()));
|
||||||
|
assert!(!acc.forget_signature_with_last_id(&sig, &alice.last_id()));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_max_entry_ids() {
|
||||||
|
let alice = Mint::new(1);
|
||||||
|
let acc = Accountant::new(&alice);
|
||||||
|
let sig = Signature::default();
|
||||||
|
for i in 0..MAX_ENTRY_IDS {
|
||||||
|
let last_id = hash(&serialize(&i).unwrap()); // Unique hash
|
||||||
|
acc.register_entry_id(&last_id);
|
||||||
|
}
|
||||||
|
// Assert we're no longer able to use the oldest entry ID.
|
||||||
|
assert!(!acc.reserve_signature_with_last_id(&sig, &alice.last_id()));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_debits_before_credits() {
|
||||||
|
let mint = Mint::new(2);
|
||||||
|
let acc = Accountant::new(&mint);
|
||||||
|
let alice = KeyPair::new();
|
||||||
|
let tr0 = Transaction::new(&mint.keypair(), alice.pubkey(), 2, mint.last_id());
|
||||||
|
let tr1 = Transaction::new(&alice, mint.pubkey(), 1, mint.last_id());
|
||||||
|
let trs = vec![tr0, tr1];
|
||||||
|
assert!(acc.process_verified_transactions(trs)[1].is_err());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(all(feature = "unstable", test))]
|
||||||
|
mod bench {
|
||||||
|
extern crate test;
|
||||||
|
use self::test::Bencher;
|
||||||
|
use accountant::*;
|
||||||
|
use bincode::serialize;
|
||||||
|
use hash::hash;
|
||||||
|
use signature::KeyPairUtil;
|
||||||
|
|
||||||
|
#[bench]
|
||||||
|
fn process_verified_event_bench(bencher: &mut Bencher) {
|
||||||
|
let mint = Mint::new(100_000_000);
|
||||||
|
let acc = Accountant::new(&mint);
|
||||||
|
// Create transactions between unrelated parties.
|
||||||
|
let transactions: Vec<_> = (0..4096)
|
||||||
|
.into_par_iter()
|
||||||
|
.map(|i| {
|
||||||
|
// Seed the 'from' account.
|
||||||
|
let rando0 = KeyPair::new();
|
||||||
|
let tr = Transaction::new(&mint.keypair(), rando0.pubkey(), 1_000, mint.last_id());
|
||||||
|
acc.process_verified_transaction(&tr).unwrap();
|
||||||
|
|
||||||
|
// Seed the 'to' account and a cell for its signature.
|
||||||
|
let last_id = hash(&serialize(&i).unwrap()); // Unique hash
|
||||||
|
acc.register_entry_id(&last_id);
|
||||||
|
|
||||||
|
let rando1 = KeyPair::new();
|
||||||
|
let tr = Transaction::new(&rando0, rando1.pubkey(), 1, last_id);
|
||||||
|
acc.process_verified_transaction(&tr).unwrap();
|
||||||
|
|
||||||
|
// Finally, return a transaction that's unique
|
||||||
|
Transaction::new(&rando0, rando1.pubkey(), 1, last_id)
|
||||||
|
})
|
||||||
|
.collect();
|
||||||
|
bencher.iter(|| {
|
||||||
|
// Since benchmarker runs this multiple times, we need to clear the signatures.
|
||||||
|
for sigs in acc.last_ids.read().unwrap().iter() {
|
||||||
|
sigs.1.write().unwrap().clear();
|
||||||
|
}
|
||||||
|
|
||||||
|
assert!(
|
||||||
|
acc.process_verified_transactions(transactions.clone())
|
||||||
|
.iter()
|
||||||
|
.all(|x| x.is_ok())
|
||||||
|
);
|
||||||
|
});
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
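The `test_duplicate_event_signature` and `test_max_entry_ids` tests pin down the bounded dedup window behind `register_entry_id` and `reserve_signature_with_last_id`: seen signatures are grouped by the entry ID they reference, and evicting the oldest entry ID implicitly rejects any transaction still built against it. A self-contained sketch of that window with stub types and a tiny capacity for brevity (not the changeset's code):

```rust
use std::collections::{HashSet, VecDeque};

const MAX_ENTRY_IDS: usize = 2; // the real constant is 1024 * 4

struct Window(VecDeque<(u64, HashSet<u64>)>); // (entry ID, seen signatures)

impl Window {
    fn register(&mut self, id: u64) {
        if self.0.len() >= MAX_ENTRY_IDS {
            self.0.pop_front(); // oldest entry ID and its signatures vanish
        }
        self.0.push_back((id, HashSet::new()));
    }

    fn reserve(&mut self, sig: u64, last_id: u64) -> bool {
        match self.0.iter_mut().rev().find(|x| x.0 == last_id) {
            Some(entry) => entry.1.insert(sig), // false if sig already seen
            None => false,                      // last_id expired or unknown
        }
    }
}

fn main() {
    let mut w = Window(VecDeque::new());
    w.register(1);
    assert!(w.reserve(99, 1));   // first use of the signature
    assert!(!w.reserve(99, 1));  // duplicate rejected
    w.register(2);
    w.register(3);               // evicts entry ID 1
    assert!(!w.reserve(100, 1)); // too old: rejected
}
```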
@ -4,154 +4,325 @@
|
|||||||
|
|
||||||
use accountant::Accountant;
|
use accountant::Accountant;
|
||||||
use bincode::{deserialize, serialize};
|
use bincode::{deserialize, serialize};
|
||||||
|
use ecdsa;
|
||||||
use entry::Entry;
|
use entry::Entry;
|
||||||
|
use event::Event;
|
||||||
use hash::Hash;
|
use hash::Hash;
|
||||||
|
use historian::Historian;
|
||||||
|
use packet;
|
||||||
|
use packet::SharedPackets;
|
||||||
|
use rayon::prelude::*;
|
||||||
|
use recorder::Signal;
|
||||||
use result::Result;
|
use result::Result;
|
||||||
use serde_json;
|
use serde_json;
|
||||||
use signature::PublicKey;
|
use signature::PublicKey;
|
||||||
use std::default::Default;
|
use std::cmp::max;
|
||||||
|
use std::collections::VecDeque;
|
||||||
use std::io::Write;
|
use std::io::Write;
|
||||||
use std::net::{SocketAddr, UdpSocket};
|
use std::net::{SocketAddr, UdpSocket};
|
||||||
use std::sync::atomic::{AtomicBool, Ordering};
|
use std::sync::atomic::{AtomicBool, Ordering};
|
||||||
use std::sync::mpsc::channel;
|
use std::sync::mpsc::{channel, Receiver, Sender};
|
||||||
use std::sync::{Arc, Mutex};
|
use std::sync::{Arc, Mutex, RwLock};
|
||||||
use std::thread::{spawn, JoinHandle};
|
use std::thread::{spawn, JoinHandle};
|
||||||
use std::time::Duration;
|
use std::time::Duration;
|
||||||
use streamer;
|
use streamer;
|
||||||
use transaction::Transaction;
|
use transaction::Transaction;
|
||||||
use rayon::prelude::*;
|
|
||||||
|
use subscribers;
|
||||||
|
|
||||||
pub struct AccountantSkel<W: Write + Send + 'static> {
|
pub struct AccountantSkel<W: Write + Send + 'static> {
|
||||||
pub acc: Accountant,
|
acc: Accountant,
|
||||||
pub last_id: Hash,
|
last_id: Hash,
|
||||||
writer: W,
|
writer: W,
|
||||||
|
historian: Historian,
|
||||||
|
entry_info_subscribers: Vec<SocketAddr>,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg_attr(feature = "cargo-clippy", allow(large_enum_variant))]
|
#[cfg_attr(feature = "cargo-clippy", allow(large_enum_variant))]
|
||||||
#[derive(Serialize, Deserialize, Debug)]
|
#[derive(Serialize, Deserialize, Debug, Clone)]
|
||||||
pub enum Request {
|
pub enum Request {
|
||||||
Transaction(Transaction),
|
Transaction(Transaction),
|
||||||
GetBalance { key: PublicKey },
|
GetBalance { key: PublicKey },
|
||||||
GetId { is_last: bool },
|
GetLastId,
|
||||||
|
Subscribe { subscriptions: Vec<Subscription> },
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Serialize, Deserialize, Debug, Clone)]
|
||||||
|
pub enum Subscription {
|
||||||
|
EntryInfo,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Serialize, Deserialize, Debug, Clone)]
|
||||||
|
pub struct EntryInfo {
|
||||||
|
pub id: Hash,
|
||||||
|
pub num_hashes: u64,
|
||||||
|
pub num_events: u64,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Request {
|
impl Request {
|
||||||
/// Verify the request is valid.
|
/// Verify the request is valid.
|
||||||
pub fn verify(&self) -> bool {
|
pub fn verify(&self) -> bool {
|
||||||
match *self {
|
match *self {
|
||||||
Request::Transaction(ref tr) => tr.verify(),
|
Request::Transaction(ref tr) => tr.verify_plan(),
|
||||||
_ => true,
|
_ => true,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Parallel verfication of a batch of requests.
|
|
||||||
fn filter_valid_requests(reqs: Vec<(Request, SocketAddr)>) -> Vec<(Request, SocketAddr)> {
|
|
||||||
reqs.into_par_iter().filter({ |x| x.0.verify() }).collect()
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Serialize, Deserialize, Debug)]
|
#[derive(Serialize, Deserialize, Debug)]
|
||||||
pub enum Response {
|
pub enum Response {
|
||||||
Balance { key: PublicKey, val: Option<i64> },
|
Balance { key: PublicKey, val: Option<i64> },
|
||||||
Entries { entries: Vec<Entry> },
|
EntryInfo(EntryInfo),
|
||||||
Id { id: Hash, is_last: bool },
|
LastId { id: Hash },
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<W: Write + Send + 'static> AccountantSkel<W> {
|
impl<W: Write + Send + 'static> AccountantSkel<W> {
|
||||||
/// Create a new AccountantSkel that wraps the given Accountant.
|
/// Create a new AccountantSkel that wraps the given Accountant.
|
||||||
pub fn new(acc: Accountant, w: W) -> Self {
|
pub fn new(acc: Accountant, last_id: Hash, writer: W, historian: Historian) -> Self {
|
||||||
let last_id = acc.first_id;
|
|
||||||
AccountantSkel {
|
AccountantSkel {
|
||||||
acc,
|
acc,
|
||||||
last_id,
|
last_id,
|
||||||
writer: w,
|
writer,
|
||||||
|
historian,
|
||||||
|
entry_info_subscribers: vec![],
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn notify_entry_info_subscribers(&mut self, entry: &Entry) {
|
||||||
|
// TODO: No need to bind().
|
||||||
|
let socket = UdpSocket::bind("127.0.0.1:0").expect("bind");
|
||||||
|
|
||||||
|
for addr in &self.entry_info_subscribers {
|
||||||
|
let entry_info = EntryInfo {
|
||||||
|
id: entry.id,
|
||||||
|
num_hashes: entry.num_hashes,
|
||||||
|
num_events: entry.events.len() as u64,
|
||||||
|
};
|
||||||
|
let data = serialize(&Response::EntryInfo(entry_info)).expect("serialize EntryInfo");
|
||||||
|
let _res = socket.send_to(&data, addr);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Process any Entry items that have been published by the Historian.
|
/// Process any Entry items that have been published by the Historian.
|
||||||
pub fn sync(&mut self) -> Hash {
|
pub fn sync(&mut self) -> Hash {
|
||||||
while let Ok(entry) = self.acc.historian.receiver.try_recv() {
|
while let Ok(entry) = self.historian.receiver.try_recv() {
|
||||||
self.last_id = entry.id;
|
self.last_id = entry.id;
|
||||||
|
self.acc.register_entry_id(&self.last_id);
|
||||||
writeln!(self.writer, "{}", serde_json::to_string(&entry).unwrap()).unwrap();
|
writeln!(self.writer, "{}", serde_json::to_string(&entry).unwrap()).unwrap();
|
||||||
|
self.notify_entry_info_subscribers(&entry);
|
||||||
}
|
}
|
||||||
self.last_id
|
self.last_id
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Process Request items sent by clients.
|
/// Process Request items sent by clients.
|
||||||
pub fn log_verified_request(&mut self, msg: Request) -> Option<Response> {
|
pub fn process_request(
|
||||||
|
&mut self,
|
||||||
|
msg: Request,
|
||||||
|
rsp_addr: SocketAddr,
|
||||||
|
) -> Option<(Response, SocketAddr)> {
|
||||||
match msg {
|
match msg {
|
||||||
Request::Transaction(tr) => {
|
Request::GetBalance { key } => {
|
||||||
if let Err(err) = self.acc.log_verified_transaction(tr) {
|
let val = self.acc.get_balance(&key);
|
||||||
eprintln!("Transaction error: {:?}", err);
|
Some((Response::Balance { key, val }, rsp_addr))
|
||||||
|
}
|
||||||
|
Request::GetLastId => Some((Response::LastId { id: self.sync() }, rsp_addr)),
|
||||||
|
Request::Transaction(_) => unreachable!(),
|
||||||
|
Request::Subscribe { subscriptions } => {
|
||||||
|
for subscription in subscriptions {
|
||||||
|
match subscription {
|
||||||
|
Subscription::EntryInfo => self.entry_info_subscribers.push(rsp_addr),
|
||||||
|
}
|
||||||
}
|
}
|
||||||
None
|
None
|
||||||
}
|
}
|
||||||
Request::GetBalance { key } => {
|
|
||||||
let val = self.acc.get_balance(&key);
|
|
||||||
Some(Response::Balance { key, val })
|
|
||||||
}
|
}
|
||||||
Request::GetId { is_last } => Some(Response::Id {
|
|
||||||
id: if is_last {
|
|
||||||
self.sync()
|
|
||||||
} else {
|
|
||||||
self.acc.first_id
|
|
||||||
},
|
|
||||||
is_last,
|
|
||||||
}),
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn recv_batch(recvr: &streamer::PacketReceiver) -> Result<Vec<SharedPackets>> {
|
||||||
|
let timer = Duration::new(1, 0);
|
||||||
|
let msgs = recvr.recv_timeout(timer)?;
|
||||||
|
trace!("got msgs");
|
||||||
|
let mut batch = vec![msgs];
|
||||||
|
while let Ok(more) = recvr.try_recv() {
|
||||||
|
trace!("got more msgs");
|
||||||
|
batch.push(more);
|
||||||
|
}
|
||||||
|
info!("batch len {}", batch.len());
|
||||||
|
Ok(batch)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn verify_batch(batch: Vec<SharedPackets>) -> Vec<Vec<(SharedPackets, Vec<u8>)>> {
|
||||||
|
let chunk_size = max(1, (batch.len() + 3) / 4);
|
||||||
|
let batches: Vec<_> = batch.chunks(chunk_size).map(|x| x.to_vec()).collect();
|
||||||
|
batches
|
||||||
|
.into_par_iter()
|
||||||
|
.map(|batch| {
|
||||||
|
let r = ecdsa::ed25519_verify(&batch);
|
||||||
|
batch.into_iter().zip(r).collect()
|
||||||
|
})
|
||||||
|
.collect()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn verifier(
|
||||||
|
recvr: &streamer::PacketReceiver,
|
||||||
|
sendr: &Sender<Vec<(SharedPackets, Vec<u8>)>>,
|
||||||
|
) -> Result<()> {
|
||||||
|
let batch = Self::recv_batch(recvr)?;
|
||||||
|
let verified_batches = Self::verify_batch(batch);
|
||||||
|
for xs in verified_batches {
|
||||||
|
sendr.send(xs)?;
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn deserialize_packets(p: &packet::Packets) -> Vec<Option<(Request, SocketAddr)>> {
|
||||||
|
p.packets
|
||||||
|
.par_iter()
|
||||||
|
.map(|x| {
|
||||||
|
deserialize(&x.data[0..x.meta.size])
|
||||||
|
.map(|req| (req, x.meta.addr()))
|
||||||
|
.ok()
|
||||||
|
})
|
||||||
|
.collect()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Split Request list into verified transactions and the rest
|
||||||
|
fn partition_requests(
|
||||||
|
req_vers: Vec<(Request, SocketAddr, u8)>,
|
||||||
|
) -> (Vec<Transaction>, Vec<(Request, SocketAddr)>) {
|
||||||
|
let mut trs = vec![];
|
||||||
|
let mut reqs = vec![];
|
||||||
|
for (msg, rsp_addr, verify) in req_vers {
|
||||||
|
match msg {
|
||||||
|
Request::Transaction(tr) => {
|
||||||
|
if verify != 0 {
|
||||||
|
trs.push(tr);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
_ => reqs.push((msg, rsp_addr)),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
(trs, reqs)
|
||||||
|
}
|
||||||
|
|
||||||
|
+    fn process_packets(
+        &mut self,
+        req_vers: Vec<(Request, SocketAddr, u8)>,
+    ) -> Result<Vec<(Response, SocketAddr)>> {
+        let (trs, reqs) = Self::partition_requests(req_vers);
+
+        // Process the transactions in parallel and then log the successful ones.
+        for result in self.acc.process_verified_transactions(trs) {
+            if let Ok(tr) = result {
+                self.historian
+                    .sender
+                    .send(Signal::Event(Event::Transaction(tr)))?;
+            }
+        }
+
+        // Let validators know they should not attempt to process additional
+        // transactions in parallel.
+        self.historian.sender.send(Signal::Tick)?;
+
+        // Process the remaining requests serially.
+        let rsps = reqs.into_iter()
+            .filter_map(|(req, rsp_addr)| self.process_request(req, rsp_addr))
+            .collect();
+
+        Ok(rsps)
+    }
+    fn serialize_response(
+        resp: Response,
+        rsp_addr: SocketAddr,
+        blob_recycler: &packet::BlobRecycler,
+    ) -> Result<packet::SharedBlob> {
+        let blob = blob_recycler.allocate();
+        {
+            let mut b = blob.write().unwrap();
+            let v = serialize(&resp)?;
+            let len = v.len();
+            b.data[..len].copy_from_slice(&v);
+            b.meta.size = len;
+            b.meta.set_addr(&rsp_addr);
+        }
+        Ok(blob)
+    }
+    fn serialize_responses(
+        rsps: Vec<(Response, SocketAddr)>,
+        blob_recycler: &packet::BlobRecycler,
+    ) -> Result<VecDeque<packet::SharedBlob>> {
+        let mut blobs = VecDeque::new();
+        for (resp, rsp_addr) in rsps {
+            blobs.push_back(Self::serialize_response(resp, rsp_addr, blob_recycler)?);
+        }
+        Ok(blobs)
+    }
     fn process(
         obj: &Arc<Mutex<AccountantSkel<W>>>,
-        r_reader: &streamer::Receiver,
-        s_responder: &streamer::Responder,
-        packet_recycler: &streamer::PacketRecycler,
-        response_recycler: &streamer::ResponseRecycler,
+        verified_receiver: &Receiver<Vec<(SharedPackets, Vec<u8>)>>,
+        blob_sender: &streamer::BlobSender,
+        packet_recycler: &packet::PacketRecycler,
+        blob_recycler: &packet::BlobRecycler,
     ) -> Result<()> {
         let timer = Duration::new(1, 0);
-        let msgs = r_reader.recv_timeout(timer)?;
-        let msgs_ = msgs.clone();
-        let rsps = streamer::allocate(response_recycler);
-        let rsps_ = rsps.clone();
-        {
-            let mut reqs = vec![];
-            for packet in &msgs.read().unwrap().packets {
-                let rsp_addr = packet.meta.get_addr();
-                let sz = packet.meta.size;
-                let req = deserialize(&packet.data[0..sz])?;
-                reqs.push((req, rsp_addr));
-            }
-            let reqs = filter_valid_requests(reqs);
-
-            let mut num = 0;
-            let mut ursps = rsps.write().unwrap();
-            for (req, rsp_addr) in reqs {
-                if let Some(resp) = obj.lock().unwrap().log_verified_request(req) {
-                    if ursps.responses.len() <= num {
-                        ursps
-                            .responses
-                            .resize((num + 1) * 2, streamer::Response::default());
-                    }
-                    let rsp = &mut ursps.responses[num];
-                    let v = serialize(&resp)?;
-                    let len = v.len();
-                    rsp.data[..len].copy_from_slice(&v);
-                    rsp.meta.size = len;
-                    rsp.meta.set_addr(&rsp_addr);
-                    num += 1;
-                }
-            }
-            ursps.responses.resize(num, streamer::Response::default());
-        }
-        s_responder.send(rsps_)?;
-        streamer::recycle(packet_recycler, msgs_);
+        let mms = verified_receiver.recv_timeout(timer)?;
+        for (msgs, vers) in mms {
+            let reqs = Self::deserialize_packets(&msgs.read().unwrap());
+            let req_vers = reqs.into_iter()
+                .zip(vers)
+                .filter_map(|(req, ver)| req.map(|(msg, addr)| (msg, addr, ver)))
+                .filter(|x| x.0.verify())
+                .collect();
+            let rsps = obj.lock().unwrap().process_packets(req_vers)?;
+            let blobs = Self::serialize_responses(rsps, blob_recycler)?;
+            if !blobs.is_empty() {
+                //don't wake up the other side if there is nothing
+                blob_sender.send(blobs)?;
+            }
+            packet_recycler.recycle(msgs);
+
+            // Write new entries to the ledger and notify subscribers.
+            obj.lock().unwrap().sync();
+        }
         Ok(())
     }

+    /// Process verified blobs, already in order
+    /// Respond with a signed hash of the state
+    fn replicate_state(
+        obj: &Arc<Mutex<AccountantSkel<W>>>,
+        verified_receiver: &streamer::BlobReceiver,
+        blob_recycler: &packet::BlobRecycler,
+    ) -> Result<()> {
+        let timer = Duration::new(1, 0);
+        let blobs = verified_receiver.recv_timeout(timer)?;
+        for msgs in &blobs {
+            let blob = msgs.read().unwrap();
+            let entries: Vec<Entry> = deserialize(&blob.data()[..blob.meta.size]).unwrap();
+            for entry in entries {
+                obj.lock().unwrap().acc.register_entry_id(&entry.id);
+                obj.lock()
+                    .unwrap()
+                    .acc
+                    .process_verified_events(entry.events)?;
+            }
+            //TODO respond back to leader with hash of the state
+        }
+        for blob in blobs {
+            blob_recycler.recycle(blob);
+        }
+        Ok(())
+    }
     /// Create a UDP microservice that forwards messages to the given AccountantSkel.
+    /// This service is the network leader
     /// Set `exit` to shutdown its threads.
     pub fn serve(
-        obj: Arc<Mutex<AccountantSkel<W>>>,
+        obj: &Arc<Mutex<AccountantSkel<W>>>,
         addr: &str,
         exit: Arc<AtomicBool>,
     ) -> Result<Vec<JoinHandle<()>>> {
@@ -161,29 +332,479 @@ impl<W: Write + Send + 'static> AccountantSkel<W> {
         local.set_port(0);
         let write = UdpSocket::bind(local)?;

-        let packet_recycler = Arc::new(Mutex::new(Vec::new()));
-        let response_recycler = Arc::new(Mutex::new(Vec::new()));
-        let (s_reader, r_reader) = channel();
-        let t_receiver = streamer::receiver(read, exit.clone(), packet_recycler.clone(), s_reader)?;
-        let (s_responder, r_responder) = channel();
+        let packet_recycler = packet::PacketRecycler::default();
+        let blob_recycler = packet::BlobRecycler::default();
+        let (packet_sender, packet_receiver) = channel();
+        let t_receiver =
+            streamer::receiver(read, exit.clone(), packet_recycler.clone(), packet_sender)?;
+        let (blob_sender, blob_receiver) = channel();
         let t_responder =
-            streamer::responder(write, exit.clone(), response_recycler.clone(), r_responder);
+            streamer::responder(write, exit.clone(), blob_recycler.clone(), blob_receiver);
+        let (verified_sender, verified_receiver) = channel();

-        let skel = obj.clone();
-        let t_server = spawn(move || loop {
-            let e = AccountantSkel::process(
-                &skel,
-                &r_reader,
-                &s_responder,
-                &packet_recycler,
-                &response_recycler,
-            );
-            if e.is_err() && exit.load(Ordering::Relaxed) {
+        let exit_ = exit.clone();
+        let t_verifier = spawn(move || loop {
+            let e = Self::verifier(&packet_receiver, &verified_sender);
+            if e.is_err() && exit_.load(Ordering::Relaxed) {
                 break;
             }
         });

-        Ok(vec![t_receiver, t_responder, t_server])
+        let skel = obj.clone();
+        let t_server = spawn(move || loop {
+            let e = Self::process(
+                &skel,
+                &verified_receiver,
+                &blob_sender,
+                &packet_recycler,
+                &blob_recycler,
+            );
+            if e.is_err() {
+                // Assume this was a timeout, so sync any empty entries.
+                skel.lock().unwrap().sync();
+
+                if exit.load(Ordering::Relaxed) {
+                    break;
+                }
+            }
+        });
+        Ok(vec![t_receiver, t_responder, t_server, t_verifier])
     }
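Aside (not part of the diff): serve wires its threads together with std::sync::mpsc channels, so each stage blocks on its receiver and the pipeline winds down once a sender is dropped. A self-contained sketch of that two-stage shape, illustrative only and not the streamer API:

use std::sync::mpsc::channel;
use std::thread::spawn;

fn main() {
    let (verified_sender, verified_receiver) = channel();
    // Stage 1: pretend to verify three batches, then drop the sender.
    let verifier = spawn(move || {
        for batch in 0..3 {
            verified_sender.send(batch).expect("send verified batch");
        }
    });
    // Stage 2: consume until the channel closes.
    let server = spawn(move || {
        while let Ok(batch) = verified_receiver.recv() {
            println!("processing verified batch {}", batch);
        }
    });
    verifier.join().unwrap();
    server.join().unwrap();
}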
+    /// This service receives messages from a leader in the network and processes the transactions
+    /// on the accountant state.
+    /// # Arguments
+    /// * `obj` - The accountant state.
+    /// * `rsubs` - The subscribers.
+    /// * `exit` - The exit signal.
+    /// # Remarks
+    /// The pipeline is constructed as follows:
+    /// 1. receive blobs from the network, these are out of order
+    /// 2. verify blobs, PoH, signatures (TODO)
+    /// 3. reconstruct contiguous window
+    ///     a. order the blobs
+    ///     b. use erasure coding to reconstruct missing blobs
+    ///     c. ask the network for missing blobs, if erasure coding is insufficient
+    ///     d. make sure that the blobs PoH sequences connect (TODO)
+    /// 4. process the transaction state machine
+    /// 5. respond with the hash of the state back to the leader
+    pub fn replicate(
+        obj: &Arc<Mutex<AccountantSkel<W>>>,
+        rsubs: subscribers::Subscribers,
+        exit: Arc<AtomicBool>,
+    ) -> Result<Vec<JoinHandle<()>>> {
+        let read = UdpSocket::bind(rsubs.me.addr)?;
+        // make sure we are on the same interface
+        let mut local = read.local_addr()?;
+        local.set_port(0);
+        let write = UdpSocket::bind(local)?;
+
+        let blob_recycler = packet::BlobRecycler::default();
+        let (blob_sender, blob_receiver) = channel();
+        let t_blob_receiver = streamer::blob_receiver(
+            exit.clone(),
+            blob_recycler.clone(),
+            read,
+            blob_sender.clone(),
+        )?;
+        let (window_sender, window_receiver) = channel();
+        let (retransmit_sender, retransmit_receiver) = channel();
+
+        let subs = Arc::new(RwLock::new(rsubs));
+        let t_retransmit = streamer::retransmitter(
+            write,
+            exit.clone(),
+            subs.clone(),
+            blob_recycler.clone(),
+            retransmit_receiver,
+        );
+        //TODO
+        //the packets coming out of blob_receiver need to be sent to the GPU and verified
+        //then sent to the window, which does the erasure coding reconstruction
+        let t_window = streamer::window(
+            exit.clone(),
+            subs,
+            blob_recycler.clone(),
+            blob_receiver,
+            window_sender,
+            retransmit_sender,
+        );
+
+        let skel = obj.clone();
+        let t_server = spawn(move || loop {
+            let e = Self::replicate_state(&skel, &window_receiver, &blob_recycler);
+            if e.is_err() && exit.load(Ordering::Relaxed) {
+                break;
+            }
+        });
+        Ok(vec![t_blob_receiver, t_retransmit, t_window, t_server])
+    }
 }
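Aside (not part of the diff): step 3 of the pipeline described above reorders out-of-order blobs into a contiguous window before they reach the state machine. A toy model of just the ordering part, with the erasure-coding and repair steps elided, assuming each blob carries a sequence index:

use std::collections::BTreeMap;

// Index the received blobs, then emit the longest contiguous run that
// starts at the next expected sequence number.
fn contiguous(blobs: Vec<(u64, &'static str)>, mut next: u64) -> Vec<&'static str> {
    let window: BTreeMap<u64, &'static str> = blobs.into_iter().collect();
    let mut out = vec![];
    while let Some(data) = window.get(&next) {
        out.push(*data);
        next += 1;
    }
    out
}

fn main() {
    // Blob 3 is missing, so delivery stops after 0, 1, 2.
    let received = vec![(2, "c"), (0, "a"), (1, "b"), (4, "e")];
    assert_eq!(contiguous(received, 0), vec!["a", "b", "c"]);
}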
+#[cfg(test)]
+pub fn to_packets(r: &packet::PacketRecycler, reqs: Vec<Request>) -> Vec<SharedPackets> {
+    let mut out = vec![];
+    for rrs in reqs.chunks(packet::NUM_PACKETS) {
+        let p = r.allocate();
+        p.write()
+            .unwrap()
+            .packets
+            .resize(rrs.len(), Default::default());
+        for (i, o) in rrs.iter().zip(p.write().unwrap().packets.iter_mut()) {
+            let v = serialize(&i).expect("serialize request");
+            let len = v.len();
+            o.data[..len].copy_from_slice(&v);
+            o.meta.size = len;
+        }
+        out.push(p);
+    }
+    return out;
+}
+#[cfg(test)]
+mod tests {
+    use accountant_skel::{to_packets, Request};
+    use bincode::serialize;
+    use ecdsa;
+    use packet::{BlobRecycler, PacketRecycler, NUM_PACKETS};
+    use transaction::{memfind, test_tx};
+
+    use accountant::Accountant;
+    use accountant_skel::AccountantSkel;
+    use accountant_stub::AccountantStub;
+    use entry::Entry;
+    use futures::Future;
+    use historian::Historian;
+    use mint::Mint;
+    use plan::Plan;
+    use recorder::Signal;
+    use signature::{KeyPair, KeyPairUtil};
+    use std::io::sink;
+    use std::net::{SocketAddr, UdpSocket};
+    use std::sync::atomic::{AtomicBool, Ordering};
+    use std::sync::{Arc, Mutex};
+    use std::thread::sleep;
+    use std::time::Duration;
+    use transaction::Transaction;
+
+    use subscribers::{Node, Subscribers};
+    use streamer;
+    use std::sync::mpsc::channel;
+    use std::collections::VecDeque;
+    use hash::{hash, Hash};
+    use event::Event;
+    use entry;
+    use chrono::prelude::*;
+    #[test]
+    fn test_layout() {
+        let tr = test_tx();
+        let tx = serialize(&tr).unwrap();
+        let packet = serialize(&Request::Transaction(tr)).unwrap();
+        assert_matches!(memfind(&packet, &tx), Some(ecdsa::TX_OFFSET));
+        assert_matches!(memfind(&packet, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), None);
+    }
+    #[test]
+    fn test_to_packets() {
+        let tr = Request::Transaction(test_tx());
+        let re = PacketRecycler::default();
+        let rv = to_packets(&re, vec![tr.clone(); 1]);
+        assert_eq!(rv.len(), 1);
+        assert_eq!(rv[0].read().unwrap().packets.len(), 1);
+
+        let rv = to_packets(&re, vec![tr.clone(); NUM_PACKETS]);
+        assert_eq!(rv.len(), 1);
+        assert_eq!(rv[0].read().unwrap().packets.len(), NUM_PACKETS);
+
+        let rv = to_packets(&re, vec![tr.clone(); NUM_PACKETS + 1]);
+        assert_eq!(rv.len(), 2);
+        assert_eq!(rv[0].read().unwrap().packets.len(), NUM_PACKETS);
+        assert_eq!(rv[1].read().unwrap().packets.len(), 1);
+    }
+    #[test]
+    fn test_accounting_sequential_consistency() {
+        // In this attack we'll demonstrate that a verifier can interpret the ledger
+        // differently if either the server doesn't signal the ledger to add an
+        // Entry OR if the verifier tries to parallelize across multiple Entries.
+        let mint = Mint::new(2);
+        let acc = Accountant::new(&mint);
+        let rsp_addr: SocketAddr = "0.0.0.0:0".parse().expect("socket address");
+        let historian = Historian::new(&mint.last_id(), None);
+        let mut skel = AccountantSkel::new(acc, mint.last_id(), sink(), historian);
+
+        // Process a batch that includes a transaction that receives two tokens.
+        let alice = KeyPair::new();
+        let tr = Transaction::new(&mint.keypair(), alice.pubkey(), 2, mint.last_id());
+        let req_vers = vec![(Request::Transaction(tr), rsp_addr, 1_u8)];
+        assert!(skel.process_packets(req_vers).is_ok());
+
+        // Process a second batch that spends one of those tokens.
+        let tr = Transaction::new(&alice, mint.pubkey(), 1, mint.last_id());
+        let req_vers = vec![(Request::Transaction(tr), rsp_addr, 1_u8)];
+        assert!(skel.process_packets(req_vers).is_ok());
+
+        // Collect the ledger and feed it to a new accountant.
+        skel.historian.sender.send(Signal::Tick).unwrap();
+        drop(skel.historian.sender);
+        let entries: Vec<Entry> = skel.historian.receiver.iter().collect();
+
+        // Assert the user holds one token, not two. If the server only output one
+        // entry, then the second transaction will be rejected, because it drives
+        // the account balance below zero before the credit is added.
+        let acc = Accountant::new(&mint);
+        for entry in entries {
+            acc.process_verified_events(entry.events).unwrap();
+        }
+        assert_eq!(acc.get_balance(&alice.pubkey()), Some(1));
+    }
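Aside (not part of the diff): the attack narrated in this test hinges on ordering — a debit applied before its matching credit must fail, so where the entry boundary falls changes what a verifier computes. A toy balance model of that argument, not the Accountant API:

// Apply a signed delta, rejecting anything that would go below zero.
fn apply(balance: i64, delta: i64) -> Option<i64> {
    let next = balance + delta;
    if next < 0 {
        None
    } else {
        Some(next)
    }
}

fn main() {
    // Credit 2, then spend 1: a verifier that respects entry order succeeds.
    assert_eq!(apply(apply(0, 2).unwrap(), -1), Some(1));
    // Spend 1 before the credit lands: rejected, as the test comment describes.
    assert_eq!(apply(0, -1), None);
}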
+    #[test]
+    fn test_accountant_bad_sig() {
+        let serve_port = 9002;
+        let send_port = 9003;
+        let addr = format!("127.0.0.1:{}", serve_port);
+        let send_addr = format!("127.0.0.1:{}", send_port);
+        let alice = Mint::new(10_000);
+        let acc = Accountant::new(&alice);
+        let bob_pubkey = KeyPair::new().pubkey();
+        let exit = Arc::new(AtomicBool::new(false));
+        let historian = Historian::new(&alice.last_id(), Some(30));
+        let acc = Arc::new(Mutex::new(AccountantSkel::new(
+            acc,
+            alice.last_id(),
+            sink(),
+            historian,
+        )));
+        let _threads = AccountantSkel::serve(&acc, &addr, exit.clone()).unwrap();
+        sleep(Duration::from_millis(300));
+
+        let socket = UdpSocket::bind(send_addr).unwrap();
+        socket.set_read_timeout(Some(Duration::new(5, 0))).unwrap();
+
+        let mut acc = AccountantStub::new(&addr, socket);
+        let last_id = acc.get_last_id().wait().unwrap();
+
+        let tr = Transaction::new(&alice.keypair(), bob_pubkey, 500, last_id);
+
+        let _sig = acc.transfer_signed(tr).unwrap();
+
+        let last_id = acc.get_last_id().wait().unwrap();
+
+        let mut tr2 = Transaction::new(&alice.keypair(), bob_pubkey, 501, last_id);
+        tr2.data.tokens = 502;
+        tr2.data.plan = Plan::new_payment(502, bob_pubkey);
+        let _sig = acc.transfer_signed(tr2).unwrap();
+
+        assert_eq!(acc.get_balance(&bob_pubkey).wait().unwrap(), 500);
+        exit.store(true, Ordering::Relaxed);
+    }
+    use std::sync::{Once, ONCE_INIT};
+    extern crate env_logger;
+
+    static INIT: Once = ONCE_INIT;
+
+    /// Setup function that is only run once, even if called multiple times.
+    fn setup() {
+        INIT.call_once(|| {
+            env_logger::init().unwrap();
+        });
+    }
+    #[test]
+    fn test_replicate() {
+        setup();
+        let leader_sock = UdpSocket::bind("127.0.0.1:0").expect("bind");
+        let leader_addr = leader_sock.local_addr().unwrap();
+        let me_addr = "127.0.0.1:9010".parse().unwrap();
+        let target_peer_sock = UdpSocket::bind("127.0.0.1:0").expect("bind");
+        let target_peer_addr = target_peer_sock.local_addr().unwrap();
+        let source_peer_sock = UdpSocket::bind("127.0.0.1:0").expect("bind");
+        let exit = Arc::new(AtomicBool::new(false));
+
+        let node_me = Node::new([0, 0, 0, 0, 0, 0, 0, 1], 10, me_addr);
+        let node_subs = vec![Node::new([0, 0, 0, 0, 0, 0, 0, 2], 8, target_peer_addr); 1];
+        let node_leader = Node::new([0, 0, 0, 0, 0, 0, 0, 3], 20, leader_addr);
+        let subs = Subscribers::new(node_me, node_leader, &node_subs);
+
+        // setup some blob services to send blobs into the socket
+        // to simulate the source peer and get blobs out of the socket to
+        // simulate target peer
+        let recv_recycler = BlobRecycler::default();
+        let resp_recycler = BlobRecycler::default();
+        let (s_reader, r_reader) = channel();
+        let t_receiver = streamer::blob_receiver(
+            exit.clone(),
+            recv_recycler.clone(),
+            target_peer_sock,
+            s_reader,
+        ).unwrap();
+        let (s_responder, r_responder) = channel();
+        let t_responder = streamer::responder(
+            source_peer_sock,
+            exit.clone(),
+            resp_recycler.clone(),
+            r_responder,
+        );
+
+        let starting_balance = 10_000;
+        let alice = Mint::new(starting_balance);
+        let acc = Accountant::new(&alice);
+        let historian = Historian::new(&alice.last_id(), Some(30));
+        let acc = Arc::new(Mutex::new(AccountantSkel::new(
+            acc,
+            alice.last_id(),
+            sink(),
+            historian,
+        )));
+
+        let _threads = AccountantSkel::replicate(&acc, subs, exit.clone()).unwrap();
+
+        let mut alice_ref_balance = starting_balance;
+        let mut msgs = VecDeque::new();
+        let mut cur_hash = Hash::default();
+        let num_blobs = 10;
+        let transfer_amount = 501;
+        let bob_keypair = KeyPair::new();
+        for i in 0..num_blobs {
+            let b = resp_recycler.allocate();
+            let b_ = b.clone();
+            let mut w = b.write().unwrap();
+            w.set_index(i).unwrap();
+
+            let tr0 = Event::new_timestamp(&bob_keypair, Utc::now());
+            let entry0 = entry::create_entry(&cur_hash, i, vec![tr0]);
+            acc.lock().unwrap().acc.register_entry_id(&cur_hash);
+            cur_hash = hash(&cur_hash);
+
+            let tr1 = Transaction::new(
+                &alice.keypair(),
+                bob_keypair.pubkey(),
+                transfer_amount,
+                cur_hash,
+            );
+            acc.lock().unwrap().acc.register_entry_id(&cur_hash);
+            cur_hash = hash(&cur_hash);
+            let entry1 =
+                entry::create_entry(&cur_hash, i + num_blobs, vec![Event::Transaction(tr1)]);
+            acc.lock().unwrap().acc.register_entry_id(&cur_hash);
+            cur_hash = hash(&cur_hash);
+
+            alice_ref_balance -= transfer_amount;
+
+            let serialized_entry = serialize(&vec![entry0, entry1]).unwrap();
+
+            w.data_mut()[..serialized_entry.len()].copy_from_slice(&serialized_entry);
+            w.set_size(serialized_entry.len());
+            w.meta.set_addr(&me_addr);
+            drop(w);
+            msgs.push_back(b_);
+        }
+
+        // send the blobs into the socket
+        s_responder.send(msgs).expect("send");
+
+        // receive retransmitted messages
+        let timer = Duration::new(1, 0);
+        let mut msgs: Vec<_> = Vec::new();
+        while let Ok(msg) = r_reader.recv_timeout(timer) {
+            trace!("msg: {:?}", msg);
+            msgs.push(msg);
+        }
+
+        let alice_balance = acc.lock()
+            .unwrap()
+            .acc
+            .get_balance(&alice.keypair().pubkey())
+            .unwrap();
+        assert_eq!(alice_balance, alice_ref_balance);
+
+        let bob_balance = acc.lock()
+            .unwrap()
+            .acc
+            .get_balance(&bob_keypair.pubkey())
+            .unwrap();
+        assert_eq!(bob_balance, starting_balance - alice_ref_balance);
+
+        exit.store(true, Ordering::Relaxed);
+        t_receiver.join().expect("join");
+        t_responder.join().expect("join");
+    }
+}
+#[cfg(all(feature = "unstable", test))]
+mod bench {
+    extern crate test;
+    use self::test::Bencher;
+    use accountant::{Accountant, MAX_ENTRY_IDS};
+    use accountant_skel::*;
+    use bincode::serialize;
+    use hash::hash;
+    use mint::Mint;
+    use signature::{KeyPair, KeyPairUtil};
+    use std::collections::HashSet;
+    use std::io::sink;
+    use std::time::Instant;
+    use transaction::Transaction;
+
+    #[bench]
+    fn process_packets_bench(_bencher: &mut Bencher) {
+        let mint = Mint::new(100_000_000);
+        let acc = Accountant::new(&mint);
+        let rsp_addr: SocketAddr = "0.0.0.0:0".parse().expect("socket address");
+        // Create transactions between unrelated parties.
+        let txs = 100_000;
+        let last_ids: Mutex<HashSet<Hash>> = Mutex::new(HashSet::new());
+        let transactions: Vec<_> = (0..txs)
+            .into_par_iter()
+            .map(|i| {
+                // Seed the 'to' account and a cell for its signature.
+                let dummy_id = i % (MAX_ENTRY_IDS as i32);
+                let last_id = hash(&serialize(&dummy_id).unwrap()); // Semi-unique hash
+                {
+                    let mut last_ids = last_ids.lock().unwrap();
+                    if !last_ids.contains(&last_id) {
+                        last_ids.insert(last_id);
+                        acc.register_entry_id(&last_id);
+                    }
+                }
+
+                // Seed the 'from' account.
+                let rando0 = KeyPair::new();
+                let tr = Transaction::new(&mint.keypair(), rando0.pubkey(), 1_000, last_id);
+                acc.process_verified_transaction(&tr).unwrap();
+
+                let rando1 = KeyPair::new();
+                let tr = Transaction::new(&rando0, rando1.pubkey(), 2, last_id);
+                acc.process_verified_transaction(&tr).unwrap();
+
+                // Finally, return a transaction that's unique
+                Transaction::new(&rando0, rando1.pubkey(), 1, last_id)
+            })
+            .collect();
+
+        let req_vers = transactions
+            .into_iter()
+            .map(|tr| (Request::Transaction(tr), rsp_addr, 1_u8))
+            .collect();
+
+        let historian = Historian::new(&mint.last_id(), None);
+        let mut skel = AccountantSkel::new(acc, mint.last_id(), sink(), historian);
+
+        let now = Instant::now();
+        assert!(skel.process_packets(req_vers).is_ok());
+        let duration = now.elapsed();
+        let sec = duration.as_secs() as f64 + duration.subsec_nanos() as f64 / 1_000_000_000.0;
+        let tps = txs as f64 / sec;
+
+        // Ensure that all transactions were successfully logged.
+        drop(skel.historian.sender);
+        let entries: Vec<Entry> = skel.historian.receiver.iter().collect();
+        assert_eq!(entries.len(), 1);
+        assert_eq!(entries[0].events.len(), txs as usize);
+
+        println!("{} tps", tps);
+    }
+}
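Aside (not part of the diff): the benchmark above reduces elapsed time to fractional seconds before dividing, which is worth spelling out since Duration splits whole seconds from nanoseconds. A standalone sketch of the same arithmetic:

use std::time::Duration;

fn tps(txs: u64, duration: Duration) -> f64 {
    // Fold whole seconds and the nanosecond remainder into one f64.
    let sec = duration.as_secs() as f64 + duration.subsec_nanos() as f64 / 1_000_000_000.0;
    txs as f64 / sec
}

fn main() {
    // 100_000 transactions in 2.5 seconds -> 40_000 tps.
    assert_eq!(tps(100_000, Duration::new(2, 500_000_000)), 40_000.0);
}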
@@ -3,10 +3,12 @@
 //! this object instead of writing messages to the network directly. The binary
 //! encoding of its messages is unstable and may change in future releases.

-use accountant_skel::{Request, Response};
+use accountant_skel::{Request, Response, Subscription};
 use bincode::{deserialize, serialize};
+use futures::future::{ok, FutureResult};
 use hash::Hash;
 use signature::{KeyPair, PublicKey, Signature};
+use std::collections::HashMap;
 use std::io;
 use std::net::UdpSocket;
 use transaction::Transaction;
@@ -14,6 +16,9 @@ use transaction::Transaction;
 pub struct AccountantStub {
     pub addr: String,
     pub socket: UdpSocket,
+    last_id: Option<Hash>,
+    num_events: u64,
+    balances: HashMap<PublicKey, Option<i64>>,
 }

 impl AccountantStub {
@@ -21,9 +26,43 @@ impl AccountantStub {
     /// over `socket`. To receive responses, the caller must bind `socket`
     /// to a public address before invoking AccountantStub methods.
     pub fn new(addr: &str, socket: UdpSocket) -> Self {
-        AccountantStub {
+        let stub = AccountantStub {
             addr: addr.to_string(),
             socket,
+            last_id: None,
+            num_events: 0,
+            balances: HashMap::new(),
+        };
+        stub.init();
+        stub
+    }
+
+    pub fn init(&self) {
+        let subscriptions = vec![Subscription::EntryInfo];
+        let req = Request::Subscribe { subscriptions };
+        let data = serialize(&req).expect("serialize Subscribe");
+        let _res = self.socket.send_to(&data, &self.addr);
+    }
+
+    pub fn recv_response(&self) -> io::Result<Response> {
+        let mut buf = vec![0u8; 1024];
+        self.socket.recv_from(&mut buf)?;
+        let resp = deserialize(&buf).expect("deserialize balance");
+        Ok(resp)
+    }
+
+    pub fn process_response(&mut self, resp: Response) {
+        match resp {
+            Response::Balance { key, val } => {
+                self.balances.insert(key, val);
+            }
+            Response::LastId { id } => {
+                self.last_id = Some(id);
+            }
+            Response::EntryInfo(entry_info) => {
+                self.last_id = Some(entry_info.id);
+                self.num_events += entry_info.num_events;
+            }
         }
     }
@@ -51,40 +90,67 @@ impl AccountantStub {
     /// Request the balance of the user holding `pubkey`. This method blocks
     /// until the server sends a response. If the response packet is dropped
     /// by the network, this method will hang indefinitely.
-    pub fn get_balance(&self, pubkey: &PublicKey) -> io::Result<Option<i64>> {
+    pub fn get_balance(&mut self, pubkey: &PublicKey) -> FutureResult<i64, i64> {
         let req = Request::GetBalance { key: *pubkey };
         let data = serialize(&req).expect("serialize GetBalance");
-        self.socket.send_to(&data, &self.addr)?;
-        let mut buf = vec![0u8; 1024];
-        self.socket.recv_from(&mut buf)?;
-        let resp = deserialize(&buf).expect("deserialize balance");
-        if let Response::Balance { key, val } = resp {
-            assert_eq!(key, *pubkey);
-            return Ok(val);
+        self.socket
+            .send_to(&data, &self.addr)
+            .expect("buffer error");
+        let mut done = false;
+        while !done {
+            let resp = self.recv_response().expect("recv response");
+            if let &Response::Balance { ref key, .. } = &resp {
+                done = key == pubkey;
+            }
+            self.process_response(resp);
         }
-        Ok(None)
-    }
-
-    /// Request the first or last Entry ID from the server.
-    fn get_id(&self, is_last: bool) -> io::Result<Hash> {
-        let req = Request::GetId { is_last };
-        let data = serialize(&req).expect("serialize GetId");
-        self.socket.send_to(&data, &self.addr)?;
-        let mut buf = vec![0u8; 1024];
-        self.socket.recv_from(&mut buf)?;
-        let resp = deserialize(&buf).expect("deserialize Id");
-        if let Response::Id { id, .. } = resp {
-            return Ok(id);
-        }
-        Ok(Default::default())
+        ok(self.balances[pubkey].unwrap())
     }

     /// Request the last Entry ID from the server. This method blocks
     /// until the server sends a response. At the time of this writing,
     /// it also has the side-effect of causing the server to log any
     /// entries that have been published by the Historian.
-    pub fn get_last_id(&self) -> io::Result<Hash> {
-        self.get_id(true)
+    pub fn get_last_id(&mut self) -> FutureResult<Hash, ()> {
+        let req = Request::GetLastId;
+        let data = serialize(&req).expect("serialize GetId");
+        self.socket
+            .send_to(&data, &self.addr)
+            .expect("buffer error");
+        let mut done = false;
+        while !done {
+            let resp = self.recv_response().expect("recv response");
+            if let &Response::LastId { .. } = &resp {
+                done = true;
+            }
+            self.process_response(resp);
+        }
+        ok(self.last_id.unwrap_or(Hash::default()))
+    }
+
+    /// Return the number of transactions the server processed since creating
+    /// this stub instance.
+    pub fn transaction_count(&mut self) -> u64 {
+        // Wait for at least one EntryInfo.
+        let mut done = false;
+        while !done {
+            let resp = self.recv_response().expect("recv response");
+            if let &Response::EntryInfo(_) = &resp {
+                done = true;
+            }
+            self.process_response(resp);
+        }
+
+        // Then take the rest.
+        self.socket.set_nonblocking(true).expect("set nonblocking");
+        loop {
+            match self.recv_response() {
+                Err(_) => break,
+                Ok(resp) => self.process_response(resp),
+            }
+        }
+        self.socket.set_nonblocking(false).expect("set blocking");
+        self.num_events
     }
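Aside (not part of the diff): transaction_count mixes one blocking read with a nonblocking drain so it returns promptly once a first EntryInfo arrives. The socket pattern in isolation — a sketch, not the stub itself:

use std::io;
use std::net::UdpSocket;

// Block for the first datagram, then take whatever else is already queued.
fn drain(socket: &UdpSocket) -> io::Result<usize> {
    let mut buf = [0u8; 1024];
    socket.recv_from(&mut buf)?; // blocking read for the first response
    let mut count = 1;
    socket.set_nonblocking(true)?;
    while socket.recv_from(&mut buf).is_ok() {
        count += 1; // consume any responses that were already queued
    }
    socket.set_nonblocking(false)?;
    Ok(count)
}

fn main() -> io::Result<()> {
    let socket = UdpSocket::bind("127.0.0.1:0")?;
    let addr = socket.local_addr()?;
    socket.send_to(b"ping", addr)?; // queue one datagram to ourselves
    println!("drained {} datagrams", drain(&socket)?);
    Ok(())
}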
@@ -93,6 +159,8 @@ mod tests {
     use super::*;
     use accountant::Accountant;
     use accountant_skel::AccountantSkel;
+    use futures::Future;
+    use historian::Historian;
     use mint::Mint;
     use signature::{KeyPair, KeyPairUtil};
     use std::io::sink;
@@ -107,20 +175,27 @@ mod tests {
         let addr = "127.0.0.1:9000";
         let send_addr = "127.0.0.1:9001";
         let alice = Mint::new(10_000);
-        let acc = Accountant::new(&alice, Some(30));
+        let acc = Accountant::new(&alice);
         let bob_pubkey = KeyPair::new().pubkey();
         let exit = Arc::new(AtomicBool::new(false));
-        let acc = Arc::new(Mutex::new(AccountantSkel::new(acc, sink())));
-        let _threads = AccountantSkel::serve(acc, addr, exit.clone()).unwrap();
+        let historian = Historian::new(&alice.last_id(), Some(30));
+        let acc = Arc::new(Mutex::new(AccountantSkel::new(
+            acc,
+            alice.last_id(),
+            sink(),
+            historian,
+        )));
+        let _threads = AccountantSkel::serve(&acc, addr, exit.clone()).unwrap();
         sleep(Duration::from_millis(300));

         let socket = UdpSocket::bind(send_addr).unwrap();
+        socket.set_read_timeout(Some(Duration::new(5, 0))).unwrap();

-        let acc = AccountantStub::new(addr, socket);
-        let last_id = acc.get_last_id().unwrap();
+        let mut acc = AccountantStub::new(addr, socket);
+        let last_id = acc.get_last_id().wait().unwrap();
         let _sig = acc.transfer(500, &alice.keypair(), bob_pubkey, &last_id)
             .unwrap();
-        assert_eq!(acc.get_balance(&bob_pubkey).unwrap().unwrap(), 500);
+        assert_eq!(acc.get_balance(&bob_pubkey).wait().unwrap(), 500);
         exit.store(true, Ordering::Relaxed);
     }
 }
@@ -1,41 +1,106 @@
+extern crate futures;
+extern crate getopts;
+extern crate isatty;
 extern crate rayon;
 extern crate serde_json;
 extern crate solana;
+extern crate untrusted;

+use futures::Future;
+use getopts::Options;
+use isatty::stdin_isatty;
+use rayon::prelude::*;
 use solana::accountant_stub::AccountantStub;
-use solana::mint::Mint;
+use solana::mint::MintDemo;
 use solana::signature::{KeyPair, KeyPairUtil};
 use solana::transaction::Transaction;
-use std::io::stdin;
+use std::env;
+use std::io::{stdin, Read};
 use std::net::UdpSocket;
-use std::time::{Duration, Instant};
-use std::thread::sleep;
-use rayon::prelude::*;
+use std::process::exit;
+use std::time::Instant;
+use untrusted::Input;

+fn print_usage(program: &str, opts: Options) {
+    let mut brief = format!("Usage: cat <mint.json> | {} [options]\n\n", program);
+    brief += "  Solana client demo creates a number of transactions and\n";
+    brief += "  sends them to a target node.";
+    brief += "  Takes json formatted mint file to stdin.";
+
+    print!("{}", opts.usage(&brief));
+}
+
 fn main() {
-    let addr = "127.0.0.1:8000";
-    let send_addr = "127.0.0.1:8001";
+    let mut threads = 4usize;
+    let mut addr: String = "127.0.0.1:8000".to_string();
+    let mut send_addr: String = "127.0.0.1:8001".to_string();

-    let mint: Mint = serde_json::from_reader(stdin()).unwrap();
-    let mint_keypair = mint.keypair();
-    let mint_pubkey = mint.pubkey();
+    let mut opts = Options::new();
+    opts.optopt("s", "", "server address", "host:port");
+    opts.optopt("c", "", "client address", "host:port");
+    opts.optopt("t", "", "number of threads", "4");
+    opts.optflag("h", "help", "print help");
+    let args: Vec<String> = env::args().collect();
+    let matches = match opts.parse(&args[1..]) {
+        Ok(m) => m,
+        Err(e) => {
+            eprintln!("{}", e);
+            exit(1);
+        }
+    };

-    let socket = UdpSocket::bind(send_addr).unwrap();
-    let acc = AccountantStub::new(addr, socket);
-    let last_id = acc.get_last_id().unwrap();
+    if matches.opt_present("h") {
+        let program = args[0].clone();
+        print_usage(&program, opts);
+        return;
+    }
+    if matches.opt_present("s") {
+        addr = matches.opt_str("s").unwrap();
+    }
+    if matches.opt_present("c") {
+        send_addr = matches.opt_str("c").unwrap();
+    }
+    if matches.opt_present("t") {
+        threads = matches.opt_str("t").unwrap().parse().expect("integer");
+    }

-    let mint_balance = acc.get_balance(&mint_pubkey).unwrap().unwrap();
-    println!("Mint's Initial Balance {}", mint_balance);
+    if stdin_isatty() {
+        eprintln!("nothing found on stdin, expected a json file");
+        exit(1);
+    }
+
+    let mut buffer = String::new();
+    let num_bytes = stdin().read_to_string(&mut buffer).unwrap();
+    if num_bytes == 0 {
+        eprintln!("empty file on stdin, expected a json file");
+        exit(1);
+    }
+
+    println!("Parsing stdin...");
+    let demo: MintDemo = serde_json::from_str(&buffer).unwrap_or_else(|e| {
+        eprintln!("failed to parse json: {}", e);
+        exit(1);
+    });
+
+    let socket = UdpSocket::bind(&send_addr).unwrap();
+    let mut acc = AccountantStub::new(&addr, socket);
+
+    println!("Get last ID...");
+    let last_id = acc.get_last_id().wait().unwrap();
+
+    println!("Creating keypairs...");
+    let txs = demo.users.len() / 2;
+    let keypairs: Vec<_> = demo.users
+        .into_par_iter()
+        .map(|(pkcs8, _)| KeyPair::from_pkcs8(Input::from(&pkcs8)).unwrap())
+        .collect();
+    let keypair_pairs: Vec<_> = keypairs.chunks(2).collect();

     println!("Signing transactions...");
-    let txs = 100_000;
     let now = Instant::now();
-    let transactions: Vec<_> = (0..txs)
+    let transactions: Vec<_> = keypair_pairs
         .into_par_iter()
-        .map(|_| {
-            let rando_pubkey = KeyPair::new().pubkey();
-            Transaction::new(&mint_keypair, rando_pubkey, 1, last_id)
-        })
+        .map(|chunk| Transaction::new(&chunk[0], chunk[1].pubkey(), 1, last_id))
         .collect();
     let duration = now.elapsed();
     let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
@@ -47,27 +112,32 @@ fn main() {
         nsps / 1_000_f64
     );

-    println!("Transferring 1 unit {} times...", txs);
+    let initial_tx_count = acc.transaction_count();
+
+    println!("Transferring {} transactions in {} batches", txs, threads);
     let now = Instant::now();
-    let mut _sig = Default::default();
-    for tr in transactions {
-        _sig = tr.sig;
-        acc.transfer_signed(tr).unwrap();
-    }
-    println!("Waiting for last transaction to be confirmed...",);
-    let mut val = mint_balance;
-    let mut prev = 0;
-    while val != prev {
-        sleep(Duration::from_millis(20));
-        prev = val;
-        val = acc.get_balance(&mint_pubkey).unwrap().unwrap();
+    let sz = transactions.len() / threads;
+    let chunks: Vec<_> = transactions.chunks(sz).collect();
+    chunks.into_par_iter().for_each(|trs| {
+        println!("Transferring 1 unit {} times...", trs.len());
+        let send_addr = "0.0.0.0:0";
+        let socket = UdpSocket::bind(send_addr).unwrap();
+        let acc = AccountantStub::new(&addr, socket);
+        for tr in trs {
+            acc.transfer_signed(tr.clone()).unwrap();
+        }
+    });
+
+    println!("Waiting for half the transactions to complete...",);
+    let mut tx_count = acc.transaction_count();
+    while tx_count < transactions.len() as u64 / 2 {
+        tx_count = acc.transaction_count();
     }
-    println!("Mint's Final Balance {}", val);
-    let txs = mint_balance - val;
-    println!("Successful transactions {}", txs);
+    let txs = tx_count - initial_tx_count;
+    println!("Transactions processed {}", txs);

     let duration = now.elapsed();
     let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
     let tps = (txs * 1_000_000_000) as f64 / ns as f64;
-    println!("Done. {} tps!", tps);
+    println!("Done. {} tps", tps);
 }
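Aside (not part of the diff): the send loop above fans the signed transactions out with rayon — slice into roughly `threads` chunks, then let each parallel worker open its own socket. The chunking in isolation (note that chunks(sz) can yield one extra short chunk when the division isn't exact):

extern crate rayon;

use rayon::prelude::*;

fn main() {
    let transactions: Vec<u32> = (0..10).collect();
    let threads = 4;
    let sz = transactions.len() / threads; // 10 / 4 = 2
    let chunks: Vec<_> = transactions.chunks(sz).collect(); // five chunks of two
    chunks.into_par_iter().for_each(|trs| {
        println!("one worker sends {} transactions", trs.len());
    });
}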
@@ -1,30 +1,73 @@
+extern crate isatty;
+extern crate rayon;
+extern crate ring;
 extern crate serde_json;
 extern crate solana;
+extern crate untrusted;

-use solana::entry::create_entry;
+use isatty::stdin_isatty;
+use rayon::prelude::*;
+use solana::accountant::MAX_ENTRY_IDS;
+use solana::entry::{create_entry, next_tick};
 use solana::event::Event;
-use solana::hash::Hash;
-use solana::mint::Mint;
-use solana::signature::{KeyPair, KeyPairUtil, PublicKey};
+use solana::mint::MintDemo;
+use solana::signature::{KeyPair, KeyPairUtil};
 use solana::transaction::Transaction;
-use std::io::stdin;
+use std::io::{stdin, Read};
+use std::process::exit;
+use untrusted::Input;

-fn transfer(from: &KeyPair, (to, tokens): (PublicKey, i64), last_id: Hash) -> Event {
-    Event::Transaction(Transaction::new(from, to, tokens, last_id))
-}
-
+// Generate a ledger with lots and lots of accounts.
 fn main() {
-    let mint: Mint = serde_json::from_reader(stdin()).unwrap();
-    let mut entries = mint.create_entries();
+    if stdin_isatty() {
+        eprintln!("nothing found on stdin, expected a json file");
+        exit(1);
+    }

-    let from = mint.keypair();
-    let seed = mint.seed();
-    let alice = (KeyPair::new().pubkey(), 200);
-    let bob = (KeyPair::new().pubkey(), 100);
-    let events = vec![transfer(&from, alice, seed), transfer(&from, bob, seed)];
-    entries.push(create_entry(&seed, 0, events));
+    let mut buffer = String::new();
+    let num_bytes = stdin().read_to_string(&mut buffer).unwrap();
+    if num_bytes == 0 {
+        eprintln!("empty file on stdin, expected a json file");
+        exit(1);
+    }

-    for entry in entries {
+    let demo: MintDemo = serde_json::from_str(&buffer).unwrap_or_else(|e| {
+        eprintln!("failed to parse json: {}", e);
+        exit(1);
+    });
+
+    let num_accounts = demo.users.len();
+    let last_id = demo.mint.last_id();
+    let mint_keypair = demo.mint.keypair();
+
+    eprintln!("Signing {} transactions...", num_accounts);
+    let events: Vec<_> = demo.users
+        .into_par_iter()
+        .map(|(pkcs8, tokens)| {
+            let rando = KeyPair::from_pkcs8(Input::from(&pkcs8)).unwrap();
+            let tr = Transaction::new(&mint_keypair, rando.pubkey(), tokens, last_id);
+            Event::Transaction(tr)
+        })
+        .collect();
+
+    for entry in demo.mint.create_entries() {
         println!("{}", serde_json::to_string(&entry).unwrap());
     }
+
+    eprintln!("Logging the creation of {} accounts...", num_accounts);
+    let entry = create_entry(&last_id, 0, events);
+    println!("{}", serde_json::to_string(&entry).unwrap());
+
+    eprintln!("Creating {} empty entries...", MAX_ENTRY_IDS);
+    // Offer client lots of entry IDs to use for each transaction's last_id.
+    let mut last_id = last_id;
+    for _ in 0..MAX_ENTRY_IDS {
+        let entry = next_tick(&last_id, 1);
+        last_id = entry.id;
+        let serialized = serde_json::to_string(&entry).unwrap_or_else(|e| {
+            eprintln!("failed to serialize: {}", e);
+            exit(1);
+        });
+        println!("{}", serialized);
+    }
 }
@@ -1,14 +1,36 @@
 //! A command-line executable for generating the chain's genesis block.

+extern crate isatty;
 extern crate serde_json;
 extern crate solana;

+use isatty::stdin_isatty;
 use solana::mint::Mint;
-use std::io::stdin;
+use std::io::{stdin, Read};
+use std::process::exit;

 fn main() {
-    let mint: Mint = serde_json::from_reader(stdin()).unwrap();
+    if stdin_isatty() {
+        eprintln!("nothing found on stdin, expected a json file");
+        exit(1);
+    }
+
+    let mut buffer = String::new();
+    let num_bytes = stdin().read_to_string(&mut buffer).unwrap();
+    if num_bytes == 0 {
+        eprintln!("empty file on stdin, expected a json file");
+        exit(1);
+    }
+
+    let mint: Mint = serde_json::from_str(&buffer).unwrap_or_else(|e| {
+        eprintln!("failed to parse json: {}", e);
+        exit(1);
+    });
     for x in mint.create_entries() {
-        println!("{}", serde_json::to_string(&x).unwrap());
+        let serialized = serde_json::to_string(&x).unwrap_or_else(|e| {
+            eprintln!("failed to serialize: {}", e);
+            exit(1);
+        });
+        println!("{}", serialized);
     }
 }
@@ -4,7 +4,7 @@ use solana::entry::Entry;
 use solana::event::Event;
 use solana::hash::Hash;
 use solana::historian::Historian;
-use solana::ledger::verify_slice;
+use solana::ledger::Block;
 use solana::recorder::Signal;
 use solana::signature::{KeyPair, KeyPairUtil};
 use solana::transaction::Transaction;
@@ -33,5 +33,5 @@ fn main() {
     }
     // Proof-of-History: Verify the historian learned about the events
     // in the same order they appear in the vector.
-    assert!(verify_slice(&entries, &seed));
+    assert!(entries[..].verify(&seed));
 }

33 src/bin/mint-demo.rs Normal file
@@ -0,0 +1,33 @@
+extern crate rayon;
+extern crate ring;
+extern crate serde_json;
+extern crate solana;
+
+use rayon::prelude::*;
+use ring::rand::SystemRandom;
+use solana::mint::{Mint, MintDemo};
+use solana::signature::KeyPair;
+use std::io;
+
+fn main() {
+    let mut input_text = String::new();
+    io::stdin().read_line(&mut input_text).unwrap();
+    let trimmed = input_text.trim();
+    let tokens = trimmed.parse::<i64>().unwrap();
+
+    let mint = Mint::new(tokens);
+    let tokens_per_user = 1_000;
+    let num_accounts = tokens / tokens_per_user;
+    let rnd = SystemRandom::new();
+
+    let users: Vec<_> = (0..num_accounts)
+        .into_par_iter()
+        .map(|_| {
+            let pkcs8 = KeyPair::generate_pkcs8(&rnd).unwrap().to_vec();
+            (pkcs8, tokens_per_user)
+        })
+        .collect();
+
+    let demo = MintDemo { mint, users };
+    println!("{}", serde_json::to_string(&demo).unwrap());
+}
@@ -1,15 +1,29 @@
+extern crate isatty;
 extern crate serde_json;
 extern crate solana;

+use isatty::stdin_isatty;
 use solana::mint::Mint;
 use std::io;
+use std::process::exit;

 fn main() {
     let mut input_text = String::new();
+    if stdin_isatty() {
+        eprintln!("nothing found on stdin, expected a token number");
+        exit(1);
+    }
+
     io::stdin().read_line(&mut input_text).unwrap();
     let trimmed = input_text.trim();
-    let tokens = trimmed.parse::<i64>().unwrap();
+    let tokens = trimmed.parse::<i64>().unwrap_or_else(|e| {
+        eprintln!("{}", e);
+        exit(1);
+    });
     let mint = Mint::new(tokens);
-    println!("{}", serde_json::to_string(&mint).unwrap());
+    let serialized = serde_json::to_string(&mint).unwrap_or_else(|e| {
+        eprintln!("failed to serialize: {}", e);
+        exit(1);
+    });
+    println!("{}", serialized);
 }
@ -1,24 +1,110 @@
|
|||||||
|
extern crate env_logger;
|
||||||
|
extern crate getopts;
|
||||||
|
extern crate isatty;
|
||||||
extern crate serde_json;
|
extern crate serde_json;
|
||||||
extern crate solana;
|
extern crate solana;
|
||||||
|
|
||||||
|
use getopts::Options;
|
||||||
|
use isatty::stdin_isatty;
|
||||||
use solana::accountant::Accountant;
|
use solana::accountant::Accountant;
|
||||||
use solana::accountant_skel::AccountantSkel;
|
use solana::accountant_skel::AccountantSkel;
|
||||||
use std::io::{self, stdout, BufRead};
|
use solana::entry::Entry;
|
+use solana::event::Event;
+use solana::historian::Historian;
+use std::env;
+use std::io::{stdin, stdout, Read};
+use std::process::exit;
 use std::sync::atomic::AtomicBool;
 use std::sync::{Arc, Mutex};

+fn print_usage(program: &str, opts: Options) {
+    let mut brief = format!("Usage: cat <transaction.log> | {} [options]\n\n", program);
+    brief += " Run a Solana node to handle transactions and\n";
+    brief += " write a new transaction log to stdout.\n";
+    brief += " Takes existing transaction log from stdin.";
+
+    print!("{}", opts.usage(&brief));
+}
+
 fn main() {
-    let addr = "127.0.0.1:8000";
-    let stdin = io::stdin();
-    let entries = stdin
-        .lock()
-        .lines()
-        .map(|line| serde_json::from_str(&line.unwrap()).unwrap());
-    let acc = Accountant::new_from_entries(entries, Some(1000));
+    env_logger::init().unwrap();
+    let mut port = 8000u16;
+    let mut opts = Options::new();
+    opts.optopt("p", "", "port", "port");
+    opts.optflag("h", "help", "print help");
+    let args: Vec<String> = env::args().collect();
+    let matches = match opts.parse(&args[1..]) {
+        Ok(m) => m,
+        Err(e) => {
+            eprintln!("{}", e);
+            exit(1);
+        }
+    };
+    if matches.opt_present("h") {
+        let program = args[0].clone();
+        print_usage(&program, opts);
+        return;
+    }
+    if matches.opt_present("p") {
+        port = matches.opt_str("p").unwrap().parse().expect("port");
+    }
+    let addr = format!("0.0.0.0:{}", port);
+
+    if stdin_isatty() {
+        eprintln!("nothing found on stdin, expected a log file");
+        exit(1);
+    }
+
+    let mut buffer = String::new();
+    let num_bytes = stdin().read_to_string(&mut buffer).unwrap();
+    if num_bytes == 0 {
+        eprintln!("empty file on stdin, expected a log file");
+        exit(1);
+    }
+
+    eprintln!("Initializing...");
+    let mut entries = buffer.lines().map(|line| {
+        serde_json::from_str(&line).unwrap_or_else(|e| {
+            eprintln!("failed to parse json: {}", e);
+            exit(1);
+        })
+    });
+
+    // The first item in the ledger is required to be an entry with zero num_hashes,
+    // which implies its id can be used as the ledger's seed.
+    let entry0 = entries.next().unwrap();
+
+    // The second item in the ledger is a special transaction where the to and from
+    // fields are the same. That entry should be treated as a deposit, not a
+    // transfer to oneself.
+    let entry1: Entry = entries.next().unwrap();
+    let deposit = if let Event::Transaction(ref tr) = entry1.events[0] {
+        tr.data.plan.final_payment()
+    } else {
+        None
+    };
+
+    let acc = Accountant::new_from_deposit(&deposit.unwrap());
+    acc.register_entry_id(&entry0.id);
+    acc.register_entry_id(&entry1.id);
+
+    let mut last_id = entry1.id;
+    for entry in entries {
+        last_id = entry.id;
+        acc.process_verified_events(entry.events).unwrap();
+        acc.register_entry_id(&last_id);
+    }
+
+    let historian = Historian::new(&last_id, Some(1000));
     let exit = Arc::new(AtomicBool::new(false));
-    let skel = Arc::new(Mutex::new(AccountantSkel::new(acc, stdout())));
-    eprintln!("Listening on {}", addr);
-    let threads = AccountantSkel::serve(skel, addr, exit.clone()).unwrap();
+    let skel = Arc::new(Mutex::new(AccountantSkel::new(
+        acc,
+        last_id,
+        stdout(),
+        historian,
+    )));
+    let threads = AccountantSkel::serve(&skel, &addr, exit.clone()).unwrap();
+    eprintln!("Ready. Listening on {}", addr);
     for t in threads {
         t.join().expect("join");
     }
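The rewritten main() gives the ledger a fixed preamble: entry zero supplies the seed hash, and entry one must hold a self-addressed transaction that is read back as the mint's deposit. The sketch below restates that convention with stand-in types; `Entry`, `Event`, and `deposit_from` here are simplified illustrations, not the crate's real definitions.

// Stand-in types illustrating the ledger preamble convention described in the
// comments above; these are illustrative only, not the crate's definitions.
#[derive(Clone)]
struct Entry {
    id: u64, // stands in for the entry hash
    events: Vec<Event>,
}

#[derive(Clone)]
enum Event {
    Transaction { to: &'static str, tokens: i64 },
}

// Entry 1 must hold a self-addressed transaction; read it back as a deposit.
fn deposit_from(entry1: &Entry) -> Option<i64> {
    match entry1.events.first() {
        Some(&Event::Transaction { tokens, .. }) => Some(tokens),
        None => None,
    }
}

fn main() {
    let entry0 = Entry { id: 0, events: vec![] }; // seed entry: zero num_hashes
    let entry1 = Entry {
        id: 1,
        events: vec![Event::Transaction { to: "mint", tokens: 1_000 }],
    };
    // The accountant would be built from this deposit, then both entry ids
    // registered as valid last_ids, exactly as in main() above.
    assert_eq!(deposit_from(&entry1), Some(1_000));
    let _ = entry0;
}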
372  src/crdt.rs  Normal file
@@ -0,0 +1,372 @@
//! The `crdt` module defines a data structure that is shared by all the nodes in the network over
//! a gossip control plane. The goal is to share small bits of off-chain information and detect and
//! repair partitions.
//!
//! This CRDT only supports a very limited set of types. A map of PublicKey -> Versioned Struct.
//! The last version is always picked during an update.

use bincode::{deserialize, serialize};
use byteorder::{LittleEndian, ReadBytesExt};
use hash::Hash;
use result::Result;
use ring::rand::{SecureRandom, SystemRandom};
use signature::{PublicKey, Signature};
use std::collections::HashMap;
use std::io::Cursor;
use std::net::{SocketAddr, UdpSocket};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, RwLock};
use std::thread::{sleep, spawn, JoinHandle};
use std::time::Duration;

/// Structure to be replicated by the network
#[derive(Serialize, Deserialize, Clone)]
pub struct ReplicatedData {
    id: PublicKey,
    sig: Signature,
    /// should always be increasing
    version: u64,
    /// address to connect to for gossip
    gossip_addr: SocketAddr,
    /// address to connect to for replication
    replicate_addr: SocketAddr,
    /// address to connect to when this node is leader
    lead_addr: SocketAddr,
    /// current leader identity
    current_leader_id: PublicKey,
    /// last verified hash that was submitted to the leader
    last_verified_hash: Hash,
    /// last verified count, always increasing
    last_verified_count: u64,
}

impl ReplicatedData {
    pub fn new(id: PublicKey, gossip_addr: SocketAddr) -> ReplicatedData {
        let daddr = "0.0.0.0:0".parse().unwrap();
        ReplicatedData {
            id,
            sig: Signature::default(),
            version: 0,
            gossip_addr,
            replicate_addr: daddr,
            lead_addr: daddr,
            current_leader_id: PublicKey::default(),
            last_verified_hash: Hash::default(),
            last_verified_count: 0,
        }
    }
}

/// `Crdt` structure keeps a table of `ReplicatedData` structs
/// # Properties
/// * `table` - map of public id's to versioned and signed ReplicatedData structs
/// * `local` - map of public id's to the `self.update_index` at which `self.table` was updated
/// * `remote` - map of public id's to the last `update_index` received from them
/// * `update_index` - my update index
/// # Remarks
/// This implements two services, `gossip` and `listen`.
/// * `gossip` - asynchronously ask nodes to send updates
/// * `listen` - listen for requests and responses
/// No attempt to keep track of timeouts or dropped requests is made, or should be.
pub struct Crdt {
    table: HashMap<PublicKey, ReplicatedData>,
    /// Value of my update index when entry in table was updated.
    /// Nodes will ask for updates since `update_index`, and this node
    /// should respond with all the identities that are greater than the
    /// request's `update_index` in this list
    local: HashMap<PublicKey, u64>,
    /// The value of the remote update index that I have last seen
    /// This node will ask external nodes for updates since the value in this list
    remote: HashMap<PublicKey, u64>,
    update_index: u64,
    me: PublicKey,
    timeout: Duration,
}
// TODO These messages should be signed, and go through the gpu pipeline for spam filtering
#[derive(Serialize, Deserialize)]
enum Protocol {
    /// forward your own latest data structure when requesting an update
    /// this doesn't update the `remote` update index, but it allows the
    /// recipient of this request to add knowledge of this node to the network
    RequestUpdates(u64, ReplicatedData),
    //TODO might need a since?
    /// from id, from's last update index, ReplicatedData
    ReceiveUpdates(PublicKey, u64, Vec<ReplicatedData>),
}

impl Crdt {
    pub fn new(me: ReplicatedData) -> Crdt {
        assert_eq!(me.version, 0);
        let mut g = Crdt {
            table: HashMap::new(),
            local: HashMap::new(),
            remote: HashMap::new(),
            me: me.id,
            update_index: 1,
            timeout: Duration::new(0, 100_000),
        };
        g.local.insert(me.id, g.update_index);
        g.table.insert(me.id, me);
        g
    }
    pub fn import(&mut self, v: &ReplicatedData) {
        // TODO check that last_verified types are always increasing
        // TODO probably an error or attack
        if self.me != v.id {
            self.insert(v);
        }
    }
    pub fn insert(&mut self, v: &ReplicatedData) {
        if self.table.get(&v.id).is_none() || (v.version > self.table[&v.id].version) {
            trace!("insert! {}", v.version);
            self.update_index += 1;
            let _ = self.table.insert(v.id, v.clone());
            let _ = self.local.insert(v.id, self.update_index);
        } else {
            trace!("INSERT FAILED {}", v.version);
        }
    }
    fn random() -> u64 {
        let rnd = SystemRandom::new();
        let mut buf = [0u8; 8];
        rnd.fill(&mut buf).unwrap();
        let mut rdr = Cursor::new(&buf);
        rdr.read_u64::<LittleEndian>().unwrap()
    }
    fn get_updates_since(&self, v: u64) -> (PublicKey, u64, Vec<ReplicatedData>) {
        trace!("get updates since {}", v);
        let data = self.table
            .values()
            .filter(|x| self.local[&x.id] > v)
            .cloned()
            .collect();
        let id = self.me;
        let ups = self.update_index;
        (id, ups, data)
    }

    /// Create a random gossip request
    /// # Returns
    /// (A, B)
    /// * A - Remote gossip address
    /// * B - RequestUpdates message carrying my latest data and the remote
    ///   update index to request updates since
    fn gossip_request(&self) -> (SocketAddr, Protocol) {
        let n = (Self::random() as usize) % self.table.len();
        trace!("random {:?} {}", &self.me[0..1], n);
        let v = self.table.values().nth(n).unwrap().clone();
        let remote_update_index = *self.remote.get(&v.id).unwrap_or(&0);
        let req = Protocol::RequestUpdates(remote_update_index, self.table[&self.me].clone());
        (v.gossip_addr, req)
    }

    /// At random pick a node and try to get updated changes from them
    fn run_gossip(obj: &Arc<RwLock<Self>>) -> Result<()> {
        //TODO we need to keep track of stakes and weight the selection by stake size
        //TODO cache sockets

        // Lock the object only to do this operation and not for any longer
        // especially not when doing the `sock.send_to`
        let (remote_gossip_addr, req) = obj.read().unwrap().gossip_request();
        let sock = UdpSocket::bind("0.0.0.0:0")?;
        // TODO this will get chatty, so we need to first ask for number of updates since
        // then only ask for specific data that we don't have
        let r = serialize(&req)?;
        sock.send_to(&r, remote_gossip_addr)?;
        Ok(())
    }

    /// Apply updates that we received from the identity `from`
    /// # Arguments
    /// * `from` - identity of the sender of the updates
    /// * `update_index` - the number of updates that `from` has completed and this set of `data` represents
    /// * `data` - the update data
    fn apply_updates(&mut self, from: PublicKey, update_index: u64, data: &[ReplicatedData]) {
        trace!("got updates {}", data.len());
        // TODO we need to punish/spam resist here
        // sig verify the whole update and slash anyone who sends a bad update
        for v in data {
            self.import(&v);
        }
        *self.remote.entry(from).or_insert(update_index) = update_index;
    }

    /// randomly pick a node and ask them for updates asynchronously
    pub fn gossip(obj: Arc<RwLock<Self>>, exit: Arc<AtomicBool>) -> JoinHandle<()> {
        spawn(move || loop {
            let _ = Self::run_gossip(&obj);
            if exit.load(Ordering::Relaxed) {
                return;
            }
            //TODO this should be a tuned parameter
            sleep(obj.read().unwrap().timeout);
        })
    }

    /// Process messages from the network
    fn run_listen(obj: &Arc<RwLock<Self>>, sock: &UdpSocket) -> Result<()> {
        //TODO cache connections
        let mut buf = vec![0u8; 1024 * 64];
        let (amt, src) = sock.recv_from(&mut buf)?;
        trace!("got request from {}", src);
        buf.resize(amt, 0);
        let r = deserialize(&buf)?;
        match r {
            // TODO sigverify these
            Protocol::RequestUpdates(v, reqdata) => {
                trace!("RequestUpdates {}", v);
                let addr = reqdata.gossip_addr;
                // only lock for this call, don't lock during IO `sock.send_to` or `sock.recv_from`
                let (from, ups, data) = obj.read().unwrap().get_updates_since(v);
                trace!("get updates since response {} {}", v, data.len());
                let rsp = serialize(&Protocol::ReceiveUpdates(from, ups, data))?;
                trace!("send_to {}", addr);
                //TODO verify reqdata belongs to sender
                obj.write().unwrap().import(&reqdata);
                sock.send_to(&rsp, addr).unwrap();
                trace!("send_to done!");
            }
            Protocol::ReceiveUpdates(from, ups, data) => {
                trace!("ReceivedUpdates");
                obj.write().unwrap().apply_updates(from, ups, &data);
            }
        }
        Ok(())
    }
    pub fn listen(
        obj: Arc<RwLock<Self>>,
        sock: UdpSocket,
        exit: Arc<AtomicBool>,
    ) -> JoinHandle<()> {
        sock.set_read_timeout(Some(Duration::new(2, 0))).unwrap();
        spawn(move || loop {
            let _ = Self::run_listen(&obj, &sock);
            if exit.load(Ordering::Relaxed) {
                return;
            }
        })
    }
}

#[cfg(test)]
mod test {
    use crdt::{Crdt, ReplicatedData};
    use signature::KeyPair;
    use signature::KeyPairUtil;
    use std::net::UdpSocket;
    use std::sync::atomic::{AtomicBool, Ordering};
    use std::sync::{Arc, RwLock};
    use std::thread::{sleep, JoinHandle};
    use std::time::Duration;

    /// Test that the network converges.
    /// Run until every node in the network has a full ReplicatedData set.
    /// Check that nodes stop sending updates after all the ReplicatedData has been shared.
    /// tests that actually use this function are below
    fn run_gossip_topo<F>(topo: F)
    where
        F: Fn(&Vec<(Arc<RwLock<Crdt>>, JoinHandle<()>)>) -> (),
    {
        let num: usize = 5;
        let exit = Arc::new(AtomicBool::new(false));
        let listen: Vec<_> = (0..num)
            .map(|_| {
                let listener = UdpSocket::bind("0.0.0.0:0").unwrap();
                let pubkey = KeyPair::new().pubkey();
                let d = ReplicatedData::new(pubkey, listener.local_addr().unwrap());
                let crdt = Crdt::new(d);
                let c = Arc::new(RwLock::new(crdt));
                let l = Crdt::listen(c.clone(), listener, exit.clone());
                (c, l)
            })
            .collect();
        topo(&listen);
        let gossip: Vec<_> = listen
            .iter()
            .map(|&(ref c, _)| Crdt::gossip(c.clone(), exit.clone()))
            .collect();
        let mut done = true;
        for _ in 0..(num * 16) {
            done = true;
            for &(ref c, _) in listen.iter() {
                trace!(
                    "done updates {} {}",
                    c.read().unwrap().table.len(),
                    c.read().unwrap().update_index
                );
                //make sure the number of updates doesn't grow unbounded
                assert!(c.read().unwrap().update_index <= num as u64);
                //make sure we got all the updates
                if c.read().unwrap().table.len() != num {
                    done = false;
                }
            }
            if done {
                break;
            }
            sleep(Duration::new(1, 0));
        }
        exit.store(true, Ordering::Relaxed);
        for j in gossip {
            j.join().unwrap();
        }
        for (c, j) in listen.into_iter() {
            j.join().unwrap();
            // make it clear what failed
            // protocol is too chatty, updates should stop after everyone receives `num`
            assert!(c.read().unwrap().update_index <= num as u64);
            // protocol is not chatty enough, everyone should get `num` entries
            assert_eq!(c.read().unwrap().table.len(), num);
        }
        assert!(done);
    }
    /// ring a -> b -> c -> d -> e -> a
    #[test]
    fn gossip_ring_test() {
        run_gossip_topo(|listen| {
            let num = listen.len();
            for n in 0..num {
                let y = n % listen.len();
                let x = (n + 1) % listen.len();
                let mut xv = listen[x].0.write().unwrap();
                let yv = listen[y].0.read().unwrap();
                let mut d = yv.table[&yv.me].clone();
                d.version = 0;
                xv.insert(&d);
            }
        });
    }

    /// star (b,c,d,e) -> a
    #[test]
    fn gossip_star_test() {
        run_gossip_topo(|listen| {
            let num = listen.len();
            for n in 0..(num - 1) {
                let x = 0;
                let y = (n + 1) % listen.len();
                let mut xv = listen[x].0.write().unwrap();
                let yv = listen[y].0.read().unwrap();
                let mut d = yv.table[&yv.me].clone();
                d.version = 0;
                xv.insert(&d);
            }
        });
    }

    /// Test that insert drops messages that are older
    #[test]
    fn insert_test() {
        let mut d = ReplicatedData::new(KeyPair::new().pubkey(), "127.0.0.1:1234".parse().unwrap());
        assert_eq!(d.version, 0);
        let mut crdt = Crdt::new(d.clone());
        assert_eq!(crdt.table[&d.id].version, 0);
        d.version = 2;
        crdt.insert(&d);
        assert_eq!(crdt.table[&d.id].version, 2);
        d.version = 1;
        crdt.insert(&d);
        assert_eq!(crdt.table[&d.id].version, 2);
    }
}
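The whole CRDT reduces to one merge rule: accept an incoming record only if its version is strictly newer, and bump the local update index on every accepted write. A minimal, self-contained sketch of that rule, with a toy `Record` standing in for ReplicatedData:

// Self-contained sketch of the version-based merge rule Crdt::insert applies
// above: keep an incoming record only if its version is strictly newer.
use std::collections::HashMap;

#[derive(Clone, Debug, PartialEq)]
struct Record {
    id: u8,
    version: u64,
}

struct Table {
    entries: HashMap<u8, Record>,
    update_index: u64, // bumped on every accepted insert, as in Crdt
}

impl Table {
    fn insert(&mut self, v: &Record) {
        let newer = self.entries
            .get(&v.id)
            .map_or(true, |cur| v.version > cur.version);
        if newer {
            self.update_index += 1;
            self.entries.insert(v.id, v.clone());
        }
    }
}

fn main() {
    let mut t = Table { entries: HashMap::new(), update_index: 0 };
    t.insert(&Record { id: 7, version: 2 });
    t.insert(&Record { id: 7, version: 1 }); // stale: dropped
    assert_eq!(t.entries[&7].version, 2);
    assert_eq!(t.update_index, 1);
}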
194  src/ecdsa.rs  Normal file
@@ -0,0 +1,194 @@
use packet::{Packet, SharedPackets};
use std::mem::size_of;
use transaction::{PUB_KEY_OFFSET, SIGNED_DATA_OFFSET, SIG_OFFSET};

pub const TX_OFFSET: usize = 4;

#[cfg(feature = "cuda")]
#[repr(C)]
struct Elems {
    elems: *const Packet,
    num: u32,
}

#[cfg(feature = "cuda")]
#[link(name = "cuda_verify_ed25519")]
extern "C" {
    fn ed25519_verify_many(
        vecs: *const Elems,
        num: u32,          //number of vecs
        message_size: u32, //size of each element inside the elems field of the vec
        public_key_offset: u32,
        signature_offset: u32,
        signed_message_offset: u32,
        signed_message_len_offset: u32,
        out: *mut u8, //combined length of all the items in vecs
    ) -> u32;
}

#[cfg(not(feature = "cuda"))]
fn verify_packet(packet: &Packet) -> u8 {
    use ring::signature;
    use signature::{PublicKey, Signature};
    use untrusted;

    let msg_start = TX_OFFSET + SIGNED_DATA_OFFSET;
    let sig_start = TX_OFFSET + SIG_OFFSET;
    let sig_end = sig_start + size_of::<Signature>();
    let pub_key_start = TX_OFFSET + PUB_KEY_OFFSET;
    let pub_key_end = pub_key_start + size_of::<PublicKey>();

    if packet.meta.size <= msg_start {
        return 0;
    }

    let msg_end = packet.meta.size;
    signature::verify(
        &signature::ED25519,
        untrusted::Input::from(&packet.data[pub_key_start..pub_key_end]),
        untrusted::Input::from(&packet.data[msg_start..msg_end]),
        untrusted::Input::from(&packet.data[sig_start..sig_end]),
    ).is_ok() as u8
}

#[cfg(not(feature = "cuda"))]
pub fn ed25519_verify(batches: &Vec<SharedPackets>) -> Vec<Vec<u8>> {
    use rayon::prelude::*;

    batches
        .into_par_iter()
        .map(|p| {
            p.read()
                .unwrap()
                .packets
                .par_iter()
                .map(verify_packet)
                .collect()
        })
        .collect()
}

#[cfg(feature = "cuda")]
pub fn ed25519_verify(batches: &Vec<SharedPackets>) -> Vec<Vec<u8>> {
    use packet::PACKET_DATA_SIZE;

    let mut out = Vec::new();
    let mut elems = Vec::new();
    let mut locks = Vec::new();
    let mut rvs = Vec::new();

    for packets in batches {
        locks.push(packets.read().unwrap());
    }
    let mut num = 0;
    for p in locks {
        elems.push(Elems {
            elems: p.packets.as_ptr(),
            num: p.packets.len() as u32,
        });
        let mut v = Vec::new();
        v.resize(p.packets.len(), 0);
        rvs.push(v);
        num += p.packets.len();
    }
    out.resize(num, 0);
    trace!("Starting verify num packets: {}", num);
    trace!("elem len: {}", elems.len() as u32);
    trace!("packet sizeof: {}", size_of::<Packet>() as u32);
    trace!("pub key: {}", (TX_OFFSET + PUB_KEY_OFFSET) as u32);
    trace!("sig offset: {}", (TX_OFFSET + SIG_OFFSET) as u32);
    trace!("sign data: {}", (TX_OFFSET + SIGNED_DATA_OFFSET) as u32);
    trace!("len offset: {}", PACKET_DATA_SIZE as u32);
    unsafe {
        let res = ed25519_verify_many(
            elems.as_ptr(),
            elems.len() as u32,
            size_of::<Packet>() as u32,
            (TX_OFFSET + PUB_KEY_OFFSET) as u32,
            (TX_OFFSET + SIG_OFFSET) as u32,
            (TX_OFFSET + SIGNED_DATA_OFFSET) as u32,
            PACKET_DATA_SIZE as u32,
            out.as_mut_ptr(),
        );
        if res != 0 {
            trace!("RETURN!!!: {}", res);
        }
    }
    trace!("done verify");
    let mut num = 0;
    for vs in rvs.iter_mut() {
        for mut v in vs.iter_mut() {
            *v = out[num];
            if *v != 0 {
                trace!("VERIFIED PACKET!!!!!");
            }
            num += 1;
        }
    }
    rvs
}

#[cfg(test)]
mod tests {
    use accountant_skel::Request;
    use bincode::serialize;
    use ecdsa;
    use packet::{Packet, Packets, SharedPackets};
    use std::sync::RwLock;
    use transaction::test_tx;
    use transaction::Transaction;

    fn make_packet_from_transaction(tr: Transaction) -> Packet {
        let tx = serialize(&Request::Transaction(tr)).unwrap();
        let mut packet = Packet::default();
        packet.meta.size = tx.len();
        packet.data[..packet.meta.size].copy_from_slice(&tx);
        return packet;
    }

    fn test_verify_n(n: usize, modify_data: bool) {
        let tr = test_tx();
        let mut packet = make_packet_from_transaction(tr);

        // jumble some data to test failure
        if modify_data {
            packet.data[20] = 10;
        }

        // generate packet vector
        let mut packets = Packets::default();
        packets.packets = Vec::new();
        for _ in 0..n {
            packets.packets.push(packet.clone());
        }
        let shared_packets = SharedPackets::new(RwLock::new(packets));
        let batches = vec![shared_packets.clone(), shared_packets.clone()];

        // verify packets
        let ans = ecdsa::ed25519_verify(&batches);

        // check result
        let ref_ans = if modify_data { 0u8 } else { 1u8 };
        assert_eq!(ans, vec![vec![ref_ans; n], vec![ref_ans; n]]);
    }

    #[test]
    fn test_verify_zero() {
        test_verify_n(0, false);
    }

    #[test]
    fn test_verify_one() {
        test_verify_n(1, false);
    }

    #[test]
    fn test_verify_seventy_one() {
        test_verify_n(71, false);
    }

    #[test]
    fn test_verify_fail() {
        test_verify_n(5, true);
    }
}
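On the CPU path, ed25519_verify is just two nested layers of rayon parallelism: batches in parallel, packets within each batch in parallel. A self-contained sketch of that shape, with a trivial parity check standing in for signature verification:

// Self-contained sketch of the two-level rayon parallelism used by the CPU
// fallback above; only the shape matches, the verify step is faked.
extern crate rayon;
use rayon::prelude::*;

fn verify(byte: &u8) -> u8 {
    (byte % 2 == 0) as u8 // stand-in for ed25519 verification
}

fn main() {
    let batches: Vec<Vec<u8>> = vec![vec![2, 3, 4], vec![5, 6]];
    let results: Vec<Vec<u8>> = batches
        .par_iter()                                        // batches in parallel
        .map(|batch| batch.par_iter().map(verify).collect()) // packets in parallel
        .collect();
    assert_eq!(results, vec![vec![1, 0, 1], vec![0, 1]]);
}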
54  src/entry.rs
@@ -43,8 +43,25 @@ impl Entry {
     }
 }

+fn add_event_data(hash_data: &mut Vec<u8>, event: &Event) {
+    match *event {
+        Event::Transaction(ref tr) => {
+            hash_data.push(0u8);
+            hash_data.extend_from_slice(&tr.sig);
+        }
+        Event::Signature { ref sig, .. } => {
+            hash_data.push(1u8);
+            hash_data.extend_from_slice(sig);
+        }
+        Event::Timestamp { ref sig, .. } => {
+            hash_data.push(2u8);
+            hash_data.extend_from_slice(sig);
+        }
+    }
+}
+
 /// Creates the hash `num_hashes` after `start_hash`. If the event contains
-/// signature, the final hash will be a hash of both the previous ID and
+/// a signature, the final hash will be a hash of both the previous ID and
 /// the signature.
 pub fn next_hash(start_hash: &Hash, num_hashes: u64, events: &[Event]) -> Hash {
     let mut id = *start_hash;
@@ -55,17 +72,16 @@ pub fn next_hash(start_hash: &Hash, num_hashes: u64, events: &[Event]) -> Hash {
     // Hash all the event data
     let mut hash_data = vec![];
     for event in events {
-        let sig = event.get_signature();
-        if let Some(sig) = sig {
-            hash_data.extend_from_slice(&sig);
-        }
+        add_event_data(&mut hash_data, event);
     }

     if !hash_data.is_empty() {
-        return extend_and_hash(&id, &hash_data);
-    }
+        extend_and_hash(&id, &hash_data)
+    } else if num_hashes != 0 {
+        hash(&id)
+    } else {
         id
+    }
 }

 /// Creates the next Entry `num_hashes` after `start_hash`.
@@ -99,6 +115,7 @@ pub fn next_tick(start_hash: &Hash, num_hashes: u64) -> Entry {
 #[cfg(test)]
 mod tests {
     use super::*;
+    use chrono::prelude::*;
     use entry::create_entry;
     use event::Event;
     use hash::hash;
@@ -132,9 +149,28 @@ mod tests {
         assert!(!e0.verify(&zero));
     }

+    #[test]
+    fn test_witness_reorder_attack() {
+        let zero = Hash::default();
+
+        // First, verify entries
+        let keypair = KeyPair::new();
+        let tr0 = Event::new_timestamp(&keypair, Utc::now());
+        let tr1 = Event::new_signature(&keypair, Default::default());
+        let mut e0 = create_entry(&zero, 0, vec![tr0.clone(), tr1.clone()]);
+        assert!(e0.verify(&zero));
+
+        // Next, swap two witness events and ensure verification fails.
+        e0.events[0] = tr1; // <-- attack
+        e0.events[1] = tr0;
+        assert!(!e0.verify(&zero));
+    }
+
     #[test]
     fn test_next_tick() {
         let zero = Hash::default();
-        assert_eq!(next_tick(&zero, 1).num_hashes, 1)
+        let tick = next_tick(&zero, 1);
+        assert_eq!(tick.num_hashes, 1);
+        assert_ne!(tick.id, zero);
     }
 }
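add_event_data is what makes the entry hash order-sensitive: each event contributes a type-tag byte plus its signature bytes, appended in event order, so swapping two witnesses changes the hashed input, which is exactly what test_witness_reorder_attack checks. A self-contained sketch of the idea, with a toy position-weighted mix standing in for SHA-256:

// Self-contained sketch of why the ordered, type-tagged bytes make the entry
// hash order-sensitive; the "hash" here is a toy position-weighted sum.
fn mix(hash_data: &[u8]) -> u64 {
    hash_data
        .iter()
        .enumerate()
        .map(|(i, b)| (i as u64 + 1) * (*b as u64))
        .sum()
}

fn main() {
    // (type tag, signature byte) pairs, as add_event_data would emit them.
    let tr0 = [2u8, 0xAA]; // Timestamp
    let tr1 = [1u8, 0xBB]; // Signature
    let ordered: Vec<u8> = tr0.iter().chain(&tr1).cloned().collect();
    let swapped: Vec<u8> = tr1.iter().chain(&tr0).cloned().collect();
    // Different orderings hash differently, so the reordered entry fails verify.
    assert_ne!(mix(&ordered), mix(&swapped));
}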
420  src/erasure.rs  Normal file
@@ -0,0 +1,420 @@
// Support erasure coding

use packet::{BlobRecycler, SharedBlob};
use std::result;

//TODO(sakridge) pick these values
const NUM_CODED: usize = 10;
const MAX_MISSING: usize = 2;
const NUM_DATA: usize = NUM_CODED - MAX_MISSING;

#[derive(Debug, PartialEq, Eq)]
pub enum ErasureError {
    NotEnoughBlocksToDecode,
    DecodeError,
    InvalidBlockSize,
}

pub type Result<T> = result::Result<T, ErasureError>;

// k = number of data devices
// m = number of coding devices
// w = word size

extern "C" {
    fn jerasure_matrix_encode(
        k: i32,
        m: i32,
        w: i32,
        matrix: *const i32,
        data_ptrs: *const *const u8,
        coding_ptrs: *const *mut u8,
        size: i32,
    );
    fn jerasure_matrix_decode(
        k: i32,
        m: i32,
        w: i32,
        matrix: *const i32,
        row_k_ones: i32,
        erasures: *const i32,
        data_ptrs: *const *mut u8,
        coding_ptrs: *const *const u8,
        size: i32,
    ) -> i32;
    fn galois_single_divide(a: i32, b: i32, w: i32) -> i32;
}

fn get_matrix(m: i32, k: i32, w: i32) -> Vec<i32> {
    let mut matrix = vec![0; (m * k) as usize];
    for i in 0..m {
        for j in 0..k {
            unsafe {
                matrix[(i * k + j) as usize] = galois_single_divide(1, i ^ (m + j), w);
            }
        }
    }
    matrix
}

pub const ERASURE_W: i32 = 32;

// Generate coding blocks into coding
// There are some alignment restrictions, blocks should be aligned by 16 bytes
// which means their size should be >= 16 bytes
pub fn generate_coding_blocks(coding: &mut [&mut [u8]], data: &[&[u8]]) -> Result<()> {
    if data.len() == 0 {
        return Ok(());
    }
    let m = coding.len() as i32;
    let block_len = data[0].len();
    let matrix: Vec<i32> = get_matrix(m, data.len() as i32, ERASURE_W);
    let mut coding_arg = Vec::new();
    let mut data_arg = Vec::new();
    for block in data {
        if block_len != block.len() {
            return Err(ErasureError::InvalidBlockSize);
        }
        data_arg.push(block.as_ptr());
    }
    for mut block in coding {
        if block_len != block.len() {
            return Err(ErasureError::InvalidBlockSize);
        }
        coding_arg.push(block.as_mut_ptr());
    }

    unsafe {
        jerasure_matrix_encode(
            data.len() as i32,
            m,
            ERASURE_W,
            matrix.as_ptr(),
            data_arg.as_ptr(),
            coding_arg.as_ptr(),
            data[0].len() as i32,
        );
    }
    Ok(())
}

// Recover data + coding blocks into data blocks
// data: array of blocks to recover into
// coding: array of coding blocks
// erasures: list of indices in data where blocks should be recovered
pub fn decode_blocks(data: &mut [&mut [u8]], coding: &[&[u8]], erasures: &[i32]) -> Result<()> {
    if data.len() == 0 {
        return Ok(());
    }
    let block_len = data[0].len();
    let matrix: Vec<i32> = get_matrix(coding.len() as i32, data.len() as i32, ERASURE_W);

    // generate coding pointers, blocks should be the same size
    let mut coding_arg: Vec<*const u8> = Vec::new();
    for x in coding.iter() {
        if x.len() != block_len {
            return Err(ErasureError::InvalidBlockSize);
        }
        coding_arg.push(x.as_ptr());
    }

    // generate data pointers, blocks should be the same size
    let mut data_arg: Vec<*mut u8> = Vec::new();
    for x in data.iter_mut() {
        if x.len() != block_len {
            return Err(ErasureError::InvalidBlockSize);
        }
        data_arg.push(x.as_mut_ptr());
    }
    unsafe {
        let ret = jerasure_matrix_decode(
            data.len() as i32,
            coding.len() as i32,
            ERASURE_W,
            matrix.as_ptr(),
            0,
            erasures.as_ptr(),
            data_arg.as_ptr(),
            coding_arg.as_ptr(),
            data[0].len() as i32,
        );
        trace!("jerasure_matrix_decode ret: {}", ret);
        for x in data[erasures[0] as usize][0..8].iter() {
            trace!("{} ", x)
        }
        trace!("");
        if ret < 0 {
            return Err(ErasureError::DecodeError);
        }
    }
    Ok(())
}

// Generate coding blocks in window from consumed to consumed+NUM_DATA
pub fn generate_coding(
    re: &BlobRecycler,
    window: &mut Vec<Option<SharedBlob>>,
    consumed: usize,
) -> Result<()> {
    let mut data_blobs = Vec::new();
    let mut coding_blobs = Vec::new();
    let mut data_locks = Vec::new();
    let mut data_ptrs: Vec<&[u8]> = Vec::new();
    let mut coding_locks = Vec::new();
    let mut coding_ptrs: Vec<&mut [u8]> = Vec::new();
    for i in consumed..consumed + NUM_DATA {
        let n = i % window.len();
        data_blobs.push(window[n].clone().unwrap());
    }
    for b in &data_blobs {
        data_locks.push(b.write().unwrap());
    }
    for (i, l) in data_locks.iter_mut().enumerate() {
        trace!("i: {} data: {}", i, l.data[0]);
        data_ptrs.push(&l.data);
    }

    // generate coding ptr array
    let coding_start = consumed + NUM_DATA;
    let coding_end = consumed + NUM_CODED;
    for i in coding_start..coding_end {
        let n = i % window.len();
        window[n] = Some(re.allocate());
        coding_blobs.push(window[n].clone().unwrap());
    }
    for b in &coding_blobs {
        coding_locks.push(b.write().unwrap());
    }
    for (i, l) in coding_locks.iter_mut().enumerate() {
        trace!("i: {} data: {}", i, l.data[0]);
        coding_ptrs.push(&mut l.data);
    }

    generate_coding_blocks(coding_ptrs.as_mut_slice(), &data_ptrs)?;
    trace!("consumed: {}", consumed);
    Ok(())
}

// Recover missing blocks into window
// missing blocks should be None, will use re
// to allocate new ones. Returns err if not enough
// coding blocks are present to restore
pub fn recover(
    re: &BlobRecycler,
    window: &mut Vec<Option<SharedBlob>>,
    consumed: usize,
) -> Result<()> {
    //recover with erasure coding
    let mut data_missing = 0;
    let mut coded_missing = 0;
    let coding_start = consumed + NUM_DATA;
    let coding_end = consumed + NUM_CODED;
    for i in consumed..coding_end {
        let n = i % window.len();
        if window[n].is_none() {
            if i >= coding_start {
                coded_missing += 1;
            } else {
                data_missing += 1;
            }
        }
    }
    trace!("missing: data: {} coding: {}", data_missing, coded_missing);
    if data_missing > 0 {
        if (data_missing + coded_missing) <= MAX_MISSING {
            let mut blobs: Vec<SharedBlob> = Vec::new();
            let mut locks = Vec::new();
            let mut data_ptrs: Vec<&mut [u8]> = Vec::new();
            let mut coding_ptrs: Vec<&[u8]> = Vec::new();
            let mut erasures: Vec<i32> = Vec::new();
            for i in consumed..coding_end {
                let j = i % window.len();
                let mut b = &mut window[j];
                if b.is_some() {
                    blobs.push(b.clone().unwrap());
                    continue;
                }
                let n = re.allocate();
                *b = Some(n.clone());
                //mark the missing memory
                blobs.push(n);
                erasures.push((i - consumed) as i32);
            }
            erasures.push(-1);
            trace!("erasures: {:?}", erasures);
            //lock everything
            for b in &blobs {
                locks.push(b.write().unwrap());
            }
            for (i, l) in locks.iter_mut().enumerate() {
                if i >= NUM_DATA {
                    trace!("pushing coding: {}", i);
                    coding_ptrs.push(&l.data);
                } else {
                    trace!("pushing data: {}", i);
                    data_ptrs.push(&mut l.data);
                }
            }
            trace!(
                "coding_ptrs.len: {} data_ptrs.len {}",
                coding_ptrs.len(),
                data_ptrs.len()
            );
            decode_blocks(data_ptrs.as_mut_slice(), &coding_ptrs, &erasures)?;
        } else {
            return Err(ErasureError::NotEnoughBlocksToDecode);
        }
    }
    Ok(())
}

#[cfg(test)]
mod test {
    use erasure;
    use packet::{BlobRecycler, SharedBlob, PACKET_DATA_SIZE};
    extern crate env_logger;

    #[test]
    pub fn test_coding() {
        let zero_vec = vec![0; 16];
        let mut vs: Vec<Vec<u8>> = (0..4).map(|i| (i..(16 + i)).collect()).collect();
        let v_orig: Vec<u8> = vs[0].clone();

        let m = 2;
        let mut coding_blocks: Vec<_> = (0..m).map(|_| vec![0u8; 16]).collect();

        {
            let mut coding_blocks_slices: Vec<_> =
                coding_blocks.iter_mut().map(|x| x.as_mut_slice()).collect();
            let v_slices: Vec<_> = vs.iter().map(|x| x.as_slice()).collect();

            assert!(
                erasure::generate_coding_blocks(
                    coding_blocks_slices.as_mut_slice(),
                    v_slices.as_slice()
                ).is_ok()
            );
        }
        trace!("coding blocks:");
        for b in &coding_blocks {
            trace!("{:?}", b);
        }
        let erasure: i32 = 1;
        let erasures = vec![erasure, -1];
        // clear an entry
        vs[erasure as usize].copy_from_slice(zero_vec.as_slice());

        {
            let coding_blocks_slices: Vec<_> = coding_blocks.iter().map(|x| x.as_slice()).collect();
            let mut v_slices: Vec<_> = vs.iter_mut().map(|x| x.as_mut_slice()).collect();

            assert!(
                erasure::decode_blocks(
                    v_slices.as_mut_slice(),
                    coding_blocks_slices.as_slice(),
                    erasures.as_slice(),
                ).is_ok()
            );
        }

        trace!("vs:");
        for v in &vs {
            trace!("{:?}", v);
        }
        assert_eq!(v_orig, vs[0]);
    }

    fn print_window(window: &Vec<Option<SharedBlob>>) {
        for (i, w) in window.iter().enumerate() {
            print!("window({}): ", i);
            if w.is_some() {
                let window_lock = w.clone().unwrap();
                let window_data = window_lock.read().unwrap().data;
                for i in 0..8 {
                    print!("{} ", window_data[i]);
                }
            } else {
                print!("null");
            }
            println!("");
        }
    }

    #[test]
    pub fn test_window_recover() {
        let mut window = Vec::new();
        let blob_recycler = BlobRecycler::default();
        let offset = 4;
        for i in 0..(4 * erasure::NUM_CODED + 1) {
            let b = blob_recycler.allocate();
            let b_ = b.clone();
            let data_len = b.read().unwrap().data.len();
            let mut w = b.write().unwrap();
            w.set_index(i as u64).unwrap();
            assert_eq!(i as u64, w.get_index().unwrap());
            w.meta.size = PACKET_DATA_SIZE;
            for k in 0..data_len {
                w.data[k] = (k + i) as u8;
            }
            window.push(Some(b_));
        }
        println!("** after-gen:");
        print_window(&window);
        assert!(erasure::generate_coding(&blob_recycler, &mut window, offset).is_ok());
        assert!(
            erasure::generate_coding(&blob_recycler, &mut window, offset + erasure::NUM_CODED)
                .is_ok()
        );
        assert!(
            erasure::generate_coding(
                &blob_recycler,
                &mut window,
                offset + (2 * erasure::NUM_CODED)
            ).is_ok()
        );
        assert!(
            erasure::generate_coding(
                &blob_recycler,
                &mut window,
                offset + (3 * erasure::NUM_CODED)
            ).is_ok()
        );
        println!("** after-coding:");
        print_window(&window);
        let refwindow = window[offset + 1].clone();
        window[offset + 1] = None;
        window[offset + 2] = None;
        window[offset + erasure::NUM_CODED + 3] = None;
        window[offset + (2 * erasure::NUM_CODED) + 0] = None;
        window[offset + (2 * erasure::NUM_CODED) + 1] = None;
        window[offset + (2 * erasure::NUM_CODED) + 2] = None;
        let window_l0 = &(window[offset + (3 * erasure::NUM_CODED)]).clone().unwrap();
        window_l0.write().unwrap().data[0] = 55;
        println!("** after-nulling:");
        print_window(&window);
        assert!(erasure::recover(&blob_recycler, &mut window, offset).is_ok());
        assert!(erasure::recover(&blob_recycler, &mut window, offset + erasure::NUM_CODED).is_ok());
        assert!(
            erasure::recover(
                &blob_recycler,
                &mut window,
                offset + (2 * erasure::NUM_CODED)
            ).is_err()
        );
        assert!(
            erasure::recover(
                &blob_recycler,
                &mut window,
                offset + (3 * erasure::NUM_CODED)
            ).is_ok()
        );
        println!("** after-restore:");
        print_window(&window);
        let window_l = window[offset + 1].clone().unwrap();
        let ref_l = refwindow.clone().unwrap();
        assert_eq!(
            window_l.read().unwrap().data.to_vec(),
            ref_l.read().unwrap().data.to_vec()
        );
    }
}
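The chosen parameters imply a fixed redundancy budget: every NUM_DATA = 8 data blobs gain MAX_MISSING = 2 coding blobs, so any 2 of each 10-blob group may be lost at a 25% storage overhead. A self-contained check of that arithmetic:

// Self-contained check of the erasure parameters defined above.
const NUM_CODED: usize = 10;
const MAX_MISSING: usize = 2;
const NUM_DATA: usize = NUM_CODED - MAX_MISSING;

fn main() {
    assert_eq!(NUM_DATA, 8);
    // 2 coding blobs per 8 data blobs: 25% storage overhead.
    let overhead_percent = 100.0 * (NUM_CODED - NUM_DATA) as f64 / NUM_DATA as f64;
    assert_eq!(overhead_percent, 25.0);
    // recover() above refuses once data_missing + coded_missing > MAX_MISSING.
    let (data_missing, coded_missing) = (1, 2);
    assert!(data_missing + coded_missing > MAX_MISSING); // NotEnoughBlocksToDecode
}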
27  src/event.rs
@@ -33,12 +33,13 @@ impl Event {
     }
 }

-    // TODO: Rename this to transaction_signature().
-    /// If the Event is a Transaction, return its Signature.
-    pub fn get_signature(&self) -> Option<Signature> {
-        match *self {
-            Event::Transaction(ref tr) => Some(tr.sig),
-            Event::Signature { .. } | Event::Timestamp { .. } => None,
-        }
-    }
+    /// Create and sign a new Witness Signature. Used for unit-testing.
+    pub fn new_signature(from: &KeyPair, tx_sig: Signature) -> Self {
+        let sig = Signature::clone_from_slice(from.sign(&tx_sig).as_ref());
+        Event::Signature {
+            from: from.pubkey(),
+            tx_sig,
+            sig,
+        }
+    }

@@ -46,9 +47,21 @@ impl Event {
     /// spending plan is valid.
     pub fn verify(&self) -> bool {
         match *self {
-            Event::Transaction(ref tr) => tr.verify(),
+            Event::Transaction(ref tr) => tr.verify_sig(),
             Event::Signature { from, tx_sig, sig } => sig.verify(&from, &tx_sig),
             Event::Timestamp { from, dt, sig } => sig.verify(&from, &serialize(&dt).unwrap()),
         }
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use signature::{KeyPair, KeyPairUtil};
+
+    #[test]
+    fn test_event_verify() {
+        assert!(Event::new_timestamp(&KeyPair::new(), Utc::now()).verify());
+        assert!(Event::new_signature(&KeyPair::new(), Signature::default()).verify());
+    }
+}
src/hash.rs
@@ -1,7 +1,7 @@
 //! The `hash` module provides functions for creating SHA-256 hashes.

-use generic_array::GenericArray;
 use generic_array::typenum::U32;
+use generic_array::GenericArray;
 use sha2::{Digest, Sha256};

 pub type Hash = GenericArray<u8, U32>;
src/historian.rs
@@ -4,8 +4,6 @@
 use entry::Entry;
 use hash::Hash;
 use recorder::{ExitReason, Recorder, Signal};
-use signature::Signature;
-use std::collections::HashSet;
 use std::sync::mpsc::{sync_channel, Receiver, SyncSender};
 use std::thread::{spawn, JoinHandle};
 use std::time::Instant;
@@ -14,32 +12,21 @@ pub struct Historian {
     pub sender: SyncSender<Signal>,
     pub receiver: Receiver<Entry>,
     pub thread_hdl: JoinHandle<ExitReason>,
-    pub signatures: HashSet<Signature>,
 }

 impl Historian {
     pub fn new(start_hash: &Hash, ms_per_tick: Option<u64>) -> Self {
-        let (sender, event_receiver) = sync_channel(1000);
-        let (entry_sender, receiver) = sync_channel(1000);
+        let (sender, event_receiver) = sync_channel(10_000);
+        let (entry_sender, receiver) = sync_channel(10_000);
         let thread_hdl =
             Historian::create_recorder(*start_hash, ms_per_tick, event_receiver, entry_sender);
-        let signatures = HashSet::new();
         Historian {
             sender,
             receiver,
             thread_hdl,
-            signatures,
         }
     }

-    pub fn reserve_signature(&mut self, sig: &Signature) -> bool {
-        if self.signatures.contains(sig) {
-            return false;
-        }
-        self.signatures.insert(*sig);
-        true
-    }
-
     /// A background thread that will continue tagging received Event messages and
     /// sending back Entry messages until either the receiver or sender channel is closed.
     fn create_recorder(
@@ -66,7 +53,7 @@ impl Historian {
 #[cfg(test)]
 mod tests {
     use super::*;
-    use ledger::*;
+    use ledger::Block;
     use std::thread::sleep;
     use std::time::Duration;

@@ -95,7 +82,7 @@ mod tests {
             ExitReason::RecvDisconnected
         );

-        assert!(verify_slice(&[entry0, entry1, entry2], &zero));
+        assert!([entry0, entry1, entry2].verify(&zero));
     }

     #[test]
@@ -110,20 +97,11 @@ mod tests {
         );
     }

-    #[test]
-    fn test_duplicate_event_signature() {
-        let zero = Hash::default();
-        let mut hist = Historian::new(&zero, None);
-        let sig = Signature::default();
-        assert!(hist.reserve_signature(&sig));
-        assert!(!hist.reserve_signature(&sig));
-    }
-
     #[test]
     fn test_ticking_historian() {
         let zero = Hash::default();
         let hist = Historian::new(&zero, Some(20));
-        sleep(Duration::from_millis(30));
+        sleep(Duration::from_millis(300));
         hist.sender.send(Signal::Tick).unwrap();
         drop(hist.sender);
         let entries: Vec<Entry> = hist.receiver.iter().collect();
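The capacity bump from 1000 to 10_000 matters because sync_channel is bounded: once the queue is full, the sending side blocks until the recorder drains it. A self-contained sketch of that backpressure behavior:

// Self-contained sketch of bounded-channel backpressure; capacity 2 stands in
// for the Historian's 10_000.
use std::sync::mpsc::sync_channel;

fn main() {
    let (sender, receiver) = sync_channel(2);
    sender.send(1).unwrap();
    sender.send(2).unwrap();
    // A third send() would block until the receiver drains; try_send reports it.
    assert!(sender.try_send(3).is_err());
    assert_eq!(receiver.recv().unwrap(), 1);
    assert!(sender.try_send(3).is_ok()); // capacity freed
}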
src/ledger.rs
@@ -5,11 +5,17 @@ use entry::{next_tick, Entry};
 use hash::Hash;
 use rayon::prelude::*;

-/// Verifies the hashes and counts of a slice of events are all consistent.
-pub fn verify_slice(entries: &[Entry], start_hash: &Hash) -> bool {
-    let genesis = [Entry::new_tick(Default::default(), start_hash)];
-    let entry_pairs = genesis.par_iter().chain(entries).zip(entries);
+pub trait Block {
+    /// Verifies the hashes and counts of a slice of events are all consistent.
+    fn verify(&self, start_hash: &Hash) -> bool;
+}
+
+impl Block for [Entry] {
+    fn verify(&self, start_hash: &Hash) -> bool {
+        let genesis = [Entry::new_tick(0, start_hash)];
+        let entry_pairs = genesis.par_iter().chain(self).zip(self);
         entry_pairs.all(|(x0, x1)| x1.verify(&x0.id))
+    }
 }

 /// Create a vector of Ticks of length `len` from `start_hash` hash and `num_hashes`.
@@ -33,14 +39,14 @@ mod tests {
     fn test_verify_slice() {
         let zero = Hash::default();
         let one = hash(&zero);
-        assert!(verify_slice(&vec![], &zero)); // base case
-        assert!(verify_slice(&vec![Entry::new_tick(0, &zero)], &zero)); // singleton case 1
-        assert!(!verify_slice(&vec![Entry::new_tick(0, &zero)], &one)); // singleton case 2, bad
-        assert!(verify_slice(&next_ticks(&zero, 0, 2), &zero)); // inductive step
+        assert!(vec![][..].verify(&zero)); // base case
+        assert!(vec![Entry::new_tick(0, &zero)][..].verify(&zero)); // singleton case 1
+        assert!(!vec![Entry::new_tick(0, &zero)][..].verify(&one)); // singleton case 2, bad
+        assert!(next_ticks(&zero, 0, 2)[..].verify(&zero)); // inductive step

         let mut bad_ticks = next_ticks(&zero, 0, 2);
         bad_ticks[1].id = one;
-        assert!(!verify_slice(&bad_ticks, &zero)); // inductive step, bad
+        assert!(!bad_ticks.verify(&zero)); // inductive step, bad
     }
 }

@@ -52,10 +58,10 @@ mod bench {

     #[bench]
     fn event_bench(bencher: &mut Bencher) {
-        let start_hash = Default::default();
-        let events = next_ticks(&start_hash, 10_000, 8);
+        let start_hash = Hash::default();
+        let entries = next_ticks(&start_hash, 10_000, 8);
         bencher.iter(|| {
-            assert!(verify_slice(&events, &start_hash));
+            assert!(entries.verify(&start_hash));
         });
     }
 }
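Block::verify works by pairwise chaining: prepend a genesis tick, zip the slice against itself shifted by one, and check each entry against its predecessor's id. A self-contained sketch of the same zip trick, with a toy linking rule (id = predecessor + 1) standing in for the hash check:

// Self-contained sketch of the pairwise chaining used by Block::verify above.
fn verify_chain(entries: &[u64], start: u64) -> bool {
    let genesis = [start];
    genesis
        .iter()
        .chain(entries) // predecessors: start, e0, e1, ...
        .zip(entries)   // successors:           e0, e1, e2, ...
        .all(|(prev, cur)| *cur == prev + 1)
}

fn main() {
    assert!(verify_chain(&[1, 2, 3], 0)); // each entry follows its predecessor
    assert!(!verify_chain(&[1, 3], 0));   // broken link detected
    assert!(verify_chain(&[], 0));        // base case, as in the tests above
}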
10  src/lib.rs
@@ -2,21 +2,29 @@
 pub mod accountant;
 pub mod accountant_skel;
 pub mod accountant_stub;
+pub mod crdt;
+pub mod ecdsa;
 pub mod entry;
+#[cfg(feature = "erasure")]
+pub mod erasure;
 pub mod event;
 pub mod hash;
 pub mod historian;
 pub mod ledger;
 pub mod mint;
+pub mod packet;
 pub mod plan;
 pub mod recorder;
 pub mod result;
 pub mod signature;
 pub mod streamer;
+pub mod subscribers;
 pub mod transaction;
 extern crate bincode;
+extern crate byteorder;
 extern crate chrono;
 extern crate generic_array;
+extern crate libc;
 #[macro_use]
 extern crate log;
 extern crate rayon;
@@ -28,6 +36,8 @@ extern crate serde_json;
 extern crate sha2;
 extern crate untrusted;

+extern crate futures;
+
 #[cfg(test)]
 #[macro_use]
 extern crate matches;
18  src/mint.rs
@@ -1,7 +1,7 @@
 //! The `mint` module is a library for generating the chain's genesis block.

-use entry::Entry;
 use entry::create_entry;
+use entry::Entry;
 use event::Event;
 use hash::{hash, Hash};
 use ring::rand::SystemRandom;
@@ -33,6 +33,10 @@ impl Mint {
         hash(&self.pkcs8)
     }

+    pub fn last_id(&self) -> Hash {
+        self.create_entries()[1].id
+    }
+
     pub fn keypair(&self) -> KeyPair {
         KeyPair::from_pkcs8(Input::from(&self.pkcs8)).unwrap()
     }
@@ -54,17 +58,23 @@ impl Mint {
     }
 }

+#[derive(Serialize, Deserialize, Debug)]
+pub struct MintDemo {
+    pub mint: Mint,
+    pub users: Vec<(Vec<u8>, i64)>,
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;
-    use ledger::verify_slice;
+    use ledger::Block;
     use plan::Plan;

     #[test]
     fn test_create_events() {
         let mut events = Mint::new(100).create_events().into_iter();
         if let Event::Transaction(tr) = events.next().unwrap() {
-            if let Plan::Pay(payment) = tr.plan {
+            if let Plan::Pay(payment) = tr.data.plan {
                 assert_eq!(tr.from, payment.to);
             }
         }
@@ -74,6 +84,6 @@ mod tests {
     #[test]
     fn test_verify_entries() {
         let entries = Mint::new(100).create_entries();
-        assert!(verify_slice(&entries, &entries[0].id));
+        assert!(entries[..].verify(&entries[0].id));
     }
 }
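last_id() leans on the genesis shape established above: a fresh ledger has exactly two entries, and the id of the second (the deposit entry) is what new transactions should reference. A stand-in sketch of that indexing assumption; the toy `Entry` here is not the crate's real type:

// Stand-in sketch of the two-entry genesis assumption behind Mint::last_id().
struct Entry {
    id: u64, // stands in for the entry hash
}

fn create_entries() -> Vec<Entry> {
    vec![Entry { id: 100 }, Entry { id: 200 }] // seed entry, deposit entry
}

fn last_id() -> u64 {
    create_entries()[1].id // same indexing as Mint::last_id() above
}

fn main() {
    assert_eq!(last_id(), 200);
}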
396  src/packet.rs  Normal file
@@ -0,0 +1,396 @@
//! The `packet` module defines data structures and methods to pull data from the network.
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use result::{Error, Result};
use std::collections::VecDeque;
use std::fmt;
use std::io;
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, UdpSocket};
use std::sync::{Arc, Mutex, RwLock};
use std::mem::size_of;

pub type SharedPackets = Arc<RwLock<Packets>>;
pub type SharedBlob = Arc<RwLock<Blob>>;
pub type PacketRecycler = Recycler<Packets>;
pub type BlobRecycler = Recycler<Blob>;

pub const NUM_PACKETS: usize = 1024 * 8;
const BLOB_SIZE: usize = 64 * 1024;
pub const PACKET_DATA_SIZE: usize = 256;
pub const NUM_BLOBS: usize = (NUM_PACKETS * PACKET_DATA_SIZE) / BLOB_SIZE;

#[derive(Clone, Default)]
#[repr(C)]
pub struct Meta {
    pub size: usize,
    pub addr: [u16; 8],
    pub port: u16,
    pub v6: bool,
}

#[derive(Clone)]
#[repr(C)]
pub struct Packet {
    pub data: [u8; PACKET_DATA_SIZE],
    pub meta: Meta,
}

impl fmt::Debug for Packet {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(
            f,
            "Packet {{ size: {:?}, addr: {:?} }}",
            self.meta.size,
            self.meta.addr()
        )
    }
}

impl Default for Packet {
    fn default() -> Packet {
        Packet {
            data: [0u8; PACKET_DATA_SIZE],
            meta: Meta::default(),
        }
    }
}

impl Meta {
    pub fn addr(&self) -> SocketAddr {
        if !self.v6 {
            let addr = [
                self.addr[0] as u8,
                self.addr[1] as u8,
                self.addr[2] as u8,
                self.addr[3] as u8,
            ];
            let ipv4: Ipv4Addr = From::<[u8; 4]>::from(addr);
            SocketAddr::new(IpAddr::V4(ipv4), self.port)
        } else {
            let ipv6: Ipv6Addr = From::<[u16; 8]>::from(self.addr);
            SocketAddr::new(IpAddr::V6(ipv6), self.port)
        }
    }

    pub fn set_addr(&mut self, a: &SocketAddr) {
        match *a {
            SocketAddr::V4(v4) => {
                let ip = v4.ip().octets();
                self.addr[0] = u16::from(ip[0]);
                self.addr[1] = u16::from(ip[1]);
                self.addr[2] = u16::from(ip[2]);
                self.addr[3] = u16::from(ip[3]);
                self.port = a.port();
            }
            SocketAddr::V6(v6) => {
                self.addr = v6.ip().segments();
                self.port = a.port();
                self.v6 = true;
            }
        }
    }
}

#[derive(Debug)]
pub struct Packets {
    pub packets: Vec<Packet>,
}

//auto derive doesn't support large arrays
impl Default for Packets {
    fn default() -> Packets {
        Packets {
            packets: vec![Packet::default(); NUM_PACKETS],
        }
    }
}

#[derive(Clone)]
pub struct Blob {
    pub data: [u8; BLOB_SIZE],
    pub meta: Meta,
}

impl fmt::Debug for Blob {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(
            f,
            "Blob {{ size: {:?}, addr: {:?} }}",
            self.meta.size,
            self.meta.addr()
        )
    }
}

//auto derive doesn't support large arrays
impl Default for Blob {
    fn default() -> Blob {
        Blob {
            data: [0u8; BLOB_SIZE],
            meta: Meta::default(),
        }
    }
}

pub struct Recycler<T> {
    gc: Arc<Mutex<Vec<Arc<RwLock<T>>>>>,
}

impl<T: Default> Default for Recycler<T> {
    fn default() -> Recycler<T> {
        Recycler {
            gc: Arc::new(Mutex::new(vec![])),
        }
    }
}

impl<T: Default> Clone for Recycler<T> {
    fn clone(&self) -> Recycler<T> {
        Recycler {
            gc: self.gc.clone(),
        }
    }
}

impl<T: Default> Recycler<T> {
    pub fn allocate(&self) -> Arc<RwLock<T>> {
        let mut gc = self.gc.lock().expect("recycler lock");
        gc.pop()
            .unwrap_or_else(|| Arc::new(RwLock::new(Default::default())))
    }
    pub fn recycle(&self, msgs: Arc<RwLock<T>>) {
        let mut gc = self.gc.lock().expect("recycler lock");
        gc.push(msgs);
    }
}

impl Packets {
    fn run_read_from(&mut self, socket: &UdpSocket) -> Result<usize> {
        self.packets.resize(NUM_PACKETS, Packet::default());
        let mut i = 0;
        //DOCUMENTED SIDE-EFFECT
        //Performance out of the IO without poll
        // * block on the socket until it's readable
        // * set the socket to non blocking
        // * read until it fails
        // * set it back to blocking before returning
|
||||||
|
socket.set_nonblocking(false)?;
|
||||||
|
for p in &mut self.packets {
|
||||||
|
p.meta.size = 0;
|
||||||
|
match socket.recv_from(&mut p.data) {
|
||||||
|
Err(_) if i > 0 => {
|
||||||
|
trace!("got {:?} messages", i);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
info!("recv_from err {:?}", e);
|
||||||
|
return Err(Error::IO(e));
|
||||||
|
}
|
||||||
|
Ok((nrecv, from)) => {
|
||||||
|
p.meta.size = nrecv;
|
||||||
|
p.meta.set_addr(&from);
|
||||||
|
if i == 0 {
|
||||||
|
socket.set_nonblocking(true)?;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
i += 1;
|
||||||
|
}
|
||||||
|
Ok(i)
|
||||||
|
}
|
||||||
|
pub fn recv_from(&mut self, socket: &UdpSocket) -> Result<()> {
|
||||||
|
let sz = self.run_read_from(socket)?;
|
||||||
|
self.packets.resize(sz, Packet::default());
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
pub fn send_to(&self, socket: &UdpSocket) -> Result<()> {
|
||||||
|
for p in &self.packets {
|
||||||
|
let a = p.meta.addr();
|
||||||
|
socket.send_to(&p.data[..p.meta.size], &a)?;
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
const BLOB_INDEX_SIZE: usize = size_of::<u64>();
|
||||||
|
|
||||||
|
impl Blob {
|
||||||
|
pub fn get_index(&self) -> Result<u64> {
|
||||||
|
let mut rdr = io::Cursor::new(&self.data[0..BLOB_INDEX_SIZE]);
|
||||||
|
let r = rdr.read_u64::<LittleEndian>()?;
|
||||||
|
Ok(r)
|
||||||
|
}
|
||||||
|
pub fn set_index(&mut self, ix: u64) -> Result<()> {
|
||||||
|
let mut wtr = vec![];
|
||||||
|
wtr.write_u64::<LittleEndian>(ix)?;
|
||||||
|
self.data[..BLOB_INDEX_SIZE].clone_from_slice(&wtr);
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
pub fn data(&self) -> &[u8] {
|
||||||
|
&self.data[BLOB_INDEX_SIZE..]
|
||||||
|
}
|
||||||
|
pub fn data_mut(&mut self) -> &mut [u8] {
|
||||||
|
&mut self.data[BLOB_INDEX_SIZE..]
|
||||||
|
}
|
||||||
|
pub fn set_size(&mut self, size: usize) {
|
||||||
|
self.meta.size = size + BLOB_INDEX_SIZE;
|
||||||
|
}
|
||||||
|
pub fn recv_from(re: &BlobRecycler, socket: &UdpSocket) -> Result<VecDeque<SharedBlob>> {
|
||||||
|
let mut v = VecDeque::new();
|
||||||
|
//DOCUMENTED SIDE-EFFECT
|
||||||
|
//Performance out of the IO without poll
|
||||||
|
// * block on the socket until its readable
|
||||||
|
// * set the socket to non blocking
|
||||||
|
// * read until it fails
|
||||||
|
// * set it back to blocking before returning
|
||||||
|
socket.set_nonblocking(false)?;
|
||||||
|
for i in 0..NUM_BLOBS {
|
||||||
|
let r = re.allocate();
|
||||||
|
{
|
||||||
|
let mut p = r.write().unwrap();
|
||||||
|
match socket.recv_from(&mut p.data) {
|
||||||
|
Err(_) if i > 0 => {
|
||||||
|
trace!("got {:?} messages", i);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
info!("recv_from err {:?}", e);
|
||||||
|
return Err(Error::IO(e));
|
||||||
|
}
|
||||||
|
Ok((nrecv, from)) => {
|
||||||
|
p.meta.size = nrecv;
|
||||||
|
p.meta.set_addr(&from);
|
||||||
|
if i == 0 {
|
||||||
|
socket.set_nonblocking(true)?;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
v.push_back(r);
|
||||||
|
}
|
||||||
|
Ok(v)
|
||||||
|
}
|
||||||
|
pub fn send_to(
|
||||||
|
re: &BlobRecycler,
|
||||||
|
socket: &UdpSocket,
|
||||||
|
v: &mut VecDeque<SharedBlob>,
|
||||||
|
) -> Result<()> {
|
||||||
|
while let Some(r) = v.pop_front() {
|
||||||
|
{
|
||||||
|
let p = r.read().unwrap();
|
||||||
|
let a = p.meta.addr();
|
||||||
|
socket.send_to(&p.data[..p.meta.size], &a)?;
|
||||||
|
}
|
||||||
|
re.recycle(r);
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod test {
|
||||||
|
use packet::{Blob, BlobRecycler, Packet, PacketRecycler, Packets};
|
||||||
|
use std::collections::VecDeque;
|
||||||
|
use std::io;
|
||||||
|
use std::io::Write;
|
||||||
|
use std::net::UdpSocket;
|
||||||
|
#[test]
|
||||||
|
pub fn packet_recycler_test() {
|
||||||
|
let r = PacketRecycler::default();
|
||||||
|
let p = r.allocate();
|
||||||
|
r.recycle(p);
|
||||||
|
assert_eq!(r.gc.lock().unwrap().len(), 1);
|
||||||
|
let _ = r.allocate();
|
||||||
|
assert_eq!(r.gc.lock().unwrap().len(), 0);
|
||||||
|
}
|
||||||
|
#[test]
|
||||||
|
pub fn blob_recycler_test() {
|
||||||
|
let r = BlobRecycler::default();
|
||||||
|
let p = r.allocate();
|
||||||
|
r.recycle(p);
|
||||||
|
assert_eq!(r.gc.lock().unwrap().len(), 1);
|
||||||
|
let _ = r.allocate();
|
||||||
|
assert_eq!(r.gc.lock().unwrap().len(), 0);
|
||||||
|
}
|
||||||
|
#[test]
|
||||||
|
pub fn packet_send_recv() {
|
||||||
|
let reader = UdpSocket::bind("127.0.0.1:0").expect("bind");
|
||||||
|
let addr = reader.local_addr().unwrap();
|
||||||
|
let sender = UdpSocket::bind("127.0.0.1:0").expect("bind");
|
||||||
|
let saddr = sender.local_addr().unwrap();
|
||||||
|
let r = PacketRecycler::default();
|
||||||
|
let p = r.allocate();
|
||||||
|
p.write().unwrap().packets.resize(10, Packet::default());
|
||||||
|
for m in p.write().unwrap().packets.iter_mut() {
|
||||||
|
m.meta.set_addr(&addr);
|
||||||
|
m.meta.size = 256;
|
||||||
|
}
|
||||||
|
p.read().unwrap().send_to(&sender).unwrap();
|
||||||
|
p.write().unwrap().recv_from(&reader).unwrap();
|
||||||
|
for m in p.write().unwrap().packets.iter_mut() {
|
||||||
|
assert_eq!(m.meta.size, 256);
|
||||||
|
assert_eq!(m.meta.addr(), saddr);
|
||||||
|
}
|
||||||
|
|
||||||
|
r.recycle(p);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
pub fn blob_send_recv() {
|
||||||
|
trace!("start");
|
||||||
|
let reader = UdpSocket::bind("127.0.0.1:0").expect("bind");
|
||||||
|
let addr = reader.local_addr().unwrap();
|
||||||
|
let sender = UdpSocket::bind("127.0.0.1:0").expect("bind");
|
||||||
|
let r = BlobRecycler::default();
|
||||||
|
let p = r.allocate();
|
||||||
|
p.write().unwrap().meta.set_addr(&addr);
|
||||||
|
p.write().unwrap().meta.size = 1024;
|
||||||
|
let mut v = VecDeque::new();
|
||||||
|
v.push_back(p);
|
||||||
|
assert_eq!(v.len(), 1);
|
||||||
|
Blob::send_to(&r, &sender, &mut v).unwrap();
|
||||||
|
trace!("send_to");
|
||||||
|
assert_eq!(v.len(), 0);
|
||||||
|
let mut rv = Blob::recv_from(&r, &reader).unwrap();
|
||||||
|
trace!("recv_from");
|
||||||
|
assert_eq!(rv.len(), 1);
|
||||||
|
let rp = rv.pop_front().unwrap();
|
||||||
|
assert_eq!(rp.write().unwrap().meta.size, 1024);
|
||||||
|
r.recycle(rp);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(all(feature = "ipv6", test))]
|
||||||
|
#[test]
|
||||||
|
pub fn blob_ipv6_send_recv() {
|
||||||
|
let reader = UdpSocket::bind("[::1]:0").expect("bind");
|
||||||
|
let addr = reader.local_addr().unwrap();
|
||||||
|
let sender = UdpSocket::bind("[::1]:0").expect("bind");
|
||||||
|
let r = BlobRecycler::default();
|
||||||
|
let p = r.allocate();
|
||||||
|
p.write().unwrap().meta.set_addr(&addr);
|
||||||
|
p.write().unwrap().meta.size = 1024;
|
||||||
|
let mut v = VecDeque::default();
|
||||||
|
v.push_back(p);
|
||||||
|
Blob::send_to(&r, &sender, &mut v).unwrap();
|
||||||
|
let mut rv = Blob::recv_from(&r, &reader).unwrap();
|
||||||
|
let rp = rv.pop_front().unwrap();
|
||||||
|
assert_eq!(rp.write().unwrap().meta.size, 1024);
|
||||||
|
r.recycle(rp);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
pub fn debug_trait() {
|
||||||
|
write!(io::sink(), "{:?}", Packet::default()).unwrap();
|
||||||
|
write!(io::sink(), "{:?}", Packets::default()).unwrap();
|
||||||
|
write!(io::sink(), "{:?}", Blob::default()).unwrap();
|
||||||
|
}
|
||||||
|
#[test]
|
||||||
|
pub fn blob_test() {
|
||||||
|
let mut b = Blob::default();
|
||||||
|
b.set_index(<u64>::max_value()).unwrap();
|
||||||
|
assert_eq!(b.get_index().unwrap(), <u64>::max_value());
|
||||||
|
b.data_mut()[0] = 1;
|
||||||
|
assert_eq!(b.data()[0], 1);
|
||||||
|
assert_eq!(b.get_index().unwrap(), <u64>::max_value());
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
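
`Recycler` is a simple object pool: `allocate` pops a previously freed `Arc<RwLock<T>>` (or creates a fresh default), and `recycle` pushes it back for reuse, so the hot UDP loops avoid reallocating 64 KiB blob buffers. A minimal usage sketch, assuming the `packet` module above:

    use packet::BlobRecycler;

    fn pool_demo() {
        let recycler = BlobRecycler::default();
        // Pull a buffer out of the pool (freshly defaulted the first time).
        let blob = recycler.allocate();
        blob.write().unwrap().set_size(10);
        // ... fill blob.data_mut() and hand it to Blob::send_to ...
        // Return the buffer so the next allocate() can reuse it.
        recycler.recycle(blob);
    }

Note that recycled buffers are not rezeroed; callers are expected to set `meta` and overwrite whatever payload bytes they send.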
@@ -35,6 +35,7 @@ pub struct Payment {
     pub to: PublicKey,
 }
 
+#[repr(C)]
 #[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
 pub enum Plan {
     Pay(Payment),
@@ -72,11 +73,11 @@ impl Plan {
         )
     }
 
-    /// Return true if the spending plan requires no additional Witnesses.
-    pub fn is_complete(&self) -> bool {
+    /// Return Payment if the spending plan requires no additional Witnesses.
+    pub fn final_payment(&self) -> Option<Payment> {
         match *self {
-            Plan::Pay(_) => true,
-            _ => false,
+            Plan::Pay(ref payment) => Some(payment.clone()),
+            _ => None,
         }
     }
 
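
Replacing the boolean `is_complete` with `final_payment` lets callers test for completeness and extract the payout in a single step. A minimal sketch of a caller, assuming the `Plan` and `Payment` types above; `apply_payment` is a hypothetical stand-in for the accountant's bookkeeping:

    use plan::{Payment, Plan};

    // Hypothetical hook; the real accountant would credit payment.to.
    fn apply_payment(payment: &Payment) {
        println!("pay {} tokens to {:?}", payment.tokens, payment.to);
    }

    fn settle(plan: &Plan) {
        // One call answers both "is the plan complete?" and "who gets paid?".
        if let Some(payment) = plan.final_payment() {
            apply_payment(&payment);
        }
    }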
@@ -34,11 +34,11 @@ pub struct Recorder {
 }
 
 impl Recorder {
-    pub fn new(receiver: Receiver<Signal>, sender: SyncSender<Entry>, start_hash: Hash) -> Self {
+    pub fn new(receiver: Receiver<Signal>, sender: SyncSender<Entry>, last_hash: Hash) -> Self {
         Recorder {
             receiver,
             sender,
-            last_hash: start_hash,
+            last_hash,
             events: vec![],
             num_hashes: 0,
             num_ticks: 0,
@@ -4,6 +4,7 @@ use bincode;
 use serde_json;
 use std;
 use std::any::Any;
+use accountant;
 
 #[derive(Debug)]
 pub enum Error {
@@ -14,6 +15,7 @@ pub enum Error {
     RecvError(std::sync::mpsc::RecvError),
     RecvTimeoutError(std::sync::mpsc::RecvTimeoutError),
     Serialize(std::boxed::Box<bincode::ErrorKind>),
+    AccountingError(accountant::AccountingError),
     SendError,
     Services,
 }
@@ -30,6 +32,11 @@ impl std::convert::From<std::sync::mpsc::RecvTimeoutError> for Error {
         Error::RecvTimeoutError(e)
     }
 }
+impl std::convert::From<accountant::AccountingError> for Error {
+    fn from(e: accountant::AccountingError) -> Error {
+        Error::AccountingError(e)
+    }
+}
 impl<T> std::convert::From<std::sync::mpsc::SendError<T>> for Error {
     fn from(_e: std::sync::mpsc::SendError<T>) -> Error {
         Error::SendError
@@ -70,9 +77,9 @@ mod tests {
     use std::io;
     use std::io::Write;
     use std::net::SocketAddr;
+    use std::sync::mpsc::channel;
     use std::sync::mpsc::RecvError;
     use std::sync::mpsc::RecvTimeoutError;
-    use std::sync::mpsc::channel;
     use std::thread;
 
     fn addr_parse_error() -> Result<SocketAddr> {
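
With the new `From<accountant::AccountingError>` impl, accountant failures now propagate through `?` like every other variant. A minimal sketch, assuming the `result` module above; `step` is a hypothetical call that fails with an `AccountingError`:

    use result::Result;

    // Hypothetical accountant call.
    fn step() -> std::result::Result<(), accountant::AccountingError> {
        Ok(())
    }

    fn process() -> Result<()> {
        // `?` converts AccountingError into Error::AccountingError automatically.
        step()?;
        Ok(())
    }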
@@ -1,7 +1,7 @@
 //! The `signature` module provides functionality for public and private keys.
 
-use generic_array::GenericArray;
 use generic_array::typenum::{U32, U64};
+use generic_array::GenericArray;
 use ring::signature::Ed25519KeyPair;
 use ring::{rand, signature};
 use untrusted;
src/streamer.rs (643 lines)
@@ -1,240 +1,38 @@
-//! The 'streamer` module allows for efficient batch processing of UDP packets.
+//! The `streamer` module defines a set of services for efficiently pulling data from UDP sockets.
+use packet::{Blob, BlobRecycler, PacketRecycler, SharedBlob, SharedPackets, NUM_BLOBS};
-use result::{Error, Result};
-use std::fmt;
-use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, UdpSocket};
+use result::Result;
+use std::collections::VecDeque;
+use std::net::UdpSocket;
 use std::sync::atomic::{AtomicBool, Ordering};
 use std::sync::mpsc;
-use std::sync::{Arc, Mutex, RwLock};
+use std::sync::{Arc, RwLock};
 use std::thread::{spawn, JoinHandle};
 use std::time::Duration;
+use subscribers::Subscribers;
 
-const BLOCK_SIZE: usize = 1024 * 8;
-pub const PACKET_SIZE: usize = 256;
-pub const RESP_SIZE: usize = 64 * 1024;
-pub const NUM_RESP: usize = (BLOCK_SIZE * PACKET_SIZE) / RESP_SIZE;
+pub type PacketReceiver = mpsc::Receiver<SharedPackets>;
+pub type PacketSender = mpsc::Sender<SharedPackets>;
+pub type BlobSender = mpsc::Sender<VecDeque<SharedBlob>>;
+pub type BlobReceiver = mpsc::Receiver<VecDeque<SharedBlob>>;
 
-#[derive(Clone, Default)]
-pub struct Meta {
-    pub size: usize,
-    pub addr: [u16; 8],
-    pub port: u16,
-    pub v6: bool,
-}
-
-#[derive(Clone)]
-pub struct Packet {
-    pub data: [u8; PACKET_SIZE],
-    pub meta: Meta,
-}
-
-impl fmt::Debug for Packet {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        write!(
-            f,
-            "Packet {{ size: {:?}, addr: {:?} }}",
-            self.meta.size,
-            self.meta.get_addr()
-        )
-    }
-}
-
-impl Default for Packet {
-    fn default() -> Packet {
-        Packet {
-            data: [0u8; PACKET_SIZE],
-            meta: Meta::default(),
-        }
-    }
-}
-
-impl Meta {
-    pub fn get_addr(&self) -> SocketAddr {
-        if !self.v6 {
-            let ipv4 = Ipv4Addr::new(
-                self.addr[0] as u8,
-                self.addr[1] as u8,
-                self.addr[2] as u8,
-                self.addr[3] as u8,
-            );
-            SocketAddr::new(IpAddr::V4(ipv4), self.port)
-        } else {
-            let ipv6 = Ipv6Addr::new(
-                self.addr[0],
-                self.addr[1],
-                self.addr[2],
-                self.addr[3],
-                self.addr[4],
-                self.addr[5],
-                self.addr[6],
-                self.addr[7],
-            );
-            SocketAddr::new(IpAddr::V6(ipv6), self.port)
-        }
-    }
-
-    pub fn set_addr(&mut self, a: &SocketAddr) {
-        match *a {
-            SocketAddr::V4(v4) => {
-                let ip = v4.ip().octets();
-                self.addr[0] = u16::from(ip[0]);
-                self.addr[1] = u16::from(ip[1]);
-                self.addr[2] = u16::from(ip[2]);
-                self.addr[3] = u16::from(ip[3]);
-                self.port = a.port();
-            }
-            SocketAddr::V6(v6) => {
-                self.addr = v6.ip().segments();
-                self.port = a.port();
-                self.v6 = true;
-            }
-        }
-    }
-}
-
-#[derive(Debug)]
-pub struct Packets {
-    pub packets: Vec<Packet>,
-}
-
-impl Default for Packets {
-    fn default() -> Packets {
-        Packets {
-            packets: vec![Packet::default(); BLOCK_SIZE],
-        }
-    }
-}
-
-#[derive(Clone)]
-pub struct Response {
-    pub data: [u8; RESP_SIZE],
-    pub meta: Meta,
-}
-
-impl fmt::Debug for Response {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        write!(
-            f,
-            "Response {{ size: {:?}, addr: {:?} }}",
-            self.meta.size,
-            self.meta.get_addr()
-        )
-    }
-}
-
-impl Default for Response {
-    fn default() -> Response {
-        Response {
-            data: [0u8; RESP_SIZE],
-            meta: Meta::default(),
-        }
-    }
-}
-
-#[derive(Debug)]
-pub struct Responses {
-    pub responses: Vec<Response>,
-}
-
-impl Default for Responses {
-    fn default() -> Responses {
-        Responses {
-            responses: vec![Response::default(); NUM_RESP],
-        }
-    }
-}
-
-pub type SharedPackets = Arc<RwLock<Packets>>;
-pub type PacketRecycler = Arc<Mutex<Vec<SharedPackets>>>;
-pub type Receiver = mpsc::Receiver<SharedPackets>;
-pub type Sender = mpsc::Sender<SharedPackets>;
-pub type SharedResponses = Arc<RwLock<Responses>>;
-pub type ResponseRecycler = Arc<Mutex<Vec<SharedResponses>>>;
-pub type Responder = mpsc::Sender<SharedResponses>;
-pub type ResponseReceiver = mpsc::Receiver<SharedResponses>;
-
-impl Packets {
-    fn run_read_from(&mut self, socket: &UdpSocket) -> Result<usize> {
-        self.packets.resize(BLOCK_SIZE, Packet::default());
-        let mut i = 0;
-        socket.set_nonblocking(false)?;
-        for p in &mut self.packets {
-            p.meta.size = 0;
-            match socket.recv_from(&mut p.data) {
-                Err(_) if i > 0 => {
-                    trace!("got {:?} messages", i);
-                    break;
-                }
-                Err(e) => {
-                    info!("recv_from err {:?}", e);
-                    return Err(Error::IO(e));
-                }
-                Ok((nrecv, from)) => {
-                    p.meta.size = nrecv;
-                    p.meta.set_addr(&from);
-                    if i == 0 {
-                        socket.set_nonblocking(true)?;
-                    }
-                }
-            }
-            i += 1;
-        }
-        Ok(i)
-    }
-    fn read_from(&mut self, socket: &UdpSocket) -> Result<()> {
-        let sz = self.run_read_from(socket)?;
-        self.packets.resize(sz, Packet::default());
-        Ok(())
-    }
-}
-
-impl Responses {
-    fn send_to(&self, socket: &UdpSocket, num: &mut usize) -> Result<()> {
-        for p in &self.responses {
-            let a = p.meta.get_addr();
-            socket.send_to(&p.data[..p.meta.size], &a)?;
-            //TODO(anatoly): wtf do we do about errors?
-            *num += 1;
-        }
-        Ok(())
-    }
-}
-
-pub fn allocate<T>(recycler: &Arc<Mutex<Vec<Arc<RwLock<T>>>>>) -> Arc<RwLock<T>>
-where
-    T: Default,
-{
-    let mut gc = recycler.lock().expect("lock");
-    gc.pop()
-        .unwrap_or_else(|| Arc::new(RwLock::new(Default::default())))
-}
-
-pub fn recycle<T>(recycler: &Arc<Mutex<Vec<Arc<RwLock<T>>>>>, msgs: Arc<RwLock<T>>)
-where
-    T: Default,
-{
-    let mut gc = recycler.lock().expect("lock");
-    gc.push(msgs);
-}
-
 fn recv_loop(
     sock: &UdpSocket,
     exit: &Arc<AtomicBool>,
-    recycler: &PacketRecycler,
-    channel: &Sender,
+    re: &PacketRecycler,
+    channel: &PacketSender,
 ) -> Result<()> {
     loop {
-        let msgs = allocate(recycler);
+        let msgs = re.allocate();
         let msgs_ = msgs.clone();
         loop {
-            match msgs.write().unwrap().read_from(sock) {
+            match msgs.write().unwrap().recv_from(sock) {
                 Ok(()) => {
                     channel.send(msgs_)?;
                     break;
                 }
                 Err(_) => {
                     if exit.load(Ordering::Relaxed) {
-                        recycle(recycler, msgs_);
+                        re.recycle(msgs_);
                         return Ok(());
                     }
                 }
@@ -247,7 +45,7 @@ pub fn receiver(
     sock: UdpSocket,
     exit: Arc<AtomicBool>,
     recycler: PacketRecycler,
-    channel: Sender,
+    channel: PacketSender,
 ) -> Result<JoinHandle<()>> {
     let timer = Duration::new(1, 0);
     sock.set_read_timeout(Some(timer))?;
@@ -257,21 +55,18 @@ pub fn receiver(
     }))
 }
 
-fn recv_send(sock: &UdpSocket, recycler: &ResponseRecycler, r: &ResponseReceiver) -> Result<()> {
+fn recv_send(sock: &UdpSocket, recycler: &BlobRecycler, r: &BlobReceiver) -> Result<()> {
     let timer = Duration::new(1, 0);
-    let msgs = r.recv_timeout(timer)?;
-    let msgs_ = msgs.clone();
-    let mut num = 0;
-    msgs.read().unwrap().send_to(sock, &mut num)?;
-    recycle(recycler, msgs_);
+    let mut msgs = r.recv_timeout(timer)?;
+    Blob::send_to(recycler, sock, &mut msgs)?;
     Ok(())
 }
 
 pub fn responder(
     sock: UdpSocket,
     exit: Arc<AtomicBool>,
-    recycler: ResponseRecycler,
-    r: ResponseReceiver,
+    recycler: BlobRecycler,
+    r: BlobReceiver,
 ) -> JoinHandle<()> {
     spawn(move || loop {
         if recv_send(&sock, &recycler, &r).is_err() && exit.load(Ordering::Relaxed) {
@@ -280,10 +75,205 @@ pub fn responder(
     })
 }
 
+//TODO, we would need to stick block authentication before we create the
+//window.
+fn recv_blobs(recycler: &BlobRecycler, sock: &UdpSocket, s: &BlobSender) -> Result<()> {
+    let dq = Blob::recv_from(recycler, sock)?;
+    if !dq.is_empty() {
+        s.send(dq)?;
+    }
+    Ok(())
+}
+
+pub fn blob_receiver(
+    exit: Arc<AtomicBool>,
+    recycler: BlobRecycler,
+    sock: UdpSocket,
+    s: BlobSender,
+) -> Result<JoinHandle<()>> {
+    //DOCUMENTED SIDE-EFFECT
+    //1 second timeout on socket read
+    let timer = Duration::new(1, 0);
+    sock.set_read_timeout(Some(timer))?;
+    let t = spawn(move || loop {
+        if exit.load(Ordering::Relaxed) {
+            break;
+        }
+        let ret = recv_blobs(&recycler, &sock, &s);
+        if ret.is_err() {
+            break;
+        }
+    });
+    Ok(t)
+}
+
+fn recv_window(
+    window: &mut Vec<Option<SharedBlob>>,
+    subs: &Arc<RwLock<Subscribers>>,
+    recycler: &BlobRecycler,
+    consumed: &mut usize,
+    r: &BlobReceiver,
+    s: &BlobSender,
+    retransmit: &BlobSender,
+) -> Result<()> {
+    let timer = Duration::new(1, 0);
+    let mut dq = r.recv_timeout(timer)?;
+    while let Ok(mut nq) = r.try_recv() {
+        dq.append(&mut nq)
+    }
+    {
+        //retransmit all leader blocks
+        let mut retransmitq = VecDeque::new();
+        let rsubs = subs.read().unwrap();
+        for b in &dq {
+            let p = b.read().unwrap();
+            //TODO this check isn't safe against adversarial packets
+            //we need to maintain a sequence window
+            trace!(
+                "idx: {} addr: {:?} leader: {:?}",
+                p.get_index().unwrap(),
+                p.meta.addr(),
+                rsubs.leader.addr
+            );
+            if p.meta.addr() == rsubs.leader.addr {
+                //TODO
+                //need to copy the retransmitted blob
+                //otherwise we get into races with which thread
+                //should do the recycling
+                //
+                //a better abstraction would be to recycle when the blob
+                //is dropped via a weakref to the recycler
+                let nv = recycler.allocate();
+                {
+                    let mut mnv = nv.write().unwrap();
+                    let sz = p.meta.size;
+                    mnv.meta.size = sz;
+                    mnv.data[..sz].copy_from_slice(&p.data[..sz]);
+                }
+                retransmitq.push_back(nv);
+            }
+        }
+        if !retransmitq.is_empty() {
+            retransmit.send(retransmitq)?;
+        }
+    }
+    //send a contiguous set of blocks
+    let mut contq = VecDeque::new();
+    while let Some(b) = dq.pop_front() {
+        let b_ = b.clone();
+        let p = b.write().unwrap();
+        let pix = p.get_index()? as usize;
+        let w = pix % NUM_BLOBS;
+        //TODO, after the blocks are authenticated
+        //if we get different blocks at the same index
+        //that is a network failure/attack
+        trace!("window w: {} size: {}", w, p.meta.size);
+        {
+            if window[w].is_none() {
+                window[w] = Some(b_);
+            } else {
+                debug!("duplicate blob at index {:}", w);
+            }
+            loop {
+                let k = *consumed % NUM_BLOBS;
+                trace!("k: {} consumed: {}", k, *consumed);
+                if window[k].is_none() {
+                    break;
+                }
+                contq.push_back(window[k].clone().unwrap());
+                window[k] = None;
+                *consumed += 1;
+            }
+        }
+    }
+    trace!("sending contq.len: {}", contq.len());
+    if !contq.is_empty() {
+        s.send(contq)?;
+    }
+    Ok(())
+}
+
+pub fn window(
+    exit: Arc<AtomicBool>,
+    subs: Arc<RwLock<Subscribers>>,
+    recycler: BlobRecycler,
+    r: BlobReceiver,
+    s: BlobSender,
+    retransmit: BlobSender,
+) -> JoinHandle<()> {
+    spawn(move || {
+        let mut window = vec![None; NUM_BLOBS];
+        let mut consumed = 0;
+        loop {
+            if exit.load(Ordering::Relaxed) {
+                break;
+            }
+            let _ = recv_window(
+                &mut window,
+                &subs,
+                &recycler,
+                &mut consumed,
+                &r,
+                &s,
+                &retransmit,
+            );
+        }
+    })
+}
+
+fn retransmit(
+    subs: &Arc<RwLock<Subscribers>>,
+    recycler: &BlobRecycler,
+    r: &BlobReceiver,
+    sock: &UdpSocket,
+) -> Result<()> {
+    let timer = Duration::new(1, 0);
+    let mut dq = r.recv_timeout(timer)?;
+    while let Ok(mut nq) = r.try_recv() {
+        dq.append(&mut nq);
+    }
+    {
+        let wsubs = subs.read().unwrap();
+        for b in &dq {
+            let mut mb = b.write().unwrap();
+            wsubs.retransmit(&mut mb, sock)?;
+        }
+    }
+    while let Some(b) = dq.pop_front() {
+        recycler.recycle(b);
+    }
+    Ok(())
+}
+
+/// Service to retransmit messages from the leader to layer 1 nodes.
+/// See `subscribers` for network layer definitions.
+/// # Arguments
+/// * `sock` - Socket to read from. Read timeout is set to 1.
+/// * `exit` - Boolean to signal system exit.
+/// * `subs` - Shared Subscriber structure. This structure needs to be updated and populated by
+///   the accountant.
+/// * `recycler` - Blob recycler.
+/// * `r` - Receive channel for blobs to be retransmitted to all the layer 1 nodes.
+pub fn retransmitter(
+    sock: UdpSocket,
+    exit: Arc<AtomicBool>,
+    subs: Arc<RwLock<Subscribers>>,
+    recycler: BlobRecycler,
+    r: BlobReceiver,
+) -> JoinHandle<()> {
+    spawn(move || loop {
+        if exit.load(Ordering::Relaxed) {
+            break;
+        }
+        let _ = retransmit(&subs, &recycler, &r, &sock);
+    })
+}
+
 #[cfg(all(feature = "unstable", test))]
 mod bench {
     extern crate test;
     use self::test::Bencher;
+    use packet::{Packet, PacketRecycler, PACKET_DATA_SIZE};
     use result::Result;
     use std::net::{SocketAddr, UdpSocket};
     use std::sync::atomic::{AtomicBool, Ordering};
@@ -293,7 +283,7 @@ mod bench {
     use std::thread::{spawn, JoinHandle};
     use std::time::Duration;
     use std::time::SystemTime;
-    use streamer::{allocate, receiver, recycle, Packet, PacketRecycler, Receiver, PACKET_SIZE};
+    use streamer::{receiver, PacketReceiver};
 
     fn producer(
         addr: &SocketAddr,
@@ -301,11 +291,11 @@ mod bench {
         exit: Arc<AtomicBool>,
     ) -> JoinHandle<()> {
         let send = UdpSocket::bind("0.0.0.0:0").unwrap();
-        let msgs = allocate(&recycler);
+        let msgs = recycler.allocate();
         let msgs_ = msgs.clone();
         msgs.write().unwrap().packets.resize(10, Packet::default());
         for w in msgs.write().unwrap().packets.iter_mut() {
-            w.meta.size = PACKET_SIZE;
+            w.meta.size = PACKET_DATA_SIZE;
            w.meta.set_addr(&addr);
         }
         spawn(move || loop {
@@ -314,7 +304,7 @@ mod bench {
             }
             let mut num = 0;
             for p in msgs_.read().unwrap().packets.iter() {
-                let a = p.meta.get_addr();
+                let a = p.meta.addr();
                 send.send_to(&p.data[..p.meta.size], &a).unwrap();
                 num += 1;
             }
@@ -326,7 +316,7 @@ mod bench {
         recycler: PacketRecycler,
         exit: Arc<AtomicBool>,
         rvs: Arc<Mutex<usize>>,
-        r: Receiver,
+        r: PacketReceiver,
     ) -> JoinHandle<()> {
         spawn(move || loop {
             if exit.load(Ordering::Relaxed) {
@@ -337,7 +327,7 @@ mod bench {
                 Ok(msgs) => {
                     let msgs_ = msgs.clone();
                     *rvs.lock().unwrap() += msgs.read().unwrap().packets.len();
-                    recycle(&recycler, msgs_);
+                    recycler.recycle(msgs_);
                 }
                 _ => (),
             }
@@ -347,16 +337,16 @@ mod bench {
         let read = UdpSocket::bind("127.0.0.1:0")?;
         let addr = read.local_addr()?;
         let exit = Arc::new(AtomicBool::new(false));
-        let recycler = Arc::new(Mutex::new(Vec::new()));
+        let pack_recycler = PacketRecycler::default();
 
         let (s_reader, r_reader) = channel();
-        let t_reader = receiver(read, exit.clone(), recycler.clone(), s_reader)?;
-        let t_producer1 = producer(&addr, recycler.clone(), exit.clone());
-        let t_producer2 = producer(&addr, recycler.clone(), exit.clone());
-        let t_producer3 = producer(&addr, recycler.clone(), exit.clone());
+        let t_reader = receiver(read, exit.clone(), pack_recycler.clone(), s_reader)?;
+        let t_producer1 = producer(&addr, pack_recycler.clone(), exit.clone());
+        let t_producer2 = producer(&addr, pack_recycler.clone(), exit.clone());
+        let t_producer3 = producer(&addr, pack_recycler.clone(), exit.clone());
 
         let rvs = Arc::new(Mutex::new(0));
-        let t_sink = sink(recycler.clone(), exit.clone(), rvs.clone(), r_reader);
+        let t_sink = sink(pack_recycler.clone(), exit.clone(), rvs.clone(), r_reader);
 
         let start = SystemTime::now();
         let start_val = *rvs.lock().unwrap();
@@ -383,17 +373,20 @@ mod bench {
 
 #[cfg(test)]
 mod test {
+    use packet::{Blob, BlobRecycler, Packet, PacketRecycler, Packets, PACKET_DATA_SIZE};
+    use std::collections::VecDeque;
     use std::io;
     use std::io::Write;
     use std::net::UdpSocket;
     use std::sync::atomic::{AtomicBool, Ordering};
     use std::sync::mpsc::channel;
-    use std::sync::{Arc, Mutex};
+    use std::sync::{Arc, RwLock};
     use std::time::Duration;
-    use streamer::{allocate, receiver, responder, Packet, Packets, Receiver, Response, Responses,
-                   PACKET_SIZE};
+    use streamer::{blob_receiver, receiver, responder, retransmitter, window, BlobReceiver,
+                   PacketReceiver};
+    use subscribers::{Node, Subscribers};
 
-    fn get_msgs(r: Receiver, num: &mut usize) {
+    fn get_msgs(r: PacketReceiver, num: &mut usize) {
         for _t in 0..5 {
             let timer = Duration::new(1, 0);
             match r.recv_timeout(timer) {
@@ -405,40 +398,11 @@ mod test {
             }
         }
     }
-    #[cfg(ipv6)]
-    #[test]
-    pub fn streamer_send_test_ipv6() {
-        let read = UdpSocket::bind("[::1]:0").expect("bind");
-        let addr = read.local_addr().unwrap();
-        let send = UdpSocket::bind("[::1]:0").expect("bind");
-        let exit = Arc::new(Mutex::new(false));
-        let recycler = Arc::new(Mutex::new(Vec::new()));
-        let (s_reader, r_reader) = channel();
-        let t_receiver = receiver(read, exit.clone(), recycler.clone(), s_reader).unwrap();
-        let (s_responder, r_responder) = channel();
-        let t_responder = responder(send, exit.clone(), recycler.clone(), r_responder);
-        let msgs = allocate(&recycler);
-        msgs.write().unwrap().packets.resize(10, Packet::default());
-        for (i, w) in msgs.write().unwrap().packets.iter_mut().enumerate() {
-            w.data[0] = i as u8;
-            w.size = PACKET_SIZE;
-            w.set_addr(&addr);
-            assert_eq!(w.get_addr(), addr);
-        }
-        s_responder.send(msgs).expect("send");
-        let mut num = 0;
-        get_msgs(r_reader, &mut num);
-        assert_eq!(num, 10);
-        exit.store(true, Ordering::Relaxed);
-        t_receiver.join().expect("join");
-        t_responder.join().expect("join");
-    }
     #[test]
     pub fn streamer_debug() {
         write!(io::sink(), "{:?}", Packet::default()).unwrap();
         write!(io::sink(), "{:?}", Packets::default()).unwrap();
-        write!(io::sink(), "{:?}", Response::default()).unwrap();
-        write!(io::sink(), "{:?}", Responses::default()).unwrap();
+        write!(io::sink(), "{:?}", Blob::default()).unwrap();
     }
     #[test]
     pub fn streamer_send_test() {
@@ -446,22 +410,21 @@ mod test {
         let addr = read.local_addr().unwrap();
         let send = UdpSocket::bind("127.0.0.1:0").expect("bind");
         let exit = Arc::new(AtomicBool::new(false));
-        let packet_recycler = Arc::new(Mutex::new(Vec::new()));
-        let resp_recycler = Arc::new(Mutex::new(Vec::new()));
+        let pack_recycler = PacketRecycler::default();
+        let resp_recycler = BlobRecycler::default();
         let (s_reader, r_reader) = channel();
-        let t_receiver = receiver(read, exit.clone(), packet_recycler.clone(), s_reader).unwrap();
+        let t_receiver = receiver(read, exit.clone(), pack_recycler.clone(), s_reader).unwrap();
         let (s_responder, r_responder) = channel();
         let t_responder = responder(send, exit.clone(), resp_recycler.clone(), r_responder);
-        let msgs = allocate(&resp_recycler);
-        msgs.write()
-            .unwrap()
-            .responses
-            .resize(10, Response::default());
-        for (i, w) in msgs.write().unwrap().responses.iter_mut().enumerate() {
+        let mut msgs = VecDeque::new();
+        for i in 0..10 {
+            let b = resp_recycler.allocate();
+            let b_ = b.clone();
+            let mut w = b.write().unwrap();
             w.data[0] = i as u8;
-            w.meta.size = PACKET_SIZE;
+            w.meta.size = PACKET_DATA_SIZE;
             w.meta.set_addr(&addr);
-            assert_eq!(w.meta.get_addr(), addr);
+            msgs.push_back(b_);
         }
         s_responder.send(msgs).expect("send");
         let mut num = 0;
@@ -471,4 +434,116 @@ mod test {
         t_receiver.join().expect("join");
         t_responder.join().expect("join");
     }
+
+    fn get_blobs(r: BlobReceiver, num: &mut usize) {
+        for _t in 0..5 {
+            let timer = Duration::new(1, 0);
+            match r.recv_timeout(timer) {
+                Ok(m) => {
+                    for (i, v) in m.iter().enumerate() {
+                        assert_eq!(v.read().unwrap().get_index().unwrap() as usize, *num + i);
+                    }
+                    *num += m.len();
+                }
+                e => println!("error {:?}", e),
+            }
+            if *num == 10 {
+                break;
+            }
+        }
+    }
+
+    #[test]
+    pub fn window_send_test() {
+        let read = UdpSocket::bind("127.0.0.1:0").expect("bind");
+        let addr = read.local_addr().unwrap();
+        let send = UdpSocket::bind("127.0.0.1:0").expect("bind");
+        let exit = Arc::new(AtomicBool::new(false));
+        let subs = Arc::new(RwLock::new(Subscribers::new(
+            Node::default(),
+            Node::new([0; 8], 0, send.local_addr().unwrap()),
+            &[],
+        )));
+        let resp_recycler = BlobRecycler::default();
+        let (s_reader, r_reader) = channel();
+        let t_receiver =
+            blob_receiver(exit.clone(), resp_recycler.clone(), read, s_reader).unwrap();
+        let (s_window, r_window) = channel();
+        let (s_retransmit, r_retransmit) = channel();
+        let t_window = window(
+            exit.clone(),
+            subs,
+            resp_recycler.clone(),
+            r_reader,
+            s_window,
+            s_retransmit,
+        );
+        let (s_responder, r_responder) = channel();
+        let t_responder = responder(send, exit.clone(), resp_recycler.clone(), r_responder);
+        let mut msgs = VecDeque::new();
+        for v in 0..10 {
+            let i = 9 - v;
+            let b = resp_recycler.allocate();
+            let b_ = b.clone();
+            let mut w = b.write().unwrap();
+            w.set_index(i).unwrap();
+            assert_eq!(i, w.get_index().unwrap());
+            w.meta.size = PACKET_DATA_SIZE;
+            w.meta.set_addr(&addr);
+            msgs.push_back(b_);
+        }
+        s_responder.send(msgs).expect("send");
+        let mut num = 0;
+        get_blobs(r_window, &mut num);
+        assert_eq!(num, 10);
+        let mut q = r_retransmit.recv().unwrap();
+        while let Ok(mut nq) = r_retransmit.try_recv() {
+            q.append(&mut nq);
+        }
+        assert_eq!(q.len(), 10);
+        exit.store(true, Ordering::Relaxed);
+        t_receiver.join().expect("join");
+        t_responder.join().expect("join");
+        t_window.join().expect("join");
+    }
+
+    #[test]
+    pub fn retransmit() {
+        let read = UdpSocket::bind("127.0.0.1:0").expect("bind");
+        let send = UdpSocket::bind("127.0.0.1:0").expect("bind");
+        let exit = Arc::new(AtomicBool::new(false));
+        let subs = Arc::new(RwLock::new(Subscribers::new(
+            Node::default(),
+            Node::default(),
+            &[Node::new([0; 8], 1, read.local_addr().unwrap())],
+        )));
+        let (s_retransmit, r_retransmit) = channel();
+        let blob_recycler = BlobRecycler::default();
+        let saddr = send.local_addr().unwrap();
+        let t_retransmit = retransmitter(
+            send,
+            exit.clone(),
+            subs,
+            blob_recycler.clone(),
+            r_retransmit,
+        );
+        let mut bq = VecDeque::new();
+        let b = blob_recycler.allocate();
+        b.write().unwrap().meta.size = 10;
+        bq.push_back(b);
+        s_retransmit.send(bq).unwrap();
+        let (s_blob_receiver, r_blob_receiver) = channel();
+        let t_receiver =
+            blob_receiver(exit.clone(), blob_recycler.clone(), read, s_blob_receiver).unwrap();
+        let mut oq = r_blob_receiver.recv().unwrap();
+        assert_eq!(oq.len(), 1);
+        let o = oq.pop_front().unwrap();
+        let ro = o.read().unwrap();
+        assert_eq!(ro.meta.size, 10);
+        assert_eq!(ro.meta.addr(), saddr);
+        exit.store(true, Ordering::Relaxed);
+        t_receiver.join().expect("join");
+        t_retransmit.join().expect("join");
+    }
+
 }
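
The heart of `recv_window` is a fixed-size circular window: each blob lands in slot `index % NUM_BLOBS`, and the `consumed` cursor walks forward over contiguous filled slots, emitting them downstream in order. A standalone sketch of that indexing discipline, with plain `u64` indices standing in for blobs and a deliberately tiny window:

    const NUM_BLOBS: usize = 8; // tiny for illustration

    // Insert one index; return whatever contiguous prefix becomes deliverable.
    fn recv_one(window: &mut Vec<Option<u64>>, consumed: &mut usize, index: u64) -> Vec<u64> {
        let w = index as usize % NUM_BLOBS;
        if window[w].is_none() {
            window[w] = Some(index);
        }
        let mut out = vec![];
        loop {
            let k = *consumed % NUM_BLOBS;
            match window[k].take() {
                Some(v) => {
                    out.push(v);
                    *consumed += 1;
                }
                None => break, // gap: hold everything after it
            }
        }
        out
    }

    fn main() {
        let mut window = vec![None; NUM_BLOBS];
        let mut consumed = 0;
        assert!(recv_one(&mut window, &mut consumed, 1).is_empty()); // 0 still missing
        assert_eq!(recv_one(&mut window, &mut consumed, 0), vec![0, 1]); // gap filled
    }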
src/subscribers.rs (new file, 149 lines)
@@ -0,0 +1,149 @@
+//! The `subscribers` module defines data structures to keep track of nodes on the network.
+//! The network is arranged in layers:
+//!
+//! * layer 0 - Leader.
+//! * layer 1 - As many nodes as we can fit to quickly get reliable `2/3+1` finality
+//! * layer 2 - Everyone else; if layer 1 is `2^10` nodes, layer 2 should be able to fit `2^20` nodes.
+//!
+//! It's up to the external state machine to keep this updated.
+use packet::Blob;
+use rayon::prelude::*;
+use result::{Error, Result};
+use std::net::{SocketAddr, UdpSocket};
+
+use std::fmt;
+
+#[derive(Clone, PartialEq)]
+pub struct Node {
+    pub id: [u64; 8],
+    pub weight: u64,
+    pub addr: SocketAddr,
+}
+
+//sockaddr doesn't implement default
+impl Default for Node {
+    fn default() -> Node {
+        Node {
+            id: [0; 8],
+            weight: 0,
+            addr: "0.0.0.0:0".parse().unwrap(),
+        }
+    }
+}
+
+impl Node {
+    pub fn new(id: [u64; 8], weight: u64, addr: SocketAddr) -> Node {
+        Node { id, weight, addr }
+    }
+    fn key(&self) -> i64 {
+        (self.weight as i64).checked_neg().unwrap()
+    }
+}
+
+impl fmt::Debug for Node {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "Node {{ weight: {} addr: {} }}", self.weight, self.addr)
+    }
+}
+
+pub struct Subscribers {
+    data: Vec<Node>,
+    pub me: Node,
+    pub leader: Node,
+}
+
+impl Subscribers {
+    pub fn new(me: Node, leader: Node, network: &[Node]) -> Subscribers {
+        let mut h = Subscribers {
+            data: vec![],
+            me: me.clone(),
+            leader: leader.clone(),
+        };
+        h.insert(&[me, leader]);
+        h.insert(network);
+        h
+    }
+
+    /// retransmit messages from the leader to layer 1 nodes
+    pub fn retransmit(&self, blob: &mut Blob, s: &UdpSocket) -> Result<()> {
+        let errs: Vec<_> = self.data
+            .par_iter()
+            .map(|i| {
+                if self.me == *i {
+                    return Ok(0);
+                }
+                if self.leader == *i {
+                    return Ok(0);
+                }
+                trace!("retransmit blob to {}", i.addr);
+                s.send_to(&blob.data[..blob.meta.size], &i.addr)
+            })
+            .collect();
+        for e in errs {
+            trace!("retransmit result {:?}", e);
+            match e {
+                Err(e) => return Err(Error::IO(e)),
+                _ => (),
+            }
+        }
+        Ok(())
+    }
+    pub fn insert(&mut self, ns: &[Node]) {
+        self.data.extend_from_slice(ns);
+        self.data.sort_by_key(Node::key);
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use packet::Blob;
+    use rayon::prelude::*;
+    use std::net::UdpSocket;
+    use std::time::Duration;
+    use subscribers::{Node, Subscribers};
+
+    #[test]
+    pub fn subscriber() {
+        let mut me = Node::default();
+        me.weight = 10;
+        let mut leader = Node::default();
+        leader.weight = 11;
+        let mut s = Subscribers::new(me, leader, &[]);
+        assert_eq!(s.data.len(), 2);
+        assert_eq!(s.data[0].weight, 11);
+        assert_eq!(s.data[1].weight, 10);
+        let mut n = Node::default();
+        n.weight = 12;
+        s.insert(&[n]);
+        assert_eq!(s.data.len(), 3);
+        assert_eq!(s.data[0].weight, 12);
+    }
+    #[test]
+    pub fn retransmit() {
+        let s1 = UdpSocket::bind("127.0.0.1:0").expect("bind");
+        let s2 = UdpSocket::bind("127.0.0.1:0").expect("bind");
+        let s3 = UdpSocket::bind("127.0.0.1:0").expect("bind");
+        let n1 = Node::new([0; 8], 0, s1.local_addr().unwrap());
+        let n2 = Node::new([0; 8], 0, s2.local_addr().unwrap());
+        let mut s = Subscribers::new(n1.clone(), n2.clone(), &[]);
+        let n3 = Node::new([0; 8], 0, s3.local_addr().unwrap());
+        s.insert(&[n3]);
+        let mut b = Blob::default();
+        b.meta.size = 10;
+        let s4 = UdpSocket::bind("127.0.0.1:0").expect("bind");
+        s.retransmit(&mut b, &s4).unwrap();
+        let res: Vec<_> = [s1, s2, s3]
+            .into_par_iter()
+            .map(|s| {
+                let mut b = Blob::default();
+                s.set_read_timeout(Some(Duration::new(1, 0))).unwrap();
+                s.recv_from(&mut b.data).is_err()
+            })
+            .collect();
+        assert_eq!(res, [true, true, false]);
+        let mut n4 = Node::default();
+        n4.addr = "255.255.255.255:1".parse().unwrap();
+        s.insert(&[n4]);
+        assert!(s.retransmit(&mut b, &s4).is_err());
+    }
+}
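
`Subscribers::retransmit` fans a blob out to every known node in parallel, skipping `me` and the leader, so a layer 1 node only has to construct the table and hand blobs over. A minimal sketch, assuming the module above; the ids, weights, and addresses are illustrative:

    use packet::Blob;
    use result::Result;
    use std::net::UdpSocket;
    use subscribers::{Node, Subscribers};

    fn fan_out() -> Result<()> {
        let me = Node::new([0; 8], 10, "127.0.0.1:8000".parse().unwrap());
        let leader = Node::new([1; 8], 11, "127.0.0.1:8001".parse().unwrap());
        let peer = Node::new([2; 8], 1, "127.0.0.1:8002".parse().unwrap());
        let subs = Subscribers::new(me, leader, &[peer]);

        let sock = UdpSocket::bind("0.0.0.0:0").unwrap();
        let mut blob = Blob::default();
        blob.meta.size = 64;
        // Sends to peer only; me and the leader are filtered out.
        subs.retransmit(&mut blob, &sock)
    }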
@ -2,18 +2,27 @@
|
|||||||
|
|
||||||
use bincode::serialize;
|
use bincode::serialize;
|
||||||
use chrono::prelude::*;
|
use chrono::prelude::*;
|
||||||
use rayon::prelude::*;
|
|
||||||
use hash::Hash;
|
use hash::Hash;
|
||||||
use plan::{Condition, Payment, Plan};
|
use plan::{Condition, Payment, Plan};
|
||||||
|
use rayon::prelude::*;
|
||||||
use signature::{KeyPair, KeyPairUtil, PublicKey, Signature, SignatureUtil};
|
use signature::{KeyPair, KeyPairUtil, PublicKey, Signature, SignatureUtil};
|
||||||
|
|
||||||
|
pub const SIGNED_DATA_OFFSET: usize = 112;
|
||||||
|
pub const SIG_OFFSET: usize = 8;
|
||||||
|
pub const PUB_KEY_OFFSET: usize = 80;
|
||||||
|
|
||||||
|
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
|
||||||
|
pub struct TransactionData {
|
||||||
|
pub tokens: i64,
|
||||||
|
pub last_id: Hash,
|
||||||
|
pub plan: Plan,
|
||||||
|
}
|
||||||
|
|
||||||
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
|
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
|
||||||
pub struct Transaction {
|
pub struct Transaction {
|
||||||
pub from: PublicKey,
|
|
||||||
pub plan: Plan,
|
|
||||||
pub tokens: i64,
|
|
||||||
pub last_id: Hash,
|
|
||||||
pub sig: Signature,
|
pub sig: Signature,
|
||||||
|
pub from: PublicKey,
|
||||||
|
pub data: TransactionData,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Transaction {
|
impl Transaction {
|
||||||
@ -22,11 +31,13 @@ impl Transaction {
|
|||||||
let from = from_keypair.pubkey();
|
let from = from_keypair.pubkey();
|
||||||
let plan = Plan::Pay(Payment { tokens, to });
|
let plan = Plan::Pay(Payment { tokens, to });
|
||||||
let mut tr = Transaction {
|
let mut tr = Transaction {
|
||||||
from,
|
sig: Signature::default(),
|
||||||
|
data: TransactionData {
|
||||||
plan,
|
plan,
|
||||||
tokens,
|
tokens,
|
||||||
last_id,
|
last_id,
|
||||||
sig: Signature::default(),
|
},
|
||||||
|
from: from,
|
||||||
};
|
};
|
||||||
tr.sign(from_keypair);
|
tr.sign(from_keypair);
|
||||||
tr
|
tr
|
||||||
@ -46,10 +57,12 @@ impl Transaction {
|
|||||||
(Condition::Signature(from), Payment { tokens, to: from }),
|
(Condition::Signature(from), Payment { tokens, to: from }),
|
||||||
);
|
);
|
||||||
let mut tr = Transaction {
|
let mut tr = Transaction {
|
||||||
from,
|
data: TransactionData {
|
||||||
plan,
|
plan,
|
||||||
tokens,
|
tokens,
|
||||||
last_id,
|
last_id,
|
||||||
|
},
|
||||||
|
from: from,
|
||||||
sig: Signature::default(),
|
sig: Signature::default(),
|
||||||
};
|
};
|
||||||
tr.sign(from_keypair);
|
tr.sign(from_keypair);
|
||||||
@ -57,7 +70,7 @@ impl Transaction {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn get_sign_data(&self) -> Vec<u8> {
|
fn get_sign_data(&self) -> Vec<u8> {
|
||||||
serialize(&(&self.from, &self.plan, &self.tokens, &self.last_id)).unwrap()
|
serialize(&(&self.data)).unwrap()
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Sign this transaction.
|
/// Sign this transaction.
|
||||||
@@ -66,20 +79,43 @@ impl Transaction {
         self.sig = Signature::clone_from_slice(keypair.sign(&sign_data).as_ref());
     }
 
-    /// Verify this transaction's signature and its spending plan.
-    pub fn verify(&self) -> bool {
-        self.sig.verify(&self.from, &self.get_sign_data()) && self.plan.verify(self.tokens)
+    pub fn verify_sig(&self) -> bool {
+        self.sig.verify(&self.from, &self.get_sign_data())
     }
+
+    pub fn verify_plan(&self) -> bool {
+        self.data.plan.verify(self.data.tokens)
+    }
+}
+
+#[cfg(test)]
+pub fn test_tx() -> Transaction {
+    let keypair1 = KeyPair::new();
+    let pubkey1 = keypair1.pubkey();
+    let zero = Hash::default();
+    Transaction::new(&keypair1, pubkey1, 42, zero)
+}
+
+#[cfg(test)]
+pub fn memfind<A: Eq>(a: &[A], b: &[A]) -> Option<usize> {
+    assert!(a.len() >= b.len());
+    let end = a.len() - b.len() + 1;
+    for i in 0..end {
+        if a[i..i + b.len()] == b[..] {
+            return Some(i);
+        }
+    }
+    None
 }
 
 /// Verify a batch of signatures.
 pub fn verify_signatures(transactions: &[Transaction]) -> bool {
-    transactions.par_iter().all(|tr| tr.verify())
+    transactions.par_iter().all(|tr| tr.verify_sig())
 }
 
 /// Verify a batch of spending plans.
 pub fn verify_plans(transactions: &[Transaction]) -> bool {
-    transactions.par_iter().all(|tr| tr.plan.verify(tr.tokens))
+    transactions.par_iter().all(|tr| tr.verify_plan())
 }
 
 /// Verify a batch of transactions.
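The single `verify` is split so the two checks can be batched independently: `verify_signatures` and `verify_plans` above each fan the cheaper, per-transaction check out over rayon's `par_iter`. The new `memfind` helper, used by `test_layout` further down to locate fields inside the wire format, is self-contained enough to try directly:

    // Copy of the helper added above, plus a tiny usage check.
    pub fn memfind<A: Eq>(a: &[A], b: &[A]) -> Option<usize> {
        assert!(a.len() >= b.len());
        let end = a.len() - b.len() + 1;
        for i in 0..end {
            if a[i..i + b.len()] == b[..] {
                return Some(i);
            }
        }
        None
    }

    fn main() {
        let haystack = [1u8, 2, 3, 4, 5];
        assert_eq!(memfind(&haystack, &[3, 4]), Some(2)); // offset of first match
        assert_eq!(memfind(&haystack, &[9]), None);       // absent pattern
    }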
@@ -97,7 +133,7 @@ mod tests {
         let keypair = KeyPair::new();
         let zero = Hash::default();
         let tr0 = Transaction::new(&keypair, keypair.pubkey(), 42, zero);
-        assert!(tr0.verify());
+        assert!(tr0.verify_plan());
     }
 
     #[test]
@@ -107,7 +143,7 @@ mod tests {
         let keypair1 = KeyPair::new();
         let pubkey1 = keypair1.pubkey();
         let tr0 = Transaction::new(&keypair0, pubkey1, 42, zero);
-        assert!(tr0.verify());
+        assert!(tr0.verify_plan());
     }
 
     #[test]
@@ -117,10 +153,12 @@ mod tests {
             to: Default::default(),
         });
         let claim0 = Transaction {
-            from: Default::default(),
-            plan,
-            tokens: 0,
-            last_id: Default::default(),
+            data: TransactionData {
+                plan,
+                tokens: 0,
+                last_id: Default::default(),
+            },
+            from: Default::default(),
             sig: Default::default(),
         };
         let buf = serialize(&claim0).unwrap();
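The elided remainder of this test presumably deserializes `buf` and compares the round-trip result against `claim0`. A minimal sketch of that pattern with simplified types (assuming serde derive and bincode's `deserialize`, as used elsewhere in the crate):

    use serde::{Deserialize, Serialize};

    #[derive(Serialize, Deserialize, Debug, Default, PartialEq)]
    struct Claim {
        tokens: i64,
        last_id: [u8; 32],
    }

    fn main() {
        let claim0 = Claim::default();
        let buf = bincode::serialize(&claim0).unwrap();
        let claim1: Claim = bincode::deserialize(&buf).unwrap();
        assert_eq!(claim1, claim0); // round-trip preserves the payload
    }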
@@ -129,14 +167,17 @@ mod tests {
     }
 
     #[test]
-    fn test_bad_event_signature() {
+    fn test_token_attack() {
         let zero = Hash::default();
         let keypair = KeyPair::new();
         let pubkey = keypair.pubkey();
         let mut tr = Transaction::new(&keypair, pubkey, 42, zero);
-        tr.sign(&keypair);
-        tr.tokens = 1_000_000; // <-- attack!
-        assert!(!tr.verify());
+        tr.data.tokens = 1_000_000; // <-- attack, part 1!
+        if let Plan::Pay(ref mut payment) = tr.data.plan {
+            payment.tokens = tr.data.tokens; // <-- attack, part 2!
+        };
+        assert!(tr.verify_plan());
+        assert!(!tr.verify_sig());
     }
 
     #[test]
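The renamed test makes the security model explicit: an attacker can keep the plan internally consistent (`verify_plan` still passes) but cannot fix up the signature, so `verify_sig` fails, because the signature was taken over the original `data`. The same property holds for any Ed25519 signature over a serialized payload; a standalone sketch using the ed25519-dalek crate (the crate under diff wraps its own KeyPair/Signature types, so this is an illustration, not its API):

    // Assumes ed25519-dalek 1.x with rand 0.7's OsRng.
    use ed25519_dalek::{Keypair, Signer, Verifier};
    use rand::rngs::OsRng;

    fn main() {
        let keypair = Keypair::generate(&mut OsRng);
        let mut payload = b"tokens=42".to_vec();
        let sig = keypair.sign(&payload);
        assert!(keypair.public.verify(&payload, &sig).is_ok());

        payload[7] = b'9'; // tamper after signing: "tokens=92"
        assert!(keypair.public.verify(&payload, &sig).is_err());
    }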
@@ -147,11 +188,38 @@ mod tests {
         let pubkey1 = keypair1.pubkey();
         let zero = Hash::default();
         let mut tr = Transaction::new(&keypair0, pubkey1, 42, zero);
-        tr.sign(&keypair0);
-        if let Plan::Pay(ref mut payment) = tr.plan {
+        if let Plan::Pay(ref mut payment) = tr.data.plan {
             payment.to = thief_keypair.pubkey(); // <-- attack!
         };
-        assert!(!tr.verify());
+        assert!(tr.verify_plan());
+        assert!(!tr.verify_sig());
+    }
+
+    #[test]
+    fn test_layout() {
+        let tr = test_tx();
+        let sign_data = tr.get_sign_data();
+        let tx = serialize(&tr).unwrap();
+        assert_matches!(memfind(&tx, &sign_data), Some(SIGNED_DATA_OFFSET));
+        assert_matches!(memfind(&tx, &tr.sig), Some(SIG_OFFSET));
+        assert_matches!(memfind(&tx, &tr.from), Some(PUB_KEY_OFFSET));
+    }
+
+    #[test]
+    fn test_overspend_attack() {
+        let keypair0 = KeyPair::new();
+        let keypair1 = KeyPair::new();
+        let zero = Hash::default();
+        let mut tr = Transaction::new(&keypair0, keypair1.pubkey(), 1, zero);
+        if let Plan::Pay(ref mut payment) = tr.data.plan {
+            payment.tokens = 2; // <-- attack!
+        }
+        assert!(!tr.verify_plan());
+
+        // Also, ensure all branches of the plan spend all tokens
+        if let Plan::Pay(ref mut payment) = tr.data.plan {
+            payment.tokens = 0; // <-- whoops!
+        }
+        assert!(!tr.verify_plan());
     }
 
     #[test]
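Two notes on the added tests. The `test_layout` constants (`SIG_OFFSET`, `PUB_KEY_OFFSET`, `SIGNED_DATA_OFFSET`) are defined outside this excerpt, so their values are not assumed here. And `test_overspend_attack` pins down what `Plan::verify` (also defined outside this excerpt) must mean: every branch of the plan has to spend exactly the transaction's `tokens`, so both the overspend (2) and the underspend (0) are rejected. A minimal sketch of a verify with that contract:

    struct Payment { tokens: i64 }

    enum Plan {
        Pay(Payment), // the real Plan has more branches; each must spend exactly
    }

    impl Plan {
        fn verify(&self, spendable_tokens: i64) -> bool {
            match self {
                Plan::Pay(payment) => payment.tokens == spendable_tokens,
            }
        }
    }

    fn main() {
        assert!(Plan::Pay(Payment { tokens: 1 }).verify(1));  // exact spend: ok
        assert!(!Plan::Pay(Payment { tokens: 2 }).verify(1)); // overspend: rejected
        assert!(!Plan::Pay(Payment { tokens: 0 }).verify(1)); // underspend: rejected
    }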