Compare commits
Comparing v0.2.0...v0.4.0-alpha (247 commits)
(Commit table omitted: the compare view listed 247 commit SHAs, from 116166f62d at the head down to b317d13b44, but the author and date columns were not captured.)
.codecov.yml (new file, 2 lines)
@@ -0,0 +1,2 @@
ignore:
  - "src/bin"
.gitignore (3 changed lines)
@@ -1,4 +1,3 @@
-
-Cargo.lock
 /target/
 **/*.rs.bk
+Cargo.lock
Cargo.toml (53 changed lines)
@@ -1,26 +1,57 @@
 [package]
-name = "silk"
-description = "A silky smooth implementation of the Loom architecture"
-version = "0.2.0"
-documentation = "https://docs.rs/silk"
+name = "solana"
+description = "High-Performance Blockchain"
+version = "0.4.0-alpha"
+documentation = "https://docs.rs/solana"
 homepage = "http://loomprotocol.com/"
-repository = "https://github.com/loomprotocol/silk"
+repository = "https://github.com/solana-labs/solana"
 authors = [
-    "Anatoly Yakovenko <aeyakovenko@gmail.com>",
-    "Greg Fitzgerald <garious@gmail.com>",
+    "Anatoly Yakovenko <anatoly@solana.co>",
+    "Greg Fitzgerald <greg@solana.co>",
 ]
 license = "Apache-2.0"
 
 [[bin]]
-name = "silk-demo"
-path = "src/bin/demo.rs"
+name = "solana-historian-demo"
+path = "src/bin/historian-demo.rs"
+
+[[bin]]
+name = "solana-client-demo"
+path = "src/bin/client-demo.rs"
+
+[[bin]]
+name = "solana-testnode"
+path = "src/bin/testnode.rs"
+
+[[bin]]
+name = "solana-genesis"
+path = "src/bin/genesis.rs"
+
+[[bin]]
+name = "solana-genesis-demo"
+path = "src/bin/genesis-demo.rs"
+
+[[bin]]
+name = "solana-mint"
+path = "src/bin/mint.rs"
 
 [badges]
-codecov = { repository = "loomprotocol/silk", branch = "master", service = "github" }
+codecov = { repository = "solana-labs/solana", branch = "master", service = "github" }
 
 [features]
 unstable = []
 ipv6 = []
 
 [dependencies]
 rayon = "1.0.0"
 itertools = "0.7.6"
 sha2 = "0.7.0"
 generic-array = { version = "0.9.0", default-features = false, features = ["serde"] }
 serde = "1.0.27"
 serde_derive = "1.0.27"
 serde_json = "1.0.10"
 ring = "0.12.1"
 untrusted = "0.5.1"
 bincode = "1.0.0"
 chrono = { version = "0.4.0", features = ["serde"] }
 log = "^0.4.1"
 matches = "^0.1.6"
README.md (127 changed lines)
@@ -1,69 +1,86 @@
-[![Silk crate](https://img.shields.io/crates/v/silk.svg)](https://crates.io/crates/silk)
-[![Silk documentation](https://docs.rs/silk/badge.svg)](https://docs.rs/silk)
-[![Build Status](https://travis-ci.org/loomprotocol/silk.svg?branch=master)](https://travis-ci.org/loomprotocol/silk)
-[![codecov](https://codecov.io/gh/loomprotocol/silk/branch/master/graph/badge.svg)](https://codecov.io/gh/loomprotocol/silk)
+[![Solana crate](https://img.shields.io/crates/v/solana.svg)](https://crates.io/crates/solana)
+[![Solana documentation](https://docs.rs/solana/badge.svg)](https://docs.rs/solana)
+[![Build Status](https://travis-ci.org/solana-labs/solana.svg?branch=master)](https://travis-ci.org/solana-labs/solana)
+[![codecov](https://codecov.io/gh/solana-labs/solana/branch/master/graph/badge.svg)](https://codecov.io/gh/solana-labs/solana)
 
-# Silk, a silky smooth implementation of the Loom specification
+Disclaimer
+===
 
-Loom is a new achitecture for a high performance blockchain. Its whitepaper boasts a theoretical
-throughput of 710k transactions per second on a 1 gbps network. The specification is implemented
-in two git repositories. Reserach is performed in the loom repository. That work drives the
-Loom specification forward. This repository, on the other hand, aims to implement the specification
-as-is. We care a great deal about quality, clarity and short learning curve. We avoid the use
-of `unsafe` Rust and write tests for *everything*. Optimizations are only added when
-corresponding benchmarks are also added that demonstrate real performance boots. We expect the
-feature set here will always be a ways behind the loom repo, but that this is an implementation
-you can take to the bank, literally.
+All claims, content, designs, algorithms, estimates, roadmaps, specifications, and performance measurements described in this project are done with the author's best effort. It is up to the reader to check and validate their accuracy and truthfulness. Furthermore nothing in this project constitutes a solicitation for investment.
 
-# Usage
+Solana: High-Performance Blockchain
+===
 
-Add the latest [silk package](https://crates.io/crates/silk) to the `[dependencies]` section
-of your Cargo.toml.
+Solana™ is a new architecture for a high performance blockchain. It aims to support
+over 710 thousand transactions per second on a gigabit network.
 
-Create a *Historian* and send it *events* to generate an *event log*, where each log *entry*
-is tagged with the historian's latest *hash*. Then ensure the order of events was not tampered
-with by verifying each entry's hash can be generated from the hash in the previous entry:
+Running the demo
+===
 
-```rust
-extern crate silk;
+First, install Rust's package manager Cargo.
 
-use silk::historian::Historian;
-use silk::log::{verify_slice, Entry, Event};
-use std::{thread, time};
-use std::sync::mpsc::SendError;
-
-fn create_log(hist: &Historian) -> Result<(), SendError<Event>> {
-    hist.sender.send(Event::Tick)?;
-    thread::sleep(time::Duration::new(0, 100_000));
-    hist.sender.send(Event::UserDataKey(0xdeadbeef))?;
-    thread::sleep(time::Duration::new(0, 100_000));
-    hist.sender.send(Event::Tick)?;
-    Ok(())
-}
-
-fn main() {
-    let seed = 0;
-    let hist = Historian::new(seed);
-    create_log(&hist).expect("send error");
-    drop(hist.sender);
-    let entries: Vec<Entry> = hist.receiver.iter().collect();
-    for entry in &entries {
-        println!("{:?}", entry);
-    }
-    assert!(verify_slice(&entries, seed));
-}
+```bash
+$ curl https://sh.rustup.rs -sSf | sh
+$ source $HOME/.cargo/env
 ```
 
-Running the program should produce a log similar to:
+Install the solana executables:
 
-```
-Entry { num_hashes: 0, end_hash: 0, event: Tick }
-Entry { num_hashes: 245, end_hash: 11504657626326377539, event: UserDataKey(3735928559) }
-Entry { num_hashes: 154, end_hash: 13410333856574024888, event: Tick }
+```bash
+$ cargo install solana
 ```
 
+The testnode server is initialized with a ledger from stdin and
+generates new ledger entries on stdout. To create the input ledger, we'll need
+to create *the mint* and use it to generate a *genesis ledger*. It's done in
+two steps because the mint.json file contains a private key that will be
+used later in this demo.
+
-# Developing
+```bash
+$ echo 500 | solana-mint > mint.json
+$ cat mint.json | solana-genesis > genesis.log
+```
+
+Now you can start the server:
+
+```bash
+$ cat genesis.log | solana-testnode > transactions0.log
+```
+
+Then, in a separate shell, let's execute some transactions. Note we pass in
+the JSON configuration file here, not the genesis ledger.
+
+```bash
+$ cat mint.json | solana-client-demo
+```
+
+Now kill the server with Ctrl-C, and take a look at the ledger. You should
+see something similar to:
+
+```json
+{"num_hashes":27,"id":[0, "..."],"event":"Tick"}
+{"num_hashes":3,"id":[67, "..."],"event":{"Transaction":{"tokens":42}}}
+{"num_hashes":27,"id":[0, "..."],"event":"Tick"}
+```
+
+Now restart the server from where we left off. Pass it both the genesis ledger, and
+the transaction ledger.
+
+```bash
+$ cat genesis.log transactions0.log | solana-testnode > transactions1.log
+```
+
+Lastly, run the client demo again, and verify that all funds were spent in the
+previous round, and so no additional transactions are added.
+
+```bash
+$ cat mint.json | solana-client-demo
+```
+
+Stop the server again, and verify there are only Tick entries, and no Transaction entries.
+
+Developing
+===
 
 Building
 ---
@@ -79,8 +96,8 @@ $ rustup component add rustfmt-preview
 Download the source code:
 
 ```bash
-$ git clone https://github.com/loomprotocol/silk.git
-$ cd silk
+$ git clone https://github.com/solana-labs/solana.git
+$ cd solana
 ```
 
 Testing
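
A note on the ledger format introduced above: each line on stdout is one JSON-serialized `Entry`, so the whole history can be replayed and checked offline. A minimal sketch of such a checker (not part of this change; it reuses `verify_slice` from `solana::ledger` as shown in doc/historian.md and src/bin/historian-demo.rs below, and assumes the ledger begins with the zero-`num_hashes` seed entry described in src/accountant.rs):

```rust
extern crate serde_json;
extern crate solana;

use solana::entry::Entry;
use solana::ledger::verify_slice;
use std::io::{self, BufRead};

fn main() {
    // Read a ledger from stdin, one JSON entry per line, the same way
    // solana-testnode does in src/bin/testnode.rs.
    let stdin = io::stdin();
    let entries: Vec<Entry> = stdin
        .lock()
        .lines()
        .map(|line| serde_json::from_str(&line.unwrap()).unwrap())
        .collect();
    // The first entry has num_hashes == 0, so its id doubles as the seed.
    let seed = entries[0].id;
    assert!(verify_slice(&entries, &seed));
    println!("ledger verified: {} entries", entries.len());
}
```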
doc/historian.md (new file, 65 lines)
@@ -0,0 +1,65 @@
The Historian
===

Create a *Historian* and send it *events* to generate an *event log*, where each *entry*
is tagged with the historian's latest *hash*. Then ensure the order of events was not tampered
with by verifying each entry's hash can be generated from the hash in the previous entry:

[figure: historian message-sequence diagram, generated from doc/historian.msc below]

```rust
extern crate solana;

use solana::historian::Historian;
use solana::ledger::{verify_slice, Entry, Hash};
use solana::event::{generate_keypair, get_pubkey, sign_claim_data, Event};
use std::thread::sleep;
use std::time::Duration;
use std::sync::mpsc::SendError;

fn create_ledger(hist: &Historian<Hash>) -> Result<(), SendError<Event<Hash>>> {
    sleep(Duration::from_millis(15));
    let tokens = 42;
    let keypair = generate_keypair();
    let event0 = Event::new_claim(get_pubkey(&keypair), tokens, sign_claim_data(&tokens, &keypair));
    hist.sender.send(event0)?;
    sleep(Duration::from_millis(10));
    Ok(())
}

fn main() {
    let seed = Hash::default();
    let hist = Historian::new(&seed, Some(10));
    create_ledger(&hist).expect("send error");
    drop(hist.sender);
    let entries: Vec<Entry<Hash>> = hist.receiver.iter().collect();
    for entry in &entries {
        println!("{:?}", entry);
    }
    // Proof-of-History: Verify the historian learned about the events
    // in the same order they appear in the vector.
    assert!(verify_slice(&entries, &seed));
}
```

Running the program should produce a ledger similar to:

```rust
Entry { num_hashes: 0, id: [0, ...], event: Tick }
Entry { num_hashes: 3, id: [67, ...], event: Transaction { tokens: 42 } }
Entry { num_hashes: 3, id: [123, ...], event: Tick }
```

Proof-of-History
---

Take note of the last line:

```rust
assert!(verify_slice(&entries, &seed));
```

[It's a proof!](https://en.wikipedia.org/wiki/Curry–Howard_correspondence) For each entry returned by the
historian, we can verify that `id` is the result of applying a sha256 hash to the previous `id`
exactly `num_hashes` times, and then hashing the event data on top of that. Because the event data is
included in the hash, the events cannot be reordered without regenerating all the hashes.
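
For reference, the per-entry check described in that last paragraph is the `next_hash` computation from src/entry.rs later in this diff; restated as a standalone predicate, it looks roughly like this (a sketch mirroring that code, not new API):

```rust
use solana::entry::Entry;
use solana::hash::{extend_and_hash, hash, Hash};

/// Does `entry` extend the chain ending at `prev_id`? Mirrors next_hash()
/// from src/entry.rs: apply sha256 repeatedly from the previous id, then
/// fold the event signatures into the final hash.
fn entry_follows(prev_id: &Hash, entry: &Entry) -> bool {
    let mut id = *prev_id;
    for _ in 1..entry.num_hashes {
        id = hash(&id);
    }
    // Collect the signatures of all events tagged by this entry.
    let mut sig_data = vec![];
    for event in &entry.events {
        if let Some(sig) = event.get_signature() {
            sig_data.extend_from_slice(&sig);
        }
    }
    let expected = if sig_data.is_empty() {
        id
    } else {
        extend_and_hash(&id, &sig_data)
    };
    entry.id == expected
}
```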
doc/historian.msc (new file, 18 lines)
@@ -0,0 +1,18 @@
msc {
    client,historian,recorder;

    recorder=>historian [ label = "e0 = Entry{id: h0, n: 0, event: Tick}" ] ;
    recorder=>recorder [ label = "h1 = hash(h0)" ] ;
    recorder=>recorder [ label = "h2 = hash(h1)" ] ;
    client=>historian [ label = "Transaction(d0)" ] ;
    historian=>recorder [ label = "Transaction(d0)" ] ;
    recorder=>recorder [ label = "h3 = hash(h2 + d0)" ] ;
    recorder=>historian [ label = "e1 = Entry{id: hash(h3), n: 3, event: Transaction(d0)}" ] ;
    recorder=>recorder [ label = "h4 = hash(h3)" ] ;
    recorder=>recorder [ label = "h5 = hash(h4)" ] ;
    recorder=>recorder [ label = "h6 = hash(h5)" ] ;
    recorder=>historian [ label = "e2 = Entry{id: h6, n: 3, event: Tick}" ] ;
    client=>historian [ label = "collect()" ] ;
    historian=>client [ label = "entries = [e0, e1, e2]" ] ;
    client=>client [ label = "verify_slice(entries, h0)" ] ;
}
src/accountant.rs (new file, 381 lines)
@@ -0,0 +1,381 @@
//! The `accountant` is a client of the `historian`. It uses the historian's
//! event log to record transactions. Its users can deposit funds and
//! transfer funds to other users.

use chrono::prelude::*;
use entry::Entry;
use event::Event;
use hash::Hash;
use historian::Historian;
use mint::Mint;
use plan::{Plan, Witness};
use recorder::Signal;
use signature::{KeyPair, PublicKey, Signature};
use std::collections::hash_map::Entry::Occupied;
use std::collections::{HashMap, HashSet};
use std::result;
use std::sync::mpsc::SendError;
use transaction::Transaction;

#[derive(Debug, PartialEq, Eq)]
pub enum AccountingError {
    InsufficientFunds,
    InvalidTransfer,
    InvalidTransferSignature,
    SendError,
}

pub type Result<T> = result::Result<T, AccountingError>;

/// Commit funds to the 'to' party.
fn complete_transaction(balances: &mut HashMap<PublicKey, i64>, plan: &Plan) {
    if let Plan::Pay(ref payment) = *plan {
        *balances.entry(payment.to).or_insert(0) += payment.tokens;
    }
}

pub struct Accountant {
    pub historian: Historian,
    pub balances: HashMap<PublicKey, i64>,
    pub first_id: Hash,
    pending: HashMap<Signature, Plan>,
    time_sources: HashSet<PublicKey>,
    last_time: DateTime<Utc>,
}

impl Accountant {
    pub fn new_from_entries<I>(entries: I, ms_per_tick: Option<u64>) -> Self
    where
        I: IntoIterator<Item = Entry>,
    {
        let mut entries = entries.into_iter();

        // The first item in the ledger is required to be an entry with zero num_hashes,
        // which implies its id can be used as the ledger's seed.
        let entry0 = entries.next().unwrap();
        let start_hash = entry0.id;

        let hist = Historian::new(&start_hash, ms_per_tick);
        let mut acc = Accountant {
            historian: hist,
            balances: HashMap::new(),
            first_id: start_hash,
            pending: HashMap::new(),
            time_sources: HashSet::new(),
            last_time: Utc.timestamp(0, 0),
        };

        // The second item in the ledger is a special transaction where the to and from
        // fields are the same. That entry should be treated as a deposit, not a
        // transfer to oneself.
        let entry1 = entries.next().unwrap();
        acc.process_verified_event(&entry1.events[0], true).unwrap();

        for entry in entries {
            for event in entry.events {
                acc.process_verified_event(&event, false).unwrap();
            }
        }
        acc
    }

    pub fn new(mint: &Mint, ms_per_tick: Option<u64>) -> Self {
        Self::new_from_entries(mint.create_entries(), ms_per_tick)
    }

    fn is_deposit(allow_deposits: bool, from: &PublicKey, plan: &Plan) -> bool {
        if let Plan::Pay(ref payment) = *plan {
            allow_deposits && *from == payment.to
        } else {
            false
        }
    }

    pub fn process_transaction(self: &mut Self, tr: Transaction) -> Result<()> {
        if !tr.verify() {
            return Err(AccountingError::InvalidTransfer);
        }

        if self.get_balance(&tr.from).unwrap_or(0) < tr.tokens {
            return Err(AccountingError::InsufficientFunds);
        }

        self.process_verified_transaction(&tr, false)?;
        if let Err(SendError(_)) = self.historian
            .sender
            .send(Signal::Event(Event::Transaction(tr)))
        {
            return Err(AccountingError::SendError);
        }

        Ok(())
    }

    fn process_verified_transaction(
        self: &mut Self,
        tr: &Transaction,
        allow_deposits: bool,
    ) -> Result<()> {
        if !self.historian.reserve_signature(&tr.sig) {
            return Err(AccountingError::InvalidTransferSignature);
        }

        if !Self::is_deposit(allow_deposits, &tr.from, &tr.plan) {
            if let Some(x) = self.balances.get_mut(&tr.from) {
                *x -= tr.tokens;
            }
        }

        let mut plan = tr.plan.clone();
        plan.apply_witness(&Witness::Timestamp(self.last_time));

        if plan.is_complete() {
            complete_transaction(&mut self.balances, &plan);
        } else {
            self.pending.insert(tr.sig, plan);
        }

        Ok(())
    }

    fn process_verified_sig(&mut self, from: PublicKey, tx_sig: Signature) -> Result<()> {
        if let Occupied(mut e) = self.pending.entry(tx_sig) {
            e.get_mut().apply_witness(&Witness::Signature(from));
            if e.get().is_complete() {
                complete_transaction(&mut self.balances, e.get());
                e.remove_entry();
            }
        };

        Ok(())
    }

    fn process_verified_timestamp(&mut self, from: PublicKey, dt: DateTime<Utc>) -> Result<()> {
        // If this is the first timestamp we've seen, it probably came from the genesis block,
        // so we'll trust it.
        if self.last_time == Utc.timestamp(0, 0) {
            self.time_sources.insert(from);
        }

        if self.time_sources.contains(&from) {
            if dt > self.last_time {
                self.last_time = dt;
            }
        } else {
            return Ok(());
        }

        // Check to see if any timelocked transactions can be completed.
        let mut completed = vec![];
        for (key, plan) in &mut self.pending {
            plan.apply_witness(&Witness::Timestamp(self.last_time));
            if plan.is_complete() {
                complete_transaction(&mut self.balances, plan);
                completed.push(key.clone());
            }
        }

        for key in completed {
            self.pending.remove(&key);
        }

        Ok(())
    }

    fn process_verified_event(self: &mut Self, event: &Event, allow_deposits: bool) -> Result<()> {
        match *event {
            Event::Transaction(ref tr) => self.process_verified_transaction(tr, allow_deposits),
            Event::Signature { from, tx_sig, .. } => self.process_verified_sig(from, tx_sig),
            Event::Timestamp { from, dt, .. } => self.process_verified_timestamp(from, dt),
        }
    }

    pub fn transfer(
        self: &mut Self,
        n: i64,
        keypair: &KeyPair,
        to: PublicKey,
        last_id: Hash,
    ) -> Result<Signature> {
        let tr = Transaction::new(keypair, to, n, last_id);
        let sig = tr.sig;
        self.process_transaction(tr).map(|_| sig)
    }

    pub fn transfer_on_date(
        self: &mut Self,
        n: i64,
        keypair: &KeyPair,
        to: PublicKey,
        dt: DateTime<Utc>,
        last_id: Hash,
    ) -> Result<Signature> {
        let tr = Transaction::new_on_date(keypair, to, dt, n, last_id);
        let sig = tr.sig;
        self.process_transaction(tr).map(|_| sig)
    }

    pub fn get_balance(self: &Self, pubkey: &PublicKey) -> Option<i64> {
        self.balances.get(pubkey).cloned()
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use recorder::ExitReason;
    use signature::KeyPairUtil;

    #[test]
    fn test_accountant() {
        let alice = Mint::new(10_000);
        let bob_pubkey = KeyPair::new().pubkey();
        let mut acc = Accountant::new(&alice, Some(2));
        acc.transfer(1_000, &alice.keypair(), bob_pubkey, alice.seed())
            .unwrap();
        assert_eq!(acc.get_balance(&bob_pubkey).unwrap(), 1_000);

        acc.transfer(500, &alice.keypair(), bob_pubkey, alice.seed())
            .unwrap();
        assert_eq!(acc.get_balance(&bob_pubkey).unwrap(), 1_500);

        drop(acc.historian.sender);
        assert_eq!(
            acc.historian.thread_hdl.join().unwrap(),
            ExitReason::RecvDisconnected
        );
    }

    #[test]
    fn test_invalid_transfer() {
        let alice = Mint::new(11_000);
        let mut acc = Accountant::new(&alice, Some(2));
        let bob_pubkey = KeyPair::new().pubkey();
        acc.transfer(1_000, &alice.keypair(), bob_pubkey, alice.seed())
            .unwrap();
        assert_eq!(
            acc.transfer(10_001, &alice.keypair(), bob_pubkey, alice.seed()),
            Err(AccountingError::InsufficientFunds)
        );

        let alice_pubkey = alice.keypair().pubkey();
        assert_eq!(acc.get_balance(&alice_pubkey).unwrap(), 10_000);
        assert_eq!(acc.get_balance(&bob_pubkey).unwrap(), 1_000);

        drop(acc.historian.sender);
        assert_eq!(
            acc.historian.thread_hdl.join().unwrap(),
            ExitReason::RecvDisconnected
        );
    }

    #[test]
    fn test_overspend_attack() {
        let alice = Mint::new(1);
        let mut acc = Accountant::new(&alice, None);
        let bob_pubkey = KeyPair::new().pubkey();
        let mut tr = Transaction::new(&alice.keypair(), bob_pubkey, 1, alice.seed());
        if let Plan::Pay(ref mut payment) = tr.plan {
            payment.tokens = 2; // <-- attack!
        }
        assert_eq!(
            acc.process_transaction(tr.clone()),
            Err(AccountingError::InvalidTransfer)
        );

        // Also, ensure all branches of the plan spend all tokens
        if let Plan::Pay(ref mut payment) = tr.plan {
            payment.tokens = 0; // <-- whoops!
        }
        assert_eq!(
            acc.process_transaction(tr.clone()),
            Err(AccountingError::InvalidTransfer)
        );
    }

    #[test]
    fn test_transfer_to_newb() {
        let alice = Mint::new(10_000);
        let mut acc = Accountant::new(&alice, Some(2));
        let alice_keypair = alice.keypair();
        let bob_pubkey = KeyPair::new().pubkey();
        acc.transfer(500, &alice_keypair, bob_pubkey, alice.seed())
            .unwrap();
        assert_eq!(acc.get_balance(&bob_pubkey).unwrap(), 500);

        drop(acc.historian.sender);
        assert_eq!(
            acc.historian.thread_hdl.join().unwrap(),
            ExitReason::RecvDisconnected
        );
    }

    #[test]
    fn test_transfer_on_date() {
        let alice = Mint::new(1);
        let mut acc = Accountant::new(&alice, Some(2));
        let alice_keypair = alice.keypair();
        let bob_pubkey = KeyPair::new().pubkey();
        let dt = Utc::now();
        acc.transfer_on_date(1, &alice_keypair, bob_pubkey, dt, alice.seed())
            .unwrap();

        // Alice's balance will be zero because all funds are locked up.
        assert_eq!(acc.get_balance(&alice.pubkey()), Some(0));

        // Bob's balance will be None because the funds have not been
        // sent.
        assert_eq!(acc.get_balance(&bob_pubkey), None);

        // Now, acknowledge that the time specified in the condition has
        // occurred, and that Bob's funds are now available.
        acc.process_verified_timestamp(alice.pubkey(), dt).unwrap();
        assert_eq!(acc.get_balance(&bob_pubkey), Some(1));

        acc.process_verified_timestamp(alice.pubkey(), dt).unwrap(); // <-- Attack! Attempt to process completed transaction.
        assert_ne!(acc.get_balance(&bob_pubkey), Some(2));
    }

    #[test]
    fn test_transfer_after_date() {
        let alice = Mint::new(1);
        let mut acc = Accountant::new(&alice, Some(2));
        let alice_keypair = alice.keypair();
        let bob_pubkey = KeyPair::new().pubkey();
        let dt = Utc::now();
        acc.process_verified_timestamp(alice.pubkey(), dt).unwrap();

        // It's now past now, so this transfer should be processed immediately.
        acc.transfer_on_date(1, &alice_keypair, bob_pubkey, dt, alice.seed())
            .unwrap();

        assert_eq!(acc.get_balance(&alice.pubkey()), Some(0));
        assert_eq!(acc.get_balance(&bob_pubkey), Some(1));
    }

    #[test]
    fn test_cancel_transfer() {
        let alice = Mint::new(1);
        let mut acc = Accountant::new(&alice, Some(2));
        let alice_keypair = alice.keypair();
        let bob_pubkey = KeyPair::new().pubkey();
        let dt = Utc::now();
        let sig = acc.transfer_on_date(1, &alice_keypair, bob_pubkey, dt, alice.seed())
            .unwrap();

        // Alice's balance will be zero because all funds are locked up.
        assert_eq!(acc.get_balance(&alice.pubkey()), Some(0));

        // Bob's balance will be None because the funds have not been
        // sent.
        assert_eq!(acc.get_balance(&bob_pubkey), None);

        // Now, cancel the transaction. Alice gets her funds back, Bob never sees them.
        acc.process_verified_sig(alice.pubkey(), sig).unwrap();
        assert_eq!(acc.get_balance(&alice.pubkey()), Some(1));
        assert_eq!(acc.get_balance(&bob_pubkey), None);

        acc.process_verified_sig(alice.pubkey(), sig).unwrap(); // <-- Attack! Attempt to cancel completed transaction.
        assert_ne!(acc.get_balance(&alice.pubkey()), Some(2));
    }
}
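
A minimal usage sketch of the Accountant API above, condensed from the unit tests (no calls beyond what the tests already exercise):

```rust
extern crate solana;

use solana::accountant::Accountant;
use solana::mint::Mint;
use solana::signature::{KeyPair, KeyPairUtil};

fn main() {
    let alice = Mint::new(10_000);
    let mut acc = Accountant::new(&alice, None);
    let bob_pubkey = KeyPair::new().pubkey();

    // Spend from the mint: the transaction is verified, the sender is
    // debited immediately, and the receiver is credited once the Plan
    // completes (unconditional payments complete right away).
    acc.transfer(500, &alice.keypair(), bob_pubkey, alice.seed())
        .unwrap();
    assert_eq!(acc.get_balance(&bob_pubkey), Some(500));
}
```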
src/accountant_skel.rs (new file, 173 lines)
@@ -0,0 +1,173 @@
use accountant::Accountant;
use bincode::{deserialize, serialize};
use entry::Entry;
use hash::Hash;
use result::Result;
use serde_json;
use signature::PublicKey;
use std::default::Default;
use std::io::Write;
use std::net::UdpSocket;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::channel;
use std::sync::{Arc, Mutex};
use std::thread::{spawn, JoinHandle};
use std::time::Duration;
use streamer;
use transaction::Transaction;

pub struct AccountantSkel<W: Write + Send + 'static> {
    pub acc: Accountant,
    pub last_id: Hash,
    pub ledger: Vec<Entry>,
    writer: W,
}

#[cfg_attr(feature = "cargo-clippy", allow(large_enum_variant))]
#[derive(Serialize, Deserialize, Debug)]
pub enum Request {
    Transaction(Transaction),
    GetBalance { key: PublicKey },
    GetEntries { last_id: Hash },
    GetId { is_last: bool },
}

#[derive(Serialize, Deserialize, Debug)]
pub enum Response {
    Balance { key: PublicKey, val: Option<i64> },
    Entries { entries: Vec<Entry> },
    Id { id: Hash, is_last: bool },
}

impl<W: Write + Send + 'static> AccountantSkel<W> {
    pub fn new(acc: Accountant, w: W) -> Self {
        let last_id = acc.first_id;
        AccountantSkel {
            acc,
            last_id,
            ledger: vec![],
            writer: w,
        }
    }

    pub fn sync(&mut self) -> Hash {
        while let Ok(entry) = self.acc.historian.receiver.try_recv() {
            self.last_id = entry.id;
            write!(self.writer, "{}", serde_json::to_string(&entry).unwrap()).unwrap();
            self.ledger.push(entry);
        }
        self.last_id
    }

    pub fn process_request(self: &mut Self, msg: Request) -> Option<Response> {
        match msg {
            Request::Transaction(tr) => {
                if let Err(err) = self.acc.process_transaction(tr) {
                    eprintln!("Transaction error: {:?}", err);
                }
                None
            }
            Request::GetBalance { key } => {
                let val = self.acc.get_balance(&key);
                Some(Response::Balance { key, val })
            }
            Request::GetEntries { last_id } => {
                self.sync();
                let entries = self.ledger
                    .iter()
                    .skip_while(|x| x.id != last_id) // linear scan to find the Entry with id == last_id.
                    .skip(1) // Skip the entry with last_id.
                    .take(256) // TODO: Take while the serialized entries fit into a 64k UDP packet.
                    .cloned()
                    .collect();
                Some(Response::Entries { entries })
            }
            Request::GetId { is_last } => Some(Response::Id {
                id: if is_last {
                    self.sync()
                } else {
                    self.acc.first_id
                },
                is_last,
            }),
        }
    }

    fn process(
        &mut self,
        r_reader: &streamer::Receiver,
        s_responder: &streamer::Responder,
        packet_recycler: &streamer::PacketRecycler,
        response_recycler: &streamer::ResponseRecycler,
    ) -> Result<()> {
        let timer = Duration::new(1, 0);
        let msgs = r_reader.recv_timeout(timer)?;
        let msgs_ = msgs.clone();
        let rsps = streamer::allocate(response_recycler);
        let rsps_ = rsps.clone();
        {
            let mut num = 0;
            let mut ursps = rsps.write().unwrap();
            for packet in &msgs.read().unwrap().packets {
                let sz = packet.meta.size;
                let req = deserialize(&packet.data[0..sz])?;
                if let Some(resp) = self.process_request(req) {
                    if ursps.responses.len() <= num {
                        ursps
                            .responses
                            .resize((num + 1) * 2, streamer::Response::default());
                    }
                    let rsp = &mut ursps.responses[num];
                    let v = serialize(&resp)?;
                    let len = v.len();
                    rsp.data[..len].copy_from_slice(&v);
                    rsp.meta.size = len;
                    rsp.meta.set_addr(&packet.meta.get_addr());
                    num += 1;
                }
            }
            ursps.responses.resize(num, streamer::Response::default());
        }
        s_responder.send(rsps_)?;
        streamer::recycle(packet_recycler, msgs_);
        Ok(())
    }

    /// UDP Server that forwards messages to Accountant methods.
    pub fn serve(
        obj: Arc<Mutex<AccountantSkel<W>>>,
        addr: &str,
        exit: Arc<AtomicBool>,
    ) -> Result<Vec<JoinHandle<()>>> {
        let read = UdpSocket::bind(addr)?;
        // make sure we are on the same interface
        let mut local = read.local_addr()?;
        local.set_port(0);
        let write = UdpSocket::bind(local)?;

        let packet_recycler = Arc::new(Mutex::new(Vec::new()));
        let response_recycler = Arc::new(Mutex::new(Vec::new()));
        let (s_reader, r_reader) = channel();
        let t_receiver = streamer::receiver(read, exit.clone(), packet_recycler.clone(), s_reader)?;

        let (s_responder, r_responder) = channel();
        let t_responder =
            streamer::responder(write, exit.clone(), response_recycler.clone(), r_responder);

        let t_server = spawn(move || {
            if let Ok(me) = Arc::try_unwrap(obj) {
                loop {
                    let e = me.lock().unwrap().process(
                        &r_reader,
                        &s_responder,
                        &packet_recycler,
                        &response_recycler,
                    );
                    if e.is_err() && exit.load(Ordering::Relaxed) {
                        break;
                    }
                }
            }
        });
        Ok(vec![t_receiver, t_responder, t_server])
    }
}
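
The wire protocol above is plain bincode over UDP, so any client that serializes a `Request` and deserializes the matching `Response` can talk to this server. A hedged sketch of a one-shot query (the `AccountantStub` in src/accountant_stub.rs below is the real client; `query_balance` is a hypothetical helper mirroring its `get_balance`):

```rust
extern crate bincode;
extern crate solana;

use bincode::{deserialize, serialize};
use solana::accountant_skel::{Request, Response};
use solana::signature::PublicKey;
use std::io;
use std::net::UdpSocket;

// Ask a running solana-testnode for a balance over UDP.
fn query_balance(server: &str, key: PublicKey) -> io::Result<Option<i64>> {
    let socket = UdpSocket::bind("0.0.0.0:0")?;
    let req = serialize(&Request::GetBalance { key }).unwrap();
    socket.send_to(&req, server)?;
    let mut buf = vec![0u8; 1024];
    socket.recv_from(&mut buf)?;
    if let Response::Balance { val, .. } = deserialize(&buf).unwrap() {
        return Ok(val);
    }
    Ok(None)
}
```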
src/accountant_stub.rs (new file, 157 lines)
@@ -0,0 +1,157 @@
//! The `accountant_stub` is a client-side interface to the `accountant_skel`
//! server. It serializes Requests over UDP and deserializes the matching
//! Responses.

use accountant_skel::{Request, Response};
use bincode::{deserialize, serialize};
use entry::Entry;
use hash::Hash;
use signature::{KeyPair, PublicKey, Signature};
use std::io;
use std::net::UdpSocket;
use transaction::Transaction;

pub struct AccountantStub {
    pub addr: String,
    pub socket: UdpSocket,
}

impl AccountantStub {
    pub fn new(addr: &str, socket: UdpSocket) -> Self {
        AccountantStub {
            addr: addr.to_string(),
            socket,
        }
    }

    pub fn transfer_signed(&self, tr: Transaction) -> io::Result<usize> {
        let req = Request::Transaction(tr);
        let data = serialize(&req).unwrap();
        self.socket.send_to(&data, &self.addr)
    }

    pub fn transfer(
        &self,
        n: i64,
        keypair: &KeyPair,
        to: PublicKey,
        last_id: &Hash,
    ) -> io::Result<Signature> {
        let tr = Transaction::new(keypair, to, n, *last_id);
        let sig = tr.sig;
        self.transfer_signed(tr).map(|_| sig)
    }

    pub fn get_balance(&self, pubkey: &PublicKey) -> io::Result<Option<i64>> {
        let req = Request::GetBalance { key: *pubkey };
        let data = serialize(&req).expect("serialize GetBalance");
        self.socket.send_to(&data, &self.addr)?;
        let mut buf = vec![0u8; 1024];
        self.socket.recv_from(&mut buf)?;
        let resp = deserialize(&buf).expect("deserialize balance");
        if let Response::Balance { key, val } = resp {
            assert_eq!(key, *pubkey);
            return Ok(val);
        }
        Ok(None)
    }

    fn get_id(&self, is_last: bool) -> io::Result<Hash> {
        let req = Request::GetId { is_last };
        let data = serialize(&req).expect("serialize GetId");
        self.socket.send_to(&data, &self.addr)?;
        let mut buf = vec![0u8; 1024];
        self.socket.recv_from(&mut buf)?;
        let resp = deserialize(&buf).expect("deserialize Id");
        if let Response::Id { id, .. } = resp {
            return Ok(id);
        }
        Ok(Default::default())
    }

    pub fn get_last_id(&self) -> io::Result<Hash> {
        self.get_id(true)
    }

    pub fn check_on_signature(
        &mut self,
        wait_sig: &Signature,
        last_id: &Hash,
    ) -> io::Result<(bool, Hash)> {
        let mut last_id = *last_id;
        let req = Request::GetEntries { last_id };
        let data = serialize(&req).unwrap();
        self.socket.send_to(&data, &self.addr).map(|_| ())?;

        let mut buf = vec![0u8; 65_535];
        self.socket.recv_from(&mut buf)?;
        let resp = deserialize(&buf).expect("deserialize signature");
        let mut found = false;
        if let Response::Entries { entries } = resp {
            for Entry { id, events, .. } in entries {
                last_id = id;
                if !found {
                    for event in events {
                        if let Some(sig) = event.get_signature() {
                            if sig == *wait_sig {
                                found = true;
                            }
                        }
                    }
                }
            }
        }

        Ok((found, last_id))
    }

    pub fn wait_on_signature(&mut self, wait_sig: &Signature, last_id: &Hash) -> io::Result<Hash> {
        let mut found = false;
        let mut last_id = *last_id;
        while !found {
            let ret = self.check_on_signature(wait_sig, &last_id)?;
            found = ret.0;
            last_id = ret.1;
        }
        Ok(last_id)
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use accountant::Accountant;
    use accountant_skel::AccountantSkel;
    use mint::Mint;
    use signature::{KeyPair, KeyPairUtil};
    use std::io::sink;
    use std::sync::atomic::{AtomicBool, Ordering};
    use std::sync::{Arc, Mutex};
    use std::thread::sleep;
    use std::time::Duration;

    #[test]
    fn test_accountant_stub() {
        let addr = "127.0.0.1:9000";
        let send_addr = "127.0.0.1:9001";
        let alice = Mint::new(10_000);
        let acc = Accountant::new(&alice, Some(30));
        let bob_pubkey = KeyPair::new().pubkey();
        let exit = Arc::new(AtomicBool::new(false));
        let acc = Arc::new(Mutex::new(AccountantSkel::new(acc, sink())));
        let threads = AccountantSkel::serve(acc, addr, exit.clone()).unwrap();
        sleep(Duration::from_millis(300));

        let socket = UdpSocket::bind(send_addr).unwrap();
        let mut acc = AccountantStub::new(addr, socket);
        let last_id = acc.get_last_id().unwrap();
        let sig = acc.transfer(500, &alice.keypair(), bob_pubkey, &last_id)
            .unwrap();
        acc.wait_on_signature(&sig, &last_id).unwrap();
        assert_eq!(acc.get_balance(&bob_pubkey).unwrap().unwrap(), 500);
        exit.store(true, Ordering::Relaxed);
        for t in threads {
            t.join().expect("join");
        }
    }
}
src/bin/client-demo.rs (new file, 77 lines)
@@ -0,0 +1,77 @@
extern crate serde_json;
extern crate solana;

use solana::accountant_stub::AccountantStub;
use solana::mint::Mint;
use solana::signature::{KeyPair, KeyPairUtil};
use solana::transaction::Transaction;
use std::io::stdin;
use std::net::UdpSocket;
use std::time::Instant;

fn main() {
    let addr = "127.0.0.1:8000";
    let send_addr = "127.0.0.1:8001";

    let mint: Mint = serde_json::from_reader(stdin()).unwrap();
    let mint_keypair = mint.keypair();
    let mint_pubkey = mint.pubkey();

    let socket = UdpSocket::bind(send_addr).unwrap();
    let mut acc = AccountantStub::new(addr, socket);
    let last_id = acc.get_last_id().unwrap();

    let txs = acc.get_balance(&mint_pubkey).unwrap().unwrap();
    println!("Mint's Initial Balance {}", txs);

    println!("Signing transactions...");
    let now = Instant::now();
    let transactions: Vec<_> = (0..txs)
        .map(|_| {
            let rando_pubkey = KeyPair::new().pubkey();
            Transaction::new(&mint_keypair, rando_pubkey, 1, last_id)
        })
        .collect();
    let duration = now.elapsed();
    let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
    let bsps = txs as f64 / ns as f64;
    let nsps = ns as f64 / txs as f64;
    println!(
        "Done. {} thousand signatures per second, {}us per signature",
        bsps * 1_000_000_f64,
        nsps / 1_000_f64
    );

    println!("Verify signatures...");
    let now = Instant::now();
    for tr in &transactions {
        assert!(tr.verify());
    }
    let duration = now.elapsed();
    let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
    let bsvps = txs as f64 / ns as f64;
    let nspsv = ns as f64 / txs as f64;
    println!(
        "Done. {} thousand signature verifications per second, {}us per signature verification",
        bsvps * 1_000_000_f64,
        nspsv / 1_000_f64
    );

    println!("Transferring 1 unit {} times...", txs);
    let now = Instant::now();
    let mut sig = Default::default();
    for tr in transactions {
        sig = tr.sig;
        acc.transfer_signed(tr).unwrap();
    }
    println!("Waiting for last transaction to be confirmed...",);
    acc.wait_on_signature(&sig, &last_id).unwrap();

    let duration = now.elapsed();
    let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos());
    let tps = (txs * 1_000_000_000) as f64 / ns as f64;
    println!("Done. {} tps!", tps);
    let val = acc.get_balance(&mint_pubkey).unwrap().unwrap();
    println!("Mint's Final Balance {}", val);
    assert_eq!(val, 0);
}
src/bin/demo.rs (deleted, 27 lines)
@@ -1,27 +0,0 @@
-extern crate silk;
-
-use silk::historian::Historian;
-use silk::log::{verify_slice, Entry, Event};
-use std::{thread, time};
-use std::sync::mpsc::SendError;
-
-fn create_log(hist: &Historian) -> Result<(), SendError<Event>> {
-    hist.sender.send(Event::Tick)?;
-    thread::sleep(time::Duration::new(0, 100_000));
-    hist.sender.send(Event::UserDataKey(0xdeadbeef))?;
-    thread::sleep(time::Duration::new(0, 100_000));
-    hist.sender.send(Event::Tick)?;
-    Ok(())
-}
-
-fn main() {
-    let seed = 0;
-    let hist = Historian::new(seed);
-    create_log(&hist).expect("send error");
-    drop(hist.sender);
-    let entries: Vec<Entry> = hist.receiver.iter().collect();
-    for entry in &entries {
-        println!("{:?}", entry);
-    }
-    assert!(verify_slice(&entries, seed));
-}
src/bin/genesis-demo.rs (new file, 30 lines)
@@ -0,0 +1,30 @@
extern crate serde_json;
extern crate solana;

use solana::entry::create_entry;
use solana::event::Event;
use solana::hash::Hash;
use solana::mint::Mint;
use solana::signature::{KeyPair, KeyPairUtil, PublicKey};
use solana::transaction::Transaction;
use std::io::stdin;

fn transfer(from: &KeyPair, (to, tokens): (PublicKey, i64), last_id: Hash) -> Event {
    Event::Transaction(Transaction::new(from, to, tokens, last_id))
}

fn main() {
    let mint: Mint = serde_json::from_reader(stdin()).unwrap();
    let mut entries = mint.create_entries();

    let from = mint.keypair();
    let seed = mint.seed();
    let alice = (KeyPair::new().pubkey(), 200);
    let bob = (KeyPair::new().pubkey(), 100);
    let events = vec![transfer(&from, alice, seed), transfer(&from, bob, seed)];
    entries.push(create_entry(&seed, 0, events));

    for entry in entries {
        println!("{}", serde_json::to_string(&entry).unwrap());
    }
}
src/bin/genesis.rs (new file, 14 lines)
@@ -0,0 +1,14 @@
//! A command-line executable for generating the chain's genesis block.

extern crate serde_json;
extern crate solana;

use solana::mint::Mint;
use std::io::stdin;

fn main() {
    let mint: Mint = serde_json::from_reader(stdin()).unwrap();
    for x in mint.create_entries() {
        println!("{}", serde_json::to_string(&x).unwrap());
    }
}
src/bin/historian-demo.rs (new file, 37 lines)
@@ -0,0 +1,37 @@
extern crate solana;

use solana::entry::Entry;
use solana::event::Event;
use solana::hash::Hash;
use solana::historian::Historian;
use solana::ledger::verify_slice;
use solana::recorder::Signal;
use solana::signature::{KeyPair, KeyPairUtil};
use solana::transaction::Transaction;
use std::sync::mpsc::SendError;
use std::thread::sleep;
use std::time::Duration;

fn create_ledger(hist: &Historian, seed: &Hash) -> Result<(), SendError<Signal>> {
    sleep(Duration::from_millis(15));
    let keypair = KeyPair::new();
    let tr = Transaction::new(&keypair, keypair.pubkey(), 42, *seed);
    let signal0 = Signal::Event(Event::Transaction(tr));
    hist.sender.send(signal0)?;
    sleep(Duration::from_millis(10));
    Ok(())
}

fn main() {
    let seed = Hash::default();
    let hist = Historian::new(&seed, Some(10));
    create_ledger(&hist, &seed).expect("send error");
    drop(hist.sender);
    let entries: Vec<Entry> = hist.receiver.iter().collect();
    for entry in &entries {
        println!("{:?}", entry);
    }
    // Proof-of-History: Verify the historian learned about the events
    // in the same order they appear in the vector.
    assert!(verify_slice(&entries, &seed));
}
src/bin/mint.rs (new file, 15 lines)
@@ -0,0 +1,15 @@
extern crate serde_json;
extern crate solana;

use solana::mint::Mint;
use std::io;

fn main() {
    let mut input_text = String::new();
    io::stdin().read_line(&mut input_text).unwrap();
    let trimmed = input_text.trim();
    let tokens = trimmed.parse::<i64>().unwrap();

    let mint = Mint::new(tokens);
    println!("{}", serde_json::to_string(&mint).unwrap());
}
src/bin/testnode.rs (new file, 25 lines)
@@ -0,0 +1,25 @@
extern crate serde_json;
extern crate solana;

use solana::accountant::Accountant;
use solana::accountant_skel::AccountantSkel;
use std::io::{self, stdout, BufRead};
use std::sync::atomic::AtomicBool;
use std::sync::{Arc, Mutex};

fn main() {
    let addr = "127.0.0.1:8000";
    let stdin = io::stdin();
    let entries = stdin
        .lock()
        .lines()
        .map(|line| serde_json::from_str(&line.unwrap()).unwrap());
    let acc = Accountant::new_from_entries(entries, Some(1000));
    let exit = Arc::new(AtomicBool::new(false));
    let skel = Arc::new(Mutex::new(AccountantSkel::new(acc, stdout())));
    eprintln!("Listening on {}", addr);
    let threads = AccountantSkel::serve(skel, addr, exit.clone()).unwrap();
    for t in threads {
        t.join().expect("join");
    }
}
src/entry.rs (new file, 128 lines)
@@ -0,0 +1,128 @@
use event::Event;
use hash::{extend_and_hash, hash, Hash};

#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub struct Entry {
    pub num_hashes: u64,
    pub id: Hash,
    pub events: Vec<Event>,
}

impl Entry {
    /// Creates an Entry from the number of hashes `num_hashes` since the previous event
    /// and the resulting `id`.
    pub fn new_tick(num_hashes: u64, id: &Hash) -> Self {
        Entry {
            num_hashes,
            id: *id,
            events: vec![],
        }
    }

    /// Verifies self.id is the result of hashing a `start_hash` `self.num_hashes` times.
    /// If the event is not a Tick, then hash that as well.
    pub fn verify(&self, start_hash: &Hash) -> bool {
        for event in &self.events {
            if !event.verify() {
                return false;
            }
        }
        self.id == next_hash(start_hash, self.num_hashes, &self.events)
    }
}

/// Creates the hash `num_hashes` after `start_hash`. If the event contains
/// a signature, the final hash will be a hash of both the previous ID and
/// the signature.
pub fn next_hash(start_hash: &Hash, num_hashes: u64, events: &[Event]) -> Hash {
    let mut id = *start_hash;
    for _ in 1..num_hashes {
        id = hash(&id);
    }

    // Hash all the event data
    let mut hash_data = vec![];
    for event in events {
        let sig = event.get_signature();
        if let Some(sig) = sig {
            hash_data.extend_from_slice(&sig);
        }
    }

    if !hash_data.is_empty() {
        return extend_and_hash(&id, &hash_data);
    }

    id
}

/// Creates the next Entry `num_hashes` after `start_hash`.
pub fn create_entry(start_hash: &Hash, cur_hashes: u64, events: Vec<Event>) -> Entry {
    let num_hashes = cur_hashes + if events.is_empty() { 0 } else { 1 };
    let id = next_hash(start_hash, 0, &events);
    Entry {
        num_hashes,
        id,
        events,
    }
}

/// Creates the next Entry `num_hashes` after `start_hash`, updating the
/// hash state in place.
pub fn create_entry_mut(start_hash: &mut Hash, cur_hashes: &mut u64, events: Vec<Event>) -> Entry {
    let entry = create_entry(start_hash, *cur_hashes, events);
    *start_hash = entry.id;
    *cur_hashes = 0;
    entry
}

/// Creates the next Tick Entry `num_hashes` after `start_hash`.
pub fn next_tick(start_hash: &Hash, num_hashes: u64) -> Entry {
    Entry {
        num_hashes,
        id: next_hash(start_hash, num_hashes, &[]),
        events: vec![],
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use entry::create_entry;
    use event::Event;
    use hash::hash;
    use signature::{KeyPair, KeyPairUtil};
    use transaction::Transaction;

    #[test]
    fn test_entry_verify() {
        let zero = Hash::default();
        let one = hash(&zero);
        assert!(Entry::new_tick(0, &zero).verify(&zero)); // base case
        assert!(!Entry::new_tick(0, &zero).verify(&one)); // base case, bad
        assert!(next_tick(&zero, 1).verify(&zero)); // inductive step
        assert!(!next_tick(&zero, 1).verify(&one)); // inductive step, bad
    }

    #[test]
    fn test_event_reorder_attack() {
        let zero = Hash::default();

        // First, verify entries
        let keypair = KeyPair::new();
        let tr0 = Event::Transaction(Transaction::new(&keypair, keypair.pubkey(), 0, zero));
        let tr1 = Event::Transaction(Transaction::new(&keypair, keypair.pubkey(), 1, zero));
        let mut e0 = create_entry(&zero, 0, vec![tr0.clone(), tr1.clone()]);
        assert!(e0.verify(&zero));

        // Next, swap two events and ensure verification fails.
        e0.events[0] = tr1; // <-- attack
        e0.events[1] = tr0;
        assert!(!e0.verify(&zero));
    }

    #[test]
    fn test_next_tick() {
        let zero = Hash::default();
        assert_eq!(next_tick(&zero, 1).num_hashes, 1)
    }
}
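
Note how `create_entry_mut` threads the hash state through its `&mut` arguments: each call re-seeds the chain at the new entry's id. A small sketch of building a chain this way (the `two_entries` helper is hypothetical):

```rust
use solana::entry::{create_entry_mut, Entry};
use solana::event::Event;
use solana::hash::Hash;

// Build a two-entry chain; each call advances *id to the new entry's id
// and resets the intervening hash count.
fn two_entries(events0: Vec<Event>, events1: Vec<Event>) -> Vec<Entry> {
    let mut id = Hash::default();
    let mut num_hashes = 0;
    let e0 = create_entry_mut(&mut id, &mut num_hashes, events0);
    let e1 = create_entry_mut(&mut id, &mut num_hashes, events1);
    vec![e0, e1]
}
```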
src/event.rs (new file, 49 lines)
@@ -0,0 +1,49 @@
//! The `event` module provides the data structures for log events.

use bincode::serialize;
use chrono::prelude::*;
use signature::{KeyPair, KeyPairUtil, PublicKey, Signature, SignatureUtil};
use transaction::Transaction;

#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub enum Event {
    Transaction(Transaction),
    Signature {
        from: PublicKey,
        tx_sig: Signature,
        sig: Signature,
    },
    Timestamp {
        from: PublicKey,
        dt: DateTime<Utc>,
        sig: Signature,
    },
}

impl Event {
    pub fn new_timestamp(from: &KeyPair, dt: DateTime<Utc>) -> Self {
        let sign_data = serialize(&dt).unwrap();
        let sig = Signature::clone_from_slice(from.sign(&sign_data).as_ref());
        Event::Timestamp {
            from: from.pubkey(),
            dt,
            sig,
        }
    }

    // TODO: Rename this to transaction_signature().
    pub fn get_signature(&self) -> Option<Signature> {
        match *self {
            Event::Transaction(ref tr) => Some(tr.sig),
            Event::Signature { .. } | Event::Timestamp { .. } => None,
        }
    }

    pub fn verify(&self) -> bool {
        match *self {
            Event::Transaction(ref tr) => tr.verify(),
            Event::Signature { from, tx_sig, sig } => sig.verify(&from, &tx_sig),
            Event::Timestamp { from, dt, sig } => sig.verify(&from, &serialize(&dt).unwrap()),
        }
    }
}
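
Each variant is self-authenticating: `verify()` needs only the event itself, because the signed payload can be rebuilt from the event's own fields. A quick sketch of the round trip (chrono is already a dependency in Cargo.toml):

```rust
extern crate chrono;
extern crate solana;

use chrono::prelude::*;
use solana::event::Event;
use solana::signature::{KeyPair, KeyPairUtil};

fn main() {
    let keypair = KeyPair::new();
    let event = Event::new_timestamp(&keypair, Utc::now());
    // The signature covers the serialized timestamp, so verification
    // needs no external state.
    assert!(event.verify());
}
```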
src/hash.rs (new file, 19 lines)
@@ -0,0 +1,19 @@
use generic_array::GenericArray;
use generic_array::typenum::U32;
use sha2::{Digest, Sha256};

pub type Hash = GenericArray<u8, U32>;

/// Return a Sha256 hash for the given data.
pub fn hash(val: &[u8]) -> Hash {
    let mut hasher = Sha256::default();
    hasher.input(val);
    hasher.result()
}

/// Return the hash of the given hash extended with the given value.
pub fn extend_and_hash(id: &Hash, val: &[u8]) -> Hash {
    let mut hash_data = id.to_vec();
    hash_data.extend_from_slice(val);
    hash(&hash_data)
}
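
These two functions are the entire Proof-of-History primitive: `hash` advances the chain and `extend_and_hash` folds external data into it. A quick sketch of the chaining:

```rust
extern crate solana;

use solana::hash::{extend_and_hash, hash, Hash};

fn main() {
    let h0 = Hash::default();
    let h1 = hash(&h0); // one tick of the chain
    let h2 = extend_and_hash(&h1, b"event data"); // mix in event data
    assert_ne!(h2, hash(&h1)); // the data changed the chain
}
```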
182
src/historian.rs
182
src/historian.rs
@@ -1,137 +1,135 @@
 //! The `historian` crate provides a microservice for generating a Proof-of-History.
-//! It logs Event items on behalf of its users. It continuously generates
-//! new hashes, only stopping to check if it has been sent an Event item. It
-//! tags each Event with an Entry and sends it back. The Entry includes the
-//! Event, the latest hash, and the number of hashes since the last event.
-//! The resulting stream of entries represents ordered events in time.
+//! It manages a thread containing a Proof-of-History Logger.
 
-use std::thread::JoinHandle;
-use std::sync::mpsc::{Receiver, Sender};
-use log::{hash, Entry, Event};
+use entry::Entry;
+use hash::Hash;
+use recorder::{ExitReason, Recorder, Signal};
+use signature::Signature;
+use std::collections::HashSet;
+use std::sync::mpsc::{sync_channel, Receiver, SyncSender};
+use std::thread::{spawn, JoinHandle};
+use std::time::Instant;
 
 pub struct Historian {
-    pub sender: Sender<Event>,
+    pub sender: SyncSender<Signal>,
     pub receiver: Receiver<Entry>,
-    pub thread_hdl: JoinHandle<(Entry, ExitReason)>,
-}
-
-#[derive(Debug, PartialEq, Eq)]
-pub enum ExitReason {
-    RecvDisconnected,
-    SendDisconnected,
-}
-
-fn log_events(
-    receiver: &Receiver<Event>,
-    sender: &Sender<Entry>,
-    num_hashes: u64,
-    end_hash: u64,
-) -> Result<u64, (Entry, ExitReason)> {
-    use std::sync::mpsc::TryRecvError;
-    let mut num_hashes = num_hashes;
-    loop {
-        match receiver.try_recv() {
-            Ok(event) => {
-                let entry = Entry {
-                    end_hash,
-                    num_hashes,
-                    event,
-                };
-                if let Err(_) = sender.send(entry.clone()) {
-                    return Err((entry, ExitReason::SendDisconnected));
-                }
-                num_hashes = 0;
-            }
-            Err(TryRecvError::Empty) => {
-                return Ok(num_hashes);
-            }
-            Err(TryRecvError::Disconnected) => {
-                let entry = Entry {
-                    end_hash,
-                    num_hashes,
-                    event: Event::Tick,
-                };
-                return Err((entry, ExitReason::RecvDisconnected));
-            }
-        }
-    }
-}
-
-/// A background thread that will continue tagging received Event messages and
-/// sending back Entry messages until either the receiver or sender channel is closed.
-pub fn create_logger(
-    start_hash: u64,
-    receiver: Receiver<Event>,
-    sender: Sender<Entry>,
-) -> JoinHandle<(Entry, ExitReason)> {
-    use std::thread;
-    thread::spawn(move || {
-        let mut end_hash = start_hash;
-        let mut num_hashes = 0;
-        loop {
-            match log_events(&receiver, &sender, num_hashes, end_hash) {
-                Ok(n) => num_hashes = n,
-                Err(err) => return err,
-            }
-            end_hash = hash(end_hash);
-            num_hashes += 1;
-        }
-    })
+    pub thread_hdl: JoinHandle<ExitReason>,
+    pub signatures: HashSet<Signature>,
 }
 
 impl Historian {
-    pub fn new(start_hash: u64) -> Self {
-        use std::sync::mpsc::channel;
-        let (sender, event_receiver) = channel();
-        let (entry_sender, receiver) = channel();
-        let thread_hdl = create_logger(start_hash, event_receiver, entry_sender);
+    pub fn new(start_hash: &Hash, ms_per_tick: Option<u64>) -> Self {
+        let (sender, event_receiver) = sync_channel(1000);
+        let (entry_sender, receiver) = sync_channel(1000);
+        let thread_hdl =
+            Historian::create_recorder(*start_hash, ms_per_tick, event_receiver, entry_sender);
+        let signatures = HashSet::new();
         Historian {
             sender,
             receiver,
             thread_hdl,
+            signatures,
         }
     }
+
+    pub fn reserve_signature(&mut self, sig: &Signature) -> bool {
+        if self.signatures.contains(sig) {
+            return false;
+        }
+        self.signatures.insert(*sig);
+        true
+    }
+
+    /// A background thread that will continue tagging received Event messages and
+    /// sending back Entry messages until either the receiver or sender channel is closed.
+    fn create_recorder(
+        start_hash: Hash,
+        ms_per_tick: Option<u64>,
+        receiver: Receiver<Signal>,
+        sender: SyncSender<Entry>,
+    ) -> JoinHandle<ExitReason> {
+        spawn(move || {
+            let mut recorder = Recorder::new(receiver, sender, start_hash);
+            let now = Instant::now();
+            loop {
+                if let Err(err) = recorder.process_events(now, ms_per_tick) {
+                    return err;
+                }
+                if ms_per_tick.is_some() {
+                    recorder.hash();
+                }
+            }
+        })
+    }
 }
 
 #[cfg(test)]
 mod tests {
     use super::*;
-    use log::*;
+    use ledger::*;
+    use std::thread::sleep;
+    use std::time::Duration;
 
     #[test]
     fn test_historian() {
-        use std::thread::sleep;
-        use std::time::Duration;
+        let zero = Hash::default();
+        let hist = Historian::new(&zero, None);
 
-        let hist = Historian::new(0);
-
-        hist.sender.send(Event::Tick).unwrap();
+        hist.sender.send(Signal::Tick).unwrap();
         sleep(Duration::new(0, 1_000_000));
-        hist.sender.send(Event::UserDataKey(0xdeadbeef)).unwrap();
+        hist.sender.send(Signal::Tick).unwrap();
         sleep(Duration::new(0, 1_000_000));
-        hist.sender.send(Event::Tick).unwrap();
+        hist.sender.send(Signal::Tick).unwrap();
 
         let entry0 = hist.receiver.recv().unwrap();
         let entry1 = hist.receiver.recv().unwrap();
         let entry2 = hist.receiver.recv().unwrap();
 
         assert_eq!(entry0.num_hashes, 0);
         assert_eq!(entry1.num_hashes, 0);
         assert_eq!(entry2.num_hashes, 0);
 
         drop(hist.sender);
         assert_eq!(
-            hist.thread_hdl.join().unwrap().1,
+            hist.thread_hdl.join().unwrap(),
             ExitReason::RecvDisconnected
         );
 
-        assert!(verify_slice(&[entry0, entry1, entry2], 0));
+        assert!(verify_slice(&[entry0, entry1, entry2], &zero));
     }
 
     #[test]
     fn test_historian_closed_sender() {
-        let hist = Historian::new(0);
+        let zero = Hash::default();
+        let hist = Historian::new(&zero, None);
         drop(hist.receiver);
-        hist.sender.send(Event::Tick).unwrap();
+        hist.sender.send(Signal::Tick).unwrap();
         assert_eq!(
-            hist.thread_hdl.join().unwrap().1,
+            hist.thread_hdl.join().unwrap(),
            ExitReason::SendDisconnected
         );
     }
+
+    #[test]
+    fn test_duplicate_event_signature() {
+        let zero = Hash::default();
+        let mut hist = Historian::new(&zero, None);
+        let sig = Signature::default();
+        assert!(hist.reserve_signature(&sig));
+        assert!(!hist.reserve_signature(&sig));
+    }
+
+    #[test]
+    fn test_ticking_historian() {
+        let zero = Hash::default();
+        let hist = Historian::new(&zero, Some(20));
+        sleep(Duration::from_millis(30));
+        hist.sender.send(Signal::Tick).unwrap();
+        drop(hist.sender);
+        let entries: Vec<Entry> = hist.receiver.iter().collect();
+        assert!(entries.len() > 1);
+
+        // Ensure the ID is not the seed.
+        assert_ne!(entries[0].id, zero);
+    }
 }
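Putting the new API together (a sketch, not part of the diff): with `ms_per_tick` set, the Historian ticks itself, so a caller only needs to drain entries. The timing numbers are arbitrary; the pattern mirrors test_ticking_historian above.

use hash::Hash;
use historian::Historian;
use std::thread::sleep;
use std::time::Duration;

// Hypothetical demo: a self-ticking Historian cutting an entry roughly every 10 ms.
fn demo_historian() {
    let seed = Hash::default();
    let hist = Historian::new(&seed, Some(10));
    sleep(Duration::from_millis(50));
    drop(hist.sender); // closing the channel makes the recorder thread exit
    let entries: Vec<_> = hist.receiver.iter().collect();
    assert!(!entries.is_empty());
}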
72 src/ledger.rs Normal file
@@ -0,0 +1,72 @@
//! The `ledger` crate provides the foundational data structures for Proof-of-History,
//! an ordered log of events in time.

/// Each entry contains three pieces of data. The `num_hashes` field is the number
/// of hashes performed since the previous entry. The `id` field is the result
/// of hashing `id` from the previous entry `num_hashes` times. The `event`
/// field points to an Event that took place shortly after `id` was generated.
///
/// If you divide `num_hashes` by the amount of time it takes to generate a new hash, you
/// get a duration estimate since the last event. Since processing power increases
/// over time, one should expect the duration `num_hashes` represents to decrease proportionally.
/// Though processing power varies across nodes, the network gives priority to the
/// fastest processor. Duration should therefore be estimated by assuming that the hash
/// was generated by the fastest processor at the time the entry was recorded.
use entry::{next_tick, Entry};
use hash::Hash;
use rayon::prelude::*;

/// Verifies the hashes and counts of a slice of events are all consistent.
pub fn verify_slice(entries: &[Entry], start_hash: &Hash) -> bool {
    let genesis = [Entry::new_tick(Default::default(), start_hash)];
    let event_pairs = genesis.par_iter().chain(entries).zip(entries);
    event_pairs.all(|(x0, x1)| x1.verify(&x0.id))
}

/// Create a vector of Ticks of length `len` from `start_hash` hash and `num_hashes`.
pub fn next_ticks(start_hash: &Hash, num_hashes: u64, len: usize) -> Vec<Entry> {
    let mut id = *start_hash;
    let mut ticks = vec![];
    for _ in 0..len {
        let entry = next_tick(&id, num_hashes);
        id = entry.id;
        ticks.push(entry);
    }
    ticks
}

#[cfg(test)]
mod tests {
    use super::*;
    use hash::hash;

    #[test]
    fn test_verify_slice() {
        let zero = Hash::default();
        let one = hash(&zero);
        assert!(verify_slice(&vec![], &zero)); // base case
        assert!(verify_slice(&vec![Entry::new_tick(0, &zero)], &zero)); // singleton case 1
        assert!(!verify_slice(&vec![Entry::new_tick(0, &zero)], &one)); // singleton case 2, bad
        assert!(verify_slice(&next_ticks(&zero, 0, 2), &zero)); // inductive step

        let mut bad_ticks = next_ticks(&zero, 0, 2);
        bad_ticks[1].id = one;
        assert!(!verify_slice(&bad_ticks, &zero)); // inductive step, bad
    }
}

#[cfg(all(feature = "unstable", test))]
mod bench {
    extern crate test;
    use self::test::Bencher;
    use ledger::*;

    #[bench]
    fn event_bench(bencher: &mut Bencher) {
        let start_hash = Default::default();
        let events = next_ticks(&start_hash, 10_000, 8);
        bencher.iter(|| {
            assert!(verify_slice(&events, &start_hash));
        });
    }
}
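The duration estimate described in the doc comment above is just a division. A worked example with illustrative, not measured, numbers:

// Hypothetical numbers: at 2,000,000 hashes per second, an entry recorded
// with num_hashes = 500_000 represents roughly 0.25 seconds of real time.
fn estimated_seconds(num_hashes: u64, hashes_per_second: u64) -> f64 {
    num_hashes as f64 / hashes_per_second as f64
}

fn demo_duration_estimate() {
    assert_eq!(estimated_seconds(500_000, 2_000_000), 0.25);
}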
32 src/lib.rs
@@ -1,5 +1,33 @@
 #![cfg_attr(feature = "unstable", feature(test))]
-pub mod log;
+pub mod accountant;
+pub mod accountant_skel;
+pub mod accountant_stub;
+pub mod entry;
+pub mod event;
+pub mod hash;
+pub mod historian;
-extern crate itertools;
+pub mod ledger;
+pub mod mint;
+pub mod plan;
+pub mod recorder;
+pub mod result;
+pub mod signature;
+pub mod streamer;
+pub mod transaction;
+extern crate bincode;
+extern crate chrono;
+extern crate generic_array;
+#[macro_use]
+extern crate log;
 extern crate rayon;
+extern crate ring;
+extern crate serde;
+#[macro_use]
+extern crate serde_derive;
+extern crate serde_json;
+extern crate sha2;
+extern crate untrusted;
+
+#[cfg(test)]
+#[macro_use]
+extern crate matches;
157 src/log.rs
@@ -1,157 +0,0 @@
//! The `log` crate provides the foundational data structures for Proof-of-History,
//! an ordered log of events in time.

/// Each log entry contains three pieces of data. The 'num_hashes' field is the number
/// of hashes performed since the previous entry. The 'end_hash' field is the result
/// of hashing 'end_hash' from the previous entry 'num_hashes' times. The 'event'
/// field points to an Event that took place shortly after 'end_hash' was generated.
///
/// If you divide 'num_hashes' by the amount of time it takes to generate a new hash, you
/// get a duration estimate since the last event. Since processing power increases
/// over time, one should expect the duration 'num_hashes' represents to decrease proportionally.
/// Though processing power varies across nodes, the network gives priority to the
/// fastest processor. Duration should therefore be estimated by assuming that the hash
/// was generated by the fastest processor at the time the entry was logged.
#[derive(Debug, PartialEq, Eq, Clone)]
pub struct Entry {
    pub num_hashes: u64,
    pub end_hash: u64,
    pub event: Event,
}

/// When 'event' is Tick, the event represents a simple clock tick, and exists for the
/// sole purpose of improving the performance of event log verification. A tick can
/// be generated in 'num_hashes' hashes and verified in 'num_hashes' hashes. By logging
/// a hash alongside the tick, each tick can be verified in parallel using the 'end_hash'
/// of the preceding tick to seed its hashing.
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum Event {
    Tick,
    UserDataKey(u64),
}

impl Entry {
    /// Creates an Entry from the number of hashes 'num_hashes' since the previous event
    /// and that resulting 'end_hash'.
    pub fn new_tick(num_hashes: u64, end_hash: u64) -> Self {
        let event = Event::Tick;
        Entry {
            num_hashes,
            end_hash,
            event,
        }
    }

    /// Verifies self.end_hash is the result of hashing a 'start_hash' 'self.num_hashes' times.
    pub fn verify(self: &Self, start_hash: u64) -> bool {
        self.end_hash == next_tick(start_hash, self.num_hashes).end_hash
    }
}

pub fn hash(val: u64) -> u64 {
    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};
    let mut hasher = DefaultHasher::new();
    val.hash(&mut hasher);
    hasher.finish()
}

/// Creates the next Tick Entry 'num_hashes' after 'start_hash'.
pub fn next_tick(start_hash: u64, num_hashes: u64) -> Entry {
    let mut end_hash = start_hash;
    for _ in 0..num_hashes {
        end_hash = hash(end_hash);
    }
    Entry::new_tick(num_hashes, end_hash)
}

/// Verifies the hashes and counts of a slice of events are all consistent.
pub fn verify_slice(events: &[Entry], start_hash: u64) -> bool {
    use rayon::prelude::*;
    let genesis = [Entry::new_tick(0, start_hash)];
    let event_pairs = genesis.par_iter().chain(events).zip(events);
    event_pairs.all(|(x0, x1)| x1.verify(x0.end_hash))
}

/// Verifies the hashes and events serially. Exists only for reference.
pub fn verify_slice_seq(events: &[Entry], start_hash: u64) -> bool {
    let genesis = [Entry::new_tick(0, start_hash)];
    let mut event_pairs = genesis.iter().chain(events).zip(events);
    event_pairs.all(|(x0, x1)| x1.verify(x0.end_hash))
}

/// Create a vector of Ticks of length 'len' from 'start_hash' hash and 'num_hashes'.
pub fn create_ticks(start_hash: u64, num_hashes: u64, len: usize) -> Vec<Entry> {
    use itertools::unfold;
    let mut events = unfold(start_hash, |state| {
        let event = next_tick(*state, num_hashes);
        *state = event.end_hash;
        Some(event)
    });
    events.by_ref().take(len).collect()
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_event_verify() {
        assert!(Entry::new_tick(0, 0).verify(0)); // base case
        assert!(!Entry::new_tick(0, 0).verify(1)); // base case, bad
        assert!(next_tick(0, 1).verify(0)); // inductive step
        assert!(!next_tick(0, 1).verify(1)); // inductive step, bad
    }

    #[test]
    fn test_next_tick() {
        assert_eq!(next_tick(0, 1).num_hashes, 1)
    }

    fn verify_slice_generic(verify_slice: fn(&[Entry], u64) -> bool) {
        assert!(verify_slice(&vec![], 0)); // base case
        assert!(verify_slice(&vec![Entry::new_tick(0, 0)], 0)); // singleton case 1
        assert!(!verify_slice(&vec![Entry::new_tick(0, 0)], 1)); // singleton case 2, bad
        assert!(verify_slice(&create_ticks(0, 0, 2), 0)); // inductive step

        let mut bad_ticks = create_ticks(0, 0, 2);
        bad_ticks[1].end_hash = 1;
        assert!(!verify_slice(&bad_ticks, 0)); // inductive step, bad
    }

    #[test]
    fn test_verify_slice() {
        verify_slice_generic(verify_slice);
    }

    #[test]
    fn test_verify_slice_seq() {
        verify_slice_generic(verify_slice_seq);
    }
}

#[cfg(all(feature = "unstable", test))]
mod bench {
    extern crate test;
    use self::test::Bencher;
    use log::*;

    #[bench]
    fn event_bench(bencher: &mut Bencher) {
        let start_hash = 0;
        let events = create_ticks(start_hash, 100_000, 8);
        bencher.iter(|| {
            assert!(verify_slice(&events, start_hash));
        });
    }

    #[bench]
    fn event_bench_seq(bencher: &mut Bencher) {
        let start_hash = 0;
        let events = create_ticks(start_hash, 100_000, 8);
        bencher.iter(|| {
            assert!(verify_slice_seq(&events, start_hash));
        });
    }
}
79 src/mint.rs Normal file
@@ -0,0 +1,79 @@
//! A library for generating the chain's genesis block.

use entry::Entry;
use entry::create_entry;
use event::Event;
use hash::{hash, Hash};
use ring::rand::SystemRandom;
use signature::{KeyPair, KeyPairUtil, PublicKey};
use transaction::Transaction;
use untrusted::Input;

#[derive(Serialize, Deserialize, Debug)]
pub struct Mint {
    pub pkcs8: Vec<u8>,
    pubkey: PublicKey,
    pub tokens: i64,
}

impl Mint {
    pub fn new(tokens: i64) -> Self {
        let rnd = SystemRandom::new();
        let pkcs8 = KeyPair::generate_pkcs8(&rnd).unwrap().to_vec();
        let keypair = KeyPair::from_pkcs8(Input::from(&pkcs8)).unwrap();
        let pubkey = keypair.pubkey();
        Mint {
            pkcs8,
            pubkey,
            tokens,
        }
    }

    pub fn seed(&self) -> Hash {
        hash(&self.pkcs8)
    }

    pub fn keypair(&self) -> KeyPair {
        KeyPair::from_pkcs8(Input::from(&self.pkcs8)).unwrap()
    }

    pub fn pubkey(&self) -> PublicKey {
        self.pubkey
    }

    pub fn create_events(&self) -> Vec<Event> {
        let keypair = self.keypair();
        let tr = Transaction::new(&keypair, self.pubkey(), self.tokens, self.seed());
        vec![Event::Transaction(tr)]
    }

    pub fn create_entries(&self) -> Vec<Entry> {
        let e0 = create_entry(&self.seed(), 0, vec![]);
        let e1 = create_entry(&e0.id, 0, self.create_events());
        vec![e0, e1]
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use ledger::verify_slice;
    use plan::Plan;

    #[test]
    fn test_create_events() {
        let mut events = Mint::new(100).create_events().into_iter();
        if let Event::Transaction(tr) = events.next().unwrap() {
            if let Plan::Pay(payment) = tr.plan {
                assert_eq!(tr.from, payment.to);
            }
        }
        assert_eq!(events.next(), None);
    }

    #[test]
    fn test_verify_entries() {
        let entries = Mint::new(100).create_entries();
        assert!(verify_slice(&entries, &entries[0].id));
    }
}
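Beyond the self-pay genesis above, the Mint is the root of the token supply: its keypair can sign ordinary transfers anchored at its seed hash. A sketch (not part of the diff; the recipient and amount are our invention, and `Transaction` is the type defined later in this diff):

use mint::Mint;
use signature::{KeyPair, KeyPairUtil};
use transaction::Transaction;

// Hypothetical demo: spend some of the mint's tokens to a fresh keypair.
fn demo_spend_from_mint() {
    let mint = Mint::new(1_000);
    let alice = KeyPair::new();
    let tr = Transaction::new(&mint.keypair(), alice.pubkey(), 500, mint.seed());
    assert!(tr.verify()); // signature and payment plan both check out
}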
167 src/plan.rs Normal file
@@ -0,0 +1,167 @@
//! A domain-specific language for payment plans. Users create Plan objects that
//! are given to an interpreter. The interpreter listens for `Witness` events,
//! which it uses to reduce the payment plan. When the plan is reduced to a
//! `Payment`, the payment is executed.

use chrono::prelude::*;
use signature::PublicKey;
use std::mem;

pub enum Witness {
    Timestamp(DateTime<Utc>),
    Signature(PublicKey),
}

#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub enum Condition {
    Timestamp(DateTime<Utc>),
    Signature(PublicKey),
}

impl Condition {
    pub fn is_satisfied(&self, witness: &Witness) -> bool {
        match (self, witness) {
            (&Condition::Signature(ref pubkey), &Witness::Signature(ref from)) => pubkey == from,
            (&Condition::Timestamp(ref dt), &Witness::Timestamp(ref last_time)) => dt <= last_time,
            _ => false,
        }
    }
}

#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub struct Payment {
    pub tokens: i64,
    pub to: PublicKey,
}

#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub enum Plan {
    Pay(Payment),
    After(Condition, Payment),
    Race((Condition, Payment), (Condition, Payment)),
}

impl Plan {
    pub fn new_payment(tokens: i64, to: PublicKey) -> Self {
        Plan::Pay(Payment { tokens, to })
    }

    pub fn new_authorized_payment(from: PublicKey, tokens: i64, to: PublicKey) -> Self {
        Plan::After(Condition::Signature(from), Payment { tokens, to })
    }

    pub fn new_future_payment(dt: DateTime<Utc>, tokens: i64, to: PublicKey) -> Self {
        Plan::After(Condition::Timestamp(dt), Payment { tokens, to })
    }

    pub fn new_cancelable_future_payment(
        dt: DateTime<Utc>,
        from: PublicKey,
        tokens: i64,
        to: PublicKey,
    ) -> Self {
        Plan::Race(
            (Condition::Timestamp(dt), Payment { tokens, to }),
            (Condition::Signature(from), Payment { tokens, to: from }),
        )
    }

    pub fn is_complete(&self) -> bool {
        match *self {
            Plan::Pay(_) => true,
            _ => false,
        }
    }

    pub fn verify(&self, spendable_tokens: i64) -> bool {
        match *self {
            Plan::Pay(ref payment) | Plan::After(_, ref payment) => {
                payment.tokens == spendable_tokens
            }
            Plan::Race(ref a, ref b) => {
                a.1.tokens == spendable_tokens && b.1.tokens == spendable_tokens
            }
        }
    }

    /// Apply a witness to the spending plan to see if the plan can be reduced.
    /// If so, modify the plan in-place.
    pub fn apply_witness(&mut self, witness: &Witness) {
        let new_payment = match *self {
            Plan::After(ref cond, ref payment) if cond.is_satisfied(witness) => Some(payment),
            Plan::Race((ref cond, ref payment), _) if cond.is_satisfied(witness) => Some(payment),
            Plan::Race(_, (ref cond, ref payment)) if cond.is_satisfied(witness) => Some(payment),
            _ => None,
        }.cloned();

        if let Some(payment) = new_payment {
            mem::replace(self, Plan::Pay(payment));
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_signature_satisfied() {
        let sig = PublicKey::default();
        assert!(Condition::Signature(sig).is_satisfied(&Witness::Signature(sig)));
    }

    #[test]
    fn test_timestamp_satisfied() {
        let dt1 = Utc.ymd(2014, 11, 14).and_hms(8, 9, 10);
        let dt2 = Utc.ymd(2014, 11, 14).and_hms(10, 9, 8);
        assert!(Condition::Timestamp(dt1).is_satisfied(&Witness::Timestamp(dt1)));
        assert!(Condition::Timestamp(dt1).is_satisfied(&Witness::Timestamp(dt2)));
        assert!(!Condition::Timestamp(dt2).is_satisfied(&Witness::Timestamp(dt1)));
    }

    #[test]
    fn test_verify_plan() {
        let dt = Utc.ymd(2014, 11, 14).and_hms(8, 9, 10);
        let from = PublicKey::default();
        let to = PublicKey::default();
        assert!(Plan::new_payment(42, to).verify(42));
        assert!(Plan::new_authorized_payment(from, 42, to).verify(42));
        assert!(Plan::new_future_payment(dt, 42, to).verify(42));
        assert!(Plan::new_cancelable_future_payment(dt, from, 42, to).verify(42));
    }

    #[test]
    fn test_authorized_payment() {
        let from = PublicKey::default();
        let to = PublicKey::default();

        let mut plan = Plan::new_authorized_payment(from, 42, to);
        plan.apply_witness(&Witness::Signature(from));
        assert_eq!(plan, Plan::new_payment(42, to));
    }

    #[test]
    fn test_future_payment() {
        let dt = Utc.ymd(2014, 11, 14).and_hms(8, 9, 10);
        let to = PublicKey::default();

        let mut plan = Plan::new_future_payment(dt, 42, to);
        plan.apply_witness(&Witness::Timestamp(dt));
        assert_eq!(plan, Plan::new_payment(42, to));
    }

    #[test]
    fn test_cancelable_future_payment() {
        let dt = Utc.ymd(2014, 11, 14).and_hms(8, 9, 10);
        let from = PublicKey::default();
        let to = PublicKey::default();

        let mut plan = Plan::new_cancelable_future_payment(dt, from, 42, to);
        plan.apply_witness(&Witness::Timestamp(dt));
        assert_eq!(plan, Plan::new_payment(42, to));

        let mut plan = Plan::new_cancelable_future_payment(dt, from, 42, to);
        plan.apply_witness(&Witness::Signature(from));
        assert_eq!(plan, Plan::new_payment(42, from));
    }
}
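One case the tests above do not cover (a sketch, not part of the diff): a witness that satisfies neither condition leaves the plan untouched, since `apply_witness` only reduces the plan when a condition is met.

use chrono::prelude::*;
use plan::{Plan, Witness};
use signature::PublicKey;

// Hypothetical demo: the wrong kind of witness does not reduce the plan.
fn demo_unmet_witness() {
    let dt = Utc.ymd(2018, 1, 1).and_hms(0, 0, 0);
    let to = PublicKey::default();
    let mut plan = Plan::new_future_payment(dt, 42, to);
    plan.apply_witness(&Witness::Signature(to)); // plan is waiting on a timestamp
    assert!(!plan.is_complete()); // still an After, not a Pay
}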
89 src/recorder.rs Normal file
@@ -0,0 +1,89 @@
//! The `recorder` crate provides an object for generating a Proof-of-History.
//! It records Event items on behalf of its users. It continuously generates
//! new hashes, only stopping to check if it has been sent an Event item. It
//! tags each Event with an Entry and sends it back. The Entry includes the
//! Event, the latest hash, and the number of hashes since the last event.
//! The resulting stream of entries represents ordered events in time.

use entry::{create_entry_mut, Entry};
use event::Event;
use hash::{hash, Hash};
use std::mem;
use std::sync::mpsc::{Receiver, SyncSender, TryRecvError};
use std::time::{Duration, Instant};

#[cfg_attr(feature = "cargo-clippy", allow(large_enum_variant))]
pub enum Signal {
    Tick,
    Event(Event),
}

#[derive(Debug, PartialEq, Eq)]
pub enum ExitReason {
    RecvDisconnected,
    SendDisconnected,
}

pub struct Recorder {
    sender: SyncSender<Entry>,
    receiver: Receiver<Signal>,
    last_hash: Hash,
    events: Vec<Event>,
    num_hashes: u64,
    num_ticks: u64,
}

impl Recorder {
    pub fn new(receiver: Receiver<Signal>, sender: SyncSender<Entry>, start_hash: Hash) -> Self {
        Recorder {
            receiver,
            sender,
            last_hash: start_hash,
            events: vec![],
            num_hashes: 0,
            num_ticks: 0,
        }
    }

    pub fn hash(&mut self) {
        self.last_hash = hash(&self.last_hash);
        self.num_hashes += 1;
    }

    pub fn record_entry(&mut self) -> Result<(), ExitReason> {
        let events = mem::replace(&mut self.events, vec![]);
        let entry = create_entry_mut(&mut self.last_hash, &mut self.num_hashes, events);
        self.sender
            .send(entry)
            .or(Err(ExitReason::SendDisconnected))?;
        Ok(())
    }

    pub fn process_events(
        &mut self,
        epoch: Instant,
        ms_per_tick: Option<u64>,
    ) -> Result<(), ExitReason> {
        loop {
            if let Some(ms) = ms_per_tick {
                if epoch.elapsed() > Duration::from_millis((self.num_ticks + 1) * ms) {
                    self.record_entry()?;
                    self.num_ticks += 1;
                }
            }

            match self.receiver.try_recv() {
                Ok(signal) => match signal {
                    Signal::Tick => {
                        self.record_entry()?;
                    }
                    Signal::Event(event) => {
                        self.events.push(event);
                    }
                },
                Err(TryRecvError::Empty) => return Ok(()),
                Err(TryRecvError::Disconnected) => return Err(ExitReason::RecvDisconnected),
            };
        }
    }
}
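The Historian drives a Recorder on a background thread, but nothing stops a caller from driving one inline. A sketch using only the API shown above (not part of the diff; channel depths are arbitrary, and we only assert that the Tick produced some entry):

use hash::Hash;
use recorder::{Recorder, Signal};
use std::sync::mpsc::sync_channel;
use std::time::Instant;

// Hypothetical demo: one manual hash step, then a Tick that cuts an entry.
fn demo_recorder() {
    let (signal_sender, signal_receiver) = sync_channel(10);
    let (entry_sender, entry_receiver) = sync_channel(10);
    let mut recorder = Recorder::new(signal_receiver, entry_sender, Hash::default());
    signal_sender.send(Signal::Tick).unwrap();
    recorder.hash(); // one unit of Proof-of-History work
    recorder.process_events(Instant::now(), None).unwrap(); // drains the Tick
    assert!(entry_receiver.try_recv().is_ok()); // the Tick produced an Entry
}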
123 src/result.rs Normal file
@@ -0,0 +1,123 @@
use bincode;
use serde_json;
use std;
use std::any::Any;

#[derive(Debug)]
pub enum Error {
    IO(std::io::Error),
    JSON(serde_json::Error),
    AddrParse(std::net::AddrParseError),
    JoinError(Box<Any + Send + 'static>),
    RecvError(std::sync::mpsc::RecvError),
    RecvTimeoutError(std::sync::mpsc::RecvTimeoutError),
    Serialize(std::boxed::Box<bincode::ErrorKind>),
    SendError,
    Services,
}

pub type Result<T> = std::result::Result<T, Error>;

impl std::convert::From<std::sync::mpsc::RecvError> for Error {
    fn from(e: std::sync::mpsc::RecvError) -> Error {
        Error::RecvError(e)
    }
}
impl std::convert::From<std::sync::mpsc::RecvTimeoutError> for Error {
    fn from(e: std::sync::mpsc::RecvTimeoutError) -> Error {
        Error::RecvTimeoutError(e)
    }
}
impl<T> std::convert::From<std::sync::mpsc::SendError<T>> for Error {
    fn from(_e: std::sync::mpsc::SendError<T>) -> Error {
        Error::SendError
    }
}
impl std::convert::From<Box<Any + Send + 'static>> for Error {
    fn from(e: Box<Any + Send + 'static>) -> Error {
        Error::JoinError(e)
    }
}

impl std::convert::From<std::io::Error> for Error {
    fn from(e: std::io::Error) -> Error {
        Error::IO(e)
    }
}
impl std::convert::From<serde_json::Error> for Error {
    fn from(e: serde_json::Error) -> Error {
        Error::JSON(e)
    }
}
impl std::convert::From<std::net::AddrParseError> for Error {
    fn from(e: std::net::AddrParseError) -> Error {
        Error::AddrParse(e)
    }
}
impl std::convert::From<std::boxed::Box<bincode::ErrorKind>> for Error {
    fn from(e: std::boxed::Box<bincode::ErrorKind>) -> Error {
        Error::Serialize(e)
    }
}

#[cfg(test)]
mod tests {
    use result::Error;
    use result::Result;
    use serde_json;
    use std::io;
    use std::io::Write;
    use std::net::SocketAddr;
    use std::sync::mpsc::RecvError;
    use std::sync::mpsc::RecvTimeoutError;
    use std::sync::mpsc::channel;
    use std::thread;

    fn addr_parse_error() -> Result<SocketAddr> {
        let r = "12fdfasfsafsadfs".parse()?;
        Ok(r)
    }

    fn join_error() -> Result<()> {
        let r = thread::spawn(|| panic!("hi")).join()?;
        Ok(r)
    }
    fn json_error() -> Result<()> {
        let r = serde_json::from_slice("=342{;;;;:}".as_bytes())?;
        Ok(r)
    }
    fn send_error() -> Result<()> {
        let (s, r) = channel();
        drop(r);
        s.send(())?;
        Ok(())
    }

    #[test]
    fn from_test() {
        assert_matches!(addr_parse_error(), Err(Error::AddrParse(_)));
        assert_matches!(Error::from(RecvError {}), Error::RecvError(_));
        assert_matches!(
            Error::from(RecvTimeoutError::Timeout),
            Error::RecvTimeoutError(_)
        );
        assert_matches!(send_error(), Err(Error::SendError));
        assert_matches!(join_error(), Err(Error::JoinError(_)));
        let ioe = io::Error::new(io::ErrorKind::NotFound, "hi");
        assert_matches!(Error::from(ioe), Error::IO(_));
    }
    #[test]
    fn fmt_test() {
        write!(io::sink(), "{:?}", addr_parse_error()).unwrap();
        write!(io::sink(), "{:?}", Error::from(RecvError {})).unwrap();
        write!(io::sink(), "{:?}", Error::from(RecvTimeoutError::Timeout)).unwrap();
        write!(io::sink(), "{:?}", send_error()).unwrap();
        write!(io::sink(), "{:?}", join_error()).unwrap();
        write!(io::sink(), "{:?}", json_error()).unwrap();
        write!(
            io::sink(),
            "{:?}",
            Error::from(io::Error::new(io::ErrorKind::NotFound, "hi"))
        ).unwrap();
    }
}
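The payoff of all those From impls is that `?` does the conversion: any function in the crate can return this Result and mix error sources freely. A minimal sketch (the function name is ours):

use result::Result;
use std::net::SocketAddr;

// Hypothetical demo: AddrParseError converts into Error::AddrParse via `?`.
fn parse_addr(s: &str) -> Result<SocketAddr> {
    let addr = s.parse()?;
    Ok(addr)
}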
43 src/signature.rs Normal file
@@ -0,0 +1,43 @@
//! The `signature` crate provides functionality for public and private keys.

use generic_array::GenericArray;
use generic_array::typenum::{U32, U64};
use ring::signature::Ed25519KeyPair;
use ring::{rand, signature};
use untrusted;

pub type KeyPair = Ed25519KeyPair;
pub type PublicKey = GenericArray<u8, U32>;
pub type Signature = GenericArray<u8, U64>;

pub trait KeyPairUtil {
    fn new() -> Self;
    fn pubkey(&self) -> PublicKey;
}

impl KeyPairUtil for Ed25519KeyPair {
    /// Return a new ED25519 keypair
    fn new() -> Self {
        let rng = rand::SystemRandom::new();
        let pkcs8_bytes = signature::Ed25519KeyPair::generate_pkcs8(&rng).unwrap();
        signature::Ed25519KeyPair::from_pkcs8(untrusted::Input::from(&pkcs8_bytes)).unwrap()
    }

    /// Return the public key for the given keypair
    fn pubkey(&self) -> PublicKey {
        GenericArray::clone_from_slice(self.public_key_bytes())
    }
}

pub trait SignatureUtil {
    fn verify(&self, peer_public_key_bytes: &[u8], msg_bytes: &[u8]) -> bool;
}

impl SignatureUtil for GenericArray<u8, U64> {
    fn verify(&self, peer_public_key_bytes: &[u8], msg_bytes: &[u8]) -> bool {
        let peer_public_key = untrusted::Input::from(peer_public_key_bytes);
        let msg = untrusted::Input::from(msg_bytes);
        let sig = untrusted::Input::from(self);
        signature::verify(&signature::ED25519, peer_public_key, msg, sig).is_ok()
    }
}
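A sign/verify round trip shows how the two traits fit together; note the ring signature bytes are copied into the fixed-size GenericArray alias. A minimal sketch (not part of the diff):

use signature::{KeyPair, KeyPairUtil, Signature, SignatureUtil};

// Hypothetical demo: sign a message and verify it with the signer's pubkey.
fn demo_sign_verify() {
    let keypair = KeyPair::new();
    let msg = b"hello";
    let sig = Signature::clone_from_slice(keypair.sign(msg).as_ref());
    assert!(sig.verify(&keypair.pubkey(), msg));
}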
472 src/streamer.rs Normal file
@@ -0,0 +1,472 @@
use result::{Error, Result};
use std::fmt;
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, UdpSocket};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc;
use std::sync::{Arc, Mutex, RwLock};
use std::thread::{spawn, JoinHandle};
use std::time::Duration;

const BLOCK_SIZE: usize = 1024 * 8;
pub const PACKET_SIZE: usize = 256;
pub const RESP_SIZE: usize = 64 * 1024;
pub const NUM_RESP: usize = (BLOCK_SIZE * PACKET_SIZE) / RESP_SIZE;

#[derive(Clone, Default)]
pub struct Meta {
    pub size: usize,
    pub addr: [u16; 8],
    pub port: u16,
    pub v6: bool,
}

#[derive(Clone)]
pub struct Packet {
    pub data: [u8; PACKET_SIZE],
    pub meta: Meta,
}

impl fmt::Debug for Packet {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(
            f,
            "Packet {{ size: {:?}, addr: {:?} }}",
            self.meta.size,
            self.meta.get_addr()
        )
    }
}

impl Default for Packet {
    fn default() -> Packet {
        Packet {
            data: [0u8; PACKET_SIZE],
            meta: Meta::default(),
        }
    }
}

impl Meta {
    pub fn get_addr(&self) -> SocketAddr {
        if !self.v6 {
            let ipv4 = Ipv4Addr::new(
                self.addr[0] as u8,
                self.addr[1] as u8,
                self.addr[2] as u8,
                self.addr[3] as u8,
            );
            SocketAddr::new(IpAddr::V4(ipv4), self.port)
        } else {
            let ipv6 = Ipv6Addr::new(
                self.addr[0],
                self.addr[1],
                self.addr[2],
                self.addr[3],
                self.addr[4],
                self.addr[5],
                self.addr[6],
                self.addr[7],
            );
            SocketAddr::new(IpAddr::V6(ipv6), self.port)
        }
    }

    pub fn set_addr(&mut self, a: &SocketAddr) {
        match *a {
            SocketAddr::V4(v4) => {
                let ip = v4.ip().octets();
                self.addr[0] = u16::from(ip[0]);
                self.addr[1] = u16::from(ip[1]);
                self.addr[2] = u16::from(ip[2]);
                self.addr[3] = u16::from(ip[3]);
                self.port = a.port();
            }
            SocketAddr::V6(v6) => {
                self.addr = v6.ip().segments();
                self.port = a.port();
                self.v6 = true;
            }
        }
    }
}

#[derive(Debug)]
pub struct Packets {
    pub packets: Vec<Packet>,
}

impl Default for Packets {
    fn default() -> Packets {
        Packets {
            packets: vec![Packet::default(); BLOCK_SIZE],
        }
    }
}

#[derive(Clone)]
pub struct Response {
    pub data: [u8; RESP_SIZE],
    pub meta: Meta,
}

impl fmt::Debug for Response {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(
            f,
            "Response {{ size: {:?}, addr: {:?} }}",
            self.meta.size,
            self.meta.get_addr()
        )
    }
}

impl Default for Response {
    fn default() -> Response {
        Response {
            data: [0u8; RESP_SIZE],
            meta: Meta::default(),
        }
    }
}

#[derive(Debug)]
pub struct Responses {
    pub responses: Vec<Response>,
}

impl Default for Responses {
    fn default() -> Responses {
        Responses {
            responses: vec![Response::default(); NUM_RESP],
        }
    }
}

pub type SharedPackets = Arc<RwLock<Packets>>;
pub type PacketRecycler = Arc<Mutex<Vec<SharedPackets>>>;
pub type Receiver = mpsc::Receiver<SharedPackets>;
pub type Sender = mpsc::Sender<SharedPackets>;
pub type SharedResponses = Arc<RwLock<Responses>>;
pub type ResponseRecycler = Arc<Mutex<Vec<SharedResponses>>>;
pub type Responder = mpsc::Sender<SharedResponses>;
pub type ResponseReceiver = mpsc::Receiver<SharedResponses>;

impl Packets {
    fn run_read_from(&mut self, socket: &UdpSocket) -> Result<usize> {
        self.packets.resize(BLOCK_SIZE, Packet::default());
        let mut i = 0;
        socket.set_nonblocking(false)?;
        for p in &mut self.packets {
            p.meta.size = 0;
            match socket.recv_from(&mut p.data) {
                Err(_) if i > 0 => {
                    trace!("got {:?} messages", i);
                    break;
                }
                Err(e) => {
                    info!("recv_from err {:?}", e);
                    return Err(Error::IO(e));
                }
                Ok((nrecv, from)) => {
                    p.meta.size = nrecv;
                    p.meta.set_addr(&from);
                    if i == 0 {
                        socket.set_nonblocking(true)?;
                    }
                }
            }
            i += 1;
        }
        Ok(i)
    }

    fn read_from(&mut self, socket: &UdpSocket) -> Result<()> {
        let sz = self.run_read_from(socket)?;
        self.packets.resize(sz, Packet::default());
        Ok(())
    }
}

impl Responses {
    fn send_to(&self, socket: &UdpSocket, num: &mut usize) -> Result<()> {
        for p in &self.responses {
            let a = p.meta.get_addr();
            socket.send_to(&p.data[..p.meta.size], &a)?;
            //TODO(anatoly): wtf do we do about errors?
            *num += 1;
        }
        Ok(())
    }
}

pub fn allocate<T>(recycler: &Arc<Mutex<Vec<Arc<RwLock<T>>>>>) -> Arc<RwLock<T>>
where
    T: Default,
{
    let mut gc = recycler.lock().expect("lock");
    gc.pop()
        .unwrap_or_else(|| Arc::new(RwLock::new(Default::default())))
}

pub fn recycle<T>(recycler: &Arc<Mutex<Vec<Arc<RwLock<T>>>>>, msgs: Arc<RwLock<T>>)
where
    T: Default,
{
    let mut gc = recycler.lock().expect("lock");
    gc.push(msgs);
}

fn recv_loop(
    sock: &UdpSocket,
    exit: &Arc<AtomicBool>,
    recycler: &PacketRecycler,
    channel: &Sender,
) -> Result<()> {
    loop {
        let msgs = allocate(recycler);
        let msgs_ = msgs.clone();
        loop {
            match msgs.write().unwrap().read_from(sock) {
                Ok(()) => {
                    channel.send(msgs_)?;
                    break;
                }
                Err(_) => {
                    if exit.load(Ordering::Relaxed) {
                        recycle(recycler, msgs_);
                        return Ok(());
                    }
                }
            }
        }
    }
}

pub fn receiver(
    sock: UdpSocket,
    exit: Arc<AtomicBool>,
    recycler: PacketRecycler,
    channel: Sender,
) -> Result<JoinHandle<()>> {
    let timer = Duration::new(1, 0);
    sock.set_read_timeout(Some(timer))?;
    Ok(spawn(move || {
        let _ = recv_loop(&sock, &exit, &recycler, &channel);
        ()
    }))
}

fn recv_send(sock: &UdpSocket, recycler: &ResponseRecycler, r: &ResponseReceiver) -> Result<()> {
    let timer = Duration::new(1, 0);
    let msgs = r.recv_timeout(timer)?;
    let msgs_ = msgs.clone();
    let mut num = 0;
    msgs.read().unwrap().send_to(sock, &mut num)?;
    recycle(recycler, msgs_);
    Ok(())
}

pub fn responder(
    sock: UdpSocket,
    exit: Arc<AtomicBool>,
    recycler: ResponseRecycler,
    r: ResponseReceiver,
) -> JoinHandle<()> {
    spawn(move || loop {
        if recv_send(&sock, &recycler, &r).is_err() && exit.load(Ordering::Relaxed) {
            break;
        }
    })
}

#[cfg(all(feature = "unstable", test))]
mod bench {
    extern crate test;
    use self::test::Bencher;
    use result::Result;
    use std::net::{SocketAddr, UdpSocket};
    use std::sync::atomic::{AtomicBool, Ordering};
    use std::sync::mpsc::channel;
    use std::sync::{Arc, Mutex};
    use std::thread::sleep;
    use std::thread::{spawn, JoinHandle};
    use std::time::Duration;
    use std::time::SystemTime;
    use streamer::{allocate, receiver, recycle, Packet, PacketRecycler, Receiver, PACKET_SIZE};

    fn producer(
        addr: &SocketAddr,
        recycler: PacketRecycler,
        exit: Arc<AtomicBool>,
    ) -> JoinHandle<()> {
        let send = UdpSocket::bind("0.0.0.0:0").unwrap();
        let msgs = allocate(&recycler);
        let msgs_ = msgs.clone();
        msgs.write().unwrap().packets.resize(10, Packet::default());
        for w in msgs.write().unwrap().packets.iter_mut() {
            w.meta.size = PACKET_SIZE;
            w.meta.set_addr(&addr);
        }
        spawn(move || loop {
            if exit.load(Ordering::Relaxed) {
                return;
            }
            let mut num = 0;
            for p in msgs_.read().unwrap().packets.iter() {
                let a = p.meta.get_addr();
                send.send_to(&p.data[..p.meta.size], &a).unwrap();
                num += 1;
            }
            assert_eq!(num, 10);
        })
    }

    fn sink(
        recycler: PacketRecycler,
        exit: Arc<AtomicBool>,
        rvs: Arc<Mutex<usize>>,
        r: Receiver,
    ) -> JoinHandle<()> {
        spawn(move || loop {
            if exit.load(Ordering::Relaxed) {
                return;
            }
            let timer = Duration::new(1, 0);
            match r.recv_timeout(timer) {
                Ok(msgs) => {
                    let msgs_ = msgs.clone();
                    *rvs.lock().unwrap() += msgs.read().unwrap().packets.len();
                    recycle(&recycler, msgs_);
                }
                _ => (),
            }
        })
    }
    fn run_streamer_bench() -> Result<()> {
        let read = UdpSocket::bind("127.0.0.1:0")?;
        let addr = read.local_addr()?;
        let exit = Arc::new(AtomicBool::new(false));
        let recycler = Arc::new(Mutex::new(Vec::new()));

        let (s_reader, r_reader) = channel();
        let t_reader = receiver(read, exit.clone(), recycler.clone(), s_reader)?;
        let t_producer1 = producer(&addr, recycler.clone(), exit.clone());
        let t_producer2 = producer(&addr, recycler.clone(), exit.clone());
        let t_producer3 = producer(&addr, recycler.clone(), exit.clone());

        let rvs = Arc::new(Mutex::new(0));
        let t_sink = sink(recycler.clone(), exit.clone(), rvs.clone(), r_reader);

        let start = SystemTime::now();
        let start_val = *rvs.lock().unwrap();
        sleep(Duration::new(5, 0));
        let elapsed = start.elapsed().unwrap();
        let end_val = *rvs.lock().unwrap();
        // Convert the elapsed time to fractional seconds via nanoseconds.
        let time = elapsed.as_secs() * 1_000_000_000 + elapsed.subsec_nanos() as u64;
        let ftime = (time as f64) / 1_000_000_000f64;
        let fcount = (end_val - start_val) as f64;
        println!("performance: {:?}", fcount / ftime);
        exit.store(true, Ordering::Relaxed);
        t_reader.join()?;
        t_producer1.join()?;
        t_producer2.join()?;
        t_producer3.join()?;
        t_sink.join()?;
        Ok(())
    }
    #[bench]
    pub fn streamer_bench(_bench: &mut Bencher) {
        run_streamer_bench().unwrap();
    }
}

#[cfg(test)]
mod test {
    use std::io;
    use std::io::Write;
    use std::net::UdpSocket;
    use std::sync::atomic::{AtomicBool, Ordering};
    use std::sync::mpsc::channel;
    use std::sync::{Arc, Mutex};
    use std::time::Duration;
    use streamer::{allocate, receiver, responder, Packet, Packets, Receiver, Response, Responses,
                   PACKET_SIZE};

    fn get_msgs(r: Receiver, num: &mut usize) {
        for _t in 0..5 {
            let timer = Duration::new(1, 0);
            match r.recv_timeout(timer) {
                Ok(m) => *num += m.read().unwrap().packets.len(),
                e => println!("error {:?}", e),
            }
            if *num == 10 {
                break;
            }
        }
    }
    #[cfg(ipv6)]
    #[test]
    pub fn streamer_send_test_ipv6() {
        let read = UdpSocket::bind("[::1]:0").expect("bind");
        let addr = read.local_addr().unwrap();
        let send = UdpSocket::bind("[::1]:0").expect("bind");
        let exit = Arc::new(AtomicBool::new(false));
        let packet_recycler = Arc::new(Mutex::new(Vec::new()));
        let resp_recycler = Arc::new(Mutex::new(Vec::new()));
        let (s_reader, r_reader) = channel();
        let t_receiver = receiver(read, exit.clone(), packet_recycler.clone(), s_reader).unwrap();
        let (s_responder, r_responder) = channel();
        let t_responder = responder(send, exit.clone(), resp_recycler.clone(), r_responder);
        let msgs = allocate(&resp_recycler);
        msgs.write()
            .unwrap()
            .responses
            .resize(10, Response::default());
        for (i, w) in msgs.write().unwrap().responses.iter_mut().enumerate() {
            w.data[0] = i as u8;
            w.meta.size = PACKET_SIZE;
            w.meta.set_addr(&addr);
            assert_eq!(w.meta.get_addr(), addr);
        }
        s_responder.send(msgs).expect("send");
        let mut num = 0;
        get_msgs(r_reader, &mut num);
        assert_eq!(num, 10);
        exit.store(true, Ordering::Relaxed);
        t_receiver.join().expect("join");
        t_responder.join().expect("join");
    }
    #[test]
    pub fn streamer_debug() {
        write!(io::sink(), "{:?}", Packet::default()).unwrap();
        write!(io::sink(), "{:?}", Packets::default()).unwrap();
        write!(io::sink(), "{:?}", Response::default()).unwrap();
        write!(io::sink(), "{:?}", Responses::default()).unwrap();
    }
    #[test]
    pub fn streamer_send_test() {
        let read = UdpSocket::bind("127.0.0.1:0").expect("bind");
        let addr = read.local_addr().unwrap();
        let send = UdpSocket::bind("127.0.0.1:0").expect("bind");
        let exit = Arc::new(AtomicBool::new(false));
        let packet_recycler = Arc::new(Mutex::new(Vec::new()));
        let resp_recycler = Arc::new(Mutex::new(Vec::new()));
        let (s_reader, r_reader) = channel();
        let t_receiver = receiver(read, exit.clone(), packet_recycler.clone(), s_reader).unwrap();
        let (s_responder, r_responder) = channel();
        let t_responder = responder(send, exit.clone(), resp_recycler.clone(), r_responder);
        let msgs = allocate(&resp_recycler);
        msgs.write()
            .unwrap()
            .responses
            .resize(10, Response::default());
        for (i, w) in msgs.write().unwrap().responses.iter_mut().enumerate() {
            w.data[0] = i as u8;
            w.meta.size = PACKET_SIZE;
            w.meta.set_addr(&addr);
            assert_eq!(w.meta.get_addr(), addr);
        }
        s_responder.send(msgs).expect("send");
        let mut num = 0;
        get_msgs(r_reader, &mut num);
        assert_eq!(num, 10);
        exit.store(true, Ordering::Relaxed);
        t_receiver.join().expect("join");
        t_responder.join().expect("join");
    }
}
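The `Meta` address packing is the subtle part of this module: one `[u16; 8]` array plus a flag covers both IPv4 and IPv6 endpoints. A round-trip sketch (not part of the diff):

use std::net::SocketAddr;
use streamer::Meta;

// Hypothetical demo: set_addr/get_addr round-trip an IPv4 socket address.
fn demo_meta_addr() {
    let addr: SocketAddr = "127.0.0.1:8000".parse().unwrap();
    let mut meta = Meta::default();
    meta.set_addr(&addr); // octets widened into the first four u16 slots
    assert_eq!(meta.get_addr(), addr);
}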
136 src/transaction.rs Normal file
@@ -0,0 +1,136 @@
//! The `transaction` crate provides functionality for creating log transactions.

use bincode::serialize;
use chrono::prelude::*;
use hash::Hash;
use plan::{Condition, Payment, Plan};
use signature::{KeyPair, KeyPairUtil, PublicKey, Signature, SignatureUtil};

#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub struct Transaction {
    pub from: PublicKey,
    pub plan: Plan,
    pub tokens: i64,
    pub last_id: Hash,
    pub sig: Signature,
}

impl Transaction {
    pub fn new(from_keypair: &KeyPair, to: PublicKey, tokens: i64, last_id: Hash) -> Self {
        let from = from_keypair.pubkey();
        let plan = Plan::Pay(Payment { tokens, to });
        let mut tr = Transaction {
            from,
            plan,
            tokens,
            last_id,
            sig: Signature::default(),
        };
        tr.sign(from_keypair);
        tr
    }

    pub fn new_on_date(
        from_keypair: &KeyPair,
        to: PublicKey,
        dt: DateTime<Utc>,
        tokens: i64,
        last_id: Hash,
    ) -> Self {
        let from = from_keypair.pubkey();
        let plan = Plan::Race(
            (Condition::Timestamp(dt), Payment { tokens, to }),
            (Condition::Signature(from), Payment { tokens, to: from }),
        );
        let mut tr = Transaction {
            from,
            plan,
            tokens,
            last_id,
            sig: Signature::default(),
        };
        tr.sign(from_keypair);
        tr
    }

    fn get_sign_data(&self) -> Vec<u8> {
        serialize(&(&self.from, &self.plan, &self.tokens, &self.last_id)).unwrap()
    }

    pub fn sign(&mut self, keypair: &KeyPair) {
        let sign_data = self.get_sign_data();
        self.sig = Signature::clone_from_slice(keypair.sign(&sign_data).as_ref());
    }

    pub fn verify(&self) -> bool {
        self.sig.verify(&self.from, &self.get_sign_data()) && self.plan.verify(self.tokens)
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use bincode::{deserialize, serialize};

    #[test]
    fn test_claim() {
        let keypair = KeyPair::new();
        let zero = Hash::default();
        let tr0 = Transaction::new(&keypair, keypair.pubkey(), 42, zero);
        assert!(tr0.verify());
    }

    #[test]
    fn test_transfer() {
        let zero = Hash::default();
        let keypair0 = KeyPair::new();
        let keypair1 = KeyPair::new();
        let pubkey1 = keypair1.pubkey();
        let tr0 = Transaction::new(&keypair0, pubkey1, 42, zero);
        assert!(tr0.verify());
    }

    #[test]
    fn test_serialize_claim() {
        let plan = Plan::Pay(Payment {
            tokens: 0,
            to: Default::default(),
        });
        let claim0 = Transaction {
            from: Default::default(),
            plan,
            tokens: 0,
            last_id: Default::default(),
            sig: Default::default(),
        };
        let buf = serialize(&claim0).unwrap();
        let claim1: Transaction = deserialize(&buf).unwrap();
        assert_eq!(claim1, claim0);
    }

    #[test]
    fn test_bad_event_signature() {
        let zero = Hash::default();
        let keypair = KeyPair::new();
        let pubkey = keypair.pubkey();
        let mut tr = Transaction::new(&keypair, pubkey, 42, zero);
        tr.sign(&keypair);
        tr.tokens = 1_000_000; // <-- attack!
        assert!(!tr.verify());
    }

    #[test]
    fn test_hijack_attack() {
        let keypair0 = KeyPair::new();
        let keypair1 = KeyPair::new();
        let thief_keypair = KeyPair::new();
        let pubkey1 = keypair1.pubkey();
        let zero = Hash::default();
        let mut tr = Transaction::new(&keypair0, pubkey1, 42, zero);
        tr.sign(&keypair0);
        if let Plan::Pay(ref mut payment) = tr.plan {
            payment.to = thief_keypair.pubkey(); // <-- attack!
        };
        assert!(!tr.verify());
    }
}
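`new_on_date` is where Plan and Transaction meet: it builds the same Race plan as `Plan::new_cancelable_future_payment`, wrapped in a signed transaction. A sketch (not part of the diff; the names and date are illustrative):

use chrono::prelude::*;
use hash::Hash;
use signature::{KeyPair, KeyPairUtil};
use transaction::Transaction;

// Hypothetical demo: a dated payment that the sender could later cancel.
fn demo_new_on_date() {
    let alice = KeyPair::new();
    let bob = KeyPair::new();
    let dt = Utc.ymd(2018, 7, 4).and_hms(0, 0, 0);
    let tr = Transaction::new_on_date(&alice, bob.pubkey(), dt, 42, Hash::default());
    assert!(tr.verify()); // both Race branches carry the full 42 tokens
}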