Compare commits

...

68 Commits

Author SHA1 Message Date
12eba4bcc7 Merge pull request #26 from garious/add-accountant
Add testnode and client-demo
2018-02-28 19:48:05 -07:00
4610de8fdd Switch to sync_channel to preserve order 2018-02-28 19:33:28 -07:00
3fcc2dd944 Add testnode
Fixes #20
2018-02-28 18:05:20 -07:00
8299bae2d4 Add accountant stub 2018-02-28 16:01:12 -07:00
604ccf7552 Add network interface for accountant 2018-02-28 14:00:04 -07:00
f3dd47948a Merge pull request #25 from garious/verify-historian-input
Verify event signatures before adding log entries
2018-02-28 10:34:10 -07:00
c3bb207488 Verify event signatures before adding log entries 2018-02-28 10:23:01 -07:00
9009d1bfb3 Merge pull request #24 from garious/add-accountant
Add accountant
2018-02-27 11:41:40 -07:00
fa4d9e8bcb Add more tests 2018-02-27 11:28:10 -07:00
34b77efc87 Sleep longer for TravisCI 2018-02-27 11:08:28 -07:00
5ca0ccbcd2 Add accountant 2018-02-27 10:54:06 -07:00
6aa4e52480 Merge pull request #23 from garious/add-transaction
Generalize the event log
2018-02-26 17:40:55 -07:00
f98e9a2ad7 Fix overuse of search-and-replace 2018-02-26 17:03:50 -07:00
c6134cc25b Allow the historian to track ownership of any type of data 2018-02-26 17:01:22 -07:00
0443b39264 Allow event log to hold events of any serializable (hashable) type 2018-02-26 16:42:31 -07:00
8b0b8efbcb Allow Entry to hold events of any kind of data 2018-02-26 15:37:33 -07:00
97449cee43 Allow events to hold any kind of data 2018-02-26 15:31:01 -07:00
ab5252c750 Move entry verification out of Entry impl 2018-02-26 14:39:01 -07:00
05a27cb34d Merge pull request #22 from garious/add-transaction
Extend the event log with a Transaction event to transfer possession
2018-02-26 11:26:58 -07:00
b02eab57d2 Extend the event log with a Transaction event to transfer possession
This implementation assumes 'from' is the current owner of 'data'.
Once that's verified, the signature ensures that nobody modified
'data' (the asset being transferred) or 'to' the entity taking
ownership.

Fixes #14
2018-02-26 11:09:11 -07:00
b8d52cc3e4 Make the Discovery event into a struct instead of a tuple 2018-02-24 11:15:03 -07:00
7d9bab9508 Update rendered demo diagram 2018-02-24 11:09:00 -07:00
944181a30e Version bump 2018-02-24 11:06:08 -07:00
d8dd50505a Merge pull request #21 from garious/add-signatures
Add signatures
2018-02-24 10:47:25 -07:00
d78082f5e4 Test bad signature 2018-02-24 10:27:51 -07:00
08e501e57b Extend the event log with a Claim event to claim possession
Unlike a Discovery event, a Claim event associates a public key
with a hash. It's intended to be used to claim ownership of
some hashable data. For example, a graphic designer could claim
copyright by hashing some image they created, signing it with
their private key, and publishing the hash-signature pair via
the historian. If someone else tries to claim it as their own,
the designer can point to the historian's log as cryptographically
secure evidence that the designer's copy existed before anyone
else's.

Note there's nothing here that verifies the first claim is the actual
content owner, only that the first claim almost certainly happened
before a second.
2018-02-24 10:09:49 -07:00
29a607427d Rename UserDataKey to Discovery
From the perspective of the log, when some data's hash is added,
that data is "discovered" by the historian.  Another event
might be a "claim" that some signed data belongs to the owner of a
public key.
2018-02-24 05:25:19 -07:00
afb830c91f Merge pull request #18 from garious/add-historian
self-ticking logger
2018-02-21 12:30:10 -07:00
c1326ac3d5 Up the time to sleep so that ticks are generated 2018-02-21 12:22:23 -07:00
513a1adf57 Version bump 2018-02-21 12:01:17 -07:00
7871b38c80 Update demo to use self-ticking logger 2018-02-21 11:52:03 -07:00
b34d2d7dee Allow the logger to inject Tick events on its own 2018-02-21 11:33:42 -07:00
d7dfa8c22d Readme cleanup 2018-02-21 10:07:32 -07:00
8df274f0af Add hash seed to verify_slice() 2018-02-21 09:43:34 -07:00
07c4ebb7f2 Add message sequence chart for readme demo
Fixes #17
2018-02-21 09:33:50 -07:00
49605b257d Merge pull request #16 from garious/add-serde
Add serialization/deseriation support to event log
2018-02-20 16:55:46 -07:00
fa4e232d73 Add serialization/deseriation support to event log
See bincode and serde_json for usage:
https://github.com/TyOverby/bincode

Fixes #1
2018-02-20 16:26:13 -07:00
bd84cf6586 Merge pull request #15 from garious/add-historian
Demo proof-of-history and reordering attack
2018-02-20 15:05:20 -07:00
6e37f70d55 Test reorder attack 2018-02-20 14:46:36 -07:00
d97112d7f0 Explain proof-of-history in the readme
Also:
* Hash userdata so that verification works as the readme describes.
* Drop itertools package. Found a way to use std::iter instead.

Fixes #8
2018-02-20 14:04:49 -07:00
e57bba17c1 Version bump 2018-02-19 16:59:41 -07:00
959da300cc Shorten readme lines 2018-02-19 16:53:58 -07:00
ba90e43f72 Update benchmark
* Add asm, though it doesn't make it faster. TODO: use avx instructions.
* Do 10x less hashes, since sha256 is more expensive.
2018-02-19 16:51:35 -07:00
6effd64ab0 Update readme with sha256 usage 2018-02-19 16:48:29 -07:00
e18da7c7c1 Merge pull request #13 from garious/sha256-hash
Use sha256 hashes instead of Rust's builtin hasher.
2018-02-19 16:43:26 -07:00
0297edaf1f Use sha256 hashes instead of Rust's builtin hasher.
Causes a 20x performance degradation. Enabling asm did not
speed things up.
2018-02-19 16:23:53 -07:00
b317d13b44 Add codecov configuration 2018-02-19 13:02:59 -07:00
bb22522e45 Remove assertions that fail in the kcov docker container 2018-02-19 12:54:01 -07:00
41053b6d0b Merge pull request #12 from garious/add-historian
Add historian demo
2018-02-19 12:40:44 -07:00
bd3fe5fac9 Sleep a little longer to ensure Travis context switches 2018-02-19 12:33:33 -07:00
10a70a238b Cleanup demo 2018-02-19 12:25:57 -07:00
0bead4d410 Fix markdown link 2018-02-19 12:12:45 -07:00
4a7156de43 Move hash generation into stateless function 2018-02-19 12:09:58 -07:00
d88d1b2a09 Reset historian's hasher between events
Hasher will generate different hashes for the same input if it
had already generated a hash.

Also add a binary to ensure the example in the README works.
2018-02-19 12:03:06 -07:00
a7186328e0 Add docs
Fixes #11
2018-02-19 09:27:14 -07:00
5e3c7816bd Ensure verify_slice succeeds 2018-02-19 09:09:24 -07:00
a2fa60fa31 Merge pull request #10 from garious/add-historian
Better names
2018-02-18 10:16:05 -07:00
ceb65c2669 Better function names 2018-02-18 10:05:54 -07:00
fd209ef1a9 Rename event crate to log 2018-02-18 09:59:33 -07:00
471f036444 Better names
Event -> Entry
EventData -> Event
2018-02-18 09:53:40 -07:00
6ec0e5834c Merge pull request #9 from garious/add-historian
Add historian
2018-02-17 21:22:48 -07:00
4c94754661 More coverage 2018-02-17 21:13:52 -07:00
831e2cbdc9 Add historian
A microservice that continuously generates hashes, only stopping to
tag messages with the latest hash.

Fixes #8
2018-02-17 20:58:23 -07:00
3550f703c3 Fix typo
We don't write test for the readme. :)
2018-02-16 12:53:46 -07:00
ea1d57b461 Update description 2018-02-16 12:51:57 -07:00
49386309c8 Architecture -> Specification 2018-02-16 12:44:00 -07:00
b7a95ab7cc Re-add docs link 2018-02-16 12:40:33 -07:00
bf35b730de More metadata 2018-02-16 12:37:20 -07:00
15 changed files with 1291 additions and 167 deletions

2
.codecov.yml Normal file
View File

@ -0,0 +1,2 @@
ignore:
- "src/bin"

View File

@ -9,7 +9,7 @@ matrix:
- rust: stable
- rust: nightly
env:
- FEATURES='unstable'
- FEATURES='asm,unstable'
before_script: |
export PATH="$PATH:$HOME/.cargo/bin"
rustup component add rustfmt-preview

View File

@ -1,19 +1,42 @@
[package]
name = "silk"
description = "A silky smooth implementation of the Loom architecture"
version = "0.1.1"
version = "0.3.0"
documentation = "https://docs.rs/silk"
homepage = "http://loomprotocol.com/"
repository = "https://github.com/loomprotocol/silk"
authors = [
"Anatoly Yakovenko <aeyakovenko@gmail.com>",
"Greg Fitzgerald <garious@gmail.com>",
]
license = "Apache-2.0"
[[bin]]
name = "silk-demo"
path = "src/bin/demo.rs"
[[bin]]
name = "silk-client-demo"
path = "src/bin/client-demo.rs"
[[bin]]
name = "silk-testnode"
path = "src/bin/testnode.rs"
[badges]
codecov = { repository = "loomprotocol/silk", branch = "master", service = "github" }
[features]
unstable = []
asm = ["sha2-asm"]
[dependencies]
rayon = "1.0.0"
itertools = "0.7.6"
sha2 = "0.7.0"
sha2-asm = {version="0.3", optional=true}
generic-array = { version = "0.9.0", default-features = false, features = ["serde"] }
serde = "1.0.27"
serde_derive = "1.0.27"
ring = "0.12.1"
untrusted = "0.5.1"
bincode = "1.0.0"

View File

@ -3,18 +3,83 @@
[![Build Status](https://travis-ci.org/loomprotocol/silk.svg?branch=master)](https://travis-ci.org/loomprotocol/silk)
[![codecov](https://codecov.io/gh/loomprotocol/silk/branch/master/graph/badge.svg)](https://codecov.io/gh/loomprotocol/silk)
# Silk, A Silky Smooth Implementation of the Loom Architecture
# Silk, a silky smooth implementation of the Loom specification
Loom is a new achitecture for a high performance blockchain. Its whitepaper boasts a theoretical
throughput of 710k transactions per second on a 1 gbps network. The first implementation of the
whitepaper is happening in the 'loomprotocol/loom' repository. That repo is aggressively moving
forward, looking to de-risk technical claims as quickly as possible. This repo is quite a bit
different philosophically. Here we assume the Loom architecture is sound and worthy of building
a community around. We care a great deal about quality, clarity and short learning curve. We
avoid the use of `unsafe` Rust and an write tests for *everything*. Optimizations are only
added when corresponding benchmarks are also added that demonstrate real performance boots. We
expect the feature set here will always be a long ways behind the loom repo, but that this is
an implementation you can take to the bank, literally.
throughput of 710k transactions per second on a 1 gbps network. The specification is implemented
in two git repositories. Research is performed in the loom repository. That work drives the
Loom specification forward. This repository, on the other hand, aims to implement the specification
as-is. We care a great deal about quality, clarity and short learning curve. We avoid the use
of `unsafe` Rust and write tests for *everything*. Optimizations are only added when
corresponding benchmarks are also added that demonstrate real performance boosts. We expect the
feature set here will always be a ways behind the loom repo, but that this is an implementation
you can take to the bank, literally.
# Usage
Add the latest [silk package](https://crates.io/crates/silk) to the `[dependencies]` section
of your Cargo.toml.
Create a *Historian* and send it *events* to generate an *event log*, where each log *entry*
is tagged with the historian's latest *hash*. Then ensure the order of events was not tampered
with by verifying each entry's hash can be generated from the hash in the previous entry:
![historian](https://user-images.githubusercontent.com/55449/36633440-f76f7bb8-1952-11e8-8328-387861d3d464.png)
```rust
extern crate silk;
use silk::historian::Historian;
use silk::log::{verify_slice, Entry, Event, Sha256Hash};
use std::thread::sleep;
use std::time::Duration;
use std::sync::mpsc::SendError;
fn create_log(hist: &Historian) -> Result<(), SendError<Event>> {
sleep(Duration::from_millis(15));
let data = Sha256Hash::default();
hist.sender.send(Event::Discovery { data })?;
sleep(Duration::from_millis(10));
Ok(())
}
fn main() {
let seed = Sha256Hash::default();
let hist = Historian::new(&seed, Some(10));
create_log(&hist).expect("send error");
drop(hist.sender);
let entries: Vec<Entry> = hist.receiver.iter().collect();
for entry in &entries {
println!("{:?}", entry);
}
// Proof-of-History: Verify the historian learned about the events
// in the same order they appear in the vector.
assert!(verify_slice(&entries, &seed));
}
```
Running the program should produce a log similar to:
```rust
Entry { num_hashes: 0, end_hash: [0, ...], event: Tick }
Entry { num_hashes: 2, end_hash: [67, ...], event: Discovery { data: [37, ...] } }
Entry { num_hashes: 3, end_hash: [123, ...], event: Tick }
```
Proof-of-History
---
Take note of the last line:
```rust
assert!(verify_slice(&entries, &seed));
```
[It's a proof!](https://en.wikipedia.org/wiki/Curry%E2%80%93Howard_correspondence) For each entry returned by the
historian, we can verify that `end_hash` is the result of applying a sha256 hash to the previous `end_hash`
exactly `num_hashes` times, and then hashing the event data on top of that. Because the event data is
included in the hash, the events cannot be reordered without regenerating all the hashes.
# Developing
@ -57,5 +122,5 @@ $ rustup install nightly
Run the benchmarks:
```bash
$ cargo +nightly bench --features="unstable"
$ cargo +nightly bench --features="asm,unstable"
```

18
diagrams/historian.msc Normal file
View File

@ -0,0 +1,18 @@
msc {
client,historian,logger;
logger=>historian [ label = "e0 = Entry{hash: h0, n: 0, event: Tick}" ] ;
logger=>logger [ label = "h1 = hash(h0)" ] ;
logger=>logger [ label = "h2 = hash(h1)" ] ;
client=>historian [ label = "Discovery(d0)" ] ;
historian=>logger [ label = "Discovery(d0)" ] ;
logger=>logger [ label = "h3 = hash(h2 + d0)" ] ;
logger=>historian [ label = "e1 = Entry{hash: hash(h3), n: 2, event: Discovery(d0)}" ] ;
logger=>logger [ label = "h4 = hash(h3)" ] ;
logger=>logger [ label = "h5 = hash(h4)" ] ;
logger=>logger [ label = "h6 = hash(h5)" ] ;
logger=>historian [ label = "e2 = Entry{hash: h6, n: 3, event: Tick}" ] ;
client=>historian [ label = "collect()" ] ;
historian=>client [ label = "entries = [e0, e1, e2]" ] ;
client=>client [ label = "verify_slice(entries, h0)" ] ;
}

229
src/accountant.rs Normal file
View File

@ -0,0 +1,229 @@
//! The `accountant` is a client of the `historian`. It uses the historian's
//! event log to record transactions. Its users can deposit funds and
//! transfer funds to other users.
use log::{Event, PublicKey, Sha256Hash, Signature};
use historian::Historian;
use ring::signature::Ed25519KeyPair;
use std::sync::mpsc::{RecvError, SendError};
use std::collections::HashMap;
pub struct Accountant {
pub historian: Historian<u64>,
pub balances: HashMap<PublicKey, u64>,
pub end_hash: Sha256Hash,
}
impl Accountant {
    /// Creates a new accountant seeded with `start_hash`. If `ms_per_tick` is
    /// `Some`, the underlying historian injects Tick events at that interval.
    pub fn new(start_hash: &Sha256Hash, ms_per_tick: Option<u64>) -> Self {
        let hist = Historian::<u64>::new(start_hash, ms_per_tick);
        Accountant {
            historian: hist,
            balances: HashMap::new(),
            end_hash: *start_hash,
        }
    }

    /// Applies a single log event to the balances table.
    /// Non-monetary events (e.g. Tick) are ignored.
    pub fn process_event(self: &mut Self, event: &Event<u64>) {
        match *event {
            Event::Claim { key, data, .. } => {
                // Entry API: one lookup instead of contains_key + get_mut/insert.
                *self.balances.entry(key).or_insert(0) += data;
            }
            Event::Transaction { from, to, data, .. } => {
                // The sender is only debited if already known; no negative
                // balances are created for unknown senders (original behavior).
                if let Some(x) = self.balances.get_mut(&from) {
                    *x -= data;
                }
                *self.balances.entry(to).or_insert(0) += data;
            }
            _ => (),
        }
    }

    /// Drains all entries currently available from the historian and applies
    /// their events, updating `end_hash` to the last entry seen.
    pub fn sync(self: &mut Self) {
        let mut entries = vec![];
        while let Ok(entry) = self.historian.receiver.try_recv() {
            entries.push(entry);
        }
        // TODO: Does this cause the historian's channel to get blocked?
        //use log::verify_slice_u64;
        //println!("accountant: verifying {} entries...", entries.len());
        //assert!(verify_slice_u64(&entries, &self.end_hash));
        //println!("accountant: Done verifying {} entries.", entries.len());
        if let Some(last_entry) = entries.last() {
            self.end_hash = last_entry.end_hash;
        }
        for e in &entries {
            self.process_event(&e.event);
        }
    }

    /// Sends a pre-signed Claim (deposit) event to the historian.
    pub fn deposit_signed(
        self: &Self,
        key: PublicKey,
        data: u64,
        sig: Signature,
    ) -> Result<(), SendError<Event<u64>>> {
        let event = Event::Claim { key, data, sig };
        self.historian.sender.send(event)
    }

    /// Signs and deposits `n` units for the holder of `keypair`.
    pub fn deposit(
        self: &Self,
        n: u64,
        keypair: &Ed25519KeyPair,
    ) -> Result<(), SendError<Event<u64>>> {
        use log::{get_pubkey, sign_serialized};
        let key = get_pubkey(keypair);
        let sig = sign_serialized(&n, keypair);
        self.deposit_signed(key, n, sig)
    }

    /// Sends a pre-signed Transaction event. Transfers exceeding the sender's
    /// balance are rejected (logged and dropped), but still return Ok.
    pub fn transfer_signed(
        self: &mut Self,
        from: PublicKey,
        to: PublicKey,
        data: u64,
        sig: Signature,
    ) -> Result<(), SendError<Event<u64>>> {
        if self.get_balance(&from).unwrap() < data {
            // TODO: Replace the SendError result with a custom one.
            println!("Error: Insufficient funds");
            return Ok(());
        }
        let event = Event::Transaction { from, to, data, sig };
        self.historian.sender.send(event)
    }

    /// Signs and sends a transfer of `n` units from `keypair`'s owner to `to`.
    pub fn transfer(
        self: &mut Self,
        n: u64,
        keypair: &Ed25519KeyPair,
        to: PublicKey,
    ) -> Result<(), SendError<Event<u64>>> {
        use log::{get_pubkey, sign_transaction_data};
        let from = get_pubkey(keypair);
        let sig = sign_transaction_data(&n, keypair, &to);
        self.transfer_signed(from, to, n, sig)
    }

    /// Syncs pending log entries, then returns `pubkey`'s balance (0 if unknown).
    pub fn get_balance(self: &mut Self, pubkey: &PublicKey) -> Result<u64, RecvError> {
        self.sync();
        Ok(*self.balances.get(pubkey).unwrap_or(&0))
    }
}
#[cfg(test)]
mod tests {
use super::*;
use std::thread::sleep;
use std::time::Duration;
use log::{generate_keypair, get_pubkey};
use historian::ExitReason;
#[test]
fn test_accountant() {
let zero = Sha256Hash::default();
let mut acc = Accountant::new(&zero, Some(2));
let alice_keypair = generate_keypair();
let bob_keypair = generate_keypair();
acc.deposit(10_000, &alice_keypair).unwrap();
acc.deposit(1_000, &bob_keypair).unwrap();
sleep(Duration::from_millis(30));
let bob_pubkey = get_pubkey(&bob_keypair);
acc.transfer(500, &alice_keypair, bob_pubkey).unwrap();
sleep(Duration::from_millis(30));
assert_eq!(acc.get_balance(&bob_pubkey).unwrap(), 1_500);
drop(acc.historian.sender);
assert_eq!(
acc.historian.thread_hdl.join().unwrap().1,
ExitReason::RecvDisconnected
);
}
#[test]
fn test_invalid_transfer() {
let zero = Sha256Hash::default();
let mut acc = Accountant::new(&zero, Some(2));
let alice_keypair = generate_keypair();
let bob_keypair = generate_keypair();
acc.deposit(10_000, &alice_keypair).unwrap();
acc.deposit(1_000, &bob_keypair).unwrap();
sleep(Duration::from_millis(30));
let bob_pubkey = get_pubkey(&bob_keypair);
acc.transfer(10_001, &alice_keypair, bob_pubkey).unwrap();
sleep(Duration::from_millis(30));
let alice_pubkey = get_pubkey(&alice_keypair);
assert_eq!(acc.get_balance(&alice_pubkey).unwrap(), 10_000);
assert_eq!(acc.get_balance(&bob_pubkey).unwrap(), 1_000);
drop(acc.historian.sender);
assert_eq!(
acc.historian.thread_hdl.join().unwrap().1,
ExitReason::RecvDisconnected
);
}
#[test]
fn test_multiple_claims() {
let zero = Sha256Hash::default();
let mut acc = Accountant::new(&zero, Some(2));
let keypair = generate_keypair();
acc.deposit(1, &keypair).unwrap();
acc.deposit(2, &keypair).unwrap();
let pubkey = get_pubkey(&keypair);
sleep(Duration::from_millis(30));
assert_eq!(acc.get_balance(&pubkey).unwrap(), 3);
drop(acc.historian.sender);
assert_eq!(
acc.historian.thread_hdl.join().unwrap().1,
ExitReason::RecvDisconnected
);
}
#[test]
fn test_transfer_to_newb() {
let zero = Sha256Hash::default();
let mut acc = Accountant::new(&zero, Some(2));
let alice_keypair = generate_keypair();
let bob_keypair = generate_keypair();
acc.deposit(10_000, &alice_keypair).unwrap();
sleep(Duration::from_millis(30));
let bob_pubkey = get_pubkey(&bob_keypair);
acc.transfer(500, &alice_keypair, bob_pubkey).unwrap();
sleep(Duration::from_millis(30));
assert_eq!(acc.get_balance(&bob_pubkey).unwrap(), 500);
drop(acc.historian.sender);
assert_eq!(
acc.historian.thread_hdl.join().unwrap().1,
ExitReason::RecvDisconnected
);
}
}

75
src/accountant_skel.rs Normal file
View File

@ -0,0 +1,75 @@
use std::io;
use accountant::Accountant;
use log::{PublicKey, Signature};
//use serde::Serialize;
pub struct AccountantSkel {
pub obj: Accountant,
}
#[derive(Serialize, Deserialize, Debug)]
pub enum Request {
Deposit {
key: PublicKey,
val: u64,
sig: Signature,
},
Transfer {
from: PublicKey,
to: PublicKey,
val: u64,
sig: Signature,
},
GetBalance {
key: PublicKey,
},
}
#[derive(Serialize, Deserialize, Debug)]
pub enum Response {
Balance { key: PublicKey, val: u64 },
}
impl AccountantSkel {
    /// Wraps an Accountant for network access.
    pub fn new(obj: Accountant) -> Self {
        AccountantSkel { obj }
    }

    /// Dispatches one request to the underlying Accountant. Deposits and
    /// transfers produce no reply; only balance queries return a Response.
    pub fn process_request(self: &mut Self, msg: Request) -> Option<Response> {
        match msg {
            Request::Deposit { key, val, sig } => {
                // Send errors are deliberately dropped; the protocol has no
                // failure reply for deposits.
                let _ = self.obj.deposit_signed(key, val, sig);
                None
            }
            Request::Transfer { from, to, val, sig } => {
                let _ = self.obj.transfer_signed(from, to, val, sig);
                None
            }
            Request::GetBalance { key } => {
                let val = self.obj.get_balance(&key).unwrap();
                Some(Response::Balance { key, val })
            }
        }
    }

    /// TCP Server that forwards messages to Accountant methods.
    /// Handles one connection at a time; loops forever unless an I/O error occurs.
    pub fn serve(self: &mut Self, addr: &str) -> io::Result<()> {
        use std::net::TcpListener;
        use std::io::{Read, Write};
        use bincode::{deserialize, serialize};
        let listener = TcpListener::bind(addr)?;
        let mut buf = vec![0u8; 1024];
        loop {
            //println!("skel: Waiting for incoming connections...");
            let (mut stream, _from_addr) = listener.accept()?;
            // NOTE(review): a single read assumes the whole request arrives in
            // one segment and fits in 1024 bytes — confirm against clients.
            let _sz = stream.read(&mut buf)?;
            // TODO: Return a descriptive error message if deserialization fails.
            let req = deserialize(&buf).expect("deserialize request");
            if let Some(resp) = self.process_request(req) {
                // write_all: plain write() may report a partial write and
                // silently drop the rest of the response.
                stream.write_all(&serialize(&resp).expect("serialize response"))?;
            }
        }
    }
}

116
src/accountant_stub.rs Normal file
View File

@ -0,0 +1,116 @@
//! The `accountant` is a client of the `historian`. It uses the historian's
//! event log to record transactions. Its users can deposit funds and
//! transfer funds to other users.
use std::net::TcpStream;
use std::io;
use std::io::{Read, Write};
use bincode::{deserialize, serialize};
use log::{PublicKey, Signature};
use ring::signature::Ed25519KeyPair;
use accountant_skel::{Request, Response};
pub struct AccountantStub {
pub addr: String,
}
impl AccountantStub {
    /// Creates a stub that will connect to the server at `addr`.
    pub fn new(addr: &str) -> Self {
        AccountantStub {
            addr: addr.to_string(),
        }
    }

    /// Sends a pre-signed deposit request. Returns the number of bytes
    /// written (the server sends no reply for deposits).
    pub fn deposit_signed(
        self: &mut Self,
        key: PublicKey,
        val: u64,
        sig: Signature,
    ) -> io::Result<usize> {
        let req = Request::Deposit { key, val, sig };
        let data = serialize(&req).unwrap();
        let mut stream = TcpStream::connect(&self.addr)?;
        // NOTE(review): write() may be partial; callers ignore the count, so a
        // short write would silently truncate the request — confirm intent.
        stream.write(&data)
    }

    /// Signs and deposits `n` units for the holder of `keypair`.
    pub fn deposit(self: &mut Self, n: u64, keypair: &Ed25519KeyPair) -> io::Result<usize> {
        use log::{get_pubkey, sign_serialized};
        let key = get_pubkey(keypair);
        let sig = sign_serialized(&n, keypair);
        self.deposit_signed(key, n, sig)
    }

    /// Sends a pre-signed transfer request. Returns bytes written.
    pub fn transfer_signed(
        self: &mut Self,
        from: PublicKey,
        to: PublicKey,
        val: u64,
        sig: Signature,
    ) -> io::Result<usize> {
        let req = Request::Transfer { from, to, val, sig };
        let data = serialize(&req).unwrap();
        let mut stream = TcpStream::connect(&self.addr)?;
        stream.write(&data)
    }

    /// Signs and sends a transfer of `n` units from `keypair`'s owner to `to`.
    pub fn transfer(
        self: &mut Self,
        n: u64,
        keypair: &Ed25519KeyPair,
        to: PublicKey,
    ) -> io::Result<usize> {
        use log::{get_pubkey, sign_transaction_data};
        let from = get_pubkey(keypair);
        let sig = sign_transaction_data(&n, keypair, &to);
        self.transfer_signed(from, to, n, sig)
    }

    /// Queries the server for `pubkey`'s balance and waits for the reply.
    pub fn get_balance(self: &mut Self, pubkey: &PublicKey) -> io::Result<u64> {
        let mut stream = TcpStream::connect(&self.addr)?;
        let req = Request::GetBalance { key: *pubkey };
        let data = serialize(&req).expect("serialize GetBalance");
        // write_all: a partial write of the request would leave the server
        // unable to deserialize it and this client hanging on the read.
        stream.write_all(&data)?;
        let mut buf = vec![0u8; 1024];
        // NOTE(review): a single read assumes the whole reply arrives at once.
        stream.read(&mut buf)?;
        let resp = deserialize(&buf).expect("deserialize balance");
        let Response::Balance { key, val } = resp;
        assert_eq!(key, *pubkey);
        Ok(val)
    }
}
#[cfg(test)]
mod tests {
use super::*;
use accountant::Accountant;
use accountant_skel::AccountantSkel;
use std::thread::{sleep, spawn};
use std::time::Duration;
use log::{generate_keypair, get_pubkey, Sha256Hash};
#[test]
fn test_accountant_stub() {
let addr = "127.0.0.1:8000";
spawn(move || {
let zero = Sha256Hash::default();
let acc = Accountant::new(&zero, None);
let mut skel = AccountantSkel::new(acc);
skel.serve(addr).unwrap();
});
sleep(Duration::from_millis(30));
let mut acc = AccountantStub::new(addr);
let alice_keypair = generate_keypair();
let bob_keypair = generate_keypair();
acc.deposit(10_000, &alice_keypair).unwrap();
acc.deposit(1_000, &bob_keypair).unwrap();
sleep(Duration::from_millis(30));
let bob_pubkey = get_pubkey(&bob_keypair);
acc.transfer(500, &alice_keypair, bob_pubkey).unwrap();
sleep(Duration::from_millis(300));
assert_eq!(acc.get_balance(&bob_pubkey).unwrap(), 1_500);
}
}

45
src/bin/client-demo.rs Normal file
View File

@ -0,0 +1,45 @@
extern crate silk;
fn main() {
use silk::accountant_stub::AccountantStub;
use std::thread::sleep;
use std::time::Duration;
use silk::log::{generate_keypair, get_pubkey};
let addr = "127.0.0.1:8000";
let mut acc = AccountantStub::new(addr);
let alice_keypair = generate_keypair();
let bob_keypair = generate_keypair();
let txs = 10_000;
println!("Depositing {} units in Alice's account...", txs);
acc.deposit(txs, &alice_keypair).unwrap();
//acc.deposit(1_000, &bob_keypair).unwrap();
println!("Done.");
sleep(Duration::from_millis(30));
let alice_pubkey = get_pubkey(&alice_keypair);
let bob_pubkey = get_pubkey(&bob_keypair);
println!("Transferring 1 unit {} times...", txs);
for _ in 0..txs {
acc.transfer(1, &alice_keypair, bob_pubkey).unwrap();
}
println!("Done.");
sleep(Duration::from_millis(20));
let mut alice_val = acc.get_balance(&alice_pubkey).unwrap();
while alice_val > 0 {
println!("Checking on Alice's Balance {}", alice_val);
sleep(Duration::from_millis(20));
alice_val = acc.get_balance(&alice_pubkey).unwrap();
}
println!("Done. Checking balances.");
println!(
"Alice's Final Balance {}",
acc.get_balance(&alice_pubkey).unwrap()
);
println!(
"Bob's Final Balance {}",
acc.get_balance(&bob_pubkey).unwrap()
);
}

27
src/bin/demo.rs Normal file
View File

@ -0,0 +1,27 @@
extern crate silk;
use silk::historian::Historian;
use silk::log::{verify_slice, Entry, Event, Sha256Hash};
use std::thread::sleep;
use std::time::Duration;
use std::sync::mpsc::SendError;
fn create_log(hist: &Historian<Sha256Hash>) -> Result<(), SendError<Event<Sha256Hash>>> {
sleep(Duration::from_millis(15));
let data = Sha256Hash::default();
hist.sender.send(Event::Discovery { data })?;
sleep(Duration::from_millis(10));
Ok(())
}
fn main() {
let seed = Sha256Hash::default();
let hist = Historian::new(&seed, Some(10));
create_log(&hist).expect("send error");
drop(hist.sender);
let entries: Vec<Entry<Sha256Hash>> = hist.receiver.iter().collect();
for entry in &entries {
println!("{:?}", entry);
}
assert!(verify_slice(&entries, &seed));
}

14
src/bin/testnode.rs Normal file
View File

@ -0,0 +1,14 @@
extern crate silk;
use silk::accountant_skel::AccountantSkel;
use silk::accountant::Accountant;
use silk::log::Sha256Hash;
fn main() {
let addr = "127.0.0.1:8000";
let zero = Sha256Hash::default();
let acc = Accountant::new(&zero, Some(1000));
let mut skel = AccountantSkel::new(acc);
println!("Listening on {}", addr);
skel.serve(addr).unwrap();
}

View File

@ -1,151 +0,0 @@
//! The `event` crate provides the foundational data structures for Proof-of-History
/// A Proof-of-History is an ordered log of events in time. Each entry contains three
/// pieces of data. The 'num_hashes' field is the number of hashes performed since the previous
/// entry. The 'end_hash' field is the result of hashing 'end_hash' from the previous entry
/// 'num_hashes' times. The 'data' field is an optional foreign key (a hash) pointing to some
/// arbitrary data that a client is looking to associate with the entry.
///
/// If you divide 'num_hashes' by the amount of time it takes to generate a new hash, you
/// get a duration estimate since the last event. Since processing power increases
/// over time, one should expect the duration 'num_hashes' represents to decrease proportionally.
/// Though processing power varies across nodes, the network gives priority to the
/// fastest processor. Duration should therefore be estimated by assuming that the hash
/// was generated by the fastest processor at the time the entry was logged.
pub struct Event {
    pub num_hashes: u64,
    pub end_hash: u64,
    pub data: EventData,
}

/// When 'data' is Tick, the event represents a simple clock tick, and exists for the
/// sole purpose of improving the performance of event log verification. A tick can
/// be generated in 'num_hashes' hashes and verified in 'num_hashes' hashes. By logging
/// a hash alongside the tick, each tick can be verified in parallel using the 'end_hash'
/// of the preceding tick to seed its hashing.
pub enum EventData {
    Tick,
    UserDataKey(u64),
}

impl Event {
    /// Builds a Tick event from the hash count since the previous event and
    /// the resulting chain hash.
    pub fn new_tick(num_hashes: u64, end_hash: u64) -> Self {
        Event {
            num_hashes,
            end_hash,
            data: EventData::Tick,
        }
    }

    /// Checks that `self.end_hash` is what hashing forward from `start_hash`
    /// for `self.num_hashes` rounds would produce.
    pub fn verify(self: &Self, start_hash: u64) -> bool {
        let expected = next_tick(start_hash, self.num_hashes).end_hash;
        expected == self.end_hash
    }
}

/// Produces the Tick event reached by hashing `num_hashes` times from `start_hash`.
pub fn next_tick(start_hash: u64, num_hashes: u64) -> Event {
    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};
    // The hasher is deliberately shared across rounds, so each round folds in
    // all previous state — this matches the original implementation exactly.
    let mut hasher = DefaultHasher::new();
    let end_hash = (0..num_hashes).fold(start_hash, |hash, _| {
        hash.hash(&mut hasher);
        hasher.finish()
    });
    Event::new_tick(num_hashes, end_hash)
}
/// Verifies the hashes and counts of a slice of events are all consistent.
pub fn verify_slice(events: &[Event], start_hash: u64) -> bool {
use rayon::prelude::*;
let genesis = [Event::new_tick(0, start_hash)];
let event_pairs = genesis.par_iter().chain(events).zip(events);
event_pairs.all(|(x0, x1)| x1.verify(x0.end_hash))
}
/// Verifies the hashes and events serially. Exists only for reference.
pub fn verify_slice_seq(events: &[Event], start_hash: u64) -> bool {
    // Same pairing scheme as verify_slice, but sequential: a synthetic
    // genesis tick is zipped ahead of the events so each event is checked
    // against its predecessor's end_hash.
    let genesis = [Event::new_tick(0, start_hash)];
    genesis
        .iter()
        .chain(events)
        .zip(events)
        .all(|(prev, curr)| curr.verify(prev.end_hash))
}
/// Create a vector of Ticks of length 'len' from 'start_hash' hash and 'num_hashes'.
pub fn create_ticks(start_hash: u64, num_hashes: u64, len: usize) -> Vec<Event> {
    // Each tick is chained off the previous tick's end_hash.
    let mut events = Vec::with_capacity(len);
    let mut hash = start_hash;
    for _ in 0..len {
        let event = next_tick(hash, num_hashes);
        hash = event.end_hash;
        events.push(event);
    }
    events
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_event_verify() {
assert!(Event::new_tick(0, 0).verify(0)); // base case
assert!(!Event::new_tick(0, 0).verify(1)); // base case, bad
assert!(next_tick(0, 1).verify(0)); // inductive step
assert!(!next_tick(0, 1).verify(1)); // inductive step, bad
}
#[test]
fn test_next_tick() {
assert_eq!(next_tick(0, 1).num_hashes, 1)
}
fn verify_slice_generic(verify_slice: fn(&[Event], u64) -> bool) {
assert!(verify_slice(&vec![], 0)); // base case
assert!(verify_slice(&vec![Event::new_tick(0, 0)], 0)); // singleton case 1
assert!(!verify_slice(&vec![Event::new_tick(0, 0)], 1)); // singleton case 2, bad
assert!(verify_slice(&create_ticks(0, 0, 2), 0)); // inductive step
let mut bad_ticks = create_ticks(0, 0, 2);
bad_ticks[1].end_hash = 1;
assert!(!verify_slice(&bad_ticks, 0)); // inductive step, bad
}
#[test]
fn test_verify_slice() {
verify_slice_generic(verify_slice);
}
#[test]
fn test_verify_slice_seq() {
verify_slice_generic(verify_slice_seq);
}
}
#[cfg(all(feature = "unstable", test))]
mod bench {
extern crate test;
use self::test::Bencher;
use event;
#[bench]
fn event_bench(bencher: &mut Bencher) {
let start_hash = 0;
let events = event::create_ticks(start_hash, 100_000, 8);
bencher.iter(|| {
assert!(event::verify_slice(&events, start_hash));
});
}
#[bench]
fn event_bench_seq(bencher: &mut Bencher) {
let start_hash = 0;
let events = event::create_ticks(start_hash, 100_000, 8);
bencher.iter(|| {
assert!(event::verify_slice_seq(&events, start_hash));
});
}
}

210
src/historian.rs Normal file
View File

@ -0,0 +1,210 @@
//! The `historian` crate provides a microservice for generating a Proof-of-History.
//! It logs Event items on behalf of its users. It continuously generates
//! new hashes, only stopping to check if it has been sent an Event item. It
//! tags each Event with an Entry and sends it back. The Entry includes the
//! Event, the latest hash, and the number of hashes since the last event.
//! The resulting stream of entries represents ordered events in time.
use std::thread::JoinHandle;
use std::sync::mpsc::{Receiver, SyncSender};
use std::time::{Duration, SystemTime};
use log::{hash, hash_event, verify_event, Entry, Event, Sha256Hash};
use serde::Serialize;
use std::fmt::Debug;
/// Handles to a background Proof-of-History logger thread.
pub struct Historian<T> {
    // Channel for submitting events to be logged.
    pub sender: SyncSender<Event<T>>,
    // Channel yielding the tagged Entry produced for each logged event.
    pub receiver: Receiver<Entry<T>>,
    // Join handle for the logger thread; on exit it yields the final entry
    // and the reason the thread stopped.
    pub thread_hdl: JoinHandle<(Entry<T>, ExitReason)>,
}
/// Why the logger thread terminated, returned through its JoinHandle.
#[derive(Debug, PartialEq, Eq)]
pub enum ExitReason {
    // The event sender side was dropped; no more events will arrive.
    RecvDisconnected,
    // The entry receiver side was dropped; entries can no longer be delivered.
    SendDisconnected,
}
/// Mix `event` into the running hash chain and send the resulting Entry to
/// `sender`.
///
/// On success, resets `num_hashes` to 0 so the count restarts after each
/// entry. If the entry channel is closed, returns the entry along with
/// `SendDisconnected` so the caller can report the final state.
fn log_event<T: Serialize + Clone + Debug>(
    sender: &SyncSender<Entry<T>>,
    num_hashes: &mut u64,
    end_hash: &mut Sha256Hash,
    event: Event<T>,
) -> Result<(), (Entry<T>, ExitReason)> {
    *end_hash = hash_event(end_hash, &event);
    let entry = Entry {
        end_hash: *end_hash,
        num_hashes: *num_hashes,
        event,
    };
    // `.is_err()` instead of `if let Err(_)` — same behavior, idiomatic form.
    if sender.send(entry.clone()).is_err() {
        return Err((entry, ExitReason::SendDisconnected));
    }
    *num_hashes = 0;
    Ok(())
}
/// Drain pending events from `receiver`, logging each one that verifies, and
/// emit periodic Tick entries when `ms_per_tick` is set.
///
/// Returns Ok(()) when the channel is momentarily empty (so the caller can
/// resume hashing), or Err with the final entry and exit reason when the
/// event channel is disconnected or an entry cannot be sent.
fn log_events<T: Serialize + Clone + Debug>(
    receiver: &Receiver<Event<T>>,
    sender: &SyncSender<Entry<T>>,
    num_hashes: &mut u64,
    end_hash: &mut Sha256Hash,
    epoch: SystemTime,
    num_ticks: &mut u64,
    ms_per_tick: Option<u64>,
) -> Result<(), (Entry<T>, ExitReason)> {
    use std::sync::mpsc::TryRecvError;
    loop {
        if let Some(ms) = ms_per_tick {
            let now = SystemTime::now();
            // Schedule ticks from `epoch` rather than from the previous tick,
            // so timing drift doesn't accumulate; a missed period is caught up
            // one tick per loop iteration.
            if now > epoch + Duration::from_millis((*num_ticks + 1) * ms) {
                log_event(sender, num_hashes, end_hash, Event::Tick)?;
                *num_ticks += 1;
            }
        }
        match receiver.try_recv() {
            Ok(event) => {
                // Silently drop events whose signatures don't verify instead
                // of logging them.
                if verify_event(&event) {
                    log_event(sender, num_hashes, end_hash, event)?;
                }
            }
            Err(TryRecvError::Empty) => {
                return Ok(());
            }
            Err(TryRecvError::Disconnected) => {
                // Report the current chain position as a synthetic Tick entry.
                let entry = Entry {
                    end_hash: *end_hash,
                    num_hashes: *num_hashes,
                    event: Event::Tick,
                };
                return Err((entry, ExitReason::RecvDisconnected));
            }
        }
    }
}
/// A background thread that will continue tagging received Event messages and
/// sending back Entry messages until either the receiver or sender channel is closed.
pub fn create_logger<T: 'static + Serialize + Clone + Debug + Send>(
    start_hash: Sha256Hash,
    ms_per_tick: Option<u64>,
    receiver: Receiver<Event<T>>,
    sender: SyncSender<Entry<T>>,
) -> JoinHandle<(Entry<T>, ExitReason)> {
    use std::thread;
    thread::spawn(move || {
        let mut cur_hash = start_hash;
        let mut hash_count = 0;
        let mut tick_count = 0;
        let launch_time = SystemTime::now();
        loop {
            // Drain any pending events, then extend the hash chain by one.
            let res = log_events(
                &receiver,
                &sender,
                &mut hash_count,
                &mut cur_hash,
                launch_time,
                &mut tick_count,
                ms_per_tick,
            );
            match res {
                Ok(()) => {
                    cur_hash = hash(&cur_hash);
                    hash_count += 1;
                }
                Err(err) => return err,
            }
        }
    })
}
impl<T: 'static + Serialize + Clone + Debug + Send> Historian<T> {
    /// Spawn a logger thread seeded with `start_hash`. When `ms_per_tick` is
    /// given, the thread also emits a Tick entry on that period.
    pub fn new(start_hash: &Sha256Hash, ms_per_tick: Option<u64>) -> Self {
        use std::sync::mpsc::sync_channel;
        // Bounded channels preserve event ordering and apply backpressure.
        let (event_sender, event_receiver) = sync_channel(4000);
        let (entry_sender, entry_receiver) = sync_channel(4000);
        let thread_hdl = create_logger(*start_hash, ms_per_tick, event_receiver, entry_sender);
        Historian {
            sender: event_sender,
            receiver: entry_receiver,
            thread_hdl,
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use log::*;
    use std::thread::sleep;
    use std::time::Duration;

    #[test]
    fn test_historian() {
        let zero = Sha256Hash::default();
        let hist = Historian::new(&zero, None);

        // Sleep between sends so the logger hashes between events and each
        // event lands in its own entry.
        hist.sender.send(Event::Tick).unwrap();
        sleep(Duration::new(0, 1_000_000));
        hist.sender.send(Event::Discovery { data: zero }).unwrap();
        sleep(Duration::new(0, 1_000_000));
        hist.sender.send(Event::Tick).unwrap();

        let entry0 = hist.receiver.recv().unwrap();
        let entry1 = hist.receiver.recv().unwrap();
        let entry2 = hist.receiver.recv().unwrap();

        // Dropping the sender closes the event channel; the logger thread
        // should exit with RecvDisconnected.
        drop(hist.sender);
        assert_eq!(
            hist.thread_hdl.join().unwrap().1,
            ExitReason::RecvDisconnected
        );

        // The recorded entries must form a valid hash chain from `zero`.
        assert!(verify_slice(&[entry0, entry1, entry2], &zero));
    }

    #[test]
    fn test_historian_closed_sender() {
        let zero = Sha256Hash::default();
        let hist = Historian::<u8>::new(&zero, None);
        // Closing the entry receiver forces the logger's send to fail.
        drop(hist.receiver);
        hist.sender.send(Event::Tick).unwrap();
        assert_eq!(
            hist.thread_hdl.join().unwrap().1,
            ExitReason::SendDisconnected
        );
    }

    #[test]
    fn test_ticking_historian() {
        let zero = Sha256Hash::default();
        // 20ms tick period; sleeping 30ms + 15ms guarantees at least one Tick
        // entry in addition to the Discovery event.
        let hist = Historian::new(&zero, Some(20));
        sleep(Duration::from_millis(30));
        hist.sender.send(Event::Discovery { data: zero }).unwrap();
        sleep(Duration::from_millis(15));
        drop(hist.sender);
        assert_eq!(
            hist.thread_hdl.join().unwrap().1,
            ExitReason::RecvDisconnected
        );
        let entries: Vec<Entry<Sha256Hash>> = hist.receiver.iter().collect();
        assert!(entries.len() > 1);
        assert!(verify_slice(&entries, &zero));
    }

    #[test]
    fn test_bad_event_attack() {
        let zero = Sha256Hash::default();
        let hist = Historian::new(&zero, None);
        let keypair = generate_keypair();
        // The signature covers different data than the event carries, so the
        // logger must reject the event and record nothing.
        let event0 = Event::Claim {
            key: get_pubkey(&keypair),
            data: hash(b"goodbye cruel world"),
            sig: sign_serialized(&hash(b"hello, world"), &keypair),
        };
        hist.sender.send(event0).unwrap();
        drop(hist.sender);
        assert_eq!(
            hist.thread_hdl.join().unwrap().1,
            ExitReason::RecvDisconnected
        );
        let entries: Vec<Entry<Sha256Hash>> = hist.receiver.iter().collect();
        assert_eq!(entries.len(), 0);
    }
}

View File

@ -1,4 +1,15 @@
#![cfg_attr(feature = "unstable", feature(test))]
pub mod event;
extern crate itertools;
pub mod log;
pub mod historian;
pub mod accountant;
pub mod accountant_skel;
pub mod accountant_stub;
extern crate bincode;
extern crate generic_array;
extern crate rayon;
extern crate ring;
extern crate serde;
#[macro_use]
extern crate serde_derive;
extern crate sha2;
extern crate untrusted;

440
src/log.rs Normal file
View File

@ -0,0 +1,440 @@
//! The `log` crate provides the foundational data structures for Proof-of-History,
//! an ordered log of events in time.
/// Each log entry contains three pieces of data. The 'num_hashes' field is the number
/// of hashes performed since the previous entry. The 'end_hash' field is the result
/// of hashing 'end_hash' from the previous entry 'num_hashes' times. The 'event'
/// field points to an Event that took place shortly after 'end_hash' was generated.
///
/// If you divide 'num_hashes' by the amount of time it takes to generate a new hash, you
/// get a duration estimate since the last event. Since processing power increases
/// over time, one should expect the duration 'num_hashes' represents to decrease proportionally.
/// Though processing power varies across nodes, the network gives priority to the
/// fastest processor. Duration should therefore be estimated by assuming that the hash
/// was generated by the fastest processor at the time the entry was logged.
use generic_array::GenericArray;
use generic_array::typenum::{U32, U64};
use ring::signature::Ed25519KeyPair;
use serde::Serialize;
// A 32-byte SHA-256 digest.
pub type Sha256Hash = GenericArray<u8, U32>;
// A 32-byte Ed25519 public key.
pub type PublicKey = GenericArray<u8, U32>;
// A 64-byte Ed25519 signature.
pub type Signature = GenericArray<u8, U64>;
/// One link in the Proof-of-History chain: the hash state after `num_hashes`
/// steps, tagged with the event that occurred at that point.
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub struct Entry<T> {
    // Number of hashes performed since the previous entry.
    pub num_hashes: u64,
    // Result of hashing the previous entry's end_hash `num_hashes` times,
    // then mixing in this entry's event (see `next_hash`).
    pub end_hash: Sha256Hash,
    // The event that took place shortly after `end_hash` was generated.
    pub event: Event<T>,
}
/// When 'event' is Tick, the event represents a simple clock tick, and exists for the
/// sole purpose of improving the performance of event log verification. A tick can
/// be generated in 'num_hashes' hashes and verified in 'num_hashes' hashes. By logging
/// a hash alongside the tick, each tick can be verified in parallel using the 'end_hash'
/// of the preceding tick to seed its hashing.
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub enum Event<T> {
    // A clock tick; carries no payload.
    Tick,
    // Introduces unsigned data into the log.
    Discovery {
        data: T,
    },
    // Asserts possession of `data` by `key`; `sig` is `key`'s signature over
    // the serialized data (see `sign_serialized` / `verify_event`).
    Claim {
        key: PublicKey,
        data: T,
        sig: Signature,
    },
    // Transfers possession of `data` from `from` to `to`; `sig` is `from`'s
    // signature over (data, to) per `sign_transaction_data`.
    Transaction {
        from: PublicKey,
        to: PublicKey,
        data: T,
        sig: Signature,
    },
}
impl<T> Entry<T> {
    /// Creates a Tick Entry from the number of hashes `num_hashes` since the
    /// previous event and that resulting `end_hash`.
    pub fn new_tick(num_hashes: u64, end_hash: &Sha256Hash) -> Self {
        Self {
            event: Event::Tick,
            num_hashes,
            end_hash: *end_hash,
        }
    }
}
/// Return a new ED25519 keypair
pub fn generate_keypair() -> Ed25519KeyPair {
    use ring::{rand, signature};
    use untrusted;
    let rng = rand::SystemRandom::new();
    // Generate PKCS#8 key bytes, then parse them back into a usable keypair.
    let pkcs8_bytes = signature::Ed25519KeyPair::generate_pkcs8(&rng).unwrap();
    signature::Ed25519KeyPair::from_pkcs8(untrusted::Input::from(&pkcs8_bytes)).unwrap()
}
/// Return the public key for the given keypair
pub fn get_pubkey(keypair: &Ed25519KeyPair) -> PublicKey {
    // Copy the raw public-key bytes into the fixed-size PublicKey array.
    GenericArray::clone_from_slice(keypair.public_key_bytes())
}
/// Return a signature for the given data using the private key from the given keypair.
pub fn sign_serialized<T: Serialize>(data: &T, keypair: &Ed25519KeyPair) -> Signature {
    use bincode::serialize;
    // Sign the bincode encoding of `data`, not its in-memory representation.
    let serialized = serialize(data).unwrap();
    GenericArray::clone_from_slice(keypair.sign(&serialized).as_ref())
}
/// Return a signature for the given transaction data using the private key from the given keypair.
/// The recipient `to` is bound into the signed payload, so the signature
/// cannot be replayed with a different recipient (see test_transfer_hijack_attack).
pub fn sign_transaction_data<T: Serialize>(
    data: &T,
    keypair: &Ed25519KeyPair,
    to: &PublicKey,
) -> Signature {
    sign_serialized(&(data, to), keypair)
}
/// Return a Sha256 hash for the given data.
pub fn hash(val: &[u8]) -> Sha256Hash {
    use sha2::{Digest, Sha256};
    let mut hasher = Sha256::default();
    hasher.input(val);
    hasher.result()
}
/// Return the hash of the given hash extended with a type tag and the given value.
///
/// The `ty` byte domain-separates event kinds so that two different events
/// cannot produce identical hash inputs.
pub fn extend_and_hash(end_hash: &Sha256Hash, ty: u8, val: &[u8]) -> Sha256Hash {
    // Preallocate: previous hash + 1 tag byte + payload, avoiding regrowth.
    let mut hash_data = Vec::with_capacity(end_hash.len() + 1 + val.len());
    hash_data.extend_from_slice(end_hash);
    hash_data.push(ty);
    hash_data.extend_from_slice(val);
    hash(&hash_data)
}
/// Mix `event` into the hash chain ending at `end_hash`.
///
/// Each event kind gets a distinct type tag (see `extend_and_hash`) so that
/// different kinds of events can never hash to the same value.
pub fn hash_event<T: Serialize>(end_hash: &Sha256Hash, event: &Event<T>) -> Sha256Hash {
    use bincode::serialize;
    match *event {
        // A Tick carries no payload; the chain hash is advanced by the
        // caller's hashing loop instead.
        Event::Tick => *end_hash,
        Event::Discovery { ref data } => extend_and_hash(end_hash, 1, &serialize(&data).unwrap()),
        Event::Claim { key, ref data, sig } => {
            let mut event_data = serialize(&data).unwrap();
            event_data.extend_from_slice(&sig);
            event_data.extend_from_slice(&key);
            extend_and_hash(end_hash, 2, &event_data)
        }
        Event::Transaction {
            from,
            to,
            ref data,
            sig,
        } => {
            let mut event_data = serialize(&data).unwrap();
            event_data.extend_from_slice(&sig);
            event_data.extend_from_slice(&from);
            event_data.extend_from_slice(&to);
            // Tag 3: previously this reused Claim's tag (2), so a Claim and a
            // Transaction with colliding payload bytes hashed identically.
            extend_and_hash(end_hash, 3, &event_data)
        }
    }
}
/// Creates the hash 'num_hashes' after start_hash, plus an additional hash for any event data.
pub fn next_hash<T: Serialize>(
    start_hash: &Sha256Hash,
    num_hashes: u64,
    event: &Event<T>,
) -> Sha256Hash {
    // Extend the chain `num_hashes` times, then mix in the event payload.
    let end_hash = (0..num_hashes).fold(*start_hash, |h, _| hash(&h));
    hash_event(&end_hash, event)
}
/// Creates the next Entry 'num_hashes' after 'start_hash', holding the given
/// event. (Not necessarily a Tick; see `next_tick` for that special case.)
pub fn next_entry<T: Serialize>(
    start_hash: &Sha256Hash,
    num_hashes: u64,
    event: Event<T>,
) -> Entry<T> {
    Entry {
        num_hashes,
        end_hash: next_hash(start_hash, num_hashes, &event),
        event,
    }
}
/// Like `next_entry`, but advances 'start_hash' to the new entry's end_hash
/// so consecutive calls chain together.
pub fn next_entry_mut<T: Serialize>(
    start_hash: &mut Sha256Hash,
    num_hashes: u64,
    event: Event<T>,
) -> Entry<T> {
    let entry = next_entry(start_hash, num_hashes, event);
    *start_hash = entry.end_hash;
    entry
}
/// Creates the next Tick Entry 'num_hashes' after 'start_hash'.
/// The type parameter only fixes the Entry's payload type; a Tick carries no data.
pub fn next_tick<T: Serialize>(start_hash: &Sha256Hash, num_hashes: u64) -> Entry<T> {
    next_entry(start_hash, num_hashes, Event::Tick)
}
/// Verify the signature on a Claim or Transaction event.
/// Tick and Discovery events carry no signature and are always valid.
pub fn verify_event<T: Serialize>(event: &Event<T>) -> bool {
    use bincode::serialize;
    match *event {
        Event::Tick | Event::Discovery { .. } => true,
        Event::Claim { key, ref data, sig } => {
            // A Claim signs just the serialized data.
            verify_signature(&key, &serialize(&data).unwrap(), &sig)
        }
        Event::Transaction {
            from,
            to,
            ref data,
            sig,
        } => {
            // A Transaction signs (data, to) so the recipient cannot be swapped;
            // must mirror the payload built by `sign_transaction_data`.
            let sign_data = serialize(&(&data, &to)).unwrap();
            verify_signature(&from, &sign_data, &sig)
        }
    }
}
/// Verifies self.end_hash is the result of hashing a 'start_hash' 'self.num_hashes' times.
/// If the event is not a Tick, then hash that as well.
pub fn verify_entry<T: Serialize>(entry: &Entry<T>, start_hash: &Sha256Hash) -> bool {
    // The signature check short-circuits the (expensive) hash-chain replay.
    verify_event(&entry.event)
        && entry.end_hash == next_hash(start_hash, entry.num_hashes, &entry.event)
}
/// Verifies the hashes and counts of a slice of events are all consistent.
pub fn verify_slice(events: &[Entry<Sha256Hash>], start_hash: &Sha256Hash) -> bool {
    use rayon::prelude::*;
    // Seed the chain with a synthetic genesis tick, pair every entry with its
    // predecessor, and check each link in parallel.
    let seed = [Entry::new_tick(Default::default(), start_hash)];
    let pairs = seed.par_iter().chain(events).zip(events);
    pairs.all(|(prev, cur)| verify_entry(&cur, &prev.end_hash))
}
/// Verifies the hashes and counts of a slice of events are all consistent.
/// NOTE(review): monomorphic duplicate of `verify_slice` for Entry<u64>
/// payloads — consider unifying behind a generic helper.
pub fn verify_slice_u64(events: &[Entry<u64>], start_hash: &Sha256Hash) -> bool {
    use rayon::prelude::*;
    // A synthetic genesis tick seeds the chain; each entry is checked against
    // its predecessor's end_hash in parallel.
    let genesis = [Entry::new_tick(Default::default(), start_hash)];
    let event_pairs = genesis.par_iter().chain(events).zip(events);
    event_pairs.all(|(x0, x1)| verify_entry(&x1, &x0.end_hash))
}
/// Verifies the hashes and events serially. Exists only for reference.
pub fn verify_slice_seq<T: Serialize>(events: &[Entry<T>], start_hash: &Sha256Hash) -> bool {
    // Same pairing scheme as `verify_slice`, but checked one link at a time
    // with an early exit on the first bad entry.
    let genesis = [Entry::new_tick(0, start_hash)];
    for (prev, cur) in genesis.iter().chain(events).zip(events) {
        if !verify_entry(&cur, &prev.end_hash) {
            return false;
        }
    }
    true
}
/// Verify a signed message with the given public key.
/// Returns false (rather than panicking) on any malformed key or signature.
pub fn verify_signature(peer_public_key_bytes: &[u8], msg_bytes: &[u8], sig_bytes: &[u8]) -> bool {
    use untrusted;
    use ring::signature;
    // ring takes untrusted::Input wrappers around the raw byte slices.
    let peer_public_key = untrusted::Input::from(peer_public_key_bytes);
    let msg = untrusted::Input::from(msg_bytes);
    let sig = untrusted::Input::from(sig_bytes);
    signature::verify(&signature::ED25519, peer_public_key, msg, sig).is_ok()
}
/// Chain the given events into entries, each `num_hashes` after the last,
/// starting from `start_hash`.
pub fn create_entries<T: Serialize>(
    start_hash: &Sha256Hash,
    num_hashes: u64,
    events: Vec<Event<T>>,
) -> Vec<Entry<T>> {
    let mut cur_hash = *start_hash;
    let mut entries = Vec::with_capacity(events.len());
    for event in events {
        // next_entry_mut advances cur_hash, so the entries chain together.
        entries.push(next_entry_mut(&mut cur_hash, num_hashes, event));
    }
    entries
}
/// Create a vector of Ticks of length 'len' from 'start_hash' hash and 'num_hashes'.
pub fn create_ticks(
    start_hash: &Sha256Hash,
    num_hashes: u64,
    len: usize,
) -> Vec<Entry<Sha256Hash>> {
    let mut cur_hash = *start_hash;
    // Each iteration advances cur_hash via next_entry_mut, chaining the ticks.
    (0..len)
        .map(|_| next_entry_mut(&mut cur_hash, num_hashes, Event::Tick))
        .collect()
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_event_verify() {
        let zero = Sha256Hash::default();
        let one = hash(&zero);
        assert!(verify_entry::<u8>(&Entry::new_tick(0, &zero), &zero)); // base case
        assert!(!verify_entry::<u8>(&Entry::new_tick(0, &zero), &one)); // base case, bad
        assert!(verify_entry::<u8>(&next_tick(&zero, 1), &zero)); // inductive step
        assert!(!verify_entry::<u8>(&next_tick(&zero, 1), &one)); // inductive step, bad
    }

    #[test]
    fn test_next_tick() {
        let zero = Sha256Hash::default();
        // The number of hashes requested is recorded verbatim on the entry.
        assert_eq!(next_tick::<Sha256Hash>(&zero, 1).num_hashes, 1)
    }

    // Shared body for the parallel and sequential slice verifiers, so both
    // implementations are held to the same cases.
    fn verify_slice_generic(verify_slice: fn(&[Entry<Sha256Hash>], &Sha256Hash) -> bool) {
        let zero = Sha256Hash::default();
        let one = hash(&zero);
        assert!(verify_slice(&vec![], &zero)); // base case
        assert!(verify_slice(&vec![Entry::new_tick(0, &zero)], &zero)); // singleton case 1
        assert!(!verify_slice(&vec![Entry::new_tick(0, &zero)], &one)); // singleton case 2, bad
        assert!(verify_slice(&create_ticks(&zero, 0, 2), &zero)); // inductive step

        // Corrupt the second entry's hash; the verifier must notice.
        let mut bad_ticks = create_ticks(&zero, 0, 2);
        bad_ticks[1].end_hash = one;
        assert!(!verify_slice(&bad_ticks, &zero)); // inductive step, bad
    }

    #[test]
    fn test_verify_slice() {
        verify_slice_generic(verify_slice);
    }

    #[test]
    fn test_verify_slice_seq() {
        verify_slice_generic(verify_slice_seq::<Sha256Hash>);
    }

    #[test]
    fn test_reorder_attack() {
        let zero = Sha256Hash::default();
        let one = hash(&zero);

        // First, verify Discovery events
        let events = vec![
            Event::Discovery { data: zero },
            Event::Discovery { data: one },
        ];
        let mut entries = create_entries(&zero, 0, events);
        assert!(verify_slice(&entries, &zero));

        // Next, swap two Discovery events and ensure verification fails.
        let event0 = entries[0].event.clone();
        let event1 = entries[1].event.clone();
        entries[0].event = event1;
        entries[1].event = event0;
        assert!(!verify_slice(&entries, &zero));
    }

    #[test]
    fn test_claim() {
        // A Claim whose signature matches its data should verify.
        let keypair = generate_keypair();
        let data = hash(b"hello, world");
        let event0 = Event::Claim {
            key: get_pubkey(&keypair),
            data,
            sig: sign_serialized(&data, &keypair),
        };
        let zero = Sha256Hash::default();
        let entries = create_entries(&zero, 0, vec![event0]);
        assert!(verify_slice(&entries, &zero));
    }

    #[test]
    fn test_wrong_data_claim_attack() {
        // Signature covers different data than the event carries; must fail.
        let keypair = generate_keypair();
        let event0 = Event::Claim {
            key: get_pubkey(&keypair),
            data: hash(b"goodbye cruel world"),
            sig: sign_serialized(&hash(b"hello, world"), &keypair),
        };
        let zero = Sha256Hash::default();
        let entries = create_entries(&zero, 0, vec![event0]);
        assert!(!verify_slice(&entries, &zero));
    }

    #[test]
    fn test_transfer() {
        // A well-formed transfer signed by the sender should verify.
        let keypair0 = generate_keypair();
        let keypair1 = generate_keypair();
        let pubkey1 = get_pubkey(&keypair1);
        let data = hash(b"hello, world");
        let event0 = Event::Transaction {
            from: get_pubkey(&keypair0),
            to: pubkey1,
            data,
            sig: sign_transaction_data(&data, &keypair0, &pubkey1),
        };
        let zero = Sha256Hash::default();
        let entries = create_entries(&zero, 0, vec![event0]);
        assert!(verify_slice(&entries, &zero));
    }

    #[test]
    fn test_wrong_data_transfer_attack() {
        // The signed payload and the logged data differ; must fail.
        let keypair0 = generate_keypair();
        let keypair1 = generate_keypair();
        let pubkey1 = get_pubkey(&keypair1);
        let data = hash(b"hello, world");
        let event0 = Event::Transaction {
            from: get_pubkey(&keypair0),
            to: pubkey1,
            data: hash(b"goodbye cruel world"), // <-- attack!
            sig: sign_transaction_data(&data, &keypair0, &pubkey1),
        };
        let zero = Sha256Hash::default();
        let entries = create_entries(&zero, 0, vec![event0]);
        assert!(!verify_slice(&entries, &zero));
    }

    #[test]
    fn test_transfer_hijack_attack() {
        // The signature binds the recipient, so substituting a thief's key
        // must invalidate the transaction.
        let keypair0 = generate_keypair();
        let keypair1 = generate_keypair();
        let thief_keypair = generate_keypair();
        let pubkey1 = get_pubkey(&keypair1);
        let data = hash(b"hello, world");
        let event0 = Event::Transaction {
            from: get_pubkey(&keypair0),
            to: get_pubkey(&thief_keypair), // <-- attack!
            data: hash(b"goodbye cruel world"),
            sig: sign_transaction_data(&data, &keypair0, &pubkey1),
        };
        let zero = Sha256Hash::default();
        let entries = create_entries(&zero, 0, vec![event0]);
        assert!(!verify_slice(&entries, &zero));
    }
}
#[cfg(all(feature = "unstable", test))]
mod bench {
    extern crate test;
    use self::test::Bencher;
    use log::*;

    #[bench]
    fn event_bench(bencher: &mut Bencher) {
        let start_hash = Default::default();
        // 8 entries of 10k hashes each; slice verification is the measured path.
        let events = create_ticks(&start_hash, 10_000, 8);
        bencher.iter(|| {
            assert!(verify_slice(&events, &start_hash));
        });
    }

    #[bench]
    fn event_bench_seq(bencher: &mut Bencher) {
        // Sequential reference implementation, for comparison with the
        // parallel verifier benchmarked above.
        let start_hash = Default::default();
        let events = create_ticks(&start_hash, 10_000, 8);
        bencher.iter(|| {
            assert!(verify_slice_seq(&events, &start_hash));
        });
    }
}