Compare commits
28 Commits
SHA1
---
e57bba17c1
959da300cc
ba90e43f72
6effd64ab0
e18da7c7c1
0297edaf1f
b317d13b44
bb22522e45
41053b6d0b
bd3fe5fac9
10a70a238b
0bead4d410
4a7156de43
d88d1b2a09
a7186328e0
5e3c7816bd
a2fa60fa31
ceb65c2669
fd209ef1a9
471f036444
6ec0e5834c
4c94754661
831e2cbdc9
3550f703c3
ea1d57b461
49386309c8
b7a95ab7cc
bf35b730de
.codecov.yml (new file, 2 lines)

@@ -0,0 +1,2 @@
+ignore:
+  - "src/bin"
.travis.yml

@@ -9,7 +9,7 @@ matrix:
     - rust: stable
     - rust: nightly
       env:
-        - FEATURES='unstable'
+        - FEATURES='asm,unstable'
 before_script: |
   export PATH="$PATH:$HOME/.cargo/bin"
   rustup component add rustfmt-preview
Cargo.toml (13 changes)

@@ -1,19 +1,30 @@
 [package]
 name = "silk"
 description = "A silky smooth implementation of the Loom architecture"
-version = "0.1.1"
+version = "0.2.1"
 documentation = "https://docs.rs/silk"
 homepage = "http://loomprotocol.com/"
 repository = "https://github.com/loomprotocol/silk"
 authors = [
     "Anatoly Yakovenko <aeyakovenko@gmail.com>",
     "Greg Fitzgerald <garious@gmail.com>",
 ]
 license = "Apache-2.0"
 
+[[bin]]
+name = "silk-demo"
+path = "src/bin/demo.rs"
+
+[badges]
+codecov = { repository = "loomprotocol/silk", branch = "master", service = "github" }
+
 [features]
 unstable = []
+asm = ["sha2-asm"]
 
 [dependencies]
 rayon = "1.0.0"
 itertools = "0.7.6"
 sha2 = "0.7.0"
+sha2-asm = {version="0.3", optional=true}
 digest = "0.7.2"
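The new `asm` feature above makes `sha2-asm` an optional dependency: Cargo only compiles it when the feature is enabled. Below is a minimal, self-contained sketch of how code typically selects behavior on such a flag; the `backend` function is illustrative, not part of silk.

```rust
// Picks a description of the hashing backend at compile time based on the
// crate's feature flags. With `--features asm` the first branch is compiled;
// otherwise the pure-Rust fallback is used.
#[cfg(feature = "asm")]
fn backend() -> &'static str {
    "sha2-asm (assembly implementation)"
}

#[cfg(not(feature = "asm"))]
fn backend() -> &'static str {
    "sha2 (pure Rust implementation)"
}

fn main() {
    println!("hashing backend: {}", backend());
}
```

Build with `cargo run` for the default backend, or `cargo run --features asm` to flip the flag.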
README.md (69 changes)

@@ -3,18 +3,65 @@
 [![Build Status](https://travis-ci.org/loomprotocol/silk.svg?branch=master)](https://travis-ci.org/loomprotocol/silk)
+[![codecov](https://codecov.io/gh/loomprotocol/silk/branch/master/graph/badge.svg)](https://codecov.io/gh/loomprotocol/silk)
 
-# Silk, A Silky Smooth Implementation of the Loom Architecture
+# Silk, a silky smooth implementation of the Loom specification
 
 Loom is a new architecture for a high-performance blockchain. Its whitepaper boasts a theoretical
-throughput of 710k transactions per second on a 1 gbps network. The first implementation of the
-whitepaper is happening in the 'loomprotocol/loom' repository. That repo is aggressively moving
-forward, looking to de-risk technical claims as quickly as possible. This repo is quite a bit
-different philosophically. Here we assume the Loom architecture is sound and worthy of building
-a community around. We care a great deal about quality, clarity, and a short learning curve. We
-avoid the use of `unsafe` Rust and write tests for *everything*. Optimizations are only
-added when corresponding benchmarks are also added that demonstrate real performance boosts. We
-expect the feature set here will always be a long way behind the loom repo, but that this is
-an implementation you can take to the bank, literally.
+throughput of 710k transactions per second on a 1 gbps network. The specification is implemented
+in two git repositories. Research is performed in the loom repository, and that work drives the
+Loom specification forward. This repository, on the other hand, aims to implement the specification
+as-is. We care a great deal about quality, clarity, and a short learning curve. We avoid the use
+of `unsafe` Rust and write tests for *everything*. Optimizations are only added when
+corresponding benchmarks are also added that demonstrate real performance boosts. We expect the
+feature set here will always be a ways behind the loom repo, but that this is an implementation
+you can take to the bank, literally.
+
+# Usage
+
+Add the latest [silk package](https://crates.io/crates/silk) to the `[dependencies]` section
+of your Cargo.toml.
+
+Create a *Historian* and send it *events* to generate an *event log*, where each log *entry*
+is tagged with the historian's latest *hash*. Then ensure the order of events was not tampered
+with by verifying each entry's hash can be generated from the hash in the previous entry:
+
+```rust
+extern crate silk;
+
+use silk::historian::Historian;
+use silk::log::{verify_slice, Entry, Event, Sha256Hash};
+use std::{thread, time};
+use std::sync::mpsc::SendError;
+
+fn create_log(hist: &Historian) -> Result<(), SendError<Event>> {
+    hist.sender.send(Event::Tick)?;
+    thread::sleep(time::Duration::new(0, 100_000));
+    hist.sender.send(Event::UserDataKey(0xdeadbeef))?;
+    thread::sleep(time::Duration::new(0, 100_000));
+    hist.sender.send(Event::Tick)?;
+    Ok(())
+}
+
+fn main() {
+    let seed = Sha256Hash::default();
+    let hist = Historian::new(&seed);
+    create_log(&hist).expect("send error");
+    drop(hist.sender);
+    let entries: Vec<Entry> = hist.receiver.iter().collect();
+    for entry in &entries {
+        println!("{:?}", entry);
+    }
+    assert!(verify_slice(&entries, &seed));
+}
+```
+
+Running the program should produce a log similar to:
+
+```rust
+Entry { num_hashes: 0, end_hash: [0, ...], event: Tick }
+Entry { num_hashes: 6, end_hash: [67, ...], event: UserDataKey(3735928559) }
+Entry { num_hashes: 5, end_hash: [123, ...], event: Tick }
+```
+
 
 # Developing
 

@@ -57,5 +104,5 @@ $ rustup install nightly
 Run the benchmarks:
 
 ```bash
-$ cargo +nightly bench --features="unstable"
+$ cargo +nightly bench --features="asm,unstable"
 ```
src/bin/demo.rs (new file, 27 lines)

@@ -0,0 +1,27 @@
+extern crate silk;
+
+use silk::historian::Historian;
+use silk::log::{verify_slice, Entry, Event, Sha256Hash};
+use std::{thread, time};
+use std::sync::mpsc::SendError;
+
+fn create_log(hist: &Historian) -> Result<(), SendError<Event>> {
+    hist.sender.send(Event::Tick)?;
+    thread::sleep(time::Duration::new(0, 100_000));
+    hist.sender.send(Event::UserDataKey(0xdeadbeef))?;
+    thread::sleep(time::Duration::new(0, 100_000));
+    hist.sender.send(Event::Tick)?;
+    Ok(())
+}
+
+fn main() {
+    let seed = Sha256Hash::default();
+    let hist = Historian::new(&seed);
+    create_log(&hist).expect("send error");
+    drop(hist.sender);
+    let entries: Vec<Entry> = hist.receiver.iter().collect();
+    for entry in &entries {
+        println!("{:?}", entry);
+    }
+    assert!(verify_slice(&entries, &seed));
+}
src/event.rs (deleted, 151 lines)

@@ -1,151 +0,0 @@
-//! The `event` crate provides the foundational data structures for Proof-of-History
-
-/// A Proof-of-History is an ordered log of events in time. Each entry contains three
-/// pieces of data. The 'num_hashes' field is the number of hashes performed since the previous
-/// entry. The 'end_hash' field is the result of hashing 'end_hash' from the previous entry
-/// 'num_hashes' times. The 'data' field is an optional foreign key (a hash) pointing to some
-/// arbitrary data that a client is looking to associate with the entry.
-///
-/// If you divide 'num_hashes' by the amount of time it takes to generate a new hash, you
-/// get a duration estimate since the last event. Since processing power increases
-/// over time, one should expect the duration 'num_hashes' represents to decrease proportionally.
-/// Though processing power varies across nodes, the network gives priority to the
-/// fastest processor. Duration should therefore be estimated by assuming that the hash
-/// was generated by the fastest processor at the time the entry was logged.
-pub struct Event {
-    pub num_hashes: u64,
-    pub end_hash: u64,
-    pub data: EventData,
-}
-
-/// When 'data' is Tick, the event represents a simple clock tick, and exists for the
-/// sole purpose of improving the performance of event log verification. A tick can
-/// be generated in 'num_hashes' hashes and verified in 'num_hashes' hashes. By logging
-/// a hash alongside the tick, each tick can be verified in parallel using the 'end_hash'
-/// of the preceding tick to seed its hashing.
-pub enum EventData {
-    Tick,
-    UserDataKey(u64),
-}
-
-impl Event {
-    /// Creates an Event from the number of hashes 'num_hashes' since the previous event
-    /// and that resulting 'end_hash'.
-    pub fn new_tick(num_hashes: u64, end_hash: u64) -> Self {
-        let data = EventData::Tick;
-        Event {
-            num_hashes,
-            end_hash,
-            data,
-        }
-    }
-
-    /// Verifies self.end_hash is the result of hashing a 'start_hash' 'self.num_hashes' times.
-    pub fn verify(self: &Self, start_hash: u64) -> bool {
-        self.end_hash == next_tick(start_hash, self.num_hashes).end_hash
-    }
-}
-
-/// Creates the next Tick Event 'num_hashes' after 'start_hash'.
-pub fn next_tick(start_hash: u64, num_hashes: u64) -> Event {
-    use std::collections::hash_map::DefaultHasher;
-    use std::hash::{Hash, Hasher};
-    let mut end_hash = start_hash;
-    let mut hasher = DefaultHasher::new();
-    for _ in 0..num_hashes {
-        end_hash.hash(&mut hasher);
-        end_hash = hasher.finish();
-    }
-    Event::new_tick(num_hashes, end_hash)
-}
-
-/// Verifies the hashes and counts of a slice of events are all consistent.
-pub fn verify_slice(events: &[Event], start_hash: u64) -> bool {
-    use rayon::prelude::*;
-    let genesis = [Event::new_tick(0, start_hash)];
-    let event_pairs = genesis.par_iter().chain(events).zip(events);
-    event_pairs.all(|(x0, x1)| x1.verify(x0.end_hash))
-}
-
-/// Verifies the hashes and events serially. Exists only for reference.
-pub fn verify_slice_seq(events: &[Event], start_hash: u64) -> bool {
-    let genesis = [Event::new_tick(0, start_hash)];
-    let mut event_pairs = genesis.iter().chain(events).zip(events);
-    event_pairs.all(|(x0, x1)| x1.verify(x0.end_hash))
-}
-
-/// Create a vector of Ticks of length 'len' from 'start_hash' hash and 'num_hashes'.
-pub fn create_ticks(start_hash: u64, num_hashes: u64, len: usize) -> Vec<Event> {
-    use itertools::unfold;
-    let mut events = unfold(start_hash, |state| {
-        let event = next_tick(*state, num_hashes);
-        *state = event.end_hash;
-        return Some(event);
-    });
-    events.by_ref().take(len).collect()
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-
-    #[test]
-    fn test_event_verify() {
-        assert!(Event::new_tick(0, 0).verify(0)); // base case
-        assert!(!Event::new_tick(0, 0).verify(1)); // base case, bad
-        assert!(next_tick(0, 1).verify(0)); // inductive step
-        assert!(!next_tick(0, 1).verify(1)); // inductive step, bad
-    }
-
-    #[test]
-    fn test_next_tick() {
-        assert_eq!(next_tick(0, 1).num_hashes, 1)
-    }
-
-    fn verify_slice_generic(verify_slice: fn(&[Event], u64) -> bool) {
-        assert!(verify_slice(&vec![], 0)); // base case
-        assert!(verify_slice(&vec![Event::new_tick(0, 0)], 0)); // singleton case 1
-        assert!(!verify_slice(&vec![Event::new_tick(0, 0)], 1)); // singleton case 2, bad
-        assert!(verify_slice(&create_ticks(0, 0, 2), 0)); // inductive step
-
-        let mut bad_ticks = create_ticks(0, 0, 2);
-        bad_ticks[1].end_hash = 1;
-        assert!(!verify_slice(&bad_ticks, 0)); // inductive step, bad
-    }
-
-    #[test]
-    fn test_verify_slice() {
-        verify_slice_generic(verify_slice);
-    }
-
-    #[test]
-    fn test_verify_slice_seq() {
-        verify_slice_generic(verify_slice_seq);
-    }
-}
-
-#[cfg(all(feature = "unstable", test))]
-mod bench {
-    extern crate test;
-    use self::test::Bencher;
-    use event;
-
-    #[bench]
-    fn event_bench(bencher: &mut Bencher) {
-        let start_hash = 0;
-        let events = event::create_ticks(start_hash, 100_000, 8);
-        bencher.iter(|| {
-            assert!(event::verify_slice(&events, start_hash));
-        });
-    }
-
-    #[bench]
-    fn event_bench_seq(bencher: &mut Bencher) {
-        let start_hash = 0;
-        let events = event::create_ticks(start_hash, 100_000, 8);
-        bencher.iter(|| {
-            assert!(event::verify_slice_seq(&events, start_hash));
-        });
-    }
-}
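The doc comment in the deleted module above estimates elapsed time by dividing `num_hashes` by the hash rate of the fastest processor. A minimal sketch of that arithmetic, where `hashes_per_sec` is an assumed measured rate, not a value the crate provides:

```rust
/// Hypothetical helper: seconds represented by `num_hashes`, assuming the
/// entry was hashed by the fastest known processor.
fn estimate_duration_secs(num_hashes: u64, hashes_per_sec: f64) -> f64 {
    num_hashes as f64 / hashes_per_sec
}

fn main() {
    // 100_000 hashes at an assumed 2 million hashes/sec is about 0.05 seconds.
    println!("{:.3}s", estimate_duration_secs(100_000, 2_000_000.0));
}
```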
src/historian.rs (new file, 139 lines)

@@ -0,0 +1,139 @@
+//! The `historian` crate provides a microservice for generating a Proof-of-History.
+//! It logs Event items on behalf of its users. It continuously generates
+//! new hashes, only stopping to check if it has been sent an Event item. It
+//! tags each Event with an Entry and sends it back. The Entry includes the
+//! Event, the latest hash, and the number of hashes since the last event.
+//! The resulting stream of entries represents ordered events in time.
+
+use std::thread::JoinHandle;
+use std::sync::mpsc::{Receiver, Sender};
+use log::{hash, Entry, Event, Sha256Hash};
+
+pub struct Historian {
+    pub sender: Sender<Event>,
+    pub receiver: Receiver<Entry>,
+    pub thread_hdl: JoinHandle<(Entry, ExitReason)>,
+}
+
+#[derive(Debug, PartialEq, Eq)]
+pub enum ExitReason {
+    RecvDisconnected,
+    SendDisconnected,
+}
+
+fn log_events(
+    receiver: &Receiver<Event>,
+    sender: &Sender<Entry>,
+    num_hashes: u64,
+    end_hash: Sha256Hash,
+) -> Result<u64, (Entry, ExitReason)> {
+    use std::sync::mpsc::TryRecvError;
+    let mut num_hashes = num_hashes;
+    loop {
+        match receiver.try_recv() {
+            Ok(event) => {
+                let entry = Entry {
+                    end_hash,
+                    num_hashes,
+                    event,
+                };
+                if let Err(_) = sender.send(entry.clone()) {
+                    return Err((entry, ExitReason::SendDisconnected));
+                }
+                num_hashes = 0;
+            }
+            Err(TryRecvError::Empty) => {
+                return Ok(num_hashes);
+            }
+            Err(TryRecvError::Disconnected) => {
+                let entry = Entry {
+                    end_hash,
+                    num_hashes,
+                    event: Event::Tick,
+                };
+                return Err((entry, ExitReason::RecvDisconnected));
+            }
+        }
+    }
+}
+
+/// A background thread that will continue tagging received Event messages and
+/// sending back Entry messages until either the receiver or sender channel is closed.
+pub fn create_logger(
+    start_hash: Sha256Hash,
+    receiver: Receiver<Event>,
+    sender: Sender<Entry>,
+) -> JoinHandle<(Entry, ExitReason)> {
+    use std::thread;
+    thread::spawn(move || {
+        let mut end_hash = start_hash;
+        let mut num_hashes = 0;
+        loop {
+            match log_events(&receiver, &sender, num_hashes, end_hash) {
+                Ok(n) => num_hashes = n,
+                Err(err) => return err,
+            }
+            end_hash = hash(&end_hash);
+            num_hashes += 1;
+        }
+    })
+}
+
+impl Historian {
+    pub fn new(start_hash: &Sha256Hash) -> Self {
+        use std::sync::mpsc::channel;
+        let (sender, event_receiver) = channel();
+        let (entry_sender, receiver) = channel();
+        let thread_hdl = create_logger(*start_hash, event_receiver, entry_sender);
+        Historian {
+            sender,
+            receiver,
+            thread_hdl,
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use log::*;
+
+    #[test]
+    fn test_historian() {
+        use std::thread::sleep;
+        use std::time::Duration;
+
+        let zero = Sha256Hash::default();
+        let hist = Historian::new(&zero);
+
+        hist.sender.send(Event::Tick).unwrap();
+        sleep(Duration::new(0, 1_000_000));
+        hist.sender.send(Event::UserDataKey(0xdeadbeef)).unwrap();
+        sleep(Duration::new(0, 1_000_000));
+        hist.sender.send(Event::Tick).unwrap();
+
+        let entry0 = hist.receiver.recv().unwrap();
+        let entry1 = hist.receiver.recv().unwrap();
+        let entry2 = hist.receiver.recv().unwrap();
+
+        drop(hist.sender);
+        assert_eq!(
+            hist.thread_hdl.join().unwrap().1,
+            ExitReason::RecvDisconnected
+        );
+
+        assert!(verify_slice(&[entry0, entry1, entry2], &zero));
+    }
+
+    #[test]
+    fn test_historian_closed_sender() {
+        let zero = Sha256Hash::default();
+        let hist = Historian::new(&zero);
+        drop(hist.receiver);
+        hist.sender.send(Event::Tick).unwrap();
+        assert_eq!(
+            hist.thread_hdl.join().unwrap().1,
+            ExitReason::SendDisconnected
+        );
+    }
+}
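The module docs above describe a channel-in, channel-out worker: events arrive on one channel, tagged entries leave on another, and the thread exits when either side disconnects. A self-contained sketch of that pattern using only `std::sync::mpsc`; the names (`spawn_tagger`, the `u64` payloads) are illustrative, not silk's API:

```rust
use std::sync::mpsc::{channel, Receiver, Sender};
use std::thread;

// Background worker: receives events, tags each with a running counter
// (standing in for Historian's hash count), and sends the pair back.
fn spawn_tagger(receiver: Receiver<u64>, sender: Sender<(u64, u64)>) -> thread::JoinHandle<()> {
    thread::spawn(move || {
        let mut ticks = 0u64;
        while let Ok(event) = receiver.recv() {
            ticks += 1;
            if sender.send((ticks, event)).is_err() {
                break; // output side disconnected, like SendDisconnected
            }
        } // recv() failed: input side disconnected, like RecvDisconnected
    })
}

fn main() {
    let (event_tx, event_rx) = channel();
    let (entry_tx, entry_rx) = channel();
    let handle = spawn_tagger(event_rx, entry_tx);
    event_tx.send(42).unwrap();
    drop(event_tx); // close the input so the worker exits
    for (ticks, event) in entry_rx.iter() {
        println!("tag {}: event {}", ticks, event);
    }
    handle.join().unwrap();
}
```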
src/lib.rs

@@ -1,4 +1,7 @@
 #![cfg_attr(feature = "unstable", feature(test))]
-pub mod event;
+pub mod log;
+pub mod historian;
+extern crate digest;
 extern crate itertools;
 extern crate rayon;
+extern crate sha2;
src/log.rs (new file, 166 lines)

@@ -0,0 +1,166 @@
+//! The `log` crate provides the foundational data structures for Proof-of-History,
+//! an ordered log of events in time.
+
+/// Each log entry contains three pieces of data. The 'num_hashes' field is the number
+/// of hashes performed since the previous entry. The 'end_hash' field is the result
+/// of hashing 'end_hash' from the previous entry 'num_hashes' times. The 'event'
+/// field points to an Event that took place shortly after 'end_hash' was generated.
+///
+/// If you divide 'num_hashes' by the amount of time it takes to generate a new hash, you
+/// get a duration estimate since the last event. Since processing power increases
+/// over time, one should expect the duration 'num_hashes' represents to decrease proportionally.
+/// Though processing power varies across nodes, the network gives priority to the
+/// fastest processor. Duration should therefore be estimated by assuming that the hash
+/// was generated by the fastest processor at the time the entry was logged.
+
+use digest::generic_array::GenericArray;
+use digest::generic_array::typenum::U32;
+pub type Sha256Hash = GenericArray<u8, U32>;
+
+#[derive(Debug, PartialEq, Eq, Clone)]
+pub struct Entry {
+    pub num_hashes: u64,
+    pub end_hash: Sha256Hash,
+    pub event: Event,
+}
+
+/// When 'event' is Tick, the event represents a simple clock tick, and exists for the
+/// sole purpose of improving the performance of event log verification. A tick can
+/// be generated in 'num_hashes' hashes and verified in 'num_hashes' hashes. By logging
+/// a hash alongside the tick, each tick can be verified in parallel using the 'end_hash'
+/// of the preceding tick to seed its hashing.
+#[derive(Debug, PartialEq, Eq, Clone)]
+pub enum Event {
+    Tick,
+    UserDataKey(u64),
+}
+
+impl Entry {
+    /// Creates an Entry from the number of hashes 'num_hashes' since the previous event
+    /// and that resulting 'end_hash'.
+    pub fn new_tick(num_hashes: u64, end_hash: &Sha256Hash) -> Self {
+        let event = Event::Tick;
+        Entry {
+            num_hashes,
+            end_hash: *end_hash,
+            event,
+        }
+    }
+
+    /// Verifies self.end_hash is the result of hashing a 'start_hash' 'self.num_hashes' times.
+    pub fn verify(self: &Self, start_hash: &Sha256Hash) -> bool {
+        self.end_hash == next_tick(start_hash, self.num_hashes).end_hash
+    }
+}
+
+pub fn hash(val: &[u8]) -> Sha256Hash {
+    use sha2::{Digest, Sha256};
+    let mut hasher = Sha256::default();
+    hasher.input(val);
+    hasher.result()
+}
+
+/// Creates the next Tick Entry 'num_hashes' after 'start_hash'.
+pub fn next_tick(start_hash: &Sha256Hash, num_hashes: u64) -> Entry {
+    let mut end_hash = *start_hash;
+    for _ in 0..num_hashes {
+        end_hash = hash(&end_hash);
+    }
+    Entry::new_tick(num_hashes, &end_hash)
+}
+
+/// Verifies the hashes and counts of a slice of events are all consistent.
+pub fn verify_slice(events: &[Entry], start_hash: &Sha256Hash) -> bool {
+    use rayon::prelude::*;
+    let genesis = [Entry::new_tick(Default::default(), start_hash)];
+    let event_pairs = genesis.par_iter().chain(events).zip(events);
+    event_pairs.all(|(x0, x1)| x1.verify(&x0.end_hash))
+}
+
+/// Verifies the hashes and events serially. Exists only for reference.
+pub fn verify_slice_seq(events: &[Entry], start_hash: &Sha256Hash) -> bool {
+    let genesis = [Entry::new_tick(0, start_hash)];
+    let mut event_pairs = genesis.iter().chain(events).zip(events);
+    event_pairs.all(|(x0, x1)| x1.verify(&x0.end_hash))
+}
+
+/// Create a vector of Ticks of length 'len' from 'start_hash' hash and 'num_hashes'.
+pub fn create_ticks(start_hash: &Sha256Hash, num_hashes: u64, len: usize) -> Vec<Entry> {
+    use itertools::unfold;
+    let mut events = unfold(*start_hash, |state| {
+        let event = next_tick(state, num_hashes);
+        *state = event.end_hash;
+        return Some(event);
+    });
+    events.by_ref().take(len).collect()
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_event_verify() {
+        let zero = Sha256Hash::default();
+        let one = hash(&zero);
+        assert!(Entry::new_tick(0, &zero).verify(&zero)); // base case
+        assert!(!Entry::new_tick(0, &zero).verify(&one)); // base case, bad
+        assert!(next_tick(&zero, 1).verify(&zero)); // inductive step
+        assert!(!next_tick(&zero, 1).verify(&one)); // inductive step, bad
+    }
+
+    #[test]
+    fn test_next_tick() {
+        let zero = Sha256Hash::default();
+        assert_eq!(next_tick(&zero, 1).num_hashes, 1)
+    }
+
+    fn verify_slice_generic(verify_slice: fn(&[Entry], &Sha256Hash) -> bool) {
+        let zero = Sha256Hash::default();
+        let one = hash(&zero);
+        assert!(verify_slice(&vec![], &zero)); // base case
+        assert!(verify_slice(&vec![Entry::new_tick(0, &zero)], &zero)); // singleton case 1
+        assert!(!verify_slice(&vec![Entry::new_tick(0, &zero)], &one)); // singleton case 2, bad
+        assert!(verify_slice(&create_ticks(&zero, 0, 2), &zero)); // inductive step
+
+        let mut bad_ticks = create_ticks(&zero, 0, 2);
+        bad_ticks[1].end_hash = one;
+        assert!(!verify_slice(&bad_ticks, &zero)); // inductive step, bad
+    }
+
+    #[test]
+    fn test_verify_slice() {
+        verify_slice_generic(verify_slice);
+    }
+
+    #[test]
+    fn test_verify_slice_seq() {
+        verify_slice_generic(verify_slice_seq);
+    }
+}
+
+#[cfg(all(feature = "unstable", test))]
+mod bench {
+    extern crate test;
+    use self::test::Bencher;
+    use log::*;
+
+    #[bench]
+    fn event_bench(bencher: &mut Bencher) {
+        let start_hash = Default::default();
+        let events = create_ticks(&start_hash, 10_000, 8);
+        bencher.iter(|| {
+            assert!(verify_slice(&events, &start_hash));
+        });
+    }
+
+    #[bench]
+    fn event_bench_seq(bencher: &mut Bencher) {
+        let start_hash = Default::default();
+        let events = create_ticks(&start_hash, 10_000, 8);
+        bencher.iter(|| {
+            assert!(verify_slice_seq(&events, &start_hash));
+        });
+    }
+}
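`verify_slice` above checks the chain pairwise: it prepends a genesis entry, zips the sequence against itself offset by one, and verifies each entry against its predecessor's `end_hash`. A self-contained sketch of that pairing idiom, with a toy `u64` mixer standing in for SHA-256:

```rust
// Toy stand-in for the hash step; deterministic but not cryptographic.
fn step(h: u64) -> u64 {
    h.wrapping_mul(6364136223846793005).wrapping_add(1442695040888963407)
}

// Prepend the start value, pair each element with its predecessor, and
// confirm every element is exactly one `step` past the element before it.
fn verify_chain(start: u64, ends: &[u64]) -> bool {
    let genesis = [start];
    genesis
        .iter()
        .chain(ends)
        .zip(ends)
        .all(|(prev, cur)| *cur == step(*prev))
}

fn main() {
    let mut h = 0u64;
    let chain: Vec<u64> = (0..4).map(|_| { h = step(h); h }).collect();
    assert!(verify_chain(0, &chain));
    println!("chain of {} links verified", chain.len());
}
```

Because each check depends only on its own pair, swapping `iter()` for rayon's `par_iter()` is what lets the real `verify_slice` validate every link in parallel.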