From f6e5f2439d75b5fb593de8378a8fbb9d474fa027 Mon Sep 17 00:00:00 2001 From: Greg Fitzgerald Date: Fri, 27 Apr 2018 15:47:13 -0700 Subject: [PATCH 01/51] Add GPU library for Linux systems To get solana to use the GPU, invoke cargo with "--features=cuda". Former-commit-id: ea904df6e53d98a32e3f6103ee82cdf7ba08bf21 --- .gitattributes | 1 + 1 file changed, 1 insertion(+) create mode 100644 .gitattributes diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000000..ce24744af8 --- /dev/null +++ b/.gitattributes @@ -0,0 +1 @@ +*.a filter=lfs diff=lfs merge=lfs -text \ No newline at end of file From 3cfb571356c1479b9b1d4558e2ddafbdf092dd22 Mon Sep 17 00:00:00 2001 From: Greg Fitzgerald Date: Fri, 27 Apr 2018 15:49:48 -0700 Subject: [PATCH 02/51] Version bump Former-commit-id: f7385e866207b3ec2269bac36d52ef1e7f09337c --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 747cd4f38d..877fbde79e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "solana" description = "High Performance Blockchain" -version = "0.4.0" +version = "0.5.0-beta" documentation = "https://docs.rs/solana" homepage = "http://solana.io/" repository = "https://github.com/solana-labs/solana" From 6268d540a8e0b3613437bcb3a45b9bd30b58fba8 Mon Sep 17 00:00:00 2001 From: Anatoly Yakovenko Date: Sun, 29 Apr 2018 23:43:43 -0700 Subject: [PATCH 03/51] move CI specific scripts to solana-labs/buildkite repo Former-commit-id: 77dd1bdd4a314975e36eaee0cbd8661ae526c2f9 --- scripts/buildkite.sh | 6 ------ 1 file changed, 6 deletions(-) delete mode 100755 scripts/buildkite.sh diff --git a/scripts/buildkite.sh b/scripts/buildkite.sh deleted file mode 100755 index db1a72e497..0000000000 --- a/scripts/buildkite.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash -export LD_LIBRARY_PATH=/usr/local/cuda/lib64 -source $HOME/.cargo/env -export PATH=$PATH:/usr/local/cuda/bin -cp /tmp/libcuda_verify_ed25519.a . -cargo test --features=cuda From 5c66bbde0143e7884630c4f3b50540e052c19eb6 Mon Sep 17 00:00:00 2001 From: Greg Fitzgerald Date: Mon, 30 Apr 2018 15:20:39 -0600 Subject: [PATCH 04/51] Add a note about running with GPU optimizations --- README.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/README.md b/README.md index 729d99ac72..0edec791a3 100644 --- a/README.md +++ b/README.md @@ -133,6 +133,12 @@ Run the benchmarks: $ cargo +nightly bench --features="unstable" ``` +To run the benchmarks on Linux with GPU optimizations enabled: + +```bash +$ cargo +nightly bench --features="unstable,cuda" +``` + Code coverage --- From e6c3c215abb458393a36e7677ad64c5507a5d2b1 Mon Sep 17 00:00:00 2001 From: Greg Fitzgerald Date: Mon, 30 Apr 2018 15:26:31 -0600 Subject: [PATCH 05/51] Add note about installing git-lfs --- README.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/README.md b/README.md index 0edec791a3..7f4b4f24df 100644 --- a/README.md +++ b/README.md @@ -29,6 +29,10 @@ $ curl https://sh.rustup.rs -sSf | sh $ source $HOME/.cargo/env ``` +If you plan to run with GPU optimizations enabled (not recommended), you'll need a CUDA library stored in git LFS. 
Install git-lfs here: + +https://git-lfs.github.com/ + Now checkout the code from github: ```bash From 6af3680f996fec64865e2d83f8307256778f5380 Mon Sep 17 00:00:00 2001 From: Greg Fitzgerald Date: Mon, 30 Apr 2018 22:37:55 -0600 Subject: [PATCH 06/51] Version bump --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 877fbde79e..4317a31b15 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "solana" description = "High Performance Blockchain" -version = "0.5.0-beta" +version = "0.5.0" documentation = "https://docs.rs/solana" homepage = "http://solana.io/" repository = "https://github.com/solana-labs/solana" From ccb478c1f6c53066a6419c60cb9fb196d0315713 Mon Sep 17 00:00:00 2001 From: Robert Kelly Date: Tue, 1 May 2018 16:22:33 -0400 Subject: [PATCH 07/51] improved error handling and atomic transactions --- src/accountant.rs | 49 +++++++++++++++++++++++++++++++----------- src/accountant_skel.rs | 13 ++++++++--- 2 files changed, 46 insertions(+), 16 deletions(-) diff --git a/src/accountant.rs b/src/accountant.rs index 683474efa7..472cf2f19c 100644 --- a/src/accountant.rs +++ b/src/accountant.rs @@ -14,6 +14,7 @@ use rayon::prelude::*; use signature::{KeyPair, PublicKey, Signature}; use std::collections::hash_map::Entry::Occupied; use std::collections::{HashMap, HashSet, VecDeque}; +use std::sync::atomic::{AtomicIsize, Ordering}; use std::result; use std::sync::RwLock; use transaction::Transaction; @@ -23,6 +24,7 @@ pub const MAX_ENTRY_IDS: usize = 1024 * 4; #[derive(Debug, PartialEq, Eq)] pub enum AccountingError { AccountNotFound, + BalanceUpdatedBeforeTransactionCompleted, InsufficientFunds, InvalidTransferSignature, } @@ -30,18 +32,18 @@ pub enum AccountingError { pub type Result = result::Result; /// Commit funds to the 'to' party. -fn apply_payment(balances: &RwLock>>, payment: &Payment) { +fn apply_payment(balances: &RwLock>, payment: &Payment) { if balances.read().unwrap().contains_key(&payment.to) { let bals = balances.read().unwrap(); - *bals[&payment.to].write().unwrap() += payment.tokens; + bals[&payment.to].fetch_add(payment.tokens as isize, Ordering::Relaxed); } else { let mut bals = balances.write().unwrap(); - bals.insert(payment.to, RwLock::new(payment.tokens)); + bals.insert(payment.to, AtomicIsize::new(payment.tokens as isize)); } } pub struct Accountant { - balances: RwLock>>, + balances: RwLock>, pending: RwLock>, last_ids: RwLock>)>>, time_sources: RwLock>, @@ -131,23 +133,34 @@ impl Accountant { // Hold a write lock before the condition check, so that a debit can't occur // between checking the balance and the withdraw. 
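+        // Balances are now AtomicIsize: the check below uses a plain load and
+        // the debit is applied with compare_exchange, which fails if a
+        // concurrent update changed the balance first.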
let option = bals.get(&tr.from); + if option.is_none() { return Err(AccountingError::AccountNotFound); } - let mut bal = option.unwrap().write().unwrap(); if !self.reserve_signature_with_last_id(&tr.sig, &tr.data.last_id) { return Err(AccountingError::InvalidTransferSignature); } - if *bal < tr.data.tokens { + let bal = option.unwrap(); + let current = bal.load(Ordering::Relaxed) as i64; + + if current < tr.data.tokens { self.forget_signature_with_last_id(&tr.sig, &tr.data.last_id); return Err(AccountingError::InsufficientFunds); } - *bal -= tr.data.tokens; + let result = bal.compare_exchange( + current as isize, + (current - tr.data.tokens) as isize, + Ordering::Relaxed, + Ordering::Relaxed, + ); - Ok(()) + return match result { + Ok(_) => Ok(()), + Err(_) => Err(AccountingError::BalanceUpdatedBeforeTransactionCompleted), + } } pub fn process_verified_transaction_credits(&self, tr: &Transaction) { @@ -164,9 +177,15 @@ impl Accountant { /// Process a Transaction that has already been verified. pub fn process_verified_transaction(&self, tr: &Transaction) -> Result<()> { - self.process_verified_transaction_debits(tr)?; - self.process_verified_transaction_credits(tr); - Ok(()) + return match self.process_verified_transaction_debits(tr) { + Ok(_) => { + self.process_verified_transaction_credits(tr); + Ok(()) + }, + Err(err) => { + Err(err) + } + }; } /// Process a batch of verified transactions. @@ -174,7 +193,11 @@ impl Accountant { // Run all debits first to filter out any transactions that can't be processed // in parallel deterministically. let results: Vec<_> = trs.into_par_iter() - .map(|tr| self.process_verified_transaction_debits(&tr).map(|_| tr)) + .filter_map(|tr| match self.process_verified_transaction_debits(&tr) { + Ok(_x) => Some(Ok(tr)), + Err(_e) => None, + }) + // .flat_map(|tr| self.process_verified_transaction_debits(&tr).map(|_| tr)) .collect(); // Calling collect() here forces all debits to complete before moving on. results @@ -300,7 +323,7 @@ impl Accountant { pub fn get_balance(&self, pubkey: &PublicKey) -> Option { let bals = self.balances.read().unwrap(); - bals.get(pubkey).map(|x| *x.read().unwrap()) + bals.get(pubkey).map(|x| x.load(Ordering::Relaxed) as i64) } } diff --git a/src/accountant_skel.rs b/src/accountant_skel.rs index 9712c6c031..598cc54e58 100644 --- a/src/accountant_skel.rs +++ b/src/accountant_skel.rs @@ -757,6 +757,7 @@ mod bench { // Create transactions between unrelated parties. let txs = 100_000; let last_ids: Mutex> = Mutex::new(HashSet::new()); + let errors: Mutex = Mutex::new(0); let transactions: Vec<_> = (0..txs) .into_par_iter() .map(|i| { @@ -774,11 +775,17 @@ mod bench { // Seed the 'from' account. 
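                    // Every iteration seeds its keypair from the same mint
                    // account, so these parallel debits all race on one balance.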
let rando0 = KeyPair::new(); let tr = Transaction::new(&mint.keypair(), rando0.pubkey(), 1_000, last_id); - acc.process_verified_transaction(&tr).unwrap(); + // some of these will fail because balance updates before transaction completes + match acc.process_verified_transaction(&tr) { + Ok(_) => (), + Err(_) => *errors.lock().unwrap() += 1, + }; let rando1 = KeyPair::new(); let tr = Transaction::new(&rando0, rando1.pubkey(), 2, last_id); - acc.process_verified_transaction(&tr).unwrap(); + // these will fail if the prior transaction does not go through + // but won't typically fail otherwise since the addresses are randomly generated + let _ = acc.process_verified_transaction(&tr); // Finally, return a transaction that's unique Transaction::new(&rando0, rando1.pubkey(), 1, last_id) @@ -803,7 +810,7 @@ mod bench { drop(skel.historian.sender); let entries: Vec = skel.historian.receiver.iter().collect(); assert_eq!(entries.len(), 1); - assert_eq!(entries[0].events.len(), txs as usize); + assert_eq!(entries[0].events.len() + *errors.lock().unwrap(), txs as usize); println!("{} tps", tps); } From cb362e9052acdad9f22aa1893a75b1aa10f8b9b8 Mon Sep 17 00:00:00 2001 From: Robert Kelly Date: Tue, 1 May 2018 16:38:08 -0400 Subject: [PATCH 08/51] rust format --- src/accountant.rs | 6 ++---- src/accountant_skel.rs | 9 ++++++--- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/src/accountant.rs b/src/accountant.rs index 472cf2f19c..3be68a8d76 100644 --- a/src/accountant.rs +++ b/src/accountant.rs @@ -160,7 +160,7 @@ impl Accountant { return match result { Ok(_) => Ok(()), Err(_) => Err(AccountingError::BalanceUpdatedBeforeTransactionCompleted), - } + }; } pub fn process_verified_transaction_credits(&self, tr: &Transaction) { @@ -181,10 +181,8 @@ impl Accountant { Ok(_) => { self.process_verified_transaction_credits(tr); Ok(()) - }, - Err(err) => { - Err(err) } + Err(err) => Err(err), }; } diff --git a/src/accountant_skel.rs b/src/accountant_skel.rs index 598cc54e58..0c31c2b20a 100644 --- a/src/accountant_skel.rs +++ b/src/accountant_skel.rs @@ -777,8 +777,8 @@ mod bench { let tr = Transaction::new(&mint.keypair(), rando0.pubkey(), 1_000, last_id); // some of these will fail because balance updates before transaction completes match acc.process_verified_transaction(&tr) { - Ok(_) => (), - Err(_) => *errors.lock().unwrap() += 1, + Ok(_) => (), + Err(_) => *errors.lock().unwrap() += 1, }; let rando1 = KeyPair::new(); @@ -810,7 +810,10 @@ mod bench { drop(skel.historian.sender); let entries: Vec = skel.historian.receiver.iter().collect(); assert_eq!(entries.len(), 1); - assert_eq!(entries[0].events.len() + *errors.lock().unwrap(), txs as usize); + assert_eq!( + entries[0].events.len() + *errors.lock().unwrap(), + txs as usize + ); println!("{} tps", tps); } From b992a84d67d6e20ca7d1b18e8e68c3d480dfe054 Mon Sep 17 00:00:00 2001 From: Robert Kelly Date: Wed, 2 May 2018 10:15:08 -0400 Subject: [PATCH 09/51] modified verification to loop until success or failure --- src/accountant.rs | 37 +++++++++++++++++++------------------ 1 file changed, 19 insertions(+), 18 deletions(-) diff --git a/src/accountant.rs b/src/accountant.rs index 3be68a8d76..1f92ef844c 100644 --- a/src/accountant.rs +++ b/src/accountant.rs @@ -24,7 +24,6 @@ pub const MAX_ENTRY_IDS: usize = 1024 * 4; #[derive(Debug, PartialEq, Eq)] pub enum AccountingError { AccountNotFound, - BalanceUpdatedBeforeTransactionCompleted, InsufficientFunds, InvalidTransferSignature, } @@ -142,25 +141,27 @@ impl Accountant { return 
Err(AccountingError::InvalidTransferSignature); } - let bal = option.unwrap(); - let current = bal.load(Ordering::Relaxed) as i64; + loop { + let bal = option.unwrap(); + let current = bal.load(Ordering::Relaxed) as i64; - if current < tr.data.tokens { - self.forget_signature_with_last_id(&tr.sig, &tr.data.last_id); - return Err(AccountingError::InsufficientFunds); + if current < tr.data.tokens { + self.forget_signature_with_last_id(&tr.sig, &tr.data.last_id); + return Err(AccountingError::InsufficientFunds); + } + + let result = bal.compare_exchange( + current as isize, + (current - tr.data.tokens) as isize, + Ordering::Relaxed, + Ordering::Relaxed, + ); + + match result { + Ok(_) => return Ok(()), + Err(_) => continue, + }; } - - let result = bal.compare_exchange( - current as isize, - (current - tr.data.tokens) as isize, - Ordering::Relaxed, - Ordering::Relaxed, - ); - - return match result { - Ok(_) => Ok(()), - Err(_) => Err(AccountingError::BalanceUpdatedBeforeTransactionCompleted), - }; } pub fn process_verified_transaction_credits(&self, tr: &Transaction) { From 6b45d453b8ef6cf48259a3b68d34a148d4af1984 Mon Sep 17 00:00:00 2001 From: Robert Kelly Date: Wed, 2 May 2018 10:44:41 -0400 Subject: [PATCH 10/51] modified verification map --- src/accountant.rs | 19 ++++--------------- 1 file changed, 4 insertions(+), 15 deletions(-) diff --git a/src/accountant.rs b/src/accountant.rs index 1f92ef844c..735798f9f7 100644 --- a/src/accountant.rs +++ b/src/accountant.rs @@ -128,9 +128,6 @@ impl Accountant { /// funds and isn't a duplicate. pub fn process_verified_transaction_debits(&self, tr: &Transaction) -> Result<()> { let bals = self.balances.read().unwrap(); - - // Hold a write lock before the condition check, so that a debit can't occur - // between checking the balance and the withdraw. let option = bals.get(&tr.from); if option.is_none() { @@ -178,13 +175,9 @@ impl Accountant { /// Process a Transaction that has already been verified. pub fn process_verified_transaction(&self, tr: &Transaction) -> Result<()> { - return match self.process_verified_transaction_debits(tr) { - Ok(_) => { - self.process_verified_transaction_credits(tr); - Ok(()) - } - Err(err) => Err(err), - }; + self.process_verified_transaction_debits(tr)?; + self.process_verified_transaction_credits(tr); + Ok(()) } /// Process a batch of verified transactions. @@ -192,11 +185,7 @@ impl Accountant { // Run all debits first to filter out any transactions that can't be processed // in parallel deterministically. let results: Vec<_> = trs.into_par_iter() - .filter_map(|tr| match self.process_verified_transaction_debits(&tr) { - Ok(_x) => Some(Ok(tr)), - Err(_e) => None, - }) - // .flat_map(|tr| self.process_verified_transaction_debits(&tr).map(|_| tr)) + .map(|tr| self.process_verified_transaction_debits(&tr).map(|_| tr)) .collect(); // Calling collect() here forces all debits to complete before moving on. results From d0151d2b79a2221d73a7a9018d5fa5b905e2b7af Mon Sep 17 00:00:00 2001 From: Robert Kelly Date: Wed, 2 May 2018 12:04:05 -0400 Subject: [PATCH 11/51] restored original test logic --- src/accountant_skel.rs | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/src/accountant_skel.rs b/src/accountant_skel.rs index 0c31c2b20a..c5969fe097 100644 --- a/src/accountant_skel.rs +++ b/src/accountant_skel.rs @@ -757,7 +757,6 @@ mod bench { // Create transactions between unrelated parties. 
let txs = 100_000; let last_ids: Mutex> = Mutex::new(HashSet::new()); - let errors: Mutex = Mutex::new(0); let transactions: Vec<_> = (0..txs) .into_par_iter() .map(|i| { @@ -775,16 +774,10 @@ mod bench { // Seed the 'from' account. let rando0 = KeyPair::new(); let tr = Transaction::new(&mint.keypair(), rando0.pubkey(), 1_000, last_id); - // some of these will fail because balance updates before transaction completes - match acc.process_verified_transaction(&tr) { - Ok(_) => (), - Err(_) => *errors.lock().unwrap() += 1, - }; + let _ = acc.process_verified_transaction(&tr); let rando1 = KeyPair::new(); let tr = Transaction::new(&rando0, rando1.pubkey(), 2, last_id); - // these will fail if the prior transaction does not go through - // but won't typically fail otherwise since the addresses are randomly generated let _ = acc.process_verified_transaction(&tr); // Finally, return a transaction that's unique @@ -811,7 +804,7 @@ mod bench { let entries: Vec = skel.historian.receiver.iter().collect(); assert_eq!(entries.len(), 1); assert_eq!( - entries[0].events.len() + *errors.lock().unwrap(), + entries[0].events.len(), txs as usize ); From cc6de605ace8adad853cfefd2eba26dd6224940b Mon Sep 17 00:00:00 2001 From: Robert Kelly Date: Wed, 2 May 2018 12:21:20 -0400 Subject: [PATCH 12/51] rustfmt --- src/accountant.rs | 2 +- src/accountant_skel.rs | 23 ++++++++++------------- 2 files changed, 11 insertions(+), 14 deletions(-) diff --git a/src/accountant.rs b/src/accountant.rs index 735798f9f7..49c5d2c883 100644 --- a/src/accountant.rs +++ b/src/accountant.rs @@ -14,8 +14,8 @@ use rayon::prelude::*; use signature::{KeyPair, PublicKey, Signature}; use std::collections::hash_map::Entry::Occupied; use std::collections::{HashMap, HashSet, VecDeque}; -use std::sync::atomic::{AtomicIsize, Ordering}; use std::result; +use std::sync::atomic::{AtomicIsize, Ordering}; use std::sync::RwLock; use transaction::Transaction; diff --git a/src/accountant_skel.rs b/src/accountant_skel.rs index c5969fe097..65a0ddd097 100644 --- a/src/accountant_skel.rs +++ b/src/accountant_skel.rs @@ -487,14 +487,14 @@ mod tests { use std::time::Duration; use transaction::Transaction; - use subscribers::{Node, Subscribers}; - use streamer; - use std::sync::mpsc::channel; - use std::collections::VecDeque; - use hash::{hash, Hash}; - use event::Event; - use entry; use chrono::prelude::*; + use entry; + use event::Event; + use hash::{hash, Hash}; + use std::collections::VecDeque; + use std::sync::mpsc::channel; + use streamer; + use subscribers::{Node, Subscribers}; #[test] fn test_layout() { @@ -774,11 +774,11 @@ mod bench { // Seed the 'from' account. 
let rando0 = KeyPair::new(); let tr = Transaction::new(&mint.keypair(), rando0.pubkey(), 1_000, last_id); - let _ = acc.process_verified_transaction(&tr); + acc.process_verified_transaction(&tr).unwrap(); let rando1 = KeyPair::new(); let tr = Transaction::new(&rando0, rando1.pubkey(), 2, last_id); - let _ = acc.process_verified_transaction(&tr); + acc.process_verified_transaction(&tr).unwrap(); // Finally, return a transaction that's unique Transaction::new(&rando0, rando1.pubkey(), 1, last_id) @@ -803,10 +803,7 @@ mod bench { drop(skel.historian.sender); let entries: Vec = skel.historian.receiver.iter().collect(); assert_eq!(entries.len(), 1); - assert_eq!( - entries[0].events.len(), - txs as usize - ); + assert_eq!(entries[0].events.len(), txs as usize); println!("{} tps", tps); } From 63cf6363a2424b61c5649e443135b7c3dda5c6bf Mon Sep 17 00:00:00 2001 From: Robert Kelly Date: Wed, 2 May 2018 12:24:25 -0400 Subject: [PATCH 13/51] more rustfmt --- src/packet.rs | 2 +- src/result.rs | 2 +- src/streamer.rs | 5 +++-- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/src/packet.rs b/src/packet.rs index d97b261e9f..c4b09eb56e 100644 --- a/src/packet.rs +++ b/src/packet.rs @@ -4,9 +4,9 @@ use result::{Error, Result}; use std::collections::VecDeque; use std::fmt; use std::io; +use std::mem::size_of; use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, UdpSocket}; use std::sync::{Arc, Mutex, RwLock}; -use std::mem::size_of; pub type SharedPackets = Arc>; pub type SharedBlob = Arc>; diff --git a/src/result.rs b/src/result.rs index 01872dfbe1..532a64c3b2 100644 --- a/src/result.rs +++ b/src/result.rs @@ -1,10 +1,10 @@ //! The `result` module exposes a Result type that propagates one of many different Error types. +use accountant; use bincode; use serde_json; use std; use std::any::Any; -use accountant; #[derive(Debug)] pub enum Error { diff --git a/src/streamer.rs b/src/streamer.rs index 43e6f2ac35..7f0e7fbdba 100644 --- a/src/streamer.rs +++ b/src/streamer.rs @@ -382,8 +382,9 @@ mod test { use std::sync::mpsc::channel; use std::sync::{Arc, RwLock}; use std::time::Duration; - use streamer::{blob_receiver, receiver, responder, retransmitter, window, BlobReceiver, - PacketReceiver}; + use streamer::{ + blob_receiver, receiver, responder, retransmitter, window, BlobReceiver, PacketReceiver, + }; use subscribers::{Node, Subscribers}; fn get_msgs(r: PacketReceiver, num: &mut usize) { From 48d94143e78d7e474dee0460b2e39948c9a6052d Mon Sep 17 00:00:00 2001 From: Greg Fitzgerald Date: Wed, 2 May 2018 11:05:11 -0600 Subject: [PATCH 14/51] Fix CI --- src/streamer.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/streamer.rs b/src/streamer.rs index 7f0e7fbdba..43e6f2ac35 100644 --- a/src/streamer.rs +++ b/src/streamer.rs @@ -382,9 +382,8 @@ mod test { use std::sync::mpsc::channel; use std::sync::{Arc, RwLock}; use std::time::Duration; - use streamer::{ - blob_receiver, receiver, responder, retransmitter, window, BlobReceiver, PacketReceiver, - }; + use streamer::{blob_receiver, receiver, responder, retransmitter, window, BlobReceiver, + PacketReceiver}; use subscribers::{Node, Subscribers}; fn get_msgs(r: PacketReceiver, num: &mut usize) { From c5cc91443e497f62bfaa457a00c54e74441b94ce Mon Sep 17 00:00:00 2001 From: Greg Fitzgerald Date: Wed, 2 May 2018 15:54:53 -0600 Subject: [PATCH 15/51] Rename sender/receiver to input/output --- src/accountant_skel.rs | 16 ++++++++-------- src/bin/historian-demo.rs | 6 +++--- src/historian.rs | 36 ++++++++++++++++++------------------ 
3 files changed, 29 insertions(+), 29 deletions(-) diff --git a/src/accountant_skel.rs b/src/accountant_skel.rs index 65a0ddd097..d3141f7084 100644 --- a/src/accountant_skel.rs +++ b/src/accountant_skel.rs @@ -105,7 +105,7 @@ impl AccountantSkel { /// Process any Entry items that have been published by the Historian. pub fn sync(&mut self) -> Hash { - while let Ok(entry) = self.historian.receiver.try_recv() { + while let Ok(entry) = self.historian.output.try_recv() { self.last_id = entry.id; self.acc.register_entry_id(&self.last_id); writeln!(self.writer, "{}", serde_json::to_string(&entry).unwrap()).unwrap(); @@ -215,14 +215,14 @@ impl AccountantSkel { for result in self.acc.process_verified_transactions(trs) { if let Ok(tr) = result { self.historian - .sender + .input .send(Signal::Event(Event::Transaction(tr)))?; } } // Let validators know they should not attempt to process additional // transactions in parallel. - self.historian.sender.send(Signal::Tick)?; + self.historian.input.send(Signal::Tick)?; // Process the remaining requests serially. let rsps = reqs.into_iter() @@ -545,9 +545,9 @@ mod tests { assert!(skel.process_packets(req_vers).is_ok()); // Collect the ledger and feed it to a new accountant. - skel.historian.sender.send(Signal::Tick).unwrap(); - drop(skel.historian.sender); - let entries: Vec = skel.historian.receiver.iter().collect(); + skel.historian.input.send(Signal::Tick).unwrap(); + drop(skel.historian.input); + let entries: Vec = skel.historian.output.iter().collect(); // Assert the user holds one token, not two. If the server only output one // entry, then the second transaction will be rejected, because it drives @@ -800,8 +800,8 @@ mod bench { let tps = txs as f64 / sec; // Ensure that all transactions were successfully logged. 
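        // Dropping the input side closes the channel, which lets the historian
        // thread exit and makes the output iterator below terminate.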
- drop(skel.historian.sender); - let entries: Vec = skel.historian.receiver.iter().collect(); + drop(skel.historian.input); + let entries: Vec = skel.historian.output.iter().collect(); assert_eq!(entries.len(), 1); assert_eq!(entries[0].events.len(), txs as usize); diff --git a/src/bin/historian-demo.rs b/src/bin/historian-demo.rs index 306b8ffbaa..fe4180c457 100644 --- a/src/bin/historian-demo.rs +++ b/src/bin/historian-demo.rs @@ -17,7 +17,7 @@ fn create_ledger(hist: &Historian, seed: &Hash) -> Result<(), SendError> let keypair = KeyPair::new(); let tr = Transaction::new(&keypair, keypair.pubkey(), 42, *seed); let signal0 = Signal::Event(Event::Transaction(tr)); - hist.sender.send(signal0)?; + hist.input.send(signal0)?; sleep(Duration::from_millis(10)); Ok(()) } @@ -26,8 +26,8 @@ fn main() { let seed = Hash::default(); let hist = Historian::new(&seed, Some(10)); create_ledger(&hist, &seed).expect("send error"); - drop(hist.sender); - let entries: Vec = hist.receiver.iter().collect(); + drop(hist.input); + let entries: Vec = hist.output.iter().collect(); for entry in &entries { println!("{:?}", entry); } diff --git a/src/historian.rs b/src/historian.rs index 412027846f..970a75db0b 100644 --- a/src/historian.rs +++ b/src/historian.rs @@ -9,20 +9,20 @@ use std::thread::{spawn, JoinHandle}; use std::time::Instant; pub struct Historian { - pub sender: SyncSender, - pub receiver: Receiver, + pub input: SyncSender, + pub output: Receiver, pub thread_hdl: JoinHandle, } impl Historian { pub fn new(start_hash: &Hash, ms_per_tick: Option) -> Self { - let (sender, event_receiver) = sync_channel(10_000); - let (entry_sender, receiver) = sync_channel(10_000); + let (input, event_receiver) = sync_channel(10_000); + let (entry_sender, output) = sync_channel(10_000); let thread_hdl = Historian::create_recorder(*start_hash, ms_per_tick, event_receiver, entry_sender); Historian { - sender, - receiver, + input, + output, thread_hdl, } } @@ -62,21 +62,21 @@ mod tests { let zero = Hash::default(); let hist = Historian::new(&zero, None); - hist.sender.send(Signal::Tick).unwrap(); + hist.input.send(Signal::Tick).unwrap(); sleep(Duration::new(0, 1_000_000)); - hist.sender.send(Signal::Tick).unwrap(); + hist.input.send(Signal::Tick).unwrap(); sleep(Duration::new(0, 1_000_000)); - hist.sender.send(Signal::Tick).unwrap(); + hist.input.send(Signal::Tick).unwrap(); - let entry0 = hist.receiver.recv().unwrap(); - let entry1 = hist.receiver.recv().unwrap(); - let entry2 = hist.receiver.recv().unwrap(); + let entry0 = hist.output.recv().unwrap(); + let entry1 = hist.output.recv().unwrap(); + let entry2 = hist.output.recv().unwrap(); assert_eq!(entry0.num_hashes, 0); assert_eq!(entry1.num_hashes, 0); assert_eq!(entry2.num_hashes, 0); - drop(hist.sender); + drop(hist.input); assert_eq!( hist.thread_hdl.join().unwrap(), ExitReason::RecvDisconnected @@ -89,8 +89,8 @@ mod tests { fn test_historian_closed_sender() { let zero = Hash::default(); let hist = Historian::new(&zero, None); - drop(hist.receiver); - hist.sender.send(Signal::Tick).unwrap(); + drop(hist.output); + hist.input.send(Signal::Tick).unwrap(); assert_eq!( hist.thread_hdl.join().unwrap(), ExitReason::SendDisconnected @@ -102,9 +102,9 @@ mod tests { let zero = Hash::default(); let hist = Historian::new(&zero, Some(20)); sleep(Duration::from_millis(300)); - hist.sender.send(Signal::Tick).unwrap(); - drop(hist.sender); - let entries: Vec = hist.receiver.iter().collect(); + hist.input.send(Signal::Tick).unwrap(); + drop(hist.input); + let entries: Vec = 
hist.output.iter().collect(); assert!(entries.len() > 1); // Ensure the ID is not the seed. From 4b9f11558691fd40777706ca3831803c320b309d Mon Sep 17 00:00:00 2001 From: Greg Fitzgerald Date: Wed, 2 May 2018 16:35:37 -0600 Subject: [PATCH 16/51] Hoist Historian input --- src/accountant_skel.rs | 42 ++++++++++++++++++++++++++------------- src/accountant_stub.rs | 5 ++++- src/bin/historian-demo.rs | 13 ++++++------ src/bin/testnode.rs | 5 ++++- src/historian.rs | 37 +++++++++++++++++----------------- 5 files changed, 62 insertions(+), 40 deletions(-) diff --git a/src/accountant_skel.rs b/src/accountant_skel.rs index d3141f7084..527e57a9f0 100644 --- a/src/accountant_skel.rs +++ b/src/accountant_skel.rs @@ -21,7 +21,7 @@ use std::collections::VecDeque; use std::io::Write; use std::net::{SocketAddr, UdpSocket}; use std::sync::atomic::{AtomicBool, Ordering}; -use std::sync::mpsc::{channel, Receiver, Sender}; +use std::sync::mpsc::{channel, Receiver, Sender, SyncSender}; use std::sync::{Arc, Mutex, RwLock}; use std::thread::{spawn, JoinHandle}; use std::time::Duration; @@ -34,6 +34,7 @@ pub struct AccountantSkel { acc: Accountant, last_id: Hash, writer: W, + historian_input: SyncSender, historian: Historian, entry_info_subscribers: Vec, } @@ -78,11 +79,18 @@ pub enum Response { impl AccountantSkel { /// Create a new AccountantSkel that wraps the given Accountant. - pub fn new(acc: Accountant, last_id: Hash, writer: W, historian: Historian) -> Self { + pub fn new( + acc: Accountant, + last_id: Hash, + writer: W, + historian_input: SyncSender, + historian: Historian, + ) -> Self { AccountantSkel { acc, last_id, writer, + historian_input, historian, entry_info_subscribers: vec![], } @@ -214,15 +222,14 @@ impl AccountantSkel { // Process the transactions in parallel and then log the successful ones. for result in self.acc.process_verified_transactions(trs) { if let Ok(tr) = result { - self.historian - .input + self.historian_input .send(Signal::Event(Event::Transaction(tr)))?; } } // Let validators know they should not attempt to process additional // transactions in parallel. - self.historian.input.send(Signal::Tick)?; + self.historian_input.send(Signal::Tick)?; // Process the remaining requests serially. let rsps = reqs.into_iter() @@ -482,6 +489,7 @@ mod tests { use std::io::sink; use std::net::{SocketAddr, UdpSocket}; use std::sync::atomic::{AtomicBool, Ordering}; + use std::sync::mpsc::sync_channel; use std::sync::{Arc, Mutex}; use std::thread::sleep; use std::time::Duration; @@ -530,8 +538,9 @@ mod tests { let mint = Mint::new(2); let acc = Accountant::new(&mint); let rsp_addr: SocketAddr = "0.0.0.0:0".parse().expect("socket address"); - let historian = Historian::new(&mint.last_id(), None); - let mut skel = AccountantSkel::new(acc, mint.last_id(), sink(), historian); + let (input, event_receiver) = sync_channel(10); + let historian = Historian::new(event_receiver, &mint.last_id(), None); + let mut skel = AccountantSkel::new(acc, mint.last_id(), sink(), input, historian); // Process a batch that includes a transaction that receives two tokens. let alice = KeyPair::new(); @@ -545,8 +554,8 @@ mod tests { assert!(skel.process_packets(req_vers).is_ok()); // Collect the ledger and feed it to a new accountant. - skel.historian.input.send(Signal::Tick).unwrap(); - drop(skel.historian.input); + skel.historian_input.send(Signal::Tick).unwrap(); + drop(skel.historian_input); let entries: Vec = skel.historian.output.iter().collect(); // Assert the user holds one token, not two. 
If the server only output one @@ -569,11 +578,13 @@ mod tests { let acc = Accountant::new(&alice); let bob_pubkey = KeyPair::new().pubkey(); let exit = Arc::new(AtomicBool::new(false)); - let historian = Historian::new(&alice.last_id(), Some(30)); + let (input, event_receiver) = sync_channel(10); + let historian = Historian::new(event_receiver, &alice.last_id(), Some(30)); let acc = Arc::new(Mutex::new(AccountantSkel::new( acc, alice.last_id(), sink(), + input, historian, ))); let _threads = AccountantSkel::serve(&acc, &addr, exit.clone()).unwrap(); @@ -651,11 +662,13 @@ mod tests { let starting_balance = 10_000; let alice = Mint::new(starting_balance); let acc = Accountant::new(&alice); - let historian = Historian::new(&alice.last_id(), Some(30)); + let (input, event_receiver) = sync_channel(10); + let historian = Historian::new(event_receiver, &alice.last_id(), Some(30)); let acc = Arc::new(Mutex::new(AccountantSkel::new( acc, alice.last_id(), sink(), + input, historian, ))); @@ -790,8 +803,9 @@ mod bench { .map(|tr| (Request::Transaction(tr), rsp_addr, 1_u8)) .collect(); - let historian = Historian::new(&mint.last_id(), None); - let mut skel = AccountantSkel::new(acc, mint.last_id(), sink(), historian); + let (input, event_receiver) = sync_channel(10); + let historian = Historian::new(event_receiver, &mint.last_id(), None); + let mut skel = AccountantSkel::new(acc, mint.last_id(), sink(), input, historian); let now = Instant::now(); assert!(skel.process_packets(req_vers).is_ok()); @@ -800,7 +814,7 @@ mod bench { let tps = txs as f64 / sec; // Ensure that all transactions were successfully logged. - drop(skel.historian.input); + drop(input); let entries: Vec = skel.historian.output.iter().collect(); assert_eq!(entries.len(), 1); assert_eq!(entries[0].events.len(), txs as usize); diff --git a/src/accountant_stub.rs b/src/accountant_stub.rs index 2dd331da7b..d03866099c 100644 --- a/src/accountant_stub.rs +++ b/src/accountant_stub.rs @@ -165,6 +165,7 @@ mod tests { use signature::{KeyPair, KeyPairUtil}; use std::io::sink; use std::sync::atomic::{AtomicBool, Ordering}; + use std::sync::mpsc::sync_channel; use std::sync::{Arc, Mutex}; use std::thread::sleep; use std::time::Duration; @@ -178,11 +179,13 @@ mod tests { let acc = Accountant::new(&alice); let bob_pubkey = KeyPair::new().pubkey(); let exit = Arc::new(AtomicBool::new(false)); - let historian = Historian::new(&alice.last_id(), Some(30)); + let (input, event_receiver) = sync_channel(10); + let historian = Historian::new(event_receiver, &alice.last_id(), Some(30)); let acc = Arc::new(Mutex::new(AccountantSkel::new( acc, alice.last_id(), sink(), + input, historian, ))); let _threads = AccountantSkel::serve(&acc, addr, exit.clone()).unwrap(); diff --git a/src/bin/historian-demo.rs b/src/bin/historian-demo.rs index fe4180c457..010391cbab 100644 --- a/src/bin/historian-demo.rs +++ b/src/bin/historian-demo.rs @@ -8,25 +8,26 @@ use solana::ledger::Block; use solana::recorder::Signal; use solana::signature::{KeyPair, KeyPairUtil}; use solana::transaction::Transaction; -use std::sync::mpsc::SendError; +use std::sync::mpsc::{sync_channel, SendError, SyncSender}; use std::thread::sleep; use std::time::Duration; -fn create_ledger(hist: &Historian, seed: &Hash) -> Result<(), SendError> { +fn create_ledger(input: &SyncSender, seed: &Hash) -> Result<(), SendError> { sleep(Duration::from_millis(15)); let keypair = KeyPair::new(); let tr = Transaction::new(&keypair, keypair.pubkey(), 42, *seed); let signal0 = Signal::Event(Event::Transaction(tr)); - 
hist.input.send(signal0)?; + input.send(signal0)?; sleep(Duration::from_millis(10)); Ok(()) } fn main() { + let (input, event_receiver) = sync_channel(10); let seed = Hash::default(); - let hist = Historian::new(&seed, Some(10)); - create_ledger(&hist, &seed).expect("send error"); - drop(hist.input); + let hist = Historian::new(event_receiver, &seed, Some(10)); + create_ledger(&input, &seed).expect("send error"); + drop(input); let entries: Vec = hist.output.iter().collect(); for entry in &entries { println!("{:?}", entry); diff --git a/src/bin/testnode.rs b/src/bin/testnode.rs index 8b6da2c1c8..6a6d8ac9ea 100644 --- a/src/bin/testnode.rs +++ b/src/bin/testnode.rs @@ -15,6 +15,7 @@ use std::env; use std::io::{stdin, stdout, Read}; use std::process::exit; use std::sync::atomic::AtomicBool; +use std::sync::mpsc::sync_channel; use std::sync::{Arc, Mutex}; fn print_usage(program: &str, opts: Options) { @@ -95,12 +96,14 @@ fn main() { acc.register_entry_id(&last_id); } - let historian = Historian::new(&last_id, Some(1000)); + let (input, event_receiver) = sync_channel(10_000); + let historian = Historian::new(event_receiver, &last_id, Some(1000)); let exit = Arc::new(AtomicBool::new(false)); let skel = Arc::new(Mutex::new(AccountantSkel::new( acc, last_id, stdout(), + input, historian, ))); let threads = AccountantSkel::serve(&skel, &addr, exit.clone()).unwrap(); diff --git a/src/historian.rs b/src/historian.rs index 970a75db0b..0d56b1deab 100644 --- a/src/historian.rs +++ b/src/historian.rs @@ -9,22 +9,20 @@ use std::thread::{spawn, JoinHandle}; use std::time::Instant; pub struct Historian { - pub input: SyncSender, pub output: Receiver, pub thread_hdl: JoinHandle, } impl Historian { - pub fn new(start_hash: &Hash, ms_per_tick: Option) -> Self { - let (input, event_receiver) = sync_channel(10_000); + pub fn new( + event_receiver: Receiver, + start_hash: &Hash, + ms_per_tick: Option, + ) -> Self { let (entry_sender, output) = sync_channel(10_000); let thread_hdl = Historian::create_recorder(*start_hash, ms_per_tick, event_receiver, entry_sender); - Historian { - input, - output, - thread_hdl, - } + Historian { output, thread_hdl } } /// A background thread that will continue tagging received Event messages and @@ -59,14 +57,15 @@ mod tests { #[test] fn test_historian() { + let (input, event_receiver) = sync_channel(10); let zero = Hash::default(); - let hist = Historian::new(&zero, None); + let hist = Historian::new(event_receiver, &zero, None); - hist.input.send(Signal::Tick).unwrap(); + input.send(Signal::Tick).unwrap(); sleep(Duration::new(0, 1_000_000)); - hist.input.send(Signal::Tick).unwrap(); + input.send(Signal::Tick).unwrap(); sleep(Duration::new(0, 1_000_000)); - hist.input.send(Signal::Tick).unwrap(); + input.send(Signal::Tick).unwrap(); let entry0 = hist.output.recv().unwrap(); let entry1 = hist.output.recv().unwrap(); @@ -76,7 +75,7 @@ mod tests { assert_eq!(entry1.num_hashes, 0); assert_eq!(entry2.num_hashes, 0); - drop(hist.input); + drop(input); assert_eq!( hist.thread_hdl.join().unwrap(), ExitReason::RecvDisconnected @@ -87,10 +86,11 @@ mod tests { #[test] fn test_historian_closed_sender() { + let (input, event_receiver) = sync_channel(10); let zero = Hash::default(); - let hist = Historian::new(&zero, None); + let hist = Historian::new(event_receiver, &zero, None); drop(hist.output); - hist.input.send(Signal::Tick).unwrap(); + input.send(Signal::Tick).unwrap(); assert_eq!( hist.thread_hdl.join().unwrap(), ExitReason::SendDisconnected @@ -99,11 +99,12 @@ mod tests { #[test] fn 
test_ticking_historian() { + let (input, event_receiver) = sync_channel(10); let zero = Hash::default(); - let hist = Historian::new(&zero, Some(20)); + let hist = Historian::new(event_receiver, &zero, Some(20)); sleep(Duration::from_millis(300)); - hist.input.send(Signal::Tick).unwrap(); - drop(hist.input); + input.send(Signal::Tick).unwrap(); + drop(input); let entries: Vec = hist.output.iter().collect(); assert!(entries.len() > 1); From 00a16db9cde2ff677b636cd21bb231ce67424167 Mon Sep 17 00:00:00 2001 From: kwangin Date: Thu, 3 May 2018 08:38:09 +0900 Subject: [PATCH 17/51] Add comment about rustc version in README --- README.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/README.md b/README.md index 7f4b4f24df..61c7d8dc2b 100644 --- a/README.md +++ b/README.md @@ -106,6 +106,13 @@ $ source $HOME/.cargo/env $ rustup component add rustfmt-preview ``` +If your rustc version is lower than 1.25.0, please update and install rustfmt: + +```bash +$ rustup update +$ rustup component add rustfmt-preview +``` + Download the source code: ```bash From ec8cfc77adb3c3bdeb550cffa4e57338c9ff1ce8 Mon Sep 17 00:00:00 2001 From: kwangin Date: Thu, 3 May 2018 09:04:56 +0900 Subject: [PATCH 18/51] Remove component adding part --- README.md | 1 - 1 file changed, 1 deletion(-) diff --git a/README.md b/README.md index 61c7d8dc2b..93d2af8ac3 100644 --- a/README.md +++ b/README.md @@ -110,7 +110,6 @@ If your rustc version is lower than 1.25.0, please update and install rustfmt: ```bash $ rustup update -$ rustup component add rustfmt-preview ``` Download the source code: From b950e33d81c2dc6d8e323bb51440455048453a55 Mon Sep 17 00:00:00 2001 From: kwangin Date: Thu, 3 May 2018 09:06:41 +0900 Subject: [PATCH 19/51] Remove useless comment --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 93d2af8ac3..46bd76e70e 100644 --- a/README.md +++ b/README.md @@ -106,7 +106,7 @@ $ source $HOME/.cargo/env $ rustup component add rustfmt-preview ``` -If your rustc version is lower than 1.25.0, please update and install rustfmt: +If your rustc version is lower than 1.25.0, please update it: ```bash $ rustup update From b65f04d5003674a50106f6ffe04bc89ac4e94c1b Mon Sep 17 00:00:00 2001 From: Anatoly Yakovenko Date: Wed, 2 May 2018 20:38:07 -0700 Subject: [PATCH 20/51] multi host client demo Bind to the same interface as the user supplied client address. 
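A standalone sketch of the binding pattern (the address literal and the
`main` wrapper are illustrative, not part of this patch):

    use std::net::{SocketAddr, UdpSocket};

    fn main() -> std::io::Result<()> {
        // Keep the interface of the user-supplied client address, but let
        // the OS assign an ephemeral port for each sender socket.
        let mut client_addr: SocketAddr = "127.0.0.1:8001".parse().unwrap();
        client_addr.set_port(0);
        let socket = UdpSocket::bind(client_addr)?;
        println!("bound to {}", socket.local_addr()?);
        Ok(())
    }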
--- src/accountant_stub.rs | 2 ++ src/bin/client-demo.rs | 17 ++++++++++------- 2 files changed, 12 insertions(+), 7 deletions(-) diff --git a/src/accountant_stub.rs b/src/accountant_stub.rs index 2dd331da7b..900ca7a8fb 100644 --- a/src/accountant_stub.rs +++ b/src/accountant_stub.rs @@ -41,6 +41,7 @@ impl AccountantStub { let subscriptions = vec![Subscription::EntryInfo]; let req = Request::Subscribe { subscriptions }; let data = serialize(&req).expect("serialize Subscribe"); + trace!("subscribing to {}", self.addr); let _res = self.socket.send_to(&data, &self.addr); } @@ -114,6 +115,7 @@ impl AccountantStub { pub fn get_last_id(&mut self) -> FutureResult { let req = Request::GetLastId; let data = serialize(&req).expect("serialize GetId"); + assert!(data.len() < 4096); self.socket .send_to(&data, &self.addr) .expect("buffer error"); diff --git a/src/bin/client-demo.rs b/src/bin/client-demo.rs index 50f2e8a2ec..ece7cc876f 100644 --- a/src/bin/client-demo.rs +++ b/src/bin/client-demo.rs @@ -16,7 +16,7 @@ use solana::signature::{KeyPair, KeyPairUtil}; use solana::transaction::Transaction; use std::env; use std::io::{stdin, Read}; -use std::net::UdpSocket; +use std::net::{SocketAddr, UdpSocket}; use std::process::exit; use std::time::Instant; use untrusted::Input; @@ -33,7 +33,7 @@ fn print_usage(program: &str, opts: Options) { fn main() { let mut threads = 4usize; let mut addr: String = "127.0.0.1:8000".to_string(); - let mut send_addr: String = "127.0.0.1:8001".to_string(); + let mut client_addr: String = "127.0.0.1:8001".to_string(); let mut opts = Options::new(); opts.optopt("s", "", "server address", "host:port"); @@ -58,7 +58,7 @@ fn main() { addr = matches.opt_str("s").unwrap(); } if matches.opt_present("c") { - send_addr = matches.opt_str("c").unwrap(); + client_addr = matches.opt_str("c").unwrap(); } if matches.opt_present("t") { threads = matches.opt_str("t").unwrap().parse().expect("integer"); @@ -82,11 +82,13 @@ fn main() { exit(1); }); - let socket = UdpSocket::bind(&send_addr).unwrap(); + println!("Binding to {}", client_addr); + let socket = UdpSocket::bind(&client_addr).unwrap(); let mut acc = AccountantStub::new(&addr, socket); println!("Get last ID..."); let last_id = acc.get_last_id().wait().unwrap(); + println!("Got last ID {:?}", last_id); println!("Creating keypairs..."); let txs = demo.users.len() / 2; @@ -119,9 +121,10 @@ fn main() { let sz = transactions.len() / threads; let chunks: Vec<_> = transactions.chunks(sz).collect(); chunks.into_par_iter().for_each(|trs| { - println!("Transferring 1 unit {} times...", trs.len()); - let send_addr = "0.0.0.0:0"; - let socket = UdpSocket::bind(send_addr).unwrap(); + println!("Transferring 1 unit {} times... 
to", trs.len()); + let mut client_addr: SocketAddr = client_addr.parse().unwrap(); + client_addr.set_port(0); + let socket = UdpSocket::bind(client_addr).unwrap(); let acc = AccountantStub::new(&addr, socket); for tr in trs { acc.transfer_signed(tr.clone()).unwrap(); From 078f917e61f9a0790f06e9ba9403356e6fad729b Mon Sep 17 00:00:00 2001 From: Anatoly Yakovenko Date: Thu, 3 May 2018 08:34:57 -0700 Subject: [PATCH 21/51] useless assert --- src/accountant_stub.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/src/accountant_stub.rs b/src/accountant_stub.rs index 900ca7a8fb..f0c4890d39 100644 --- a/src/accountant_stub.rs +++ b/src/accountant_stub.rs @@ -115,7 +115,6 @@ impl AccountantStub { pub fn get_last_id(&mut self) -> FutureResult { let req = Request::GetLastId; let data = serialize(&req).expect("serialize GetId"); - assert!(data.len() < 4096); self.socket .send_to(&data, &self.addr) .expect("buffer error"); From ae29c9b4a0812afbc7ba0b36a8b262ba053efd16 Mon Sep 17 00:00:00 2001 From: Greg Fitzgerald Date: Thu, 3 May 2018 09:38:59 -0600 Subject: [PATCH 22/51] Fix nightly build --- src/accountant_skel.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/accountant_skel.rs b/src/accountant_skel.rs index 527e57a9f0..76496a3252 100644 --- a/src/accountant_skel.rs +++ b/src/accountant_skel.rs @@ -759,6 +759,7 @@ mod bench { use signature::{KeyPair, KeyPairUtil}; use std::collections::HashSet; use std::io::sink; + use std::sync::mpsc::sync_channel; use std::time::Instant; use transaction::Transaction; @@ -814,7 +815,7 @@ mod bench { let tps = txs as f64 / sec; // Ensure that all transactions were successfully logged. - drop(input); + drop(skel.historian_input); let entries: Vec = skel.historian.output.iter().collect(); assert_eq!(entries.len(), 1); assert_eq!(entries[0].events.len(), txs as usize); From c2e2960bf74e2f2f54947600bd92b315c317f843 Mon Sep 17 00:00:00 2001 From: Anatoly Yakovenko Date: Sat, 28 Apr 2018 00:31:20 -0700 Subject: [PATCH 23/51] Add broadcast impl --- src/accountant_skel.rs | 477 +++++++++++++++++++++++++++----------- src/accountant_stub.rs | 39 ++-- src/bin/client-demo.rs | 4 +- src/bin/historian-demo.rs | 2 +- src/bin/testnode.rs | 32 ++- src/crdt.rs | 276 +++++++++++++++++++--- src/erasure.rs | 5 +- src/historian.rs | 23 +- src/lib.rs | 2 +- src/logger.rs | 11 + src/packet.rs | 28 ++- src/streamer.rs | 221 ++++++++++++++---- src/subscribers.rs | 149 ------------ 13 files changed, 848 insertions(+), 421 deletions(-) create mode 100644 src/logger.rs delete mode 100644 src/subscribers.rs diff --git a/src/accountant_skel.rs b/src/accountant_skel.rs index 76496a3252..bd8c7ddd24 100644 --- a/src/accountant_skel.rs +++ b/src/accountant_skel.rs @@ -3,22 +3,25 @@ //! in flux. Clients should use AccountantStub to interact with it. 
use accountant::Accountant; -use bincode::{deserialize, serialize}; +use bincode::{deserialize, serialize, serialize_into}; +use crdt::{Crdt, ReplicatedData}; use ecdsa; use entry::Entry; use event::Event; use hash::Hash; use historian::Historian; use packet; -use packet::SharedPackets; +use packet::{SharedPackets, BLOB_SIZE}; use rayon::prelude::*; use recorder::Signal; use result::Result; use serde_json; use signature::PublicKey; use std::cmp::max; +use std::collections::LinkedList; use std::collections::VecDeque; -use std::io::Write; +use std::io::{Cursor, Write}; +use std::mem::size_of; use std::net::{SocketAddr, UdpSocket}; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::mpsc::{channel, Receiver, Sender, SyncSender}; @@ -28,15 +31,12 @@ use std::time::Duration; use streamer; use transaction::Transaction; -use subscribers; - -pub struct AccountantSkel { - acc: Accountant, - last_id: Hash, - writer: W, - historian_input: SyncSender, +pub struct AccountantSkel { + acc: Mutex, + last_id: Mutex, + historian_input: Mutex>, historian: Historian, - entry_info_subscribers: Vec, + entry_info_subscribers: Mutex>, } #[cfg_attr(feature = "cargo-clippy", allow(large_enum_variant))] @@ -70,6 +70,8 @@ impl Request { } } +type SharedSkel = Arc; + #[derive(Serialize, Deserialize, Debug)] pub enum Response { Balance { key: PublicKey, val: Option }, @@ -77,30 +79,31 @@ pub enum Response { LastId { id: Hash }, } -impl AccountantSkel { +impl AccountantSkel { /// Create a new AccountantSkel that wraps the given Accountant. pub fn new( acc: Accountant, last_id: Hash, - writer: W, historian_input: SyncSender, historian: Historian, ) -> Self { AccountantSkel { - acc, - last_id, - writer, - historian_input, + acc: Mutex::new(acc), + last_id: Mutex::new(last_id), + entry_info_subscribers: Mutex::new(vec![]), + historian_input: Mutex::new(historian_input), historian, - entry_info_subscribers: vec![], } } - fn notify_entry_info_subscribers(&mut self, entry: &Entry) { + fn notify_entry_info_subscribers(obj: &SharedSkel, entry: &Entry) { // TODO: No need to bind(). let socket = UdpSocket::bind("127.0.0.1:0").expect("bind"); - for addr in &self.entry_info_subscribers { + // copy subscribers to avoid taking lock while doing io + let addrs = obj.entry_info_subscribers.lock().unwrap().clone(); + trace!("Sending to {} addrs", addrs.len()); + for addr in addrs { let entry_info = EntryInfo { id: entry.id, num_hashes: entry.num_hashes, @@ -111,34 +114,131 @@ impl AccountantSkel { } } - /// Process any Entry items that have been published by the Historian. 
- pub fn sync(&mut self) -> Hash { - while let Ok(entry) = self.historian.output.try_recv() { - self.last_id = entry.id; - self.acc.register_entry_id(&self.last_id); - writeln!(self.writer, "{}", serde_json::to_string(&entry).unwrap()).unwrap(); - self.notify_entry_info_subscribers(&entry); + fn update_entry(obj: &SharedSkel, writer: &Arc>, entry: &Entry) { + trace!("update_entry entry"); + let mut last_id_l = obj.last_id.lock().unwrap(); + *last_id_l = entry.id; + obj.acc.lock().unwrap().register_entry_id(&last_id_l); + drop(last_id_l); + writeln!( + writer.lock().unwrap(), + "{}", + serde_json::to_string(&entry).unwrap() + ).unwrap(); + trace!("notify_entry_info entry"); + Self::notify_entry_info_subscribers(obj, &entry); + trace!("notify_entry_info done"); + } + + fn receive_to_list( + obj: &SharedSkel, + writer: &Arc>, + max: usize, + ) -> Result> { + //TODO implement a serialize for channel that does this without allocations + let mut num = 0; + let mut l = LinkedList::new(); + let entry = obj.historian + .output + .lock() + .unwrap() + .recv_timeout(Duration::new(1, 0))?; + Self::update_entry(obj, writer, &entry); + l.push_back(entry); + while let Ok(entry) = obj.historian.receive() { + Self::update_entry(obj, writer, &entry); + l.push_back(entry); + num += 1; + if num == max { + break; + } + trace!("receive_to_list entries num: {}", num); } - self.last_id + Ok(l) + } + + /// Process any Entry items that have been published by the Historian. + /// continuosly broadcast blobs of entries out + fn run_sync( + obj: SharedSkel, + broadcast: &streamer::BlobSender, + blob_recycler: &packet::BlobRecycler, + writer: &Arc>, + exit: Arc, + ) -> Result<()> { + // TODO: should it be the serialized Entry size? + let max = BLOB_SIZE / size_of::(); + let mut q = VecDeque::new(); + let mut count = 0; + trace!("max: {}", max); + while let Ok(list) = Self::receive_to_list(&obj, writer, max) { + trace!("New blobs? {} {}", count, list.len()); + let b = blob_recycler.allocate(); + let pos = { + let mut bd = b.write().unwrap(); + let mut out = Cursor::new(bd.data_mut()); + serialize_into(&mut out, &list).expect("failed to serialize output"); + out.position() as usize + }; + assert!(pos < BLOB_SIZE); + b.write().unwrap().set_size(pos); + q.push_back(b); + count += 1; + if exit.load(Ordering::Relaxed) { + break; + } + } + if !q.is_empty() { + broadcast.send(q)?; + } + Ok(()) + } + + pub fn sync_service( + obj: SharedSkel, + exit: Arc, + broadcast: streamer::BlobSender, + blob_recycler: packet::BlobRecycler, + writer: Arc>, + ) -> JoinHandle<()> { + spawn(move || loop { + let e = Self::run_sync( + obj.clone(), + &broadcast, + &blob_recycler, + &writer, + exit.clone(), + ); + if e.is_err() && exit.load(Ordering::Relaxed) { + break; + } + }) } /// Process Request items sent by clients. 
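    /// Only read-style requests (balance, last ID, subscriptions) are answered
    /// here; `Transaction` requests are split out beforehand by
    /// `partition_requests` and never reach this function.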
pub fn process_request( - &mut self, + &self, msg: Request, rsp_addr: SocketAddr, ) -> Option<(Response, SocketAddr)> { match msg { Request::GetBalance { key } => { - let val = self.acc.get_balance(&key); + let val = self.acc.lock().unwrap().get_balance(&key); Some((Response::Balance { key, val }, rsp_addr)) } - Request::GetLastId => Some((Response::LastId { id: self.sync() }, rsp_addr)), + Request::GetLastId => Some(( + Response::LastId { + id: *self.last_id.lock().unwrap(), + }, + rsp_addr, + )), Request::Transaction(_) => unreachable!(), Request::Subscribe { subscriptions } => { for subscription in subscriptions { match subscription { - Subscription::EntryInfo => self.entry_info_subscribers.push(rsp_addr), + Subscription::EntryInfo => { + self.entry_info_subscribers.lock().unwrap().push(rsp_addr) + } } } None @@ -214,22 +314,25 @@ impl AccountantSkel { } fn process_packets( - &mut self, + &self, req_vers: Vec<(Request, SocketAddr, u8)>, ) -> Result> { + trace!("partitioning"); let (trs, reqs) = Self::partition_requests(req_vers); // Process the transactions in parallel and then log the successful ones. - for result in self.acc.process_verified_transactions(trs) { + for result in self.acc.lock().unwrap().process_verified_transactions(trs) { if let Ok(tr) = result { self.historian_input + .lock() + .unwrap() .send(Signal::Event(Event::Transaction(tr)))?; } } // Let validators know they should not attempt to process additional // transactions in parallel. - self.historian_input.send(Signal::Tick)?; + self.historian_input.lock().unwrap().send(Signal::Tick)?; // Process the remaining requests serially. let rsps = reqs.into_iter() @@ -268,39 +371,44 @@ impl AccountantSkel { } fn process( - obj: &Arc>>, + obj: &SharedSkel, verified_receiver: &Receiver)>>, - blob_sender: &streamer::BlobSender, + responder_sender: &streamer::BlobSender, packet_recycler: &packet::PacketRecycler, blob_recycler: &packet::BlobRecycler, ) -> Result<()> { let timer = Duration::new(1, 0); let mms = verified_receiver.recv_timeout(timer)?; + trace!("got some messages: {}", mms.len()); for (msgs, vers) in mms { let reqs = Self::deserialize_packets(&msgs.read().unwrap()); let req_vers = reqs.into_iter() .zip(vers) .filter_map(|(req, ver)| req.map(|(msg, addr)| (msg, addr, ver))) - .filter(|x| x.0.verify()) + .filter(|x| { + let v = x.0.verify(); + trace!("v:{} x:{:?}", v, x); + v + }) .collect(); - let rsps = obj.lock().unwrap().process_packets(req_vers)?; + trace!("process_packets"); + let rsps = obj.process_packets(req_vers)?; + trace!("done process_packets"); let blobs = Self::serialize_responses(rsps, blob_recycler)?; + trace!("sending blobs: {}", blobs.len()); if !blobs.is_empty() { //don't wake up the other side if there is nothing - blob_sender.send(blobs)?; + responder_sender.send(blobs)?; } packet_recycler.recycle(msgs); - - // Write new entries to the ledger and notify subscribers. 
- obj.lock().unwrap().sync(); } - + trace!("done responding"); Ok(()) } /// Process verified blobs, already in order /// Respond with a signed hash of the state fn replicate_state( - obj: &Arc>>, + obj: &SharedSkel, verified_receiver: &streamer::BlobReceiver, blob_recycler: &packet::BlobRecycler, ) -> Result<()> { @@ -310,11 +418,11 @@ impl AccountantSkel { let blob = msgs.read().unwrap(); let entries: Vec = deserialize(&blob.data()[..blob.meta.size]).unwrap(); for entry in entries { - obj.lock().unwrap().acc.register_entry_id(&entry.id); + obj.acc.lock().unwrap().register_entry_id(&entry.id); - obj.lock() + obj.acc + .lock() .unwrap() - .acc .process_verified_events(entry.events)?; } //TODO respond back to leader with hash of the state @@ -328,25 +436,35 @@ impl AccountantSkel { /// Create a UDP microservice that forwards messages the given AccountantSkel. /// This service is the network leader /// Set `exit` to shutdown its threads. - pub fn serve( - obj: &Arc>>, - addr: &str, + pub fn serve( + obj: &SharedSkel, + me: ReplicatedData, + serve: UdpSocket, + gossip: UdpSocket, exit: Arc, + writer: W, ) -> Result>> { - let read = UdpSocket::bind(addr)?; + let crdt = Arc::new(RwLock::new(Crdt::new(me))); + let t_gossip = Crdt::gossip(crdt.clone(), exit.clone()); + let t_listen = Crdt::listen(crdt.clone(), gossip, exit.clone()); + // make sure we are on the same interface - let mut local = read.local_addr()?; + let mut local = serve.local_addr()?; local.set_port(0); - let write = UdpSocket::bind(local)?; + let respond_socket = UdpSocket::bind(local.clone())?; let packet_recycler = packet::PacketRecycler::default(); let blob_recycler = packet::BlobRecycler::default(); let (packet_sender, packet_receiver) = channel(); let t_receiver = - streamer::receiver(read, exit.clone(), packet_recycler.clone(), packet_sender)?; - let (blob_sender, blob_receiver) = channel(); - let t_responder = - streamer::responder(write, exit.clone(), blob_recycler.clone(), blob_receiver); + streamer::receiver(serve, exit.clone(), packet_recycler.clone(), packet_sender)?; + let (responder_sender, responder_receiver) = channel(); + let t_responder = streamer::responder( + respond_socket, + exit.clone(), + blob_recycler.clone(), + responder_receiver, + ); let (verified_sender, verified_receiver) = channel(); let exit_ = exit.clone(); @@ -357,32 +475,58 @@ impl AccountantSkel { } }); + let (broadcast_sender, broadcast_receiver) = channel(); + + let broadcast_socket = UdpSocket::bind(local)?; + let t_broadcast = streamer::broadcaster( + broadcast_socket, + exit.clone(), + crdt.clone(), + blob_recycler.clone(), + broadcast_receiver, + ); + + let t_sync = Self::sync_service( + obj.clone(), + exit.clone(), + broadcast_sender, + blob_recycler.clone(), + Arc::new(Mutex::new(writer)), + ); + let skel = obj.clone(); let t_server = spawn(move || loop { let e = Self::process( - &skel, + &mut skel.clone(), &verified_receiver, - &blob_sender, + &responder_sender, &packet_recycler, &blob_recycler, ); if e.is_err() { - // Assume this was a timeout, so sync any empty entries. - skel.lock().unwrap().sync(); - if exit.load(Ordering::Relaxed) { break; } } }); - Ok(vec![t_receiver, t_responder, t_server, t_verifier]) + Ok(vec![ + t_receiver, + t_responder, + t_server, + t_verifier, + t_sync, + t_gossip, + t_listen, + t_broadcast, + ]) } /// This service receives messages from a leader in the network and processes the transactions /// on the accountant state. /// # Arguments /// * `obj` - The accountant state. - /// * `rsubs` - The subscribers. 
    /// This service receives messages from a leader in the network and processes the transactions
    /// on the accountant state.
    /// # Arguments
    /// * `obj` - The accountant state.
-    /// * `rsubs` - The subscribers.
+    /// * `me` - my configuration
+    /// * `leader` - leader configuration
    /// * `exit` - The exit signal.
    /// # Remarks
    /// The pipeline is constructed as follows:
@@ -396,13 +540,21 @@ impl AccountantSkel {
    /// 4. process the transaction state machine
    /// 5. respond with the hash of the state back to the leader
    pub fn replicate(
-        obj: &Arc<Mutex<AccountantSkel<W>>>,
-        rsubs: subscribers::Subscribers,
+        obj: &SharedSkel,
+        me: ReplicatedData,
+        gossip: UdpSocket,
+        replicate: UdpSocket,
+        leader: ReplicatedData,
        exit: Arc<AtomicBool>,
    ) -> Result<Vec<JoinHandle<()>>> {
-        let read = UdpSocket::bind(rsubs.me.addr)?;
+        let crdt = Arc::new(RwLock::new(Crdt::new(me)));
+        crdt.write().unwrap().set_leader(leader.id);
+        crdt.write().unwrap().insert(leader);
+        let t_gossip = Crdt::gossip(crdt.clone(), exit.clone());
+        let t_listen = Crdt::listen(crdt.clone(), gossip, exit.clone());
+
        // make sure we are on the same interface
-        let mut local = read.local_addr()?;
+        let mut local = replicate.local_addr()?;
        local.set_port(0);
        let write = UdpSocket::bind(local)?;

@@ -411,26 +563,26 @@ impl AccountantSkel {
        let t_blob_receiver = streamer::blob_receiver(
            exit.clone(),
            blob_recycler.clone(),
-            read,
+            replicate,
            blob_sender.clone(),
        )?;
        let (window_sender, window_receiver) = channel();
        let (retransmit_sender, retransmit_receiver) = channel();

-        let subs = Arc::new(RwLock::new(rsubs));
        let t_retransmit = streamer::retransmitter(
            write,
            exit.clone(),
-            subs.clone(),
+            crdt.clone(),
            blob_recycler.clone(),
            retransmit_receiver,
        );
+
        //TODO
        //the packets coming out of blob_receiver need to be sent to the GPU and verified
        //then sent to the window, which does the erasure coding reconstruction
        let t_window = streamer::window(
            exit.clone(),
-            subs,
+            crdt,
            blob_recycler.clone(),
            blob_receiver,
            window_sender,
@@ -444,7 +596,14 @@ impl AccountantSkel {
                break;
            }
        });
-        Ok(vec![t_blob_receiver, t_retransmit, t_window, t_server])
+        Ok(vec![
+            t_blob_receiver,
+            t_retransmit,
+            t_window,
+            t_server,
+            t_gossip,
+            t_listen,
+        ])
    }
}

@@ -479,30 +638,30 @@ mod tests {
    use accountant::Accountant;
    use accountant_skel::AccountantSkel;
    use accountant_stub::AccountantStub;
+    use chrono::prelude::*;
+    use crdt::Crdt;
+    use crdt::ReplicatedData;
+    use entry;
    use entry::Entry;
+    use event::Event;
    use futures::Future;
+    use hash::{hash, Hash};
    use historian::Historian;
    use mint::Mint;
    use plan::Plan;
    use recorder::Signal;
    use signature::{KeyPair, KeyPairUtil};
+    use std::collections::VecDeque;
    use std::io::sink;
    use std::net::{SocketAddr, UdpSocket};
    use std::sync::atomic::{AtomicBool, Ordering};
+    use std::sync::mpsc::channel;
    use std::sync::mpsc::sync_channel;
-    use std::sync::{Arc, Mutex};
+    use std::sync::{Arc, RwLock};
    use std::thread::sleep;
    use std::time::Duration;
-    use transaction::Transaction;
-
-    use chrono::prelude::*;
-    use entry;
-    use event::Event;
-    use hash::{hash, Hash};
-    use std::collections::VecDeque;
-    use std::sync::mpsc::channel;
    use streamer;
-    use subscribers::{Node, Subscribers};
+    use transaction::Transaction;

    #[test]
    fn test_layout() {
@@ -540,7 +699,7 @@ mod tests {
        let rsp_addr: SocketAddr = "0.0.0.0:0".parse().expect("socket address");
        let (input, event_receiver) = sync_channel(10);
        let historian = Historian::new(event_receiver, &mint.last_id(), None);
-        let mut skel = AccountantSkel::new(acc, mint.last_id(), sink(), input, historian);
+        let skel = AccountantSkel::new(acc, mint.last_id(), input, historian);

        // Process a batch that includes a transaction that receives two tokens.
        let alice = KeyPair::new();

        assert!(skel.process_packets(req_vers).is_ok());

        // Collect the ledger and feed it to a new accountant.
-        skel.historian_input.send(Signal::Tick).unwrap();
+        skel.historian_input
+            .lock()
+            .unwrap()
+            .send(Signal::Tick)
+            .unwrap();
        drop(skel.historian_input);
-        let entries: Vec<Entry> = skel.historian.output.iter().collect();
+        let entries: Vec<Entry> = skel.historian.output.lock().unwrap().iter().collect();

        // Assert the user holds one token, not two. If the server only output one
        // entry, then the second transaction will be rejected, because it drives

    #[test]
    fn test_accountant_bad_sig() {
-        let serve_port = 9002;
-        let send_port = 9003;
-        let addr = format!("127.0.0.1:{}", serve_port);
-        let send_addr = format!("127.0.0.1:{}", send_port);
+        let (leader_data, leader_gossip, _, leader_serve) = test_node();
        let alice = Mint::new(10_000);
        let acc = Accountant::new(&alice);
        let bob_pubkey = KeyPair::new().pubkey();
        let exit = Arc::new(AtomicBool::new(false));
        let (input, event_receiver) = sync_channel(10);
        let historian = Historian::new(event_receiver, &alice.last_id(), Some(30));
-        let acc = Arc::new(Mutex::new(AccountantSkel::new(
-            acc,
-            alice.last_id(),
+        let acc_skel = Arc::new(AccountantSkel::new(acc, alice.last_id(), input, historian));
+        let serve_addr = leader_serve.local_addr().unwrap();
+        let threads = AccountantSkel::serve(
+            &acc_skel,
+            leader_data,
+            leader_serve,
+            leader_gossip,
+            exit.clone(),
            sink(),
-            input,
-            historian,
-        )));
-        let _threads = AccountantSkel::serve(&acc, &addr, exit.clone()).unwrap();
+        ).unwrap();
        sleep(Duration::from_millis(300));

-        let socket = UdpSocket::bind(send_addr).unwrap();
+        let socket = UdpSocket::bind("127.0.0.1:0").unwrap();
        socket.set_read_timeout(Some(Duration::new(5, 0))).unwrap();
+        let mut acc_stub = AccountantStub::new(serve_addr, socket);
+        let last_id = acc_stub.get_last_id().wait().unwrap();

-        let mut acc = AccountantStub::new(&addr, socket);
-        let last_id = acc.get_last_id().wait().unwrap();
+        trace!("doing stuff");

        let tr = Transaction::new(&alice.keypair(), bob_pubkey, 500, last_id);
-        let _sig = acc.transfer_signed(tr).unwrap();
+        let _sig = acc_stub.transfer_signed(tr).unwrap();

-        let last_id = acc.get_last_id().wait().unwrap();
+        let last_id = acc_stub.get_last_id().wait().unwrap();

        let mut tr2 = Transaction::new(&alice.keypair(), bob_pubkey, 501, last_id);
        tr2.data.tokens = 502;
        tr2.data.plan = Plan::new_payment(502, bob_pubkey);
-        let _sig = acc.transfer_signed(tr2).unwrap();
+        let _sig = acc_stub.transfer_signed(tr2).unwrap();

-        assert_eq!(acc.get_balance(&bob_pubkey).wait().unwrap(), 500);
+        assert_eq!(acc_stub.get_balance(&bob_pubkey).wait().unwrap(), 500);
+        trace!("exiting");
        exit.store(true, Ordering::Relaxed);
+        trace!("joining threads");
+        for t in threads {
+            t.join().unwrap();
+        }
    }

    use std::sync::{Once, ONCE_INIT};
        });
    }

+    fn test_node() -> (ReplicatedData, UdpSocket, UdpSocket, UdpSocket) {
+        let gossip = UdpSocket::bind("127.0.0.1:0").unwrap();
+        let replicate = UdpSocket::bind("127.0.0.1:0").unwrap();
+        let serve = UdpSocket::bind("127.0.0.1:0").unwrap();
+        let pubkey = KeyPair::new().pubkey();
+        let d = ReplicatedData::new(
+            pubkey,
+            gossip.local_addr().unwrap(),
+            replicate.local_addr().unwrap(),
+            serve.local_addr().unwrap(),
+        );
+        (d, gossip, replicate, serve)
+    }
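The `test_node` helper above leans on OS-assigned ephemeral ports so parallel tests never collide on fixed port numbers. A tiny, self-contained illustration of that pattern (names here are ours, not the patch's):

```rust
use std::net::UdpSocket;

fn main() -> std::io::Result<()> {
    // Binding port 0 asks the OS for any free port; local_addr() then reveals
    // which port was actually assigned.
    let sock = UdpSocket::bind("127.0.0.1:0")?;
    println!("bound to {}", sock.local_addr()?);
    Ok(())
}
```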
+    /// Test that a message sent from the leader to target1 is replicated to target2
    #[test]
    fn test_replicate() {
        setup();
-        let leader_sock = UdpSocket::bind("127.0.0.1:0").expect("bind");
-        let leader_addr = leader_sock.local_addr().unwrap();
-        let me_addr = "127.0.0.1:9010".parse().unwrap();
-        let target_peer_sock = UdpSocket::bind("127.0.0.1:0").expect("bind");
-        let target_peer_addr = target_peer_sock.local_addr().unwrap();
-        let source_peer_sock = UdpSocket::bind("127.0.0.1:0").expect("bind");
+        let (leader_data, leader_gossip, _, leader_serve) = test_node();
+        let (target1_data, target1_gossip, target1_replicate, _) = test_node();
+        let (target2_data, target2_gossip, target2_replicate, _) = test_node();
        let exit = Arc::new(AtomicBool::new(false));

-        let node_me = Node::new([0, 0, 0, 0, 0, 0, 0, 1], 10, me_addr);
-        let node_subs = vec![Node::new([0, 0, 0, 0, 0, 0, 0, 2], 8, target_peer_addr); 1];
-        let node_leader = Node::new([0, 0, 0, 0, 0, 0, 0, 3], 20, leader_addr);
-        let subs = Subscribers::new(node_me, node_leader, &node_subs);
+        //start crdt_leader
+        let mut crdt_l = Crdt::new(leader_data.clone());
+        crdt_l.set_leader(leader_data.id);
+
+        let cref_l = Arc::new(RwLock::new(crdt_l));
+        let t_l_gossip = Crdt::gossip(cref_l.clone(), exit.clone());
+        let t_l_listen = Crdt::listen(cref_l, leader_gossip, exit.clone());
+
+        //start crdt2
+        let mut crdt2 = Crdt::new(target2_data.clone());
+        crdt2.insert(leader_data.clone());
+        crdt2.set_leader(leader_data.id);
+        let leader_id = leader_data.id;
+        let cref2 = Arc::new(RwLock::new(crdt2));
+        let t2_gossip = Crdt::gossip(cref2.clone(), exit.clone());
+        let t2_listen = Crdt::listen(cref2, target2_gossip, exit.clone());

        // setup some blob services to send blobs into the socket
        // to simulate the source peer and get blobs out of the socket to

        let (s_reader, r_reader) = channel();
        let t_receiver = streamer::blob_receiver(
            exit.clone(),
            recv_recycler.clone(),
-            target_peer_sock,
+            target2_replicate,
            s_reader,
        ).unwrap();
+
+        // simulate leader sending messages
        let (s_responder, r_responder) = channel();
        let t_responder = streamer::responder(
-            source_peer_sock,
+            leader_serve,
            exit.clone(),
            resp_recycler.clone(),
            r_responder,
        );

        let starting_balance = 10_000;
        let alice = Mint::new(starting_balance);
        let acc = Accountant::new(&alice);
        let (input, event_receiver) = sync_channel(10);
        let historian = Historian::new(event_receiver, &alice.last_id(), Some(30));
-        let acc = Arc::new(Mutex::new(AccountantSkel::new(
-            acc,
-            alice.last_id(),
-            sink(),
-            input,
-            historian,
-        )));
-
-        let _threads = AccountantSkel::replicate(&acc, subs, exit.clone()).unwrap();
+        let acc = Arc::new(AccountantSkel::new(acc, alice.last_id(), input, historian));
+        let replicate_addr = target1_data.replicate_addr;
+        let threads = AccountantSkel::replicate(
+            &acc,
+            target1_data,
+            target1_gossip,
+            target1_replicate,
+            leader_data,
+            exit.clone(),
+        ).unwrap();

        let mut alice_ref_balance = starting_balance;
        let mut msgs = VecDeque::new();

        for i in 0..num_blobs {
            let b = resp_recycler.allocate();
            let b_ = b.clone();
            let mut w = b.write().unwrap();
            w.set_index(i).unwrap();
+            w.set_id(leader_id).unwrap();

            let tr0 = Event::new_timestamp(&bob_keypair, Utc::now());
            let entry0 = entry::create_entry(&cur_hash, i, vec![tr0]);
-            acc.lock().unwrap().acc.register_entry_id(&cur_hash);
+            acc.acc.lock().unwrap().register_entry_id(&cur_hash);
            cur_hash = hash(&cur_hash);

            let tr1 = Transaction::new(
                &alice.keypair(),
                bob_keypair.pubkey(),
                transfer_amount,
                cur_hash,
            );
-            acc.lock().unwrap().acc.register_entry_id(&cur_hash);
+            acc.acc.lock().unwrap().register_entry_id(&cur_hash);
            cur_hash = hash(&cur_hash);
            let entry1 = entry::create_entry(&cur_hash, i + num_blobs,
                vec![Event::Transaction(tr1)]);
-            acc.lock().unwrap().acc.register_entry_id(&cur_hash);
+            acc.acc.lock().unwrap().register_entry_id(&cur_hash);
            cur_hash = hash(&cur_hash);

            alice_ref_balance -= transfer_amount;

            w.data_mut()[..serialized_entry.len()].copy_from_slice(&serialized_entry);
            w.set_size(serialized_entry.len());
-            w.meta.set_addr(&me_addr);
+            w.meta.set_addr(&replicate_addr);
            drop(w);
            msgs.push_back(b_);
        }

            msgs.push(msg);
        }

-        let alice_balance = acc.lock()
+        let alice_balance = acc.acc
+            .lock()
            .unwrap()
-            .acc
            .get_balance(&alice.keypair().pubkey())
            .unwrap();
        assert_eq!(alice_balance, alice_ref_balance);

-        let bob_balance = acc.lock()
+        let bob_balance = acc.acc
+            .lock()
            .unwrap()
-            .acc
            .get_balance(&bob_keypair.pubkey())
            .unwrap();
        assert_eq!(bob_balance, starting_balance - alice_ref_balance);

        exit.store(true, Ordering::Relaxed);
+        for t in threads {
+            t.join().expect("join");
+        }
+        t2_gossip.join().expect("join");
+        t2_listen.join().expect("join");
        t_receiver.join().expect("join");
        t_responder.join().expect("join");
+        t_l_gossip.join().expect("join");
+        t_l_listen.join().expect("join");
    }
-
}

#[cfg(all(feature = "unstable", test))]
mod bench {
@@ -758,7 +960,6 @@ mod bench {
    use mint::Mint;
    use signature::{KeyPair, KeyPairUtil};
    use std::collections::HashSet;
-    use std::io::sink;
    use std::sync::mpsc::sync_channel;
    use std::time::Instant;
    use transaction::Transaction;
@@ -806,7 +1007,7 @@ mod bench {

        let (input, event_receiver) = sync_channel(10);
        let historian = Historian::new(event_receiver, &mint.last_id(), None);
-        let mut skel = AccountantSkel::new(acc, mint.last_id(), sink(), input, historian);
+        let skel = AccountantSkel::new(acc, mint.last_id(), input, historian);

        let now = Instant::now();
        assert!(skel.process_packets(req_vers).is_ok());
@@ -816,7 +1017,7 @@ mod bench {

        // Ensure that all transactions were successfully logged.
        drop(skel.historian_input);
-        let entries: Vec<Entry> = skel.historian.output.iter().collect();
+        let entries: Vec<Entry> = skel.historian.output.lock().unwrap().iter().collect();
        assert_eq!(entries.len(), 1);
        assert_eq!(entries[0].events.len(), txs as usize);

diff --git a/src/accountant_stub.rs b/src/accountant_stub.rs
index 5fa7acb720..15a6a69cb7 100644
--- a/src/accountant_stub.rs
+++ b/src/accountant_stub.rs
@@ -10,11 +10,11 @@ use hash::Hash;
use signature::{KeyPair, PublicKey, Signature};
use std::collections::HashMap;
use std::io;
-use std::net::UdpSocket;
+use std::net::{SocketAddr, UdpSocket};
use transaction::Transaction;

pub struct AccountantStub {
-    pub addr: String,
+    pub addr: SocketAddr,
    pub socket: UdpSocket,
    last_id: Option<Hash>,
    num_events: u64,
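With `addr` now a `SocketAddr` rather than a `String`, a caller resolves the server address once and binds its own reply socket up front. A hedged sketch of that wire-up (the helper name and error handling are ours; only the constructor shape comes from the diff):

```rust
use std::net::{SocketAddr, UdpSocket};

// Hypothetical client setup matching `AccountantStub::new(addr, socket)`:
// parse the server's serve address, bind an ephemeral local socket for replies.
fn connect(server: &str) -> std::io::Result<(SocketAddr, UdpSocket)> {
    let addr: SocketAddr = server.parse().expect("valid socket address");
    let socket = UdpSocket::bind("0.0.0.0:0")?; // any free local port
    Ok((addr, socket))
}
```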
@@ -25,9 +25,9 @@ impl AccountantStub {
    /// Create a new AccountantStub that will interface with AccountantSkel
    /// over `socket`. To receive responses, the caller must bind `socket`
    /// to a public address before invoking AccountantStub methods.
-    pub fn new(addr: &str, socket: UdpSocket) -> Self {
+    pub fn new(addr: SocketAddr, socket: UdpSocket) -> Self {
        let stub = AccountantStub {
-            addr: addr.to_string(),
+            addr: addr,
            socket,
            last_id: None,
            num_events: 0,
@@ -160,6 +160,7 @@ mod tests {
    use super::*;
    use accountant::Accountant;
    use accountant_skel::AccountantSkel;
+    use crdt::ReplicatedData;
    use futures::Future;
    use historian::Historian;
    use mint::Mint;
@@ -167,32 +168,35 @@ mod tests {
    use std::io::sink;
    use std::sync::atomic::{AtomicBool, Ordering};
    use std::sync::mpsc::sync_channel;
-    use std::sync::{Arc, Mutex};
+    use std::sync::Arc;
    use std::thread::sleep;
    use std::time::Duration;

    // TODO: Figure out why this test sometimes hangs on TravisCI.
    #[test]
    fn test_accountant_stub() {
-        let addr = "127.0.0.1:9000";
-        let send_addr = "127.0.0.1:9001";
+        let gossip = UdpSocket::bind("0.0.0.0:0").unwrap();
+        let serve = UdpSocket::bind("0.0.0.0:0").unwrap();
+        let addr = serve.local_addr().unwrap();
+        let pubkey = KeyPair::new().pubkey();
+        let d = ReplicatedData::new(
+            pubkey,
+            gossip.local_addr().unwrap(),
+            "0.0.0.0:0".parse().unwrap(),
+            serve.local_addr().unwrap(),
+        );
+
        let alice = Mint::new(10_000);
        let acc = Accountant::new(&alice);
        let bob_pubkey = KeyPair::new().pubkey();
        let exit = Arc::new(AtomicBool::new(false));
        let (input, event_receiver) = sync_channel(10);
        let historian = Historian::new(event_receiver, &alice.last_id(), Some(30));
-        let acc = Arc::new(Mutex::new(AccountantSkel::new(
-            acc,
-            alice.last_id(),
-            sink(),
-            input,
-            historian,
-        )));
-        let _threads = AccountantSkel::serve(&acc, addr, exit.clone()).unwrap();
+        let acc = Arc::new(AccountantSkel::new(acc, alice.last_id(), input, historian));
+        let threads = AccountantSkel::serve(&acc, d, serve, gossip, exit.clone(), sink()).unwrap();
        sleep(Duration::from_millis(300));

-        let socket = UdpSocket::bind(send_addr).unwrap();
+        let socket = UdpSocket::bind("0.0.0.0:0").unwrap();
        socket.set_read_timeout(Some(Duration::new(5, 0))).unwrap();

        let mut acc = AccountantStub::new(addr, socket);
@@ -201,5 +205,8 @@ mod tests {
            .unwrap();
        assert_eq!(acc.get_balance(&bob_pubkey).wait().unwrap(), 500);
        exit.store(true, Ordering::Relaxed);
+        for t in threads {
+            t.join().unwrap();
+        }
    }
}
diff --git a/src/bin/client-demo.rs b/src/bin/client-demo.rs
index ece7cc876f..739b6cea4a 100644
--- a/src/bin/client-demo.rs
+++ b/src/bin/client-demo.rs
@@ -84,7 +84,7 @@ fn main() {
    println!("Binding to {}", client_addr);
    let socket = UdpSocket::bind(&client_addr).unwrap();
-    let mut acc = AccountantStub::new(&addr, socket);
+    let mut acc = AccountantStub::new(addr.parse().unwrap(), socket);

    println!("Get last ID...");
    let last_id = acc.get_last_id().wait().unwrap();
@@ -125,7 +125,7 @@ fn main() {
        let mut client_addr: SocketAddr = client_addr.parse().unwrap();
        client_addr.set_port(0);
        let socket = UdpSocket::bind(client_addr).unwrap();
-        let acc = AccountantStub::new(&addr, socket);
+        let acc = AccountantStub::new(addr.parse().unwrap(), socket);
        for tr in trs {
            acc.transfer_signed(tr.clone()).unwrap();
        }
diff --git a/src/bin/historian-demo.rs b/src/bin/historian-demo.rs
index 010391cbab..134e7950f5 100644
--- a/src/bin/historian-demo.rs
+++ b/src/bin/historian-demo.rs
@@ -28,7 +28,7 @@ fn main() {
    let hist = Historian::new(event_receiver, &seed, Some(10));
    create_ledger(&input, &seed).expect("send error");
    drop(input);
-    let entries: Vec<Entry> = hist.output.iter().collect();
+    let entries: Vec<Entry> = hist.output.lock().unwrap().iter().collect();
    for entry in &entries {
        println!("{:?}",
            entry);
    }
}
diff --git a/src/bin/testnode.rs b/src/bin/testnode.rs
index 6a6d8ac9ea..2c585e6f27 100644
--- a/src/bin/testnode.rs
+++ b/src/bin/testnode.rs
@@ -8,15 +8,18 @@ use getopts::Options;
use isatty::stdin_isatty;
use solana::accountant::Accountant;
use solana::accountant_skel::AccountantSkel;
+use solana::crdt::ReplicatedData;
use solana::entry::Entry;
use solana::event::Event;
use solana::historian::Historian;
+use solana::signature::{KeyPair, KeyPairUtil};
use std::env;
use std::io::{stdin, stdout, Read};
+use std::net::UdpSocket;
use std::process::exit;
use std::sync::atomic::AtomicBool;
use std::sync::mpsc::sync_channel;
-use std::sync::{Arc, Mutex};
+use std::sync::Arc;

fn print_usage(program: &str, opts: Options) {
    let mut brief = format!("Usage: cat <transaction.log> | {} [options]\n\n", program);
@@ -49,7 +52,9 @@ fn main() {
    if matches.opt_present("p") {
        port = matches.opt_str("p").unwrap().parse().expect("port");
    }
-    let addr = format!("0.0.0.0:{}", port);
+    let serve_addr = format!("0.0.0.0:{}", port);
+    let gossip_addr = format!("0.0.0.0:{}", port + 1);
+    let replicate_addr = format!("0.0.0.0:{}", port + 2);

    if stdin_isatty() {
        eprintln!("nothing found on stdin, expected a log file");
@@ -99,15 +104,20 @@ fn main() {
    let (input, event_receiver) = sync_channel(10_000);
    let historian = Historian::new(event_receiver, &last_id, Some(1000));
    let exit = Arc::new(AtomicBool::new(false));
-    let skel = Arc::new(Mutex::new(AccountantSkel::new(
-        acc,
-        last_id,
-        stdout(),
-        input,
-        historian,
-    )));
-    let threads = AccountantSkel::serve(&skel, &addr, exit.clone()).unwrap();
-    eprintln!("Ready. Listening on {}", addr);
+    let skel = Arc::new(AccountantSkel::new(acc, last_id, input, historian));
+    let serve_sock = UdpSocket::bind(&serve_addr).unwrap();
+    let gossip_sock = UdpSocket::bind(&gossip_addr).unwrap();
+    let replicate_sock = UdpSocket::bind(&replicate_addr).unwrap();
+    let pubkey = KeyPair::new().pubkey();
+    let d = ReplicatedData::new(
+        pubkey,
+        gossip_sock.local_addr().unwrap(),
+        replicate_sock.local_addr().unwrap(),
+        serve_sock.local_addr().unwrap(),
+    );
+    let threads =
+        AccountantSkel::serve(&skel, d, serve_sock, gossip_sock, exit.clone(), stdout()).unwrap();
+    eprintln!("Ready. Listening on {}", serve_addr);
    for t in threads {
        t.join().expect("join");
    }
}
diff --git a/src/crdt.rs b/src/crdt.rs
index b7742d5cd3..a0ece4319b 100644
--- a/src/crdt.rs
+++ b/src/crdt.rs
@@ -1,14 +1,24 @@
//! The `crdt` module defines a data structure that is shared by all the nodes in the network over
-//! a gossip control plane. The goal is to share small bits of of-chain information and detect and
+//! a gossip control plane. The goal is to share small bits of off-chain information and detect and
//! repair partitions.
//!
//! This CRDT only supports a very limited set of types. A map of PublicKey -> Versioned Struct.
//! The last version is always picked during an update.
+//!
+//! The network is arranged in layers:
+//!
+//! * layer 0 - Leader.
+//! * layer 1 - As many nodes as we can fit
+//! * layer 2 - Everyone else, if layer 1 is `2^10`, layer 2 should be able to fit `2^20` number of nodes.
+//!
+//! Accountant needs to provide an interface for us to query the stake weight

use bincode::{deserialize, serialize};
use byteorder::{LittleEndian, ReadBytesExt};
use hash::Hash;
-use result::Result;
+use packet::SharedBlob;
+use rayon::prelude::*;
+use result::{Error, Result};
use ring::rand::{SecureRandom, SystemRandom};
use signature::{PublicKey, Signature};
use std::collections::HashMap;
@@ -22,16 +32,16 @@ use std::time::Duration;
/// Structure to be replicated by the network
#[derive(Serialize, Deserialize, Clone)]
pub struct ReplicatedData {
-    id: PublicKey,
+    pub id: PublicKey,
    sig: Signature,
    /// should always be increasing
    version: u64,
    /// address to connect to for gossip
-    gossip_addr: SocketAddr,
+    pub gossip_addr: SocketAddr,
    /// address to connect to for replication
-    replicate_addr: SocketAddr,
+    pub replicate_addr: SocketAddr,
    /// address to connect to when this node is leader
-    lead_addr: SocketAddr,
+    pub serve_addr: SocketAddr,
    /// current leader identity
    current_leader_id: PublicKey,
    /// last verified hash that was submitted to the leader
@@ -41,15 +51,19 @@ pub struct ReplicatedData {
}

impl ReplicatedData {
-    pub fn new(id: PublicKey, gossip_addr: SocketAddr) -> ReplicatedData {
-        let daddr = "0.0.0.0:0".parse().unwrap();
+    pub fn new(
+        id: PublicKey,
+        gossip_addr: SocketAddr,
+        replicate_addr: SocketAddr,
+        serve_addr: SocketAddr,
+    ) -> ReplicatedData {
        ReplicatedData {
            id,
            sig: Signature::default(),
            version: 0,
            gossip_addr,
-            replicate_addr: daddr,
-            lead_addr: daddr,
+            replicate_addr,
+            serve_addr,
            current_leader_id: PublicKey::default(),
            last_verified_hash: Hash::default(),
            last_verified_count: 0,
@@ -78,7 +92,7 @@ pub struct Crdt {
    /// The value of the remote update index that i have last seen
    /// This Node will ask external nodes for updates since the value in this list
    remote: HashMap<PublicKey, u64>,
-    update_index: u64,
+    pub update_index: u64,
    me: PublicKey,
    timeout: Duration,
}
@@ -109,23 +123,117 @@ impl Crdt {
        g.table.insert(me.id, me);
        g
    }
-    pub fn import(&mut self, v: &ReplicatedData) {
-        // TODO check that last_verified types are always increasing
-        // TODO probably an error or attack
-        if self.me != v.id {
-            self.insert(v);
-        }
+    pub fn my_data(&self) -> &ReplicatedData {
+        &self.table[&self.me]
    }
-    pub fn insert(&mut self, v: &ReplicatedData) {
+    pub fn leader_data(&self) -> &ReplicatedData {
+        &self.table[&self.table[&self.me].current_leader_id]
+    }
+
+    pub fn set_leader(&mut self, key: PublicKey) -> () {
+        let mut me = self.my_data().clone();
+        me.current_leader_id = key;
+        me.version += 1;
+        self.insert(me);
+    }
+
+    pub fn insert(&mut self, v: ReplicatedData) {
+        // TODO check that last_verified types are always increasing
        if self.table.get(&v.id).is_none() || (v.version > self.table[&v.id].version) {
+            //somehow we signed a message for our own identity with a higher version than
+            // we have stored ourselves
+            trace!("me: {:?}", self.me[0]);
+            trace!("v.id: {:?}", v.id[0]);
            trace!("insert! {}", v.version);
            self.update_index += 1;
-            let _ = self.table.insert(v.id, v.clone());
+            let _ = self.table.insert(v.id.clone(), v.clone());
            let _ = self.local.insert(v.id, self.update_index);
        } else {
-            trace!("INSERT FAILED {}", v.version);
+            trace!(
+                "INSERT FAILED new.version: {} me.version: {}",
+                v.version,
+                self.table[&v.id].version
+            );
        }
    }
+
+    /// broadcast messages from the leader to layer 1 nodes
+    /// # Remarks
+    /// We need to avoid having obj locked while doing any io, such as the `send_to`
+    pub fn broadcast(
+        obj: &Arc<RwLock<Crdt>>,
+        blobs: &Vec<SharedBlob>,
+        s: &UdpSocket,
+        transmit_index: &mut u64,
+    ) -> Result<()> {
+        let (me, table): (ReplicatedData, Vec<ReplicatedData>) = {
+            // copy to avoid locking during IO
+            let robj = obj.read().unwrap();
+            let cloned_table: Vec<ReplicatedData> = robj.table.values().cloned().collect();
+            (robj.table[&robj.me].clone(), cloned_table)
+        };
+        let errs: Vec<_> = table
+            .iter()
+            .enumerate()
+            .cycle()
+            .zip(blobs.iter())
+            .map(|((i, v), b)| {
+                if me.id == v.id {
+                    return Ok(0);
+                }
+                // only leader should be broadcasting
+                assert!(me.current_leader_id != v.id);
+                let mut blob = b.write().unwrap();
+                blob.set_index(*transmit_index + i as u64)
+                    .expect("set_index");
+                s.send_to(&blob.data[..blob.meta.size], &v.replicate_addr)
+            })
+            .collect();
+        for e in errs {
+            trace!("retransmit result {:?}", e);
+            match e {
+                Err(e) => return Err(Error::IO(e)),
+                _ => (),
+            }
+            *transmit_index += 1;
+        }
+        Ok(())
+    }
+
+    /// retransmit messages from the leader to layer 1 nodes
+    /// # Remarks
+    /// We need to avoid having obj locked while doing any io, such as the `send_to`
+    pub fn retransmit(obj: &Arc<RwLock<Crdt>>, blob: &SharedBlob, s: &UdpSocket) -> Result<()> {
+        let (me, table): (ReplicatedData, Vec<ReplicatedData>) = {
+            // copy to avoid locking during IO
+            let s = obj.read().unwrap();
+            (s.table[&s.me].clone(), s.table.values().cloned().collect())
+        };
+        let rblob = blob.read().unwrap();
+        let errs: Vec<_> = table
+            .par_iter()
+            .map(|v| {
+                if me.id == v.id {
+                    return Ok(0);
+                }
+                if me.current_leader_id == v.id {
+                    trace!("skip retransmit to leader {:?}", v.id);
+                    return Ok(0);
+                }
+                trace!("retransmit blob to {}", v.replicate_addr);
+                s.send_to(&rblob.data[..rblob.meta.size], &v.replicate_addr)
+            })
+            .collect();
+        for e in errs {
+            trace!("retransmit result {:?}", e);
+            match e {
+                Err(e) => return Err(Error::IO(e)),
+                _ => (),
+            }
+        }
+        Ok(())
+    }
+
    fn random() -> u64 {
        let rnd = SystemRandom::new();
        let mut buf = [0u8; 8];
@@ -134,7 +242,7 @@ impl Crdt {
        rdr.read_u64::<LittleEndian>().unwrap()
    }
    fn get_updates_since(&self, v: u64) -> (PublicKey, u64, Vec<ReplicatedData>) {
-        trace!("get updates since {}", v);
+        //trace!("get updates since {}", v);
        let data = self.table
            .values()
            .filter(|x| self.local[&x.id] > v)
@@ -147,10 +255,9 @@ impl Crdt {

    /// Create a random gossip request
    /// # Returns
-    /// (A,B,C)
-    /// * A - Remote gossip address
-    /// * B - My gossip address
-    /// * C - Remote update index to request updates since
+    /// (A,B)
+    /// * A - Address to send to
+    /// * B - RequestUpdates protocol message
    fn gossip_request(&self) -> (SocketAddr, Protocol) {
        let n = (Self::random() as usize) % self.table.len();
        trace!("random {:?} {}", &self.me[0..1], n);
@@ -186,7 +293,7 @@ impl Crdt {
            // TODO we need to punish/spam resist here
            // sig verify the whole update and slash anyone who sends a bad update
            for v in data {
-                self.import(&v);
+                self.insert(v.clone());
            }
            *self.remote.entry(from).or_insert(update_index) = update_index;
        }
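The heart of `broadcast` is pairing a finite list of blobs against an endlessly cycling peer list while handing out consecutive transmit indices. A reduced sketch of just that pairing, mirroring the `enumerate().cycle().zip(...)` shape above (strings and integers stand in for peers and blobs):

```rust
fn main() {
    let peers = ["peer_a", "peer_b", "peer_c"];
    let blobs = [10, 20, 30, 40, 50];
    let mut transmit_index = 100u64;

    // Blob k is sent to peer k % n; each send is tagged transmit_index + i,
    // and transmit_index advances once per send, as in Crdt::broadcast.
    for ((i, peer), blob) in peers.iter().enumerate().cycle().zip(blobs.iter()) {
        println!("blob {} -> {} with index {}", blob, peer, transmit_index + i as u64);
        transmit_index += 1;
    }
}
```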
@@ -222,7 +329,7 @@ impl Crdt {
            let rsp = serialize(&Protocol::ReceiveUpdates(from, ups, data))?;
            trace!("send_to {}", addr);
            //TODO verify reqdata belongs to sender
-            obj.write().unwrap().import(&reqdata);
+            obj.write().unwrap().insert(reqdata);
            sock.send_to(&rsp, addr).unwrap();
            trace!("send_to done!");
        }
@@ -251,6 +358,9 @@ impl Crdt {
#[cfg(test)]
mod test {
    use crdt::{Crdt, ReplicatedData};
+    use logger;
+    use packet::Blob;
+    use rayon::iter::*;
    use signature::KeyPair;
    use signature::KeyPairUtil;
    use std::net::UdpSocket;
@@ -259,6 +369,28 @@ mod test {
    use std::thread::{sleep, JoinHandle};
    use std::time::Duration;

+    fn test_node() -> (Crdt, UdpSocket, UdpSocket, UdpSocket) {
+        let gossip = UdpSocket::bind("0.0.0.0:0").unwrap();
+        let replicate = UdpSocket::bind("0.0.0.0:0").unwrap();
+        let serve = UdpSocket::bind("0.0.0.0:0").unwrap();
+        let pubkey = KeyPair::new().pubkey();
+        let d = ReplicatedData::new(
+            pubkey,
+            gossip.local_addr().unwrap(),
+            replicate.local_addr().unwrap(),
+            serve.local_addr().unwrap(),
+        );
+        let crdt = Crdt::new(d);
+        trace!(
+            "id: {} gossip: {} replicate: {} serve: {}",
+            crdt.my_data().id[0],
+            gossip.local_addr().unwrap(),
+            replicate.local_addr().unwrap(),
+            serve.local_addr().unwrap(),
+        );
+        (crdt, gossip, replicate, serve)
+    }
+
    /// Test that the network converges.
    /// Run until every node in the network has a full ReplicatedData set.
    /// Check that nodes stop sending updates after all the ReplicatedData has been shared.
@@ -271,12 +403,9 @@ mod test {
        let exit = Arc::new(AtomicBool::new(false));
        let listen: Vec<_> = (0..num)
            .map(|_| {
-                let listener = UdpSocket::bind("0.0.0.0:0").unwrap();
-                let pubkey = KeyPair::new().pubkey();
-                let d = ReplicatedData::new(pubkey, listener.local_addr().unwrap());
-                let crdt = Crdt::new(d);
+                let (crdt, gossip, _, _) = test_node();
                let c = Arc::new(RwLock::new(crdt));
-                let l = Crdt::listen(c.clone(), listener, exit.clone());
+                let l = Crdt::listen(c.clone(), gossip, exit.clone());
                (c, l)
            })
            .collect();
@@ -332,7 +461,7 @@ mod test {
                    let yv = listen[y].0.read().unwrap();
                    let mut d = yv.table[&yv.me].clone();
                    d.version = 0;
-                    xv.insert(&d);
+                    xv.insert(d);
                }
            });
        }
@@ -349,7 +478,7 @@ mod test {
                    let yv = listen[y].0.read().unwrap();
                    let mut d = yv.table[&yv.me].clone();
                    d.version = 0;
-                    xv.insert(&d);
+                    xv.insert(d);
                }
            });
        }

    /// Test that insert drops messages that are older
    #[test]
    fn insert_test() {
-        let mut d = ReplicatedData::new(KeyPair::new().pubkey(), "127.0.0.1:1234".parse().unwrap());
+        let mut d = ReplicatedData::new(
+            KeyPair::new().pubkey(),
+            "127.0.0.1:1234".parse().unwrap(),
+            "127.0.0.1:1235".parse().unwrap(),
+            "127.0.0.1:1236".parse().unwrap(),
+        );
        assert_eq!(d.version, 0);
        let mut crdt = Crdt::new(d.clone());
        assert_eq!(crdt.table[&d.id].version, 0);
        d.version = 2;
-        crdt.insert(&d);
+        crdt.insert(d.clone());
        assert_eq!(crdt.table[&d.id].version, 2);
        d.version = 1;
-        crdt.insert(&d);
+        crdt.insert(d.clone());
        assert_eq!(crdt.table[&d.id].version, 2);
    }

+    #[test]
+    pub fn test_crdt_retransmit() {
+        logger::setup();
+        trace!("c1:");
+        let (mut c1, s1, r1, e1) = test_node();
+        trace!("c2:");
+        let (mut c2, s2, r2, _) = test_node();
+        trace!("c3:");
+        let (mut c3, s3, r3, _) = test_node();
+        let c1_id = c1.my_data().id;
+        c1.set_leader(c1_id);
+
+        c2.insert(c1.my_data().clone());
+        c3.insert(c1.my_data().clone());
+
+        c2.set_leader(c1.my_data().id);
+        c3.set_leader(c1.my_data().id);
+
+        let exit = Arc::new(AtomicBool::new(false));
+
+        // Create listen threads
+        let a1 = Arc::new(RwLock::new(c1));
+        let t1 = Crdt::listen(a1.clone(), s1, exit.clone());
+        let a2 = Arc::new(RwLock::new(c2));
+        let t2 = Crdt::listen(a2.clone(), s2, exit.clone());
+
+        let a3 = Arc::new(RwLock::new(c3));
+        let t3 = Crdt::listen(a3.clone(), s3, exit.clone());
+
+        // Create gossip threads
+        let t1_gossip = Crdt::gossip(a1.clone(), exit.clone());
+        let t2_gossip = Crdt::gossip(a2.clone(), exit.clone());
+        let t3_gossip = Crdt::gossip(a3.clone(), exit.clone());
+
+        //wait to converge
+        trace!("waiting to converge:");
+        let mut done = false;
+        for _ in 0..10 {
+            done = a1.read().unwrap().table.len() == 3 && a2.read().unwrap().table.len() == 3
+                && a3.read().unwrap().table.len() == 3;
+            if done {
+                break;
+            }
+            sleep(Duration::new(1, 0));
+        }
+        assert!(done);
+        let mut b = Blob::default();
+        b.meta.size = 10;
+        Crdt::retransmit(&a1, &Arc::new(RwLock::new(b)), &e1).unwrap();
+        let res: Vec<_> = [r1, r2, r3]
+            .into_par_iter()
+            .map(|s| {
+                let mut b = Blob::default();
+                s.set_read_timeout(Some(Duration::new(1, 0))).unwrap();
+                let res = s.recv_from(&mut b.data);
+                res.is_err() //true if failed to receive the retransmit packet
+            })
+            .collect();
+        //true if failed to receive the retransmit packet; r2 and r3 should succeed
+        //r1 was the sender, so it should fail to receive the packet
+        assert_eq!(res, [true, false, false]);
+        exit.store(true, Ordering::Relaxed);
+        let threads = vec![t1, t2, t3, t1_gossip, t2_gossip, t3_gossip];
+        for t in threads.into_iter() {
+            t.join().unwrap();
+        }
+    }
}
diff --git a/src/erasure.rs b/src/erasure.rs
index b8480a73d7..12b4223bb9 100644
--- a/src/erasure.rs
+++ b/src/erasure.rs
@@ -153,7 +153,7 @@ pub fn decode_blocks(data: &mut [&mut [u8]], coding: &[&[u8]], erasures: &[i32])
// Generate coding blocks in window from consumed to consumed+NUM_DATA
pub fn generate_coding(
    re: &BlobRecycler,
-    window: &mut Vec<Option<SharedBlob>>,
+    window: &mut Vec<SharedBlob>,
    consumed: usize,
) -> Result<()> {
    let mut data_blobs = Vec::new();
@@ -179,7 +179,7 @@ pub fn generate_coding(
    let coding_end = consumed + NUM_CODED;
    for i in coding_start..coding_end {
        let n = i % window.len();
-        window[n] = Some(re.allocate());
+        window[n] = re.allocate();
        coding_blobs.push(window[n].clone().unwrap());
    }
    for b in &coding_blobs {
@@ -272,7 +272,6 @@ pub fn recover(
mod test {
    use erasure;
    use packet::{BlobRecycler, SharedBlob, PACKET_DATA_SIZE};
-    extern crate env_logger;

    #[test]
    pub fn test_coding() {
diff --git a/src/historian.rs b/src/historian.rs
index 0d56b1deab..7d2478bf15 100644
--- a/src/historian.rs
+++ b/src/historian.rs
@@ -4,12 +4,13 @@
use entry::Entry;
use hash::Hash;
use recorder::{ExitReason, Recorder, Signal};
-use std::sync::mpsc::{sync_channel, Receiver, SyncSender};
+use std::sync::mpsc::{sync_channel, Receiver, SyncSender, TryRecvError};
+use std::sync::{Arc, Mutex};
use std::thread::{spawn, JoinHandle};
use std::time::Instant;

pub struct Historian {
-    pub output: Receiver<Entry>,
+    pub output: Arc<Mutex<Receiver<Entry>>>,
    pub thread_hdl: JoinHandle<ExitReason>,
}

@@ -22,7 +23,11 @@ impl Historian {
        let (entry_sender, output) = sync_channel(10_000);
        let thread_hdl =
            Historian::create_recorder(*start_hash, ms_per_tick, event_receiver, entry_sender);
-        Historian { output, thread_hdl }
+        let loutput = Arc::new(Mutex::new(output));
+        Historian {
+            output: loutput,
+            thread_hdl,
+        }
    }

    /// A background thread that will continue tagging received Event messages and
@@ -46,6 +51,10 @@ impl Historian {
            }
        })
    }
+
+    pub fn receive(self: &Self) -> Result<Entry, TryRecvError> {
+        self.output.lock().unwrap().try_recv()
+    }
}
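Wrapping the channel's `Receiver` in `Arc<Mutex<...>>`, as the Historian now does, is what lets more than one thread share the single consumer end. A self-contained sketch of the pattern (`u64` stands in for `Entry`; the struct here is a stripped-down stand-in, not the real Historian):

```rust
use std::sync::mpsc::{sync_channel, Receiver, TryRecvError};
use std::sync::{Arc, Mutex};

struct Historian {
    output: Arc<Mutex<Receiver<u64>>>,
}

impl Historian {
    // Non-blocking drain, mirroring the patch's `receive`.
    fn receive(&self) -> Result<u64, TryRecvError> {
        self.output.lock().unwrap().try_recv()
    }
}

fn main() {
    let (tx, rx) = sync_channel(10);
    let hist = Historian { output: Arc::new(Mutex::new(rx)) };
    tx.send(42).unwrap();
    assert_eq!(hist.receive(), Ok(42));
    assert_eq!(hist.receive(), Err(TryRecvError::Empty));
}
```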
#[cfg(test)]
@@ -67,9 +76,9 @@ mod tests {
        sleep(Duration::new(0, 1_000_000));
        input.send(Signal::Tick).unwrap();

-        let entry0 = hist.output.recv().unwrap();
-        let entry1 = hist.output.recv().unwrap();
-        let entry2 = hist.output.recv().unwrap();
+        let entry0 = hist.output.lock().unwrap().recv().unwrap();
+        let entry1 = hist.output.lock().unwrap().recv().unwrap();
+        let entry2 = hist.output.lock().unwrap().recv().unwrap();

        assert_eq!(entry0.num_hashes, 0);
        assert_eq!(entry1.num_hashes, 0);
@@ -105,7 +114,7 @@ mod tests {
        sleep(Duration::from_millis(300));
        input.send(Signal::Tick).unwrap();
        drop(input);
-        let entries: Vec<Entry> = hist.output.iter().collect();
+        let entries: Vec<Entry> = hist.output.lock().unwrap().iter().collect();
        assert!(entries.len() > 1);

        // Ensure the ID is not the seed.
diff --git a/src/lib.rs b/src/lib.rs
index 7a316f9ade..75c9b65f84 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -11,6 +11,7 @@ pub mod event;
pub mod hash;
pub mod historian;
pub mod ledger;
+pub mod logger;
pub mod mint;
pub mod packet;
pub mod plan;
@@ -18,7 +19,6 @@ pub mod recorder;
pub mod result;
pub mod signature;
pub mod streamer;
-pub mod subscribers;
pub mod transaction;
extern crate bincode;
extern crate byteorder;
diff --git a/src/logger.rs b/src/logger.rs
new file mode 100644
index 0000000000..88bcc911bf
--- /dev/null
+++ b/src/logger.rs
@@ -0,0 +1,11 @@
+use std::sync::{Once, ONCE_INIT};
+extern crate env_logger;
+
+static INIT: Once = ONCE_INIT;
+
+/// Setup function that is only run once, even if called multiple times.
+pub fn setup() {
+    INIT.call_once(|| {
+        let _ = env_logger::init();
+    });
+}
diff --git a/src/packet.rs b/src/packet.rs
index c4b09eb56e..a2d0db4ff8 100644
--- a/src/packet.rs
+++ b/src/packet.rs
@@ -1,6 +1,8 @@
//! The `packet` module defines data structures and methods to pull data from the network.
+use bincode::{deserialize, serialize};
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use result::{Error, Result};
+use signature::PublicKey;
use std::collections::VecDeque;
use std::fmt;
use std::io;
@@ -14,7 +16,7 @@ pub type PacketRecycler = Recycler<Packets>;
pub type BlobRecycler = Recycler<Blob>;

pub const NUM_PACKETS: usize = 1024 * 8;
-const BLOB_SIZE: usize = 64 * 1024;
+pub const BLOB_SIZE: usize = 64 * 1024;
pub const PACKET_DATA_SIZE: usize = 256;
pub const NUM_BLOBS: usize = (NUM_PACKETS * PACKET_DATA_SIZE) / BLOB_SIZE;

@@ -211,28 +213,40 @@ impl Packets {
    }
}

-const BLOB_INDEX_SIZE: usize = size_of::<u64>();
+const BLOB_INDEX_END: usize = size_of::<u64>();
+const BLOB_ID_END: usize = BLOB_INDEX_END + size_of::<u64>() + size_of::<PublicKey>();

impl Blob {
    pub fn get_index(&self) -> Result<u64> {
-        let mut rdr = io::Cursor::new(&self.data[0..BLOB_INDEX_SIZE]);
+        let mut rdr = io::Cursor::new(&self.data[0..BLOB_INDEX_END]);
        let r = rdr.read_u64::<LittleEndian>()?;
        Ok(r)
    }
    pub fn set_index(&mut self, ix: u64) -> Result<()> {
        let mut wtr = vec![];
        wtr.write_u64::<LittleEndian>(ix)?;
-        self.data[..BLOB_INDEX_SIZE].clone_from_slice(&wtr);
+        self.data[..BLOB_INDEX_END].clone_from_slice(&wtr);
        Ok(())
    }
+
+    pub fn get_id(&self) -> Result<PublicKey> {
+        let e = deserialize(&self.data[BLOB_INDEX_END..BLOB_ID_END])?;
+        Ok(e)
+    }
+    pub fn set_id(&mut self, id: PublicKey) -> Result<()> {
+        let wtr = serialize(&id)?;
+        self.data[BLOB_INDEX_END..BLOB_ID_END].clone_from_slice(&wtr);
+        Ok(())
+    }
+
    pub fn data(&self) -> &[u8] {
-        &self.data[BLOB_INDEX_SIZE..]
+        &self.data[BLOB_ID_END..]
    }
    pub fn data_mut(&mut self) -> &mut [u8] {
-        &mut self.data[BLOB_INDEX_SIZE..]
+        &mut self.data[BLOB_ID_END..]
    }
    pub fn set_size(&mut self, size: usize) {
-        self.meta.size = size + BLOB_INDEX_SIZE;
+        self.meta.size = size + BLOB_ID_END;
    }
    pub fn recv_from(re: &BlobRecycler, socket: &UdpSocket) -> Result<VecDeque<SharedBlob>> {
        let mut v = VecDeque::new();
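The new blob header prepends the sender's id right after the 8-byte index, so the payload now begins at `BLOB_ID_END`. A sketch of the offsets under the reconstruction above; note the type parameters inside `size_of::<...>()` were lost to extraction damage, so the `u64` length prefix and the 32-byte key size are our assumptions:

```rust
use std::mem::size_of;

type PublicKey = [u8; 32]; // stand-in for signature::PublicKey

const BLOB_INDEX_END: usize = size_of::<u64>();
// index (8 bytes) + assumed bincode length prefix (u64) + 32-byte key
const BLOB_ID_END: usize = BLOB_INDEX_END + size_of::<u64>() + size_of::<PublicKey>();

fn main() {
    // payload begins after the header: data[BLOB_ID_END..]
    println!("index ends at {}, id ends at {}", BLOB_INDEX_END, BLOB_ID_END);
}
```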
diff --git a/src/streamer.rs b/src/streamer.rs
index 43e6f2ac35..471f1f29ce 100644
--- a/src/streamer.rs
+++ b/src/streamer.rs
@@ -1,4 +1,7 @@
//! The `streamer` module defines a set of services for efficiently pulling data from udp sockets.
+use crdt::Crdt;
+#[cfg(feature = "erasure")]
+use erasure;
use packet::{Blob, BlobRecycler, PacketRecycler, SharedBlob, SharedPackets, NUM_BLOBS};
use result::Result;
use std::collections::VecDeque;
@@ -8,7 +11,6 @@ use std::sync::mpsc;
use std::sync::{Arc, RwLock};
use std::thread::{spawn, JoinHandle};
use std::time::Duration;
-use subscribers::Subscribers;

pub type PacketReceiver = mpsc::Receiver<SharedPackets>;
pub type PacketSender = mpsc::Sender<SharedPackets>;
@@ -99,17 +101,14 @@ pub fn blob_receiver(
        if exit.load(Ordering::Relaxed) {
            break;
        }
-        let ret = recv_blobs(&recycler, &sock, &s);
-        if ret.is_err() {
-            break;
-        }
+        let _ = recv_blobs(&recycler, &sock, &s);
    });
    Ok(t)
}

fn recv_window(
    window: &mut Vec<Option<SharedBlob>>,
-    subs: &Arc<RwLock<Subscribers>>,
+    crdt: &Arc<RwLock<Crdt>>,
    recycler: &BlobRecycler,
    consumed: &mut usize,
    r: &BlobReceiver,
) -> Result<()> {
    let timer = Duration::new(1, 0);
    let mut dq = r.recv_timeout(timer)?;
+    let leader_id = crdt.read().unwrap().leader_data().id;
    while let Ok(mut nq) = r.try_recv() {
        dq.append(&mut nq)
    }
    {
        //retransmit all leader blocks
        let mut retransmitq = VecDeque::new();
-        let rsubs = subs.read().unwrap();
        for b in &dq {
            let p = b.read().unwrap();
            //TODO this check isn't safe against adversarial packets
            //we need to maintain a sequence window
            trace!(
                "idx: {} addr: {:?} id: {:?} leader: {:?}",
                p.get_index().unwrap(),
+                p.get_id().unwrap(),
                p.meta.addr(),
-                rsubs.leader.addr
+                leader_id
            );
-            if p.meta.addr() == rsubs.leader.addr {
+            if p.get_id().unwrap() == leader_id {
                //TODO
                //need to copy the retransmitted blob
                //otherwise we get into races with which thread
@@ -195,7 +195,7 @@
pub fn window(
    exit: Arc<AtomicBool>,
-    subs: Arc<RwLock<Subscribers>>,
+    crdt: Arc<RwLock<Crdt>>,
    recycler: BlobRecycler,
    r: BlobReceiver,
    s: BlobSender,
@@ -210,7 +210,7 @@
            }
            let _ = recv_window(
                &mut window,
-                &subs,
+                &crdt,
                &recycler,
                &mut consumed,
                &r,
@@ -221,8 +221,57 @@
    })
}

+fn broadcast(
+    crdt: &Arc<RwLock<Crdt>>,
+    recycler: &BlobRecycler,
+    r: &BlobReceiver,
+    sock: &UdpSocket,
+    transmit_index: &mut u64,
+) -> Result<()> {
+    let timer = Duration::new(1, 0);
+    let mut dq = r.recv_timeout(timer)?;
+    while let Ok(mut nq) = r.try_recv() {
+        dq.append(&mut nq);
+    }
+    let mut blobs = dq.into_iter().collect();
+    // appends codes to the list of blobs allowing us to reconstruct the stream
+    #[cfg(feature = "erasure")]
+    erasure::generate_codes(blobs);
+    Crdt::broadcast(crdt, &blobs, &sock, transmit_index)?;
+    while let Some(b) = blobs.pop() {
+        recycler.recycle(b);
+    }
+    Ok(())
+}
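Note the shift inside `recv_window`: a blob now counts as a leader blob when the id embedded in its header matches the gossiped leader id, not when its source address matches. A reduced sketch of that filter (types collapsed to plain values; names are ours):

```rust
type PublicKey = [u8; 32]; // stand-in

struct Blob {
    id: PublicKey,
}

// Keep the indices of blobs carrying the leader's id, the check recv_window
// now performs via get_id() instead of meta.addr().
fn leader_blobs(dq: &[Blob], leader_id: &PublicKey) -> Vec<usize> {
    dq.iter()
        .enumerate()
        .filter(|(_, b)| &b.id == leader_id)
        .map(|(i, _)| i)
        .collect()
}

fn main() {
    let leader = [1u8; 32];
    let blobs = vec![Blob { id: [1u8; 32] }, Blob { id: [2u8; 32] }];
    assert_eq!(leader_blobs(&blobs, &leader), vec![0]);
}
```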
+/// Service to broadcast messages from the leader to layer 1 nodes.
+/// See `crdt` for network layer definitions.
+/// # Arguments
+/// * `sock` - Socket to send from.
+/// * `exit` - Boolean to signal system exit.
+/// * `crdt` - CRDT structure
+/// * `recycler` - Blob recycler.
+/// * `r` - Receive channel for blobs to be retransmitted to all the layer 1 nodes.
+pub fn broadcaster(
+    sock: UdpSocket,
+    exit: Arc<AtomicBool>,
+    crdt: Arc<RwLock<Crdt>>,
+    recycler: BlobRecycler,
+    r: BlobReceiver,
+) -> JoinHandle<()> {
+    spawn(move || {
+        let mut transmit_index = 0;
+        loop {
+            if exit.load(Ordering::Relaxed) {
+                break;
+            }
+            let _ = broadcast(&crdt, &recycler, &r, &sock, &mut transmit_index);
+        }
+    })
+}
+
fn retransmit(
-    subs: &Arc<RwLock<Subscribers>>,
+    crdt: &Arc<RwLock<Crdt>>,
    recycler: &BlobRecycler,
    r: &BlobReceiver,
    sock: &UdpSocket,
@@ -233,10 +282,8 @@
        dq.append(&mut nq);
    }
    {
-        let wsubs = subs.read().unwrap();
        for b in &dq {
-            let mut mb = b.write().unwrap();
-            wsubs.retransmit(&mut mb, sock)?;
+            Crdt::retransmit(&crdt, b, sock)?;
        }
    }
    while let Some(b) = dq.pop_front() {
@@ -246,26 +293,30 @@
}

/// Service to retransmit messages from the leader to layer 1 nodes.
-/// See `subscribers` for network layer definitions.
+/// See `crdt` for network layer definitions.
/// # Arguments
/// * `sock` - Socket to read from. Read timeout is set to 1.
/// * `exit` - Boolean to signal system exit.
-/// * `subs` - Shared Subscriber structure. This structure needs to be updated and popualted by
-/// the accountant.
+/// * `crdt` - This structure needs to be updated and populated by the accountant and via gossip.
/// * `recycler` - Blob recycler.
/// * `r` - Receive channel for blobs to be retransmitted to all the layer 1 nodes.
pub fn retransmitter(
    sock: UdpSocket,
    exit: Arc<AtomicBool>,
-    subs: Arc<RwLock<Subscribers>>,
+    crdt: Arc<RwLock<Crdt>>,
    recycler: BlobRecycler,
    r: BlobReceiver,
) -> JoinHandle<()> {
-    spawn(move || loop {
-        if exit.load(Ordering::Relaxed) {
-            break;
+    spawn(move || {
+        trace!("retransmitter started");
+        loop {
+            if exit.load(Ordering::Relaxed) {
+                break;
+            }
+            // TODO: handle this error
+            let _ = retransmit(&crdt, &recycler, &r, &sock);
        }
-        let _ = retransmit(&subs, &recycler, &r, &sock);
+        trace!("exiting retransmitter");
    })
}

@@ -356,7 +407,7 @@ mod bench {
        let time = elapsed.as_secs() * 10000000000 + elapsed.subsec_nanos() as u64;
        let ftime = (time as f64) / 10000000000f64;
        let fcount = (end_val - start_val) as f64;
-        println!("performance: {:?}", fcount / ftime);
+        trace!("performance: {:?}", fcount / ftime);
        exit.store(true, Ordering::Relaxed);
        t_reader.join()?;
        t_producer1.join()?;
@@ -373,7 +424,11 @@ mod bench {

#[cfg(test)]
mod test {
+    use crdt::{Crdt, ReplicatedData};
+    use logger;
    use packet::{Blob, BlobRecycler, Packet, PacketRecycler, Packets, PACKET_DATA_SIZE};
+    use signature::KeyPair;
+    use signature::KeyPairUtil;
    use std::collections::VecDeque;
    use std::io;
    use std::io::Write;
@@ -381,17 +436,17 @@ mod test {
    use std::sync::atomic::{AtomicBool, Ordering};
    use std::sync::mpsc::channel;
    use std::sync::{Arc, RwLock};
+    use std::thread::sleep;
    use std::time::Duration;
    use streamer::{blob_receiver, receiver, responder, retransmitter, window, BlobReceiver,
                   PacketReceiver};
-    use subscribers::{Node, Subscribers};

    fn get_msgs(r: PacketReceiver, num: &mut usize) {
        for _t in 0..5 {
            let timer = Duration::new(1, 0);
            match r.recv_timeout(timer) {
                Ok(m) => *num += m.read().unwrap().packets.len(),
-                e => println!("error {:?}", e),
+                e => info!("error {:?}", e),
            }
            if *num == 10 {
                break;
@@ -445,7 +500,7 @@ mod test {
                    }
                    *num += m.len();
                }
-                e => println!("error {:?}", e),
+                e => info!("error {:?}", e),
            }
            if *num == 10 {
                break;
@@ -455,15 +510,23 @@ mod test {

    #[test]
    pub fn window_send_test() {
+        let pubkey_me = KeyPair::new().pubkey();
        let read = UdpSocket::bind("127.0.0.1:0").expect("bind");
        let addr = read.local_addr().unwrap();
        let send = UdpSocket::bind("127.0.0.1:0").expect("bind");
+        let serve = UdpSocket::bind("127.0.0.1:0").expect("bind");
        let exit = Arc::new(AtomicBool::new(false));
-        let subs = Arc::new(RwLock::new(Subscribers::new(
-            Node::default(),
-            Node::new([0; 8], 0, send.local_addr().unwrap()),
-            &[],
-        )));
+        let rep_data = ReplicatedData::new(
+            pubkey_me,
+            read.local_addr().unwrap(),
+            send.local_addr().unwrap(),
+            serve.local_addr().unwrap(),
+        );
+        let mut crdt_me = Crdt::new(rep_data);
+        let me_id = crdt_me.my_data().id;
+        crdt_me.set_leader(me_id);
+        let subs = Arc::new(RwLock::new(crdt_me));
+
        let resp_recycler = BlobRecycler::default();
        let (s_reader, r_reader) = channel();
        let t_receiver =
@@ -487,6 +550,7 @@ mod test {
            let b_ = b.clone();
            let mut w = b.write().unwrap();
            w.set_index(i).unwrap();
+            w.set_id(me_id).unwrap();
            assert_eq!(i, w.get_index().unwrap());
            w.meta.size = PACKET_DATA_SIZE;
            w.meta.set_addr(&addr);
@@ -507,43 +571,102 @@ mod test {
        t_window.join().expect("join");
    }

+    fn test_node() -> (Arc<RwLock<Crdt>>, UdpSocket, UdpSocket, UdpSocket) {
+        let gossip = UdpSocket::bind("127.0.0.1:0").unwrap();
+        let replicate = UdpSocket::bind("127.0.0.1:0").unwrap();
+        let serve = UdpSocket::bind("127.0.0.1:0").unwrap();
+        let pubkey = KeyPair::new().pubkey();
+        let d = ReplicatedData::new(
+            pubkey,
+            gossip.local_addr().unwrap(),
+            replicate.local_addr().unwrap(),
+            serve.local_addr().unwrap(),
+        );
+        let crdt = Crdt::new(d);
+        trace!(
+            "id: {} gossip: {} replicate: {} serve: {}",
+            crdt.my_data().id[0],
+            gossip.local_addr().unwrap(),
+            replicate.local_addr().unwrap(),
+            serve.local_addr().unwrap(),
+        );
+        (Arc::new(RwLock::new(crdt)), gossip, replicate, serve)
+    }

    #[test]
+    //retransmit from leader to replicate target
    pub fn retransmit() {
-        let read = UdpSocket::bind("127.0.0.1:0").expect("bind");
-        let send = UdpSocket::bind("127.0.0.1:0").expect("bind");
+        logger::setup();
+        trace!("retransmit test start");
        let exit = Arc::new(AtomicBool::new(false));
-        let subs = Arc::new(RwLock::new(Subscribers::new(
-            Node::default(),
-            Node::default(),
-            &[Node::new([0; 8], 1, read.local_addr().unwrap())],
-        )));
+        let (crdt_leader, sock_gossip_leader, _, sock_leader) = test_node();
+        let (crdt_target, sock_gossip_target, sock_replicate_target, _) = test_node();
+        let leader_data = crdt_leader.read().unwrap().my_data().clone();
+        crdt_leader.write().unwrap().insert(leader_data.clone());
+        crdt_leader.write().unwrap().set_leader(leader_data.id);
+        let t_crdt_leader_g = Crdt::gossip(crdt_leader.clone(), exit.clone());
+        let t_crdt_leader_l = Crdt::listen(crdt_leader.clone(), sock_gossip_leader, exit.clone());
+
+        crdt_target.write().unwrap().insert(leader_data.clone());
+        crdt_target.write().unwrap().set_leader(leader_data.id);
+        let t_crdt_target_g = Crdt::gossip(crdt_target.clone(), exit.clone());
+        let t_crdt_target_l = Crdt::listen(crdt_target.clone(), sock_gossip_target, exit.clone());
+
+        //leader retransmitter
        let (s_retransmit, r_retransmit) = channel();
        let blob_recycler = BlobRecycler::default();
-        let saddr = send.local_addr().unwrap();
+        let saddr = sock_leader.local_addr().unwrap();
        let t_retransmit = retransmitter(
-            send,
+            sock_leader,
            exit.clone(),
-            subs,
+            crdt_leader.clone(),
            blob_recycler.clone(),
            r_retransmit,
        );
+
+        //target receiver
+        let (s_blob_receiver, r_blob_receiver) = channel();
+        let t_receiver = blob_receiver(
+            exit.clone(),
+            blob_recycler.clone(),
+            sock_replicate_target,
+            s_blob_receiver,
+        ).unwrap();
+        for _ in 0..10 {
+            let done = crdt_target.read().unwrap().update_index == 2
+                && crdt_leader.read().unwrap().update_index == 2;
+            if done {
+                break;
+            }
+            let timer = Duration::new(1, 0);
+            sleep(timer);
+        }
+
+        //send the data through
        let mut bq = VecDeque::new();
        let b = blob_recycler.allocate();
        b.write().unwrap().meta.size = 10;
        bq.push_back(b);
        s_retransmit.send(bq).unwrap();
-        let (s_blob_receiver, r_blob_receiver) = channel();
-        let t_receiver =
-            blob_receiver(exit.clone(), blob_recycler.clone(), read, s_blob_receiver).unwrap();
-        let mut oq = r_blob_receiver.recv().unwrap();
+        let timer = Duration::new(5, 0);
+        trace!("Waiting for timeout");
+        let mut oq = r_blob_receiver.recv_timeout(timer).unwrap();
        assert_eq!(oq.len(), 1);
        let o = oq.pop_front().unwrap();
        let ro = o.read().unwrap();
        assert_eq!(ro.meta.size, 10);
        assert_eq!(ro.meta.addr(), saddr);
        exit.store(true, Ordering::Relaxed);
-        t_receiver.join().expect("join");
-        t_retransmit.join().expect("join");
+        let threads = vec![
+            t_receiver,
+            t_retransmit,
+            t_crdt_target_g,
+            t_crdt_target_l,
+            t_crdt_leader_g,
+            t_crdt_leader_l,
+        ];
+        for t in threads {
+            t.join().unwrap();
+        }
    }
}
diff --git a/src/subscribers.rs b/src/subscribers.rs
deleted file mode 100644
index f0b271c439..0000000000
--- a/src/subscribers.rs
+++ /dev/null
@@ -1,149 +0,0 @@
-//! The `subscribers` module defines data structures to keep track of nodes on the network.
-//! The network is arranged in layers:
-//!
-//! * layer 0 - Leader.
-//! * layer 1 - As many nodes as we can fit to quickly get reliable `2/3+1` finality
-//! * layer 2 - Everyone else, if layer 1 is `2^10`, layer 2 should be able to fit `2^20` number of nodes.
-//!
-//! It's up to the external state machine to keep this updated.
-use packet::Blob;
-use rayon::prelude::*;
-use result::{Error, Result};
-use std::net::{SocketAddr, UdpSocket};
-
-use std::fmt;
-
-#[derive(Clone, PartialEq)]
-pub struct Node {
-    pub id: [u64; 8],
-    pub weight: u64,
-    pub addr: SocketAddr,
-}
-
-//sockaddr doesn't implement default
-impl Default for Node {
-    fn default() -> Node {
-        Node {
-            id: [0; 8],
-            weight: 0,
-            addr: "0.0.0.0:0".parse().unwrap(),
-        }
-    }
-}
-
-impl Node {
-    pub fn new(id: [u64; 8], weight: u64, addr: SocketAddr) -> Node {
-        Node { id, weight, addr }
-    }
-    fn key(&self) -> i64 {
-        (self.weight as i64).checked_neg().unwrap()
-    }
-}
-
-impl fmt::Debug for Node {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        write!(f, "Node {{ weight: {} addr: {} }}", self.weight, self.addr)
-    }
-}
-
-pub struct Subscribers {
-    data: Vec<Node>,
-    pub me: Node,
-    pub leader: Node,
-}
-
-impl Subscribers {
-    pub fn new(me: Node, leader: Node, network: &[Node]) -> Subscribers {
-        let mut h = Subscribers {
-            data: vec![],
-            me: me.clone(),
-            leader: leader.clone(),
-        };
-        h.insert(&[me, leader]);
-        h.insert(network);
-        h
-    }
-
-    /// retransmit messages from the leader to layer 1 nodes
-    pub fn retransmit(&self, blob: &mut Blob, s: &UdpSocket) -> Result<()> {
-        let errs: Vec<_> = self.data
-            .par_iter()
-            .map(|i| {
-                if self.me == *i {
-                    return Ok(0);
-                }
-                if self.leader == *i {
-                    return Ok(0);
-                }
-                trace!("retransmit blob to {}", i.addr);
-                s.send_to(&blob.data[..blob.meta.size], &i.addr)
-            })
-            .collect();
-        for e in errs {
-            trace!("retransmit result {:?}", e);
-            match e {
-                Err(e) => return Err(Error::IO(e)),
-                _ => (),
-            }
-        }
-        Ok(())
-    }
-    pub fn insert(&mut self, ns: &[Node]) {
-        self.data.extend_from_slice(ns);
-        self.data.sort_by_key(Node::key);
-    }
-}
-
-#[cfg(test)]
-mod test {
-    use packet::Blob;
-    use rayon::prelude::*;
-    use std::net::UdpSocket;
-    use std::time::Duration;
-    use subscribers::{Node, Subscribers};
-
-    #[test]
-    pub fn subscriber() {
-        let mut me = Node::default();
-        me.weight = 10;
-        let mut leader = Node::default();
-        leader.weight = 11;
-        let mut s = Subscribers::new(me, leader, &[]);
-        assert_eq!(s.data.len(), 2);
-        assert_eq!(s.data[0].weight, 11);
-        assert_eq!(s.data[1].weight, 10);
-        let mut n = Node::default();
-        n.weight = 12;
-        s.insert(&[n]);
-        assert_eq!(s.data.len(), 3);
-        assert_eq!(s.data[0].weight, 12);
-    }
-    #[test]
-    pub fn retransmit() {
-        let s1 = UdpSocket::bind("127.0.0.1:0").expect("bind");
-        let s2 = UdpSocket::bind("127.0.0.1:0").expect("bind");
-        let s3 = UdpSocket::bind("127.0.0.1:0").expect("bind");
-        let n1 = Node::new([0; 8], 0, s1.local_addr().unwrap());
-        let n2 = Node::new([0; 8], 0, s2.local_addr().unwrap());
-        let mut s = Subscribers::new(n1.clone(), n2.clone(), &[]);
-        let n3 = Node::new([0; 8], 0, s3.local_addr().unwrap());
-        s.insert(&[n3]);
-        let mut b = Blob::default();
-        b.meta.size = 10;
-        let s4 = UdpSocket::bind("127.0.0.1:0").expect("bind");
-        s.retransmit(&mut b, &s4).unwrap();
-        let res: Vec<_> = [s1, s2, s3]
-            .into_par_iter()
-            .map(|s| {
-                let mut b = Blob::default();
-                s.set_read_timeout(Some(Duration::new(1, 0))).unwrap();
-                s.recv_from(&mut b.data).is_err()
-            })
-            .collect();
-        assert_eq!(res, [true, true, false]);
-        let mut n4 = Node::default();
-        n4.addr = "255.255.255.255:1".parse().unwrap();
-        s.insert(&[n4]);
-        assert!(s.retransmit(&mut b, &s4).is_err());
-    }
-}

From de680c2a8efc6207a6f3d2ef814930de9b6e8971 Mon Sep 17 00:00:00 2001
From: Greg Fitzgerald
Date: Thu, 3 May 2018 13:24:35 -0600
Subject: [PATCH 24/51] Remove duplicate state

---
 src/accountant.rs      |  9 +++++++++
 src/accountant_skel.rs | 24 +++++++-----------------
 src/accountant_stub.rs |  2 +-
 src/bin/testnode.rs    |  2 +-
 4 files changed, 18 insertions(+), 19 deletions(-)

diff --git a/src/accountant.rs b/src/accountant.rs
index 49c5d2c883..a94629837e 100644
--- a/src/accountant.rs
+++ b/src/accountant.rs
@@ -74,6 +74,13 @@ impl Accountant {
        acc
    }

+    /// Return the last entry ID registered
+    pub fn last_id(&self) -> Hash {
+        let last_ids = self.last_ids.read().unwrap();
+        let last_item = last_ids.iter().last().expect("empty last_ids list");
+        last_item.0
+    }
+
    fn reserve_signature(signatures: &RwLock<HashSet<Signature>>, sig: &Signature) -> bool {
        if signatures.read().unwrap().contains(sig) {
            return false;
        }
@@ -327,6 +334,8 @@ mod tests {
        let alice = Mint::new(10_000);
        let bob_pubkey = KeyPair::new().pubkey();
        let acc = Accountant::new(&alice);
+        assert_eq!(acc.last_id(), alice.last_id());
+
        acc.transfer(1_000, &alice.keypair(), bob_pubkey, alice.last_id())
            .unwrap();
        assert_eq!(acc.get_balance(&bob_pubkey).unwrap(), 1_000);
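`last_id` reads the newest id off the back of the `last_ids` deque under its read lock. A reduced sketch of just that access, assuming the deque holds `(Hash, ...)` tuples as the `last_item.0` access implies (`u64` stands in for `Hash`, and the second tuple field is elided to unit):

```rust
use std::collections::VecDeque;
use std::sync::RwLock;

struct Accountant {
    // (entry id, per-id bookkeeping elided)
    last_ids: RwLock<VecDeque<(u64, ())>>,
}

impl Accountant {
    fn last_id(&self) -> u64 {
        let last_ids = self.last_ids.read().unwrap();
        let last_item = last_ids.iter().last().expect("empty last_ids list");
        last_item.0
    }
}

fn main() {
    let acc = Accountant {
        last_ids: RwLock::new(VecDeque::from(vec![(1, ()), (2, ())])),
    };
    assert_eq!(acc.last_id(), 2); // most recently registered id wins
}
```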
diff --git a/src/accountant_skel.rs b/src/accountant_skel.rs
index bd8c7ddd24..1d213cb731 100644
--- a/src/accountant_skel.rs
+++ b/src/accountant_skel.rs
@@ -33,7 +33,6 @@ use transaction::Transaction;

pub struct AccountantSkel {
    acc: Mutex<Accountant>,
-    last_id: Mutex<Hash>,
    historian_input: Mutex<SyncSender<Signal>>,
    historian: Historian,
    entry_info_subscribers: Mutex<Vec<SocketAddr>>,
@@ -81,15 +80,9 @@ pub enum Response {

impl AccountantSkel {
    /// Create a new AccountantSkel that wraps the given Accountant.
-    pub fn new(
-        acc: Accountant,
-        last_id: Hash,
-        historian_input: SyncSender<Signal>,
-        historian: Historian,
-    ) -> Self {
+    pub fn new(acc: Accountant, historian_input: SyncSender<Signal>, historian: Historian) -> Self {
        AccountantSkel {
            acc: Mutex::new(acc),
-            last_id: Mutex::new(last_id),
            entry_info_subscribers: Mutex::new(vec![]),
            historian_input: Mutex::new(historian_input),
            historian,
@@ -116,10 +109,7 @@ impl AccountantSkel {

    fn update_entry(obj: &SharedSkel, writer: &Arc<Mutex<W>>, entry: &Entry) {
        trace!("update_entry entry");
-        let mut last_id_l = obj.last_id.lock().unwrap();
-        *last_id_l = entry.id;
-        obj.acc.lock().unwrap().register_entry_id(&last_id_l);
-        drop(last_id_l);
+        obj.acc.lock().unwrap().register_entry_id(&entry.id);
        writeln!(
            writer.lock().unwrap(),
            "{}",
@@ -228,7 +218,7 @@ impl AccountantSkel {
            }
            Request::GetLastId => Some((
                Response::LastId {
-                    id: *self.last_id.lock().unwrap(),
+                    id: self.acc.lock().unwrap().last_id(),
                },
                rsp_addr,
            )),
@@ -699,7 +689,7 @@ mod tests {
        let rsp_addr: SocketAddr = "0.0.0.0:0".parse().expect("socket address");
        let (input, event_receiver) = sync_channel(10);
        let historian = Historian::new(event_receiver, &mint.last_id(), None);
-        let skel = AccountantSkel::new(acc, mint.last_id(), input, historian);
+        let skel = AccountantSkel::new(acc, input, historian);

        // Process a batch that includes a transaction that receives two tokens.
        let alice = KeyPair::new();
@@ -740,7 +730,7 @@ mod tests {
        let exit = Arc::new(AtomicBool::new(false));
        let (input, event_receiver) = sync_channel(10);
        let historian = Historian::new(event_receiver, &alice.last_id(), Some(30));
-        let acc_skel = Arc::new(AccountantSkel::new(acc, alice.last_id(), input, historian));
+        let acc_skel = Arc::new(AccountantSkel::new(acc, input, historian));
        let serve_addr = leader_serve.local_addr().unwrap();
        let threads = AccountantSkel::serve(
            &acc_skel,
@@ -858,7 +848,7 @@ mod tests {
        let acc = Accountant::new(&alice);
        let (input, event_receiver) = sync_channel(10);
        let historian = Historian::new(event_receiver, &alice.last_id(), Some(30));
-        let acc = Arc::new(AccountantSkel::new(acc, alice.last_id(), input, historian));
+        let acc = Arc::new(AccountantSkel::new(acc, input, historian));
        let replicate_addr = target1_data.replicate_addr;
        let threads = AccountantSkel::replicate(
            &acc,
@@ -1007,7 +997,7 @@ mod bench {

        let (input, event_receiver) = sync_channel(10);
        let historian = Historian::new(event_receiver, &mint.last_id(), None);
-        let skel = AccountantSkel::new(acc, mint.last_id(), input, historian);
+        let skel = AccountantSkel::new(acc, input, historian);

        let now = Instant::now();
        assert!(skel.process_packets(req_vers).is_ok());
diff --git a/src/accountant_stub.rs b/src/accountant_stub.rs
index 15a6a69cb7..b8c72167a4 100644
--- a/src/accountant_stub.rs
+++ b/src/accountant_stub.rs
@@ -192,7 +192,7 @@ mod tests {
        let exit = Arc::new(AtomicBool::new(false));
        let (input, event_receiver) = sync_channel(10);
        let historian = Historian::new(event_receiver, &alice.last_id(), Some(30));
-        let acc = Arc::new(AccountantSkel::new(acc, alice.last_id(), input, historian));
+        let acc = Arc::new(AccountantSkel::new(acc, input, historian));
        let threads = AccountantSkel::serve(&acc, d, serve, gossip, exit.clone(), sink()).unwrap();
        sleep(Duration::from_millis(300));

diff --git a/src/bin/testnode.rs b/src/bin/testnode.rs
index 2c585e6f27..c12840e843 100644
--- a/src/bin/testnode.rs
+++ b/src/bin/testnode.rs
@@ -104,7 +104,7 @@ fn main() {
    let (input, event_receiver) = sync_channel(10_000);
Historian::new(event_receiver, &last_id, Some(1000)); let exit = Arc::new(AtomicBool::new(false)); - let skel = Arc::new(AccountantSkel::new(acc, last_id, input, historian)); + let skel = Arc::new(AccountantSkel::new(acc, input, historian)); let serve_sock = UdpSocket::bind(&serve_addr).unwrap(); let gossip_sock = UdpSocket::bind(&gossip_addr).unwrap(); let replicate_sock = UdpSocket::bind(&replicate_addr).unwrap(); From c9c7fb0a27a103dc8ed0af3b9edeeee48a999fb3 Mon Sep 17 00:00:00 2001 From: Greg Fitzgerald Date: Thu, 3 May 2018 13:26:45 -0600 Subject: [PATCH 25/51] Update comment The last PR added a thread that logs entries without needing to be driven by the client. --- src/accountant_stub.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/accountant_stub.rs b/src/accountant_stub.rs index b8c72167a4..b782c614e7 100644 --- a/src/accountant_stub.rs +++ b/src/accountant_stub.rs @@ -109,9 +109,7 @@ impl AccountantStub { } /// Request the last Entry ID from the server. This method blocks - /// until the server sends a response. At the time of this writing, - /// it also has the side-effect of causing the server to log any - /// entries that have been published by the Historian. + /// until the server sends a response. pub fn get_last_id(&mut self) -> FutureResult { let req = Request::GetLastId; let data = serialize(&req).expect("serialize GetId"); From f752e024876d29c6ecb2123fc48fa51a885b2f6f Mon Sep 17 00:00:00 2001 From: Greg Fitzgerald Date: Thu, 3 May 2018 13:29:54 -0600 Subject: [PATCH 26/51] Implement GetLastId with EntryInfo subscription --- src/accountant_skel.rs | 8 -------- src/accountant_stub.rs | 10 +--------- 2 files changed, 1 insertion(+), 17 deletions(-) diff --git a/src/accountant_skel.rs b/src/accountant_skel.rs index 1d213cb731..c08e86620d 100644 --- a/src/accountant_skel.rs +++ b/src/accountant_skel.rs @@ -43,7 +43,6 @@ pub struct AccountantSkel { pub enum Request { Transaction(Transaction), GetBalance { key: PublicKey }, - GetLastId, Subscribe { subscriptions: Vec }, } @@ -75,7 +74,6 @@ type SharedSkel = Arc; pub enum Response { Balance { key: PublicKey, val: Option }, EntryInfo(EntryInfo), - LastId { id: Hash }, } impl AccountantSkel { @@ -216,12 +214,6 @@ impl AccountantSkel { let val = self.acc.lock().unwrap().get_balance(&key); Some((Response::Balance { key, val }, rsp_addr)) } - Request::GetLastId => Some(( - Response::LastId { - id: self.acc.lock().unwrap().last_id(), - }, - rsp_addr, - )), Request::Transaction(_) => unreachable!(), Request::Subscribe { subscriptions } => { for subscription in subscriptions { diff --git a/src/accountant_stub.rs b/src/accountant_stub.rs index b782c614e7..48ba9c5c17 100644 --- a/src/accountant_stub.rs +++ b/src/accountant_stub.rs @@ -57,9 +57,6 @@ impl AccountantStub { Response::Balance { key, val } => { self.balances.insert(key, val); } - Response::LastId { id } => { - self.last_id = Some(id); - } Response::EntryInfo(entry_info) => { self.last_id = Some(entry_info.id); self.num_events += entry_info.num_events; @@ -111,15 +108,10 @@ impl AccountantStub { /// Request the last Entry ID from the server. This method blocks /// until the server sends a response. pub fn get_last_id(&mut self) -> FutureResult { - let req = Request::GetLastId; - let data = serialize(&req).expect("serialize GetId"); - self.socket - .send_to(&data, &self.addr) - .expect("buffer error"); let mut done = false; while !done { let resp = self.recv_response().expect("recv response"); - if let &Response::LastId { .. 
} = &resp { + if let &Response::EntryInfo { .. } = &resp { done = true; } self.process_response(resp); From 1feff408ffae755168aa90ab4e7e839cf651aaa5 Mon Sep 17 00:00:00 2001 From: Greg Fitzgerald Date: Thu, 3 May 2018 13:34:54 -0600 Subject: [PATCH 27/51] Implement get_last_id() with transaction_count() This is more precice than the previous implementation because it'll drain the EntryInfo queue and return the most recent last_id instead of the first one. --- src/accountant_stub.rs | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/src/accountant_stub.rs b/src/accountant_stub.rs index 48ba9c5c17..1797a53e1c 100644 --- a/src/accountant_stub.rs +++ b/src/accountant_stub.rs @@ -108,14 +108,7 @@ impl AccountantStub { /// Request the last Entry ID from the server. This method blocks /// until the server sends a response. pub fn get_last_id(&mut self) -> FutureResult { - let mut done = false; - while !done { - let resp = self.recv_response().expect("recv response"); - if let &Response::EntryInfo { .. } = &resp { - done = true; - } - self.process_response(resp); - } + self.transaction_count(); ok(self.last_id.unwrap_or(Hash::default())) } From 68c7f992faad528778c8ac4776bdf01c5ab10641 Mon Sep 17 00:00:00 2001 From: Greg Fitzgerald Date: Thu, 3 May 2018 13:56:10 -0600 Subject: [PATCH 28/51] Sooth all versions of rustfmt --- src/streamer.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/streamer.rs b/src/streamer.rs index 471f1f29ce..808eea1e76 100644 --- a/src/streamer.rs +++ b/src/streamer.rs @@ -438,8 +438,8 @@ mod test { use std::sync::{Arc, RwLock}; use std::thread::sleep; use std::time::Duration; - use streamer::{blob_receiver, receiver, responder, retransmitter, window, BlobReceiver, - PacketReceiver}; + use streamer::{blob_receiver, receiver, responder, retransmitter, window}; + use streamer::{BlobReceiver, PacketReceiver}; fn get_msgs(r: PacketReceiver, num: &mut usize) { for _t in 0..5 { From 0aad71d46e22fca05bc3e76beee752ccad3e6b1c Mon Sep 17 00:00:00 2001 From: Anatoly Yakovenko Date: Thu, 3 May 2018 14:35:04 -0700 Subject: [PATCH 29/51] fix entry serialize --- src/accountant_skel.rs | 64 ++++++++++++++++++++++++------------------ 1 file changed, 36 insertions(+), 28 deletions(-) diff --git a/src/accountant_skel.rs b/src/accountant_skel.rs index c08e86620d..7e548b8710 100644 --- a/src/accountant_skel.rs +++ b/src/accountant_skel.rs @@ -118,29 +118,27 @@ impl AccountantSkel { trace!("notify_entry_info done"); } - fn receive_to_list( + fn receive_all( obj: &SharedSkel, writer: &Arc>, - max: usize, - ) -> Result> { + ) -> Result> { //TODO implement a serialize for channel that does this without allocations - let mut num = 0; - let mut l = LinkedList::new(); + let mut l = vec![]; let entry = obj.historian .output .lock() .unwrap() .recv_timeout(Duration::new(1, 0))?; + trace!("obj.write 1 {:?}", entry); Self::update_entry(obj, writer, &entry); - l.push_back(entry); + trace!("obj.write 1.end"); + l.push(entry); while let Ok(entry) = obj.historian.receive() { + trace!("obj.write 2"); Self::update_entry(obj, writer, &entry); - l.push_back(entry); - num += 1; - if num == max { - break; - } - trace!("receive_to_list entries num: {}", num); + trace!("obj.write 2.end"); + l.push(entry); + trace!("num: {}", num); } Ok(l) } @@ -154,24 +152,34 @@ impl AccountantSkel { writer: &Arc>, exit: Arc, ) -> Result<()> { - // TODO: should it be the serialized Entry size? 
- let max = BLOB_SIZE / size_of::(); let mut q = VecDeque::new(); - let mut count = 0; trace!("max: {}", max); - while let Ok(list) = Self::receive_to_list(&obj, writer, max) { - trace!("New blobs? {} {}", count, list.len()); - let b = blob_recycler.allocate(); - let pos = { - let mut bd = b.write().unwrap(); - let mut out = Cursor::new(bd.data_mut()); - serialize_into(&mut out, &list).expect("failed to serialize output"); - out.position() as usize - }; - assert!(pos < BLOB_SIZE); - b.write().unwrap().set_size(pos); - q.push_back(b); - count += 1; + while let Ok(list) = Self::receive_all(&obj, writer) { + trace!("New blobs? {}", list.len()); + let mut start = 0; + let mut end = 0; + while start < list.len() { + let total = 0; + for i in list[start..] { + total += size_of::() * i.events.len(); + total += size_of::(); + if total >= BLOB_SIZE { + break; + } + end += 1; + } + let b = blob_recycler.allocate(); + let pos = { + let mut bd = b.write().unwrap(); + let mut out = Cursor::new(bd.data_mut()); + serialize_into(&mut out, &list[start .. end]).expect("failed to serialize output"); + out.position() as usize + }; + assert!(pos < BLOB_SIZE); + b.write().unwrap().set_size(pos); + q.push_back(b); + start = end; + } if exit.load(Ordering::Relaxed) { break; } From 2cdd515b129a18db45fb4fed1337c7146cf9eea5 Mon Sep 17 00:00:00 2001 From: Stephen Akridge Date: Thu, 3 May 2018 14:47:05 -0700 Subject: [PATCH 30/51] Compiles/fmt and add assert for forward progress --- src/accountant_skel.rs | 24 ++++++++---------------- 1 file changed, 8 insertions(+), 16 deletions(-) diff --git a/src/accountant_skel.rs b/src/accountant_skel.rs index 7e548b8710..c9a7a08703 100644 --- a/src/accountant_skel.rs +++ b/src/accountant_skel.rs @@ -18,7 +18,6 @@ use result::Result; use serde_json; use signature::PublicKey; use std::cmp::max; -use std::collections::LinkedList; use std::collections::VecDeque; use std::io::{Cursor, Write}; use std::mem::size_of; @@ -113,15 +112,10 @@ impl AccountantSkel { "{}", serde_json::to_string(&entry).unwrap() ).unwrap(); - trace!("notify_entry_info entry"); Self::notify_entry_info_subscribers(obj, &entry); - trace!("notify_entry_info done"); } - fn receive_all( - obj: &SharedSkel, - writer: &Arc>, - ) -> Result> { + fn receive_all(obj: &SharedSkel, writer: &Arc>) -> Result> { //TODO implement a serialize for channel that does this without allocations let mut l = vec![]; let entry = obj.historian @@ -129,16 +123,11 @@ impl AccountantSkel { .lock() .unwrap() .recv_timeout(Duration::new(1, 0))?; - trace!("obj.write 1 {:?}", entry); Self::update_entry(obj, writer, &entry); - trace!("obj.write 1.end"); l.push(entry); while let Ok(entry) = obj.historian.receive() { - trace!("obj.write 2"); Self::update_entry(obj, writer, &entry); - trace!("obj.write 2.end"); l.push(entry); - trace!("num: {}", num); } Ok(l) } @@ -153,14 +142,13 @@ impl AccountantSkel { exit: Arc, ) -> Result<()> { let mut q = VecDeque::new(); - trace!("max: {}", max); while let Ok(list) = Self::receive_all(&obj, writer) { trace!("New blobs? {}", list.len()); let mut start = 0; let mut end = 0; while start < list.len() { - let total = 0; - for i in list[start..] { + let mut total = 0; + for i in &list[start..] 
{ total += size_of::() * i.events.len(); total += size_of::(); if total >= BLOB_SIZE { @@ -168,11 +156,15 @@ impl AccountantSkel { } end += 1; } + // See that we made progress and a single + // vec of Events wasn't too big for a single packet + assert!(end > start); let b = blob_recycler.allocate(); let pos = { let mut bd = b.write().unwrap(); let mut out = Cursor::new(bd.data_mut()); - serialize_into(&mut out, &list[start .. end]).expect("failed to serialize output"); + serialize_into(&mut out, &list[start..end]) + .expect("failed to serialize output"); out.position() as usize }; assert!(pos < BLOB_SIZE); From 888c2ffb202d3df1281ed717b28058f351424c5c Mon Sep 17 00:00:00 2001 From: Stephen Akridge Date: Thu, 3 May 2018 17:03:14 -0700 Subject: [PATCH 31/51] Fix bind so we can talk on external interfaces and surface send error --- src/accountant_skel.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/accountant_skel.rs b/src/accountant_skel.rs index c9a7a08703..b687fd69dc 100644 --- a/src/accountant_skel.rs +++ b/src/accountant_skel.rs @@ -88,7 +88,7 @@ impl AccountantSkel { fn notify_entry_info_subscribers(obj: &SharedSkel, entry: &Entry) { // TODO: No need to bind(). - let socket = UdpSocket::bind("127.0.0.1:0").expect("bind"); + let socket = UdpSocket::bind("0.0.0.0:0").expect("bind"); // copy subscribers to avoid taking lock while doing io let addrs = obj.entry_info_subscribers.lock().unwrap().clone(); @@ -100,7 +100,10 @@ impl AccountantSkel { num_events: entry.events.len() as u64, }; let data = serialize(&Response::EntryInfo(entry_info)).expect("serialize EntryInfo"); - let _res = socket.send_to(&data, addr); + let res = socket.send_to(&data, addr); + if res.is_err() { + eprintln!("couldn't send response: {:?}", res); + } } } From 2d5313639a0c883f33a10c1e8873478174e740cf Mon Sep 17 00:00:00 2001 From: Stephen Akridge Date: Thu, 3 May 2018 15:55:59 -0700 Subject: [PATCH 32/51] Factor out entry processing and fix replicate test to call global setup fn --- src/accountant_skel.rs | 109 +++++++++++++++++++++++++---------------- 1 file changed, 66 insertions(+), 43 deletions(-) diff --git a/src/accountant_skel.rs b/src/accountant_skel.rs index b687fd69dc..7982ae4fef 100644 --- a/src/accountant_skel.rs +++ b/src/accountant_skel.rs @@ -11,7 +11,7 @@ use event::Event; use hash::Hash; use historian::Historian; use packet; -use packet::{SharedPackets, BLOB_SIZE}; +use packet::{SharedBlob, SharedPackets, BLOB_SIZE}; use rayon::prelude::*; use recorder::Signal; use result::Result; @@ -135,6 +135,46 @@ impl AccountantSkel { Ok(l) } + fn process_entry_list_into_blobs( + list: &Vec, + blob_recycler: &packet::BlobRecycler, + q: &mut VecDeque, + ) { + let mut start = 0; + let mut end = 0; + while start < list.len() { + let mut total = 0; + for i in &list[start..] 
{ + total += size_of::() * i.events.len(); + total += size_of::(); + if total >= BLOB_SIZE { + break; + } + end += 1; + } + // See that we made progress and a single + // vec of Events wasn't too big for a single packet + if end <= start { + eprintln!("Event too big for the blob!"); + start += 1; + end = start; + continue; + } + + let b = blob_recycler.allocate(); + let pos = { + let mut bd = b.write().unwrap(); + let mut out = Cursor::new(bd.data_mut()); + serialize_into(&mut out, &list[start..end]).expect("failed to serialize output"); + out.position() as usize + }; + assert!(pos < BLOB_SIZE); + b.write().unwrap().set_size(pos); + q.push_back(b); + start = end; + } + } + /// Process any Entry items that have been published by the Historian. /// continuosly broadcast blobs of entries out fn run_sync( @@ -147,34 +187,7 @@ impl AccountantSkel { let mut q = VecDeque::new(); while let Ok(list) = Self::receive_all(&obj, writer) { trace!("New blobs? {}", list.len()); - let mut start = 0; - let mut end = 0; - while start < list.len() { - let mut total = 0; - for i in &list[start..] { - total += size_of::() * i.events.len(); - total += size_of::(); - if total >= BLOB_SIZE { - break; - } - end += 1; - } - // See that we made progress and a single - // vec of Events wasn't too big for a single packet - assert!(end > start); - let b = blob_recycler.allocate(); - let pos = { - let mut bd = b.write().unwrap(); - let mut out = Cursor::new(bd.data_mut()); - serialize_into(&mut out, &list[start..end]) - .expect("failed to serialize output"); - out.position() as usize - }; - assert!(pos < BLOB_SIZE); - b.write().unwrap().set_size(pos); - q.push_back(b); - start = end; - } + Self::process_entry_list_into_blobs(&list, blob_recycler, &mut q); if exit.load(Ordering::Relaxed) { break; } @@ -617,7 +630,7 @@ mod tests { use accountant_skel::{to_packets, Request}; use bincode::serialize; use ecdsa; - use packet::{BlobRecycler, PacketRecycler, NUM_PACKETS}; + use packet::{BlobRecycler, PacketRecycler, BLOB_SIZE, NUM_PACKETS}; use transaction::{memfind, test_tx}; use accountant::Accountant; @@ -632,6 +645,7 @@ mod tests { use futures::Future; use hash::{hash, Hash}; use historian::Historian; + use logger; use mint::Mint; use plan::Plan; use recorder::Signal; @@ -764,18 +778,6 @@ mod tests { } } - use std::sync::{Once, ONCE_INIT}; - extern crate env_logger; - - static INIT: Once = ONCE_INIT; - - /// Setup function that is only run once, even if called multiple times. 
- fn setup() { - INIT.call_once(|| { - env_logger::init().unwrap(); - }); - } - fn test_node() -> (ReplicatedData, UdpSocket, UdpSocket, UdpSocket) { let gossip = UdpSocket::bind("127.0.0.1:0").unwrap(); let replicate = UdpSocket::bind("127.0.0.1:0").unwrap(); @@ -793,7 +795,7 @@ mod tests { /// Test that mesasge sent from leader to target1 and repliated to target2 #[test] fn test_replicate() { - setup(); + logger::setup(); let (leader_data, leader_gossip, _, leader_serve) = test_node(); let (target1_data, target1_gossip, target1_replicate, _) = test_node(); let (target2_data, target2_gossip, target2_replicate, _) = test_node(); @@ -932,6 +934,27 @@ mod tests { t_l_gossip.join().expect("join"); t_l_listen.join().expect("join"); } + + #[test] + fn test_entry_to_blobs() { + let zero = Hash::default(); + let keypair = KeyPair::new(); + let tr0 = Event::Transaction(Transaction::new(&keypair, keypair.pubkey(), 0, zero)); + let tr1 = Event::Transaction(Transaction::new(&keypair, keypair.pubkey(), 1, zero)); + let e0 = entry::create_entry(&zero, 0, vec![tr0.clone(), tr1.clone()]); + + let entry_list = vec![e0; 1000]; + let blob_recycler = BlobRecycler::default(); + let mut blob_q = VecDeque::new(); + AccountantSkel::process_entry_list_into_blobs(&entry_list, &blob_recycler, &mut blob_q); + let serialized_entry_list = serialize(&entry_list).unwrap(); + let mut num_blobs_ref = serialized_entry_list.len() / BLOB_SIZE; + if serialized_entry_list.len() % BLOB_SIZE != 0 { + num_blobs_ref += 1 + } + trace!("len: {} ref_len: {}", blob_q.len(), num_blobs_ref); + assert!(blob_q.len() > num_blobs_ref); + } } #[cfg(all(feature = "unstable", test))] From e162f2411978c753210e27ff49cfe0edf4a36865 Mon Sep 17 00:00:00 2001 From: Greg Fitzgerald Date: Fri, 4 May 2018 11:50:13 -0600 Subject: [PATCH 33/51] Limit 256 events per entry Attempt to keep blob size under 64kb --- src/recorder.rs | 38 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/src/recorder.rs b/src/recorder.rs index faf0759357..38cc6d87a1 100644 --- a/src/recorder.rs +++ b/src/recorder.rs @@ -79,6 +79,13 @@ impl Recorder { } Signal::Event(event) => { self.events.push(event); + + // Record an entry early if we anticipate its serialized size will + // be larger than 64kb. At the time of this writing, we assume each + // event will be well under 256 bytes. 
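                    // That works out to a cap of 65_536 / 256 = 256 events per entry.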
+ if self.events.len() >= 65_536 / 256 { + self.record_entry()?; + } } }, Err(TryRecvError::Empty) => return Ok(()), @@ -87,3 +94,34 @@ impl Recorder { } } } + +#[cfg(test)] +mod tests { + use super::*; + use bincode::serialize; + use signature::{KeyPair, KeyPairUtil}; + use transaction::Transaction; + use std::sync::mpsc::sync_channel; + + #[test] + fn test_sub64k_entry_size() { + let (signal_sender, signal_receiver) = sync_channel(500); + let (entry_sender, entry_receiver) = sync_channel(10); + let zero = Hash::default(); + let mut recorder = Recorder::new(signal_receiver, entry_sender, zero); + let alice_keypair = KeyPair::new(); + let bob_pubkey = KeyPair::new().pubkey(); + for _ in 0..256 { + let tx = Transaction::new(&alice_keypair, bob_pubkey, 1, zero); + let event = Event::Transaction(tx); + signal_sender.send(Signal::Event(event)).unwrap(); + } + + recorder.process_events(Instant::now(), None).unwrap(); + + drop(recorder.sender); + let entries: Vec<_> = entry_receiver.iter().collect(); + assert_eq!(entries.len(), 1); + assert!(serialize(&entries[0]).unwrap().len() <= 65_536); + } +} From e8f5fb35acafcf35dc3ad13b2b549ed1e1de26ff Mon Sep 17 00:00:00 2001 From: Stephen Akridge Date: Fri, 4 May 2018 11:11:39 -0700 Subject: [PATCH 34/51] Multinode fixes and test * Replace magic numbers for 64k event size * Fix gossip, dont ping yourself * Retransmit only to listening nodes * Multinode test in stub marked unstable --- src/accountant_skel.rs | 143 +++++++++++++++++++++++++-------- src/accountant_stub.rs | 176 ++++++++++++++++++++++++++++++++++++++++- src/bin/client-demo.rs | 28 ++++--- src/bin/testnode.rs | 9 +++ src/crdt.rs | 69 +++++++++++----- src/packet.rs | 16 +++- src/recorder.rs | 5 +- src/result.rs | 1 + 8 files changed, 375 insertions(+), 72 deletions(-) diff --git a/src/accountant_skel.rs b/src/accountant_skel.rs index 7982ae4fef..a2a8d21d42 100644 --- a/src/accountant_skel.rs +++ b/src/accountant_skel.rs @@ -19,6 +19,7 @@ use serde_json; use signature::PublicKey; use std::cmp::max; use std::collections::VecDeque; +use std::io::sink; use std::io::{Cursor, Write}; use std::mem::size_of; use std::net::{SocketAddr, UdpSocket}; @@ -100,6 +101,8 @@ impl AccountantSkel { num_events: entry.events.len() as u64, }; let data = serialize(&Response::EntryInfo(entry_info)).expect("serialize EntryInfo"); + trace!("sending {} to {}", data.len(), addr); + //TODO dont do IO here, this needs to be on a separate channel let res = socket.send_to(&data, addr); if res.is_err() { eprintln!("couldn't send response: {:?}", res); @@ -182,16 +185,11 @@ impl AccountantSkel { broadcast: &streamer::BlobSender, blob_recycler: &packet::BlobRecycler, writer: &Arc>, - exit: Arc, ) -> Result<()> { let mut q = VecDeque::new(); - while let Ok(list) = Self::receive_all(&obj, writer) { - trace!("New blobs? {}", list.len()); - Self::process_entry_list_into_blobs(&list, blob_recycler, &mut q); - if exit.load(Ordering::Relaxed) { - break; - } - } + let list = Self::receive_all(&obj, writer)?; + trace!("New blobs? 
{}", list.len()); + Self::process_entry_list_into_blobs(&list, blob_recycler, &mut q); if !q.is_empty() { broadcast.send(q)?; } @@ -206,14 +204,26 @@ impl AccountantSkel { writer: Arc>, ) -> JoinHandle<()> { spawn(move || loop { - let e = Self::run_sync( - obj.clone(), - &broadcast, - &blob_recycler, - &writer, - exit.clone(), - ); - if e.is_err() && exit.load(Ordering::Relaxed) { + let _ = Self::run_sync(obj.clone(), &broadcast, &blob_recycler, &writer); + if exit.load(Ordering::Relaxed) { + info!("sync_service exiting"); + break; + } + }) + } + + /// Process any Entry items that have been published by the Historian. + /// continuosly broadcast blobs of entries out + fn run_sync_no_broadcast(obj: SharedSkel) -> Result<()> { + Self::receive_all(&obj, &Arc::new(Mutex::new(sink())))?; + Ok(()) + } + + pub fn sync_no_broadcast_service(obj: SharedSkel, exit: Arc) -> JoinHandle<()> { + spawn(move || loop { + let _ = Self::run_sync_no_broadcast(obj.clone()); + if exit.load(Ordering::Relaxed) { + info!("sync_no_broadcast_service exiting"); break; } }) @@ -228,7 +238,9 @@ impl AccountantSkel { match msg { Request::GetBalance { key } => { let val = self.acc.lock().unwrap().get_balance(&key); - Some((Response::Balance { key, val }, rsp_addr)) + let rsp = (Response::Balance { key, val }, rsp_addr); + info!("Response::Balance {:?}", rsp); + Some(rsp) } Request::Transaction(_) => unreachable!(), Request::Subscribe { subscriptions } => { @@ -247,10 +259,10 @@ impl AccountantSkel { fn recv_batch(recvr: &streamer::PacketReceiver) -> Result> { let timer = Duration::new(1, 0); let msgs = recvr.recv_timeout(timer)?; - trace!("got msgs"); + debug!("got msgs"); let mut batch = vec![msgs]; while let Ok(more) = recvr.try_recv() { - trace!("got more msgs"); + debug!("got more msgs"); batch.push(more); } info!("batch len {}", batch.len()); @@ -275,6 +287,7 @@ impl AccountantSkel { ) -> Result<()> { let batch = Self::recv_batch(recvr)?; let verified_batches = Self::verify_batch(batch); + debug!("verified batches: {}", verified_batches.len()); for xs in verified_batches { sendr.send(xs)?; } @@ -315,8 +328,9 @@ impl AccountantSkel { &self, req_vers: Vec<(Request, SocketAddr, u8)>, ) -> Result> { - trace!("partitioning"); + debug!("partitioning"); let (trs, reqs) = Self::partition_requests(req_vers); + debug!("trs: {} reqs: {}", trs.len(), reqs.len()); // Process the transactions in parallel and then log the successful ones. for result in self.acc.lock().unwrap().process_verified_transactions(trs) { @@ -328,15 +342,21 @@ impl AccountantSkel { } } + debug!("processing verified"); + // Let validators know they should not attempt to process additional // transactions in parallel. self.historian_input.lock().unwrap().send(Signal::Tick)?; + debug!("after historian_input"); + // Process the remaining requests serially. 
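        // Only non-transaction requests (GetBalance and Subscribe) reach this point.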
let rsps = reqs.into_iter() .filter_map(|(req, rsp_addr)| self.process_request(req, rsp_addr)) .collect(); + debug!("returning rsps"); + Ok(rsps) } @@ -377,7 +397,7 @@ impl AccountantSkel { ) -> Result<()> { let timer = Duration::new(1, 0); let mms = verified_receiver.recv_timeout(timer)?; - trace!("got some messages: {}", mms.len()); + debug!("got some messages: {}", mms.len()); for (msgs, vers) in mms { let reqs = Self::deserialize_packets(&msgs.read().unwrap()); let req_vers = reqs.into_iter() @@ -389,18 +409,18 @@ impl AccountantSkel { v }) .collect(); - trace!("process_packets"); + debug!("process_packets"); let rsps = obj.process_packets(req_vers)?; - trace!("done process_packets"); + debug!("done process_packets"); let blobs = Self::serialize_responses(rsps, blob_recycler)?; - trace!("sending blobs: {}", blobs.len()); if !blobs.is_empty() { + info!("process: sending blobs: {}", blobs.len()); //don't wake up the other side if there is nothing responder_sender.send(blobs)?; } packet_recycler.recycle(msgs); } - trace!("done responding"); + debug!("done responding"); Ok(()) } /// Process verified blobs, already in order @@ -412,6 +432,7 @@ impl AccountantSkel { ) -> Result<()> { let timer = Duration::new(1, 0); let blobs = verified_receiver.recv_timeout(timer)?; + trace!("replicating blobs {}", blobs.len()); for msgs in &blobs { let blob = msgs.read().unwrap(); let entries: Vec = deserialize(&blob.data()[..blob.meta.size]).unwrap(); @@ -541,10 +562,12 @@ impl AccountantSkel { obj: &SharedSkel, me: ReplicatedData, gossip: UdpSocket, + serve: UdpSocket, replicate: UdpSocket, leader: ReplicatedData, exit: Arc, ) -> Result>> { + //replicate pipeline let crdt = Arc::new(RwLock::new(Crdt::new(me))); crdt.write().unwrap().set_leader(leader.id); crdt.write().unwrap().insert(leader); @@ -580,7 +603,7 @@ impl AccountantSkel { //then sent to the window, which does the erasure coding reconstruction let t_window = streamer::window( exit.clone(), - crdt, + crdt.clone(), blob_recycler.clone(), blob_receiver, window_sender, @@ -588,19 +611,76 @@ impl AccountantSkel { ); let skel = obj.clone(); - let t_server = spawn(move || loop { + let s_exit = exit.clone(); + let t_replicator = spawn(move || loop { let e = Self::replicate_state(&skel, &window_receiver, &blob_recycler); - if e.is_err() && exit.load(Ordering::Relaxed) { + if e.is_err() && s_exit.load(Ordering::Relaxed) { break; } }); + + //serve pipeline + // make sure we are on the same interface + let mut local = serve.local_addr()?; + local.set_port(0); + let respond_socket = UdpSocket::bind(local.clone())?; + + let packet_recycler = packet::PacketRecycler::default(); + let blob_recycler = packet::BlobRecycler::default(); + let (packet_sender, packet_receiver) = channel(); + let t_packet_receiver = + streamer::receiver(serve, exit.clone(), packet_recycler.clone(), packet_sender)?; + let (responder_sender, responder_receiver) = channel(); + let t_responder = streamer::responder( + respond_socket, + exit.clone(), + blob_recycler.clone(), + responder_receiver, + ); + let (verified_sender, verified_receiver) = channel(); + + let exit_ = exit.clone(); + let t_verifier = spawn(move || loop { + let e = Self::verifier(&packet_receiver, &verified_sender); + if e.is_err() && exit_.load(Ordering::Relaxed) { + trace!("verifier exiting"); + break; + } + }); + + let t_sync = Self::sync_no_broadcast_service(obj.clone(), exit.clone()); + + let skel = obj.clone(); + let s_exit = exit.clone(); + let t_server = spawn(move || loop { + let e = Self::process( + &mut 
skel.clone(), + &verified_receiver, + &responder_sender, + &packet_recycler, + &blob_recycler, + ); + if e.is_err() { + if s_exit.load(Ordering::Relaxed) { + break; + } + } + }); + Ok(vec![ + //replicate threads t_blob_receiver, t_retransmit, t_window, - t_server, + t_replicator, t_gossip, t_listen, + //serve threads + t_packet_receiver, + t_responder, + t_server, + t_verifier, + t_sync, ]) } } @@ -769,7 +849,7 @@ mod tests { tr2.data.plan = Plan::new_payment(502, bob_pubkey); let _sig = acc_stub.transfer_signed(tr2).unwrap(); - assert_eq!(acc_stub.get_balance(&bob_pubkey).wait().unwrap(), 500); + assert_eq!(acc_stub.get_balance(&bob_pubkey).unwrap(), 500); trace!("exiting"); exit.store(true, Ordering::Relaxed); trace!("joining threads"); @@ -797,7 +877,7 @@ mod tests { fn test_replicate() { logger::setup(); let (leader_data, leader_gossip, _, leader_serve) = test_node(); - let (target1_data, target1_gossip, target1_replicate, _) = test_node(); + let (target1_data, target1_gossip, target1_replicate, target1_serve) = test_node(); let (target2_data, target2_gossip, target2_replicate, _) = test_node(); let exit = Arc::new(AtomicBool::new(false)); @@ -851,6 +931,7 @@ mod tests { &acc, target1_data, target1_gossip, + target1_serve, target1_replicate, leader_data, exit.clone(), diff --git a/src/accountant_stub.rs b/src/accountant_stub.rs index 1797a53e1c..6bedd38861 100644 --- a/src/accountant_stub.rs +++ b/src/accountant_stub.rs @@ -11,6 +11,7 @@ use signature::{KeyPair, PublicKey, Signature}; use std::collections::HashMap; use std::io; use std::net::{SocketAddr, UdpSocket}; +use std::time::Duration; use transaction::Transaction; pub struct AccountantStub { @@ -47,7 +48,10 @@ impl AccountantStub { pub fn recv_response(&self) -> io::Result { let mut buf = vec![0u8; 1024]; + self.socket.set_read_timeout(Some(Duration::new(1, 0)))?; + info!("start recv_from"); self.socket.recv_from(&mut buf)?; + info!("end recv_from"); let resp = deserialize(&buf).expect("deserialize balance"); Ok(resp) } @@ -55,9 +59,11 @@ impl AccountantStub { pub fn process_response(&mut self, resp: Response) { match resp { Response::Balance { key, val } => { + info!("Response balance {:?} {:?}", key, val); self.balances.insert(key, val); } Response::EntryInfo(entry_info) => { + trace!("Response entry_info {:?}", entry_info.id); self.last_id = Some(entry_info.id); self.num_events += entry_info.num_events; } @@ -88,7 +94,8 @@ impl AccountantStub { /// Request the balance of the user holding `pubkey`. This method blocks /// until the server sends a response. If the response packet is dropped /// by the network, this method will hang indefinitely. - pub fn get_balance(&mut self, pubkey: &PublicKey) -> FutureResult { + pub fn get_balance(&mut self, pubkey: &PublicKey) -> io::Result { + info!("get_balance"); let req = Request::GetBalance { key: *pubkey }; let data = serialize(&req).expect("serialize GetBalance"); self.socket @@ -96,13 +103,14 @@ impl AccountantStub { .expect("buffer error"); let mut done = false; while !done { - let resp = self.recv_response().expect("recv response"); + let resp = self.recv_response()?; + info!("recv_response {:?}", resp); if let &Response::Balance { ref key, .. } = &resp { done = key == pubkey; } self.process_response(resp); } - ok(self.balances[pubkey].unwrap()) + self.balances[pubkey].ok_or(io::Error::new(io::ErrorKind::Other, "nokey")) } /// Request the last Entry ID from the server. 
This method blocks @@ -146,6 +154,7 @@ mod tests { use crdt::ReplicatedData; use futures::Future; use historian::Historian; + use logger; use mint::Mint; use signature::{KeyPair, KeyPairUtil}; use std::io::sink; @@ -158,6 +167,7 @@ mod tests { // TODO: Figure out why this test sometimes hangs on TravisCI. #[test] fn test_accountant_stub() { + logger::setup(); let gossip = UdpSocket::bind("0.0.0.0:0").unwrap(); let serve = UdpSocket::bind("0.0.0.0:0").unwrap(); let addr = serve.local_addr().unwrap(); @@ -186,10 +196,168 @@ mod tests { let last_id = acc.get_last_id().wait().unwrap(); let _sig = acc.transfer(500, &alice.keypair(), bob_pubkey, &last_id) .unwrap(); - assert_eq!(acc.get_balance(&bob_pubkey).wait().unwrap(), 500); + assert_eq!(acc.get_balance(&bob_pubkey).unwrap(), 500); exit.store(true, Ordering::Relaxed); for t in threads { t.join().unwrap(); } } } + +#[cfg(all(feature = "unstable", test))] +mod unstsable_tests { + use super::*; + use accountant::Accountant; + use accountant_skel::AccountantSkel; + use crdt::{Crdt, ReplicatedData}; + use futures::Future; + use historian::Historian; + use logger; + use mint::Mint; + use signature::{KeyPair, KeyPairUtil}; + use std::io::sink; + use std::sync::atomic::{AtomicBool, Ordering}; + use std::sync::mpsc::sync_channel; + use std::sync::{Arc, RwLock}; + use std::thread::sleep; + use std::time::Duration; + + fn test_node() -> (ReplicatedData, UdpSocket, UdpSocket, UdpSocket) { + let gossip = UdpSocket::bind("0.0.0.0:0").unwrap(); + let serve = UdpSocket::bind("0.0.0.0:0").unwrap(); + let replicate = UdpSocket::bind("0.0.0.0:0").unwrap(); + let pubkey = KeyPair::new().pubkey(); + let leader = ReplicatedData::new( + pubkey, + gossip.local_addr().unwrap(), + replicate.local_addr().unwrap(), + serve.local_addr().unwrap(), + ); + (leader, gossip, serve, replicate) + } + + #[test] + fn test_multi_accountant_stub() { + logger::setup(); + info!("test_multi_accountant_stub"); + let leader = test_node(); + let replicant = test_node(); + let alice = Mint::new(10_000); + let bob_pubkey = KeyPair::new().pubkey(); + let exit = Arc::new(AtomicBool::new(false)); + + let leader_acc = { + let (input, event_receiver) = sync_channel(10); + let historian = Historian::new(event_receiver, &alice.last_id(), Some(30)); + let acc = Accountant::new(&alice); + Arc::new(AccountantSkel::new(acc, input, historian)) + }; + + let replicant_acc = { + let (input, event_receiver) = sync_channel(10); + let historian = Historian::new(event_receiver, &alice.last_id(), Some(30)); + let acc = Accountant::new(&alice); + Arc::new(AccountantSkel::new(acc, input, historian)) + }; + + let leader_threads = AccountantSkel::serve( + &leader_acc, + leader.0.clone(), + leader.2, + leader.1, + exit.clone(), + sink(), + ).unwrap(); + let replicant_threads = AccountantSkel::replicate( + &replicant_acc, + replicant.0.clone(), + replicant.1, + replicant.2, + replicant.3, + leader.0.clone(), + exit.clone(), + ).unwrap(); + + //lets spy on the network + let (mut spy, spy_gossip, _, _) = test_node(); + let daddr = "0.0.0.0:0".parse().unwrap(); + spy.replicate_addr = daddr; + spy.serve_addr = daddr; + let mut spy_crdt = Crdt::new(spy); + spy_crdt.insert(leader.0.clone()); + spy_crdt.set_leader(leader.0.id); + + let spy_ref = Arc::new(RwLock::new(spy_crdt)); + let t_spy_listen = Crdt::listen(spy_ref.clone(), spy_gossip, exit.clone()); + let t_spy_gossip = Crdt::gossip(spy_ref.clone(), exit.clone()); + //wait for the network to converge + for _ in 0..20 { + let ix = 
spy_ref.read().unwrap().update_index; + info!("my update index is {}", ix); + let len = spy_ref.read().unwrap().remote.values().len(); + let mut done = false; + info!("remote len {}", len); + if len > 1 && ix > 2 { + done = true; + //check if everyones remote index is greater or equal to ours + let vs: Vec = spy_ref.read().unwrap().remote.values().cloned().collect(); + for t in vs.into_iter() { + info!("remote update index is {} vs {}", t, ix); + if t < 3 { + done = false; + } + } + } + if done == true { + info!("converged!"); + break; + } + sleep(Duration::new(1, 0)); + } + + //verify leader can do transfer + let leader_balance = { + let socket = UdpSocket::bind("0.0.0.0:0").unwrap(); + socket.set_read_timeout(Some(Duration::new(1, 0))).unwrap(); + + let mut acc = AccountantStub::new(leader.0.serve_addr, socket); + info!("getting leader last_id"); + let last_id = acc.get_last_id().wait().unwrap(); + info!("executing leader transer"); + let _sig = acc.transfer(500, &alice.keypair(), bob_pubkey, &last_id) + .unwrap(); + info!("getting leader balance"); + acc.get_balance(&bob_pubkey).unwrap() + }; + assert_eq!(leader_balance, 500); + //verify replicant has the same balance + let mut replicant_balance = 0; + for _ in 0..10 { + let socket = UdpSocket::bind("0.0.0.0:0").unwrap(); + socket.set_read_timeout(Some(Duration::new(1, 0))).unwrap(); + + let mut acc = AccountantStub::new(replicant.0.serve_addr, socket); + info!("getting replicant balance"); + if let Ok(bal) = acc.get_balance(&bob_pubkey) { + replicant_balance = bal; + } + info!("replicant balance {}", replicant_balance); + if replicant_balance == leader_balance { + break; + } + sleep(Duration::new(1, 0)); + } + assert_eq!(replicant_balance, leader_balance); + + exit.store(true, Ordering::Relaxed); + for t in leader_threads { + t.join().unwrap(); + } + for t in replicant_threads { + t.join().unwrap(); + } + for t in vec![t_spy_listen, t_spy_gossip] { + t.join().unwrap(); + } + } +} diff --git a/src/bin/client-demo.rs b/src/bin/client-demo.rs index 739b6cea4a..4c868dfcf0 100644 --- a/src/bin/client-demo.rs +++ b/src/bin/client-demo.rs @@ -18,6 +18,8 @@ use std::env; use std::io::{stdin, Read}; use std::net::{SocketAddr, UdpSocket}; use std::process::exit; +use std::thread::sleep; +use std::time::Duration; use std::time::Instant; use untrusted::Input; @@ -38,7 +40,7 @@ fn main() { let mut opts = Options::new(); opts.optopt("s", "", "server address", "host:port"); opts.optopt("c", "", "client address", "host:port"); - opts.optopt("t", "", "number of threads", "4"); + opts.optopt("t", "", "number of threads", &format!("{}", threads)); opts.optflag("h", "help", "print help"); let args: Vec = env::args().collect(); let matches = match opts.parse(&args[1..]) { @@ -84,6 +86,7 @@ fn main() { println!("Binding to {}", client_addr); let socket = UdpSocket::bind(&client_addr).unwrap(); + socket.set_read_timeout(Some(Duration::new(5, 0))).unwrap(); let mut acc = AccountantStub::new(addr.parse().unwrap(), socket); println!("Get last ID..."); @@ -104,7 +107,7 @@ fn main() { .into_par_iter() .map(|chunk| Transaction::new(&chunk[0], chunk[1].pubkey(), 1, last_id)) .collect(); - let duration = now.elapsed(); + let mut duration = now.elapsed(); let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos()); let bsps = txs as f64 / ns as f64; let nsps = ns as f64 / txs as f64; @@ -115,6 +118,7 @@ fn main() { ); let initial_tx_count = acc.transaction_count(); + println!("initial count {}", initial_tx_count); println!("Transfering {} 
transactions in {} batches", txs, threads); let now = Instant::now(); @@ -131,16 +135,16 @@ fn main() { } }); - println!("Waiting for half the transactions to complete...",); - let mut tx_count = acc.transaction_count(); - while tx_count < transactions.len() as u64 / 2 { + println!("Waiting for transactions to complete...",); + let mut tx_count; + for _ in 0..5 { tx_count = acc.transaction_count(); + duration = now.elapsed(); + let txs = tx_count - initial_tx_count; + println!("Transactions processed {}", txs); + let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos()); + let tps = (txs * 1_000_000_000) as f64 / ns as f64; + println!("{} tps", tps); + sleep(Duration::new(1, 0)); } - let txs = tx_count - initial_tx_count; - println!("Transactions processed {}", txs); - - let duration = now.elapsed(); - let ns = duration.as_secs() * 1_000_000_000 + u64::from(duration.subsec_nanos()); - let tps = (txs * 1_000_000_000) as f64 / ns as f64; - println!("Done. {} tps", tps); } diff --git a/src/bin/testnode.rs b/src/bin/testnode.rs index c12840e843..e9318bbb8f 100644 --- a/src/bin/testnode.rs +++ b/src/bin/testnode.rs @@ -76,6 +76,8 @@ fn main() { }) }); + eprintln!("done parsing..."); + // The first item in the ledger is required to be an entry with zero num_hashes, // which implies its id can be used as the ledger's seed. let entry0 = entries.next().unwrap(); @@ -90,10 +92,14 @@ fn main() { None }; + eprintln!("creating accountant..."); + let acc = Accountant::new_from_deposit(&deposit.unwrap()); acc.register_entry_id(&entry0.id); acc.register_entry_id(&entry1.id); + eprintln!("processing entries..."); + let mut last_id = entry1.id; for entry in entries { last_id = entry.id; @@ -101,6 +107,8 @@ fn main() { acc.register_entry_id(&last_id); } + eprintln!("creating networking stack..."); + let (input, event_receiver) = sync_channel(10_000); let historian = Historian::new(event_receiver, &last_id, Some(1000)); let exit = Arc::new(AtomicBool::new(false)); @@ -115,6 +123,7 @@ fn main() { replicate_sock.local_addr().unwrap(), serve_sock.local_addr().unwrap(), ); + eprintln!("starting server..."); let threads = AccountantSkel::serve(&skel, d, serve_sock, gossip_sock, exit.clone(), stdout()).unwrap(); eprintln!("Ready. 
Listening on {}", serve_addr); diff --git a/src/crdt.rs b/src/crdt.rs index a0ece4319b..df01d89ec1 100644 --- a/src/crdt.rs +++ b/src/crdt.rs @@ -91,7 +91,7 @@ pub struct Crdt { local: HashMap, /// The value of the remote update index that i have last seen /// This Node will ask external nodes for updates since the value in this list - remote: HashMap, + pub remote: HashMap, pub update_index: u64, me: PublicKey, timeout: Duration, @@ -172,20 +172,33 @@ impl Crdt { let cloned_table: Vec = robj.table.values().cloned().collect(); (robj.table[&robj.me].clone(), cloned_table) }; - let errs: Vec<_> = table + let daddr = "0.0.0.0:0".parse().unwrap(); + let items: Vec<(usize, &ReplicatedData)> = table .iter() - .enumerate() - .cycle() - .zip(blobs.iter()) - .map(|((i, v), b)| { + .filter(|v| { if me.id == v.id { - return Ok(0); + //filter myself + false + } else if v.replicate_addr == daddr { + //filter nodes that are not listening + false + } else { + true } + }) + .enumerate() + .collect(); + let orders: Vec<_> = items.into_iter().cycle().zip(blobs.iter()).collect(); + let errs: Vec<_> = orders + .into_par_iter() + .map(|((i, v), b)| { // only leader should be broadcasting assert!(me.current_leader_id != v.id); let mut blob = b.write().unwrap(); + blob.set_id(me.id).expect("set_id"); blob.set_index(*transmit_index + i as u64) .expect("set_index"); + //TODO profile this, may need multiple sockets for par_iter s.send_to(&blob.data[..blob.meta.size], &v.replicate_addr) }) .collect(); @@ -210,17 +223,28 @@ impl Crdt { (s.table[&s.me].clone(), s.table.values().cloned().collect()) }; let rblob = blob.read().unwrap(); - let errs: Vec<_> = table + let daddr = "0.0.0.0:0".parse().unwrap(); + let orders: Vec<_> = table + .iter() + .filter(|v| { + if me.id == v.id { + false + } else if me.current_leader_id == v.id { + trace!("skip retransmit to leader {:?}", v.id); + false + } else if v.replicate_addr == daddr { + trace!("skip nodes that are not listening {:?}", v.id); + false + } else { + true + } + }) + .collect(); + let errs: Vec<_> = orders .par_iter() .map(|v| { - if me.id == v.id { - return Ok(0); - } - if me.current_leader_id == v.id { - trace!("skip retransmit to leader{:?}", v.id); - return Ok(0); - } trace!("retransmit blob to {}", v.replicate_addr); + //TODO profile this, may need multiple sockets for par_iter s.send_to(&rblob.data[..rblob.meta.size], &v.replicate_addr) }) .collect(); @@ -258,13 +282,18 @@ impl Crdt { /// (A,B) /// * A - Address to send to /// * B - RequestUpdates protocol message - fn gossip_request(&self) -> (SocketAddr, Protocol) { - let n = (Self::random() as usize) % self.table.len(); - trace!("random {:?} {}", &self.me[0..1], n); + fn gossip_request(&self) -> Result<(SocketAddr, Protocol)> { + if self.table.len() <= 1 { + return Err(Error::GeneralError); + } + let mut n = (Self::random() as usize) % self.table.len(); + while self.table.values().nth(n).unwrap().id == self.me { + n = (Self::random() as usize) % self.table.len(); + } let v = self.table.values().nth(n).unwrap().clone(); let remote_update_index = *self.remote.get(&v.id).unwrap_or(&0); let req = Protocol::RequestUpdates(remote_update_index, self.table[&self.me].clone()); - (v.gossip_addr, req) + Ok((v.gossip_addr, req)) } /// At random pick a node and try to get updated changes from them @@ -274,7 +303,7 @@ impl Crdt { // Lock the object only to do this operation and not for any longer // especially not when doing the `sock.send_to` - let (remote_gossip_addr, req) = obj.read().unwrap().gossip_request(); + let 
(remote_gossip_addr, req) = obj.read().unwrap().gossip_request()?; let sock = UdpSocket::bind("0.0.0.0:0")?; // TODO this will get chatty, so we need to first ask for number of updates since // then only ask for specific data that we dont have diff --git a/src/packet.rs b/src/packet.rs index a2d0db4ff8..471fe67fa5 100644 --- a/src/packet.rs +++ b/src/packet.rs @@ -176,18 +176,26 @@ impl Packets { // * read until it fails // * set it back to blocking before returning socket.set_nonblocking(false)?; + let mut error_msgs = 0; for p in &mut self.packets { p.meta.size = 0; + trace!("receiving"); match socket.recv_from(&mut p.data) { Err(_) if i > 0 => { - trace!("got {:?} messages", i); - break; + debug!("got {:?} messages", i); + error_msgs += 1; + if error_msgs > 30 { + break; + } else { + continue; + } } Err(e) => { - info!("recv_from err {:?}", e); + trace!("recv_from err {:?}", e); return Err(Error::IO(e)); } Ok((nrecv, from)) => { + error_msgs = 0; p.meta.size = nrecv; p.meta.set_addr(&from); if i == 0 { @@ -202,6 +210,7 @@ impl Packets { pub fn recv_from(&mut self, socket: &UdpSocket) -> Result<()> { let sz = self.run_read_from(socket)?; self.packets.resize(sz, Packet::default()); + debug!("recv_from: {}", sz); Ok(()) } pub fn send_to(&self, socket: &UdpSocket) -> Result<()> { @@ -233,6 +242,7 @@ impl Blob { let e = deserialize(&self.data[BLOB_INDEX_END..BLOB_ID_END])?; Ok(e) } + pub fn set_id(&mut self, id: PublicKey) -> Result<()> { let wtr = serialize(&id)?; self.data[BLOB_INDEX_END..BLOB_ID_END].clone_from_slice(&wtr); diff --git a/src/recorder.rs b/src/recorder.rs index 38cc6d87a1..674b99c370 100644 --- a/src/recorder.rs +++ b/src/recorder.rs @@ -8,6 +8,7 @@ use entry::{create_entry_mut, Entry}; use event::Event; use hash::{hash, Hash}; +use packet::BLOB_SIZE; use std::mem; use std::sync::mpsc::{Receiver, SyncSender, TryRecvError}; use std::time::{Duration, Instant}; @@ -83,7 +84,7 @@ impl Recorder { // Record an entry early if we anticipate its serialized size will // be larger than 64kb. At the time of this writing, we assume each // event will be well under 256 bytes. 
- if self.events.len() >= 65_536 / 256 { + if self.events.len() >= BLOB_SIZE / (2 * mem::size_of::()) { self.record_entry()?; } } @@ -100,8 +101,8 @@ mod tests { use super::*; use bincode::serialize; use signature::{KeyPair, KeyPairUtil}; - use transaction::Transaction; use std::sync::mpsc::sync_channel; + use transaction::Transaction; #[test] fn test_sub64k_entry_size() { diff --git a/src/result.rs b/src/result.rs index 532a64c3b2..d2cb485add 100644 --- a/src/result.rs +++ b/src/result.rs @@ -18,6 +18,7 @@ pub enum Error { AccountingError(accountant::AccountingError), SendError, Services, + GeneralError, } pub type Result = std::result::Result; From a68e50935e089c8ec8c9ef175228f831d13af579 Mon Sep 17 00:00:00 2001 From: Anatoly Yakovenko Date: Sun, 6 May 2018 21:48:46 -0700 Subject: [PATCH 35/51] useless timeouts i think --- src/accountant_stub.rs | 3 --- 1 file changed, 3 deletions(-) diff --git a/src/accountant_stub.rs b/src/accountant_stub.rs index 6bedd38861..dd853ca649 100644 --- a/src/accountant_stub.rs +++ b/src/accountant_stub.rs @@ -11,7 +11,6 @@ use signature::{KeyPair, PublicKey, Signature}; use std::collections::HashMap; use std::io; use std::net::{SocketAddr, UdpSocket}; -use std::time::Duration; use transaction::Transaction; pub struct AccountantStub { @@ -48,7 +47,6 @@ impl AccountantStub { pub fn recv_response(&self) -> io::Result { let mut buf = vec![0u8; 1024]; - self.socket.set_read_timeout(Some(Duration::new(1, 0)))?; info!("start recv_from"); self.socket.recv_from(&mut buf)?; info!("end recv_from"); @@ -190,7 +188,6 @@ mod tests { sleep(Duration::from_millis(300)); let socket = UdpSocket::bind("0.0.0.0:0").unwrap(); - socket.set_read_timeout(Some(Duration::new(5, 0))).unwrap(); let mut acc = AccountantStub::new(addr, socket); let last_id = acc.get_last_id().wait().unwrap(); From 138efa6cec7d4d174137a9c599ec5eab43b8e6dc Mon Sep 17 00:00:00 2001 From: Anatoly Yakovenko Date: Sun, 6 May 2018 22:06:19 -0700 Subject: [PATCH 36/51] fixed constant --- src/recorder.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/recorder.rs b/src/recorder.rs index 674b99c370..cb6f81f09c 100644 --- a/src/recorder.rs +++ b/src/recorder.rs @@ -84,7 +84,7 @@ impl Recorder { // Record an entry early if we anticipate its serialized size will // be larger than 64kb. At the time of this writing, we assume each // event will be well under 256 bytes. - if self.events.len() >= BLOB_SIZE / (2 * mem::size_of::()) { + if self.events.len() >= BLOB_SIZE / 256 { self.record_entry()?; } } From 4751e459ccfea19b7b67b4ac1190c962ee7b7cd3 Mon Sep 17 00:00:00 2001 From: Anatoly Yakovenko Date: Sun, 6 May 2018 22:25:05 -0700 Subject: [PATCH 37/51] fixed! 
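
Derive the recorder's per-entry event cap from the new BLOB_DATA_SIZE constant:

    pub const BLOB_DATA_SIZE: usize = BLOB_SIZE - BLOB_ID_END;

Also revert the packet.rs receive-loop error counting and fold the multinode
test back into the main test module.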
--- src/accountant_stub.rs | 24 +++--------------------- src/packet.rs | 10 ++-------- src/recorder.rs | 4 ++-- 3 files changed, 7 insertions(+), 31 deletions(-) diff --git a/src/accountant_stub.rs b/src/accountant_stub.rs index dd853ca649..a3715cf837 100644 --- a/src/accountant_stub.rs +++ b/src/accountant_stub.rs @@ -149,7 +149,7 @@ mod tests { use super::*; use accountant::Accountant; use accountant_skel::AccountantSkel; - use crdt::ReplicatedData; + use crdt::{Crdt, ReplicatedData}; use futures::Future; use historian::Historian; use logger; @@ -158,7 +158,7 @@ mod tests { use std::io::sink; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::mpsc::sync_channel; - use std::sync::Arc; + use std::sync::{Arc, RwLock}; use std::thread::sleep; use std::time::Duration; @@ -199,25 +199,6 @@ mod tests { t.join().unwrap(); } } -} - -#[cfg(all(feature = "unstable", test))] -mod unstsable_tests { - use super::*; - use accountant::Accountant; - use accountant_skel::AccountantSkel; - use crdt::{Crdt, ReplicatedData}; - use futures::Future; - use historian::Historian; - use logger; - use mint::Mint; - use signature::{KeyPair, KeyPairUtil}; - use std::io::sink; - use std::sync::atomic::{AtomicBool, Ordering}; - use std::sync::mpsc::sync_channel; - use std::sync::{Arc, RwLock}; - use std::thread::sleep; - use std::time::Duration; fn test_node() -> (ReplicatedData, UdpSocket, UdpSocket, UdpSocket) { let gossip = UdpSocket::bind("0.0.0.0:0").unwrap(); @@ -234,6 +215,7 @@ mod unstsable_tests { } #[test] + // #[ignore] fn test_multi_accountant_stub() { logger::setup(); info!("test_multi_accountant_stub"); diff --git a/src/packet.rs b/src/packet.rs index 471fe67fa5..713a166f68 100644 --- a/src/packet.rs +++ b/src/packet.rs @@ -17,6 +17,7 @@ pub type BlobRecycler = Recycler; pub const NUM_PACKETS: usize = 1024 * 8; pub const BLOB_SIZE: usize = 64 * 1024; +pub const BLOB_DATA_SIZE: usize = BLOB_SIZE - BLOB_ID_END; pub const PACKET_DATA_SIZE: usize = 256; pub const NUM_BLOBS: usize = (NUM_PACKETS * PACKET_DATA_SIZE) / BLOB_SIZE; @@ -176,26 +177,19 @@ impl Packets { // * read until it fails // * set it back to blocking before returning socket.set_nonblocking(false)?; - let mut error_msgs = 0; for p in &mut self.packets { p.meta.size = 0; trace!("receiving"); match socket.recv_from(&mut p.data) { Err(_) if i > 0 => { debug!("got {:?} messages", i); - error_msgs += 1; - if error_msgs > 30 { - break; - } else { - continue; - } + break; } Err(e) => { trace!("recv_from err {:?}", e); return Err(Error::IO(e)); } Ok((nrecv, from)) => { - error_msgs = 0; p.meta.size = nrecv; p.meta.set_addr(&from); if i == 0 { diff --git a/src/recorder.rs b/src/recorder.rs index cb6f81f09c..68a8cf8dae 100644 --- a/src/recorder.rs +++ b/src/recorder.rs @@ -8,7 +8,7 @@ use entry::{create_entry_mut, Entry}; use event::Event; use hash::{hash, Hash}; -use packet::BLOB_SIZE; +use packet::BLOB_DATA_SIZE; use std::mem; use std::sync::mpsc::{Receiver, SyncSender, TryRecvError}; use std::time::{Duration, Instant}; @@ -84,7 +84,7 @@ impl Recorder { // Record an entry early if we anticipate its serialized size will // be larger than 64kb. At the time of this writing, we assume each // event will be well under 256 bytes. 
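        // BLOB_DATA_SIZE is the usable payload: BLOB_SIZE minus the index/id header.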
- if self.events.len() >= BLOB_SIZE / 256 { + if self.events.len() >= BLOB_DATA_SIZE / 256 { self.record_entry()?; } } From 85f83f2c747990d3cb4a280e651bab53d24538c9 Mon Sep 17 00:00:00 2001 From: Anatoly Yakovenko Date: Sun, 6 May 2018 22:29:33 -0700 Subject: [PATCH 38/51] fmt --- src/accountant_stub.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/src/accountant_stub.rs b/src/accountant_stub.rs index a3715cf837..d5356083be 100644 --- a/src/accountant_stub.rs +++ b/src/accountant_stub.rs @@ -215,7 +215,6 @@ mod tests { } #[test] - // #[ignore] fn test_multi_accountant_stub() { logger::setup(); info!("test_multi_accountant_stub"); From 893011c3bae9e8e56acd35cd4cd00b5e3ef5a116 Mon Sep 17 00:00:00 2001 From: Greg Fitzgerald Date: Mon, 7 May 2018 14:51:08 -0600 Subject: [PATCH 39/51] Process events instead of processing only transactions Prep work to allow clients to send any type that can end up in the ledger. --- src/accountant.rs | 20 +++++++++++++------- src/accountant_skel.rs | 35 +++++++++++++++++++---------------- src/bin/testnode.rs | 8 +++++++- 3 files changed, 39 insertions(+), 24 deletions(-) diff --git a/src/accountant.rs b/src/accountant.rs index a94629837e..a214aa4196 100644 --- a/src/accountant.rs +++ b/src/accountant.rs @@ -218,13 +218,18 @@ impl Accountant { (trs, rest) } - pub fn process_verified_events(&self, events: Vec) -> Result<()> { + pub fn process_verified_events(&self, events: Vec) -> Vec> { let (trs, rest) = Self::partition_events(events); - self.process_verified_transactions(trs); + let mut results: Vec<_> = self.process_verified_transactions(trs) + .into_iter() + .map(|x| x.map(Event::Transaction)) + .collect(); + for event in rest { - self.process_verified_event(&event)?; + results.push(self.process_verified_event(event)); } - Ok(()) + + results } /// Process a Witness Signature that has already been verified. @@ -278,12 +283,13 @@ impl Accountant { } /// Process an Transaction or Witness that has already been verified. - pub fn process_verified_event(&self, event: &Event) -> Result<()> { - match *event { + pub fn process_verified_event(&self, event: Event) -> Result { + match event { Event::Transaction(ref tr) => self.process_verified_transaction(tr), Event::Signature { from, tx_sig, .. } => self.process_verified_sig(from, tx_sig), Event::Timestamp { from, dt, .. 
} => self.process_verified_timestamp(from, dt), - } + }?; + Ok(event) } /// Create, sign, and process a Transaction from `keypair` to `to` of diff --git a/src/accountant_skel.rs b/src/accountant_skel.rs index a2a8d21d42..e334665687 100644 --- a/src/accountant_skel.rs +++ b/src/accountant_skel.rs @@ -308,20 +308,20 @@ impl AccountantSkel { /// Split Request list into verified transactions and the rest fn partition_requests( req_vers: Vec<(Request, SocketAddr, u8)>, - ) -> (Vec, Vec<(Request, SocketAddr)>) { - let mut trs = vec![]; + ) -> (Vec, Vec<(Request, SocketAddr)>) { + let mut events = vec![]; let mut reqs = vec![]; for (msg, rsp_addr, verify) in req_vers { match msg { Request::Transaction(tr) => { if verify != 0 { - trs.push(tr); + events.push(Event::Transaction(tr)); } } _ => reqs.push((msg, rsp_addr)), } } - (trs, reqs) + (events, reqs) } fn process_packets( @@ -329,16 +329,16 @@ impl AccountantSkel { req_vers: Vec<(Request, SocketAddr, u8)>, ) -> Result> { debug!("partitioning"); - let (trs, reqs) = Self::partition_requests(req_vers); - debug!("trs: {} reqs: {}", trs.len(), reqs.len()); + let (events, reqs) = Self::partition_requests(req_vers); + debug!("events: {} reqs: {}", events.len(), reqs.len()); // Process the transactions in parallel and then log the successful ones. - for result in self.acc.lock().unwrap().process_verified_transactions(trs) { - if let Ok(tr) = result { + for result in self.acc.lock().unwrap().process_verified_events(events) { + if let Ok(event) = result { self.historian_input .lock() .unwrap() - .send(Signal::Event(Event::Transaction(tr)))?; + .send(Signal::Event(event))?; } } @@ -436,13 +436,12 @@ impl AccountantSkel { for msgs in &blobs { let blob = msgs.read().unwrap(); let entries: Vec = deserialize(&blob.data()[..blob.meta.size]).unwrap(); + let acc = obj.acc.lock().unwrap(); for entry in entries { - obj.acc.lock().unwrap().register_entry_id(&entry.id); - - obj.acc - .lock() - .unwrap() - .process_verified_events(entry.events)?; + acc.register_entry_id(&entry.id); + for result in acc.process_verified_events(entry.events) { + result?; + } } //TODO respond back to leader with hash of the state } @@ -805,7 +804,11 @@ mod tests { // the account balance below zero before the credit is added. 
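        // process_verified_events now yields a Result per event; the assert below checks each one.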
let acc = Accountant::new(&mint); for entry in entries { - acc.process_verified_events(entry.events).unwrap(); + assert!( + acc.process_verified_events(entry.events) + .into_iter() + .all(|x| x.is_ok()) + ); } assert_eq!(acc.get_balance(&alice.pubkey()), Some(1)); } diff --git a/src/bin/testnode.rs b/src/bin/testnode.rs index e9318bbb8f..8d1a4e1eab 100644 --- a/src/bin/testnode.rs +++ b/src/bin/testnode.rs @@ -103,7 +103,13 @@ fn main() { let mut last_id = entry1.id; for entry in entries { last_id = entry.id; - acc.process_verified_events(entry.events).unwrap(); + let results = acc.process_verified_events(entry.events); + for result in results { + if let Err(e) = result { + eprintln!("failed to process event {:?}", e); + exit(1); + } + } acc.register_entry_id(&last_id); } From 62bb78f58d3cff60afe4c291e9f551da7026ecd9 Mon Sep 17 00:00:00 2001 From: Greg Fitzgerald Date: Mon, 7 May 2018 15:09:08 -0600 Subject: [PATCH 40/51] Prepwork to hoist processing requests --- src/accountant_skel.rs | 54 +++++++++++++++++++++--------------------- 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/src/accountant_skel.rs b/src/accountant_skel.rs index e334665687..18ce6cb6b9 100644 --- a/src/accountant_skel.rs +++ b/src/accountant_skel.rs @@ -324,15 +324,8 @@ impl AccountantSkel { (events, reqs) } - fn process_packets( - &self, - req_vers: Vec<(Request, SocketAddr, u8)>, - ) -> Result> { - debug!("partitioning"); - let (events, reqs) = Self::partition_requests(req_vers); - debug!("events: {} reqs: {}", events.len(), reqs.len()); - - // Process the transactions in parallel and then log the successful ones. + /// Process the transactions in parallel and then log the successful ones. + fn process_events(&self, events: Vec) -> Result<()> { for result in self.acc.lock().unwrap().process_verified_events(events) { if let Ok(event) = result { self.historian_input @@ -347,17 +340,15 @@ impl AccountantSkel { // Let validators know they should not attempt to process additional // transactions in parallel. self.historian_input.lock().unwrap().send(Signal::Tick)?; - debug!("after historian_input"); - // Process the remaining requests serially. - let rsps = reqs.into_iter() + Ok(()) + } + + fn process_requests(&self, reqs: Vec<(Request, SocketAddr)>) -> Vec<(Response, SocketAddr)> { + reqs.into_iter() .filter_map(|(req, rsp_addr)| self.process_request(req, rsp_addr)) - .collect(); - - debug!("returning rsps"); - - Ok(rsps) + .collect() } fn serialize_response( @@ -409,9 +400,19 @@ impl AccountantSkel { v }) .collect(); - debug!("process_packets"); - let rsps = obj.process_packets(req_vers)?; - debug!("done process_packets"); + + debug!("partitioning"); + let (events, reqs) = Self::partition_requests(req_vers); + debug!("events: {} reqs: {}", events.len(), reqs.len()); + + debug!("process_events"); + obj.process_events(events)?; + debug!("done process_events"); + + debug!("process_requests"); + let rsps = obj.process_requests(reqs); + debug!("done process_requests"); + let blobs = Self::serialize_responses(rsps, blob_recycler)?; if !blobs.is_empty() { info!("process: sending blobs: {}", blobs.len()); @@ -731,7 +732,7 @@ mod tests { use signature::{KeyPair, KeyPairUtil}; use std::collections::VecDeque; use std::io::sink; - use std::net::{SocketAddr, UdpSocket}; + use std::net::UdpSocket; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::mpsc::channel; use std::sync::mpsc::sync_channel; @@ -774,7 +775,6 @@ mod tests { // Entry OR if the verifier tries to parallelize across multiple Entries. 
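        // This test now feeds Events straight into process_events; the request/socket plumbing is gone.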
let mint = Mint::new(2); let acc = Accountant::new(&mint); - let rsp_addr: SocketAddr = "0.0.0.0:0".parse().expect("socket address"); let (input, event_receiver) = sync_channel(10); let historian = Historian::new(event_receiver, &mint.last_id(), None); let skel = AccountantSkel::new(acc, input, historian); @@ -782,13 +782,13 @@ mod tests { // Process a batch that includes a transaction that receives two tokens. let alice = KeyPair::new(); let tr = Transaction::new(&mint.keypair(), alice.pubkey(), 2, mint.last_id()); - let req_vers = vec![(Request::Transaction(tr), rsp_addr, 1_u8)]; - assert!(skel.process_packets(req_vers).is_ok()); + let events = vec![Event::Transaction(tr)]; + assert!(skel.process_events(events).is_ok()); // Process a second batch that spends one of those tokens. let tr = Transaction::new(&alice, mint.pubkey(), 1, mint.last_id()); - let req_vers = vec![(Request::Transaction(tr), rsp_addr, 1_u8)]; - assert!(skel.process_packets(req_vers).is_ok()); + let events = vec![Event::Transaction(tr)]; + assert!(skel.process_events(events).is_ok()); // Collect the ledger and feed it to a new accountant. skel.historian_input @@ -1102,7 +1102,7 @@ mod bench { let skel = AccountantSkel::new(acc, input, historian); let now = Instant::now(); - assert!(skel.process_packets(req_vers).is_ok()); + assert!(skel.process_events(req_vers).is_ok()); let duration = now.elapsed(); let sec = duration.as_secs() as f64 + duration.subsec_nanos() as f64 / 1_000_000_000.0; let tps = txs as f64 / sec; From f159dfd15a6328a3392f9a6d272212b5f839fefb Mon Sep 17 00:00:00 2001 From: Greg Fitzgerald Date: Mon, 7 May 2018 16:32:45 -0600 Subject: [PATCH 41/51] Update README with proposed way to download the gpu lib If you checked here yesterday, this was a top-level file in git-lfs, but that made the developer workflow more painful so we boot that file and are making it available via an http endpoint. --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 46bd76e70e..61fa07f5c8 100644 --- a/README.md +++ b/README.md @@ -146,6 +146,7 @@ $ cargo +nightly bench --features="unstable" To run the benchmarks on Linux with GPU optimizations enabled: ```bash +$ wget https://solana.com/gpu/latest/libcuda_verify_ed25519.a $ cargo +nightly bench --features="unstable,cuda" ``` From a4ecd09723ea39e8a46fa0069d63ae1d367933f0 Mon Sep 17 00:00:00 2001 From: Greg Fitzgerald Date: Mon, 7 May 2018 16:35:52 -0600 Subject: [PATCH 42/51] Delete .gitattributes This was used by git-lfs. 
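The `cuda` cargo feature referenced throughout these patches selects the GPU code path at build time. Below is a minimal sketch of such a feature gate, using hypothetical simplified types — the crate's real `ecdsa::ed25519_verify` operates on `SharedPackets`, and the binding to libcuda_verify_ed25519.a is assumed here rather than taken from this series:

```rust
// Hypothetical sketch of a cargo feature gate; not the crate's real API.
// Building with `--features=cuda` compiles the GPU-backed path; the
// default build falls back to a portable CPU implementation.

#[cfg(feature = "cuda")]
fn ed25519_verify(_batches: &[Vec<u8>]) -> Vec<u8> {
    // Assumption: this is where an extern "C" call into the prebuilt
    // static library (libcuda_verify_ed25519.a) would live.
    unimplemented!("GPU path omitted from this sketch")
}

#[cfg(not(feature = "cuda"))]
fn ed25519_verify(batches: &[Vec<u8>]) -> Vec<u8> {
    // CPU fallback with placeholder logic: flag every batch as verified.
    batches.iter().map(|_| 1u8).collect()
}

fn main() {
    let batches = vec![vec![0u8; 64], vec![1u8; 64]];
    println!("verify flags: {:?}", ed25519_verify(&batches));
}
```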
--- .gitattributes | 1 - 1 file changed, 1 deletion(-) delete mode 100644 .gitattributes diff --git a/.gitattributes b/.gitattributes deleted file mode 100644 index ce24744af8..0000000000 --- a/.gitattributes +++ /dev/null @@ -1 +0,0 @@ -*.a filter=lfs diff=lfs merge=lfs -text \ No newline at end of file From 9ff1a6f0cd35eb0322731a03eb2d4c2e0324e664 Mon Sep 17 00:00:00 2001 From: Greg Fitzgerald Date: Mon, 7 May 2018 21:44:44 -0600 Subject: [PATCH 43/51] Add a thread to support thin clients --- src/accountant_skel.rs | 36 ++++++++++++++++++++++++++++++------ src/accountant_stub.rs | 12 ++++++++---- src/bin/testnode.rs | 13 +++++++++++-- 3 files changed, 49 insertions(+), 12 deletions(-) diff --git a/src/accountant_skel.rs b/src/accountant_skel.rs index 18ce6cb6b9..58dad07712 100644 --- a/src/accountant_skel.rs +++ b/src/accountant_skel.rs @@ -212,6 +212,24 @@ impl AccountantSkel { }) } + fn process_thin_client_requests(_obj: SharedSkel, _socket: &UdpSocket) -> Result<()> { + Ok(()) + } + + fn thin_client_service( + obj: SharedSkel, + exit: Arc, + socket: UdpSocket, + ) -> JoinHandle<()> { + spawn(move || loop { + let _ = Self::process_thin_client_requests(obj.clone(), &socket); + if exit.load(Ordering::Relaxed) { + info!("sync_service exiting"); + break; + } + }) + } + /// Process any Entry items that have been published by the Historian. /// continuosly broadcast blobs of entries out fn run_sync_no_broadcast(obj: SharedSkel) -> Result<()> { @@ -459,6 +477,7 @@ impl AccountantSkel { obj: &SharedSkel, me: ReplicatedData, serve: UdpSocket, + skinny: UdpSocket, gossip: UdpSocket, exit: Arc, writer: W, @@ -513,6 +532,8 @@ impl AccountantSkel { Arc::new(Mutex::new(writer)), ); + let t_skinny = Self::thin_client_service(obj.clone(), exit.clone(), skinny); + let skel = obj.clone(); let t_server = spawn(move || loop { let e = Self::process( @@ -534,6 +555,7 @@ impl AccountantSkel { t_server, t_verifier, t_sync, + t_skinny, t_gossip, t_listen, t_broadcast, @@ -815,7 +837,7 @@ mod tests { #[test] fn test_accountant_bad_sig() { - let (leader_data, leader_gossip, _, leader_serve) = test_node(); + let (leader_data, leader_gossip, _, leader_serve, leader_skinny) = test_node(); let alice = Mint::new(10_000); let acc = Accountant::new(&alice); let bob_pubkey = KeyPair::new().pubkey(); @@ -828,6 +850,7 @@ mod tests { &acc_skel, leader_data, leader_serve, + leader_skinny, leader_gossip, exit.clone(), sink(), @@ -861,7 +884,8 @@ mod tests { } } - fn test_node() -> (ReplicatedData, UdpSocket, UdpSocket, UdpSocket) { + fn test_node() -> (ReplicatedData, UdpSocket, UdpSocket, UdpSocket, UdpSocket) { + let skinny = UdpSocket::bind("127.0.0.1:0").unwrap(); let gossip = UdpSocket::bind("127.0.0.1:0").unwrap(); let replicate = UdpSocket::bind("127.0.0.1:0").unwrap(); let serve = UdpSocket::bind("127.0.0.1:0").unwrap(); @@ -872,16 +896,16 @@ mod tests { replicate.local_addr().unwrap(), serve.local_addr().unwrap(), ); - (d, gossip, replicate, serve) + (d, gossip, replicate, serve, skinny) } /// Test that mesasge sent from leader to target1 and repliated to target2 #[test] fn test_replicate() { logger::setup(); - let (leader_data, leader_gossip, _, leader_serve) = test_node(); - let (target1_data, target1_gossip, target1_replicate, target1_serve) = test_node(); - let (target2_data, target2_gossip, target2_replicate, _) = test_node(); + let (leader_data, leader_gossip, _, leader_serve, _) = test_node(); + let (target1_data, target1_gossip, target1_replicate, target1_serve, _) = test_node(); + let (target2_data, 
target2_gossip, target2_replicate, _, _) = test_node(); let exit = Arc::new(AtomicBool::new(false)); //start crdt_leader diff --git a/src/accountant_stub.rs b/src/accountant_stub.rs index d5356083be..0527f6b6b5 100644 --- a/src/accountant_stub.rs +++ b/src/accountant_stub.rs @@ -168,6 +168,7 @@ mod tests { logger::setup(); let gossip = UdpSocket::bind("0.0.0.0:0").unwrap(); let serve = UdpSocket::bind("0.0.0.0:0").unwrap(); + let skinny = UdpSocket::bind("0.0.0.0:0").unwrap(); let addr = serve.local_addr().unwrap(); let pubkey = KeyPair::new().pubkey(); let d = ReplicatedData::new( @@ -184,7 +185,8 @@ mod tests { let (input, event_receiver) = sync_channel(10); let historian = Historian::new(event_receiver, &alice.last_id(), Some(30)); let acc = Arc::new(AccountantSkel::new(acc, input, historian)); - let threads = AccountantSkel::serve(&acc, d, serve, gossip, exit.clone(), sink()).unwrap(); + let threads = + AccountantSkel::serve(&acc, d, serve, skinny, gossip, exit.clone(), sink()).unwrap(); sleep(Duration::from_millis(300)); let socket = UdpSocket::bind("0.0.0.0:0").unwrap(); @@ -200,9 +202,10 @@ mod tests { } } - fn test_node() -> (ReplicatedData, UdpSocket, UdpSocket, UdpSocket) { + fn test_node() -> (ReplicatedData, UdpSocket, UdpSocket, UdpSocket, UdpSocket) { let gossip = UdpSocket::bind("0.0.0.0:0").unwrap(); let serve = UdpSocket::bind("0.0.0.0:0").unwrap(); + let skinny = UdpSocket::bind("0.0.0.0:0").unwrap(); let replicate = UdpSocket::bind("0.0.0.0:0").unwrap(); let pubkey = KeyPair::new().pubkey(); let leader = ReplicatedData::new( @@ -211,7 +214,7 @@ mod tests { replicate.local_addr().unwrap(), serve.local_addr().unwrap(), ); - (leader, gossip, serve, replicate) + (leader, gossip, serve, replicate, skinny) } #[test] @@ -242,6 +245,7 @@ mod tests { &leader_acc, leader.0.clone(), leader.2, + leader.4, leader.1, exit.clone(), sink(), @@ -257,7 +261,7 @@ mod tests { ).unwrap(); //lets spy on the network - let (mut spy, spy_gossip, _, _) = test_node(); + let (mut spy, spy_gossip, _, _, _) = test_node(); let daddr = "0.0.0.0:0".parse().unwrap(); spy.replicate_addr = daddr; spy.serve_addr = daddr; diff --git a/src/bin/testnode.rs b/src/bin/testnode.rs index 8d1a4e1eab..5aede0c3f1 100644 --- a/src/bin/testnode.rs +++ b/src/bin/testnode.rs @@ -55,6 +55,7 @@ fn main() { let serve_addr = format!("0.0.0.0:{}", port); let gossip_addr = format!("0.0.0.0:{}", port + 1); let replicate_addr = format!("0.0.0.0:{}", port + 2); + let skinny_addr = format!("0.0.0.0:{}", port + 3); if stdin_isatty() { eprintln!("nothing found on stdin, expected a log file"); @@ -122,6 +123,7 @@ fn main() { let serve_sock = UdpSocket::bind(&serve_addr).unwrap(); let gossip_sock = UdpSocket::bind(&gossip_addr).unwrap(); let replicate_sock = UdpSocket::bind(&replicate_addr).unwrap(); + let skinny_sock = UdpSocket::bind(&skinny_addr).unwrap(); let pubkey = KeyPair::new().pubkey(); let d = ReplicatedData::new( pubkey, @@ -130,8 +132,15 @@ fn main() { serve_sock.local_addr().unwrap(), ); eprintln!("starting server..."); - let threads = - AccountantSkel::serve(&skel, d, serve_sock, gossip_sock, exit.clone(), stdout()).unwrap(); + let threads = AccountantSkel::serve( + &skel, + d, + serve_sock, + skinny_sock, + gossip_sock, + exit.clone(), + stdout(), + ).unwrap(); eprintln!("Ready. 
Listening on {}", serve_addr); for t in threads { t.join().expect("join"); From 9a0bf13febd1943c6be02da126620bf6b9dd34e1 Mon Sep 17 00:00:00 2001 From: Anatoly Yakovenko Date: Tue, 8 May 2018 06:44:24 -0700 Subject: [PATCH 44/51] update link --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 61fa07f5c8..ce30207387 100644 --- a/README.md +++ b/README.md @@ -146,7 +146,7 @@ $ cargo +nightly bench --features="unstable" To run the benchmarks on Linux with GPU optimizations enabled: ```bash -$ wget https://solana.com/gpu/latest/libcuda_verify_ed25519.a +$ wget https://solana-build-artifacts.s3.amazonaws.com/v0.5.0/libcuda_verify_ed25519.a $ cargo +nightly bench --features="unstable,cuda" ``` From bd0671e123721b9127d9667830a3e08f138382c2 Mon Sep 17 00:00:00 2001 From: Stephen Akridge Date: Mon, 7 May 2018 16:49:15 -0700 Subject: [PATCH 45/51] Rework sig processing threads and add perf for process/verify --- Cargo.toml | 1 + src/accountant_skel.rs | 158 ++++++++++++++++++++++++++++------------- src/accountant_stub.rs | 14 +++- src/lib.rs | 3 + src/timing.rs | 15 ++++ 5 files changed, 140 insertions(+), 51 deletions(-) create mode 100644 src/timing.rs diff --git a/Cargo.toml b/Cargo.toml index 4317a31b15..6cf9607b6b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -68,3 +68,4 @@ libc = "^0.2.1" getopts = "^0.2" isatty = "0.1" futures = "0.1" +rand = "0.4.2" diff --git a/src/accountant_skel.rs b/src/accountant_skel.rs index 18ce6cb6b9..b4645dc896 100644 --- a/src/accountant_skel.rs +++ b/src/accountant_skel.rs @@ -17,7 +17,6 @@ use recorder::Signal; use result::Result; use serde_json; use signature::PublicKey; -use std::cmp::max; use std::collections::VecDeque; use std::io::sink; use std::io::{Cursor, Write}; @@ -30,6 +29,9 @@ use std::thread::{spawn, JoinHandle}; use std::time::Duration; use streamer; use transaction::Transaction; +use timing; +use std::time::Instant; +use rand::{thread_rng, Rng}; pub struct AccountantSkel { acc: Mutex, @@ -256,41 +258,64 @@ impl AccountantSkel { } } - fn recv_batch(recvr: &streamer::PacketReceiver) -> Result> { + fn recv_batch(recvr: &streamer::PacketReceiver) -> Result<(Vec, usize)> { let timer = Duration::new(1, 0); let msgs = recvr.recv_timeout(timer)?; debug!("got msgs"); + let mut len = msgs.read().unwrap().packets.len(); let mut batch = vec![msgs]; while let Ok(more) = recvr.try_recv() { - debug!("got more msgs"); + trace!("got more msgs"); + len += more.read().unwrap().packets.len(); batch.push(more); + + if len > 100_000 { + break; + } } - info!("batch len {}", batch.len()); - Ok(batch) + debug!("batch len {}", batch.len()); + Ok((batch, len)) } - fn verify_batch(batch: Vec) -> Vec)>> { - let chunk_size = max(1, (batch.len() + 3) / 4); - let batches: Vec<_> = batch.chunks(chunk_size).map(|x| x.to_vec()).collect(); - batches - .into_par_iter() - .map(|batch| { - let r = ecdsa::ed25519_verify(&batch); - batch.into_iter().zip(r).collect() - }) - .collect() + fn verify_batch( + batch: Vec, + sendr: &Arc)>>>>, + ) -> Result<()> { + let r = ecdsa::ed25519_verify(&batch); + let res = batch.into_iter().zip(r).collect(); + sendr.lock().unwrap().send(res)?; + // TODO: fix error handling here? 
+ Ok(()) } fn verifier( - recvr: &streamer::PacketReceiver, - sendr: &Sender)>>, + recvr: &Arc>, + sendr: &Arc)>>>>, ) -> Result<()> { - let batch = Self::recv_batch(recvr)?; - let verified_batches = Self::verify_batch(batch); - debug!("verified batches: {}", verified_batches.len()); - for xs in verified_batches { - sendr.send(xs)?; - } + let (batch, len) = Self::recv_batch(&recvr.lock().unwrap())?; + let now = Instant::now(); + let batch_len = batch.len(); + let rand_id = thread_rng().gen_range(0, 100); + info!( + "@{:?} verifier: verifying: {} id: {}", + timing::timestamp(), + batch.len(), + rand_id + ); + + Self::verify_batch(batch, sendr).unwrap(); + + let total_time_ms = timing::duration_as_ms(&now.elapsed()); + let total_time_s = timing::duration_as_s(&now.elapsed()); + info!( + "@{:?} verifier: done. batches: {} total verify time: {:?} id: {} verified: {} v/s {}", + timing::timestamp(), + batch_len, + total_time_ms, + rand_id, + len, + (len as f32 / total_time_s) + ); Ok(()) } @@ -335,8 +360,6 @@ impl AccountantSkel { } } - debug!("processing verified"); - // Let validators know they should not attempt to process additional // transactions in parallel. self.historian_input.lock().unwrap().send(Signal::Tick)?; @@ -387,16 +410,25 @@ impl AccountantSkel { blob_recycler: &packet::BlobRecycler, ) -> Result<()> { let timer = Duration::new(1, 0); + let recv_start = Instant::now(); let mms = verified_receiver.recv_timeout(timer)?; - debug!("got some messages: {}", mms.len()); + let mut reqs_len = 0; + let mms_len = mms.len(); + info!( + "@{:?} process start stalled for: {:?}ms batches: {}", + timing::timestamp(), + timing::duration_as_ms(&recv_start.elapsed()), + mms.len(), + ); + let proc_start = Instant::now(); for (msgs, vers) in mms { let reqs = Self::deserialize_packets(&msgs.read().unwrap()); + reqs_len += reqs.len(); let req_vers = reqs.into_iter() .zip(vers) .filter_map(|(req, ver)| req.map(|(msg, addr)| (msg, addr, ver))) .filter(|x| { let v = x.0.verify(); - trace!("v:{} x:{:?}", v, x); v }) .collect(); @@ -421,7 +453,16 @@ impl AccountantSkel { } packet_recycler.recycle(msgs); } - debug!("done responding"); + let total_time_s = timing::duration_as_s(&proc_start.elapsed()); + let total_time_ms = timing::duration_as_ms(&proc_start.elapsed()); + info!( + "@{:?} done process batches: {} time: {:?}ms reqs: {} reqs/s: {}", + timing::timestamp(), + mms_len, + total_time_ms, + reqs_len, + (reqs_len as f32) / (total_time_s) + ); Ok(()) } /// Process verified blobs, already in order @@ -486,13 +527,21 @@ impl AccountantSkel { ); let (verified_sender, verified_receiver) = channel(); - let exit_ = exit.clone(); - let t_verifier = spawn(move || loop { - let e = Self::verifier(&packet_receiver, &verified_sender); - if e.is_err() && exit_.load(Ordering::Relaxed) { - break; - } - }); + let mut verify_threads = Vec::new(); + let shared_verified_sender = Arc::new(Mutex::new(verified_sender)); + let shared_packet_receiver = Arc::new(Mutex::new(packet_receiver)); + for _ in 0..4 { + let exit_ = exit.clone(); + let recv = shared_packet_receiver.clone(); + let sender = shared_verified_sender.clone(); + let thread = spawn(move || loop { + let e = Self::verifier(&recv, &sender); + if e.is_err() && exit_.load(Ordering::Relaxed) { + break; + } + }); + verify_threads.push(thread); + } let (broadcast_sender, broadcast_receiver) = channel(); @@ -528,16 +577,18 @@ impl AccountantSkel { } } }); - Ok(vec![ + + let mut threads = vec![ t_receiver, t_responder, t_server, - t_verifier, t_sync, t_gossip, t_listen, 
t_broadcast, - ]) + ]; + threads.extend(verify_threads.into_iter()); + Ok(threads) } /// This service receives messages from a leader in the network and processes the transactions @@ -639,15 +690,21 @@ impl AccountantSkel { ); let (verified_sender, verified_receiver) = channel(); - let exit_ = exit.clone(); - let t_verifier = spawn(move || loop { - let e = Self::verifier(&packet_receiver, &verified_sender); - if e.is_err() && exit_.load(Ordering::Relaxed) { - trace!("verifier exiting"); - break; - } - }); - + let mut verify_threads = Vec::new(); + let shared_verified_sender = Arc::new(Mutex::new(verified_sender)); + let shared_packet_receiver = Arc::new(Mutex::new(packet_receiver)); + for _ in 0..4 { + let exit_ = exit.clone(); + let recv = shared_packet_receiver.clone(); + let sender = shared_verified_sender.clone(); + let thread = spawn(move || loop { + let e = Self::verifier(&recv, &sender); + if e.is_err() && exit_.load(Ordering::Relaxed) { + break; + } + }); + verify_threads.push(thread); + } let t_sync = Self::sync_no_broadcast_service(obj.clone(), exit.clone()); let skel = obj.clone(); @@ -667,7 +724,7 @@ impl AccountantSkel { } }); - Ok(vec![ + let mut threads = vec![ //replicate threads t_blob_receiver, t_retransmit, @@ -679,9 +736,10 @@ impl AccountantSkel { t_packet_receiver, t_responder, t_server, - t_verifier, t_sync, - ]) + ]; + threads.extend(verify_threads.into_iter()); + Ok(threads) } } diff --git a/src/accountant_stub.rs b/src/accountant_stub.rs index d5356083be..c0fc43bf17 100644 --- a/src/accountant_stub.rs +++ b/src/accountant_stub.rs @@ -161,6 +161,7 @@ mod tests { use std::sync::{Arc, RwLock}; use std::thread::sleep; use std::time::Duration; + use std::time::Instant; // TODO: Figure out why this test sometimes hangs on TravisCI. 
#[test] @@ -193,7 +194,18 @@ mod tests { let last_id = acc.get_last_id().wait().unwrap(); let _sig = acc.transfer(500, &alice.keypair(), bob_pubkey, &last_id) .unwrap(); - assert_eq!(acc.get_balance(&bob_pubkey).unwrap(), 500); + let mut balance; + let now = Instant::now(); + loop { + balance = acc.get_balance(&bob_pubkey); + if balance.is_ok() { + break; + } + if now.elapsed().as_secs() > 0 { + break; + } + } + assert_eq!(balance.unwrap(), 500); exit.store(true, Ordering::Relaxed); for t in threads { t.join().unwrap(); diff --git a/src/lib.rs b/src/lib.rs index 75c9b65f84..b316a79c98 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -20,6 +20,7 @@ pub mod result; pub mod signature; pub mod streamer; pub mod transaction; +pub mod timing; extern crate bincode; extern crate byteorder; extern crate chrono; @@ -41,3 +42,5 @@ extern crate futures; #[cfg(test)] #[macro_use] extern crate matches; + +extern crate rand; diff --git a/src/timing.rs b/src/timing.rs new file mode 100644 index 0000000000..5c36fad805 --- /dev/null +++ b/src/timing.rs @@ -0,0 +1,15 @@ +use std::time::{SystemTime, UNIX_EPOCH}; +use std::time::Duration; + +pub fn duration_as_ms(d: &Duration) -> u64 { + return (d.as_secs() * 1000) + (d.subsec_nanos() as u64 / 1_000_000); +} + +pub fn duration_as_s(d: &Duration) -> f32 { + return d.as_secs() as f32 + (d.subsec_nanos() as f32 / 1_000_000_000.0); +} + +pub fn timestamp() -> u64 { + let now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap(); + return duration_as_ms(&now); +} From c9c9afa4720d9e38888241a4e9a9a76f5c3dfd67 Mon Sep 17 00:00:00 2001 From: Greg Fitzgerald Date: Tue, 8 May 2018 12:52:24 -0600 Subject: [PATCH 46/51] Remove the note about git-lfs --- README.md | 4 ---- 1 file changed, 4 deletions(-) diff --git a/README.md b/README.md index ce30207387..963921819a 100644 --- a/README.md +++ b/README.md @@ -29,10 +29,6 @@ $ curl https://sh.rustup.rs -sSf | sh $ source $HOME/.cargo/env ``` -If you plan to run with GPU optimizations enabled (not recommended), you'll need a CUDA library stored in git LFS. Install git-lfs here: - -https://git-lfs.github.com/ - Now checkout the code from github: ```bash From fae019b974bb81463fe58cc0284bf4c15a4f6638 Mon Sep 17 00:00:00 2001 From: Stephen Akridge Date: Tue, 8 May 2018 13:25:59 -0700 Subject: [PATCH 47/51] Add message about trace debugging --- README.md | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/README.md b/README.md index 963921819a..8a902bcd9a 100644 --- a/README.md +++ b/README.md @@ -124,6 +124,18 @@ Run the test suite: cargo test ``` +Debugging +--- + +There are some useful debug messages in the code, you can enable them on a per-module and per-level +basis with the normal RUST\_LOG environment variable. Run the testnode with this syntax: +```bash +$ RUST_LOG=solana::streamer=debug,solana::accountant_skel=info cat genesis.log | ./target/release/solana-testnode > transactions0.log +``` +to see the debug and info sections for streamer and accountant\_skel respectively. Generally +we are using debug for infrequent debug messages, trace for potentially frequent messages and +info for performance-related logging. 
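A minimal sketch of how those three levels are typically emitted with the `log` crate macros — the module path matched by RUST_LOG is derived from where each macro is invoked. The `env_logger` setup and the crate dependencies are assumptions made for the sake of a runnable example, not code from this patch:

```rust
// Illustrative only: debug for infrequent messages, trace for
// potentially frequent ones, info for performance-related summaries.
// Assumes `log` and `env_logger` as dependencies.
use log::{debug, info, trace};

fn process_batch(batch: &[u64]) {
    debug!("starting batch of {} items", batch.len()); // infrequent
    for item in batch {
        trace!("item: {}", item); // potentially very frequent
    }
    info!("processed {} items", batch.len()); // performance summary
}

fn main() {
    // A logger that reads RUST_LOG must be installed before logging.
    env_logger::init();
    process_batch(&[1, 2, 3]);
}
```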
+ Benchmarking --- From 3e73fb923332d9e18594673897714476c1353e2e Mon Sep 17 00:00:00 2001 From: Stephen Akridge Date: Tue, 8 May 2018 15:21:28 -0700 Subject: [PATCH 48/51] Trust the recorder not to give us more than we can serialize Also run client for 10 seconds, 5 is bit too short --- src/accountant_skel.rs | 7 +++---- src/bin/client-demo.rs | 2 +- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/src/accountant_skel.rs b/src/accountant_skel.rs index b4645dc896..87407f5f98 100644 --- a/src/accountant_skel.rs +++ b/src/accountant_skel.rs @@ -160,10 +160,9 @@ impl AccountantSkel { // See that we made progress and a single // vec of Events wasn't too big for a single packet if end <= start { - eprintln!("Event too big for the blob!"); - start += 1; - end = start; - continue; + // Trust the recorder to not package more than we can + // serialize + end += 1; } let b = blob_recycler.allocate(); diff --git a/src/bin/client-demo.rs b/src/bin/client-demo.rs index 4c868dfcf0..a4585dbac5 100644 --- a/src/bin/client-demo.rs +++ b/src/bin/client-demo.rs @@ -137,7 +137,7 @@ fn main() { println!("Waiting for transactions to complete...",); let mut tx_count; - for _ in 0..5 { + for _ in 0..10 { tx_count = acc.transaction_count(); duration = now.elapsed(); let txs = tx_count - initial_tx_count; From 4870def1fb4f991cc76f1edc51c9d1cec7f0cdb2 Mon Sep 17 00:00:00 2001 From: Stephen Akridge Date: Tue, 8 May 2018 13:36:49 -0700 Subject: [PATCH 49/51] Fix default client port, server uses 8000-8002 for gossip. --- src/bin/client-demo.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/bin/client-demo.rs b/src/bin/client-demo.rs index 4c868dfcf0..fc7d511bce 100644 --- a/src/bin/client-demo.rs +++ b/src/bin/client-demo.rs @@ -35,7 +35,7 @@ fn print_usage(program: &str, opts: Options) { fn main() { let mut threads = 4usize; let mut addr: String = "127.0.0.1:8000".to_string(); - let mut client_addr: String = "127.0.0.1:8001".to_string(); + let mut client_addr: String = "127.0.0.1:8010".to_string(); let mut opts = Options::new(); opts.optopt("s", "", "server address", "host:port"); From 785e97169879b585d8b10b2e6b7db626f2d61862 Mon Sep 17 00:00:00 2001 From: Greg Fitzgerald Date: Tue, 8 May 2018 17:32:50 -0600 Subject: [PATCH 50/51] AccountantSkel -> Tpu The terms Stub and Skel come from OMG IDL and only made sense while the Stub was acting as an RPC client for the the Accountant object. Nowadays, the Stub interface looks nothing like the Accountant and meanwhile we've recognized the multithreaded implementation is more reminiscent of a pipelined CPU. Thus, we finally bite the bullet and rename our modules. AccountantSkel -> Tpu AccountantStub -> ThinClient Up next will be moving much of the TPU code into separate modules, each representing a stage of the pipeline. The interface of each will follow the precedent set by the Historian object. 
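Since the commit message frames the TPU as a pipelined CPU implemented in software, here is a minimal sketch of the channel-per-stage pattern it alludes to — `run_stage` and the toy stages are illustrative names, not part of this series:

```rust
// A minimal, hypothetical sketch of one pipeline stage: a thread that
// drains an input channel, applies some work, and feeds the next stage.
use std::sync::mpsc::{channel, Receiver, Sender};
use std::thread::{spawn, JoinHandle};

fn run_stage<T, U, F>(input: Receiver<T>, output: Sender<U>, mut work: F) -> JoinHandle<()>
where
    T: Send + 'static,
    U: Send + 'static,
    F: FnMut(T) -> U + Send + 'static,
{
    spawn(move || {
        // The loop ends when the upstream sender is dropped.
        for item in input {
            if output.send(work(item)).is_err() {
                break; // downstream stage hung up
            }
        }
    })
}

fn main() {
    // Two chained stages: "verify" (double the value) then "record"
    // (render it as a string), echoing the stage-per-thread shape.
    let (tx_in, rx_in) = channel();
    let (tx_mid, rx_mid) = channel();
    let (tx_out, rx_out) = channel();
    let verify = run_stage(rx_in, tx_mid, |x: u64| x * 2);
    let record = run_stage(rx_mid, tx_out, |x: u64| x.to_string());
    tx_in.send(21).unwrap();
    drop(tx_in); // close the pipeline so both stages drain and exit
    assert_eq!(rx_out.recv().unwrap(), "42");
    verify.join().unwrap();
    record.join().unwrap();
}
```

Each stage owns exactly one thread and communicates only through channels, which is what lets the stages run concurrently on different batches, much like instructions in flight in a hardware pipeline.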
--- src/bin/client-demo.rs | 6 +- src/bin/testnode.rs | 8 +- src/ecdsa.rs | 2 +- src/lib.rs | 6 +- src/{accountant_stub.rs => thin_client.rs} | 53 +++++----- src/{accountant_skel.rs => tpu.rs} | 107 ++++++++++----------- 6 files changed, 90 insertions(+), 92 deletions(-) rename src/{accountant_stub.rs => thin_client.rs} (88%) rename src/{accountant_skel.rs => tpu.rs} (93%) diff --git a/src/bin/client-demo.rs b/src/bin/client-demo.rs index 826e2fac6a..82cd42b2c1 100644 --- a/src/bin/client-demo.rs +++ b/src/bin/client-demo.rs @@ -10,7 +10,7 @@ use futures::Future; use getopts::Options; use isatty::stdin_isatty; use rayon::prelude::*; -use solana::accountant_stub::AccountantStub; +use solana::thin_client::ThinClient; use solana::mint::MintDemo; use solana::signature::{KeyPair, KeyPairUtil}; use solana::transaction::Transaction; @@ -87,7 +87,7 @@ fn main() { println!("Binding to {}", client_addr); let socket = UdpSocket::bind(&client_addr).unwrap(); socket.set_read_timeout(Some(Duration::new(5, 0))).unwrap(); - let mut acc = AccountantStub::new(addr.parse().unwrap(), socket); + let mut acc = ThinClient::new(addr.parse().unwrap(), socket); println!("Get last ID..."); let last_id = acc.get_last_id().wait().unwrap(); @@ -129,7 +129,7 @@ fn main() { let mut client_addr: SocketAddr = client_addr.parse().unwrap(); client_addr.set_port(0); let socket = UdpSocket::bind(client_addr).unwrap(); - let acc = AccountantStub::new(addr.parse().unwrap(), socket); + let acc = ThinClient::new(addr.parse().unwrap(), socket); for tr in trs { acc.transfer_signed(tr.clone()).unwrap(); } diff --git a/src/bin/testnode.rs b/src/bin/testnode.rs index 5aede0c3f1..736acb0179 100644 --- a/src/bin/testnode.rs +++ b/src/bin/testnode.rs @@ -7,7 +7,7 @@ extern crate solana; use getopts::Options; use isatty::stdin_isatty; use solana::accountant::Accountant; -use solana::accountant_skel::AccountantSkel; +use solana::tpu::Tpu; use solana::crdt::ReplicatedData; use solana::entry::Entry; use solana::event::Event; @@ -119,7 +119,7 @@ fn main() { let (input, event_receiver) = sync_channel(10_000); let historian = Historian::new(event_receiver, &last_id, Some(1000)); let exit = Arc::new(AtomicBool::new(false)); - let skel = Arc::new(AccountantSkel::new(acc, input, historian)); + let tpu = Arc::new(Tpu::new(acc, input, historian)); let serve_sock = UdpSocket::bind(&serve_addr).unwrap(); let gossip_sock = UdpSocket::bind(&gossip_addr).unwrap(); let replicate_sock = UdpSocket::bind(&replicate_addr).unwrap(); @@ -132,8 +132,8 @@ fn main() { serve_sock.local_addr().unwrap(), ); eprintln!("starting server..."); - let threads = AccountantSkel::serve( - &skel, + let threads = Tpu::serve( + &tpu, d, serve_sock, skinny_sock, diff --git a/src/ecdsa.rs b/src/ecdsa.rs index c0a06646d0..c029f1e1f9 100644 --- a/src/ecdsa.rs +++ b/src/ecdsa.rs @@ -130,7 +130,7 @@ pub fn ed25519_verify(batches: &Vec) -> Vec> { #[cfg(test)] mod tests { - use accountant_skel::Request; + use tpu::Request; use bincode::serialize; use ecdsa; use packet::{Packet, Packets, SharedPackets}; diff --git a/src/lib.rs b/src/lib.rs index b316a79c98..5c7568c54d 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,7 +1,5 @@ #![cfg_attr(feature = "unstable", feature(test))] pub mod accountant; -pub mod accountant_skel; -pub mod accountant_stub; pub mod crdt; pub mod ecdsa; pub mod entry; @@ -19,8 +17,10 @@ pub mod recorder; pub mod result; pub mod signature; pub mod streamer; -pub mod transaction; +pub mod thin_client; pub mod timing; +pub mod transaction; +pub mod tpu; extern crate 
bincode; extern crate byteorder; extern crate chrono; diff --git a/src/accountant_stub.rs b/src/thin_client.rs similarity index 88% rename from src/accountant_stub.rs rename to src/thin_client.rs index 274bbb0d38..920081a4f6 100644 --- a/src/accountant_stub.rs +++ b/src/thin_client.rs @@ -1,9 +1,9 @@ -//! The `accountant_stub` module is a client-side object that interfaces with a server-side Accountant -//! object via the network interface exposed by AccountantSkel. Client code should use -//! this object instead of writing messages to the network directly. The binary -//! encoding of its messages are unstable and may change in future releases. +//! The `thin_client` module is a client-side object that interfaces with +//! a server-side TPU. Client code should use this object instead of writing +//! messages to the network directly. The binary encoding of its messages are +//! unstable and may change in future releases. -use accountant_skel::{Request, Response, Subscription}; +use tpu::{Request, Response, Subscription}; use bincode::{deserialize, serialize}; use futures::future::{ok, FutureResult}; use hash::Hash; @@ -13,7 +13,7 @@ use std::io; use std::net::{SocketAddr, UdpSocket}; use transaction::Transaction; -pub struct AccountantStub { +pub struct ThinClient { pub addr: SocketAddr, pub socket: UdpSocket, last_id: Option, @@ -21,20 +21,20 @@ pub struct AccountantStub { balances: HashMap>, } -impl AccountantStub { - /// Create a new AccountantStub that will interface with AccountantSkel +impl ThinClient { + /// Create a new ThinClient that will interface with Tpu /// over `socket`. To receive responses, the caller must bind `socket` - /// to a public address before invoking AccountantStub methods. + /// to a public address before invoking ThinClient methods. pub fn new(addr: SocketAddr, socket: UdpSocket) -> Self { - let stub = AccountantStub { + let client = ThinClient { addr: addr, socket, last_id: None, num_events: 0, balances: HashMap::new(), }; - stub.init(); - stub + client.init(); + client } pub fn init(&self) { @@ -119,7 +119,7 @@ impl AccountantStub { } /// Return the number of transactions the server processed since creating - /// this stub instance. + /// this client instance. pub fn transaction_count(&mut self) -> u64 { // Wait for at least one EntryInfo. let mut done = false; @@ -148,7 +148,7 @@ impl AccountantStub { mod tests { use super::*; use accountant::Accountant; - use accountant_skel::AccountantSkel; + use tpu::Tpu; use crdt::{Crdt, ReplicatedData}; use futures::Future; use historian::Historian; @@ -165,7 +165,7 @@ mod tests { // TODO: Figure out why this test sometimes hangs on TravisCI. 
#[test] - fn test_accountant_stub() { + fn test_thin_client() { logger::setup(); let gossip = UdpSocket::bind("0.0.0.0:0").unwrap(); let serve = UdpSocket::bind("0.0.0.0:0").unwrap(); @@ -185,14 +185,13 @@ mod tests { let exit = Arc::new(AtomicBool::new(false)); let (input, event_receiver) = sync_channel(10); let historian = Historian::new(event_receiver, &alice.last_id(), Some(30)); - let acc = Arc::new(AccountantSkel::new(acc, input, historian)); - let threads = - AccountantSkel::serve(&acc, d, serve, skinny, gossip, exit.clone(), sink()).unwrap(); + let acc = Arc::new(Tpu::new(acc, input, historian)); + let threads = Tpu::serve(&acc, d, serve, skinny, gossip, exit.clone(), sink()).unwrap(); sleep(Duration::from_millis(300)); let socket = UdpSocket::bind("0.0.0.0:0").unwrap(); - let mut acc = AccountantStub::new(addr, socket); + let mut acc = ThinClient::new(addr, socket); let last_id = acc.get_last_id().wait().unwrap(); let _sig = acc.transfer(500, &alice.keypair(), bob_pubkey, &last_id) .unwrap(); @@ -230,9 +229,9 @@ mod tests { } #[test] - fn test_multi_accountant_stub() { + fn test_multi_node() { logger::setup(); - info!("test_multi_accountant_stub"); + info!("test_multi_node"); let leader = test_node(); let replicant = test_node(); let alice = Mint::new(10_000); @@ -243,17 +242,17 @@ mod tests { let (input, event_receiver) = sync_channel(10); let historian = Historian::new(event_receiver, &alice.last_id(), Some(30)); let acc = Accountant::new(&alice); - Arc::new(AccountantSkel::new(acc, input, historian)) + Arc::new(Tpu::new(acc, input, historian)) }; let replicant_acc = { let (input, event_receiver) = sync_channel(10); let historian = Historian::new(event_receiver, &alice.last_id(), Some(30)); let acc = Accountant::new(&alice); - Arc::new(AccountantSkel::new(acc, input, historian)) + Arc::new(Tpu::new(acc, input, historian)) }; - let leader_threads = AccountantSkel::serve( + let leader_threads = Tpu::serve( &leader_acc, leader.0.clone(), leader.2, @@ -262,7 +261,7 @@ mod tests { exit.clone(), sink(), ).unwrap(); - let replicant_threads = AccountantSkel::replicate( + let replicant_threads = Tpu::replicate( &replicant_acc, replicant.0.clone(), replicant.1, @@ -314,7 +313,7 @@ mod tests { let socket = UdpSocket::bind("0.0.0.0:0").unwrap(); socket.set_read_timeout(Some(Duration::new(1, 0))).unwrap(); - let mut acc = AccountantStub::new(leader.0.serve_addr, socket); + let mut acc = ThinClient::new(leader.0.serve_addr, socket); info!("getting leader last_id"); let last_id = acc.get_last_id().wait().unwrap(); info!("executing leader transer"); @@ -330,7 +329,7 @@ mod tests { let socket = UdpSocket::bind("0.0.0.0:0").unwrap(); socket.set_read_timeout(Some(Duration::new(1, 0))).unwrap(); - let mut acc = AccountantStub::new(replicant.0.serve_addr, socket); + let mut acc = ThinClient::new(replicant.0.serve_addr, socket); info!("getting replicant balance"); if let Ok(bal) = acc.get_balance(&bob_pubkey) { replicant_balance = bal; diff --git a/src/accountant_skel.rs b/src/tpu.rs similarity index 93% rename from src/accountant_skel.rs rename to src/tpu.rs index d30975ae83..32142e6ed8 100644 --- a/src/accountant_skel.rs +++ b/src/tpu.rs @@ -1,6 +1,5 @@ -//! The `accountant_skel` module is a microservice that exposes the high-level -//! Accountant API to the network. Its message encoding is currently -//! in flux. Clients should use AccountantStub to interact with it. +//! The `tpu` module implements the Transaction Processing Unit, a +//! 5-stage transaction processing pipeline in software. 
use accountant::Accountant; use bincode::{deserialize, serialize, serialize_into}; @@ -33,7 +32,7 @@ use timing; use std::time::Instant; use rand::{thread_rng, Rng}; -pub struct AccountantSkel { +pub struct Tpu { acc: Mutex, historian_input: Mutex>, historian: Historian, @@ -70,7 +69,7 @@ impl Request { } } -type SharedSkel = Arc; +type SharedTpu = Arc; #[derive(Serialize, Deserialize, Debug)] pub enum Response { @@ -78,10 +77,10 @@ pub enum Response { EntryInfo(EntryInfo), } -impl AccountantSkel { - /// Create a new AccountantSkel that wraps the given Accountant. +impl Tpu { + /// Create a new Tpu that wraps the given Accountant. pub fn new(acc: Accountant, historian_input: SyncSender, historian: Historian) -> Self { - AccountantSkel { + Tpu { acc: Mutex::new(acc), entry_info_subscribers: Mutex::new(vec![]), historian_input: Mutex::new(historian_input), @@ -89,7 +88,7 @@ impl AccountantSkel { } } - fn notify_entry_info_subscribers(obj: &SharedSkel, entry: &Entry) { + fn notify_entry_info_subscribers(obj: &SharedTpu, entry: &Entry) { // TODO: No need to bind(). let socket = UdpSocket::bind("0.0.0.0:0").expect("bind"); @@ -112,7 +111,7 @@ impl AccountantSkel { } } - fn update_entry(obj: &SharedSkel, writer: &Arc>, entry: &Entry) { + fn update_entry(obj: &SharedTpu, writer: &Arc>, entry: &Entry) { trace!("update_entry entry"); obj.acc.lock().unwrap().register_entry_id(&entry.id); writeln!( @@ -123,7 +122,7 @@ impl AccountantSkel { Self::notify_entry_info_subscribers(obj, &entry); } - fn receive_all(obj: &SharedSkel, writer: &Arc>) -> Result> { + fn receive_all(obj: &SharedTpu, writer: &Arc>) -> Result> { //TODO implement a serialize for channel that does this without allocations let mut l = vec![]; let entry = obj.historian @@ -182,7 +181,7 @@ impl AccountantSkel { /// Process any Entry items that have been published by the Historian. /// continuosly broadcast blobs of entries out fn run_sync( - obj: SharedSkel, + obj: SharedTpu, broadcast: &streamer::BlobSender, blob_recycler: &packet::BlobRecycler, writer: &Arc>, @@ -198,7 +197,7 @@ impl AccountantSkel { } pub fn sync_service( - obj: SharedSkel, + obj: SharedTpu, exit: Arc, broadcast: streamer::BlobSender, blob_recycler: packet::BlobRecycler, @@ -213,12 +212,12 @@ impl AccountantSkel { }) } - fn process_thin_client_requests(_obj: SharedSkel, _socket: &UdpSocket) -> Result<()> { + fn process_thin_client_requests(_obj: SharedTpu, _socket: &UdpSocket) -> Result<()> { Ok(()) } fn thin_client_service( - obj: SharedSkel, + obj: SharedTpu, exit: Arc, socket: UdpSocket, ) -> JoinHandle<()> { @@ -233,12 +232,12 @@ impl AccountantSkel { /// Process any Entry items that have been published by the Historian. 
/// continuosly broadcast blobs of entries out - fn run_sync_no_broadcast(obj: SharedSkel) -> Result<()> { + fn run_sync_no_broadcast(obj: SharedTpu) -> Result<()> { Self::receive_all(&obj, &Arc::new(Mutex::new(sink())))?; Ok(()) } - pub fn sync_no_broadcast_service(obj: SharedSkel, exit: Arc) -> JoinHandle<()> { + pub fn sync_no_broadcast_service(obj: SharedTpu, exit: Arc) -> JoinHandle<()> { spawn(move || loop { let _ = Self::run_sync_no_broadcast(obj.clone()); if exit.load(Ordering::Relaxed) { @@ -420,7 +419,7 @@ impl AccountantSkel { } fn process( - obj: &SharedSkel, + obj: &SharedTpu, verified_receiver: &Receiver)>>, responder_sender: &streamer::BlobSender, packet_recycler: &packet::PacketRecycler, @@ -485,7 +484,7 @@ impl AccountantSkel { /// Process verified blobs, already in order /// Respond with a signed hash of the state fn replicate_state( - obj: &SharedSkel, + obj: &SharedTpu, verified_receiver: &streamer::BlobReceiver, blob_recycler: &packet::BlobRecycler, ) -> Result<()> { @@ -510,11 +509,11 @@ impl AccountantSkel { Ok(()) } - /// Create a UDP microservice that forwards messages the given AccountantSkel. + /// Create a UDP microservice that forwards messages the given Tpu. /// This service is the network leader /// Set `exit` to shutdown its threads. pub fn serve( - obj: &SharedSkel, + obj: &SharedTpu, me: ReplicatedData, serve: UdpSocket, skinny: UdpSocket, @@ -582,10 +581,10 @@ impl AccountantSkel { let t_skinny = Self::thin_client_service(obj.clone(), exit.clone(), skinny); - let skel = obj.clone(); + let tpu = obj.clone(); let t_server = spawn(move || loop { let e = Self::process( - &mut skel.clone(), + &mut tpu.clone(), &verified_receiver, &responder_sender, &packet_recycler, @@ -631,7 +630,7 @@ impl AccountantSkel { /// 4. process the transaction state machine /// 5. respond with the hash of the state back to the leader pub fn replicate( - obj: &SharedSkel, + obj: &SharedTpu, me: ReplicatedData, gossip: UdpSocket, serve: UdpSocket, @@ -682,10 +681,10 @@ impl AccountantSkel { retransmit_sender, ); - let skel = obj.clone(); + let tpu = obj.clone(); let s_exit = exit.clone(); let t_replicator = spawn(move || loop { - let e = Self::replicate_state(&skel, &window_receiver, &blob_recycler); + let e = Self::replicate_state(&tpu, &window_receiver, &blob_recycler); if e.is_err() && s_exit.load(Ordering::Relaxed) { break; } @@ -728,11 +727,11 @@ impl AccountantSkel { } let t_sync = Self::sync_no_broadcast_service(obj.clone(), exit.clone()); - let skel = obj.clone(); + let tpu = obj.clone(); let s_exit = exit.clone(); let t_server = spawn(move || loop { let e = Self::process( - &mut skel.clone(), + &mut tpu.clone(), &verified_receiver, &responder_sender, &packet_recycler, @@ -786,15 +785,15 @@ pub fn to_packets(r: &packet::PacketRecycler, reqs: Vec) -> Vec = skel.historian.output.lock().unwrap().iter().collect(); + drop(tpu.historian_input); + let entries: Vec = tpu.historian.output.lock().unwrap().iter().collect(); // Assert the user holds one token, not two. 
If the server only output one // entry, then the second transaction will be rejected, because it drives @@ -901,10 +900,10 @@ mod tests { let exit = Arc::new(AtomicBool::new(false)); let (input, event_receiver) = sync_channel(10); let historian = Historian::new(event_receiver, &alice.last_id(), Some(30)); - let acc_skel = Arc::new(AccountantSkel::new(acc, input, historian)); + let tpu = Arc::new(Tpu::new(acc, input, historian)); let serve_addr = leader_serve.local_addr().unwrap(); - let threads = AccountantSkel::serve( - &acc_skel, + let threads = Tpu::serve( + &tpu, leader_data, leader_serve, leader_skinny, @@ -916,23 +915,23 @@ mod tests { let socket = UdpSocket::bind("127.0.0.1:0").unwrap(); socket.set_read_timeout(Some(Duration::new(5, 0))).unwrap(); - let mut acc_stub = AccountantStub::new(serve_addr, socket); - let last_id = acc_stub.get_last_id().wait().unwrap(); + let mut client = ThinClient::new(serve_addr, socket); + let last_id = client.get_last_id().wait().unwrap(); trace!("doing stuff"); let tr = Transaction::new(&alice.keypair(), bob_pubkey, 500, last_id); - let _sig = acc_stub.transfer_signed(tr).unwrap(); + let _sig = client.transfer_signed(tr).unwrap(); - let last_id = acc_stub.get_last_id().wait().unwrap(); + let last_id = client.get_last_id().wait().unwrap(); let mut tr2 = Transaction::new(&alice.keypair(), bob_pubkey, 501, last_id); tr2.data.tokens = 502; tr2.data.plan = Plan::new_payment(502, bob_pubkey); - let _sig = acc_stub.transfer_signed(tr2).unwrap(); + let _sig = client.transfer_signed(tr2).unwrap(); - assert_eq!(acc_stub.get_balance(&bob_pubkey).unwrap(), 500); + assert_eq!(client.get_balance(&bob_pubkey).unwrap(), 500); trace!("exiting"); exit.store(true, Ordering::Relaxed); trace!("joining threads"); @@ -1009,9 +1008,9 @@ mod tests { let acc = Accountant::new(&alice); let (input, event_receiver) = sync_channel(10); let historian = Historian::new(event_receiver, &alice.last_id(), Some(30)); - let acc = Arc::new(AccountantSkel::new(acc, input, historian)); + let acc = Arc::new(Tpu::new(acc, input, historian)); let replicate_addr = target1_data.replicate_addr; - let threads = AccountantSkel::replicate( + let threads = Tpu::replicate( &acc, target1_data, target1_gossip, @@ -1111,7 +1110,7 @@ mod tests { let entry_list = vec![e0; 1000]; let blob_recycler = BlobRecycler::default(); let mut blob_q = VecDeque::new(); - AccountantSkel::process_entry_list_into_blobs(&entry_list, &blob_recycler, &mut blob_q); + Tpu::process_entry_list_into_blobs(&entry_list, &blob_recycler, &mut blob_q); let serialized_entry_list = serialize(&entry_list).unwrap(); let mut num_blobs_ref = serialized_entry_list.len() / BLOB_SIZE; if serialized_entry_list.len() % BLOB_SIZE != 0 { @@ -1127,7 +1126,7 @@ mod bench { extern crate test; use self::test::Bencher; use accountant::{Accountant, MAX_ENTRY_IDS}; - use accountant_skel::*; + use tpu::*; use bincode::serialize; use hash::hash; use mint::Mint; @@ -1180,17 +1179,17 @@ mod bench { let (input, event_receiver) = sync_channel(10); let historian = Historian::new(event_receiver, &mint.last_id(), None); - let skel = AccountantSkel::new(acc, input, historian); + let tpu = Tpu::new(acc, input, historian); let now = Instant::now(); - assert!(skel.process_events(req_vers).is_ok()); + assert!(tpu.process_events(req_vers).is_ok()); let duration = now.elapsed(); let sec = duration.as_secs() as f64 + duration.subsec_nanos() as f64 / 1_000_000_000.0; let tps = txs as f64 / sec; // Ensure that all transactions were successfully logged. 
- drop(skel.historian_input); - let entries: Vec = skel.historian.output.lock().unwrap().iter().collect(); + drop(tpu.historian_input); + let entries: Vec = tpu.historian.output.lock().unwrap().iter().collect(); assert_eq!(entries.len(), 1); assert_eq!(entries[0].events.len(), txs as usize); From 1dca17fdb45903a62a3704c4bcdaacc59c3bf7db Mon Sep 17 00:00:00 2001 From: Greg Fitzgerald Date: Tue, 8 May 2018 18:59:01 -0600 Subject: [PATCH 51/51] cargo +nightly fmt --- src/bin/client-demo.rs | 2 +- src/bin/testnode.rs | 2 +- src/ecdsa.rs | 2 +- src/lib.rs | 2 +- src/thin_client.rs | 4 ++-- src/timing.rs | 2 +- src/tpu.rs | 16 ++++++++-------- 7 files changed, 15 insertions(+), 15 deletions(-) diff --git a/src/bin/client-demo.rs b/src/bin/client-demo.rs index 82cd42b2c1..013e029300 100644 --- a/src/bin/client-demo.rs +++ b/src/bin/client-demo.rs @@ -10,9 +10,9 @@ use futures::Future; use getopts::Options; use isatty::stdin_isatty; use rayon::prelude::*; -use solana::thin_client::ThinClient; use solana::mint::MintDemo; use solana::signature::{KeyPair, KeyPairUtil}; +use solana::thin_client::ThinClient; use solana::transaction::Transaction; use std::env; use std::io::{stdin, Read}; diff --git a/src/bin/testnode.rs b/src/bin/testnode.rs index 736acb0179..cc4ad246ad 100644 --- a/src/bin/testnode.rs +++ b/src/bin/testnode.rs @@ -7,12 +7,12 @@ extern crate solana; use getopts::Options; use isatty::stdin_isatty; use solana::accountant::Accountant; -use solana::tpu::Tpu; use solana::crdt::ReplicatedData; use solana::entry::Entry; use solana::event::Event; use solana::historian::Historian; use solana::signature::{KeyPair, KeyPairUtil}; +use solana::tpu::Tpu; use std::env; use std::io::{stdin, stdout, Read}; use std::net::UdpSocket; diff --git a/src/ecdsa.rs b/src/ecdsa.rs index c029f1e1f9..4d7abbdbb4 100644 --- a/src/ecdsa.rs +++ b/src/ecdsa.rs @@ -130,11 +130,11 @@ pub fn ed25519_verify(batches: &Vec) -> Vec> { #[cfg(test)] mod tests { - use tpu::Request; use bincode::serialize; use ecdsa; use packet::{Packet, Packets, SharedPackets}; use std::sync::RwLock; + use tpu::Request; use transaction::test_tx; use transaction::Transaction; diff --git a/src/lib.rs b/src/lib.rs index 5c7568c54d..ab3fc2ff56 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -19,8 +19,8 @@ pub mod signature; pub mod streamer; pub mod thin_client; pub mod timing; -pub mod transaction; pub mod tpu; +pub mod transaction; extern crate bincode; extern crate byteorder; extern crate chrono; diff --git a/src/thin_client.rs b/src/thin_client.rs index 920081a4f6..3ae436ef8d 100644 --- a/src/thin_client.rs +++ b/src/thin_client.rs @@ -3,7 +3,6 @@ //! messages to the network directly. The binary encoding of its messages are //! unstable and may change in future releases. -use tpu::{Request, Response, Subscription}; use bincode::{deserialize, serialize}; use futures::future::{ok, FutureResult}; use hash::Hash; @@ -11,6 +10,7 @@ use signature::{KeyPair, PublicKey, Signature}; use std::collections::HashMap; use std::io; use std::net::{SocketAddr, UdpSocket}; +use tpu::{Request, Response, Subscription}; use transaction::Transaction; pub struct ThinClient { @@ -148,7 +148,6 @@ impl ThinClient { mod tests { use super::*; use accountant::Accountant; - use tpu::Tpu; use crdt::{Crdt, ReplicatedData}; use futures::Future; use historian::Historian; @@ -162,6 +161,7 @@ mod tests { use std::thread::sleep; use std::time::Duration; use std::time::Instant; + use tpu::Tpu; // TODO: Figure out why this test sometimes hangs on TravisCI. 
#[test] diff --git a/src/timing.rs b/src/timing.rs index 5c36fad805..0d3c383839 100644 --- a/src/timing.rs +++ b/src/timing.rs @@ -1,5 +1,5 @@ -use std::time::{SystemTime, UNIX_EPOCH}; use std::time::Duration; +use std::time::{SystemTime, UNIX_EPOCH}; pub fn duration_as_ms(d: &Duration) -> u64 { return (d.as_secs() * 1000) + (d.subsec_nanos() as u64 / 1_000_000); diff --git a/src/tpu.rs b/src/tpu.rs index 32142e6ed8..6da34e1332 100644 --- a/src/tpu.rs +++ b/src/tpu.rs @@ -11,6 +11,7 @@ use hash::Hash; use historian::Historian; use packet; use packet::{SharedBlob, SharedPackets, BLOB_SIZE}; +use rand::{thread_rng, Rng}; use rayon::prelude::*; use recorder::Signal; use result::Result; @@ -26,11 +27,10 @@ use std::sync::mpsc::{channel, Receiver, Sender, SyncSender}; use std::sync::{Arc, Mutex, RwLock}; use std::thread::{spawn, JoinHandle}; use std::time::Duration; -use streamer; -use transaction::Transaction; -use timing; use std::time::Instant; -use rand::{thread_rng, Rng}; +use streamer; +use timing; +use transaction::Transaction; pub struct Tpu { acc: Mutex, @@ -785,15 +785,13 @@ pub fn to_packets(r: &packet::PacketRecycler, reqs: Vec) -> Vec