Compare commits

...

6 Commits
v0.8.1 ... v0.8

Author SHA1 Message Date
390af512de Permit testnets without a GPU 2018-09-26 10:38:10 -07:00
2a9be901da Mark --outfile parameter as required 2018-09-26 10:38:06 -07:00
d794fee66f Remove unused variables and imports after cherry-picking from master 2018-09-19 11:49:47 -07:00
9b66d4d363 Read multiple entries in write stage (#1259)
- Also use rayon to parallelize to_blobs() to maximize CPU usage
2018-09-19 11:49:47 -07:00
bff8f2614b Move entry->blob creation out of write stage (#1257)
- The write stage will output vector of entries
- Broadcast stage will create blobs out of the entries
- Helps reduce MIPS requirements for write stage
2018-09-19 11:49:47 -07:00
8f0648e8fc Move register_entry_id() call out of write stage (#1253)
* Move register_entry_id() call out of write stage

- Write stage is MIPS intensive and has become a bottleneck for
  the TPU pipeline
- This will reduce the MIPS requirements for the stage

* Fix rust format issues
2018-09-19 11:49:47 -07:00
7 changed files with 155 additions and 71 deletions

View File

@@ -9,6 +9,7 @@ validatorNodeCount=10
 publicNetwork=false
 snapChannel=edge
 delete=false
+enableGpu=false
 usage() {
   exitcode=0
@@ -30,6 +31,7 @@ Deploys a CD testnet
   -n [number] - Number of validator nodes (default: $validatorNodeCount)
   -c [number] - Number of client nodes (default: $clientNodeCount)
   -P - Use public network IP addresses (default: $publicNetwork)
+  -g - Enable GPU (default: $enableGpu)
   -a [address] - Set the leader node's external IP address to this GCE address
   -d - Delete the network
@@ -45,7 +47,7 @@ zone=$2
 [[ -n $zone ]] || usage "Zone not specified"
 shift 2
-while getopts "h?p:Pn:c:s:a:d" opt; do
+while getopts "h?p:Pn:c:s:ga:d" opt; do
   case $opt in
   h | \?)
     usage
@@ -69,6 +71,9 @@ while getopts "h?p:Pn:c:s:a:d" opt; do
     ;;
   esac
   ;;
+  g)
+    enableGpu=true
+    ;;
   a)
     leaderAddress=$OPTARG
     ;;
@@ -86,11 +91,14 @@ gce_create_args=(
   -a "$leaderAddress"
   -c "$clientNodeCount"
   -n "$validatorNodeCount"
-  -g
   -p "$netName"
   -z "$zone"
 )
+if $enableGpu; then
+  gce_create_args+=(-g)
+fi
 if $publicNetwork; then
   gce_create_args+=(-P)
 fi

View File

@@ -21,6 +21,7 @@ fn main() -> Result<(), Box<error::Error>> {
                 .long("outfile")
                 .value_name("PATH")
                 .takes_value(true)
+                .required(true)
                 .help("path to generated file"),
         ).get_matches();
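
For context on the "Mark --outfile parameter as required" commit above, here is a minimal, self-contained sketch of the same clap (2.x era) pattern. Only the argument definition mirrors the diff; the binary name and surrounding code are illustrative. With .required(true), clap exits with a usage error when --outfile is omitted instead of proceeding without a path.

extern crate clap;
use clap::{App, Arg};

fn main() {
    let matches = App::new("keygen-example") // illustrative name, not the real binary
        .arg(
            Arg::with_name("outfile")
                .long("outfile")
                .value_name("PATH")
                .takes_value(true)
                .required(true) // reject invocations that omit --outfile
                .help("path to generated file"),
        ).get_matches();

    // Safe to unwrap: clap has already rejected invocations without --outfile.
    let outfile = matches.value_of("outfile").unwrap();
    println!("writing keypair to {}", outfile);
}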

View File

@@ -2,20 +2,23 @@
 //!
 use counter::Counter;
 use crdt::{Crdt, CrdtError, NodeInfo};
+use entry::Entry;
 #[cfg(feature = "erasure")]
 use erasure;
+use ledger::Block;
 use log::Level;
-use packet::BlobRecycler;
+use packet::{BlobRecycler, SharedBlobs};
+use rayon::prelude::*;
 use result::{Error, Result};
 use service::Service;
 use std::mem;
 use std::net::UdpSocket;
 use std::sync::atomic::AtomicUsize;
-use std::sync::mpsc::RecvTimeoutError;
+use std::sync::mpsc::{Receiver, RecvTimeoutError};
 use std::sync::{Arc, RwLock};
 use std::thread::{self, Builder, JoinHandle};
-use std::time::Duration;
+use std::time::{Duration, Instant};
-use streamer::BlobReceiver;
+use timing::duration_as_ms;
 use window::{self, SharedWindow, WindowIndex, WindowUtil, WINDOW_SIZE};
 fn broadcast(
@@ -23,27 +26,42 @@ fn broadcast(
     broadcast_table: &[NodeInfo],
     window: &SharedWindow,
     recycler: &BlobRecycler,
-    receiver: &BlobReceiver,
+    receiver: &Receiver<Vec<Entry>>,
     sock: &UdpSocket,
     transmit_index: &mut WindowIndex,
     receive_index: &mut u64,
 ) -> Result<()> {
     let id = node_info.id;
     let timer = Duration::new(1, 0);
-    let mut dq = receiver.recv_timeout(timer)?;
-    while let Ok(mut nq) = receiver.try_recv() {
-        dq.append(&mut nq);
+    let entries = receiver.recv_timeout(timer)?;
+    let mut num_entries = entries.len();
+    let mut ventries = Vec::new();
+    ventries.push(entries);
+    while let Ok(entries) = receiver.try_recv() {
+        num_entries += entries.len();
+        ventries.push(entries);
     }
+    let to_blobs_start = Instant::now();
+    let dq: SharedBlobs = ventries
+        .into_par_iter()
+        .flat_map(|p| p.to_blobs(recycler))
+        .collect();
+    let to_blobs_elapsed = duration_as_ms(&to_blobs_start.elapsed());
     // flatten deque to vec
     let blobs_vec: Vec<_> = dq.into_iter().collect();
+    let blobs_chunking = Instant::now();
     // We could receive more blobs than window slots so
     // break them up into window-sized chunks to process
     let blobs_chunked = blobs_vec.chunks(WINDOW_SIZE as usize).map(|x| x.to_vec());
+    let chunking_elapsed = duration_as_ms(&blobs_chunking.elapsed());
     trace!("{}", window.read().unwrap().print(&id, *receive_index));
+    let broadcast_start = Instant::now();
     for mut blobs in blobs_chunked {
         let blobs_len = blobs.len();
         trace!("{}: broadcast blobs.len: {}", id, blobs_len);
@@ -115,6 +133,13 @@ fn broadcast(
             *receive_index,
         )?;
     }
+    let broadcast_elapsed = duration_as_ms(&broadcast_start.elapsed());
+    info!(
+        "broadcast: {} entries, blob time {} chunking time {} broadcast time {}",
+        num_entries, to_blobs_elapsed, chunking_elapsed, broadcast_elapsed
+    );
     Ok(())
 }
@@ -129,7 +154,7 @@ impl BroadcastStage {
         window: &SharedWindow,
         entry_height: u64,
         recycler: &BlobRecycler,
-        receiver: &BlobReceiver,
+        receiver: &Receiver<Vec<Entry>>,
     ) {
         let mut transmit_index = WindowIndex {
             data: entry_height,
@@ -177,7 +202,7 @@ impl BroadcastStage {
         window: SharedWindow,
         entry_height: u64,
         recycler: BlobRecycler,
-        receiver: BlobReceiver,
+        receiver: Receiver<Vec<Entry>>,
     ) -> Self {
         let thread_hdl = Builder::new()
            .name("solana-broadcaster".to_string())

View File

@@ -243,7 +243,7 @@ impl Fullnode {
         // TODO: To light up PoH, uncomment the following line:
         //let tick_duration = Some(Duration::from_millis(1000));
-        let (tpu, blob_receiver) = Tpu::new(
+        let (tpu, entry_receiver) = Tpu::new(
             keypair,
             &bank,
             &crdt,
@@ -262,7 +262,7 @@ impl Fullnode {
             shared_window,
             entry_height,
             blob_recycler.clone(),
-            blob_receiver,
+            entry_receiver,
         );
         thread_hdls.extend(broadcast_stage.thread_hdls());
     }

View File

@@ -5,11 +5,12 @@
 //! Transaction, the latest hash, and the number of hashes since the last transaction.
 //! The resulting stream of entries represents ordered transactions in time.
+use bank::Bank;
 use entry::Entry;
-use hash::Hash;
 use recorder::Recorder;
 use service::Service;
 use std::sync::mpsc::{channel, Receiver, RecvError, Sender, TryRecvError};
+use std::sync::Arc;
 use std::thread::{self, Builder, JoinHandle};
 use std::time::{Duration, Instant};
 use transaction::Transaction;
@@ -27,18 +28,15 @@ pub struct RecordStage {
 impl RecordStage {
     /// A background thread that will continue tagging received Transaction messages and
     /// sending back Entry messages until either the receiver or sender channel is closed.
-    pub fn new(
-        signal_receiver: Receiver<Signal>,
-        start_hash: &Hash,
-    ) -> (Self, Receiver<Vec<Entry>>) {
+    pub fn new(signal_receiver: Receiver<Signal>, bank: Arc<Bank>) -> (Self, Receiver<Vec<Entry>>) {
         let (entry_sender, entry_receiver) = channel();
-        let start_hash = *start_hash;
+        let start_hash = bank.last_id();
         let thread_hdl = Builder::new()
             .name("solana-record-stage".to_string())
             .spawn(move || {
                 let mut recorder = Recorder::new(start_hash);
-                let _ = Self::process_signals(&mut recorder, &signal_receiver, &entry_sender);
+                let _ = Self::process_signals(&mut recorder, &signal_receiver, bank, &entry_sender);
             }).unwrap();
         (RecordStage { thread_hdl }, entry_receiver)
@@ -47,11 +45,11 @@ impl RecordStage {
     /// Same as `RecordStage::new`, but will automatically produce entries every `tick_duration`.
     pub fn new_with_clock(
         signal_receiver: Receiver<Signal>,
-        start_hash: &Hash,
+        bank: Arc<Bank>,
         tick_duration: Duration,
     ) -> (Self, Receiver<Vec<Entry>>) {
         let (entry_sender, entry_receiver) = channel();
-        let start_hash = *start_hash;
+        let start_hash = bank.last_id();
         let thread_hdl = Builder::new()
             .name("solana-record-stage".to_string())
@@ -64,6 +62,7 @@ impl RecordStage {
                     start_time,
                     tick_duration,
                     &signal_receiver,
+                    bank.clone(),
                     &entry_sender,
                 ).is_err()
                 {
@@ -78,6 +77,7 @@ impl RecordStage {
     fn process_signal(
         signal: Signal,
+        bank: &Arc<Bank>,
         recorder: &mut Recorder,
         sender: &Sender<Vec<Entry>>,
     ) -> Result<(), ()> {
@@ -87,6 +87,13 @@ impl RecordStage {
             vec![]
         };
         let entries = recorder.record(txs);
+        for entry in entries.iter() {
+            if !entry.has_more {
+                bank.register_entry_id(&entry.id);
+            }
+        }
         sender.send(entries).or(Err(()))?;
         Ok(())
     }
@@ -94,11 +101,12 @@ impl RecordStage {
     fn process_signals(
         recorder: &mut Recorder,
         receiver: &Receiver<Signal>,
+        bank: Arc<Bank>,
         sender: &Sender<Vec<Entry>>,
     ) -> Result<(), ()> {
         loop {
             match receiver.recv() {
-                Ok(signal) => Self::process_signal(signal, recorder, sender)?,
+                Ok(signal) => Self::process_signal(signal, &bank, recorder, sender)?,
                 Err(RecvError) => return Err(()),
             }
         }
@@ -109,6 +117,7 @@ impl RecordStage {
         start_time: Instant,
         tick_duration: Duration,
         receiver: &Receiver<Signal>,
+        bank: Arc<Bank>,
         sender: &Sender<Vec<Entry>>,
     ) -> Result<(), ()> {
         loop {
@@ -116,7 +125,7 @@ impl RecordStage {
             sender.send(vec![entry]).or(Err(()))?;
         }
         match receiver.try_recv() {
-            Ok(signal) => Self::process_signal(signal, recorder, sender)?,
+            Ok(signal) => Self::process_signal(signal, &bank, recorder, sender)?,
             Err(TryRecvError::Empty) => return Ok(()),
             Err(TryRecvError::Disconnected) => return Err(()),
         };
@@ -137,16 +146,21 @@ impl Service for RecordStage {
 #[cfg(test)]
 mod tests {
     use super::*;
+    use bank::Bank;
     use ledger::Block;
+    use mint::Mint;
     use signature::{Keypair, KeypairUtil};
     use std::sync::mpsc::channel;
+    use std::sync::Arc;
     use std::thread::sleep;
     #[test]
     fn test_historian() {
         let (tx_sender, tx_receiver) = channel();
-        let zero = Hash::default();
-        let (record_stage, entry_receiver) = RecordStage::new(tx_receiver, &zero);
+        let mint = Mint::new(1234);
+        let bank = Arc::new(Bank::new(&mint));
+        let zero = bank.last_id();
+        let (record_stage, entry_receiver) = RecordStage::new(tx_receiver, bank);
         tx_sender.send(Signal::Tick).unwrap();
         sleep(Duration::new(0, 1_000_000));
@@ -171,8 +185,9 @@ mod tests {
     #[test]
     fn test_historian_closed_sender() {
         let (tx_sender, tx_receiver) = channel();
-        let zero = Hash::default();
-        let (record_stage, entry_receiver) = RecordStage::new(tx_receiver, &zero);
+        let mint = Mint::new(1234);
+        let bank = Arc::new(Bank::new(&mint));
+        let (record_stage, entry_receiver) = RecordStage::new(tx_receiver, bank);
         drop(entry_receiver);
         tx_sender.send(Signal::Tick).unwrap();
         assert_eq!(record_stage.thread_hdl.join().unwrap(), ());
@@ -181,8 +196,10 @@ mod tests {
     #[test]
     fn test_transactions() {
         let (tx_sender, signal_receiver) = channel();
-        let zero = Hash::default();
-        let (_record_stage, entry_receiver) = RecordStage::new(signal_receiver, &zero);
+        let mint = Mint::new(1234);
+        let bank = Arc::new(Bank::new(&mint));
+        let zero = bank.last_id();
+        let (_record_stage, entry_receiver) = RecordStage::new(signal_receiver, bank);
         let alice_keypair = Keypair::new();
         let bob_pubkey = Keypair::new().pubkey();
         let tx0 = Transaction::new(&alice_keypair, bob_pubkey, 1, zero);
@@ -198,9 +215,11 @@ mod tests {
     #[test]
     fn test_clock() {
         let (tx_sender, tx_receiver) = channel();
-        let zero = Hash::default();
+        let mint = Mint::new(1234);
+        let bank = Arc::new(Bank::new(&mint));
+        let zero = bank.last_id();
         let (_record_stage, entry_receiver) =
-            RecordStage::new_with_clock(tx_receiver, &zero, Duration::from_millis(20));
+            RecordStage::new_with_clock(tx_receiver, bank, Duration::from_millis(20));
         sleep(Duration::from_millis(900));
         tx_sender.send(Signal::Tick).unwrap();
         drop(tx_sender);
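
The record_stage.rs diff above moves register_entry_id() out of the MIPS-bound write stage (#1253): entry IDs are registered with the bank as entries are recorded, and only entries that are not continued (has_more == false) are registered. A simplified standalone sketch, with Bank, Entry, and the id type reduced to stand-ins:

use std::collections::HashSet;
use std::sync::Mutex;

// Stand-in for the real Bank: remembers which entry IDs it will accept.
struct Bank {
    entry_ids: Mutex<HashSet<u64>>,
}

impl Bank {
    fn register_entry_id(&self, id: u64) {
        self.entry_ids.lock().unwrap().insert(id);
    }
}

// Stand-in for the real Entry; has_more marks an entry continued by the next one.
struct Entry {
    id: u64,
    has_more: bool,
}

// Now done by the record stage, so the write stage only persists and forwards.
fn register_completed_entries(bank: &Bank, entries: &[Entry]) {
    for entry in entries {
        if !entry.has_more {
            bank.register_entry_id(entry.id);
        }
    }
}

fn main() {
    let bank = Bank { entry_ids: Mutex::new(HashSet::new()) };
    let entries = vec![
        Entry { id: 1, has_more: true },
        Entry { id: 2, has_more: false },
    ];
    register_completed_entries(&bank, &entries);
    println!("registered: {:?}", bank.entry_ids.lock().unwrap());
}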

View File

@@ -28,6 +28,7 @@
 use bank::Bank;
 use banking_stage::BankingStage;
 use crdt::Crdt;
+use entry::Entry;
 use fetch_stage::FetchStage;
 use packet::{BlobRecycler, PacketRecycler};
 use record_stage::RecordStage;
@@ -36,10 +37,10 @@ use signature::Keypair;
 use sigverify_stage::SigVerifyStage;
 use std::net::UdpSocket;
 use std::sync::atomic::AtomicBool;
+use std::sync::mpsc::Receiver;
 use std::sync::{Arc, RwLock};
 use std::thread::{self, JoinHandle};
 use std::time::Duration;
-use streamer::BlobReceiver;
 use write_stage::WriteStage;
 pub struct Tpu {
@@ -61,7 +62,7 @@ impl Tpu {
         exit: Arc<AtomicBool>,
         ledger_path: &str,
         sigverify_disabled: bool,
-    ) -> (Self, BlobReceiver) {
+    ) -> (Self, Receiver<Vec<Entry>>) {
         let packet_recycler = PacketRecycler::default();
         let (fetch_stage, packet_receiver) =
@@ -75,12 +76,12 @@ impl Tpu {
         let (record_stage, entry_receiver) = match tick_duration {
             Some(tick_duration) => {
-                RecordStage::new_with_clock(signal_receiver, &bank.last_id(), tick_duration)
+                RecordStage::new_with_clock(signal_receiver, bank.clone(), tick_duration)
             }
-            None => RecordStage::new(signal_receiver, &bank.last_id()),
+            None => RecordStage::new(signal_receiver, bank.clone()),
         };
-        let (write_stage, blob_receiver) = WriteStage::new(
+        let (write_stage, entry_forwarder) = WriteStage::new(
             keypair,
             bank.clone(),
             crdt.clone(),
@@ -96,7 +97,7 @@ impl Tpu {
             record_stage,
             write_stage,
         };
-        (tpu, blob_receiver)
+        (tpu, entry_forwarder)
     }
     pub fn close(self) -> thread::Result<()> {
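
The tpu.rs rewiring above follows from "Move entry->blob creation out of write stage" (#1257): the write stage now forwards Vec<Entry> downstream and the broadcast stage owns blob creation. A minimal two-thread sketch of that split, using stand-in Entry and Blob types rather than the crate's real ones:

use std::sync::mpsc::{channel, Receiver, Sender};
use std::thread;

// Stand-ins for the real Entry and blob types.
type Entry = String;
type Blob = Vec<u8>;

// "Write stage": persists entries (omitted here) and forwards them untouched.
fn write_stage(input: Receiver<Vec<Entry>>, output: Sender<Vec<Entry>>) {
    while let Ok(entries) = input.recv() {
        // ... write entries to the ledger here ...
        if output.send(entries).is_err() {
            break; // downstream hung up
        }
    }
}

// "Broadcast stage": now owns the entry -> blob conversion.
fn broadcast_stage(input: Receiver<Vec<Entry>>) {
    while let Ok(entries) = input.recv() {
        let blobs: Vec<Blob> = entries.iter().map(|e| e.as_bytes().to_vec()).collect();
        println!("broadcasting {} blobs", blobs.len());
    }
}

fn main() {
    let (record_tx, record_rx) = channel::<Vec<Entry>>();
    let (entry_tx, entry_rx) = channel::<Vec<Entry>>();
    let writer = thread::spawn(move || write_stage(record_rx, entry_tx));
    let caster = thread::spawn(move || broadcast_stage(entry_rx));
    record_tx
        .send(vec!["entry-0".to_string(), "entry-1".to_string()])
        .unwrap();
    drop(record_tx); // closing the input shuts the pipeline down in order
    writer.join().unwrap();
    caster.join().unwrap();
}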

View File

@@ -14,11 +14,12 @@ use service::Service;
 use signature::Keypair;
 use std::net::UdpSocket;
 use std::sync::atomic::AtomicUsize;
-use std::sync::mpsc::{channel, Receiver, RecvTimeoutError};
+use std::sync::mpsc::{channel, Receiver, RecvTimeoutError, Sender};
 use std::sync::{Arc, RwLock};
 use std::thread::{self, Builder, JoinHandle};
-use std::time::Duration;
+use std::time::{Duration, Instant};
-use streamer::{responder, BlobReceiver, BlobSender};
+use streamer::responder;
+use timing::{duration_as_ms, duration_as_s};
 use vote_stage::send_leader_vote;
 pub struct WriteStage {
@@ -27,41 +28,72 @@ pub struct WriteStage {
 impl WriteStage {
     /// Process any Entry items that have been published by the RecordStage.
-    /// continuosly broadcast blobs of entries out
+    /// continuosly send entries out
     pub fn write_and_send_entries(
         crdt: &Arc<RwLock<Crdt>>,
-        bank: &Arc<Bank>,
         ledger_writer: &mut LedgerWriter,
-        blob_sender: &BlobSender,
-        blob_recycler: &BlobRecycler,
+        entry_sender: &Sender<Vec<Entry>>,
         entry_receiver: &Receiver<Vec<Entry>>,
     ) -> Result<()> {
+        let mut ventries = Vec::new();
         let entries = entry_receiver.recv_timeout(Duration::new(1, 0))?;
+        let mut num_entries = entries.len();
+        let mut num_txs = 0;
+        ventries.push(entries);
+        while let Ok(more) = entry_receiver.try_recv() {
+            num_entries += more.len();
+            ventries.push(more);
+        }
+        info!("write_stage entries: {}", num_entries);
+        let to_blobs_total = 0;
+        let mut blob_send_total = 0;
+        let mut register_entry_total = 0;
+        let mut crdt_votes_total = 0;
+        let start = Instant::now();
+        for _ in 0..ventries.len() {
+            let entries = ventries.pop().unwrap();
+            for e in entries.iter() {
+                num_txs += e.transactions.len();
+            }
+            let crdt_votes_start = Instant::now();
             let votes = &entries.votes();
             crdt.write().unwrap().insert_votes(&votes);
+            crdt_votes_total += duration_as_ms(&crdt_votes_start.elapsed());
             ledger_writer.write_entries(entries.clone())?;
-        for entry in &entries {
-            if !entry.has_more {
-                bank.register_entry_id(&entry.id);
-            }
-        }
+            let register_entry_start = Instant::now();
+            register_entry_total += duration_as_ms(&register_entry_start.elapsed());
+            inc_new_counter_info!("write_stage-write_entries", entries.len());
             //TODO(anatoly): real stake based voting needs to change this
             //leader simply votes if the current set of validators have voted
             //on a valid last id
-        trace!("New blobs? {}", entries.len());
-        let blobs = entries.to_blobs(blob_recycler);
-        if !blobs.is_empty() {
+            trace!("New entries? {}", entries.len());
+            let blob_send_start = Instant::now();
+            if !entries.is_empty() {
                 inc_new_counter_info!("write_stage-recv_vote", votes.len());
-            inc_new_counter_info!("write_stage-broadcast_blobs", blobs.len());
-            trace!("broadcasting {}", blobs.len());
-            blob_sender.send(blobs)?;
+                inc_new_counter_info!("write_stage-broadcast_entries", entries.len());
+                trace!("broadcasting {}", entries.len());
+                entry_sender.send(entries)?;
             }
+            blob_send_total += duration_as_ms(&blob_send_start.elapsed());
+        }
+        info!("done write_stage txs: {} time {} ms txs/s: {} to_blobs_total: {} register_entry_total: {} blob_send_total: {} crdt_votes_total: {}",
+              num_txs, duration_as_ms(&start.elapsed()),
+              num_txs as f32 / duration_as_s(&start.elapsed()),
+              to_blobs_total,
+              register_entry_total,
+              blob_send_total,
+              crdt_votes_total);
         Ok(())
     }
@@ -73,7 +105,7 @@ impl WriteStage {
         blob_recycler: BlobRecycler,
         ledger_path: &str,
         entry_receiver: Receiver<Vec<Entry>>,
-    ) -> (Self, BlobReceiver) {
+    ) -> (Self, Receiver<Vec<Entry>>) {
         let (vote_blob_sender, vote_blob_receiver) = channel();
         let send = UdpSocket::bind("0.0.0.0:0").expect("bind");
         let t_responder = responder(
@@ -82,7 +114,7 @@ impl WriteStage {
             blob_recycler.clone(),
             vote_blob_receiver,
         );
-        let (blob_sender, blob_receiver) = channel();
+        let (entry_sender, entry_receiver_forward) = channel();
         let mut ledger_writer = LedgerWriter::recover(ledger_path).unwrap();
         let thread_hdl = Builder::new()
@@ -94,10 +126,8 @@ impl WriteStage {
                 loop {
                     if let Err(e) = Self::write_and_send_entries(
                         &crdt,
-                        &bank,
                         &mut ledger_writer,
-                        &blob_sender,
-                        &blob_recycler,
+                        &entry_sender,
                         &entry_receiver,
                     ) {
                         match e {
@@ -129,7 +159,7 @@ impl WriteStage {
             }).unwrap();
         let thread_hdls = vec![t_responder, thread_hdl];
-        (WriteStage { thread_hdls }, blob_receiver)
+        (WriteStage { thread_hdls }, entry_receiver_forward)
     }
 }