//! The `entry_writer` module helps implement the TPU's write stage. It
//! writes entries to the given writer, which is typically a file or
//! stdout, and then sends the Entry to its output channel.
2018-05-11 22:36:16 -06:00
|
|
|
|
2018-05-14 15:33:11 -06:00
|
|
|
use bank::Bank;
|
2018-05-11 22:36:16 -06:00
|
|
|
use entry::Entry;
|
2018-05-29 11:52:00 -06:00
|
|
|
use ledger::Block;
|
2018-06-27 12:33:56 -06:00
|
|
|
use packet::BlobRecycler;
|
2018-05-11 22:36:16 -06:00
|
|
|
use result::Result;
|
|
|
|
use serde_json;
|
|
|
|
use std::collections::VecDeque;
|
2018-07-01 08:09:32 -07:00
|
|
|
use std::io::{self, sink, Write};
|
2018-05-12 15:14:10 -06:00
|
|
|
use std::sync::mpsc::Receiver;
|
2018-05-11 22:36:16 -06:00
|
|
|
use std::sync::{Arc, Mutex};
|
|
|
|
use std::time::Duration;
|
2018-06-27 12:33:56 -06:00
|
|
|
use streamer::BlobSender;
|
2018-05-11 22:36:16 -06:00
|
|
|
|
|
|
|
/// Writes entries to an output writer and registers completed entry ids
/// with the wrapped `Bank`.
pub struct EntryWriter<'a> {
    // Bank that receives `register_entry_id` calls for final (non-partial) entries.
    bank: &'a Bank,
}
|
|
|
|
|
|
|
|
impl<'a> EntryWriter<'a> {
|
2018-05-14 15:33:11 -06:00
|
|
|
/// Create a new Tpu that wraps the given Bank.
|
|
|
|
pub fn new(bank: &'a Bank) -> Self {
|
|
|
|
EntryWriter { bank }
|
2018-05-11 22:36:16 -06:00
|
|
|
}
|
|
|
|
|
2018-07-01 08:10:41 -07:00
|
|
|
fn write_and_register_entry<W: Write>(
|
|
|
|
&self,
|
|
|
|
writer: &Mutex<W>,
|
|
|
|
entry: &Entry,
|
|
|
|
) -> io::Result<()> {
|
|
|
|
trace!("write_and_register_entry entry");
|
2018-06-28 16:18:10 -06:00
|
|
|
if !entry.has_more {
|
|
|
|
self.bank.register_entry_id(&entry.id);
|
|
|
|
}
|
2018-05-11 22:36:16 -06:00
|
|
|
writeln!(
|
2018-07-01 08:10:41 -07:00
|
|
|
writer
|
|
|
|
.lock()
|
|
|
|
.expect("'writer' lock in fn fn write_and_register_entry"),
|
2018-05-11 22:36:16 -06:00
|
|
|
"{}",
|
2018-07-01 08:10:41 -07:00
|
|
|
serde_json::to_string(&entry)
|
|
|
|
.expect("'entry' to_strong in fn write_and_register_entry")
|
2018-07-01 08:09:32 -07:00
|
|
|
)
|
2018-05-11 22:36:16 -06:00
|
|
|
}
|
|
|
|
|
2018-05-12 15:14:10 -06:00
|
|
|
fn write_entries<W: Write>(
|
|
|
|
&self,
|
|
|
|
writer: &Mutex<W>,
|
|
|
|
entry_receiver: &Receiver<Entry>,
|
|
|
|
) -> Result<Vec<Entry>> {
|
2018-05-11 22:36:16 -06:00
|
|
|
//TODO implement a serialize for channel that does this without allocations
|
2018-05-12 15:14:10 -06:00
|
|
|
let entry = entry_receiver.recv_timeout(Duration::new(1, 0))?;
|
2018-07-01 08:13:00 -07:00
|
|
|
let mut entries = vec![entry];
|
2018-05-12 15:14:10 -06:00
|
|
|
while let Ok(entry) = entry_receiver.try_recv() {
|
2018-07-01 08:13:00 -07:00
|
|
|
entries.push(entry);
|
2018-05-11 22:36:16 -06:00
|
|
|
}
|
2018-07-01 08:16:41 -07:00
|
|
|
for entry in &entries {
|
|
|
|
self.write_and_register_entry(writer, &entry)?;
|
|
|
|
}
|
2018-07-01 08:13:00 -07:00
|
|
|
Ok(entries)
|
2018-05-11 22:36:16 -06:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Process any Entry items that have been published by the Historian.
|
|
|
|
/// continuosly broadcast blobs of entries out
|
|
|
|
pub fn write_and_send_entries<W: Write>(
|
|
|
|
&self,
|
2018-07-01 08:16:41 -07:00
|
|
|
blob_sender: &BlobSender,
|
2018-06-27 12:33:56 -06:00
|
|
|
blob_recycler: &BlobRecycler,
|
2018-05-11 22:36:16 -06:00
|
|
|
writer: &Mutex<W>,
|
2018-05-12 15:14:10 -06:00
|
|
|
entry_receiver: &Receiver<Entry>,
|
2018-05-11 22:36:16 -06:00
|
|
|
) -> Result<()> {
|
2018-07-01 08:16:41 -07:00
|
|
|
let entries = self.write_entries(writer, entry_receiver)?;
|
|
|
|
trace!("New blobs? {}", entries.len());
|
|
|
|
let mut blobs = VecDeque::new();
|
|
|
|
entries.to_blobs(blob_recycler, &mut blobs);
|
|
|
|
if !blobs.is_empty() {
|
|
|
|
trace!("broadcasting {}", blobs.len());
|
|
|
|
blob_sender.send(blobs)?;
|
2018-05-11 22:36:16 -06:00
|
|
|
}
|
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Process any Entry items that have been published by the Historian.
|
|
|
|
/// continuosly broadcast blobs of entries out
|
2018-05-12 15:14:10 -06:00
|
|
|
pub fn drain_entries(&self, entry_receiver: &Receiver<Entry>) -> Result<()> {
|
|
|
|
self.write_entries(&Arc::new(Mutex::new(sink())), entry_receiver)?;
|
2018-05-11 22:36:16 -06:00
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
}
|
2018-06-28 16:18:10 -06:00
|
|
|
|
|
|
|
#[cfg(test)]
mod tests {
    use super::*;
    use ledger;
    use mint::Mint;
    use packet::BLOB_DATA_SIZE;
    use signature::{KeyPair, KeyPairUtil};
    use transaction::Transaction;

    // Checks that write_and_register_entry only registers the id of the FINAL
    // entry of a split (has_more == false), never a partial one.
    #[test]
    fn test_dont_register_partial_entries() {
        let mint = Mint::new(1);
        let bank = Bank::new(&mint);

        let entry_writer = EntryWriter::new(&bank);
        let keypair = KeyPair::new();
        let tx = Transaction::new(&mint.keypair(), keypair.pubkey(), 1, mint.last_id());

        // NOTE: if Entry grows to larger than a transaction, the code below falls over
        let threshold = (BLOB_DATA_SIZE / 256) - 1; // 256 is transaction size

        // Verify large entries are split up and the first sets has_more.
        let txs = vec![tx.clone(); threshold * 2];
        let entries = ledger::next_entries(&mint.last_id(), 0, txs);
        assert_eq!(entries.len(), 2);
        assert!(entries[0].has_more);
        assert!(!entries[1].has_more);

        // Verify that write_and_register_entry doesn't register the first entries after a split.
        assert_eq!(bank.last_id(), mint.last_id());
        // sink() discards the serialized output; only registration matters here.
        let writer = Mutex::new(sink());
        entry_writer
            .write_and_register_entry(&writer, &entries[0])
            .unwrap();
        // Partial entry written: bank's last_id must be unchanged.
        assert_eq!(bank.last_id(), mint.last_id());

        // Verify that write_and_register_entry registers the final entry after a split.
        entry_writer
            .write_and_register_entry(&writer, &entries[1])
            .unwrap();
        assert_eq!(bank.last_id(), entries[1].id);
    }
}
|