//! The `entry_writer` module helps implement the TPU's write stage. It
//! writes entries to the given writer, which is typically a file or
//! stdout, and then sends the Entry to its output channel.
use bank::Bank;
use entry::Entry;
use ledger::Block;
use packet::BlobRecycler;
use result::Result;
use serde_json;
use std::collections::VecDeque;
use std::io::{self, sink, Write};
use std::sync::mpsc::Receiver;
use std::sync::{Arc, Mutex};
use std::time::Duration;
use streamer::BlobSender;
pub struct EntryWriter<'a> {
2018-05-14 15:33:11 -06:00
bank: &'a Bank,
}
impl<'a> EntryWriter<'a> {
2018-05-14 15:33:11 -06:00
/// Create a new Tpu that wraps the given Bank.
pub fn new(bank: &'a Bank) -> Self {
EntryWriter { bank }
}
fn write_and_register_entry<W: Write>(
&self,
writer: &Mutex<W>,
entry: &Entry,
) -> io::Result<()> {
trace!("write_and_register_entry entry");
2018-06-28 16:18:10 -06:00
if !entry.has_more {
self.bank.register_entry_id(&entry.id);
}
writeln!(
writer
.lock()
.expect("'writer' lock in fn fn write_and_register_entry"),
"{}",
serde_json::to_string(&entry)
.expect("'entry' to_strong in fn write_and_register_entry")
2018-07-01 08:09:32 -07:00
)
}
fn write_entries<W: Write>(
&self,
writer: &Mutex<W>,
entry_receiver: &Receiver<Entry>,
) -> Result<Vec<Entry>> {
//TODO implement a serialize for channel that does this without allocations
let entry = entry_receiver.recv_timeout(Duration::new(1, 0))?;
2018-07-01 08:13:00 -07:00
let mut entries = vec![entry];
while let Ok(entry) = entry_receiver.try_recv() {
2018-07-01 08:13:00 -07:00
entries.push(entry);
}
2018-07-01 08:16:41 -07:00
for entry in &entries {
self.write_and_register_entry(writer, &entry)?;
}
2018-07-01 08:13:00 -07:00
Ok(entries)
}
/// Process any Entry items that have been published by the Historian.
/// continuosly broadcast blobs of entries out
pub fn write_and_send_entries<W: Write>(
&self,
2018-07-01 08:16:41 -07:00
blob_sender: &BlobSender,
2018-06-27 12:33:56 -06:00
blob_recycler: &BlobRecycler,
writer: &Mutex<W>,
entry_receiver: &Receiver<Entry>,
) -> Result<()> {
2018-07-01 08:16:41 -07:00
let entries = self.write_entries(writer, entry_receiver)?;
trace!("New blobs? {}", entries.len());
let mut blobs = VecDeque::new();
entries.to_blobs(blob_recycler, &mut blobs);
if !blobs.is_empty() {
trace!("broadcasting {}", blobs.len());
blob_sender.send(blobs)?;
}
Ok(())
}
/// Process any Entry items that have been published by the Historian.
/// continuosly broadcast blobs of entries out
pub fn drain_entries(&self, entry_receiver: &Receiver<Entry>) -> Result<()> {
self.write_entries(&Arc::new(Mutex::new(sink())), entry_receiver)?;
Ok(())
}
}
#[cfg(test)]
mod tests {
    use super::*;
    use ledger;
    use mint::Mint;
    use packet::BLOB_DATA_SIZE;
    use signature::{KeyPair, KeyPairUtil};
    use transaction::Transaction;

    #[test]
    fn test_dont_register_partial_entries() {
        let mint = Mint::new(1);
        let bank = Bank::new(&mint);
        let entry_writer = EntryWriter::new(&bank);
        let keypair = KeyPair::new();
        let tx = Transaction::new(&mint.keypair(), keypair.pubkey(), 1, mint.last_id());

        // NOTE: if Entry grows to larger than a transaction, the code below falls over
        let threshold = (BLOB_DATA_SIZE / 256) - 1; // 256 is transaction size

        // Verify large entries are split up and the first sets has_more.
        let txs = vec![tx.clone(); threshold * 2];
        let entries = ledger::next_entries(&mint.last_id(), 0, txs);
        assert_eq!(entries.len(), 2);
        assert!(entries[0].has_more);
        assert!(!entries[1].has_more);

        // Verify that write_and_register_entry doesn't register the first entries after a split.
        assert_eq!(bank.last_id(), mint.last_id());
        let writer = Mutex::new(sink());
        entry_writer
            .write_and_register_entry(&writer, &entries[0])
            .unwrap();
        assert_eq!(bank.last_id(), mint.last_id());

        // Verify that write_and_register_entry registers the final entry after a split.
        entry_writer
            .write_and_register_entry(&writer, &entries[1])
            .unwrap();
        assert_eq!(bank.last_id(), entries[1].id);
    }
}