Use a Drop trait to keep track of lifetimes for recycled objects.
* Move recycler instances to the point of allocation * sinks no longer need to call `recycle` * Remove the recycler arguments from all the apis that no longer need them
This commit is contained in:
committed by
Greg Fitzgerald
parent
6732a9078d
commit
431692d9d0
@ -6,7 +6,7 @@ use bank::Bank;
|
|||||||
use bincode::deserialize;
|
use bincode::deserialize;
|
||||||
use counter::Counter;
|
use counter::Counter;
|
||||||
use log::Level;
|
use log::Level;
|
||||||
use packet::{PacketRecycler, Packets, SharedPackets};
|
use packet::{Packets, SharedPackets};
|
||||||
use rayon::prelude::*;
|
use rayon::prelude::*;
|
||||||
use record_stage::Signal;
|
use record_stage::Signal;
|
||||||
use result::{Error, Result};
|
use result::{Error, Result};
|
||||||
@ -34,18 +34,12 @@ impl BankingStage {
|
|||||||
pub fn new(
|
pub fn new(
|
||||||
bank: Arc<Bank>,
|
bank: Arc<Bank>,
|
||||||
verified_receiver: Receiver<Vec<(SharedPackets, Vec<u8>)>>,
|
verified_receiver: Receiver<Vec<(SharedPackets, Vec<u8>)>>,
|
||||||
packet_recycler: PacketRecycler,
|
|
||||||
) -> (Self, Receiver<Signal>) {
|
) -> (Self, Receiver<Signal>) {
|
||||||
let (signal_sender, signal_receiver) = channel();
|
let (signal_sender, signal_receiver) = channel();
|
||||||
let thread_hdl = Builder::new()
|
let thread_hdl = Builder::new()
|
||||||
.name("solana-banking-stage".to_string())
|
.name("solana-banking-stage".to_string())
|
||||||
.spawn(move || loop {
|
.spawn(move || loop {
|
||||||
if let Err(e) = Self::process_packets(
|
if let Err(e) = Self::process_packets(&bank, &verified_receiver, &signal_sender) {
|
||||||
&bank,
|
|
||||||
&verified_receiver,
|
|
||||||
&signal_sender,
|
|
||||||
&packet_recycler,
|
|
||||||
) {
|
|
||||||
match e {
|
match e {
|
||||||
Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => break,
|
Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => break,
|
||||||
Error::RecvTimeoutError(RecvTimeoutError::Timeout) => (),
|
Error::RecvTimeoutError(RecvTimeoutError::Timeout) => (),
|
||||||
@ -75,7 +69,6 @@ impl BankingStage {
|
|||||||
bank: &Arc<Bank>,
|
bank: &Arc<Bank>,
|
||||||
verified_receiver: &Receiver<Vec<(SharedPackets, Vec<u8>)>>,
|
verified_receiver: &Receiver<Vec<(SharedPackets, Vec<u8>)>>,
|
||||||
signal_sender: &Sender<Signal>,
|
signal_sender: &Sender<Signal>,
|
||||||
packet_recycler: &PacketRecycler,
|
|
||||||
) -> Result<()> {
|
) -> Result<()> {
|
||||||
let timer = Duration::new(1, 0);
|
let timer = Duration::new(1, 0);
|
||||||
let recv_start = Instant::now();
|
let recv_start = Instant::now();
|
||||||
@ -92,7 +85,7 @@ impl BankingStage {
|
|||||||
let count = mms.iter().map(|x| x.1.len()).sum();
|
let count = mms.iter().map(|x| x.1.len()).sum();
|
||||||
let proc_start = Instant::now();
|
let proc_start = Instant::now();
|
||||||
for (msgs, vers) in mms {
|
for (msgs, vers) in mms {
|
||||||
let transactions = Self::deserialize_transactions(&msgs.read().unwrap());
|
let transactions = Self::deserialize_transactions(&msgs.read());
|
||||||
reqs_len += transactions.len();
|
reqs_len += transactions.len();
|
||||||
let transactions = transactions
|
let transactions = transactions
|
||||||
.into_iter()
|
.into_iter()
|
||||||
@ -113,8 +106,6 @@ impl BankingStage {
|
|||||||
return Err(Error::SendError);
|
return Err(Error::SendError);
|
||||||
}
|
}
|
||||||
debug!("done process_transactions");
|
debug!("done process_transactions");
|
||||||
|
|
||||||
packet_recycler.recycle(msgs, "process_transactions");
|
|
||||||
}
|
}
|
||||||
let total_time_s = timing::duration_as_s(&proc_start.elapsed());
|
let total_time_s = timing::duration_as_s(&proc_start.elapsed());
|
||||||
let total_time_ms = timing::duration_as_ms(&proc_start.elapsed());
|
let total_time_ms = timing::duration_as_ms(&proc_start.elapsed());
|
||||||
|
@ -20,8 +20,8 @@ fn producer(addr: &SocketAddr, recycler: &PacketRecycler, exit: Arc<AtomicBool>)
|
|||||||
let send = UdpSocket::bind("0.0.0.0:0").unwrap();
|
let send = UdpSocket::bind("0.0.0.0:0").unwrap();
|
||||||
let msgs = recycler.allocate();
|
let msgs = recycler.allocate();
|
||||||
let msgs_ = msgs.clone();
|
let msgs_ = msgs.clone();
|
||||||
msgs.write().unwrap().packets.resize(10, Packet::default());
|
msgs.write().packets.resize(10, Packet::default());
|
||||||
for w in &mut msgs.write().unwrap().packets {
|
for w in &mut msgs.write().packets {
|
||||||
w.meta.size = PACKET_DATA_SIZE;
|
w.meta.size = PACKET_DATA_SIZE;
|
||||||
w.meta.set_addr(&addr);
|
w.meta.set_addr(&addr);
|
||||||
}
|
}
|
||||||
@ -30,7 +30,7 @@ fn producer(addr: &SocketAddr, recycler: &PacketRecycler, exit: Arc<AtomicBool>)
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
let mut num = 0;
|
let mut num = 0;
|
||||||
for p in &msgs_.read().unwrap().packets {
|
for p in &msgs_.read().packets {
|
||||||
let a = p.meta.addr();
|
let a = p.meta.addr();
|
||||||
assert!(p.meta.size < BLOB_SIZE);
|
assert!(p.meta.size < BLOB_SIZE);
|
||||||
send.send_to(&p.data[..p.meta.size], &a).unwrap();
|
send.send_to(&p.data[..p.meta.size], &a).unwrap();
|
||||||
@ -52,7 +52,7 @@ fn sink(
|
|||||||
}
|
}
|
||||||
let timer = Duration::new(1, 0);
|
let timer = Duration::new(1, 0);
|
||||||
if let Ok(msgs) = r.recv_timeout(timer) {
|
if let Ok(msgs) = r.recv_timeout(timer) {
|
||||||
rvs.fetch_add(msgs.read().unwrap().packets.len(), Ordering::Relaxed);
|
rvs.fetch_add(msgs.read().packets.len(), Ordering::Relaxed);
|
||||||
recycler.recycle(msgs, "sink");
|
recycler.recycle(msgs, "sink");
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
@ -91,12 +91,7 @@ fn main() -> Result<()> {
|
|||||||
|
|
||||||
let (s_reader, r_reader) = channel();
|
let (s_reader, r_reader) = channel();
|
||||||
read_channels.push(r_reader);
|
read_channels.push(r_reader);
|
||||||
read_threads.push(receiver(
|
read_threads.push(receiver(Arc::new(read), exit.clone(), s_reader));
|
||||||
Arc::new(read),
|
|
||||||
exit.clone(),
|
|
||||||
pack_recycler.clone(),
|
|
||||||
s_reader,
|
|
||||||
));
|
|
||||||
}
|
}
|
||||||
|
|
||||||
let t_producer1 = producer(&addr, &pack_recycler, exit.clone());
|
let t_producer1 = producer(&addr, &pack_recycler, exit.clone());
|
||||||
|
@ -17,7 +17,6 @@ use solana::hash::Hash;
|
|||||||
use solana::logger;
|
use solana::logger;
|
||||||
use solana::metrics;
|
use solana::metrics;
|
||||||
use solana::ncp::Ncp;
|
use solana::ncp::Ncp;
|
||||||
use solana::packet::BlobRecycler;
|
|
||||||
use solana::service::Service;
|
use solana::service::Service;
|
||||||
use solana::signature::{read_keypair, GenKeys, Keypair, KeypairUtil};
|
use solana::signature::{read_keypair, GenKeys, Keypair, KeypairUtil};
|
||||||
use solana::thin_client::{poll_gossip_for_leader, ThinClient};
|
use solana::thin_client::{poll_gossip_for_leader, ThinClient};
|
||||||
@ -695,14 +694,7 @@ fn converge(
|
|||||||
spy_crdt.set_leader(leader.id);
|
spy_crdt.set_leader(leader.id);
|
||||||
let spy_ref = Arc::new(RwLock::new(spy_crdt));
|
let spy_ref = Arc::new(RwLock::new(spy_crdt));
|
||||||
let window = Arc::new(RwLock::new(default_window()));
|
let window = Arc::new(RwLock::new(default_window()));
|
||||||
let ncp = Ncp::new(
|
let ncp = Ncp::new(&spy_ref, window, None, gossip_socket, exit_signal.clone());
|
||||||
&spy_ref,
|
|
||||||
window,
|
|
||||||
BlobRecycler::default(),
|
|
||||||
None,
|
|
||||||
gossip_socket,
|
|
||||||
exit_signal.clone(),
|
|
||||||
);
|
|
||||||
let mut v: Vec<NodeInfo> = vec![];
|
let mut v: Vec<NodeInfo> = vec![];
|
||||||
// wait for the network to converge, 30 seconds should be plenty
|
// wait for the network to converge, 30 seconds should be plenty
|
||||||
for _ in 0..30 {
|
for _ in 0..30 {
|
||||||
|
@ -1,6 +1,5 @@
|
|||||||
//! The `blob_fetch_stage` pulls blobs from UDP sockets and sends it to a channel.
|
//! The `blob_fetch_stage` pulls blobs from UDP sockets and sends it to a channel.
|
||||||
|
|
||||||
use packet::BlobRecycler;
|
|
||||||
use service::Service;
|
use service::Service;
|
||||||
use std::net::UdpSocket;
|
use std::net::UdpSocket;
|
||||||
use std::sync::atomic::{AtomicBool, Ordering};
|
use std::sync::atomic::{AtomicBool, Ordering};
|
||||||
@ -15,24 +14,18 @@ pub struct BlobFetchStage {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl BlobFetchStage {
|
impl BlobFetchStage {
|
||||||
pub fn new(
|
pub fn new(socket: Arc<UdpSocket>, exit: Arc<AtomicBool>) -> (Self, BlobReceiver) {
|
||||||
socket: Arc<UdpSocket>,
|
Self::new_multi_socket(vec![socket], exit)
|
||||||
exit: Arc<AtomicBool>,
|
|
||||||
recycler: &BlobRecycler,
|
|
||||||
) -> (Self, BlobReceiver) {
|
|
||||||
Self::new_multi_socket(vec![socket], exit, recycler)
|
|
||||||
}
|
}
|
||||||
pub fn new_multi_socket(
|
pub fn new_multi_socket(
|
||||||
sockets: Vec<Arc<UdpSocket>>,
|
sockets: Vec<Arc<UdpSocket>>,
|
||||||
exit: Arc<AtomicBool>,
|
exit: Arc<AtomicBool>,
|
||||||
recycler: &BlobRecycler,
|
|
||||||
) -> (Self, BlobReceiver) {
|
) -> (Self, BlobReceiver) {
|
||||||
let (sender, receiver) = channel();
|
let (sender, receiver) = channel();
|
||||||
let thread_hdls: Vec<_> = sockets
|
let thread_hdls: Vec<_> = sockets
|
||||||
.into_iter()
|
.into_iter()
|
||||||
.map(|socket| {
|
.map(|socket| streamer::blob_receiver(socket, exit.clone(), sender.clone()))
|
||||||
streamer::blob_receiver(socket, exit.clone(), recycler.clone(), sender.clone())
|
.collect();
|
||||||
}).collect();
|
|
||||||
|
|
||||||
(BlobFetchStage { exit, thread_hdls }, receiver)
|
(BlobFetchStage { exit, thread_hdls }, receiver)
|
||||||
}
|
}
|
||||||
|
@ -56,7 +56,7 @@ fn broadcast(
|
|||||||
let to_blobs_elapsed = duration_as_ms(&to_blobs_start.elapsed());
|
let to_blobs_elapsed = duration_as_ms(&to_blobs_start.elapsed());
|
||||||
|
|
||||||
// flatten deque to vec
|
// flatten deque to vec
|
||||||
let blobs_vec: Vec<_> = dq.into_iter().collect();
|
let blobs_vec: SharedBlobs = dq.into_iter().collect();
|
||||||
|
|
||||||
let blobs_chunking = Instant::now();
|
let blobs_chunking = Instant::now();
|
||||||
// We could receive more blobs than window slots so
|
// We could receive more blobs than window slots so
|
||||||
@ -80,36 +80,24 @@ fn broadcast(
|
|||||||
{
|
{
|
||||||
let mut win = window.write().unwrap();
|
let mut win = window.write().unwrap();
|
||||||
assert!(blobs.len() <= win.len());
|
assert!(blobs.len() <= win.len());
|
||||||
for b in &blobs {
|
for b in blobs.iter() {
|
||||||
let ix = b.read().unwrap().get_index().expect("blob index");
|
let ix = b.read().get_index().expect("blob index");
|
||||||
let pos = (ix % WINDOW_SIZE) as usize;
|
let pos = (ix % WINDOW_SIZE) as usize;
|
||||||
if let Some(x) = win[pos].data.take() {
|
if let Some(x) = win[pos].data.take() {
|
||||||
trace!(
|
trace!("{} popped {} at {}", id, x.read().get_index().unwrap(), pos);
|
||||||
"{} popped {} at {}",
|
|
||||||
id,
|
|
||||||
x.read().unwrap().get_index().unwrap(),
|
|
||||||
pos
|
|
||||||
);
|
|
||||||
recycler.recycle(x, "broadcast-data");
|
|
||||||
}
|
}
|
||||||
if let Some(x) = win[pos].coding.take() {
|
if let Some(x) = win[pos].coding.take() {
|
||||||
trace!(
|
trace!("{} popped {} at {}", id, x.read().get_index().unwrap(), pos);
|
||||||
"{} popped {} at {}",
|
|
||||||
id,
|
|
||||||
x.read().unwrap().get_index().unwrap(),
|
|
||||||
pos
|
|
||||||
);
|
|
||||||
recycler.recycle(x, "broadcast-coding");
|
|
||||||
}
|
}
|
||||||
|
|
||||||
trace!("{} null {}", id, pos);
|
trace!("{} null {}", id, pos);
|
||||||
}
|
}
|
||||||
while let Some(b) = blobs.pop() {
|
for b in blobs.iter() {
|
||||||
let ix = b.read().unwrap().get_index().expect("blob index");
|
let ix = b.read().get_index().expect("blob index");
|
||||||
let pos = (ix % WINDOW_SIZE) as usize;
|
let pos = (ix % WINDOW_SIZE) as usize;
|
||||||
trace!("{} caching {} at {}", id, ix, pos);
|
trace!("{} caching {} at {}", id, ix, pos);
|
||||||
assert!(win[pos].data.is_none());
|
assert!(win[pos].data.is_none());
|
||||||
win[pos].data = Some(b);
|
win[pos].data = Some(b.clone());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -253,10 +241,10 @@ impl BroadcastStage {
|
|||||||
crdt: Arc<RwLock<Crdt>>,
|
crdt: Arc<RwLock<Crdt>>,
|
||||||
window: SharedWindow,
|
window: SharedWindow,
|
||||||
entry_height: u64,
|
entry_height: u64,
|
||||||
recycler: BlobRecycler,
|
|
||||||
receiver: Receiver<Vec<Entry>>,
|
receiver: Receiver<Vec<Entry>>,
|
||||||
exit_sender: Arc<AtomicBool>,
|
exit_sender: Arc<AtomicBool>,
|
||||||
) -> Self {
|
) -> Self {
|
||||||
|
let recycler = BlobRecycler::default();
|
||||||
let thread_hdl = Builder::new()
|
let thread_hdl = Builder::new()
|
||||||
.name("solana-broadcaster".to_string())
|
.name("solana-broadcaster".to_string())
|
||||||
.spawn(move || {
|
.spawn(move || {
|
||||||
@ -282,7 +270,6 @@ mod tests {
|
|||||||
use crdt::{Crdt, Node};
|
use crdt::{Crdt, Node};
|
||||||
use entry::Entry;
|
use entry::Entry;
|
||||||
use mint::Mint;
|
use mint::Mint;
|
||||||
use packet::BlobRecycler;
|
|
||||||
use recorder::Recorder;
|
use recorder::Recorder;
|
||||||
use service::Service;
|
use service::Service;
|
||||||
use signature::{Keypair, KeypairUtil, Pubkey};
|
use signature::{Keypair, KeypairUtil, Pubkey};
|
||||||
@ -318,7 +305,6 @@ mod tests {
|
|||||||
crdt.insert(&broadcast_buddy.info);
|
crdt.insert(&broadcast_buddy.info);
|
||||||
crdt.set_leader_rotation_interval(leader_rotation_interval);
|
crdt.set_leader_rotation_interval(leader_rotation_interval);
|
||||||
let crdt = Arc::new(RwLock::new(crdt));
|
let crdt = Arc::new(RwLock::new(crdt));
|
||||||
let blob_recycler = BlobRecycler::default();
|
|
||||||
|
|
||||||
// Make dummy initial entries
|
// Make dummy initial entries
|
||||||
let mint = Mint::new(10000);
|
let mint = Mint::new(10000);
|
||||||
@ -326,8 +312,7 @@ mod tests {
|
|||||||
let entry_height = entries.len() as u64;
|
let entry_height = entries.len() as u64;
|
||||||
|
|
||||||
// Setup a window
|
// Setup a window
|
||||||
let window =
|
let window = new_window_from_entries(&entries, entry_height, &leader_info.info);
|
||||||
new_window_from_entries(&entries, entry_height, &leader_info.info, &blob_recycler);
|
|
||||||
|
|
||||||
let shared_window = Arc::new(RwLock::new(window));
|
let shared_window = Arc::new(RwLock::new(window));
|
||||||
|
|
||||||
@ -339,7 +324,6 @@ mod tests {
|
|||||||
crdt.clone(),
|
crdt.clone(),
|
||||||
shared_window.clone(),
|
shared_window.clone(),
|
||||||
entry_height,
|
entry_height,
|
||||||
blob_recycler.clone(),
|
|
||||||
entry_receiver,
|
entry_receiver,
|
||||||
exit_sender,
|
exit_sender,
|
||||||
);
|
);
|
||||||
@ -359,7 +343,7 @@ mod tests {
|
|||||||
let window = shared_window.read().unwrap();
|
let window = shared_window.read().unwrap();
|
||||||
window.iter().fold(0, |m, w_slot| {
|
window.iter().fold(0, |m, w_slot| {
|
||||||
if let Some(ref blob) = w_slot.data {
|
if let Some(ref blob) = w_slot.data {
|
||||||
cmp::max(m, blob.read().unwrap().get_index().unwrap())
|
cmp::max(m, blob.read().get_index().unwrap())
|
||||||
} else {
|
} else {
|
||||||
m
|
m
|
||||||
}
|
}
|
||||||
|
@ -439,7 +439,7 @@ mod test {
|
|||||||
]);
|
]);
|
||||||
let date =
|
let date =
|
||||||
DateTime::<Utc>::from_utc(NaiveDate::from_ymd(2016, 7, 8).and_hms(9, 10, 11), Utc);
|
DateTime::<Utc>::from_utc(NaiveDate::from_ymd(2016, 7, 8).and_hms(9, 10, 11), Utc);
|
||||||
let dateIso8601 = "2016-07-08T09:10:11Z";
|
let date_is_08601 = "2016-07-08T09:10:11Z";
|
||||||
|
|
||||||
let tx = Transaction::budget_new(&keypair, to, 192, Hash::default());
|
let tx = Transaction::budget_new(&keypair, to, 192, Hash::default());
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
@ -477,9 +477,9 @@ mod test {
|
|||||||
date,
|
date,
|
||||||
Hash::default(),
|
Hash::default(),
|
||||||
);
|
);
|
||||||
let mut expectedUserdata = vec![1, 0, 0, 0, 20, 0, 0, 0, 0, 0, 0, 0];
|
let mut expected_userdata = vec![1, 0, 0, 0, 20, 0, 0, 0, 0, 0, 0, 0];
|
||||||
expectedUserdata.extend(dateIso8601.as_bytes());
|
expected_userdata.extend(date_is_08601.as_bytes());
|
||||||
assert_eq!(tx.userdata, expectedUserdata,);
|
assert_eq!(tx.userdata, expected_userdata,);
|
||||||
|
|
||||||
// ApplySignature
|
// ApplySignature
|
||||||
let tx = Transaction::budget_new_signature(&keypair, keypair.pubkey(), to, Hash::default());
|
let tx = Transaction::budget_new_signature(&keypair, keypair.pubkey(), to, Hash::default());
|
||||||
|
41
src/crdt.rs
41
src/crdt.rs
@ -570,7 +570,7 @@ impl Crdt {
|
|||||||
// only leader should be broadcasting
|
// only leader should be broadcasting
|
||||||
assert!(me.leader_id != v.id);
|
assert!(me.leader_id != v.id);
|
||||||
let bl = b.unwrap();
|
let bl = b.unwrap();
|
||||||
let blob = bl.read().expect("blob read lock in streamer::broadcast");
|
let blob = bl.read();
|
||||||
//TODO profile this, may need multiple sockets for par_iter
|
//TODO profile this, may need multiple sockets for par_iter
|
||||||
trace!(
|
trace!(
|
||||||
"{}: BROADCAST idx: {} sz: {} to {},{} coding: {}",
|
"{}: BROADCAST idx: {} sz: {} to {},{} coding: {}",
|
||||||
@ -622,10 +622,9 @@ impl Crdt {
|
|||||||
(s.my_data().clone(), s.table.values().cloned().collect())
|
(s.my_data().clone(), s.table.values().cloned().collect())
|
||||||
};
|
};
|
||||||
blob.write()
|
blob.write()
|
||||||
.unwrap()
|
|
||||||
.set_id(me.id)
|
.set_id(me.id)
|
||||||
.expect("set_id in pub fn retransmit");
|
.expect("set_id in pub fn retransmit");
|
||||||
let rblob = blob.read().unwrap();
|
let rblob = blob.read();
|
||||||
let orders: Vec<_> = table
|
let orders: Vec<_> = table
|
||||||
.iter()
|
.iter()
|
||||||
.filter(|v| {
|
.filter(|v| {
|
||||||
@ -880,10 +879,10 @@ impl Crdt {
|
|||||||
/// randomly pick a node and ask them for updates asynchronously
|
/// randomly pick a node and ask them for updates asynchronously
|
||||||
pub fn gossip(
|
pub fn gossip(
|
||||||
obj: Arc<RwLock<Self>>,
|
obj: Arc<RwLock<Self>>,
|
||||||
blob_recycler: BlobRecycler,
|
|
||||||
blob_sender: BlobSender,
|
blob_sender: BlobSender,
|
||||||
exit: Arc<AtomicBool>,
|
exit: Arc<AtomicBool>,
|
||||||
) -> JoinHandle<()> {
|
) -> JoinHandle<()> {
|
||||||
|
let blob_recycler = BlobRecycler::default();
|
||||||
Builder::new()
|
Builder::new()
|
||||||
.name("solana-gossip".to_string())
|
.name("solana-gossip".to_string())
|
||||||
.spawn(move || loop {
|
.spawn(move || loop {
|
||||||
@ -913,8 +912,8 @@ impl Crdt {
|
|||||||
blob_recycler: &BlobRecycler,
|
blob_recycler: &BlobRecycler,
|
||||||
) -> Option<SharedBlob> {
|
) -> Option<SharedBlob> {
|
||||||
let pos = (ix as usize) % window.read().unwrap().len();
|
let pos = (ix as usize) % window.read().unwrap().len();
|
||||||
if let Some(blob) = &window.read().unwrap()[pos].data {
|
if let Some(ref mut blob) = &mut window.write().unwrap()[pos].data {
|
||||||
let mut wblob = blob.write().unwrap();
|
let mut wblob = blob.write();
|
||||||
let blob_ix = wblob.get_index().expect("run_window_request get_index");
|
let blob_ix = wblob.get_index().expect("run_window_request get_index");
|
||||||
if blob_ix == ix {
|
if blob_ix == ix {
|
||||||
let num_retransmits = wblob.meta.num_retransmits;
|
let num_retransmits = wblob.meta.num_retransmits;
|
||||||
@ -937,7 +936,7 @@ impl Crdt {
|
|||||||
|
|
||||||
// copy to avoid doing IO inside the lock
|
// copy to avoid doing IO inside the lock
|
||||||
{
|
{
|
||||||
let mut outblob = out.write().unwrap();
|
let mut outblob = out.write();
|
||||||
let sz = wblob.meta.size;
|
let sz = wblob.meta.size;
|
||||||
outblob.meta.size = sz;
|
outblob.meta.size = sz;
|
||||||
outblob.data[..sz].copy_from_slice(&wblob.data[..sz]);
|
outblob.data[..sz].copy_from_slice(&wblob.data[..sz]);
|
||||||
@ -1177,16 +1176,11 @@ impl Crdt {
|
|||||||
}
|
}
|
||||||
let mut resps = Vec::new();
|
let mut resps = Vec::new();
|
||||||
for req in reqs {
|
for req in reqs {
|
||||||
if let Some(resp) = Self::handle_blob(
|
if let Some(resp) =
|
||||||
obj,
|
Self::handle_blob(obj, window, ledger_window, blob_recycler, &req.read())
|
||||||
window,
|
{
|
||||||
ledger_window,
|
|
||||||
blob_recycler,
|
|
||||||
&req.read().unwrap(),
|
|
||||||
) {
|
|
||||||
resps.push(resp);
|
resps.push(resp);
|
||||||
}
|
}
|
||||||
blob_recycler.recycle(req, "run_listen");
|
|
||||||
}
|
}
|
||||||
response_sender.send(resps)?;
|
response_sender.send(resps)?;
|
||||||
Ok(())
|
Ok(())
|
||||||
@ -1195,12 +1189,12 @@ impl Crdt {
|
|||||||
me: Arc<RwLock<Self>>,
|
me: Arc<RwLock<Self>>,
|
||||||
window: SharedWindow,
|
window: SharedWindow,
|
||||||
ledger_path: Option<&str>,
|
ledger_path: Option<&str>,
|
||||||
blob_recycler: BlobRecycler,
|
|
||||||
requests_receiver: BlobReceiver,
|
requests_receiver: BlobReceiver,
|
||||||
response_sender: BlobSender,
|
response_sender: BlobSender,
|
||||||
exit: Arc<AtomicBool>,
|
exit: Arc<AtomicBool>,
|
||||||
) -> JoinHandle<()> {
|
) -> JoinHandle<()> {
|
||||||
let mut ledger_window = ledger_path.map(|p| LedgerWindow::open(p).unwrap());
|
let mut ledger_window = ledger_path.map(|p| LedgerWindow::open(p).unwrap());
|
||||||
|
let blob_recycler = BlobRecycler::default();
|
||||||
|
|
||||||
Builder::new()
|
Builder::new()
|
||||||
.name("solana-listen".to_string())
|
.name("solana-listen".to_string())
|
||||||
@ -1650,10 +1644,9 @@ mod tests {
|
|||||||
// check that the service works
|
// check that the service works
|
||||||
// and that it eventually produces a request for both nodes
|
// and that it eventually produces a request for both nodes
|
||||||
let (sender, reader) = channel();
|
let (sender, reader) = channel();
|
||||||
let recycler = BlobRecycler::default();
|
|
||||||
let exit = Arc::new(AtomicBool::new(false));
|
let exit = Arc::new(AtomicBool::new(false));
|
||||||
let obj = Arc::new(RwLock::new(crdt));
|
let obj = Arc::new(RwLock::new(crdt));
|
||||||
let thread = Crdt::gossip(obj, recycler, sender, exit.clone());
|
let thread = Crdt::gossip(obj, sender, exit.clone());
|
||||||
let mut one = false;
|
let mut one = false;
|
||||||
let mut two = false;
|
let mut two = false;
|
||||||
for _ in 0..30 {
|
for _ in 0..30 {
|
||||||
@ -1664,9 +1657,9 @@ mod tests {
|
|||||||
}
|
}
|
||||||
assert!(rv.len() > 0);
|
assert!(rv.len() > 0);
|
||||||
for i in rv.iter() {
|
for i in rv.iter() {
|
||||||
if i.read().unwrap().meta.addr() == nxt1.contact_info.ncp {
|
if i.read().meta.addr() == nxt1.contact_info.ncp {
|
||||||
one = true;
|
one = true;
|
||||||
} else if i.read().unwrap().meta.addr() == nxt2.contact_info.ncp {
|
} else if i.read().meta.addr() == nxt2.contact_info.ncp {
|
||||||
two = true;
|
two = true;
|
||||||
} else {
|
} else {
|
||||||
//unexpected request
|
//unexpected request
|
||||||
@ -1774,7 +1767,7 @@ mod tests {
|
|||||||
);
|
);
|
||||||
assert!(rv.is_none());
|
assert!(rv.is_none());
|
||||||
let out = recycler.allocate();
|
let out = recycler.allocate();
|
||||||
out.write().unwrap().meta.size = 200;
|
out.write().meta.size = 200;
|
||||||
window.write().unwrap()[0].data = Some(out);
|
window.write().unwrap()[0].data = Some(out);
|
||||||
let rv = Crdt::run_window_request(
|
let rv = Crdt::run_window_request(
|
||||||
&me,
|
&me,
|
||||||
@ -1788,7 +1781,7 @@ mod tests {
|
|||||||
assert!(rv.is_some());
|
assert!(rv.is_some());
|
||||||
let v = rv.unwrap();
|
let v = rv.unwrap();
|
||||||
//test we copied the blob
|
//test we copied the blob
|
||||||
assert_eq!(v.read().unwrap().meta.size, 200);
|
assert_eq!(v.read().meta.size, 200);
|
||||||
let len = window.read().unwrap().len() as u64;
|
let len = window.read().unwrap().len() as u64;
|
||||||
let rv = Crdt::run_window_request(
|
let rv = Crdt::run_window_request(
|
||||||
&me,
|
&me,
|
||||||
@ -1859,7 +1852,7 @@ mod tests {
|
|||||||
assert!(rv.is_none());
|
assert!(rv.is_none());
|
||||||
let blob = recycler.allocate();
|
let blob = recycler.allocate();
|
||||||
let blob_size = 200;
|
let blob_size = 200;
|
||||||
blob.write().unwrap().meta.size = blob_size;
|
blob.write().meta.size = blob_size;
|
||||||
window.write().unwrap()[0].data = Some(blob);
|
window.write().unwrap()[0].data = Some(blob);
|
||||||
|
|
||||||
let num_requests: u32 = 64;
|
let num_requests: u32 = 64;
|
||||||
@ -1873,7 +1866,7 @@ mod tests {
|
|||||||
0,
|
0,
|
||||||
&recycler,
|
&recycler,
|
||||||
).unwrap();
|
).unwrap();
|
||||||
let blob = shared_blob.read().unwrap();
|
let blob = shared_blob.read();
|
||||||
// Test we copied the blob
|
// Test we copied the blob
|
||||||
assert_eq!(blob.meta.size, blob_size);
|
assert_eq!(blob.meta.size, blob_size);
|
||||||
|
|
||||||
|
@ -84,7 +84,7 @@ impl Entry {
|
|||||||
) -> SharedBlob {
|
) -> SharedBlob {
|
||||||
let blob = blob_recycler.allocate();
|
let blob = blob_recycler.allocate();
|
||||||
{
|
{
|
||||||
let mut blob_w = blob.write().unwrap();
|
let mut blob_w = blob.write();
|
||||||
let pos = {
|
let pos = {
|
||||||
let mut out = Cursor::new(blob_w.data_mut());
|
let mut out = Cursor::new(blob_w.data_mut());
|
||||||
serialize_into(&mut out, &self).expect("failed to serialize output");
|
serialize_into(&mut out, &self).expect("failed to serialize output");
|
||||||
|
@ -75,7 +75,7 @@ pub const ERASURE_W: i32 = 32;
|
|||||||
// There are some alignment restrictions, blocks should be aligned by 16 bytes
|
// There are some alignment restrictions, blocks should be aligned by 16 bytes
|
||||||
// which means their size should be >= 16 bytes
|
// which means their size should be >= 16 bytes
|
||||||
pub fn generate_coding_blocks(coding: &mut [&mut [u8]], data: &[&[u8]]) -> Result<()> {
|
pub fn generate_coding_blocks(coding: &mut [&mut [u8]], data: &[&[u8]]) -> Result<()> {
|
||||||
if data.len() == 0 {
|
if data.is_empty() {
|
||||||
return Ok(());
|
return Ok(());
|
||||||
}
|
}
|
||||||
let k = data.len() as i32;
|
let k = data.len() as i32;
|
||||||
@ -130,7 +130,7 @@ pub fn decode_blocks(
|
|||||||
coding: &mut [&mut [u8]],
|
coding: &mut [&mut [u8]],
|
||||||
erasures: &[i32],
|
erasures: &[i32],
|
||||||
) -> Result<()> {
|
) -> Result<()> {
|
||||||
if data.len() == 0 {
|
if data.is_empty() {
|
||||||
return Ok(());
|
return Ok(());
|
||||||
}
|
}
|
||||||
let block_len = data[0].len();
|
let block_len = data[0].len();
|
||||||
@ -247,7 +247,7 @@ pub fn generate_coding(
|
|||||||
trace!("{} window[{}] = {:?}", id, n, window[n].data);
|
trace!("{} window[{}] = {:?}", id, n, window[n].data);
|
||||||
|
|
||||||
if let Some(b) = &window[n].data {
|
if let Some(b) = &window[n].data {
|
||||||
max_data_size = cmp::max(b.read().unwrap().meta.size, max_data_size);
|
max_data_size = cmp::max(b.read().meta.size, max_data_size);
|
||||||
} else {
|
} else {
|
||||||
trace!("{} data block is null @ {}", id, n);
|
trace!("{} data block is null @ {}", id, n);
|
||||||
return Ok(());
|
return Ok(());
|
||||||
@ -266,7 +266,7 @@ pub fn generate_coding(
|
|||||||
if let Some(b) = &window[n].data {
|
if let Some(b) = &window[n].data {
|
||||||
// make sure extra bytes in each blob are zero-d out for generation of
|
// make sure extra bytes in each blob are zero-d out for generation of
|
||||||
// coding blobs
|
// coding blobs
|
||||||
let mut b_wl = b.write().unwrap();
|
let mut b_wl = b.write();
|
||||||
for i in b_wl.meta.size..max_data_size {
|
for i in b_wl.meta.size..max_data_size {
|
||||||
b_wl.data[i] = 0;
|
b_wl.data[i] = 0;
|
||||||
}
|
}
|
||||||
@ -288,13 +288,13 @@ pub fn generate_coding(
|
|||||||
window[n].coding = Some(recycler.allocate());
|
window[n].coding = Some(recycler.allocate());
|
||||||
|
|
||||||
let coding = window[n].coding.clone().unwrap();
|
let coding = window[n].coding.clone().unwrap();
|
||||||
let mut coding_wl = coding.write().unwrap();
|
let mut coding_wl = coding.write();
|
||||||
for i in 0..max_data_size {
|
for i in 0..max_data_size {
|
||||||
coding_wl.data[i] = 0;
|
coding_wl.data[i] = 0;
|
||||||
}
|
}
|
||||||
// copy index and id from the data blob
|
// copy index and id from the data blob
|
||||||
if let Some(data) = &window[n].data {
|
if let Some(data) = &window[n].data {
|
||||||
let data_rl = data.read().unwrap();
|
let data_rl = data.read();
|
||||||
|
|
||||||
let index = data_rl.get_index().unwrap();
|
let index = data_rl.get_index().unwrap();
|
||||||
let id = data_rl.get_id().unwrap();
|
let id = data_rl.get_id().unwrap();
|
||||||
@ -316,10 +316,7 @@ pub fn generate_coding(
|
|||||||
coding_blobs.push(coding.clone());
|
coding_blobs.push(coding.clone());
|
||||||
}
|
}
|
||||||
|
|
||||||
let data_locks: Vec<_> = data_blobs
|
let data_locks: Vec<_> = data_blobs.iter().map(|b| b.read()).collect();
|
||||||
.iter()
|
|
||||||
.map(|b| b.read().expect("'data_locks' of data_blobs"))
|
|
||||||
.collect();
|
|
||||||
|
|
||||||
let data_ptrs: Vec<_> = data_locks
|
let data_ptrs: Vec<_> = data_locks
|
||||||
.iter()
|
.iter()
|
||||||
@ -329,10 +326,7 @@ pub fn generate_coding(
|
|||||||
&l.data[..max_data_size]
|
&l.data[..max_data_size]
|
||||||
}).collect();
|
}).collect();
|
||||||
|
|
||||||
let mut coding_locks: Vec<_> = coding_blobs
|
let mut coding_locks: Vec<_> = coding_blobs.iter().map(|b| b.write()).collect();
|
||||||
.iter()
|
|
||||||
.map(|b| b.write().expect("'coding_locks' of coding_blobs"))
|
|
||||||
.collect();
|
|
||||||
|
|
||||||
let mut coding_ptrs: Vec<_> = coding_locks
|
let mut coding_ptrs: Vec<_> = coding_locks
|
||||||
.iter_mut()
|
.iter_mut()
|
||||||
@ -364,7 +358,7 @@ fn is_missing(
|
|||||||
c_or_d: &str,
|
c_or_d: &str,
|
||||||
) -> bool {
|
) -> bool {
|
||||||
if let Some(blob) = window_slot.take() {
|
if let Some(blob) = window_slot.take() {
|
||||||
let blob_idx = blob.read().unwrap().get_index().unwrap();
|
let blob_idx = blob.read().get_index().unwrap();
|
||||||
if blob_idx == idx {
|
if blob_idx == idx {
|
||||||
trace!("recover {}: idx: {} good {}", id, idx, c_or_d);
|
trace!("recover {}: idx: {} good {}", id, idx, c_or_d);
|
||||||
// put it back
|
// put it back
|
||||||
@ -489,7 +483,7 @@ pub fn recover(
|
|||||||
|
|
||||||
if let Some(b) = window[j].data.clone() {
|
if let Some(b) = window[j].data.clone() {
|
||||||
if meta.is_none() {
|
if meta.is_none() {
|
||||||
meta = Some(b.read().unwrap().meta.clone());
|
meta = Some(b.read().meta.clone());
|
||||||
trace!("recover {} meta at {} {:?}", id, j, meta);
|
trace!("recover {} meta at {} {:?}", id, j, meta);
|
||||||
}
|
}
|
||||||
blobs.push(b);
|
blobs.push(b);
|
||||||
@ -505,7 +499,7 @@ pub fn recover(
|
|||||||
let j = i % window.len();
|
let j = i % window.len();
|
||||||
if let Some(b) = window[j].coding.clone() {
|
if let Some(b) = window[j].coding.clone() {
|
||||||
if size.is_none() {
|
if size.is_none() {
|
||||||
size = Some(b.read().unwrap().meta.size - BLOB_HEADER_SIZE);
|
size = Some(b.read().meta.size - BLOB_HEADER_SIZE);
|
||||||
trace!(
|
trace!(
|
||||||
"{} recover size {} from {}",
|
"{} recover size {} from {}",
|
||||||
id,
|
id,
|
||||||
@ -529,7 +523,7 @@ pub fn recover(
|
|||||||
let j = i % window.len();
|
let j = i % window.len();
|
||||||
|
|
||||||
if let Some(b) = &window[j].data {
|
if let Some(b) = &window[j].data {
|
||||||
let mut b_wl = b.write().unwrap();
|
let mut b_wl = b.write();
|
||||||
for i in b_wl.meta.size..size {
|
for i in b_wl.meta.size..size {
|
||||||
b_wl.data[i] = 0;
|
b_wl.data[i] = 0;
|
||||||
}
|
}
|
||||||
@ -541,7 +535,7 @@ pub fn recover(
|
|||||||
trace!("erasures[]: {} {:?} data_size: {}", id, erasures, size,);
|
trace!("erasures[]: {} {:?} data_size: {}", id, erasures, size,);
|
||||||
//lock everything for write
|
//lock everything for write
|
||||||
for b in &blobs {
|
for b in &blobs {
|
||||||
locks.push(b.write().expect("'locks' arr in pb fn recover"));
|
locks.push(b.write());
|
||||||
}
|
}
|
||||||
|
|
||||||
{
|
{
|
||||||
@ -680,7 +674,7 @@ mod test {
|
|||||||
print!("window({:>w$}): ", i, w = 2);
|
print!("window({:>w$}): ", i, w = 2);
|
||||||
if w.data.is_some() {
|
if w.data.is_some() {
|
||||||
let window_l1 = w.data.clone().unwrap();
|
let window_l1 = w.data.clone().unwrap();
|
||||||
let window_l2 = window_l1.read().unwrap();
|
let window_l2 = window_l1.read();
|
||||||
print!(
|
print!(
|
||||||
"data index: {:?} meta.size: {} data: ",
|
"data index: {:?} meta.size: {} data: ",
|
||||||
window_l2.get_index(),
|
window_l2.get_index(),
|
||||||
@ -692,11 +686,11 @@ mod test {
|
|||||||
} else {
|
} else {
|
||||||
print!("data null ");
|
print!("data null ");
|
||||||
}
|
}
|
||||||
println!("");
|
println!();
|
||||||
print!("window({:>w$}): ", i, w = 2);
|
print!("window({:>w$}): ", i, w = 2);
|
||||||
if w.coding.is_some() {
|
if w.coding.is_some() {
|
||||||
let window_l1 = w.coding.clone().unwrap();
|
let window_l1 = w.coding.clone().unwrap();
|
||||||
let window_l2 = window_l1.read().unwrap();
|
let window_l2 = window_l1.read();
|
||||||
print!(
|
print!(
|
||||||
"coding index: {:?} meta.size: {} data: ",
|
"coding index: {:?} meta.size: {} data: ",
|
||||||
window_l2.get_index(),
|
window_l2.get_index(),
|
||||||
@ -708,7 +702,7 @@ mod test {
|
|||||||
} else {
|
} else {
|
||||||
print!("coding null");
|
print!("coding null");
|
||||||
}
|
}
|
||||||
println!("");
|
println!();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -730,7 +724,7 @@ mod test {
|
|||||||
for i in 0..num_blobs {
|
for i in 0..num_blobs {
|
||||||
let b = blob_recycler.allocate();
|
let b = blob_recycler.allocate();
|
||||||
let b_ = b.clone();
|
let b_ = b.clone();
|
||||||
let mut w = b.write().unwrap();
|
let mut w = b.write();
|
||||||
// generate a random length, multiple of 4 between 8 and 32
|
// generate a random length, multiple of 4 between 8 and 32
|
||||||
let data_len = if i == 3 {
|
let data_len = if i == 3 {
|
||||||
BLOB_DATA_SIZE
|
BLOB_DATA_SIZE
|
||||||
@ -762,7 +756,7 @@ mod test {
|
|||||||
);
|
);
|
||||||
assert!(index_blobs(&d, &blobs, &mut (offset as u64)).is_ok());
|
assert!(index_blobs(&d, &blobs, &mut (offset as u64)).is_ok());
|
||||||
for b in blobs {
|
for b in blobs {
|
||||||
let idx = b.read().unwrap().get_index().unwrap() as usize % WINDOW_SIZE;
|
let idx = b.read().get_index().unwrap() as usize % WINDOW_SIZE;
|
||||||
|
|
||||||
window[idx].data = Some(b);
|
window[idx].data = Some(b);
|
||||||
}
|
}
|
||||||
@ -773,11 +767,11 @@ mod test {
|
|||||||
for i in 0..num_blobs {
|
for i in 0..num_blobs {
|
||||||
if let Some(b) = &window[i].data {
|
if let Some(b) = &window[i].data {
|
||||||
let size = {
|
let size = {
|
||||||
let b_l = b.read().unwrap();
|
let b_l = b.read();
|
||||||
b_l.meta.size
|
b_l.meta.size
|
||||||
} as usize;
|
} as usize;
|
||||||
|
|
||||||
let mut b_l = b.write().unwrap();
|
let mut b_l = b.write();
|
||||||
for i in size..BLOB_SIZE {
|
for i in size..BLOB_SIZE {
|
||||||
b_l.data[i] = thread_rng().gen();
|
b_l.data[i] = thread_rng().gen();
|
||||||
}
|
}
|
||||||
@ -790,7 +784,7 @@ mod test {
|
|||||||
for _ in 0..WINDOW_SIZE * 10 {
|
for _ in 0..WINDOW_SIZE * 10 {
|
||||||
let blob = blob_recycler.allocate();
|
let blob = blob_recycler.allocate();
|
||||||
{
|
{
|
||||||
let mut b_l = blob.write().unwrap();
|
let mut b_l = blob.write();
|
||||||
|
|
||||||
for i in 0..BLOB_SIZE {
|
for i in 0..BLOB_SIZE {
|
||||||
b_l.data[i] = thread_rng().gen();
|
b_l.data[i] = thread_rng().gen();
|
||||||
@ -821,7 +815,7 @@ mod test {
|
|||||||
|
|
||||||
for slot in &window {
|
for slot in &window {
|
||||||
if let Some(blob) = &slot.data {
|
if let Some(blob) = &slot.data {
|
||||||
let blob_r = blob.read().unwrap();
|
let blob_r = blob.read();
|
||||||
assert!(!blob_r.is_coding());
|
assert!(!blob_r.is_coding());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -875,9 +869,9 @@ mod test {
|
|||||||
// Check the result, block is here to drop locks
|
// Check the result, block is here to drop locks
|
||||||
|
|
||||||
let window_l = window[erase_offset].data.clone().unwrap();
|
let window_l = window[erase_offset].data.clone().unwrap();
|
||||||
let window_l2 = window_l.read().unwrap();
|
let window_l2 = window_l.read();
|
||||||
let ref_l = refwindow.clone().unwrap();
|
let ref_l = refwindow.clone().unwrap();
|
||||||
let ref_l2 = ref_l.read().unwrap();
|
let ref_l2 = ref_l.read();
|
||||||
|
|
||||||
assert_eq!(window_l2.meta.size, ref_l2.meta.size);
|
assert_eq!(window_l2.meta.size, ref_l2.meta.size);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
@ -924,9 +918,9 @@ mod test {
|
|||||||
{
|
{
|
||||||
// Check the result, block is here to drop locks
|
// Check the result, block is here to drop locks
|
||||||
let window_l = window[erase_offset].data.clone().unwrap();
|
let window_l = window[erase_offset].data.clone().unwrap();
|
||||||
let window_l2 = window_l.read().unwrap();
|
let window_l2 = window_l.read();
|
||||||
let ref_l = refwindow.clone().unwrap();
|
let ref_l = refwindow.clone().unwrap();
|
||||||
let ref_l2 = ref_l.read().unwrap();
|
let ref_l2 = ref_l.read();
|
||||||
assert_eq!(window_l2.meta.size, ref_l2.meta.size);
|
assert_eq!(window_l2.meta.size, ref_l2.meta.size);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
window_l2.data[..window_l2.meta.size],
|
window_l2.data[..window_l2.meta.size],
|
||||||
@ -947,10 +941,7 @@ mod test {
|
|||||||
// Create a hole in the window by making the blob's index stale
|
// Create a hole in the window by making the blob's index stale
|
||||||
let refwindow = window[offset].data.clone();
|
let refwindow = window[offset].data.clone();
|
||||||
if let Some(blob) = &window[erase_offset].data {
|
if let Some(blob) = &window[erase_offset].data {
|
||||||
blob.write()
|
blob.write().set_index(erase_offset as u64).unwrap(); // this also writes to refwindow...
|
||||||
.unwrap()
|
|
||||||
.set_index(erase_offset as u64)
|
|
||||||
.unwrap(); // this also writes to refwindow...
|
|
||||||
}
|
}
|
||||||
print_window(&window);
|
print_window(&window);
|
||||||
|
|
||||||
@ -970,7 +961,6 @@ mod test {
|
|||||||
// fix refwindow, we wrote to it above...
|
// fix refwindow, we wrote to it above...
|
||||||
if let Some(blob) = &refwindow {
|
if let Some(blob) = &refwindow {
|
||||||
blob.write()
|
blob.write()
|
||||||
.unwrap()
|
|
||||||
.set_index((erase_offset + WINDOW_SIZE) as u64)
|
.set_index((erase_offset + WINDOW_SIZE) as u64)
|
||||||
.unwrap(); // this also writes to refwindow...
|
.unwrap(); // this also writes to refwindow...
|
||||||
}
|
}
|
||||||
@ -978,9 +968,9 @@ mod test {
|
|||||||
{
|
{
|
||||||
// Check the result, block is here to drop locks
|
// Check the result, block is here to drop locks
|
||||||
let window_l = window[erase_offset].data.clone().unwrap();
|
let window_l = window[erase_offset].data.clone().unwrap();
|
||||||
let window_l2 = window_l.read().unwrap();
|
let window_l2 = window_l.read();
|
||||||
let ref_l = refwindow.clone().unwrap();
|
let ref_l = refwindow.clone().unwrap();
|
||||||
let ref_l2 = ref_l.read().unwrap();
|
let ref_l2 = ref_l.read();
|
||||||
assert_eq!(window_l2.meta.size, ref_l2.meta.size);
|
assert_eq!(window_l2.meta.size, ref_l2.meta.size);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
window_l2.data[..window_l2.meta.size],
|
window_l2.data[..window_l2.meta.size],
|
||||||
|
@ -1,6 +1,5 @@
|
|||||||
//! The `fetch_stage` batches input from a UDP socket and sends it to a channel.
|
//! The `fetch_stage` batches input from a UDP socket and sends it to a channel.
|
||||||
|
|
||||||
use packet::PacketRecycler;
|
|
||||||
use service::Service;
|
use service::Service;
|
||||||
use std::net::UdpSocket;
|
use std::net::UdpSocket;
|
||||||
use std::sync::atomic::{AtomicBool, Ordering};
|
use std::sync::atomic::{AtomicBool, Ordering};
|
||||||
@ -15,25 +14,19 @@ pub struct FetchStage {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl FetchStage {
|
impl FetchStage {
|
||||||
pub fn new(
|
pub fn new(sockets: Vec<UdpSocket>, exit: Arc<AtomicBool>) -> (Self, PacketReceiver) {
|
||||||
sockets: Vec<UdpSocket>,
|
|
||||||
exit: Arc<AtomicBool>,
|
|
||||||
recycler: &PacketRecycler,
|
|
||||||
) -> (Self, PacketReceiver) {
|
|
||||||
let tx_sockets = sockets.into_iter().map(Arc::new).collect();
|
let tx_sockets = sockets.into_iter().map(Arc::new).collect();
|
||||||
Self::new_multi_socket(tx_sockets, exit, recycler)
|
Self::new_multi_socket(tx_sockets, exit)
|
||||||
}
|
}
|
||||||
pub fn new_multi_socket(
|
pub fn new_multi_socket(
|
||||||
sockets: Vec<Arc<UdpSocket>>,
|
sockets: Vec<Arc<UdpSocket>>,
|
||||||
exit: Arc<AtomicBool>,
|
exit: Arc<AtomicBool>,
|
||||||
recycler: &PacketRecycler,
|
|
||||||
) -> (Self, PacketReceiver) {
|
) -> (Self, PacketReceiver) {
|
||||||
let (sender, receiver) = channel();
|
let (sender, receiver) = channel();
|
||||||
let thread_hdls: Vec<_> = sockets
|
let thread_hdls: Vec<_> = sockets
|
||||||
.into_iter()
|
.into_iter()
|
||||||
.map(|socket| {
|
.map(|socket| streamer::receiver(socket, exit.clone(), sender.clone()))
|
||||||
streamer::receiver(socket, exit.clone(), recycler.clone(), sender.clone())
|
.collect();
|
||||||
}).collect();
|
|
||||||
|
|
||||||
(FetchStage { exit, thread_hdls }, receiver)
|
(FetchStage { exit, thread_hdls }, receiver)
|
||||||
}
|
}
|
||||||
|
@ -7,7 +7,6 @@ use drone::DRONE_PORT;
|
|||||||
use entry::Entry;
|
use entry::Entry;
|
||||||
use ledger::read_ledger;
|
use ledger::read_ledger;
|
||||||
use ncp::Ncp;
|
use ncp::Ncp;
|
||||||
use packet::BlobRecycler;
|
|
||||||
use rpc::{JsonRpcService, RPC_PORT};
|
use rpc::{JsonRpcService, RPC_PORT};
|
||||||
use rpu::Rpu;
|
use rpu::Rpu;
|
||||||
use service::Service;
|
use service::Service;
|
||||||
@ -91,7 +90,6 @@ pub struct Fullnode {
|
|||||||
broadcast_socket: UdpSocket,
|
broadcast_socket: UdpSocket,
|
||||||
requests_socket: UdpSocket,
|
requests_socket: UdpSocket,
|
||||||
respond_socket: UdpSocket,
|
respond_socket: UdpSocket,
|
||||||
blob_recycler: BlobRecycler,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
|
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
|
||||||
@ -235,8 +233,6 @@ impl Fullnode {
|
|||||||
}
|
}
|
||||||
let exit = Arc::new(AtomicBool::new(false));
|
let exit = Arc::new(AtomicBool::new(false));
|
||||||
let bank = Arc::new(bank);
|
let bank = Arc::new(bank);
|
||||||
let mut blob_recycler = BlobRecycler::default();
|
|
||||||
blob_recycler.set_name("fullnode::Blob");
|
|
||||||
|
|
||||||
let rpu = Some(Rpu::new(
|
let rpu = Some(Rpu::new(
|
||||||
&bank,
|
&bank,
|
||||||
@ -248,7 +244,6 @@ impl Fullnode {
|
|||||||
.respond
|
.respond
|
||||||
.try_clone()
|
.try_clone()
|
||||||
.expect("Failed to clone respond socket"),
|
.expect("Failed to clone respond socket"),
|
||||||
&blob_recycler,
|
|
||||||
));
|
));
|
||||||
|
|
||||||
// TODO: this code assumes this node is the leader
|
// TODO: this code assumes this node is the leader
|
||||||
@ -263,8 +258,7 @@ impl Fullnode {
|
|||||||
exit.clone(),
|
exit.clone(),
|
||||||
);
|
);
|
||||||
|
|
||||||
let window =
|
let window = window::new_window_from_entries(ledger_tail, entry_height, &node.info);
|
||||||
window::new_window_from_entries(ledger_tail, entry_height, &node.info, &blob_recycler);
|
|
||||||
let shared_window = Arc::new(RwLock::new(window));
|
let shared_window = Arc::new(RwLock::new(window));
|
||||||
|
|
||||||
let mut crdt = Crdt::new(node.info).expect("Crdt::new");
|
let mut crdt = Crdt::new(node.info).expect("Crdt::new");
|
||||||
@ -276,7 +270,6 @@ impl Fullnode {
|
|||||||
let ncp = Ncp::new(
|
let ncp = Ncp::new(
|
||||||
&crdt,
|
&crdt,
|
||||||
shared_window.clone(),
|
shared_window.clone(),
|
||||||
blob_recycler.clone(),
|
|
||||||
Some(ledger_path),
|
Some(ledger_path),
|
||||||
node.sockets.gossip,
|
node.sockets.gossip,
|
||||||
exit.clone(),
|
exit.clone(),
|
||||||
@ -295,7 +288,6 @@ impl Fullnode {
|
|||||||
entry_height,
|
entry_height,
|
||||||
crdt.clone(),
|
crdt.clone(),
|
||||||
shared_window.clone(),
|
shared_window.clone(),
|
||||||
blob_recycler.clone(),
|
|
||||||
node.sockets
|
node.sockets
|
||||||
.replicate
|
.replicate
|
||||||
.iter()
|
.iter()
|
||||||
@ -330,7 +322,6 @@ impl Fullnode {
|
|||||||
.iter()
|
.iter()
|
||||||
.map(|s| s.try_clone().expect("Failed to clone transaction sockets"))
|
.map(|s| s.try_clone().expect("Failed to clone transaction sockets"))
|
||||||
.collect(),
|
.collect(),
|
||||||
&blob_recycler,
|
|
||||||
ledger_path,
|
ledger_path,
|
||||||
sigverify_disabled,
|
sigverify_disabled,
|
||||||
entry_height,
|
entry_height,
|
||||||
@ -344,7 +335,6 @@ impl Fullnode {
|
|||||||
crdt.clone(),
|
crdt.clone(),
|
||||||
shared_window.clone(),
|
shared_window.clone(),
|
||||||
entry_height,
|
entry_height,
|
||||||
blob_recycler.clone(),
|
|
||||||
entry_receiver,
|
entry_receiver,
|
||||||
tpu_exit,
|
tpu_exit,
|
||||||
);
|
);
|
||||||
@ -363,7 +353,6 @@ impl Fullnode {
|
|||||||
ncp,
|
ncp,
|
||||||
rpc_service,
|
rpc_service,
|
||||||
node_role,
|
node_role,
|
||||||
blob_recycler: blob_recycler.clone(),
|
|
||||||
ledger_path: ledger_path.to_owned(),
|
ledger_path: ledger_path.to_owned(),
|
||||||
exit,
|
exit,
|
||||||
replicate_socket: node.sockets.replicate,
|
replicate_socket: node.sockets.replicate,
|
||||||
@ -406,7 +395,6 @@ impl Fullnode {
|
|||||||
self.respond_socket
|
self.respond_socket
|
||||||
.try_clone()
|
.try_clone()
|
||||||
.expect("Failed to clone respond socket"),
|
.expect("Failed to clone respond socket"),
|
||||||
&self.blob_recycler,
|
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -416,7 +404,6 @@ impl Fullnode {
|
|||||||
entry_height,
|
entry_height,
|
||||||
self.crdt.clone(),
|
self.crdt.clone(),
|
||||||
self.shared_window.clone(),
|
self.shared_window.clone(),
|
||||||
self.blob_recycler.clone(),
|
|
||||||
self.replicate_socket
|
self.replicate_socket
|
||||||
.iter()
|
.iter()
|
||||||
.map(|s| s.try_clone().expect("Failed to clone replicate sockets"))
|
.map(|s| s.try_clone().expect("Failed to clone replicate sockets"))
|
||||||
|
@ -456,7 +456,7 @@ pub fn reconstruct_entries_from_blobs(blobs: Vec<SharedBlob>) -> Result<Vec<Entr
|
|||||||
|
|
||||||
for blob in blobs {
|
for blob in blobs {
|
||||||
let entry = {
|
let entry = {
|
||||||
let msg = blob.read().unwrap();
|
let msg = blob.read();
|
||||||
let msg_size = msg.get_size()?;
|
let msg_size = msg.get_size()?;
|
||||||
deserialize(&msg.data()[..msg_size])
|
deserialize(&msg.data()[..msg_size])
|
||||||
};
|
};
|
||||||
|
@ -39,6 +39,7 @@ pub mod payment_plan;
|
|||||||
pub mod record_stage;
|
pub mod record_stage;
|
||||||
pub mod recorder;
|
pub mod recorder;
|
||||||
pub mod recvmmsg;
|
pub mod recvmmsg;
|
||||||
|
pub mod recycler;
|
||||||
pub mod replicate_stage;
|
pub mod replicate_stage;
|
||||||
pub mod request;
|
pub mod request;
|
||||||
pub mod request_processor;
|
pub mod request_processor;
|
||||||
|
30
src/ncp.rs
30
src/ncp.rs
@ -1,7 +1,6 @@
|
|||||||
//! The `ncp` module implements the network control plane.
|
//! The `ncp` module implements the network control plane.
|
||||||
|
|
||||||
use crdt::Crdt;
|
use crdt::Crdt;
|
||||||
use packet::BlobRecycler;
|
|
||||||
use service::Service;
|
use service::Service;
|
||||||
use std::net::UdpSocket;
|
use std::net::UdpSocket;
|
||||||
use std::sync::atomic::{AtomicBool, Ordering};
|
use std::sync::atomic::{AtomicBool, Ordering};
|
||||||
@ -20,7 +19,6 @@ impl Ncp {
|
|||||||
pub fn new(
|
pub fn new(
|
||||||
crdt: &Arc<RwLock<Crdt>>,
|
crdt: &Arc<RwLock<Crdt>>,
|
||||||
window: SharedWindow,
|
window: SharedWindow,
|
||||||
blob_recycler: BlobRecycler,
|
|
||||||
ledger_path: Option<&str>,
|
ledger_path: Option<&str>,
|
||||||
gossip_socket: UdpSocket,
|
gossip_socket: UdpSocket,
|
||||||
exit: Arc<AtomicBool>,
|
exit: Arc<AtomicBool>,
|
||||||
@ -32,29 +30,19 @@ impl Ncp {
|
|||||||
&crdt.read().unwrap().id.as_ref()[..4],
|
&crdt.read().unwrap().id.as_ref()[..4],
|
||||||
gossip_socket.local_addr().unwrap()
|
gossip_socket.local_addr().unwrap()
|
||||||
);
|
);
|
||||||
let t_receiver = streamer::blob_receiver(
|
let t_receiver =
|
||||||
gossip_socket.clone(),
|
streamer::blob_receiver(gossip_socket.clone(), exit.clone(), request_sender);
|
||||||
exit.clone(),
|
|
||||||
blob_recycler.clone(),
|
|
||||||
request_sender,
|
|
||||||
);
|
|
||||||
let (response_sender, response_receiver) = channel();
|
let (response_sender, response_receiver) = channel();
|
||||||
let t_responder = streamer::responder(
|
let t_responder = streamer::responder("ncp", gossip_socket, response_receiver);
|
||||||
"ncp",
|
|
||||||
gossip_socket,
|
|
||||||
blob_recycler.clone(),
|
|
||||||
response_receiver,
|
|
||||||
);
|
|
||||||
let t_listen = Crdt::listen(
|
let t_listen = Crdt::listen(
|
||||||
crdt.clone(),
|
crdt.clone(),
|
||||||
window,
|
window,
|
||||||
ledger_path,
|
ledger_path,
|
||||||
blob_recycler.clone(),
|
|
||||||
request_receiver,
|
request_receiver,
|
||||||
response_sender.clone(),
|
response_sender.clone(),
|
||||||
exit.clone(),
|
exit.clone(),
|
||||||
);
|
);
|
||||||
let t_gossip = Crdt::gossip(crdt.clone(), blob_recycler, response_sender, exit.clone());
|
let t_gossip = Crdt::gossip(crdt.clone(), response_sender, exit.clone());
|
||||||
let thread_hdls = vec![t_receiver, t_responder, t_listen, t_gossip];
|
let thread_hdls = vec![t_receiver, t_responder, t_listen, t_gossip];
|
||||||
Ncp { exit, thread_hdls }
|
Ncp { exit, thread_hdls }
|
||||||
}
|
}
|
||||||
@ -80,7 +68,6 @@ impl Service for Ncp {
|
|||||||
mod tests {
|
mod tests {
|
||||||
use crdt::{Crdt, Node};
|
use crdt::{Crdt, Node};
|
||||||
use ncp::Ncp;
|
use ncp::Ncp;
|
||||||
use packet::BlobRecycler;
|
|
||||||
use std::sync::atomic::AtomicBool;
|
use std::sync::atomic::AtomicBool;
|
||||||
use std::sync::{Arc, RwLock};
|
use std::sync::{Arc, RwLock};
|
||||||
|
|
||||||
@ -93,14 +80,7 @@ mod tests {
|
|||||||
let crdt = Crdt::new(tn.info.clone()).expect("Crdt::new");
|
let crdt = Crdt::new(tn.info.clone()).expect("Crdt::new");
|
||||||
let c = Arc::new(RwLock::new(crdt));
|
let c = Arc::new(RwLock::new(crdt));
|
||||||
let w = Arc::new(RwLock::new(vec![]));
|
let w = Arc::new(RwLock::new(vec![]));
|
||||||
let d = Ncp::new(
|
let d = Ncp::new(&c, w, None, tn.sockets.gossip, exit.clone());
|
||||||
&c,
|
|
||||||
w,
|
|
||||||
BlobRecycler::default(),
|
|
||||||
None,
|
|
||||||
tn.sockets.gossip,
|
|
||||||
exit.clone(),
|
|
||||||
);
|
|
||||||
d.close().expect("thread join");
|
d.close().expect("thread join");
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
267
src/packet.rs
267
src/packet.rs
@ -4,6 +4,7 @@ use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
|
|||||||
use counter::Counter;
|
use counter::Counter;
|
||||||
use log::Level;
|
use log::Level;
|
||||||
use recvmmsg::{recv_mmsg, NUM_RCVMMSGS};
|
use recvmmsg::{recv_mmsg, NUM_RCVMMSGS};
|
||||||
|
use recycler;
|
||||||
use result::{Error, Result};
|
use result::{Error, Result};
|
||||||
use serde::Serialize;
|
use serde::Serialize;
|
||||||
use signature::Pubkey;
|
use signature::Pubkey;
|
||||||
@ -11,14 +12,13 @@ use std::fmt;
|
|||||||
use std::io;
|
use std::io;
|
||||||
use std::mem::size_of;
|
use std::mem::size_of;
|
||||||
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, UdpSocket};
|
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, UdpSocket};
|
||||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
use std::sync::atomic::AtomicUsize;
|
||||||
use std::sync::{Arc, Mutex, RwLock};
|
|
||||||
|
|
||||||
pub type SharedPackets = Arc<RwLock<Packets>>;
|
pub type SharedPackets = recycler::Recyclable<Packets>;
|
||||||
pub type SharedBlob = Arc<RwLock<Blob>>;
|
pub type SharedBlob = recycler::Recyclable<Blob>;
|
||||||
pub type SharedBlobs = Vec<SharedBlob>;
|
pub type SharedBlobs = Vec<SharedBlob>;
|
||||||
pub type PacketRecycler = Recycler<Packets>;
|
pub type PacketRecycler = recycler::Recycler<Packets>;
|
||||||
pub type BlobRecycler = Recycler<Blob>;
|
pub type BlobRecycler = recycler::Recycler<Blob>;
|
||||||
|
|
||||||
pub const NUM_PACKETS: usize = 1024 * 8;
|
pub const NUM_PACKETS: usize = 1024 * 8;
|
||||||
pub const BLOB_SIZE: usize = (64 * 1024 - 128); // wikipedia says there should be 20b for ipv4 headers
|
pub const BLOB_SIZE: usize = (64 * 1024 - 128); // wikipedia says there should be 20b for ipv4 headers
|
||||||
@ -63,14 +63,7 @@ impl Default for Packet {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub trait Reset {
|
impl recycler::Reset for Packet {
|
||||||
// Reset trait is an object that can re-initialize important parts
|
|
||||||
// of itself, similar to Default, but not necessarily a full clear
|
|
||||||
// also, we do it in-place.
|
|
||||||
fn reset(&mut self);
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Reset for Packet {
|
|
||||||
fn reset(&mut self) {
|
fn reset(&mut self) {
|
||||||
self.meta = Meta::default();
|
self.meta = Meta::default();
|
||||||
}
|
}
|
||||||
@ -130,7 +123,7 @@ impl Default for Packets {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Reset for Packets {
|
impl recycler::Reset for Packets {
|
||||||
fn reset(&mut self) {
|
fn reset(&mut self) {
|
||||||
for i in 0..self.packets.len() {
|
for i in 0..self.packets.len() {
|
||||||
self.packets[i].reset();
|
self.packets[i].reset();
|
||||||
@ -165,7 +158,7 @@ impl Default for Blob {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Reset for Blob {
|
impl recycler::Reset for Blob {
|
||||||
fn reset(&mut self) {
|
fn reset(&mut self) {
|
||||||
self.meta = Meta::default();
|
self.meta = Meta::default();
|
||||||
self.data[..BLOB_HEADER_SIZE].copy_from_slice(&[0u8; BLOB_HEADER_SIZE]);
|
self.data[..BLOB_HEADER_SIZE].copy_from_slice(&[0u8; BLOB_HEADER_SIZE]);
|
||||||
@ -178,118 +171,6 @@ pub enum BlobError {
|
|||||||
BadState,
|
BadState,
|
||||||
}
|
}
|
||||||
|
|
||||||
pub struct Recycler<T> {
    #[cfg_attr(feature = "cargo-clippy", allow(type_complexity))]
    gc: Arc<Mutex<Vec<(Arc<RwLock<T>>, &'static str)>>>,
    allocated_count: Arc<AtomicUsize>,
    recycled_count: Arc<AtomicUsize>,
    reuse_count: Arc<AtomicUsize>,
    skipped_count: Arc<AtomicUsize>,
    name: String,
}

impl<T: Default> Default for Recycler<T> {
    fn default() -> Recycler<T> {
        Recycler {
            gc: Arc::new(Mutex::new(vec![])),
            allocated_count: Arc::new(AtomicUsize::new(0)),
            recycled_count: Arc::new(AtomicUsize::new(0)),
            reuse_count: Arc::new(AtomicUsize::new(0)),
            skipped_count: Arc::new(AtomicUsize::new(0)),
            name: format!("? sz: {}", size_of::<T>()).to_string(),
        }
    }
}

impl<T: Default> Clone for Recycler<T> {
    fn clone(&self) -> Recycler<T> {
        Recycler {
            gc: self.gc.clone(),
            allocated_count: self.allocated_count.clone(),
            recycled_count: self.recycled_count.clone(),
            reuse_count: self.reuse_count.clone(),
            skipped_count: self.skipped_count.clone(),
            name: self.name.clone(),
        }
    }
}

fn inc_counter(x: &AtomicUsize) {
    x.fetch_add(1, Ordering::Relaxed);
}

impl<T: Default + Reset> Recycler<T> {
    pub fn set_name(&mut self, name: &'static str) {
        self.name = name.to_string();
    }

    pub fn allocate(&self) -> Arc<RwLock<T>> {
        let mut gc = self.gc.lock().expect("recycler lock in pb fn allocate");
        let gc_count = gc.len();

        loop {
            if let Some((x, who)) = gc.pop() {
                // Only return the item if this recycler is the last reference to it.
                // Remove this check once `T` holds a Weak reference back to this
                // recycler and implements `Drop`. At the time of this writing, Weak can't
                // be passed across threads ('alloc' is a nightly-only API), and so our
                // reference-counted recyclables are awkwardly being recycled by hand,
                // which allows this race condition to exist.
                if Arc::strong_count(&x) > 1 {
                    // Commenting out this message, is annoying for known use case of
                    // validator hanging onto a blob in the window, but also sending it over
                    // to retransmmit_request
                    //
                    // warn!("Recycled item still in use. Booting it.");
                    trace!(
                        "{} Recycled item from \"{}\" still in use. {} Booting it.",
                        self.name,
                        who,
                        Arc::strong_count(&x)
                    );
                    inc_counter(&self.skipped_count);
                    continue;
                }

                {
                    let mut w = x.write().unwrap();
                    w.reset();
                }
                inc_counter(&self.reuse_count);
                return x;
            } else {
                inc_counter(&self.allocated_count);
                if self.allocated_count.load(Ordering::Relaxed) % 2048 == 0 {
                    self.print_stats(gc_count);
                }
                return Arc::new(RwLock::new(Default::default()));
            }
        }
    }

    fn print_stats(&self, gc_count: usize) {
        info!(
            "{} recycler stats: allocated: {} reused: {} skipped: {} recycled: {} gc_count: {}",
            self.name,
            self.allocated_count.load(Ordering::Relaxed),
            self.reuse_count.load(Ordering::Relaxed),
            self.skipped_count.load(Ordering::Relaxed),
            self.recycled_count.load(Ordering::Relaxed),
            gc_count
        );
    }

    pub fn recycle(&self, x: Arc<RwLock<T>>, who: &'static str) {
        let mut gc = self.gc.lock().expect("recycler lock in pub fn recycle");
        inc_counter(&self.recycled_count);
        if self.recycled_count.load(Ordering::Relaxed) % 2048 == 0 {
            self.print_stats(0);
        }

        gc.push((x, who));
    }
}

impl Packets {
    fn run_read_from(&mut self, socket: &UdpSocket) -> Result<usize> {
        self.packets.resize(NUM_PACKETS, Packet::default());
@ -348,12 +229,9 @@ pub fn to_packets_chunked<T: Serialize>(
) -> Vec<SharedPackets> {
    let mut out = vec![];
    for x in xs.chunks(chunks) {
        let p = r.allocate();
        p.write()
            .unwrap()
            .packets
            .resize(x.len(), Default::default());
        for (i, o) in x.iter().zip(p.write().unwrap().packets.iter_mut()) {
        let mut p = r.allocate();
        p.write().packets.resize(x.len(), Default::default());
        for (i, o) in x.iter().zip(p.write().packets.iter_mut()) {
            let v = serialize(&i).expect("serialize request");
            let len = v.len();
            o.data[..len].copy_from_slice(&v);
@ -375,7 +253,7 @@ pub fn to_blob<T: Serialize>(
) -> Result<SharedBlob> {
    let blob = blob_recycler.allocate();
    {
        let mut b = blob.write().unwrap();
        let mut b = blob.write();
        let v = serialize(&resp)?;
        let len = v.len();
        assert!(len <= BLOB_SIZE);
@ -492,7 +370,7 @@ impl Blob {
    }

    pub fn recv_blob(socket: &UdpSocket, r: &SharedBlob) -> io::Result<()> {
        let mut p = r.write().expect("'r' write lock in pub fn recv_from");
        let mut p = r.write();
        trace!("receiving on {}", socket.local_addr().unwrap());

        let (nrecv, from) = socket.recv_from(&mut p.data)?;
@ -517,14 +395,12 @@ impl Blob {
            match Blob::recv_blob(socket, &r) {
                Err(_) if i > 0 => {
                    trace!("got {:?} messages on {}", i, socket.local_addr().unwrap());
                    re.recycle(r, "Bob::recv_from::i>0");
                    break;
                }
                Err(e) => {
                    if e.kind() != io::ErrorKind::WouldBlock {
                        info!("recv_from err {:?}", e);
                    }
                    re.recycle(r, "Blob::recv_from::empty");
                    return Err(Error::IO(e));
                }
                Ok(()) => if i == 0 {
@ -535,10 +411,10 @@ impl Blob {
        }
        Ok(v)
    }
    pub fn send_to(re: &BlobRecycler, socket: &UdpSocket, v: SharedBlobs) -> Result<()> {
    pub fn send_to(socket: &UdpSocket, v: SharedBlobs) -> Result<()> {
        for r in v {
            {
                let p = r.read().expect("'r' read lock in pub fn send_to");
                let p = r.read();
                let a = p.meta.addr();
                if let Err(e) = socket.send_to(&p.data[..p.meta.size], &a) {
                    warn!(
@ -548,7 +424,6 @@ impl Blob {
                    Err(e)?;
                }
            }
            re.recycle(r, "send_to");
        }
        Ok(())
    }
@ -557,87 +432,15 @@ impl Blob {
#[cfg(test)]
mod tests {
    use packet::{
        to_packets, Blob, BlobRecycler, Meta, Packet, PacketRecycler, Packets, Recycler, Reset,
        to_packets, Blob, BlobRecycler, Meta, Packet, PacketRecycler, Packets, BLOB_HEADER_SIZE,
        BLOB_HEADER_SIZE, NUM_PACKETS, PACKET_DATA_SIZE,
        NUM_PACKETS, PACKET_DATA_SIZE,
    };
    use recycler::Reset;
    use request::Request;
    use std::io;
    use std::io::Write;
    use std::net::UdpSocket;
    use std::sync::Arc;

    #[test]
    pub fn packet_recycler_test() {
        let r = PacketRecycler::default();
        let p = r.allocate();
        r.recycle(p, "recycler_test");
        assert_eq!(r.gc.lock().unwrap().len(), 1);
        let _ = r.allocate();
        assert_eq!(r.gc.lock().unwrap().len(), 0);
    }

    impl Reset for u8 {
        fn reset(&mut self) {
            *self = Default::default();
        }
    }

    #[test]
    pub fn test_leaked_recyclable() {
        // Ensure that the recycler won't return an item
        // that is still referenced outside the recycler.
        let r = Recycler::<u8>::default();
        let x0 = r.allocate();
        r.recycle(x0.clone(), "leaked_recyclable:1");
        assert_eq!(Arc::strong_count(&x0), 2);
        assert_eq!(r.gc.lock().unwrap().len(), 1);

        let x1 = r.allocate();
        assert_eq!(Arc::strong_count(&x1), 1);
        assert_eq!(r.gc.lock().unwrap().len(), 0);
    }

    #[test]
    pub fn test_leaked_recyclable_recursion() {
        // In the case of a leaked recyclable, ensure the recycler drops its lock before recursing.
        let r = Recycler::<u8>::default();
        let x0 = r.allocate();
        let x1 = r.allocate();
        r.recycle(x0, "leaked_recyclable_recursion:1"); // <-- allocate() of this will require locking the recycler's stack.
        r.recycle(x1.clone(), "leaked_recyclable_recursion:2"); // <-- allocate() of this will cause it to be dropped and recurse.
        assert_eq!(Arc::strong_count(&x1), 2);
        assert_eq!(r.gc.lock().unwrap().len(), 2);

        r.allocate(); // Ensure lock is released before recursing.
        assert_eq!(r.gc.lock().unwrap().len(), 0);
    }

    #[test]
    pub fn test_recycling_is_happening() {
        // Test the case in allocate() which should return a re-used object and not allocate a new
        // one.
        let r = PacketRecycler::default();
        let x0 = r.allocate();
        {
            x0.write().unwrap().packets.resize(1, Packet::default());
        }
        r.recycle(x0, "recycle");
        let x1 = r.allocate();
        assert_ne!(
            x1.read().unwrap().packets.len(),
            Packets::default().packets.len()
        );
    }

    #[test]
    pub fn blob_recycler_test() {
        let r = BlobRecycler::default();
        let p = r.allocate();
        r.recycle(p, "blob_recycler_test");
        assert_eq!(r.gc.lock().unwrap().len(), 1);
        let _ = r.allocate();
        assert_eq!(r.gc.lock().unwrap().len(), 0);
    }
    #[test]
    pub fn packet_send_recv() {
        let reader = UdpSocket::bind("127.0.0.1:0").expect("bind");
@ -646,14 +449,14 @@ mod tests {
        let saddr = sender.local_addr().unwrap();
        let r = PacketRecycler::default();
        let p = r.allocate();
        p.write().unwrap().packets.resize(10, Packet::default());
        p.write().packets.resize(10, Packet::default());
        for m in p.write().unwrap().packets.iter_mut() {
        for m in p.write().packets.iter_mut() {
            m.meta.set_addr(&addr);
            m.meta.size = PACKET_DATA_SIZE;
        }
        p.read().unwrap().send_to(&sender).unwrap();
        p.read().send_to(&sender).unwrap();
        p.write().unwrap().recv_from(&reader).unwrap();
        p.write().recv_from(&reader).unwrap();
        for m in p.write().unwrap().packets.iter_mut() {
        for m in p.write().packets.iter_mut() {
            assert_eq!(m.meta.size, PACKET_DATA_SIZE);
            assert_eq!(m.meta.addr(), saddr);
        }
@ -667,16 +470,16 @@ mod tests {
        let re = PacketRecycler::default();
        let rv = to_packets(&re, &vec![tx.clone(); 1]);
        assert_eq!(rv.len(), 1);
        assert_eq!(rv[0].read().unwrap().packets.len(), 1);
        assert_eq!(rv[0].read().packets.len(), 1);

        let rv = to_packets(&re, &vec![tx.clone(); NUM_PACKETS]);
        assert_eq!(rv.len(), 1);
        assert_eq!(rv[0].read().unwrap().packets.len(), NUM_PACKETS);
        assert_eq!(rv[0].read().packets.len(), NUM_PACKETS);

        let rv = to_packets(&re, &vec![tx.clone(); NUM_PACKETS + 1]);
        assert_eq!(rv.len(), 2);
        assert_eq!(rv[0].read().unwrap().packets.len(), NUM_PACKETS);
        assert_eq!(rv[0].read().packets.len(), NUM_PACKETS);
        assert_eq!(rv[1].read().unwrap().packets.len(), 1);
        assert_eq!(rv[1].read().packets.len(), 1);
    }

    #[test]
@ -687,15 +490,15 @@ mod tests {
        let sender = UdpSocket::bind("127.0.0.1:0").expect("bind");
        let r = BlobRecycler::default();
        let p = r.allocate();
        p.write().unwrap().meta.set_addr(&addr);
        p.write().meta.set_addr(&addr);
        p.write().unwrap().meta.size = 1024;
        p.write().meta.size = 1024;
        let v = vec![p];
        Blob::send_to(&r, &sender, v).unwrap();
        Blob::send_to(&sender, v).unwrap();
        trace!("send_to");
        let rv = Blob::recv_from(&r, &reader).unwrap();
        trace!("recv_from");
        assert_eq!(rv.len(), 1);
        assert_eq!(rv[0].write().unwrap().meta.size, 1024);
        assert_eq!(rv[0].read().meta.size, 1024);
    }

    #[cfg(all(feature = "ipv6", test))]
@ -706,14 +509,14 @@ mod tests {
        let sender = UdpSocket::bind("[::1]:0").expect("bind");
        let r = BlobRecycler::default();
        let p = r.allocate();
        p.write().unwrap().meta.set_addr(&addr);
        p.as_mut().meta.set_addr(&addr);
        p.write().unwrap().meta.size = 1024;
        p.as_mut().meta.size = 1024;
        let mut v = VecDeque::default();
        v.push_back(p);
        Blob::send_to(&r, &sender, &mut v).unwrap();
        let mut rv = Blob::recv_from(&r, &reader).unwrap();
        let rp = rv.pop_front().unwrap();
        assert_eq!(rp.write().unwrap().meta.size, 1024);
        assert_eq!(rp.as_mut().meta.size, 1024);
        r.recycle(rp, "blob_ip6_send_recv");
    }

@ -39,7 +39,8 @@ impl RecordStage {
            .name("solana-record-stage".to_string())
            .spawn(move || {
                let mut recorder = Recorder::new(start_hash);
                let _ = Self::process_signals(&mut recorder, &signal_receiver, bank, &entry_sender);
                let _ =
                    Self::process_signals(&mut recorder, &signal_receiver, &bank, &entry_sender);
            }).unwrap();

        (RecordStage { thread_hdl }, entry_receiver)
@ -65,7 +66,7 @@ impl RecordStage {
                    start_time,
                    tick_duration,
                    &signal_receiver,
                    bank.clone(),
                    &bank,
                    &entry_sender,
                ).is_err()
                {
@ -92,7 +93,7 @@ impl RecordStage {
        let txs_len = txs.len();
        let entries = recorder.record(txs);

        for entry in entries.iter() {
        for entry in &entries {
            if !entry.has_more {
                bank.register_entry_id(&entry.id);
            }
@ -110,12 +111,12 @@ impl RecordStage {
    fn process_signals(
        recorder: &mut Recorder,
        receiver: &Receiver<Signal>,
        bank: Arc<Bank>,
        bank: &Arc<Bank>,
        sender: &Sender<Vec<Entry>>,
    ) -> Result<(), ()> {
        loop {
            match receiver.recv() {
                Ok(signal) => Self::process_signal(signal, &bank, recorder, sender)?,
                Ok(signal) => Self::process_signal(signal, bank, recorder, sender)?,
                Err(RecvError) => return Err(()),
            }
        }
@ -126,7 +127,7 @@ impl RecordStage {
        start_time: Instant,
        tick_duration: Duration,
        receiver: &Receiver<Signal>,
        bank: Arc<Bank>,
        bank: &Arc<Bank>,
        sender: &Sender<Vec<Entry>>,
    ) -> Result<(), ()> {
        loop {
173 src/recycler.rs Normal file
@ -0,0 +1,173 @@
use std::fmt;
use std::sync::{Arc, Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard};

/// A function that leaves the given type in the same state as Default,
/// but starts with an existing type instead of allocating a new one.
pub trait Reset {
    fn reset(&mut self);
}

/// An value that's returned to its heap once dropped.
pub struct Recyclable<T: Default + Reset> {
    val: Arc<RwLock<T>>,
    landfill: Arc<Mutex<Vec<Arc<RwLock<T>>>>>,
}

impl<T: Default + Reset> Recyclable<T> {
    pub fn read(&self) -> RwLockReadGuard<T> {
        self.val.read().unwrap()
    }
    pub fn write(&self) -> RwLockWriteGuard<T> {
        self.val.write().unwrap()
    }
}

impl<T: Default + Reset> Drop for Recyclable<T> {
    fn drop(&mut self) {
        if Arc::strong_count(&self.val) == 1 {
            // this isn't thread safe, it will allow some concurrent drops to leak and not recycle
            // if that happens the allocator will end up allocating from the heap
            self.landfill.lock().unwrap().push(self.val.clone());
        }
    }
}

impl<T: Default + Reset> Clone for Recyclable<T> {
    fn clone(&self) -> Self {
        Recyclable {
            val: self.val.clone(),
            landfill: self.landfill.clone(),
        }
    }
}

impl<T: fmt::Debug + Default + Reset> fmt::Debug for Recyclable<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "Recyclable {:?}", &self.read())
    }
}

/// An object to minimize memory allocations. Use `allocate()`
/// to get recyclable values of type `T`. When those recyclables
/// are dropped, they're returned to the recycler. The next time
/// `allocate()` is called, the value will be pulled from the
/// recycler instead being allocated from memory.

pub struct Recycler<T: Default + Reset> {
    landfill: Arc<Mutex<Vec<Arc<RwLock<T>>>>>,
}
impl<T: Default + Reset> Clone for Recycler<T> {
    fn clone(&self) -> Self {
        Recycler {
            landfill: self.landfill.clone(),
        }
    }
}

impl<T: Default + Reset> Default for Recycler<T> {
    fn default() -> Self {
        Recycler {
            landfill: Arc::new(Mutex::new(vec![])),
        }
    }
}

impl<T: Default + Reset> Recycler<T> {
    pub fn allocate(&self) -> Recyclable<T> {
        let val = self
            .landfill
            .lock()
            .unwrap()
            .pop()
            .map(|val| {
                val.write().unwrap().reset();
                val
            }).unwrap_or_else(|| Arc::new(RwLock::new(Default::default())));
        Recyclable {
            val,
            landfill: self.landfill.clone(),
        }
    }
    pub fn recycle(&self, r: Recyclable<T>, _name: &str) {
        drop(r)
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::mem;
    use std::sync::mpsc::channel;

    #[derive(Default)]
    struct Foo {
        x: u8,
    }

    impl Reset for Foo {
        fn reset(&mut self) {
            self.x = 0;
        }
    }

    #[test]
    fn test_allocate() {
        let recycler: Recycler<Foo> = Recycler::default();
        let r = recycler.allocate();
        assert_eq!(r.read().x, 0);
    }

    #[test]
    fn test_recycle() {
        let recycler: Recycler<Foo> = Recycler::default();

        {
            let foo = recycler.allocate();
            foo.write().x = 1;
        }
        assert_eq!(recycler.landfill.lock().unwrap().len(), 1);

        let foo = recycler.allocate();
        assert_eq!(foo.read().x, 0);
        assert_eq!(recycler.landfill.lock().unwrap().len(), 0);
    }
    #[test]
    fn test_channel() {
        let recycler: Recycler<Foo> = Recycler::default();
        let (sender, receiver) = channel();
        {
            let foo = recycler.allocate();
            foo.write().x = 1;
            sender.send(foo).unwrap();
            assert_eq!(recycler.landfill.lock().unwrap().len(), 0);
        }
        {
            let foo = receiver.recv().unwrap();
            assert_eq!(foo.read().x, 1);
            assert_eq!(recycler.landfill.lock().unwrap().len(), 0);
        }
        assert_eq!(recycler.landfill.lock().unwrap().len(), 1);
    }
    #[test]
    fn test_window() {
        let recycler: Recycler<Foo> = Recycler::default();
        let mut window = vec![None];
        let (sender, receiver) = channel();
        {
            // item is in the window while its in the pipeline
            // which is used to serve requests from other threads
            let item = recycler.allocate();
            item.write().x = 1;
            window[0] = Some(item);
            sender.send(window[0].clone().unwrap()).unwrap();
        }
        {
            let foo = receiver.recv().unwrap();
            assert_eq!(foo.read().x, 1);
            let old = mem::replace(&mut window[0], None).unwrap();
            assert_eq!(old.read().x, 1);
        }
        // only one thing should be in the landfill at the end
        assert_eq!(recycler.landfill.lock().unwrap().len(), 1);
    }
}
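The doc comment above spells out the intended flow: `allocate()` hands out a `Recyclable<T>`, and when the last clone of that recyclable is dropped, its inner value is pushed back onto the recycler's landfill for reuse. A minimal usage sketch, using only the `Foo` test type and the API defined in this new module (illustration only, not part of the commit):

// Sketch: relies on the Recycler, Recyclable, and Reset items from src/recycler.rs above,
// plus the Foo test type from its tests module.
fn recycler_round_trip() {
    let recycler: Recycler<Foo> = Recycler::default();
    {
        // First allocation comes from the heap; the landfill starts empty.
        let foo = recycler.allocate();
        foo.write().x = 42;
    } // `foo` dropped here: the sole strong reference returns the value to the landfill.

    // The next allocate() reuses the recycled value, and reset() has zeroed it.
    let again = recycler.allocate();
    assert_eq!(again.read().x, 0);
}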
@ -5,7 +5,6 @@ use counter::Counter;
use crdt::Crdt;
use ledger::{reconstruct_entries_from_blobs, Block, LedgerWriter};
use log::Level;
use packet::BlobRecycler;
use result::{Error, Result};
use service::Service;
use signature::Keypair;
@ -30,7 +29,6 @@ impl ReplicateStage {
    fn replicate_requests(
        bank: &Arc<Bank>,
        crdt: &Arc<RwLock<Crdt>>,
        blob_recycler: &BlobRecycler,
        window_receiver: &BlobReceiver,
        ledger_writer: Option<&mut LedgerWriter>,
    ) -> Result<()> {
@ -40,14 +38,10 @@ impl ReplicateStage {
        while let Ok(mut more) = window_receiver.try_recv() {
            blobs.append(&mut more);
        }
        let entries = reconstruct_entries_from_blobs(blobs.clone())?;
        let entries = reconstruct_entries_from_blobs(blobs)?;

        let res = bank.process_entries(entries.clone());

        for blob in blobs {
            blob_recycler.recycle(blob, "replicate_requests");
        }

        {
            let mut wcrdt = crdt.write().unwrap();
            wcrdt.insert_votes(&entries.votes());
@ -70,41 +64,25 @@ impl ReplicateStage {
        keypair: Arc<Keypair>,
        bank: Arc<Bank>,
        crdt: Arc<RwLock<Crdt>>,
        blob_recycler: BlobRecycler,
        window_receiver: BlobReceiver,
        ledger_path: Option<&str>,
        exit: Arc<AtomicBool>,
    ) -> Self {
        let (vote_blob_sender, vote_blob_receiver) = channel();
        let send = UdpSocket::bind("0.0.0.0:0").expect("bind");
        let t_responder = responder(
            "replicate_stage",
            Arc::new(send),
            blob_recycler.clone(),
            vote_blob_receiver,
        );
        let t_responder = responder("replicate_stage", Arc::new(send), vote_blob_receiver);

        let vote_stage = VoteStage::new(
            keypair,
            bank.clone(),
            crdt.clone(),
            blob_recycler.clone(),
            vote_blob_sender,
            exit,
        );
        let vote_stage =
            VoteStage::new(keypair, bank.clone(), crdt.clone(), vote_blob_sender, exit);

        let mut ledger_writer = ledger_path.map(|p| LedgerWriter::open(p, false).unwrap());

        let t_replicate = Builder::new()
            .name("solana-replicate-stage".to_string())
            .spawn(move || loop {
                if let Err(e) = Self::replicate_requests(
                    &bank,
                    &crdt,
                    &blob_recycler,
                    &window_receiver,
                    ledger_writer.as_mut(),
                ) {
                if let Err(e) =
                    Self::replicate_requests(&bank, &crdt, &window_receiver, ledger_writer.as_mut())
                {
                    match e {
                        Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => break,
                        Error::RecvTimeoutError(RecvTimeoutError::Timeout) => (),
@ -49,7 +49,7 @@ impl RequestStage {
        let mut reqs_len = 0;
        let proc_start = Instant::now();
        for msgs in batch {
            let reqs: Vec<_> = Self::deserialize_requests(&msgs.read().unwrap())
            let reqs: Vec<_> = Self::deserialize_requests(&msgs.read())
                .into_iter()
                .filter_map(|x| x)
                .collect();
@ -80,11 +80,11 @@ impl RequestStage {
    pub fn new(
        request_processor: RequestProcessor,
        packet_receiver: Receiver<SharedPackets>,
        packet_recycler: PacketRecycler,
        blob_recycler: BlobRecycler,
    ) -> (Self, BlobReceiver) {
        let packet_recycler = PacketRecycler::default();
        let request_processor = Arc::new(request_processor);
        let request_processor_ = request_processor.clone();
        let blob_recycler = BlobRecycler::default();
        let (blob_sender, blob_receiver) = channel();
        let thread_hdl = Builder::new()
            .name("solana-request-stage".to_string())
@ -3,7 +3,6 @@
use counter::Counter;
use crdt::Crdt;
use log::Level;
use packet::BlobRecycler;
use result::{Error, Result};
use service::Service;
use std::net::UdpSocket;
@ -17,25 +16,15 @@ use streamer::BlobReceiver;
use window::SharedWindow;
use window_service::window_service;

fn retransmit(
    crdt: &Arc<RwLock<Crdt>>,
    recycler: &BlobRecycler,
    r: &BlobReceiver,
    sock: &UdpSocket,
) -> Result<()> {
fn retransmit(crdt: &Arc<RwLock<Crdt>>, r: &BlobReceiver, sock: &UdpSocket) -> Result<()> {
    let timer = Duration::new(1, 0);
    let mut dq = r.recv_timeout(timer)?;
    while let Ok(mut nq) = r.try_recv() {
        dq.append(&mut nq);
    }
    {
        for b in &dq {
            Crdt::retransmit(&crdt, b, sock)?;
        }
    }
    for b in dq {
        recycler.recycle(b, "retransmit");
    }
    for b in &mut dq {
        Crdt::retransmit(&crdt, b, sock)?;
    }
    Ok(())
}

@ -47,18 +36,13 @@ fn retransmit(
/// * `crdt` - This structure needs to be updated and populated by the bank and via gossip.
/// * `recycler` - Blob recycler.
/// * `r` - Receive channel for blobs to be retransmitted to all the layer 1 nodes.
fn retransmitter(
    sock: Arc<UdpSocket>,
    crdt: Arc<RwLock<Crdt>>,
    recycler: BlobRecycler,
    r: BlobReceiver,
) -> JoinHandle<()> {
fn retransmitter(sock: Arc<UdpSocket>, crdt: Arc<RwLock<Crdt>>, r: BlobReceiver) -> JoinHandle<()> {
    Builder::new()
        .name("solana-retransmitter".to_string())
        .spawn(move || {
            trace!("retransmitter started");
            loop {
                if let Err(e) = retransmit(&crdt, &recycler, &r, &sock) {
                if let Err(e) = retransmit(&crdt, &r, &sock) {
                    match e {
                        Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => break,
                        Error::RecvTimeoutError(RecvTimeoutError::Timeout) => (),
@ -83,23 +67,16 @@ impl RetransmitStage {
        entry_height: u64,
        retransmit_socket: Arc<UdpSocket>,
        repair_socket: Arc<UdpSocket>,
        blob_recycler: &BlobRecycler,
        fetch_stage_receiver: BlobReceiver,
    ) -> (Self, BlobReceiver) {
        let (retransmit_sender, retransmit_receiver) = channel();

        let t_retransmit = retransmitter(
            retransmit_socket,
            crdt.clone(),
            blob_recycler.clone(),
            retransmit_receiver,
        );
        let t_retransmit = retransmitter(retransmit_socket, crdt.clone(), retransmit_receiver);
        let (blob_sender, blob_receiver) = channel();
        let t_window = window_service(
            crdt.clone(),
            window,
            entry_height,
            blob_recycler.clone(),
            fetch_stage_receiver,
            blob_sender,
            retransmit_sender,
31 src/rpu.rs
@ -24,7 +24,6 @@
//! ```

use bank::Bank;
use packet::{BlobRecycler, PacketRecycler};
use request_processor::RequestProcessor;
use request_stage::RequestStage;
use service::Service;
@ -42,37 +41,15 @@ pub struct Rpu {
}

impl Rpu {
    pub fn new(
        bank: &Arc<Bank>,
        requests_socket: UdpSocket,
        respond_socket: UdpSocket,
        blob_recycler: &BlobRecycler,
    ) -> Self {
    pub fn new(bank: &Arc<Bank>, requests_socket: UdpSocket, respond_socket: UdpSocket) -> Self {
        let exit = Arc::new(AtomicBool::new(false));
        let mut packet_recycler = PacketRecycler::default();
        packet_recycler.set_name("rpu::Packet");
        let (packet_sender, packet_receiver) = channel();
        let t_receiver = streamer::receiver(
            Arc::new(requests_socket),
            exit.clone(),
            packet_recycler.clone(),
            packet_sender,
        );
        let t_receiver = streamer::receiver(Arc::new(requests_socket), exit.clone(), packet_sender);

        let request_processor = RequestProcessor::new(bank.clone());
        let (request_stage, blob_receiver) = RequestStage::new(
            request_processor,
            packet_receiver,
            packet_recycler.clone(),
            blob_recycler.clone(),
        );
        let (request_stage, blob_receiver) = RequestStage::new(request_processor, packet_receiver);

        let t_responder = streamer::responder(
            "rpu",
            Arc::new(respond_socket),
            blob_recycler.clone(),
            blob_receiver,
        );
        let t_responder = streamer::responder("rpu", Arc::new(respond_socket), blob_receiver);

        let thread_hdls = vec![t_receiver, t_responder];

@ -72,10 +72,7 @@ fn verify_packet_disabled(_packet: &Packet) -> u8 {
}

fn batch_size(batches: &[SharedPackets]) -> usize {
    batches
        .iter()
        .map(|p| p.read().unwrap().packets.len())
        .sum()
    batches.iter().map(|p| p.read().packets.len()).sum()
}

#[cfg(not(feature = "cuda"))]
@ -89,14 +86,8 @@ pub fn ed25519_verify_cpu(batches: &[SharedPackets]) -> Vec<Vec<u8>> {
    info!("CPU ECDSA for {}", batch_size(batches));
    let rv = batches
        .into_par_iter()
        .map(|p| {
            p.read()
                .expect("'p' read lock in ed25519_verify")
                .packets
                .par_iter()
                .map(verify_packet)
                .collect()
        }).collect();
        .map(|p| p.read().packets.par_iter().map(verify_packet).collect())
        .collect();
    inc_new_counter_info!("ed25519_verify_cpu", count);
    rv
}
@ -109,7 +100,6 @@ pub fn ed25519_verify_disabled(batches: &[SharedPackets]) -> Vec<Vec<u8>> {
        .into_par_iter()
        .map(|p| {
            p.read()
                .expect("'p' read lock in ed25519_verify")
                .packets
                .par_iter()
                .map(verify_packet_disabled)
@ -151,11 +141,7 @@ pub fn ed25519_verify(batches: &[SharedPackets]) -> Vec<Vec<u8>> {
    let mut rvs = Vec::new();

    for packets in batches {
        locks.push(
            packets
                .read()
                .expect("'packets' read lock in pub fn ed25519_verify"),
        );
        locks.push(packets.read());
    }
    let mut num = 0;
    for p in locks {
@ -209,9 +195,8 @@ pub fn ed25519_verify(batches: &[SharedPackets]) -> Vec<Vec<u8>> {
#[cfg(test)]
mod tests {
    use bincode::serialize;
    use packet::{Packet, Packets, SharedPackets};
    use packet::{Packet, PacketRecycler};
    use sigverify;
    use std::sync::RwLock;
    use transaction::Transaction;
    use transaction::{memfind, test_tx};

@ -242,13 +227,18 @@ mod tests {
    }

    // generate packet vector
    let mut packets = Packets::default();
    packets.packets = Vec::new();
    for _ in 0..n {
        packets.packets.push(packet.clone());
    }
    let shared_packets = SharedPackets::new(RwLock::new(packets));
    let batches = vec![shared_packets.clone(), shared_packets.clone()];
    let packet_recycler = PacketRecycler::default();
    let batches: Vec<_> = (0..2)
        .map(|_| {
            let packets = packet_recycler.allocate();
            packets.write().packets.resize(0, Default::default());
            for _ in 0..n {
                packets.write().packets.push(packet.clone());
            }
            assert_eq!(packets.read().packets.len(), n);
            packets
        }).collect();
    assert_eq!(batches.len(), 2);

    // verify packets
    let ans = sigverify::ed25519_verify(&batches);
@ -23,10 +23,7 @@ fn recv_loop(
    loop {
        let msgs = re.allocate();
        loop {
            let result = msgs
                .write()
                .expect("write lock in fn recv_loop")
                .recv_from(sock);
            let result = msgs.write().recv_from(sock);
            match result {
                Ok(()) => {
                    channel.send(msgs)?;
@ -34,7 +31,6 @@ fn recv_loop(
                }
                Err(_) => {
                    if exit.load(Ordering::Relaxed) {
                        re.recycle(msgs, "recv_loop");
                        return Ok(());
                    }
                }
@ -46,10 +42,10 @@ fn recv_loop(
pub fn receiver(
    sock: Arc<UdpSocket>,
    exit: Arc<AtomicBool>,
    recycler: PacketRecycler,
    packet_sender: PacketSender,
) -> JoinHandle<()> {
    let res = sock.set_read_timeout(Some(Duration::new(1, 0)));
    let recycler = PacketRecycler::default();
    if res.is_err() {
        panic!("streamer::receiver set_read_timeout error");
    }
@ -61,10 +57,10 @@ pub fn receiver(
        }).unwrap()
}

fn recv_send(sock: &UdpSocket, recycler: &BlobRecycler, r: &BlobReceiver) -> Result<()> {
fn recv_send(sock: &UdpSocket, r: &BlobReceiver) -> Result<()> {
    let timer = Duration::new(1, 0);
    let msgs = r.recv_timeout(timer)?;
    Blob::send_to(recycler, sock, msgs)?;
    Blob::send_to(sock, msgs)?;
    Ok(())
}

@ -72,11 +68,11 @@ pub fn recv_batch(recvr: &PacketReceiver) -> Result<(Vec<SharedPackets>, usize)>
    let timer = Duration::new(1, 0);
    let msgs = recvr.recv_timeout(timer)?;
    trace!("got msgs");
    let mut len = msgs.read().unwrap().packets.len();
    let mut len = msgs.read().packets.len();
    let mut batch = vec![msgs];
    while let Ok(more) = recvr.try_recv() {
        trace!("got more msgs");
        len += more.read().unwrap().packets.len();
        len += more.read().packets.len();
        batch.push(more);

        if len > 100_000 {
@ -87,16 +83,11 @@ pub fn recv_batch(recvr: &PacketReceiver) -> Result<(Vec<SharedPackets>, usize)>
    Ok((batch, len))
}

pub fn responder(
    name: &'static str,
    sock: Arc<UdpSocket>,
    recycler: BlobRecycler,
    r: BlobReceiver,
) -> JoinHandle<()> {
pub fn responder(name: &'static str, sock: Arc<UdpSocket>, r: BlobReceiver) -> JoinHandle<()> {
    Builder::new()
        .name(format!("solana-responder-{}", name))
        .spawn(move || loop {
            if let Err(e) = recv_send(&sock, &recycler, &r) {
            if let Err(e) = recv_send(&sock, &r) {
                match e {
                    Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => break,
                    Error::RecvTimeoutError(RecvTimeoutError::Timeout) => (),
@ -117,17 +108,13 @@ fn recv_blobs(recycler: &BlobRecycler, sock: &UdpSocket, s: &BlobSender) -> Resu
    Ok(())
}

pub fn blob_receiver(
    sock: Arc<UdpSocket>,
    exit: Arc<AtomicBool>,
    recycler: BlobRecycler,
    s: BlobSender,
) -> JoinHandle<()> {
pub fn blob_receiver(sock: Arc<UdpSocket>, exit: Arc<AtomicBool>, s: BlobSender) -> JoinHandle<()> {
    //DOCUMENTED SIDE-EFFECT
    //1 second timeout on socket read
    let timer = Duration::new(1, 0);
    sock.set_read_timeout(Some(timer))
        .expect("set socket timeout");
    let recycler = BlobRecycler::default();
    Builder::new()
        .name("solana-blob_receiver".to_string())
        .spawn(move || loop {
@ -140,7 +127,7 @@ pub fn blob_receiver(

#[cfg(test)]
mod test {
    use packet::{Blob, BlobRecycler, Packet, PacketRecycler, Packets, PACKET_DATA_SIZE};
    use packet::{Blob, BlobRecycler, Packet, Packets, PACKET_DATA_SIZE};
    use std::io;
    use std::io::Write;
    use std::net::UdpSocket;
@ -155,8 +142,8 @@ mod test {
        for _t in 0..5 {
            let timer = Duration::new(1, 0);
            match r.recv_timeout(timer) {
                Ok(m) => *num += m.read().unwrap().packets.len(),
                Ok(m) => *num += m.read().packets.len(),
                e => info!("error {:?}", e),
                _ => info!("get_msgs error"),
            }
            if *num == 10 {
                break;
@ -177,28 +164,17 @@ mod test {
        let addr = read.local_addr().unwrap();
        let send = UdpSocket::bind("127.0.0.1:0").expect("bind");
        let exit = Arc::new(AtomicBool::new(false));
        let pack_recycler = PacketRecycler::default();
        let resp_recycler = BlobRecycler::default();
        let (s_reader, r_reader) = channel();
        let t_receiver = receiver(
            Arc::new(read),
            exit.clone(),
            pack_recycler.clone(),
            s_reader,
        );
        let t_receiver = receiver(Arc::new(read), exit.clone(), s_reader);
        let t_responder = {
            let (s_responder, r_responder) = channel();
            let t_responder = responder(
                "streamer_send_test",
                Arc::new(send),
                resp_recycler.clone(),
                r_responder,
            );
            let t_responder = responder("streamer_send_test", Arc::new(send), r_responder);
            let mut msgs = Vec::new();
            for i in 0..10 {
                let b = resp_recycler.allocate();
                let mut b = resp_recycler.allocate();
                {
                    let mut w = b.write().unwrap();
                    let mut w = b.write();
                    w.data[0] = i as u8;
                    w.meta.size = PACKET_DATA_SIZE;
                    w.meta.set_addr(&addr);
@ -9,7 +9,6 @@ use crdt::{Crdt, CrdtError, NodeInfo};
use hash::Hash;
use log::Level;
use ncp::Ncp;
use packet::BlobRecycler;
use request::{Request, Response};
use result::{Error, Result};
use signature::{Keypair, Pubkey, Signature};
@ -375,14 +374,7 @@ pub fn poll_gossip_for_leader(leader_ncp: SocketAddr, timeout: Option<u64>) -> R
    let my_addr = gossip_socket.local_addr().unwrap();
    let crdt = Arc::new(RwLock::new(Crdt::new(node).expect("Crdt::new")));
    let window = Arc::new(RwLock::new(vec![]));
    let ncp = Ncp::new(
        &crdt.clone(),
        window,
        BlobRecycler::default(),
        None,
        gossip_socket,
        exit.clone(),
    );
    let ncp = Ncp::new(&crdt.clone(), window, None, gossip_socket, exit.clone());

    let leader_entry_point = NodeInfo::new_entry_point(&leader_ncp);
    crdt.write().unwrap().insert(&leader_entry_point);
11 src/tpu.rs
@ -30,7 +30,6 @@ use banking_stage::BankingStage;
use crdt::Crdt;
use entry::Entry;
use fetch_stage::FetchStage;
use packet::{BlobRecycler, PacketRecycler};
use record_stage::RecordStage;
use service::Service;
use signature::Keypair;
@ -63,23 +62,18 @@ impl Tpu {
        crdt: &Arc<RwLock<Crdt>>,
        tick_duration: Option<Duration>,
        transactions_sockets: Vec<UdpSocket>,
        blob_recycler: &BlobRecycler,
        ledger_path: &str,
        sigverify_disabled: bool,
        entry_height: u64,
    ) -> (Self, Receiver<Vec<Entry>>, Arc<AtomicBool>) {
        let exit = Arc::new(AtomicBool::new(false));
        let mut packet_recycler = PacketRecycler::default();
        packet_recycler.set_name("tpu::Packet");

        let (fetch_stage, packet_receiver) =
            FetchStage::new(transactions_sockets, exit.clone(), &packet_recycler);
        let (fetch_stage, packet_receiver) = FetchStage::new(transactions_sockets, exit.clone());

        let (sigverify_stage, verified_receiver) =
            SigVerifyStage::new(packet_receiver, sigverify_disabled);

        let (banking_stage, signal_receiver) =
            BankingStage::new(bank.clone(), verified_receiver, packet_recycler.clone());
        let (banking_stage, signal_receiver) = BankingStage::new(bank.clone(), verified_receiver);

        let (record_stage, entry_receiver) = match tick_duration {
            Some(tick_duration) => {
@ -92,7 +86,6 @@ impl Tpu {
                keypair,
                bank.clone(),
                crdt.clone(),
                blob_recycler.clone(),
                ledger_path,
                entry_receiver,
                entry_height,
30 src/tvu.rs
@ -39,7 +39,6 @@
use bank::Bank;
use blob_fetch_stage::BlobFetchStage;
use crdt::Crdt;
use packet::BlobRecycler;
use replicate_stage::ReplicateStage;
use retransmit_stage::RetransmitStage;
use service::Service;
@ -75,7 +74,6 @@ impl Tvu {
        entry_height: u64,
        crdt: Arc<RwLock<Crdt>>,
        window: SharedWindow,
        blob_recycler: BlobRecycler,
        replicate_sockets: Vec<UdpSocket>,
        repair_socket: UdpSocket,
        retransmit_socket: UdpSocket,
@ -87,7 +85,7 @@ impl Tvu {
            replicate_sockets.into_iter().map(Arc::new).collect();
        blob_sockets.push(repair_socket.clone());
        let (fetch_stage, blob_fetch_receiver) =
            BlobFetchStage::new_multi_socket(blob_sockets, exit.clone(), &blob_recycler);
            BlobFetchStage::new_multi_socket(blob_sockets, exit.clone());
        //TODO
        //the packets coming out of blob_receiver need to be sent to the GPU and verified
        //then sent to the window, which does the erasure coding reconstruction
@ -97,7 +95,6 @@ impl Tvu {
            entry_height,
            Arc::new(retransmit_socket),
            repair_socket,
            &blob_recycler,
            blob_fetch_receiver,
        );

@ -105,7 +102,6 @@ impl Tvu {
            keypair,
            bank.clone(),
            crdt,
            blob_recycler,
            blob_window_receiver,
            ledger_path,
            exit,
@ -163,11 +159,10 @@ pub mod tests {
        crdt: Arc<RwLock<Crdt>>,
        gossip: UdpSocket,
        exit: Arc<AtomicBool>,
    ) -> (Ncp, SharedWindow, BlobRecycler) {
    ) -> (Ncp, SharedWindow) {
        let window = Arc::new(RwLock::new(window::default_window()));
        let recycler = BlobRecycler::default();
        let ncp = Ncp::new(&crdt, window.clone(), recycler.clone(), None, gossip, exit);
        (ncp, window, recycler)
        let ncp = Ncp::new(&crdt, window.clone(), None, gossip, exit);
        (ncp, window)
    }

    /// Test that message sent from leader to target1 and replicated to target2
@ -207,19 +202,13 @@ pub mod tests {
            .map(Arc::new)
            .collect();

        let t_receiver = streamer::blob_receiver(
            blob_sockets[0].clone(),
            exit.clone(),
            recycler.clone(),
            s_reader,
        );
        let t_receiver = streamer::blob_receiver(blob_sockets[0].clone(), exit.clone(), s_reader);

        // simulate leader sending messages
        let (s_responder, r_responder) = channel();
        let t_responder = streamer::responder(
            "test_replicate",
            Arc::new(leader.sockets.requests),
            recycler.clone(),
            r_responder,
        );

@ -241,7 +230,6 @@ pub mod tests {
            0,
            cref1,
            dr_1.1,
            dr_1.2,
            target1.sockets.replicate,
            target1.sockets.repair,
            target1.sockets.retransmit,
@ -276,9 +264,9 @@ pub mod tests {
        alice_ref_balance -= transfer_amount;

        for entry in vec![entry0, entry1] {
            let b = recycler.allocate();
            let mut b = recycler.allocate();
            {
                let mut w = b.write().unwrap();
                let mut w = b.write();
                w.set_index(blob_id).unwrap();
                blob_id += 1;
                w.set_id(leader_id).unwrap();
@ -299,8 +287,8 @@ pub mod tests {

        // receive retransmitted messages
        let timer = Duration::new(1, 0);
        while let Ok(msg) = r_reader.recv_timeout(timer) {
        while let Ok(_msg) = r_reader.recv_timeout(timer) {
            trace!("msg: {:?}", msg);
            trace!("got msg");
        }

        let alice_balance = bank.get_balance(&mint.keypair().pubkey());
@ -47,7 +47,7 @@ pub fn create_new_signed_vote_blob(
|
|||||||
}?;
|
}?;
|
||||||
let tx = Transaction::budget_new_vote(&keypair, vote, *last_id, 0);
|
let tx = Transaction::budget_new_vote(&keypair, vote, *last_id, 0);
|
||||||
{
|
{
|
||||||
let mut blob = shared_blob.write().unwrap();
|
let mut blob = shared_blob.write();
|
||||||
let bytes = serialize(&tx)?;
|
let bytes = serialize(&tx)?;
|
||||||
let len = bytes.len();
|
let len = bytes.len();
|
||||||
blob.data[..len].copy_from_slice(&bytes);
|
blob.data[..len].copy_from_slice(&bytes);
|
||||||
@ -174,10 +174,10 @@ impl VoteStage {
|
|||||||
keypair: Arc<Keypair>,
|
keypair: Arc<Keypair>,
|
||||||
bank: Arc<Bank>,
|
bank: Arc<Bank>,
|
||||||
crdt: Arc<RwLock<Crdt>>,
|
crdt: Arc<RwLock<Crdt>>,
|
||||||
blob_recycler: BlobRecycler,
|
|
||||||
vote_blob_sender: BlobSender,
|
vote_blob_sender: BlobSender,
|
||||||
exit: Arc<AtomicBool>,
|
exit: Arc<AtomicBool>,
|
||||||
) -> Self {
|
) -> Self {
|
||||||
|
let blob_recycler = BlobRecycler::default();
|
||||||
let thread_hdl = spawn(move || {
|
let thread_hdl = spawn(move || {
|
||||||
Self::run(
|
Self::run(
|
||||||
&keypair,
|
&keypair,
|
||||||
@ -230,7 +230,6 @@ pub mod tests {
|
|||||||
use instruction::Vote;
|
use instruction::Vote;
|
||||||
use logger;
|
use logger;
|
||||||
use mint::Mint;
|
use mint::Mint;
|
||||||
use packet::BlobRecycler;
|
|
||||||
use service::Service;
|
use service::Service;
|
||||||
use signature::{Keypair, KeypairUtil};
|
use signature::{Keypair, KeypairUtil};
|
||||||
use std::sync::atomic::AtomicBool;
|
use std::sync::atomic::AtomicBool;
|
||||||
@ -249,7 +248,6 @@ pub mod tests {
|
|||||||
let node = Node::new_localhost();
|
let node = Node::new_localhost();
|
||||||
let mut crdt = Crdt::new(node.info.clone()).expect("Crdt::new");
|
let mut crdt = Crdt::new(node.info.clone()).expect("Crdt::new");
|
||||||
crdt.set_leader(node.info.id);
|
crdt.set_leader(node.info.id);
|
||||||
let blob_recycler = BlobRecycler::default();
|
|
||||||
let (sender, receiver) = channel();
|
let (sender, receiver) = channel();
|
||||||
let exit = Arc::new(AtomicBool::new(false));
|
let exit = Arc::new(AtomicBool::new(false));
|
||||||
|
|
||||||
@ -257,7 +255,6 @@ pub mod tests {
|
|||||||
Arc::new(keypair),
|
Arc::new(keypair),
|
||||||
bank.clone(),
|
bank.clone(),
|
||||||
Arc::new(RwLock::new(crdt)),
|
Arc::new(RwLock::new(crdt)),
|
||||||
blob_recycler.clone(),
|
|
||||||
sender,
|
sender,
|
||||||
exit.clone(),
|
exit.clone(),
|
||||||
);
|
);
|
||||||
@ -382,7 +379,7 @@ pub mod tests {
|
|||||||
|
|
||||||
// vote should be valid
|
// vote should be valid
|
||||||
let blob = &vote_blob.unwrap()[0];
|
let blob = &vote_blob.unwrap()[0];
|
||||||
let tx = deserialize(&(blob.read().unwrap().data)).unwrap();
|
let tx = deserialize(&(blob.read().data)).unwrap();
|
||||||
assert!(bank.process_transaction(&tx).is_ok());
|
assert!(bank.process_transaction(&tx).is_ok());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
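The vote-stage hunks above all make the same move: the `blob_recycler: BlobRecycler` parameter disappears from the constructor, and the stage creates a `BlobRecycler::default()` right where blobs are allocated. A minimal sketch of that shape, with stand-in names (`VoteStageLike` and the empty `BlobRecycler` here are illustrative, not the crate's real items):

#[derive(Default, Clone)]
struct BlobRecycler;

struct VoteStageLike;

impl VoteStageLike {
    // before: fn new(blob_recycler: BlobRecycler, ...) -> Self
    fn new() -> Self {
        // the recycler is created where allocation happens,
        // instead of being threaded through by every caller
        let _blob_recycler = BlobRecycler::default();
        VoteStageLike
    }
}

fn main() {
    let _stage = VoteStageLike::new();
}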
@@ -18,7 +18,7 @@ use std::sync::{Arc, RwLock};

pub const WINDOW_SIZE: u64 = 2 * 1024;

- #[derive(Clone, Default)]
+ #[derive(Default, Clone)]
pub struct WindowSlot {
pub data: Option<SharedBlob>,
pub coding: Option<SharedBlob>,
@@ -28,7 +28,7 @@ pub struct WindowSlot {
impl WindowSlot {
fn blob_index(&self) -> Option<u64> {
match self.data {
- Some(ref blob) => blob.read().unwrap().get_index().ok(),
+ Some(ref blob) => blob.read().get_index().ok(),
None => None,
}
}
@@ -194,12 +194,7 @@ impl WindowUtil for Window {
) {
let w = (pix % WINDOW_SIZE) as usize;

- let is_coding = {
+ let is_coding = blob.read().is_coding();
- let blob_r = blob
- .read()
- .expect("blob read lock for flogs streamer::window");
- blob_r.is_coding()
- };

// insert a newly received blob into a window slot, clearing out and recycling any previous
// blob unless the incoming blob is a duplicate (based on idx)
@@ -213,7 +208,7 @@ impl WindowUtil for Window {
c_or_d: &str,
) -> bool {
if let Some(old) = mem::replace(window_slot, Some(blob)) {
- let is_dup = old.read().unwrap().get_index().unwrap() == pix;
+ let is_dup = old.read().get_index().unwrap() == pix;
recycler.recycle(old, "insert_blob_is_dup");
trace!(
"{}: occupied {} window slot {:}, is_dup: {}",
@@ -263,7 +258,7 @@ impl WindowUtil for Window {
trace!("{}: k: {} consumed: {}", id, k, *consumed,);

if let Some(blob) = &self[k].data {
- if blob.read().unwrap().get_index().unwrap() < *consumed {
+ if blob.read().get_index().unwrap() < *consumed {
// window wrap-around, end of received
break;
}
@@ -271,7 +266,10 @@ impl WindowUtil for Window {
// self[k].data is None, end of received
break;
}
- consume_queue.push(self[k].data.clone().expect("clone in fn recv_window"));
+ let slot = self[k].clone();
+ if let Some(r) = slot.data {
+ consume_queue.push(r)
+ }
*consumed += 1;
}
}
@@ -324,7 +322,7 @@ pub fn blob_idx_in_window(id: &Pubkey, pix: u64, consumed: u64, received: &mut u
}

pub fn default_window() -> Window {
- vec![WindowSlot::default(); WINDOW_SIZE as usize]
+ (0..WINDOW_SIZE).map(|_| WindowSlot::default()).collect()
}

pub fn index_blobs(
@@ -336,7 +334,7 @@ pub fn index_blobs(
trace!("{}: INDEX_BLOBS {}", node_info.id, blobs.len());
for (i, b) in blobs.iter().enumerate() {
// only leader should be broadcasting
- let mut blob = b.write().expect("'blob' write lock in crdt::index_blobs");
+ let mut blob = b.write();
blob.set_id(node_info.id)
.expect("set_id in pub fn broadcast");
blob.set_index(*receive_index + i as u64)
@@ -373,7 +371,7 @@ pub fn initialized_window(
// populate the window, offset by implied index
let diff = cmp::max(blobs.len() as isize - window.len() as isize, 0) as usize;
for b in blobs.into_iter().skip(diff) {
- let ix = b.read().unwrap().get_index().expect("blob index");
+ let ix = b.read().get_index().expect("blob index");
let pos = (ix % WINDOW_SIZE) as usize;
trace!("{} caching {} at {}", id, ix, pos);
assert!(window[pos].data.is_none());
@@ -387,16 +385,16 @@ pub fn new_window_from_entries(
ledger_tail: &[Entry],
entry_height: u64,
node_info: &NodeInfo,
- blob_recycler: &BlobRecycler,
) -> Window {
// convert to blobs
+ let blob_recycler = BlobRecycler::default();
let blobs = ledger_tail.to_blobs(&blob_recycler);
initialized_window(&node_info, blobs, entry_height)
}

#[cfg(test)]
mod test {
- use packet::{Blob, BlobRecycler, Packet, PacketRecycler, Packets, PACKET_DATA_SIZE};
+ use packet::{Blob, BlobRecycler, Packet, Packets, PACKET_DATA_SIZE};
use signature::Pubkey;
use std::io;
use std::io::Write;
@@ -412,7 +410,7 @@ mod test {
for _t in 0..5 {
let timer = Duration::new(1, 0);
match r.recv_timeout(timer) {
- Ok(m) => *num += m.read().unwrap().packets.len(),
+ Ok(m) => *num += m.read().packets.len(),
e => info!("error {:?}", e),
}
if *num == 10 {
@@ -434,28 +432,17 @@ mod test {
let addr = read.local_addr().unwrap();
let send = UdpSocket::bind("127.0.0.1:0").expect("bind");
let exit = Arc::new(AtomicBool::new(false));
- let pack_recycler = PacketRecycler::default();
let resp_recycler = BlobRecycler::default();
let (s_reader, r_reader) = channel();
- let t_receiver = receiver(
+ let t_receiver = receiver(Arc::new(read), exit.clone(), s_reader);
- Arc::new(read),
- exit.clone(),
- pack_recycler.clone(),
- s_reader,
- );
let t_responder = {
let (s_responder, r_responder) = channel();
- let t_responder = responder(
+ let t_responder = responder("streamer_send_test", Arc::new(send), r_responder);
- "streamer_send_test",
- Arc::new(send),
- resp_recycler.clone(),
- r_responder,
- );
let mut msgs = Vec::new();
for i in 0..10 {
- let b = resp_recycler.allocate();
+ let mut b = resp_recycler.allocate();
{
- let mut w = b.write().unwrap();
+ let mut w = b.write();
w.data[0] = i as u8;
w.meta.size = PACKET_DATA_SIZE;
w.meta.set_addr(&addr);
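A pattern repeated throughout the window hunks above is that `blob.read().unwrap()` and `b.write().expect(...)` collapse to plain `blob.read()` / `b.write()`: the shared-blob handle now hands back a lock guard directly. How the crate achieves that is not shown in these hunks; the sketch below is one hypothetical way to get that call-site shape on top of the standard library's `RwLock` (all type names are stand-ins):

use std::sync::{Arc, RwLock, RwLockReadGuard, RwLockWriteGuard};

#[derive(Default)]
struct Blob {
    data: Vec<u8>,
}

// Hypothetical wrapper: read()/write() return guards directly, so call
// sites can drop the `.unwrap()` / `.expect(..)` seen on the old lines.
struct SharedBlobLike {
    inner: Arc<RwLock<Blob>>,
}

impl SharedBlobLike {
    fn read(&self) -> RwLockReadGuard<'_, Blob> {
        // swallow lock poisoning so callers never see a Result
        self.inner.read().unwrap_or_else(|e| e.into_inner())
    }
    fn write(&self) -> RwLockWriteGuard<'_, Blob> {
        self.inner.write().unwrap_or_else(|e| e.into_inner())
    }
}

fn main() {
    let b = SharedBlobLike {
        inner: Arc::new(RwLock::new(Blob::default())),
    };
    b.write().data.push(7);
    assert_eq!(b.read().data, vec![7]);
}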
@@ -46,9 +46,7 @@ fn add_block_to_retransmit_queue(
recycler: &BlobRecycler,
retransmit_queue: &mut Vec<SharedBlob>,
) {
- let p = b
+ let p = b.read();
- .read()
- .expect("'b' read lock in fn add_block_to_retransmit_queue");
//TODO this check isn't safe against adverserial packets
//we need to maintain a sequence window
trace!(
@@ -73,9 +71,7 @@ fn add_block_to_retransmit_queue(
//is dropped via a weakref to the recycler
let nv = recycler.allocate();
{
- let mut mnv = nv
+ let mut mnv = nv.write();
- .write()
- .expect("recycler write lock in fn add_block_to_retransmit_queue");
let sz = p.meta.size;
mnv.meta.size = sz;
mnv.data[..sz].copy_from_slice(&p.data[..sz]);
@@ -110,9 +106,9 @@ fn retransmit_all_leader_blocks(
{
*pending_retransmits = false;
if w.leader_unknown {
- if let Some(b) = w.clone().data {
+ if let Some(ref b) = w.data {
add_block_to_retransmit_queue(
- &b,
+ b,
leader_id,
recycler,
&mut retransmit_queue,
@@ -190,7 +186,7 @@ fn recv_window(
let mut consume_queue = Vec::new();
for b in dq {
let (pix, meta_size) = {
- let p = b.write().unwrap();
+ let p = b.read();
(p.get_index()?, p.meta.size)
};
pixs.push(pix);
@@ -236,7 +232,6 @@ pub fn window_service(
crdt: Arc<RwLock<Crdt>>,
window: SharedWindow,
entry_height: u64,
- recycler: BlobRecycler,
r: BlobReceiver,
s: BlobSender,
retransmit: BlobSender,
@@ -251,6 +246,7 @@ pub fn window_service(
let mut times = 0;
let id = crdt.read().unwrap().id;
let mut pending_retransmits = false;
+ let recycler = BlobRecycler::default();
trace!("{}: RECV_WINDOW started", id);
loop {
if let Err(e) = recv_window(
@@ -317,7 +313,7 @@ mod test {
match r.recv_timeout(timer) {
Ok(m) => {
for (i, v) in m.iter().enumerate() {
- assert_eq!(v.read().unwrap().get_index().unwrap() as usize, *num + i);
+ assert_eq!(v.read().get_index().unwrap() as usize, *num + i);
}
*num += m.len();
}
@@ -341,12 +337,7 @@ mod test {

let resp_recycler = BlobRecycler::default();
let (s_reader, r_reader) = channel();
- let t_receiver = blob_receiver(
+ let t_receiver = blob_receiver(Arc::new(tn.sockets.gossip), exit.clone(), s_reader);
- Arc::new(tn.sockets.gossip),
- exit.clone(),
- resp_recycler.clone(),
- s_reader,
- );
let (s_window, r_window) = channel();
let (s_retransmit, r_retransmit) = channel();
let win = Arc::new(RwLock::new(default_window()));
@@ -354,7 +345,6 @@ mod test {
subs,
win,
0,
- resp_recycler.clone(),
r_reader,
s_window,
s_retransmit,
@@ -365,18 +355,13 @@ mod test {
let blob_sockets: Vec<Arc<UdpSocket>> =
tn.sockets.replicate.into_iter().map(Arc::new).collect();

- let t_responder = responder(
+ let t_responder = responder("window_send_test", blob_sockets[0].clone(), r_responder);
- "window_send_test",
- blob_sockets[0].clone(),
- resp_recycler.clone(),
- r_responder,
- );
let mut msgs = Vec::new();
for v in 0..10 {
let i = 9 - v;
let b = resp_recycler.allocate();
{
- let mut w = b.write().unwrap();
+ let mut w = b.write();
w.set_index(i).unwrap();
w.set_id(me_id).unwrap();
assert_eq!(i, w.get_index().unwrap());
@@ -414,12 +399,7 @@ mod test {

let resp_recycler = BlobRecycler::default();
let (s_reader, r_reader) = channel();
- let t_receiver = blob_receiver(
+ let t_receiver = blob_receiver(Arc::new(tn.sockets.gossip), exit.clone(), s_reader);
- Arc::new(tn.sockets.gossip),
- exit.clone(),
- resp_recycler.clone(),
- s_reader,
- );
let (s_window, _r_window) = channel();
let (s_retransmit, r_retransmit) = channel();
let win = Arc::new(RwLock::new(default_window()));
@@ -427,7 +407,6 @@ mod test {
subs.clone(),
win,
0,
- resp_recycler.clone(),
r_reader,
s_window,
s_retransmit,
@@ -437,18 +416,13 @@ mod test {
let (s_responder, r_responder) = channel();
let blob_sockets: Vec<Arc<UdpSocket>> =
tn.sockets.replicate.into_iter().map(Arc::new).collect();
- let t_responder = responder(
+ let t_responder = responder("window_send_test", blob_sockets[0].clone(), r_responder);
- "window_send_test",
- blob_sockets[0].clone(),
- resp_recycler.clone(),
- r_responder,
- );
let mut msgs = Vec::new();
for v in 0..10 {
let i = 9 - v;
let b = resp_recycler.allocate();
{
- let mut w = b.write().unwrap();
+ let mut w = b.write();
w.set_index(i).unwrap();
w.set_id(me_id).unwrap();
assert_eq!(i, w.get_index().unwrap());
@@ -479,12 +453,7 @@ mod test {

let resp_recycler = BlobRecycler::default();
let (s_reader, r_reader) = channel();
- let t_receiver = blob_receiver(
+ let t_receiver = blob_receiver(Arc::new(tn.sockets.gossip), exit.clone(), s_reader);
- Arc::new(tn.sockets.gossip),
- exit.clone(),
- resp_recycler.clone(),
- s_reader,
- );
let (s_window, _r_window) = channel();
let (s_retransmit, r_retransmit) = channel();
let win = Arc::new(RwLock::new(default_window()));
@@ -492,7 +461,6 @@ mod test {
subs.clone(),
win,
0,
- resp_recycler.clone(),
r_reader,
s_window,
s_retransmit,
@@ -502,18 +470,13 @@ mod test {
let (s_responder, r_responder) = channel();
let blob_sockets: Vec<Arc<UdpSocket>> =
tn.sockets.replicate.into_iter().map(Arc::new).collect();
- let t_responder = responder(
+ let t_responder = responder("window_send_test", blob_sockets[0].clone(), r_responder);
- "window_send_test",
- blob_sockets[0].clone(),
- resp_recycler.clone(),
- r_responder,
- );
let mut msgs = Vec::new();
for v in 0..10 {
let i = 9 - v;
let b = resp_recycler.allocate();
{
- let mut w = b.write().unwrap();
+ let mut w = b.write();
w.set_index(i).unwrap();
w.set_id(me_id).unwrap();
assert_eq!(i, w.get_index().unwrap());
@@ -533,7 +496,7 @@ mod test {
let i = 9 + v;
let b = resp_recycler.allocate();
{
- let mut w = b.write().unwrap();
+ let mut w = b.write();
w.set_index(i).unwrap();
w.set_id(me_id).unwrap();
assert_eq!(i, w.get_index().unwrap());
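One small change in the retransmit hunk above is easy to miss: `if let Some(b) = w.clone().data` becomes `if let Some(ref b) = w.data`, and the call then passes `b` instead of `&b`, so the slot is borrowed in place rather than cloned just to inspect its `data` field. A tiny self-contained illustration of the difference, using simplified stand-in types rather than the crate's own:

#[derive(Clone)]
struct Slot {
    data: Option<String>,
    leader_unknown: bool,
}

fn main() {
    let w = Slot {
        data: Some("blob".to_string()),
        leader_unknown: true,
    };
    let _ = w.leader_unknown;

    // before: clone the whole slot just to move its data field out
    if let Some(b) = w.clone().data {
        println!("cloned: {}", b);
    }

    // after: borrow the field in place, no clone of the slot
    if let Some(ref b) = w.data {
        println!("borrowed: {}", b);
    }
}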
@@ -176,7 +176,6 @@ impl WriteStage {
keypair: Arc<Keypair>,
bank: Arc<Bank>,
crdt: Arc<RwLock<Crdt>>,
- blob_recycler: BlobRecycler,
ledger_path: &str,
entry_receiver: Receiver<Vec<Entry>>,
entry_height: u64,
@@ -186,7 +185,6 @@ impl WriteStage {
let t_responder = responder(
"write_stage_vote_sender",
Arc::new(send),
- blob_recycler.clone(),
vote_blob_receiver,
);
let (entry_sender, entry_receiver_forward) = channel();
@@ -205,6 +203,7 @@ impl WriteStage {
leader_rotation_interval = rcrdt.get_leader_rotation_interval();
}
let mut entry_height = entry_height;
+ let blob_recycler = BlobRecycler::default();
loop {
info!("write_stage entry height: {}", entry_height);
// Note that entry height is not zero indexed, it starts at 1, so the
@@ -296,7 +295,6 @@ mod tests {
use crdt::{Crdt, Node};
use entry::Entry;
use ledger::{genesis, read_ledger};
- use packet::BlobRecycler;
use recorder::Recorder;
use service::Service;
use signature::{Keypair, KeypairUtil, Pubkey};
@@ -337,7 +335,6 @@ mod tests {
let crdt = Arc::new(RwLock::new(crdt));
let bank = Bank::new_default(true);
let bank = Arc::new(bank);
- let blob_recycler = BlobRecycler::default();

// Make a ledger
let (_, leader_ledger_path) = genesis("test_leader_rotation_exit", 10_000);
@@ -352,7 +349,6 @@ mod tests {
leader_keypair,
bank.clone(),
crdt.clone(),
- blob_recycler,
&leader_ledger_path,
entry_receiver,
entry_height,
@@ -21,14 +21,7 @@ fn test_node(exit: Arc<AtomicBool>) -> (Arc<RwLock<Crdt>>, Ncp, UdpSocket) {
let crdt = Crdt::new(tn.info.clone()).expect("Crdt::new");
let c = Arc::new(RwLock::new(crdt));
let w = Arc::new(RwLock::new(vec![]));
- let d = Ncp::new(
+ let d = Ncp::new(&c.clone(), w, None, tn.sockets.gossip, exit);
- &c.clone(),
- w,
- BlobRecycler::default(),
- None,
- tn.sockets.gossip,
- exit,
- );
(c, d, tn.sockets.replicate.pop().unwrap())
}

@@ -166,9 +159,10 @@ pub fn crdt_retransmit() -> result::Result<()> {
sleep(Duration::new(1, 0));
}
assert!(done);
- let mut b = Blob::default();
+ let r = BlobRecycler::default();
- b.meta.size = 10;
+ let b = r.allocate();
- Crdt::retransmit(&c1, &Arc::new(RwLock::new(b)), &tn1)?;
+ b.write().meta.size = 10;
+ Crdt::retransmit(&c1, &b, &tn1)?;
let res: Vec<_> = [tn1, tn2, tn3]
.into_par_iter()
.map(|s| {
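The `crdt_retransmit` hunk swaps a hand-built `Blob::default()` wrapped in `Arc<RwLock<..>>` for a handle allocated from a `BlobRecycler`, then sets `meta.size` through the write guard. The sketch below mimics that allocation pattern with stand-in types; it uses the standard `RwLock`, so it keeps the `.unwrap()` that the crate's own shared-blob handle no longer needs at call sites:

use std::sync::{Arc, RwLock};

#[derive(Default)]
struct Meta {
    size: usize,
}

#[derive(Default)]
struct Blob {
    meta: Meta,
}

#[derive(Default)]
struct BlobRecycler;

impl BlobRecycler {
    // the real allocate() draws from a free list; a fresh value stands in here
    fn allocate(&self) -> Arc<RwLock<Blob>> {
        Arc::new(RwLock::new(Blob::default()))
    }
}

fn main() {
    let r = BlobRecycler::default();
    let b = r.allocate();
    b.write().unwrap().meta.size = 10;
    assert_eq!(b.read().unwrap().meta.size, 10);
}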
@@ -13,7 +13,6 @@ use solana::ledger::LedgerWriter;
use solana::logger;
use solana::mint::Mint;
use solana::ncp::Ncp;
- use solana::packet::BlobRecycler;
use solana::result;
use solana::service::Service;
use solana::signature::{Keypair, KeypairUtil, Pubkey};
@@ -41,11 +40,9 @@ fn make_spy_node(leader: &NodeInfo) -> (Ncp, Arc<RwLock<Crdt>>, Pubkey) {
spy_crdt.set_leader(leader.id);
let spy_crdt_ref = Arc::new(RwLock::new(spy_crdt));
let spy_window = Arc::new(RwLock::new(default_window()));
- let recycler = BlobRecycler::default();
let ncp = Ncp::new(
&spy_crdt_ref,
spy_window,
- recycler,
None,
spy.sockets.gossip,
exit.clone(),