//! `window_service` handles the data plane incoming shreds, storing them in
//! blocktree and retransmitting where required
//!
use crate::cluster_info::ClusterInfo;
use crate::packet::Packets;
use crate::repair_service::{RepairService, RepairStrategy};
use crate::result::{Error, Result};
use crate::service::Service;
use crate::streamer::PacketSender;
use crossbeam_channel::{Receiver as CrossbeamReceiver, RecvTimeoutError};
use rayon::iter::IntoParallelRefMutIterator;
use rayon::iter::ParallelIterator;
use rayon::ThreadPool;
use solana_ledger::blocktree::{self, Blocktree};
use solana_ledger::leader_schedule_cache::LeaderScheduleCache;
use solana_ledger::shred::Shred;
use solana_metrics::{inc_new_counter_debug, inc_new_counter_error};
use solana_rayon_threadlimit::get_thread_count;
use solana_runtime::bank::Bank;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::timing::duration_as_ms;
use std::net::UdpSocket;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, RwLock};
use std::thread::{self, Builder, JoinHandle};
use std::time::{Duration, Instant};

fn verify_shred_slot(shred: &Shred, root: u64) -> bool {
    if shred.is_data() {
        // Only data shreds have parent information
        blocktree::verify_shred_slots(shred.slot(), shred.parent(), root)
    } else {
        // Filter out outdated coding shreds
        shred.slot() >= root
    }
}

/// drop shreds that are from myself or not from the correct leader for the
/// shred's slot
pub fn should_retransmit_and_persist(
    shred: &Shred,
    bank: Option<Arc<Bank>>,
    leader_schedule_cache: &Arc<LeaderScheduleCache>,
    my_pubkey: &Pubkey,
    root: u64,
) -> bool {
    let slot_leader_pubkey = match bank {
        None => leader_schedule_cache.slot_leader_at(shred.slot(), None),
        Some(bank) => leader_schedule_cache.slot_leader_at(shred.slot(), Some(&bank)),
    };
    if let Some(leader_id) = slot_leader_pubkey {
        if leader_id == *my_pubkey {
            inc_new_counter_debug!("streamer-recv_window-circular_transmission", 1);
            false
        } else if !verify_shred_slot(shred, root) {
            inc_new_counter_debug!("streamer-recv_window-outdated_transmission", 1);
            false
        } else {
            true
        }
    } else {
        inc_new_counter_debug!("streamer-recv_window-unknown_leader", 1);
        false
    }
}

fn recv_window<F>(
    blocktree: &Arc<Blocktree>,
    my_pubkey: &Pubkey,
    verified_receiver: &CrossbeamReceiver<Vec<Packets>>,
    retransmit: &PacketSender,
    shred_filter: F,
    thread_pool: &ThreadPool,
    leader_schedule_cache: &Arc<LeaderScheduleCache>,
) -> Result<()>
where
    F: Fn(&Shred, u64) -> bool + Sync,
{
    let timer = Duration::from_millis(200);
    let mut packets = verified_receiver.recv_timeout(timer)?;
    let mut total_packets: usize = packets.iter().map(|p| p.packets.len()).sum();

    while let Ok(mut more_packets) = verified_receiver.try_recv() {
        let count: usize = more_packets.iter().map(|p| p.packets.len()).sum();
        total_packets += count;
        packets.append(&mut more_packets)
    }

    let now = Instant::now();
    inc_new_counter_debug!("streamer-recv_window-recv", total_packets);

    let last_root = blocktree.last_root();
    let shreds: Vec<_> = thread_pool.install(|| {
        packets
            .par_iter_mut()
            .flat_map(|packets| {
                packets
                    .packets
                    .iter_mut()
                    .filter_map(|packet| {
                        if packet.meta.discard {
                            inc_new_counter_debug!("streamer-recv_window-invalid_signature", 1);
                            None
                        } else if let Ok(shred) =
                            Shred::new_from_serialized_shred(packet.data.to_vec())
                        {
                            if shred_filter(&shred, last_root) {
                                packet.meta.slot = shred.slot();
                                packet.meta.seed = shred.seed();
                                Some(shred)
                            } else {
                                packet.meta.discard = true;
                                None
                            }
                        } else {
                            packet.meta.discard = true;
                            None
                        }
                    })
                    .collect::<Vec<_>>()
            })
            .collect()
    });

    trace!("{:?} shreds from packets", shreds.len());
    trace!("{} num total shreds received: {}", my_pubkey, total_packets);

    for packets in packets.into_iter() {
        if !packets.packets.is_empty() {
            // Ignore the send error, as the retransmit is optional (e.g. archivers don't retransmit)
            let _ = retransmit.send(packets);
        }
    }

    let blocktree_insert_metrics = blocktree.insert_shreds(shreds, Some(leader_schedule_cache))?;
    blocktree_insert_metrics.report_metrics("recv-window-insert-shreds");

    trace!(
        "Elapsed processing time in recv_window(): {}",
        duration_as_ms(&now.elapsed())
    );

    Ok(())
}

// Implement a destructor for the window_service thread to signal it exited
// even on panics
struct Finalizer {
    exit_sender: Arc<AtomicBool>,
}

impl Finalizer {
    fn new(exit_sender: Arc<AtomicBool>) -> Self {
        Finalizer { exit_sender }
    }
}

// Implement a destructor for Finalizer.
impl Drop for Finalizer {
    fn drop(&mut self) {
        // Store through the shared Arc directly; no clone is needed here.
        self.exit_sender.store(true, Ordering::Relaxed);
    }
}

pub struct WindowService {
    t_window: JoinHandle<()>,
    repair_service: RepairService,
}

impl WindowService {
    #[allow(clippy::too_many_arguments)]
    pub fn new<F>(
        blocktree: Arc<Blocktree>,
        cluster_info: Arc<RwLock<ClusterInfo>>,
        verified_receiver: CrossbeamReceiver<Vec<Packets>>,
        retransmit: PacketSender,
        repair_socket: Arc<UdpSocket>,
        exit: &Arc<AtomicBool>,
        repair_strategy: RepairStrategy,
        leader_schedule_cache: &Arc<LeaderScheduleCache>,
        shred_filter: F,
    ) -> WindowService
    where
        F: 'static
            + Fn(&Pubkey, &Shred, Option<Arc<Bank>>, u64) -> bool
            + std::marker::Send
            + std::marker::Sync,
    {
        let bank_forks = match repair_strategy {
            RepairStrategy::RepairRange(_) => None,
            RepairStrategy::RepairAll { ref bank_forks, .. } => Some(bank_forks.clone()),
        };

        let repair_service = RepairService::new(
            blocktree.clone(),
            exit.clone(),
            repair_socket,
            cluster_info.clone(),
            repair_strategy,
        );
        let exit = exit.clone();
        let shred_filter = Arc::new(shred_filter);
        let bank_forks = bank_forks.clone();
        let leader_schedule_cache = leader_schedule_cache.clone();
        let t_window = Builder::new()
            .name("solana-window".to_string())
            // TODO: Mark: Why is it overflowing
            .stack_size(8 * 1024 * 1024)
            .spawn(move || {
                let _exit = Finalizer::new(exit.clone());
                let id = cluster_info.read().unwrap().id();
                trace!("{}: RECV_WINDOW started", id);
                let mut now = Instant::now();
                let thread_pool = rayon::ThreadPoolBuilder::new()
                    .num_threads(get_thread_count())
                    .build()
                    .unwrap();
                loop {
                    if exit.load(Ordering::Relaxed) {
                        break;
                    }

                    if let Err(e) = recv_window(
                        &blocktree,
                        &id,
                        &verified_receiver,
                        &retransmit,
                        |shred, last_root| {
                            shred_filter(
                                &id,
                                shred,
                                bank_forks
                                    .as_ref()
                                    .map(|bank_forks| bank_forks.read().unwrap().working_bank()),
                                last_root,
                            )
                        },
                        &thread_pool,
                        &leader_schedule_cache,
                    ) {
                        match e {
                            Error::CrossbeamRecvTimeoutError(RecvTimeoutError::Disconnected) => break,
                            Error::CrossbeamRecvTimeoutError(RecvTimeoutError::Timeout) => {
                                if now.elapsed() > Duration::from_secs(30) {
                                    warn!("Window does not seem to be receiving data. Ensure port configuration is correct...");
                                    now = Instant::now();
                                }
                            }
                            _ => {
                                inc_new_counter_error!("streamer-window-error", 1, 1);
                                error!("window error: {:?}", e);
                            }
                        }
                    } else {
                        now = Instant::now();
                    }
                }
            })
            .unwrap();

        WindowService {
            t_window,
            repair_service,
        }
    }
}

impl Service for WindowService {
    type JoinReturnType = ();

    fn join(self) -> thread::Result<()> {
        self.t_window.join()?;
        self.repair_service.join()
    }
}

#[cfg(test)]
mod test {
    use super::*;
    use crate::{
        cluster_info::ClusterInfo,
        contact_info::ContactInfo,
        genesis_utils::create_genesis_block_with_leader,
        packet::{Packet, Packets},
        repair_service::RepairSlotRange,
        service::Service,
    };
    use crossbeam_channel::unbounded;
    use rand::{seq::SliceRandom, thread_rng};
    use solana_ledger::shred::DataShredHeader;
    use solana_ledger::{
        blocktree::{get_tmp_ledger_path, make_many_slot_entries, Blocktree},
        entry::{create_ticks, Entry},
        shred::Shredder,
    };
    use solana_sdk::{
        clock::Slot,
        epoch_schedule::MINIMUM_SLOTS_PER_EPOCH,
        hash::Hash,
        signature::{Keypair, KeypairUtil},
    };
    use std::{
        net::UdpSocket,
        sync::atomic::{AtomicBool, Ordering},
        sync::mpsc::channel,
        sync::{Arc, RwLock},
        thread::sleep,
        time::Duration,
    };

    fn local_entries_to_shred(
        entries: &[Entry],
        slot: Slot,
        parent: Slot,
        keypair: &Arc<Keypair>,
    ) -> Vec<Shred> {
        let shredder = Shredder::new(slot, parent, 0.0, keypair.clone(), 0)
            .expect("Failed to create entry shredder");
        shredder.entries_to_shreds(&entries, true, 0).0
    }

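    // A minimal sketch of a direct test for `verify_shred_slot`, added here as
    // an illustration and not part of the original suite. It assumes only the
    // shred-construction helpers already used elsewhere in this module.
    #[test]
    fn test_verify_shred_slot() {
        let keypair = Arc::new(Keypair::new());
        // Data shred at slot 5 chaining to parent 4: acceptable while the
        // root has not moved past the parent.
        let shreds = local_entries_to_shred(&[Entry::default()], 5, 4, &keypair);
        assert!(verify_shred_slot(&shreds[0], 4));
        // Once the root passes the shred's parent, the shred is outdated.
        assert!(!verify_shred_slot(&shreds[0], 5));

        // Coding shreds carry no parent, so only `slot >= root` is checked.
        let (common, coding) = Shredder::new_coding_shred_header(5, 5, 6, 6, 0);
        let coding_shred = Shred::new_empty_from_header(common, DataShredHeader::default(), coding);
        assert!(verify_shred_slot(&coding_shred, 5));
        assert!(!verify_shred_slot(&coding_shred, 6));
    }
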
    #[test]
    fn test_process_shred() {
        let blocktree_path = get_tmp_ledger_path!();
        let blocktree = Arc::new(Blocktree::open(&blocktree_path).unwrap());
        let num_entries = 10;
        let original_entries = create_ticks(num_entries, 0, Hash::default());
        let mut shreds = local_entries_to_shred(&original_entries, 0, 0, &Arc::new(Keypair::new()));
        shreds.reverse();
        blocktree
            .insert_shreds(shreds, None)
            .expect("Expect successful processing of shred");

        assert_eq!(
            blocktree.get_slot_entries(0, 0, None).unwrap(),
            original_entries
        );

        drop(blocktree);
        Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
    }

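    // A minimal sketch, not in the original suite: dropping the `Finalizer`
    // guard must flip the shared exit flag, which is the mechanism the window
    // thread relies on to signal shutdown even when it unwinds from a panic.
    #[test]
    fn test_finalizer_sets_exit_on_drop() {
        let exit = Arc::new(AtomicBool::new(false));
        {
            let _guard = Finalizer::new(exit.clone());
            assert!(!exit.load(Ordering::Relaxed));
        }
        assert!(exit.load(Ordering::Relaxed));
    }
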
    #[test]
    fn test_should_retransmit_and_persist() {
        let me_id = Pubkey::new_rand();
        let leader_keypair = Arc::new(Keypair::new());
        let leader_pubkey = leader_keypair.pubkey();
        let bank = Arc::new(Bank::new(
            &create_genesis_block_with_leader(100, &leader_pubkey, 10).genesis_block,
        ));
        let cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank));

        let mut shreds = local_entries_to_shred(&[Entry::default()], 0, 0, &leader_keypair);

        // with a Bank for slot 0, shred continues
        assert_eq!(
            should_retransmit_and_persist(&shreds[0], Some(bank.clone()), &cache, &me_id, 0),
            true
        );

        // If it's a coding shred, test that slot >= root
        let (common, coding) = Shredder::new_coding_shred_header(5, 5, 6, 6, 0);
        let mut coding_shred =
            Shred::new_empty_from_header(common, DataShredHeader::default(), coding);
        Shredder::sign_shred(&leader_keypair, &mut coding_shred);
        assert_eq!(
            should_retransmit_and_persist(&coding_shred, Some(bank.clone()), &cache, &me_id, 0),
            true
        );
        assert_eq!(
            should_retransmit_and_persist(&coding_shred, Some(bank.clone()), &cache, &me_id, 5),
            true
        );
        assert_eq!(
            should_retransmit_and_persist(&coding_shred, Some(bank.clone()), &cache, &me_id, 6),
            false
        );

        // with a Bank and no idea who the leader is, shred gets thrown out
        shreds[0].set_slot(MINIMUM_SLOTS_PER_EPOCH as u64 * 3);
        assert_eq!(
            should_retransmit_and_persist(&shreds[0], Some(bank.clone()), &cache, &me_id, 0),
            false
        );

        // with a shred where shred.slot() == root, shred gets thrown out
        let slot = MINIMUM_SLOTS_PER_EPOCH as u64 * 3;
        let shreds = local_entries_to_shred(&[Entry::default()], slot, slot - 1, &leader_keypair);
        assert_eq!(
            should_retransmit_and_persist(&shreds[0], Some(bank.clone()), &cache, &me_id, slot),
            false
        );

        // with a shred where shred.parent() < root, shred gets thrown out
        let slot = MINIMUM_SLOTS_PER_EPOCH as u64 * 3;
        let shreds =
            local_entries_to_shred(&[Entry::default()], slot + 1, slot - 1, &leader_keypair);
        assert_eq!(
            should_retransmit_and_persist(&shreds[0], Some(bank.clone()), &cache, &me_id, slot),
            false
        );

        // if the shred came back from me, it doesn't continue, whether or not I have a bank
        assert_eq!(
            should_retransmit_and_persist(&shreds[0], None, &cache, &me_id, 0),
            false
        );
    }

    fn make_test_window(
        verified_receiver: CrossbeamReceiver<Vec<Packets>>,
        exit: Arc<AtomicBool>,
    ) -> WindowService {
        let blocktree_path = get_tmp_ledger_path!();
        let (blocktree, _, _) = Blocktree::open_with_signal(&blocktree_path)
            .expect("Expected to be able to open database ledger");

        let blocktree = Arc::new(blocktree);
        let (retransmit_sender, _retransmit_receiver) = channel();
        let cluster_info = Arc::new(RwLock::new(ClusterInfo::new_with_invalid_keypair(
            ContactInfo::new_localhost(&Pubkey::default(), 0),
        )));
        let repair_sock = Arc::new(UdpSocket::bind(socketaddr_any!()).unwrap());
        let window = WindowService::new(
            blocktree,
            cluster_info,
            verified_receiver,
            retransmit_sender,
            repair_sock,
            &exit,
            RepairStrategy::RepairRange(RepairSlotRange { start: 0, end: 0 }),
            &Arc::new(LeaderScheduleCache::default()),
            |_, _, _, _| true,
        );
        window
    }

    #[test]
    fn test_recv_window() {
        let (packet_sender, packet_receiver) = unbounded();
        let exit = Arc::new(AtomicBool::new(false));
        let window = make_test_window(packet_receiver, exit.clone());
        // send 5 slots worth of data to the window
        let (shreds, _) = make_many_slot_entries(0, 5, 10);
        let packets: Vec<_> = shreds
            .into_iter()
            .map(|s| {
                let mut p = Packet::default();
                p.data.copy_from_slice(&s.payload);
                p
            })
            .collect();
        let mut packets = Packets::new(packets);
        packet_sender.send(vec![packets.clone()]).unwrap();
        sleep(Duration::from_millis(500));

        // add some empty packets to the data set. These should fail to deserialize
        packets.packets.append(&mut vec![Packet::default(); 10]);
        packets.packets.shuffle(&mut thread_rng());
        packet_sender.send(vec![packets.clone()]).unwrap();
        sleep(Duration::from_millis(500));

        // send 1 empty packet that cannot deserialize into a shred
        packet_sender
            .send(vec![Packets::new(vec![Packet::default(); 1])])
            .unwrap();
        sleep(Duration::from_millis(500));

        exit.store(true, Ordering::Relaxed);
        window.join().unwrap();
    }
}