Limit maximum number of shreds in a slot to 32K (#7584)
* Limit maximum number of shreds in a slot to 32K
* Mark dead slot replay as fatal error
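As context for the hunks below, here is a minimal sketch of the cap being enforced. It is an illustration, not the crate's code: `MAX_DATA_SHREDS_PER_SLOT` is assumed to be `32 * 1024` per the commit title (the real constant lives in `solana_ledger::blocktree`), and every place this diff touches rejects a shred whose index is at or beyond it.

```rust
// Minimal sketch, not the crate's code.
// Assumption: MAX_DATA_SHREDS_PER_SLOT == 32 * 1024, per the commit title.
const MAX_DATA_SHREDS_PER_SLOT: usize = 32 * 1024;

fn shred_index_in_range(index: u32) -> bool {
    // Any shred claiming an index at or past the cap is rejected.
    (index as usize) < MAX_DATA_SHREDS_PER_SLOT
}

fn main() {
    assert!(shred_index_in_range(32 * 1024 - 1)); // last valid index
    assert!(!shred_index_in_range(32 * 1024)); // first out-of-range index
}
```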
@@ -537,6 +537,7 @@ impl ReplayStage {
             }
             Err(Error::BlockError(_)) => true,
             Err(Error::BlocktreeError(BlocktreeError::InvalidShredData(_))) => true,
+            Err(Error::BlocktreeError(BlocktreeError::DeadSlot)) => true,
             _ => false,
         }
     }
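The replay-stage hunk above adds `BlocktreeError::DeadSlot` to the replay errors treated as fatal for the slot, which is the second bullet of the commit message. Below is a hedged sketch with simplified stand-in enums (not the real solana error types) of how that classification behaves:

```rust
// Hedged sketch with stand-in enums mirroring the match arms in the hunk above.
// DeadSlot is the newly added fatal case.
#[derive(Debug)]
enum BlocktreeError {
    InvalidShredData(String),
    DeadSlot,
}

#[derive(Debug)]
enum Error {
    BlockError(String),
    BlocktreeError(BlocktreeError),
    Other,
}

fn is_fatal_replay_error(result: &Result<(), Error>) -> bool {
    match result {
        Err(Error::BlockError(_)) => true,
        Err(Error::BlocktreeError(BlocktreeError::InvalidShredData(_))) => true,
        Err(Error::BlocktreeError(BlocktreeError::DeadSlot)) => true,
        _ => false,
    }
}

fn main() {
    assert!(is_fatal_replay_error(&Err(Error::BlocktreeError(
        BlocktreeError::DeadSlot
    ))));
    assert!(is_fatal_replay_error(&Err(Error::BlocktreeError(
        BlocktreeError::InvalidShredData("bad shred".to_string())
    ))));
    assert!(is_fatal_replay_error(&Err(Error::BlockError("bad block".to_string()))));
    assert!(!is_fatal_replay_error(&Err(Error::Other)));
}
```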
@@ -2,7 +2,10 @@
 use crate::packet::{Packet, PacketsRecycler};
 use crate::streamer::{self, PacketReceiver, PacketSender};
+use solana_ledger::blocktree::MAX_DATA_SHREDS_PER_SLOT;
+use solana_ledger::shred::{OFFSET_OF_SHRED_INDEX, SIZE_OF_SHRED_INDEX};
 use solana_perf::cuda_runtime::PinnedVec;
+use solana_perf::packet::limited_deserialize;
 use solana_perf::recycler::Recycler;
 use std::net::UdpSocket;
 use std::sync::atomic::AtomicBool;
@@ -21,7 +24,21 @@ impl ShredFetchStage {
         F: Fn(&mut Packet),
     {
         while let Some(mut p) = recvr.iter().next() {
-            p.packets.iter_mut().for_each(|p| modify(p));
+            let index_start = OFFSET_OF_SHRED_INDEX;
+            let index_end = index_start + SIZE_OF_SHRED_INDEX;
+            p.packets.iter_mut().for_each(|p| {
+                p.meta.discard = true;
+                if index_end <= p.meta.size {
+                    if let Ok(index) = limited_deserialize::<u32>(&p.data[index_start..index_end]) {
+                        if index < MAX_DATA_SHREDS_PER_SLOT as u32 {
+                            p.meta.discard = false;
+                            modify(p);
+                        } else {
+                            inc_new_counter_warn!("shred_fetch_stage-shred_index_overrun", 1);
+                        }
+                    }
+                }
+            });
             if sendr.send(p).is_err() {
                 break;
             }
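This fetch-stage hunk stops out-of-range shreds at the earliest possible point: each raw packet's index field is peeked at a fixed offset and the packet stays marked `discard` unless the index is below the cap. Here is a self-contained sketch of the same check, using a placeholder offset and a plain little-endian decode in place of `limited_deserialize::<u32>`:

```rust
// Hedged sketch of the fetch-stage filter. The offset below is a placeholder,
// not the crate's real OFFSET_OF_SHRED_INDEX; the manual decode assumes the
// index is serialized as a little-endian u32, standing in for limited_deserialize.
const MAX_DATA_SHREDS_PER_SLOT: usize = 32 * 1024;
const OFFSET_OF_SHRED_INDEX: usize = 73; // placeholder offset
const SIZE_OF_SHRED_INDEX: usize = 4;

fn keep_packet(data: &[u8], size: usize) -> bool {
    let start = OFFSET_OF_SHRED_INDEX;
    let end = start + SIZE_OF_SHRED_INDEX;
    if end > size || end > data.len() {
        return false; // too short to even carry an index: discard
    }
    let index = u32::from_le_bytes([data[start], data[start + 1], data[start + 2], data[start + 3]]);
    (index as usize) < MAX_DATA_SHREDS_PER_SLOT
}

fn main() {
    let mut data = vec![0u8; 128];
    data[OFFSET_OF_SHRED_INDEX..OFFSET_OF_SHRED_INDEX + 4].copy_from_slice(&100u32.to_le_bytes());
    assert!(keep_packet(&data, data.len())); // in-range index is kept
    data[OFFSET_OF_SHRED_INDEX..OFFSET_OF_SHRED_INDEX + 4].copy_from_slice(&40_000u32.to_le_bytes());
    assert!(!keep_packet(&data, data.len())); // past the 32K cap: discarded
}
```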
@@ -4,12 +4,10 @@ use crate::sigverify;
 use crate::sigverify_stage::SigVerifier;
 use solana_ledger::bank_forks::BankForks;
 use solana_ledger::leader_schedule_cache::LeaderScheduleCache;
-use solana_ledger::shred::ShredType;
+use solana_ledger::shred::{OFFSET_OF_SHRED_SLOT, SIZE_OF_SHRED_SLOT};
 use solana_ledger::sigverify_shreds::verify_shreds_gpu;
 use solana_perf::recycler_cache::RecyclerCache;
-use solana_sdk::signature::Signature;
 use std::collections::{HashMap, HashSet};
-use std::mem::size_of;
 use std::sync::{Arc, RwLock};
 
 #[derive(Clone)]
@@ -36,8 +34,8 @@ impl ShredSigVerifier {
             .iter()
             .flat_map(|batch| {
                 batch.packets.iter().filter_map(|packet| {
-                    let slot_start = size_of::<Signature>() + size_of::<ShredType>();
-                    let slot_end = slot_start + size_of::<u64>();
+                    let slot_start = OFFSET_OF_SHRED_SLOT;
+                    let slot_end = slot_start + SIZE_OF_SHRED_SLOT;
                     trace!("slot {} {}", slot_start, slot_end,);
                     if slot_end <= packet.meta.size {
                         limited_deserialize(&packet.data[slot_start..slot_end]).ok()
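In the verifier, the hand-computed offset (`size_of::<Signature>() + size_of::<ShredType>()`) gives way to the named constants `OFFSET_OF_SHRED_SLOT` / `SIZE_OF_SHRED_SLOT`. Below is a hedged sketch of the slot-collection step they feed, again with placeholder offsets and a manual little-endian decode standing in for `limited_deserialize`:

```rust
// Hedged sketch of gathering the distinct slots in a packet batch by reading a
// u64 slot field at a fixed offset. Offset/width are placeholders for the real
// OFFSET_OF_SHRED_SLOT / SIZE_OF_SHRED_SLOT; packets shorter than slot_end are skipped.
use std::collections::HashSet;

const OFFSET_OF_SHRED_SLOT: usize = 65; // placeholder offset
const SIZE_OF_SHRED_SLOT: usize = 8;

fn read_slots(packets: &[(Vec<u8>, usize)]) -> HashSet<u64> {
    packets
        .iter()
        .filter_map(|(data, size)| {
            let start = OFFSET_OF_SHRED_SLOT;
            let end = start + SIZE_OF_SHRED_SLOT;
            if end <= *size && end <= data.len() {
                let mut buf = [0u8; 8];
                buf.copy_from_slice(&data[start..end]);
                Some(u64::from_le_bytes(buf))
            } else {
                None // too short to carry a slot field
            }
        })
        .collect()
}

fn main() {
    let mut packet = vec![0u8; 128];
    packet[OFFSET_OF_SHRED_SLOT..OFFSET_OF_SHRED_SLOT + 8].copy_from_slice(&42u64.to_le_bytes());
    let slots = read_slots(&[(packet, 128)]);
    assert!(slots.contains(&42));
}
```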
@@ -13,7 +13,7 @@ use rayon::iter::IntoParallelRefMutIterator;
 use rayon::iter::ParallelIterator;
 use rayon::ThreadPool;
 use solana_ledger::bank_forks::BankForks;
-use solana_ledger::blocktree::{self, Blocktree};
+use solana_ledger::blocktree::{self, Blocktree, MAX_DATA_SHREDS_PER_SLOT};
 use solana_ledger::leader_schedule_cache::LeaderScheduleCache;
 use solana_ledger::shred::Shred;
 use solana_metrics::{inc_new_counter_debug, inc_new_counter_error};
@@ -61,6 +61,9 @@ pub fn should_retransmit_and_persist(
     } else if shred.version() != shred_version {
         inc_new_counter_debug!("streamer-recv_window-incorrect_shred_version", 1);
         false
+    } else if shred.index() >= MAX_DATA_SHREDS_PER_SLOT as u32 {
+        inc_new_counter_warn!("streamer-recv_window-shred_index_overrun", 1);
+        false
     } else {
         true
     }
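`should_retransmit_and_persist` now also rejects a parsed shred whose index is at or past the per-slot cap. A rough stand-alone sketch of the predicate (`Shred` here is a two-field stand-in, not the ledger crate's type, and the metrics counters are left out):

```rust
// Hedged sketch of the extended retransmit/persist predicate.
const MAX_DATA_SHREDS_PER_SLOT: usize = 32 * 1024;

struct Shred {
    version: u16,
    index: u32,
}

fn should_retransmit_and_persist(shred: &Shred, expected_version: u16) -> bool {
    if shred.version != expected_version {
        false // wrong shred version for this cluster
    } else if shred.index as usize >= MAX_DATA_SHREDS_PER_SLOT {
        false // index overrun: a slot can never legitimately contain this shred
    } else {
        true
    }
}

fn main() {
    assert!(should_retransmit_and_persist(&Shred { version: 1, index: 10 }, 1));
    assert!(!should_retransmit_and_persist(&Shred { version: 1, index: 40_000 }, 1));
}
```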
@@ -130,6 +133,13 @@ where
                 Shred::new_from_serialized_shred(packet.data.to_vec())
             {
                 if shred_filter(&shred, last_root) {
+                    // Mark slot as dead if the current shred is on the boundary
+                    // of max shreds per slot. However, let the current shred
+                    // get retransmitted. It'll allow peer nodes to see this shred
+                    // and trigger them to mark the slot as dead.
+                    if shred.index() >= (MAX_DATA_SHREDS_PER_SLOT - 1) as u32 {
+                        let _ = blocktree.set_dead_slot(shred.slot());
+                    }
                     packet.meta.slot = shred.slot();
                     packet.meta.seed = shred.seed();
                     Some(shred)
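The comment inserted above spells out the design choice: a shred that passes the earlier index filter but sits on the last allowed index marks the slot dead locally, yet is still accepted for retransmission so peers receive it and reach the same conclusion. A hedged sketch of that flow, with `DeadSlotStore` standing in for blocktree's `set_dead_slot` bookkeeping rather than the real API:

```rust
// Hedged sketch of the window-service boundary handling described by the comment.
use std::collections::HashSet;

const MAX_DATA_SHREDS_PER_SLOT: usize = 32 * 1024;

#[derive(Default)]
struct DeadSlotStore {
    dead: HashSet<u64>,
}

impl DeadSlotStore {
    fn set_dead_slot(&mut self, slot: u64) {
        self.dead.insert(slot);
    }
}

/// Always keeps the shred for retransmission; its only side effect is marking
/// the slot dead when the index reaches the boundary.
fn handle_shred(store: &mut DeadSlotStore, slot: u64, index: u32) -> bool {
    if index as usize >= MAX_DATA_SHREDS_PER_SLOT - 1 {
        // Boundary shred: poison the slot locally...
        store.set_dead_slot(slot);
    }
    // ...but keep forwarding it so peers see it too.
    true
}

fn main() {
    let mut store = DeadSlotStore::default();
    assert!(handle_shred(&mut store, 7, (MAX_DATA_SHREDS_PER_SLOT - 1) as u32));
    assert!(store.dead.contains(&7));
    assert!(handle_shred(&mut store, 8, 0));
    assert!(!store.dead.contains(&8));
}
```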