Add limit and shrink policy for recycler (#15320)
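This change threads an optional allocation limit and a named shrink-stats counter through every recycler construction site: `PacketsRecycler::default()` becomes `PacketsRecycler::new_without_limit(name)`, `Recycler::warmed(count, size)` gains a `limit: Option<u32>` and a stats-name argument, and the `Packets::new_with_recycler*` constructors become fallible (returning `Option`), so callers must `unwrap()` or propagate `None` when the budget is exhausted. Below is a minimal sketch of the idea, not the actual solana-perf implementation; the field names, lock layout, and simplified `warmed` body are assumptions made for illustration.

// Minimal sketch of a recycler with an optional allocation limit and a named
// shrink-stats counter, mirroring the API shape in this diff. The real
// solana-perf Recycler differs internally; fields and bodies here are
// illustrative assumptions.
use std::sync::{Arc, Mutex};

pub struct Recycler<T: Default> {
    gc: Arc<Mutex<Vec<T>>>,     // pool of objects waiting for reuse
    allocated: Arc<Mutex<u32>>, // objects currently handed out
    limit: Option<u32>,         // None = unbounded (the old behavior)
    shrink_stats_name: &'static str,
}

impl<T: Default> Recycler<T> {
    pub fn new_without_limit(shrink_stats_name: &'static str) -> Self {
        Self::new(None, shrink_stats_name)
    }

    pub fn new(limit: Option<u32>, shrink_stats_name: &'static str) -> Self {
        Self {
            gc: Arc::new(Mutex::new(Vec::new())),
            allocated: Arc::new(Mutex::new(0)),
            limit,
            shrink_stats_name,
        }
    }

    // Pre-populates the pool; `_size` stands in for per-object warm-up
    // (the real code pre-sizes each object's buffers).
    pub fn warmed(
        count: usize,
        _size: usize,
        limit: Option<u32>,
        shrink_stats_name: &'static str,
    ) -> Self {
        let recycler = Self::new(limit, shrink_stats_name);
        recycler
            .gc
            .lock()
            .unwrap()
            .extend((0..count).map(|_| T::default()));
        recycler
    }

    // Fallible allocation: returns None once `limit` live objects are out,
    // which is why call sites in this diff now `unwrap()` or stay `Option`.
    pub fn allocate(&self) -> Option<T> {
        let mut allocated = self.allocated.lock().unwrap();
        if let Some(limit) = self.limit {
            if *allocated >= limit {
                return None; // over budget; the caller decides how to degrade
            }
        }
        *allocated += 1;
        Some(self.gc.lock().unwrap().pop().unwrap_or_default())
    }

    pub fn recycle(&self, item: T) {
        *self.allocated.lock().unwrap() -= 1;
        self.gc.lock().unwrap().push(item);
        // A shrink policy would trim `gc` back toward a target size here and
        // report the work as datapoints tagged with `shrink_stats_name`.
        let _ = self.shrink_stats_name;
    }
}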
@@ -1853,7 +1853,7 @@ impl ClusterInfo {
         let mut last_contact_info_trace = timestamp();
         let mut last_contact_info_save = timestamp();
         let mut entrypoints_processed = false;
-        let recycler = PacketsRecycler::default();
+        let recycler = PacketsRecycler::new_without_limit("gossip-recycler-shrink-stats");
         let crds_data = vec![
             CrdsData::Version(Version::new(self.id())),
             CrdsData::NodeInstance(self.instance.with_wallclock(timestamp())),
@@ -2104,7 +2104,7 @@ impl ClusterInfo {
             .process_pull_requests(callers.cloned(), timestamp());
         let output_size_limit =
             self.update_data_budget(stakes.len()) / PULL_RESPONSE_MIN_SERIALIZED_SIZE;
-        let mut packets = Packets::new_with_recycler(recycler.clone(), 64, "handle_pull_requests");
+        let mut packets = Packets::new_with_recycler(recycler.clone(), 64).unwrap();
         let (caller_and_filters, addrs): (Vec<_>, Vec<_>) = {
             let mut rng = rand::thread_rng();
             let check_pull_request =
@@ -2389,8 +2389,7 @@ impl ClusterInfo {
         if packets.is_empty() {
             None
         } else {
-            let packets =
-                Packets::new_with_recycler_data(recycler, "handle_ping_messages", packets);
+            let packets = Packets::new_with_recycler_data(recycler, packets).unwrap();
             Some(packets)
         }
     }
@@ -3019,7 +3018,8 @@ impl ClusterInfo {
         exit: &Arc<AtomicBool>,
     ) -> JoinHandle<()> {
         let exit = exit.clone();
-        let recycler = PacketsRecycler::default();
+        let recycler =
+            PacketsRecycler::new_without_limit("cluster-info-listen-recycler-shrink-stats");
         Builder::new()
             .name("solana-listen".to_string())
             .spawn(move || {
@@ -3464,7 +3464,7 @@ mod tests {
             .iter()
             .map(|ping| Pong::new(ping, &this_node).unwrap())
             .collect();
-        let recycler = PacketsRecycler::default();
+        let recycler = PacketsRecycler::new_without_limit("");
         let packets = cluster_info
             .handle_ping_messages(
                 remote_nodes

@@ -29,7 +29,14 @@ impl FetchStage {
     ) -> (Self, PacketReceiver) {
         let (sender, receiver) = channel();
         (
-            Self::new_with_sender(sockets, tpu_forwards_sockets, exit, &sender, &poh_recorder),
+            Self::new_with_sender(
+                sockets,
+                tpu_forwards_sockets,
+                exit,
+                &sender,
+                &poh_recorder,
+                None,
+            ),
             receiver,
         )
     }
@@ -39,6 +46,7 @@ impl FetchStage {
         exit: &Arc<AtomicBool>,
         sender: &PacketSender,
         poh_recorder: &Arc<Mutex<PohRecorder>>,
+        allocated_packet_limit: Option<u32>,
     ) -> Self {
         let tx_sockets = sockets.into_iter().map(Arc::new).collect();
         let tpu_forwards_sockets = tpu_forwards_sockets.into_iter().map(Arc::new).collect();
@@ -48,6 +56,7 @@ impl FetchStage {
             exit,
             &sender,
             &poh_recorder,
+            allocated_packet_limit,
         )
     }

@@ -92,8 +101,10 @@ impl FetchStage {
         exit: &Arc<AtomicBool>,
         sender: &PacketSender,
         poh_recorder: &Arc<Mutex<PohRecorder>>,
+        limit: Option<u32>,
     ) -> Self {
-        let recycler: PacketsRecycler = Recycler::warmed(1000, 1024);
+        let recycler: PacketsRecycler =
+            Recycler::warmed(1000, 1024, limit, "fetch_stage_recycler_shrink");

         let tpu_threads = sockets.into_iter().map(|socket| {
             streamer::receiver(

@@ -47,7 +47,7 @@ impl GossipService {
             gossip_socket.clone(),
             &exit,
             request_sender,
-            Recycler::default(),
+            Recycler::new_without_limit("gossip-receiver-recycler-shrink-stats"),
             "gossip_receiver",
         );
         let (response_sender, response_receiver) = channel();

@@ -279,7 +279,7 @@ impl ServeRepair {
         exit: &Arc<AtomicBool>,
     ) -> JoinHandle<()> {
         let exit = exit.clone();
-        let recycler = PacketsRecycler::default();
+        let recycler = PacketsRecycler::new_without_limit("serve-repair-recycler-shrink-stats");
         Builder::new()
             .name("solana-repair-listen".to_string())
             .spawn(move || {
@@ -498,11 +498,7 @@ impl ServeRepair {

             if let Some(packet) = packet {
                 inc_new_counter_debug!("serve_repair-window-request-ledger", 1);
-                return Some(Packets::new_with_recycler_data(
-                    recycler,
-                    "run_window_request",
-                    vec![packet],
-                ));
+                return Some(Packets::new_with_recycler_data(recycler, vec![packet])).unwrap();
             }
         }

@@ -538,11 +534,7 @@ impl ServeRepair {
                 from_addr,
                 nonce,
             )?;
-            return Some(Packets::new_with_recycler_data(
-                recycler,
-                "run_highest_window_request",
-                vec![packet],
-            ));
+            return Packets::new_with_recycler_data(recycler, vec![packet]);
         }
         None
     }
@@ -555,7 +547,7 @@ impl ServeRepair {
         max_responses: usize,
         nonce: Nonce,
     ) -> Option<Packets> {
-        let mut res = Packets::new_with_recycler(recycler.clone(), 64, "run_orphan");
+        let mut res = Packets::new_with_recycler(recycler.clone(), 64).unwrap();
         if let Some(blockstore) = blockstore {
             // Try to find the next "n" parent slots of the input slot
             while let Ok(Some(meta)) = blockstore.meta(slot) {
@@ -609,7 +601,7 @@ mod tests {

     /// test run_window_request responds with the right shred, and do not overrun
     fn run_highest_window_request(slot: Slot, num_slots: u64, nonce: Nonce) {
-        let recycler = PacketsRecycler::default();
+        let recycler = PacketsRecycler::new_without_limit("");
         solana_logger::setup();
         let ledger_path = get_tmp_ledger_path!();
         {
@@ -677,7 +669,7 @@ mod tests {

     /// test window requests respond with the right shred, and do not overrun
     fn run_window_request(slot: Slot, nonce: Nonce) {
-        let recycler = PacketsRecycler::default();
+        let recycler = PacketsRecycler::new_without_limit("");
         solana_logger::setup();
         let ledger_path = get_tmp_ledger_path!();
         {
@@ -845,7 +837,7 @@ mod tests {

     fn run_orphan(slot: Slot, num_slots: u64, nonce: Nonce) {
         solana_logger::setup();
-        let recycler = PacketsRecycler::default();
+        let recycler = PacketsRecycler::new_without_limit("");
         let ledger_path = get_tmp_ledger_path!();
         {
             let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
@@ -916,7 +908,7 @@ mod tests {
     #[test]
     fn run_orphan_corrupted_shred_size() {
         solana_logger::setup();
-        let recycler = PacketsRecycler::default();
+        let recycler = PacketsRecycler::new_without_limit("");
         let ledger_path = get_tmp_ledger_path!();
         {
             let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());

@@ -30,7 +30,7 @@ impl ServeRepairService {
             serve_repair_socket.clone(),
             &exit,
             request_sender,
-            Recycler::default(),
+            Recycler::new_without_limit("serve-repair-receiver-recycler-shrink-stats"),
             "serve_repair_receiver",
         );
         let (response_sender, response_receiver) = channel();

@@ -167,8 +167,10 @@ impl ShredFetchStage {
         sender: &PacketSender,
         bank_forks: Option<Arc<RwLock<BankForks>>>,
         exit: &Arc<AtomicBool>,
+        limit: Option<u32>,
     ) -> Self {
-        let recycler: PacketsRecycler = Recycler::warmed(100, 1024);
+        let recycler: PacketsRecycler =
+            Recycler::warmed(100, 1024, limit, "shred_fetch_stage_recycler_shrink");

         let (mut tvu_threads, tvu_filter) = Self::packet_modifier(
             sockets,

@@ -23,8 +23,8 @@ impl Default for TransactionSigVerifier {
     fn default() -> Self {
         init();
         Self {
-            recycler: Recycler::warmed(50, 4096),
-            recycler_out: Recycler::warmed(50, 4096),
+            recycler: Recycler::warmed(50, 4096, None, ""),
+            recycler_out: Recycler::warmed(50, 4096, None, ""),
         }
     }
 }

@@ -26,7 +26,10 @@ impl ShredSigVerifier {
         Self {
             bank_forks,
             leader_schedule_cache,
-            recycler_cache: RecyclerCache::warmed(),
+            recycler_cache: RecyclerCache::warmed(
+                "shred-sig-verifier-offsets-recycler-shrink-stats",
+                "shred-sig-verifier-buffer-recycler-shrink-stats",
+            ),
         }
     }
     fn read_slots(batches: &[Packets]) -> HashSet<u64> {

@@ -67,6 +67,9 @@ impl Tpu {
             &exit,
             &packet_sender,
             &poh_recorder,
+            // At 1024 packets per `Packet`, each packet about MTU size ~1k, this is roughly
+            // 20GB
+            Some(20_000),
         );
         let (verified_sender, verified_receiver) = unbounded();

@@ -145,6 +145,7 @@ impl Tvu {
             &fetch_sender,
             Some(bank_forks.clone()),
             &exit,
+            None,
         );

         let (verified_sender, verified_receiver) = unbounded();
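The call-site pattern across the diff: the gossip and serve-repair listeners stay unbounded (`new_without_limit`), while the TPU fetch stage gets a hard cap of `Some(20_000)`; per the comment in the Tpu hunk, at 1024 packets per batch and roughly 1 KB (MTU-sized) per packet, 20,000 batches works out to about 20 GB of packet memory. A hypothetical usage example against the sketch at the top of this page, not the real solana-perf API:

// Hypothetical usage of the Recycler sketch above (not the solana-perf types).
fn main() {
    // Unbounded, as the gossip and serve-repair listeners use:
    let unbounded: Recycler<Vec<u8>> = Recycler::new_without_limit("demo-shrink-stats");
    assert!(unbounded.allocate().is_some());

    // Bounded, as the TPU fetch stage configures with `Some(20_000)`:
    let bounded: Recycler<Vec<u8>> = Recycler::warmed(4, 1024, Some(1), "demo-shrink-stats");
    let buf = bounded.allocate().expect("first allocation is under the limit");
    assert!(bounded.allocate().is_none()); // limit hit: allocation fails
    bounded.recycle(buf); // returning the buffer frees budget
    assert!(bounded.allocate().is_some());
}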