Revert "Add limit and shrink policy for recycler (#15320)"

This reverts commit c2e8814dce.
commit e405747409
parent 6907a2366e
Author: behzad nouri
Date:   2021-04-07 11:15:38 -04:00
24 changed files with 129 additions and 491 deletions

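For orientation before the per-file hunks: the reverted commit gave every recycler an allocation limit and a shrink policy with per-recycler stats names, which is why the removed side of each hunk below has `new_without_limit("...-shrink-stats")`, extra `limit: Option<u32>` parameters, and `.unwrap()` on allocations, while the restored side goes back to `default()`/`warmed(count, size)` and infallible allocation tagged with a per-call-site debug name. Below is a minimal stand-in sketch of the restored, unlimited pool; it is my own simplification over `Vec<u8>` buffers, not the real generic solana-perf type:

use std::sync::{Arc, Mutex};

// Hypothetical stand-in for the restored recycler: an unbounded object pool
// with no allocation limit and no shrink policy, pooling byte buffers.
#[derive(Clone, Default)]
pub struct Recycler {
    pool: Arc<Mutex<Vec<Vec<u8>>>>,
}

impl Recycler {
    // Restored `warmed(count, size)` shape: pre-populate the pool so the
    // first allocations on the hot path skip the system allocator.
    pub fn warmed(count: usize, size: usize) -> Self {
        let recycler = Self::default();
        {
            let mut pool = recycler.pool.lock().unwrap();
            for _ in 0..count {
                pool.push(Vec::with_capacity(size));
            }
        }
        recycler
    }

    // Infallible allocation: reuse a pooled buffer or make a fresh one.
    // In the reverted version this returned Option and could fail once the
    // pool's allocation limit was exhausted, hence the `.unwrap()`s below.
    pub fn allocate(&self) -> Vec<u8> {
        self.pool.lock().unwrap().pop().unwrap_or_default()
    }

    // Return a buffer for reuse; with no limit the pool can grow without
    // bound, which is the trade-off this revert reinstates.
    pub fn recycle(&self, mut buf: Vec<u8>) {
        buf.clear();
        self.pool.lock().unwrap().push(buf);
    }
}

fn main() {
    let recycler = Recycler::warmed(1000, 1024);
    let buf = recycler.allocate();
    recycler.recycle(buf);
}
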
View File

@@ -1923,7 +1923,7 @@ impl ClusterInfo {
 let mut last_contact_info_trace = timestamp();
 let mut last_contact_info_save = timestamp();
 let mut entrypoints_processed = false;
-let recycler = PacketsRecycler::new_without_limit("gossip-recycler-shrink-stats");
+let recycler = PacketsRecycler::default();
 let crds_data = vec![
 CrdsData::Version(Version::new(self.id())),
 CrdsData::NodeInstance(self.instance.with_wallclock(timestamp())),
@@ -2187,7 +2187,7 @@ impl ClusterInfo {
 .process_pull_requests(callers.cloned(), timestamp());
 let output_size_limit =
 self.update_data_budget(stakes.len()) / PULL_RESPONSE_MIN_SERIALIZED_SIZE;
-let mut packets = Packets::new_with_recycler(recycler.clone(), 64).unwrap();
+let mut packets = Packets::new_with_recycler(recycler.clone(), 64, "handle_pull_requests");
 let (caller_and_filters, addrs): (Vec<_>, Vec<_>) = {
 let mut rng = rand::thread_rng();
 let check_pull_request =
@@ -2472,7 +2472,8 @@ impl ClusterInfo {
 if packets.is_empty() {
 None
 } else {
-let packets = Packets::new_with_recycler_data(recycler, packets).unwrap();
+let packets =
+Packets::new_with_recycler_data(recycler, "handle_ping_messages", packets);
 Some(packets)
 }
 }
@@ -3164,8 +3165,7 @@ impl ClusterInfo {
 exit: &Arc<AtomicBool>,
 ) -> JoinHandle<()> {
 let exit = exit.clone();
-let recycler =
-PacketsRecycler::new_without_limit("cluster-info-listen-recycler-shrink-stats");
+let recycler = PacketsRecycler::default();
 Builder::new()
 .name("solana-listen".to_string())
 .spawn(move || {
@@ -3611,7 +3611,7 @@ mod tests {
 .iter()
 .map(|ping| Pong::new(ping, &this_node).unwrap())
 .collect();
-let recycler = PacketsRecycler::new_without_limit("");
+let recycler = PacketsRecycler::default();
 let packets = cluster_info
 .handle_ping_messages(
 remote_nodes

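The four cluster_info hunks above are the same mechanical rewrite around the batch constructor. Here is a stub-level contrast that compiles on its own; the `_limited` suffix and the empty bodies are mine, and only the arities, the `Option` return, and the debug-name argument come from the hunks:

pub struct Packets;
#[derive(Clone, Default)]
pub struct PacketsRecycler;

impl Packets {
    // Reverted (#15320) shape: the recycler enforces a limit, so building a
    // batch may fail, and call sites immediately `.unwrap()` the Option.
    pub fn new_with_recycler_limited(_r: PacketsRecycler, _capacity: usize) -> Option<Packets> {
        Some(Packets)
    }

    // Restored shape: infallible, with a per-call-site debug name replacing
    // the per-recycler stats name ("handle_pull_requests" above).
    pub fn new_with_recycler(_r: PacketsRecycler, _capacity: usize, _name: &'static str) -> Packets {
        Packets
    }
}

fn main() {
    let recycler = PacketsRecycler::default();
    let _packets = Packets::new_with_recycler(recycler, 64, "handle_pull_requests");
}
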
View File

@@ -35,7 +35,6 @@ impl FetchStage {
 exit,
 &sender,
 &poh_recorder,
-None,
 coalesce_ms,
 ),
 receiver,
@@ -47,7 +46,6 @@ impl FetchStage {
 exit: &Arc<AtomicBool>,
 sender: &PacketSender,
 poh_recorder: &Arc<Mutex<PohRecorder>>,
-allocated_packet_limit: Option<u32>,
 coalesce_ms: u64,
 ) -> Self {
 let tx_sockets = sockets.into_iter().map(Arc::new).collect();
@@ -58,7 +56,6 @@ impl FetchStage {
 exit,
 &sender,
 &poh_recorder,
-allocated_packet_limit,
 coalesce_ms,
 )
 }
@@ -104,11 +101,9 @@ impl FetchStage {
 exit: &Arc<AtomicBool>,
 sender: &PacketSender,
 poh_recorder: &Arc<Mutex<PohRecorder>>,
-limit: Option<u32>,
 coalesce_ms: u64,
 ) -> Self {
-let recycler: PacketsRecycler =
-Recycler::warmed(1000, 1024, limit, "fetch_stage_recycler_shrink");
+let recycler: PacketsRecycler = Recycler::warmed(1000, 1024);
 let tpu_threads = sockets.into_iter().map(|socket| {
 streamer::receiver(

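The FetchStage hunks (and the ShredFetchStage hunks further down) delete pure plumbing: a limit that each constructor accepted only to forward to `Recycler::warmed`. A minimal sketch of that pattern, with hypothetical layer names; only the parameter type and the two `Recycler::warmed` arities are taken from the diff:

struct Recycler;

impl Recycler {
    // Reverted shape: warmed(count, size, limit, stats_name).
    fn warmed_limited(_count: usize, _size: usize, _limit: Option<u32>, _name: &'static str) -> Self {
        Recycler
    }
    // Restored shape: warmed(count, size).
    fn warmed(_count: usize, _size: usize) -> Self {
        Recycler
    }
}

// Reverted plumbing: every layer forwards `limit` untouched until it
// reaches the recycler at the bottom.
fn new_limited(limit: Option<u32>) -> Recycler {
    new_multi_socket_limited(limit)
}
fn new_multi_socket_limited(limit: Option<u32>) -> Recycler {
    Recycler::warmed_limited(1000, 1024, limit, "fetch_stage_recycler_shrink")
}

// Restored plumbing: the parameter vanishes from every signature above it.
fn new_multi_socket() -> Recycler {
    Recycler::warmed(1000, 1024)
}

fn main() {
    let _ = new_limited(Some(20_000));
    let _ = new_multi_socket();
}
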
View File

@@ -47,7 +47,7 @@ impl GossipService {
 gossip_socket.clone(),
 &exit,
 request_sender,
-Recycler::new_without_limit("gossip-receiver-recycler-shrink-stats"),
+Recycler::default(),
 "gossip_receiver",
 1,
 );

View File

@@ -278,7 +278,7 @@ impl ServeRepair {
 exit: &Arc<AtomicBool>,
 ) -> JoinHandle<()> {
 let exit = exit.clone();
-let recycler = PacketsRecycler::new_without_limit("serve-repair-recycler-shrink-stats");
+let recycler = PacketsRecycler::default();
 Builder::new()
 .name("solana-repair-listen".to_string())
 .spawn(move || {
@@ -490,7 +490,11 @@ impl ServeRepair {
 if let Some(packet) = packet {
 inc_new_counter_debug!("serve_repair-window-request-ledger", 1);
-return Some(Packets::new_with_recycler_data(recycler, vec![packet])).unwrap();
+return Some(Packets::new_with_recycler_data(
+recycler,
+"run_window_request",
+vec![packet],
+));
 }
 }
@@ -526,7 +530,11 @@ impl ServeRepair {
 from_addr,
 nonce,
 )?;
-return Packets::new_with_recycler_data(recycler, vec![packet]);
+return Some(Packets::new_with_recycler_data(
+recycler,
+"run_highest_window_request",
+vec![packet],
+));
 }
 None
 }
@@ -539,7 +547,7 @@ impl ServeRepair {
 max_responses: usize,
 nonce: Nonce,
 ) -> Option<Packets> {
-let mut res = Packets::new_with_recycler(recycler.clone(), 64).unwrap();
+let mut res = Packets::new_with_recycler(recycler.clone(), 64, "run_orphan");
 if let Some(blockstore) = blockstore {
 // Try to find the next "n" parent slots of the input slot
 while let Ok(Some(meta)) = blockstore.meta(slot) {
@@ -593,7 +601,7 @@ mod tests {
 /// test run_window_request responds with the right shred, and do not overrun
 fn run_highest_window_request(slot: Slot, num_slots: u64, nonce: Nonce) {
-let recycler = PacketsRecycler::new_without_limit("");
+let recycler = PacketsRecycler::default();
 solana_logger::setup();
 let ledger_path = get_tmp_ledger_path!();
 {
@@ -661,7 +669,7 @@ mod tests {
 /// test window requests respond with the right shred, and do not overrun
 fn run_window_request(slot: Slot, nonce: Nonce) {
-let recycler = PacketsRecycler::new_without_limit("");
+let recycler = PacketsRecycler::default();
 solana_logger::setup();
 let ledger_path = get_tmp_ledger_path!();
 {
@@ -829,7 +837,7 @@ mod tests {
 fn run_orphan(slot: Slot, num_slots: u64, nonce: Nonce) {
 solana_logger::setup();
-let recycler = PacketsRecycler::new_without_limit("");
+let recycler = PacketsRecycler::default();
 let ledger_path = get_tmp_ledger_path!();
 {
 let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
@@ -900,7 +908,7 @@ mod tests {
 #[test]
 fn run_orphan_corrupted_shred_size() {
 solana_logger::setup();
-let recycler = PacketsRecycler::new_without_limit("");
+let recycler = PacketsRecycler::default();
 let ledger_path = get_tmp_ledger_path!();
 {
 let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());

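The serve_repair hunks show the knock-on effect on control flow: with a limited recycler, `Packets::new_with_recycler_data` itself returned an `Option`, so a handler could return it directly (or, as in the first hunk, wrap and unwrap it redundantly), whereas the restored infallible constructor makes the handler wrap the result in `Some(...)`. A self-contained sketch with stub types; the `_limited` suffix is mine:

pub struct Packet;
pub struct Packets;
pub struct PacketsRecycler;

// Reverted (#15320): allocation against a limited recycler can fail.
fn new_with_recycler_data_limited(_r: PacketsRecycler, _data: Vec<Packet>) -> Option<Packets> {
    Some(Packets)
}

// Restored: infallible, tagged with a per-call-site debug name.
fn new_with_recycler_data(_r: PacketsRecycler, _name: &'static str, _data: Vec<Packet>) -> Packets {
    Packets
}

// Reverted flow: the Option propagates straight out of the request handler.
fn run_highest_window_request_limited(r: PacketsRecycler, packet: Packet) -> Option<Packets> {
    new_with_recycler_data_limited(r, vec![packet])
}

// Restored flow: the handler wraps the infallible result itself.
fn run_highest_window_request(r: PacketsRecycler, packet: Packet) -> Option<Packets> {
    Some(new_with_recycler_data(r, "run_highest_window_request", vec![packet]))
}

fn main() {
    let _ = run_highest_window_request(PacketsRecycler, Packet);
}
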
View File

@@ -30,7 +30,7 @@ impl ServeRepairService {
 serve_repair_socket.clone(),
 &exit,
 request_sender,
-Recycler::new_without_limit("serve-repair-receiver-recycler-shrink-stats"),
+Recycler::default(),
 "serve_repair_receiver",
 1,
 );

View File

@@ -168,10 +168,8 @@ impl ShredFetchStage {
 sender: &PacketSender,
 bank_forks: Option<Arc<RwLock<BankForks>>>,
 exit: &Arc<AtomicBool>,
-limit: Option<u32>,
 ) -> Self {
-let recycler: PacketsRecycler =
-Recycler::warmed(100, 1024, limit, "shred_fetch_stage_recycler_shrink");
+let recycler: PacketsRecycler = Recycler::warmed(100, 1024);
 let (mut tvu_threads, tvu_filter) = Self::packet_modifier(
 sockets,

View File

@@ -23,8 +23,8 @@ impl Default for TransactionSigVerifier {
 fn default() -> Self {
 init();
 Self {
-recycler: Recycler::warmed(50, 4096, None, ""),
-recycler_out: Recycler::warmed(50, 4096, None, ""),
+recycler: Recycler::warmed(50, 4096),
+recycler_out: Recycler::warmed(50, 4096),
 }
 }
 }

View File

@@ -25,10 +25,7 @@ impl ShredSigVerifier {
 Self {
 bank_forks,
 leader_schedule_cache,
-recycler_cache: RecyclerCache::warmed(
-"shred-sig-verifier-offsets-recycler-shrink-stats",
-"shred-sig-verifier-buffer-recycler-shrink-stats",
-),
+recycler_cache: RecyclerCache::warmed(),
 }
 }
 fn read_slots(batches: &[Packets]) -> HashSet<u64> {

View File

@@ -75,9 +75,6 @@ impl Tpu {
 &exit,
 &packet_sender,
 &poh_recorder,
-// At 1024 packets per `Packet`, each packet about MTU size ~1k, this is roughly
-// 20GB
-Some(20_000),
 tpu_coalesce_ms,
 );
 let (verified_sender, verified_receiver) = unbounded();

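The comment deleted from Tpu above justified the `Some(20_000)` limit with back-of-the-envelope arithmetic, and it checks out under the comment's own assumptions of 1,024 packets per batch at roughly 1 KiB each:

fn main() {
    const BATCH_LIMIT: u64 = 20_000; // the reverted Some(20_000) recycler limit
    const PACKETS_PER_BATCH: u64 = 1_024; // "1024 packets per `Packet`"
    const BYTES_PER_PACKET: u64 = 1_024; // "about MTU size ~1k"

    let worst_case = BATCH_LIMIT * PACKETS_PER_BATCH * BYTES_PER_PACKET;
    // 20_000 * 1_024 * 1_024 = 20_971_520_000 bytes ≈ 19.5 GiB, i.e. "roughly 20GB".
    println!("worst case: {} bytes ≈ {:.1} GiB", worst_case, worst_case as f64 / (1u64 << 30) as f64);
}

Reverting the limit removes this roughly 20 GB ceiling on the TPU fetch recycler; the pool can again grow with demand.
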
View File

@@ -151,7 +151,6 @@ impl Tvu {
 &fetch_sender,
 Some(bank_forks.clone()),
 &exit,
-None,
 );
 let (verified_sender, verified_receiver) = unbounded();