Increase tpu coalescing and add parameter (#15536)

Should create larger entries on average
This commit is contained in:
sakridge
2021-02-26 09:15:45 -08:00
committed by GitHub
parent 5a9896706c
commit 05409e51ce
10 changed files with 46 additions and 6 deletions

View File

@@ -26,6 +26,7 @@ impl FetchStage {
tpu_forwards_sockets: Vec<UdpSocket>,
exit: &Arc<AtomicBool>,
poh_recorder: &Arc<Mutex<PohRecorder>>,
coalesce_ms: u64,
) -> (Self, PacketReceiver) {
let (sender, receiver) = channel();
(
@@ -36,6 +37,7 @@ impl FetchStage {
&sender,
&poh_recorder,
None,
coalesce_ms,
),
receiver,
)
@@ -47,6 +49,7 @@ impl FetchStage {
sender: &PacketSender,
poh_recorder: &Arc<Mutex<PohRecorder>>,
allocated_packet_limit: Option<u32>,
coalesce_ms: u64,
) -> Self {
let tx_sockets = sockets.into_iter().map(Arc::new).collect();
let tpu_forwards_sockets = tpu_forwards_sockets.into_iter().map(Arc::new).collect();
@@ -57,6 +60,7 @@ impl FetchStage {
&sender,
&poh_recorder,
allocated_packet_limit,
coalesce_ms,
)
}
@@ -102,6 +106,7 @@ impl FetchStage {
sender: &PacketSender,
poh_recorder: &Arc<Mutex<PohRecorder>>,
limit: Option<u32>,
coalesce_ms: u64,
) -> Self {
let recycler: PacketsRecycler =
Recycler::warmed(1000, 1024, limit, "fetch_stage_recycler_shrink");
@@ -113,6 +118,7 @@ impl FetchStage {
sender.clone(),
recycler.clone(),
"fetch_stage",
coalesce_ms,
)
});
@@ -124,6 +130,7 @@ impl FetchStage {
forward_sender.clone(),
recycler.clone(),
"fetch_forward_stage",
coalesce_ms,
)
});

View File

@@ -49,6 +49,7 @@ impl GossipService {
request_sender,
Recycler::new_without_limit("gossip-receiver-recycler-shrink-stats"),
"gossip_receiver",
1,
);
let (response_sender, response_receiver) = channel();
let t_responder = streamer::responder("gossip", gossip_socket, response_receiver);

View File

@@ -32,6 +32,7 @@ impl ServeRepairService {
request_sender,
Recycler::new_without_limit("serve-repair-receiver-recycler-shrink-stats"),
"serve_repair_receiver",
1,
);
let (response_sender, response_receiver) = channel();
let t_responder =

View File

@@ -149,6 +149,7 @@ impl ShredFetchStage {
packet_sender.clone(),
recycler.clone(),
"packet_modifier",
1,
)
})
.collect();

View File

@@ -29,6 +29,8 @@ use std::{
thread,
};
pub const DEFAULT_TPU_COALESCE_MS: u64 = 5;
pub struct Tpu {
fetch_stage: FetchStage,
sigverify_stage: SigVerifyStage,
@@ -59,6 +61,7 @@ impl Tpu {
replay_vote_receiver: ReplayVoteReceiver,
replay_vote_sender: ReplayVoteSender,
bank_notification_sender: Option<BankNotificationSender>,
tpu_coalesce_ms: u64,
) -> Self {
let (packet_sender, packet_receiver) = channel();
let fetch_stage = FetchStage::new_with_sender(
@@ -70,6 +73,7 @@ impl Tpu {
// At 1024 packets per `Packet`, each packet about MTU size ~1k, this is roughly
// 20GB
Some(20_000),
tpu_coalesce_ms,
);
let (verified_sender, verified_receiver) = unbounded();

View File

@@ -28,7 +28,7 @@ use crate::{
serve_repair_service::ServeRepairService,
sigverify,
snapshot_packager_service::{PendingSnapshotPackage, SnapshotPackagerService},
tpu::Tpu,
tpu::{Tpu, DEFAULT_TPU_COALESCE_MS},
transaction_status_service::TransactionStatusService,
tvu::{Sockets, Tvu, TvuConfig},
};
@@ -126,6 +126,7 @@ pub struct ValidatorConfig {
pub warp_slot: Option<Slot>,
pub accounts_db_test_hash_calculation: bool,
pub accounts_db_use_index_hash_calculation: bool,
pub tpu_coalesce_ms: u64,
}
impl Default for ValidatorConfig {
@@ -177,6 +178,7 @@ impl Default for ValidatorConfig {
warp_slot: None,
accounts_db_test_hash_calculation: false,
accounts_db_use_index_hash_calculation: true,
tpu_coalesce_ms: DEFAULT_TPU_COALESCE_MS,
}
}
}
@@ -681,6 +683,7 @@ impl Validator {
replay_vote_receiver,
replay_vote_sender,
bank_notification_sender,
config.tpu_coalesce_ms,
);
datapoint_info!("validator-new", ("id", id.to_string(), String));