From 2510f3d3526fdbb7c441ac3c7c0ab69532035e77 Mon Sep 17 00:00:00 2001
From: Pankaj Garg
Date: Tue, 27 Aug 2019 19:28:00 -0700
Subject: [PATCH] Remove extra call to serialize in shred verify (#5698)

---
 core/src/replicator.rs       |  2 +-
 core/src/retransmit_stage.rs | 10 ++++++++--
 core/src/shred.rs            |  8 ++++++--
 core/src/window_service.rs   | 30 +++++++++++++++++++++---------
 4 files changed, 36 insertions(+), 14 deletions(-)

diff --git a/core/src/replicator.rs b/core/src/replicator.rs
index 4109b167fa..1b85305c4c 100644
--- a/core/src/replicator.rs
+++ b/core/src/replicator.rs
@@ -474,7 +474,7 @@ impl Replicator {
             repair_socket,
             &exit,
             RepairStrategy::RepairRange(repair_slot_range),
-            |_, _, _| true,
+            |_, _, _, _| true,
         );
         info!("waiting for ledger download");
         Self::wait_for_segment_download(
diff --git a/core/src/retransmit_stage.rs b/core/src/retransmit_stage.rs
index e4c717a502..8f9fde3c80 100644
--- a/core/src/retransmit_stage.rs
+++ b/core/src/retransmit_stage.rs
@@ -150,8 +150,14 @@ impl RetransmitStage {
             repair_socket,
             exit,
             repair_strategy,
-            move |id, blob, working_bank| {
-                should_retransmit_and_persist(blob, working_bank, &leader_schedule_cache, id)
+            move |id, shred, shred_buf, working_bank| {
+                should_retransmit_and_persist(
+                    shred,
+                    shred_buf,
+                    working_bank,
+                    &leader_schedule_cache,
+                    id,
+                )
             },
         );
 
diff --git a/core/src/shred.rs b/core/src/shred.rs
index 31cb10b063..a3598b2b8c 100644
--- a/core/src/shred.rs
+++ b/core/src/shred.rs
@@ -107,6 +107,11 @@ impl Shred {
     }
 
     pub fn verify(&self, pubkey: &Pubkey) -> bool {
+        let shred = bincode::serialize(&self).unwrap();
+        self.fast_verify(&shred, pubkey)
+    }
+
+    pub fn fast_verify(&self, shred_buf: &[u8], pubkey: &Pubkey) -> bool {
         let signed_payload_offset = match self {
             Shred::FirstInSlot(_)
             | Shred::FirstInFECSet(_)
@@ -119,9 +124,8 @@ impl Shred {
             }
         }
             + bincode::serialized_size(&Signature::default()).unwrap() as usize;
-        let shred = bincode::serialize(&self).unwrap();
         self.signature()
-            .verify(pubkey.as_ref(), &shred[signed_payload_offset..])
+            .verify(pubkey.as_ref(), &shred_buf[signed_payload_offset..])
     }
 }
 
diff --git a/core/src/window_service.rs b/core/src/window_service.rs
index 7abda6c75a..6a9c68ae64 100644
--- a/core/src/window_service.rs
+++ b/core/src/window_service.rs
@@ -31,6 +31,7 @@ pub fn process_shreds(shreds: Vec<Shred>, blocktree: &Arc<Blocktree>) -> Result<
 /// blob's slot
 pub fn should_retransmit_and_persist(
     shred: &Shred,
+    shred_buf: &[u8],
     bank: Option<Arc<Bank>>,
     leader_schedule_cache: &Arc<LeaderScheduleCache>,
     my_pubkey: &Pubkey,
@@ -44,7 +45,7 @@ pub fn should_retransmit_and_persist(
         if leader_id == *my_pubkey {
             inc_new_counter_debug!("streamer-recv_window-circular_transmission", 1);
             false
-        } else if !shred.verify(&leader_id) {
+        } else if !shred.fast_verify(&shred_buf, &leader_id) {
             inc_new_counter_debug!("streamer-recv_window-invalid_signature", 1);
             false
         } else {
@@ -64,7 +65,7 @@ fn recv_window(
     shred_filter: F,
 ) -> Result<()>
 where
-    F: Fn(&Shred) -> bool,
+    F: Fn(&Shred, &[u8]) -> bool,
     F: Sync,
 {
     let timer = Duration::from_millis(200);
@@ -81,7 +82,7 @@ where
         for (i, packet) in packets.packets.iter_mut().enumerate() {
             if let Ok(s) = bincode::deserialize(&packet.data) {
                 let shred: Shred = s;
-                if shred_filter(&shred) {
+                if shred_filter(&shred, &packet.data) {
                     packet.meta.slot = shred.slot();
                     packet.meta.seed = shred.seed();
                     shreds.push(shred);
@@ -157,7 +158,7 @@ impl WindowService {
     ) -> WindowService
     where
         F: 'static
-            + Fn(&Pubkey, &Shred, Option<Arc<Bank>>) -> bool
+            + Fn(&Pubkey, &Shred, &[u8], Option<Arc<Bank>>) -> bool
             + std::marker::Send
            + std::marker::Sync,
     {
@@ -196,10 +197,11 @@ impl WindowService {
                         &id,
                         &r,
                         &retransmit,
-                        |shred| {
+                        |shred, shred_buf| {
                             shred_filter(
                                 &id,
                                 shred,
+                                shred_buf,
                                 bank_forks
                                     .as_ref()
                                     .map(|bank_forks| bank_forks.read().unwrap().working_bank()),
@@ -310,10 +312,20 @@ mod test {
         let entry = Entry::default();
         let mut shreds = local_entries_to_shred(vec![entry], &Arc::new(leader_keypair));
+        let shred_bufs: Vec<_> = shreds
+            .iter()
+            .map(|s| bincode::serialize(s).unwrap())
+            .collect();
 
         // with a Bank for slot 0, blob continues
         assert_eq!(
-            should_retransmit_and_persist(&shreds[0], Some(bank.clone()), &cache, &me_id),
+            should_retransmit_and_persist(
+                &shreds[0],
+                &shred_bufs[0],
+                Some(bank.clone()),
+                &cache,
+                &me_id
+            ),
             true
         );
 
@@ -328,7 +340,7 @@ mod test {
         // with a Bank and no idea who leader is, blob gets thrown out
         shreds[0].set_slot(MINIMUM_SLOTS_PER_EPOCH as u64 * 3);
         assert_eq!(
-            should_retransmit_and_persist(&shreds[0], Some(bank), &cache, &me_id),
+            should_retransmit_and_persist(&shreds[0], &shred_bufs[0], Some(bank), &cache, &me_id),
             false
         );
 
@@ -388,7 +400,7 @@ mod test {
             Arc::new(leader_node.sockets.repair),
             &exit,
             repair_strategy,
-            |_, _, _| true,
+            |_, _, _, _| true,
         );
         let t_responder = {
            let (s_responder, r_responder) = channel();
@@ -477,7 +489,7 @@ mod test {
             Arc::new(leader_node.sockets.repair),
             &exit,
             repair_strategy,
-            |_, _, _| true,
+            |_, _, _, _| true,
         );
         let t_responder = {
             let (s_responder, r_responder) = channel();
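What the patch changes, in short: `Shred::verify` used to call `bincode::serialize` on a shred that had just been deserialized from `packet.data`, so every signature check paid for a redundant serialization. The new `Shred::fast_verify` checks the signature against a caller-supplied byte buffer, the window service threads the raw `&packet.data` through the `shred_filter` closures (hence the extra `&[u8]` parameter and the four-argument `|_, _, _, _| true` test stubs), and `verify` survives as a convenience wrapper that serializes once and defers to `fast_verify`.

Below is a minimal, self-contained sketch of the pattern with hypothetical stand-in types; it is not solana's real `Shred`/`Packet` API, and the trivial byte check stands in for the actual ed25519 signature verification over the signed payload:

// Sketch of the pattern: the parsed value and its original wire bytes
// travel together, so the signature check never has to re-serialize.
// All types here are hypothetical stand-ins, not solana's real API.
use std::convert::TryInto;

struct Packet {
    data: Vec<u8>,
}

struct Shred {
    slot: u64,
}

impl Shred {
    // Stand-in for bincode::deserialize: parse a slot out of the wire bytes.
    fn deserialize(data: &[u8]) -> Option<Shred> {
        let bytes: [u8; 8] = data.get(..8)?.try_into().ok()?;
        Some(Shred {
            slot: u64::from_le_bytes(bytes),
        })
    }

    // Hot path: verify against the caller's buffer, with no re-serialization.
    // A trivial placeholder predicate stands in for the real ed25519 check.
    fn fast_verify(&self, shred_buf: &[u8]) -> bool {
        !shred_buf.is_empty()
    }
}

// Mirrors recv_window: the filter sees both the shred and its raw bytes.
fn recv_window<F>(packets: &[Packet], shred_filter: F) -> Vec<Shred>
where
    F: Fn(&Shred, &[u8]) -> bool,
{
    packets
        .iter()
        .filter_map(|p| Shred::deserialize(&p.data).filter(|s| shred_filter(s, &p.data)))
        .collect()
}

fn main() {
    let packets = vec![Packet {
        data: 7u64.to_le_bytes().to_vec(),
    }];
    let shreds = recv_window(&packets, |shred, shred_buf| shred.fast_verify(shred_buf));
    assert_eq!(shreds.len(), 1);
    assert_eq!(shreds[0].slot, 7);
}

The point carried by the sketch is the same one the patch exploits: the receive path already holds the exact wire bytes it just deserialized, so passing those bytes through to the verifier removes a per-packet re-serialization and checks the signature over exactly what arrived on the wire.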