diff --git a/core/src/banking_stage.rs b/core/src/banking_stage.rs
index bca2dad305..3dd2d6ede5 100644
--- a/core/src/banking_stage.rs
+++ b/core/src/banking_stage.rs
@@ -936,7 +936,7 @@ pub fn create_test_recorder(
         bank.tick_height(),
         bank.last_blockhash(),
         bank.slot(),
-        Some(4),
+        Some((4, 4)),
         bank.ticks_per_slot(),
         &Pubkey::default(),
         blocktree,
@@ -1604,7 +1604,7 @@ mod tests {
             bank.tick_height(),
             bank.last_blockhash(),
             bank.slot(),
-            Some(4),
+            Some((4, 4)),
             bank.ticks_per_slot(),
             &pubkey,
             &Arc::new(blocktree),
@@ -1692,7 +1692,7 @@ mod tests {
             bank.tick_height(),
             bank.last_blockhash(),
             bank.slot(),
-            Some(4),
+            Some((4, 4)),
             bank.ticks_per_slot(),
             &pubkey,
             &Arc::new(blocktree),
@@ -1792,7 +1792,7 @@ mod tests {
             bank.tick_height(),
             bank.last_blockhash(),
             bank.slot(),
-            Some(4),
+            Some((4, 4)),
             bank.ticks_per_slot(),
             &Pubkey::new_rand(),
             &Arc::new(blocktree),
diff --git a/core/src/leader_schedule_cache.rs b/core/src/leader_schedule_cache.rs
index a55d50751c..8484866cb4 100644
--- a/core/src/leader_schedule_cache.rs
+++ b/core/src/leader_schedule_cache.rs
@@ -67,15 +67,17 @@ impl LeaderScheduleCache {
         }
     }

-    /// Return the next slot after the given current_slot that the given node will be leader
+    /// Return the (next slot, last slot) after the given current_slot that the given node will be leader
     pub fn next_leader_slot(
         &self,
         pubkey: &Pubkey,
         mut current_slot: u64,
         bank: &Bank,
         blocktree: Option<&Blocktree>,
-    ) -> Option<u64> {
+    ) -> Option<(u64, u64)> {
         let (mut epoch, mut start_index) = bank.get_epoch_and_slot_index(current_slot + 1);
+        let mut first_slot = None;
+        let mut last_slot = current_slot;
         while let Some(leader_schedule) = self.get_epoch_schedule_else_compute(epoch, bank) {
             // clippy thinks I should do this:
             // for (i, <item>) in leader_schedule
@@ -98,14 +100,19 @@ impl LeaderScheduleCache {
                         }
                     }

-                    return Some(current_slot);
+                    if first_slot.is_none() {
+                        first_slot = Some(current_slot);
+                    }
+                    last_slot = current_slot;
+                } else if first_slot.is_some() {
+                    return Some((first_slot.unwrap(), last_slot));
                 }
             }
             epoch += 1;
             start_index = 0;
         }

-        None
+        first_slot.and_then(|slot| Some((slot, last_slot)))
     }

     fn slot_leader_at_no_compute(&self, slot: u64) -> Option<Pubkey> {
@@ -317,8 +324,14 @@ mod tests {
             cache.slot_leader_at(bank.slot(), Some(&bank)).unwrap(),
             pubkey
         );
-        assert_eq!(cache.next_leader_slot(&pubkey, 0, &bank, None), Some(1));
-        assert_eq!(cache.next_leader_slot(&pubkey, 1, &bank, None), Some(2));
+        assert_eq!(
+            cache.next_leader_slot(&pubkey, 0, &bank, None),
+            Some((1, 16383))
+        );
+        assert_eq!(
+            cache.next_leader_slot(&pubkey, 1, &bank, None),
+            Some((2, 16383))
+        );
         assert_eq!(
             cache.next_leader_slot(
                 &pubkey,
@@ -365,8 +378,11 @@ mod tests {
         );
         // Check that the next leader slot after 0 is slot 1
         assert_eq!(
-            cache.next_leader_slot(&pubkey, 0, &bank, Some(&blocktree)),
-            Some(1)
+            cache
+                .next_leader_slot(&pubkey, 0, &bank, Some(&blocktree))
+                .unwrap()
+                .0,
+            1
         );

         // Write a blob into slot 2 that chains to slot 1,
@@ -374,8 +390,11 @@ mod tests {
         let (blobs, _) = make_slot_entries(2, 1, 1);
         blocktree.write_blobs(&blobs[..]).unwrap();
         assert_eq!(
-            cache.next_leader_slot(&pubkey, 0, &bank, Some(&blocktree)),
-            Some(1)
+            cache
+                .next_leader_slot(&pubkey, 0, &bank, Some(&blocktree))
+                .unwrap()
+                .0,
+            1
        );

         // Write a blob into slot 1
@@ -384,8 +403,11 @@ mod tests {
         let (blobs, _) = make_slot_entries(1, 0, 1);
         // Check that slot 1 and 2 are skipped
         blocktree.write_blobs(&blobs[..]).unwrap();
         assert_eq!(
-            cache.next_leader_slot(&pubkey, 0, &bank, Some(&blocktree)),
-            Some(3)
+            cache
+                .next_leader_slot(&pubkey, 0, &bank, Some(&blocktree))
+                .unwrap()
+                .0,
+            3
         );
         // Integrity checks
@@ -459,8 +481,11 @@ mod tests {
         expected_slot += index;

         assert_eq!(
-            cache.next_leader_slot(&node_pubkey, 0, &bank, None),
-            Some(expected_slot),
+            cache
+                .next_leader_slot(&node_pubkey, 0, &bank, None)
+                .unwrap()
+                .0,
+            expected_slot
         );
     }

diff --git a/core/src/poh_recorder.rs b/core/src/poh_recorder.rs
index 9e42999afd..aa15367779 100644
--- a/core/src/poh_recorder.rs
+++ b/core/src/poh_recorder.rs
@@ -21,12 +21,15 @@ use solana_sdk::poh_config::PohConfig;
 use solana_sdk::pubkey::Pubkey;
 use solana_sdk::timing;
 pub use solana_sdk::timing::Slot;
+use solana_sdk::timing::NUM_CONSECUTIVE_LEADER_SLOTS;
 use solana_sdk::transaction::Transaction;
+use std::cmp;
 use std::sync::mpsc::{channel, Receiver, Sender, SyncSender};
 use std::sync::{Arc, Mutex};
 use std::time::Instant;

 const GRACE_TICKS_FACTOR: u64 = 2;
+const MAX_GRACE_TICKS: u64 = 12;

 #[derive(Debug, PartialEq, Eq, Clone)]
 pub enum PohRecorderError {
@@ -74,11 +77,9 @@ impl PohRecorder {
             Some(&self.blocktree),
         );
         assert_eq!(self.ticks_per_slot, bank.ticks_per_slot());
-        let (start_leader_at_tick, last_leader_tick) = Self::compute_leader_slot_ticks(
-            next_leader_slot,
-            self.ticks_per_slot,
-            self.grace_ticks,
-        );
+        let (start_leader_at_tick, last_leader_tick, grace_ticks) =
+            Self::compute_leader_slot_ticks(next_leader_slot, self.ticks_per_slot);
+        self.grace_ticks = grace_ticks;
         self.start_leader_at_tick = start_leader_at_tick;
         self.last_leader_tick = last_leader_tick;
     }
@@ -154,25 +155,39 @@ impl PohRecorder {
         (false, 0, next_tick / self.ticks_per_slot, self.start_slot)
     }

-    // returns (start_leader_at_tick, last_leader_tick) given the next slot this
+    // returns (start_leader_at_tick, last_leader_tick, grace_ticks) given the next slot this
     // recorder will lead
     fn compute_leader_slot_ticks(
-        next_leader_slot: Option<Slot>,
+        next_leader_slot: Option<(Slot, Slot)>,
         ticks_per_slot: u64,
-        grace_ticks: u64,
-    ) -> (Option<u64>, u64) {
+    ) -> (Option<u64>, u64, u64) {
         next_leader_slot
-            .map(|slot| {
-                (
-                    Some(slot * ticks_per_slot + grace_ticks),
-                    (slot + 1) * ticks_per_slot - 1,
-                )
+            .map(|(first, last)| {
+                let first_tick = first * ticks_per_slot;
+                let last_tick = (last + 1) * ticks_per_slot - 1;
+                let grace_ticks = cmp::min(
+                    MAX_GRACE_TICKS,
+                    (last_tick - first_tick + 1) / GRACE_TICKS_FACTOR,
+                );
+                (Some(first_tick + grace_ticks), last_tick, grace_ticks)
             })
-            .unwrap_or((None, 0))
+            .unwrap_or((
+                None,
+                0,
+                cmp::min(
+                    MAX_GRACE_TICKS,
+                    ticks_per_slot * NUM_CONSECUTIVE_LEADER_SLOTS / GRACE_TICKS_FACTOR,
+                ),
+            ))
     }

     // synchronize PoH with a bank
-    pub fn reset(&mut self, blockhash: Hash, start_slot: Slot, next_leader_slot: Option<Slot>) {
+    pub fn reset(
+        &mut self,
+        blockhash: Hash,
+        start_slot: Slot,
+        next_leader_slot: Option<(Slot, Slot)>,
+    ) {
         self.clear_bank();
         let mut cache = vec![];
         {
@@ -190,11 +205,9 @@ impl PohRecorder {
         self.start_tick = (start_slot + 1) * self.ticks_per_slot;
         self.tick_height = self.start_tick - 1;

-        let (start_leader_at_tick, last_leader_tick) = Self::compute_leader_slot_ticks(
-            next_leader_slot,
-            self.ticks_per_slot,
-            self.grace_ticks,
-        );
+        let (start_leader_at_tick, last_leader_tick, grace_ticks) =
+            Self::compute_leader_slot_ticks(next_leader_slot, self.ticks_per_slot);
+        self.grace_ticks = grace_ticks;
         self.start_leader_at_tick = start_leader_at_tick;
         self.last_leader_tick = last_leader_tick;
     }
@@ -369,7 +382,7 @@ impl PohRecorder {
         tick_height: u64,
         last_entry_hash: Hash,
         start_slot: Slot,
-        next_leader_slot: Option<Slot>,
+        next_leader_slot: Option<(Slot, Slot)>,
         ticks_per_slot: u64,
         id: &Pubkey,
         blocktree: &Arc<Blocktree>,
@@ -382,9 +395,8 @@ impl PohRecorder {
             poh_config.hashes_per_tick,
         )));
         let (sender, receiver) = channel();
-        let grace_ticks = ticks_per_slot / GRACE_TICKS_FACTOR;
-        let (start_leader_at_tick, last_leader_tick) =
-            Self::compute_leader_slot_ticks(next_leader_slot, ticks_per_slot, grace_ticks);
+        let (start_leader_at_tick, last_leader_tick, grace_ticks) =
+            Self::compute_leader_slot_ticks(next_leader_slot, ticks_per_slot);
         (
             Self {
                 poh,
@@ -415,7 +427,7 @@ impl PohRecorder {
         tick_height: u64,
         last_entry_hash: Hash,
         start_slot: Slot,
-        next_leader_slot: Option<Slot>,
+        next_leader_slot: Option<(Slot, Slot)>,
         ticks_per_slot: u64,
         id: &Pubkey,
         blocktree: &Arc<Blocktree>,
@@ -459,7 +471,7 @@ mod tests {
                 0,
                 prev_hash,
                 0,
-                Some(4),
+                Some((4, 4)),
                 DEFAULT_TICKS_PER_SLOT,
                 &Pubkey::default(),
                 &Arc::new(blocktree),
@@ -486,7 +498,7 @@ mod tests {
                 0,
                 prev_hash,
                 0,
-                Some(4),
+                Some((4, 4)),
                 DEFAULT_TICKS_PER_SLOT,
                 &Pubkey::default(),
                 &Arc::new(blocktree),
@@ -512,7 +524,7 @@ mod tests {
                 0,
                 Hash::default(),
                 0,
-                Some(4),
+                Some((4, 4)),
                 DEFAULT_TICKS_PER_SLOT,
                 &Pubkey::default(),
                 &Arc::new(blocktree),
@@ -521,7 +533,7 @@ mod tests {
             );
             poh_recorder.tick();
             assert_eq!(poh_recorder.tick_cache.len(), 1);
-            poh_recorder.reset(Hash::default(), 0, Some(4));
+            poh_recorder.reset(Hash::default(), 0, Some((4, 4)));
             assert_eq!(poh_recorder.tick_cache.len(), 0);
         }
         Blocktree::destroy(&ledger_path).unwrap();
@@ -540,7 +552,7 @@ mod tests {
                 0,
                 prev_hash,
                 0,
-                Some(4),
+                Some((4, 4)),
                 bank.ticks_per_slot(),
                 &Pubkey::default(),
                 &Arc::new(blocktree),
@@ -574,7 +586,7 @@ mod tests {
                 0,
                 prev_hash,
                 0,
-                Some(4),
+                Some((4, 4)),
                 bank.ticks_per_slot(),
                 &Pubkey::default(),
                 &Arc::new(blocktree),
@@ -620,7 +632,7 @@ mod tests {
                 0,
                 prev_hash,
                 0,
-                Some(4),
+                Some((4, 4)),
                 bank.ticks_per_slot(),
                 &Pubkey::default(),
                 &Arc::new(blocktree),
@@ -664,7 +676,7 @@ mod tests {
                 0,
                 prev_hash,
                 0,
-                Some(4),
+                Some((4, 4)),
                 bank.ticks_per_slot(),
                 &Pubkey::default(),
                 &Arc::new(blocktree),
@@ -702,7 +714,7 @@ mod tests {
                 0,
                 prev_hash,
                 0,
-                Some(4),
+                Some((4, 4)),
                 bank.ticks_per_slot(),
                 &Pubkey::default(),
                 &Arc::new(blocktree),
@@ -742,7 +754,7 @@ mod tests {
                 0,
                 prev_hash,
                 0,
-                Some(4),
+                Some((4, 4)),
                 bank.ticks_per_slot(),
                 &Pubkey::default(),
                 &Arc::new(blocktree),
@@ -789,7 +801,7 @@ mod tests {
                 0,
                 prev_hash,
                 0,
-                Some(4),
+                Some((4, 4)),
                 bank.ticks_per_slot(),
                 &Pubkey::default(),
                 &Arc::new(blocktree),
@@ -833,7 +845,7 @@ mod tests {
                 0,
                 prev_hash,
                 0,
-                Some(4),
+                Some((4, 4)),
                 bank.ticks_per_slot(),
                 &Pubkey::default(),
                 &Arc::new(blocktree),
@@ -868,7 +880,7 @@ mod tests {
                 0,
                 Hash::default(),
                 0,
-                Some(4),
+                Some((4, 4)),
                 DEFAULT_TICKS_PER_SLOT,
                 &Pubkey::default(),
                 &Arc::new(blocktree),
@@ -879,7 +891,7 @@ mod tests {
             poh_recorder.tick();
            assert_eq!(poh_recorder.tick_cache.len(), 2);
             let hash = poh_recorder.poh.lock().unwrap().hash;
-            poh_recorder.reset(hash, 0, Some(4));
+            poh_recorder.reset(hash, 0, Some((4, 4)));
             assert_eq!(poh_recorder.tick_cache.len(), 0);
         }
         Blocktree::destroy(&ledger_path).unwrap();
@@ -895,7 +907,7 @@ mod tests {
                 0,
                 Hash::default(),
                 0,
-                Some(4),
+                Some((4, 4)),
                 DEFAULT_TICKS_PER_SLOT,
                 &Pubkey::default(),
                 &Arc::new(blocktree),
@@ -905,7 +917,7 @@ mod tests {
             poh_recorder.tick();
             poh_recorder.tick();
             assert_eq!(poh_recorder.tick_cache.len(), 2);
-            poh_recorder.reset(poh_recorder.tick_cache[0].0.hash, 0, Some(4));
+            poh_recorder.reset(poh_recorder.tick_cache[0].0.hash, 0, Some((4, 4)));
             assert_eq!(poh_recorder.tick_cache.len(), 0);
         }
         Blocktree::destroy(&ledger_path).unwrap();
@@ -923,7 +935,7 @@ mod tests {
                 0,
                 Hash::default(),
                 0,
-                Some(4),
+                Some((4, 4)),
                 DEFAULT_TICKS_PER_SLOT,
                 &Pubkey::default(),
                 &Arc::new(blocktree),
@@ -935,7 +947,7 @@ mod tests {
             poh_recorder.tick();
             assert_eq!(poh_recorder.tick_cache.len(), 3);
             assert_eq!(poh_recorder.tick_height, 3);
-            poh_recorder.reset(hash(b"hello"), 0, Some(4)); // parent slot 0 implies tick_height of 3
+            poh_recorder.reset(hash(b"hello"), 0, Some((4, 4))); // parent slot 0 implies tick_height of 3
             assert_eq!(poh_recorder.tick_cache.len(), 0);
             poh_recorder.tick();
             assert_eq!(poh_recorder.tick_height, 4);
@@ -955,7 +967,7 @@ mod tests {
                 0,
                 Hash::default(),
                 0,
-                Some(4),
+                Some((4, 4)),
                 bank.ticks_per_slot(),
                 &Pubkey::default(),
                 &Arc::new(blocktree),
@@ -968,7 +980,7 @@ mod tests {
                 max_tick_height: 3,
             };
             poh_recorder.set_working_bank(working_bank);
-            poh_recorder.reset(hash(b"hello"), 0, Some(4));
+            poh_recorder.reset(hash(b"hello"), 0, Some((4, 4)));
             assert!(poh_recorder.working_bank.is_none());
         }
         Blocktree::destroy(&ledger_path).unwrap();
@@ -1020,7 +1032,7 @@ mod tests {
                 0,
                 prev_hash,
                 0,
-                Some(4),
+                Some((4, 4)),
                 bank.ticks_per_slot(),
                 &Pubkey::default(),
                 &Arc::new(blocktree),
@@ -1085,7 +1097,7 @@ mod tests {
             assert_eq!(poh_recorder.reached_leader_tick().0, false);

             // Provide a leader slot 1 slot down
-            poh_recorder.reset(bank.last_blockhash(), 0, Some(2));
+            poh_recorder.reset(bank.last_blockhash(), 0, Some((2, 2)));

             let init_ticks = poh_recorder.tick_height();

@@ -1104,13 +1116,13 @@ mod tests {
             assert_eq!(poh_recorder.reached_leader_tick().0, false);

             // reset poh now. we should immediately be leader
-            poh_recorder.reset(bank.last_blockhash(), 1, Some(2));
+            poh_recorder.reset(bank.last_blockhash(), 1, Some((2, 2)));
             assert_eq!(poh_recorder.reached_leader_tick().0, true);
             assert_eq!(poh_recorder.reached_leader_tick().1, 0);

             // Now test that with grace ticks we can reach leader ticks
             // Set the leader slot 1 slot down
-            poh_recorder.reset(bank.last_blockhash(), 1, Some(3));
+            poh_recorder.reset(bank.last_blockhash(), 1, Some((3, 3)));

             // Send one slot worth of ticks ("skips" slot 2)
             for _ in 0..bank.ticks_per_slot() {
@@ -1121,7 +1133,7 @@ mod tests {
             assert_eq!(poh_recorder.reached_leader_tick().0, false);

             // Send 1 less tick than the grace ticks
-            for _ in 0..bank.ticks_per_slot() / GRACE_TICKS_FACTOR {
+            for _ in 0..bank.ticks_per_slot() * NUM_CONSECUTIVE_LEADER_SLOTS / GRACE_TICKS_FACTOR {
                 poh_recorder.tick();
             }

@@ -1129,12 +1141,12 @@ mod tests {
             assert_eq!(poh_recorder.reached_leader_tick().0, true);
             assert_eq!(
                 poh_recorder.reached_leader_tick().1,
-                bank.ticks_per_slot() / GRACE_TICKS_FACTOR
+                bank.ticks_per_slot() * NUM_CONSECUTIVE_LEADER_SLOTS / GRACE_TICKS_FACTOR
             );

             // Let's test that correct grace ticks are reported
             // Set the leader slot 1 slot down
-            poh_recorder.reset(bank.last_blockhash(), 2, Some(4));
+            poh_recorder.reset(bank.last_blockhash(), 2, Some((4, 4)));

             // send ticks for a slot
             for _ in 0..bank.ticks_per_slot() {
@@ -1143,7 +1155,7 @@ mod tests {
             // We are not the leader yet, as expected
             assert_eq!(poh_recorder.reached_leader_tick().0, false);

-            poh_recorder.reset(bank.last_blockhash(), 3, Some(4));
+            poh_recorder.reset(bank.last_blockhash(), 3, Some((4, 4)));

             // without sending more ticks, we should be leader now
             assert_eq!(poh_recorder.reached_leader_tick().0, true);
@@ -1152,7 +1164,7 @@ mod tests {
             // Let's test that if a node overshoots the ticks for its target
             // leader slot, reached_leader_tick() will return true, because it's overdue
             // Set the leader slot 1 slot down
-            poh_recorder.reset(bank.last_blockhash(), 4, Some(5));
+            poh_recorder.reset(bank.last_blockhash(), 4, Some((5, 5)));

             // Send remaining ticks for the slot (remember we sent extra ticks in the previous part of the test)
             for _ in 0..4 * bank.ticks_per_slot() {
@@ -1200,7 +1212,8 @@ mod tests {
             );

             // We reset with leader slot after 3 slots
-            poh_recorder.reset(bank.last_blockhash(), 0, Some(bank.slot() + 3));
+            let bank_slot = bank.slot() + 3;
+            poh_recorder.reset(bank.last_blockhash(), 0, Some((bank_slot, bank_slot)));

             // Test that the node won't be leader in next 2 slots
             assert_eq!(
@@ -1245,7 +1258,7 @@ mod tests {
                 0,
                 bank.last_blockhash(),
                 0,
-                Some(2),
+                Some((2, 2)),
                 bank.ticks_per_slot(),
                 &Pubkey::default(),
                 &Arc::new(blocktree),
@@ -1262,4 +1275,32 @@ mod tests {
             assert!(!bank.check_hash_age(&genesis_blockhash, 1));
         }
     }
+
+    #[test]
+    fn test_compute_leader_slots() {
+        assert_eq!(
+            PohRecorder::compute_leader_slot_ticks(None, 0),
+            (None, 0, 0)
+        );
+
+        assert_eq!(
+            PohRecorder::compute_leader_slot_ticks(Some((4, 4)), 8),
+            (Some(36), 39, 4)
+        );
+
+        assert_eq!(
+            PohRecorder::compute_leader_slot_ticks(Some((4, 7)), 8),
+            (Some(44), 63, MAX_GRACE_TICKS)
+        );
+
+        assert_eq!(
+            PohRecorder::compute_leader_slot_ticks(Some((6, 7)), 8),
+            (Some(56), 63, 8)
+        );
+
+        assert_eq!(
+            PohRecorder::compute_leader_slot_ticks(Some((6, 7)), 4),
+            (Some(28), 31, 4)
+        );
+    }
 }
diff --git a/core/src/poh_service.rs b/core/src/poh_service.rs
index d10812795e..40c927fd54 100644
--- a/core/src/poh_service.rs
+++ b/core/src/poh_service.rs
@@ -113,7 +113,7 @@ mod tests {
             bank.tick_height(),
             prev_hash,
             bank.slot(),
-            Some(4),
+            Some((4, 4)),
             bank.ticks_per_slot(),
             &Pubkey::default(),
             &Arc::new(blocktree),
diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs
index d000286eb9..a96829a5c5 100644
--- a/core/src/replay_stage.rs
+++ b/core/src/replay_stage.rs
@@ -478,7 +478,7 @@ impl ReplayStage {
                 .reset(bank.last_blockhash(), bank.slot(), next_leader_slot);

             let next_leader_msg = if let Some(next_leader_slot) = next_leader_slot {
-                format!("My next leader slot is #{}", next_leader_slot)
+                format!("My next leader slot is #{}", next_leader_slot.0)
             } else {
                 "I am not in the upcoming leader schedule yet".to_owned()
             };
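
Note (not part of the patch): a minimal standalone sketch of the grace-tick arithmetic introduced above, assuming only the constants visible in the diff (GRACE_TICKS_FACTOR = 2, MAX_GRACE_TICKS = 12). The helper name below is hypothetical; it mirrors what PohRecorder::compute_leader_slot_ticks does for a (first, last) range of consecutive leader slots.

// Map a (first, last) leader-slot range to (start_leader_at_tick, last_leader_tick, grace_ticks).
fn leader_range_to_ticks(first: u64, last: u64, ticks_per_slot: u64) -> (u64, u64, u64) {
    // First and last PoH tick covered by the consecutive leader slots first..=last.
    let first_tick = first * ticks_per_slot;
    let last_tick = (last + 1) * ticks_per_slot - 1;
    // Grace ticks scale with the whole range but are capped at MAX_GRACE_TICKS (12).
    let grace_ticks = std::cmp::min(12, (last_tick - first_tick + 1) / 2);
    (first_tick + grace_ticks, last_tick, grace_ticks)
}

fn main() {
    // ticks_per_slot = 8, leader slots (4, 7): first_tick = 32, last_tick = 63,
    // grace_ticks = min(12, 32 / 2) = 12, so leading starts at tick 44 -- the same
    // values asserted by the (Some(44), 63, MAX_GRACE_TICKS) case in test_compute_leader_slots.
    assert_eq!(leader_range_to_ticks(4, 7, 8), (44, 63, 12));
}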