Prevent Requests/Caching of leader schedules for epochs beyond confirmed roots (#4033)
automerge
This commit is contained in:
		| @@ -266,7 +266,7 @@ impl BankingStage { | |||||||
|         let poh = poh_recorder.lock().unwrap(); |         let poh = poh_recorder.lock().unwrap(); | ||||||
|         let leader_id = match poh.bank() { |         let leader_id = match poh.bank() { | ||||||
|             Some(bank) => leader_schedule_cache |             Some(bank) => leader_schedule_cache | ||||||
|                 .slot_leader_at_else_compute(bank.slot() + 1, &bank) |                 .slot_leader_at(bank.slot() + 1, Some(&bank)) | ||||||
|                 .unwrap_or_default(), |                 .unwrap_or_default(), | ||||||
|             None => { |             None => { | ||||||
|                 if poh.would_be_leader(DEFAULT_TICKS_PER_SLOT) { |                 if poh.would_be_leader(DEFAULT_TICKS_PER_SLOT) { | ||||||
|   | |||||||
| @@ -131,7 +131,7 @@ pub fn process_blocktree( | |||||||
|         vec![(slot, meta, bank, entry_height, last_entry_hash)] |         vec![(slot, meta, bank, entry_height, last_entry_hash)] | ||||||
|     }; |     }; | ||||||
|  |  | ||||||
|     let leader_schedule_cache = LeaderScheduleCache::new(*pending_slots[0].2.epoch_schedule()); |     let leader_schedule_cache = LeaderScheduleCache::new(*pending_slots[0].2.epoch_schedule(), 0); | ||||||
|  |  | ||||||
|     let mut fork_info = vec![]; |     let mut fork_info = vec![]; | ||||||
|     let mut last_status_report = Instant::now(); |     let mut last_status_report = Instant::now(); | ||||||
| @@ -188,6 +188,7 @@ pub fn process_blocktree( | |||||||
|         bank.freeze(); // all banks handled by this routine are created from complete slots |         bank.freeze(); // all banks handled by this routine are created from complete slots | ||||||
|  |  | ||||||
|         if blocktree.is_root(slot) { |         if blocktree.is_root(slot) { | ||||||
|  |             leader_schedule_cache.set_root(slot); | ||||||
|             bank.squash(); |             bank.squash(); | ||||||
|             pending_slots.clear(); |             pending_slots.clear(); | ||||||
|             fork_info.clear(); |             fork_info.clear(); | ||||||
| @@ -219,7 +220,7 @@ pub fn process_blocktree( | |||||||
|                 let next_bank = Arc::new(Bank::new_from_parent( |                 let next_bank = Arc::new(Bank::new_from_parent( | ||||||
|                     &bank, |                     &bank, | ||||||
|                     &leader_schedule_cache |                     &leader_schedule_cache | ||||||
|                         .slot_leader_at_else_compute(next_slot, &bank) |                         .slot_leader_at(next_slot, Some(&bank)) | ||||||
|                         .unwrap(), |                         .unwrap(), | ||||||
|                     next_slot, |                     next_slot, | ||||||
|                 )); |                 )); | ||||||
|   | |||||||
| @@ -15,41 +15,34 @@ pub struct LeaderScheduleCache { | |||||||
|     // Map from an epoch to a leader schedule for that epoch |     // Map from an epoch to a leader schedule for that epoch | ||||||
|     pub cached_schedules: RwLock<CachedSchedules>, |     pub cached_schedules: RwLock<CachedSchedules>, | ||||||
|     epoch_schedule: EpochSchedule, |     epoch_schedule: EpochSchedule, | ||||||
|  |     max_epoch: RwLock<u64>, | ||||||
| } | } | ||||||
|  |  | ||||||
| impl LeaderScheduleCache { | impl LeaderScheduleCache { | ||||||
|     pub fn new_from_bank(bank: &Bank) -> Self { |     pub fn new_from_bank(bank: &Bank) -> Self { | ||||||
|         Self::new(*bank.epoch_schedule()) |         Self::new(*bank.epoch_schedule(), bank.slot()) | ||||||
|     } |     } | ||||||
|  |  | ||||||
|     pub fn new(epoch_schedule: EpochSchedule) -> Self { |     pub fn new(epoch_schedule: EpochSchedule, root: u64) -> Self { | ||||||
|         Self { |         let cache = Self { | ||||||
|             cached_schedules: RwLock::new((HashMap::new(), VecDeque::new())), |             cached_schedules: RwLock::new((HashMap::new(), VecDeque::new())), | ||||||
|             epoch_schedule, |             epoch_schedule, | ||||||
|         } |             max_epoch: RwLock::new(0), | ||||||
|  |         }; | ||||||
|  |  | ||||||
|  |         cache.set_root(root); | ||||||
|  |         cache | ||||||
|     } |     } | ||||||
|  |  | ||||||
|     pub fn slot_leader_at(&self, slot: u64) -> Option<Pubkey> { |     pub fn set_root(&self, root: u64) { | ||||||
|         let (epoch, slot_index) = self.epoch_schedule.get_epoch_and_slot_index(slot); |         *self.max_epoch.write().unwrap() = self.epoch_schedule.get_stakers_epoch(root); | ||||||
|         self.cached_schedules |  | ||||||
|             .read() |  | ||||||
|             .unwrap() |  | ||||||
|             .0 |  | ||||||
|             .get(&epoch) |  | ||||||
|             .map(|schedule| schedule[slot_index]) |  | ||||||
|     } |     } | ||||||
|  |  | ||||||
|     pub fn slot_leader_at_else_compute(&self, slot: u64, bank: &Bank) -> Option<Pubkey> { |     pub fn slot_leader_at(&self, slot: u64, bank: Option<&Bank>) -> Option<Pubkey> { | ||||||
|         let cache_result = self.slot_leader_at(slot); |         if let Some(bank) = bank { | ||||||
|         if cache_result.is_some() { |             self.slot_leader_at_else_compute(slot, bank) | ||||||
|             cache_result |  | ||||||
|         } else { |         } else { | ||||||
|             let (epoch, slot_index) = bank.get_epoch_and_slot_index(slot); |             self.slot_leader_at_no_compute(slot) | ||||||
|             if let Some(epoch_schedule) = self.compute_epoch_schedule(epoch, bank) { |  | ||||||
|                 Some(epoch_schedule[slot_index]) |  | ||||||
|             } else { |  | ||||||
|                 None |  | ||||||
|             } |  | ||||||
|         } |         } | ||||||
|     } |     } | ||||||
|  |  | ||||||
| @@ -94,6 +87,39 @@ impl LeaderScheduleCache { | |||||||
|         None |         None | ||||||
|     } |     } | ||||||
|  |  | ||||||
|  |     fn slot_leader_at_no_compute(&self, slot: u64) -> Option<Pubkey> { | ||||||
|  |         let (epoch, slot_index) = self.epoch_schedule.get_epoch_and_slot_index(slot); | ||||||
|  |         self.cached_schedules | ||||||
|  |             .read() | ||||||
|  |             .unwrap() | ||||||
|  |             .0 | ||||||
|  |             .get(&epoch) | ||||||
|  |             .map(|schedule| schedule[slot_index]) | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     fn slot_leader_at_else_compute(&self, slot: u64, bank: &Bank) -> Option<Pubkey> { | ||||||
|  |         let cache_result = self.slot_leader_at_no_compute(slot); | ||||||
|  |         // Forbid asking for slots in an unconfirmed epoch | ||||||
|  |         let bank_epoch = self.epoch_schedule.get_epoch_and_slot_index(slot).0; | ||||||
|  |         if bank_epoch > *self.max_epoch.read().unwrap() { | ||||||
|  |             error!( | ||||||
|  |                 "Requested leader in slot: {} of unconfirmed epoch: {}", | ||||||
|  |                 slot, bank_epoch | ||||||
|  |             ); | ||||||
|  |             return None; | ||||||
|  |         } | ||||||
|  |         if cache_result.is_some() { | ||||||
|  |             cache_result | ||||||
|  |         } else { | ||||||
|  |             let (epoch, slot_index) = bank.get_epoch_and_slot_index(slot); | ||||||
|  |             if let Some(epoch_schedule) = self.compute_epoch_schedule(epoch, bank) { | ||||||
|  |                 Some(epoch_schedule[slot_index]) | ||||||
|  |             } else { | ||||||
|  |                 None | ||||||
|  |             } | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  |  | ||||||
|     fn get_epoch_schedule_else_compute( |     fn get_epoch_schedule_else_compute( | ||||||
|         &self, |         &self, | ||||||
|         epoch: u64, |         epoch: u64, | ||||||
| @@ -150,19 +176,17 @@ mod tests { | |||||||
|     use crate::blocktree::get_tmp_ledger_path; |     use crate::blocktree::get_tmp_ledger_path; | ||||||
|  |  | ||||||
|     #[test] |     #[test] | ||||||
|     fn test_slot_leader_at_else_compute() { |     fn test_slot_leader_at() { | ||||||
|         let (genesis_block, _mint_keypair) = GenesisBlock::new(2); |         let (genesis_block, _mint_keypair) = GenesisBlock::new(2); | ||||||
|         let bank = Bank::new(&genesis_block); |         let bank = Bank::new(&genesis_block); | ||||||
|         let cache = LeaderScheduleCache::new_from_bank(&bank); |         let cache = LeaderScheduleCache::new_from_bank(&bank); | ||||||
|  |  | ||||||
|         // Nothing in the cache, should return None |         // Nothing in the cache, should return None | ||||||
|         assert!(cache.slot_leader_at(bank.slot()).is_none()); |         assert!(cache.slot_leader_at(bank.slot(), None).is_none()); | ||||||
|  |  | ||||||
|         // Add something to the cache |         // Add something to the cache | ||||||
|         assert!(cache |         assert!(cache.slot_leader_at(bank.slot(), Some(&bank)).is_some()); | ||||||
|             .slot_leader_at_else_compute(bank.slot(), &bank) |         assert!(cache.slot_leader_at(bank.slot(), None).is_some()); | ||||||
|             .is_some()); |  | ||||||
|         assert!(cache.slot_leader_at(bank.slot()).is_some()); |  | ||||||
|         assert_eq!(cache.cached_schedules.read().unwrap().0.len(), 1); |         assert_eq!(cache.cached_schedules.read().unwrap().0.len(), 1); | ||||||
|     } |     } | ||||||
|  |  | ||||||
| @@ -195,9 +219,9 @@ mod tests { | |||||||
|     fn run_thread_race() { |     fn run_thread_race() { | ||||||
|         let slots_per_epoch = MINIMUM_SLOT_LENGTH as u64; |         let slots_per_epoch = MINIMUM_SLOT_LENGTH as u64; | ||||||
|         let epoch_schedule = EpochSchedule::new(slots_per_epoch, slots_per_epoch / 2, true); |         let epoch_schedule = EpochSchedule::new(slots_per_epoch, slots_per_epoch / 2, true); | ||||||
|         let cache = Arc::new(LeaderScheduleCache::new(epoch_schedule)); |  | ||||||
|         let (genesis_block, _mint_keypair) = GenesisBlock::new(2); |         let (genesis_block, _mint_keypair) = GenesisBlock::new(2); | ||||||
|         let bank = Arc::new(Bank::new(&genesis_block)); |         let bank = Arc::new(Bank::new(&genesis_block)); | ||||||
|  |         let cache = Arc::new(LeaderScheduleCache::new(epoch_schedule, bank.slot())); | ||||||
|  |  | ||||||
|         let num_threads = 10; |         let num_threads = 10; | ||||||
|         let (threads, senders): (Vec<_>, Vec<_>) = (0..num_threads) |         let (threads, senders): (Vec<_>, Vec<_>) = (0..num_threads) | ||||||
| @@ -210,7 +234,7 @@ mod tests { | |||||||
|                         .name("test_thread_race_leader_schedule_cache".to_string()) |                         .name("test_thread_race_leader_schedule_cache".to_string()) | ||||||
|                         .spawn(move || { |                         .spawn(move || { | ||||||
|                             let _ = receiver.recv(); |                             let _ = receiver.recv(); | ||||||
|                             cache.slot_leader_at_else_compute(bank.slot(), &bank); |                             cache.slot_leader_at(bank.slot(), Some(&bank)); | ||||||
|                         }) |                         }) | ||||||
|                         .unwrap(), |                         .unwrap(), | ||||||
|                     sender, |                     sender, | ||||||
| @@ -246,9 +270,7 @@ mod tests { | |||||||
|         let cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank)); |         let cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank)); | ||||||
|  |  | ||||||
|         assert_eq!( |         assert_eq!( | ||||||
|             cache |             cache.slot_leader_at(bank.slot(), Some(&bank)).unwrap(), | ||||||
|                 .slot_leader_at_else_compute(bank.slot(), &bank) |  | ||||||
|                 .unwrap(), |  | ||||||
|             pubkey |             pubkey | ||||||
|         ); |         ); | ||||||
|         assert_eq!(cache.next_leader_slot(&pubkey, 0, &bank, None), Some(1)); |         assert_eq!(cache.next_leader_slot(&pubkey, 0, &bank, None), Some(1)); | ||||||
| @@ -294,9 +316,7 @@ mod tests { | |||||||
|             ); |             ); | ||||||
|  |  | ||||||
|             assert_eq!( |             assert_eq!( | ||||||
|                 cache |                 cache.slot_leader_at(bank.slot(), Some(&bank)).unwrap(), | ||||||
|                     .slot_leader_at_else_compute(bank.slot(), &bank) |  | ||||||
|                     .unwrap(), |  | ||||||
|                 pubkey |                 pubkey | ||||||
|             ); |             ); | ||||||
|             // Check that the next leader slot after 0 is slot 1 |             // Check that the next leader slot after 0 is slot 1 | ||||||
| @@ -400,4 +420,35 @@ mod tests { | |||||||
|             Some(expected_slot), |             Some(expected_slot), | ||||||
|         ); |         ); | ||||||
|     } |     } | ||||||
|  |  | ||||||
|  |     #[test] | ||||||
|  |     fn test_schedule_for_unconfirmed_epoch() { | ||||||
|  |         let (genesis_block, _mint_keypair) = GenesisBlock::new(2); | ||||||
|  |         let bank = Arc::new(Bank::new(&genesis_block)); | ||||||
|  |         let cache = LeaderScheduleCache::new_from_bank(&bank); | ||||||
|  |  | ||||||
|  |         assert_eq!(*cache.max_epoch.read().unwrap(), 1); | ||||||
|  |  | ||||||
|  |         // Asking for the leader for the last slot in epoch 1 is ok b/c | ||||||
|  |         // epoch 1 is confirmed | ||||||
|  |         assert_eq!(bank.get_epoch_and_slot_index(95).0, 1); | ||||||
|  |         assert!(cache.slot_leader_at(95, Some(&bank)).is_some()); | ||||||
|  |  | ||||||
|  |         // Asking for the leader for the first slot in epoch 2 is not ok | ||||||
|  |         // b/c epoch 2 is unconfirmed | ||||||
|  |         assert_eq!(bank.get_epoch_and_slot_index(96).0, 2); | ||||||
|  |         assert!(cache.slot_leader_at(96, Some(&bank)).is_none()); | ||||||
|  |  | ||||||
|  |         let bank2 = Bank::new_from_parent(&bank, &Pubkey::new_rand(), 95); | ||||||
|  |         assert!(bank2.epoch_vote_accounts(2).is_some()); | ||||||
|  |  | ||||||
|  |         // Set root for a slot in epoch 1, so that epoch 2 is now confirmed | ||||||
|  |         cache.set_root(95); | ||||||
|  |         assert_eq!(*cache.max_epoch.read().unwrap(), 2); | ||||||
|  |         assert!(cache.slot_leader_at(96, Some(&bank2)).is_some()); | ||||||
|  |         assert_eq!(bank2.get_epoch_and_slot_index(223).0, 2); | ||||||
|  |         assert!(cache.slot_leader_at(223, Some(&bank2)).is_some()); | ||||||
|  |         assert_eq!(bank2.get_epoch_and_slot_index(224).0, 3); | ||||||
|  |         assert!(cache.slot_leader_at(224, Some(&bank2)).is_none()); | ||||||
|  |     } | ||||||
| } | } | ||||||
|   | |||||||
| @@ -157,6 +157,7 @@ impl ReplayStage { | |||||||
|                             &vote_account, |                             &vote_account, | ||||||
|                             &cluster_info, |                             &cluster_info, | ||||||
|                             &blocktree, |                             &blocktree, | ||||||
|  |                             &leader_schedule_cache, | ||||||
|                         )?; |                         )?; | ||||||
|  |  | ||||||
|                         Self::reset_poh_recorder( |                         Self::reset_poh_recorder( | ||||||
| @@ -235,7 +236,7 @@ impl ReplayStage { | |||||||
|             }; |             }; | ||||||
|             assert!(parent.is_frozen()); |             assert!(parent.is_frozen()); | ||||||
|  |  | ||||||
|             leader_schedule_cache.slot_leader_at_else_compute(poh_slot, &parent) |             leader_schedule_cache.slot_leader_at(poh_slot, Some(&parent)) | ||||||
|                 .map(|next_leader| { |                 .map(|next_leader| { | ||||||
|                     debug!( |                     debug!( | ||||||
|                         "me: {} leader {} at poh slot {}", |                         "me: {} leader {} at poh slot {}", | ||||||
| @@ -308,12 +309,14 @@ impl ReplayStage { | |||||||
|         vote_account_pubkey: &Pubkey, |         vote_account_pubkey: &Pubkey, | ||||||
|         cluster_info: &Arc<RwLock<ClusterInfo>>, |         cluster_info: &Arc<RwLock<ClusterInfo>>, | ||||||
|         blocktree: &Arc<Blocktree>, |         blocktree: &Arc<Blocktree>, | ||||||
|  |         leader_schedule_cache: &Arc<LeaderScheduleCache>, | ||||||
|     ) -> Result<()> |     ) -> Result<()> | ||||||
|     where |     where | ||||||
|         T: 'static + KeypairUtil + Send + Sync, |         T: 'static + KeypairUtil + Send + Sync, | ||||||
|     { |     { | ||||||
|         if let Some(new_root) = locktower.record_vote(bank.slot()) { |         if let Some(new_root) = locktower.record_vote(bank.slot()) { | ||||||
|             bank_forks.write().unwrap().set_root(new_root); |             bank_forks.write().unwrap().set_root(new_root); | ||||||
|  |             leader_schedule_cache.set_root(new_root); | ||||||
|             blocktree.set_root(new_root)?; |             blocktree.set_root(new_root)?; | ||||||
|             Self::handle_new_root(&bank_forks, progress); |             Self::handle_new_root(&bank_forks, progress); | ||||||
|         } |         } | ||||||
| @@ -593,7 +596,7 @@ impl ReplayStage { | |||||||
|                     continue; |                     continue; | ||||||
|                 } |                 } | ||||||
|                 let leader = leader_schedule_cache |                 let leader = leader_schedule_cache | ||||||
|                     .slot_leader_at_else_compute(child_id, &parent_bank) |                     .slot_leader_at(child_id, Some(&parent_bank)) | ||||||
|                     .unwrap(); |                     .unwrap(); | ||||||
|                 info!("new fork:{} parent:{}", child_id, parent_id); |                 info!("new fork:{} parent:{}", child_id, parent_id); | ||||||
|                 forks.insert(Bank::new_from_parent(&parent_bank, &leader, child_id)); |                 forks.insert(Bank::new_from_parent(&parent_bank, &leader, child_id)); | ||||||
|   | |||||||
| @@ -51,7 +51,7 @@ fn retransmit( | |||||||
|     ); |     ); | ||||||
|     for blob in &blobs { |     for blob in &blobs { | ||||||
|         let leader = leader_schedule_cache |         let leader = leader_schedule_cache | ||||||
|             .slot_leader_at_else_compute(blob.read().unwrap().slot(), r_bank.as_ref()); |             .slot_leader_at(blob.read().unwrap().slot(), Some(r_bank.as_ref())); | ||||||
|         if blob.read().unwrap().meta.forward { |         if blob.read().unwrap().meta.forward { | ||||||
|             ClusterInfo::retransmit_to(&cluster_info, &neighbors, blob, leader, sock, true)?; |             ClusterInfo::retransmit_to(&cluster_info, &neighbors, blob, leader, sock, true)?; | ||||||
|             ClusterInfo::retransmit_to(&cluster_info, &children, blob, leader, sock, false)?; |             ClusterInfo::retransmit_to(&cluster_info, &children, blob, leader, sock, false)?; | ||||||
|   | |||||||
| @@ -85,10 +85,10 @@ fn should_retransmit_and_persist( | |||||||
|     my_id: &Pubkey, |     my_id: &Pubkey, | ||||||
| ) -> bool { | ) -> bool { | ||||||
|     let slot_leader_id = match bank { |     let slot_leader_id = match bank { | ||||||
|         None => leader_schedule_cache.and_then(|cache| cache.slot_leader_at(blob.slot())), |         None => leader_schedule_cache.and_then(|cache| cache.slot_leader_at(blob.slot(), None)), | ||||||
|         Some(bank) => match leader_schedule_cache { |         Some(bank) => match leader_schedule_cache { | ||||||
|             None => slot_leader_at(blob.slot(), &bank), |             None => slot_leader_at(blob.slot(), &bank), | ||||||
|             Some(cache) => cache.slot_leader_at_else_compute(blob.slot(), bank), |             Some(cache) => cache.slot_leader_at(blob.slot(), Some(bank)), | ||||||
|         }, |         }, | ||||||
|     }; |     }; | ||||||
|  |  | ||||||
|   | |||||||
		Reference in New Issue
	
	Block a user