From 9d330fc6384320b0610e00bc520b63cf7cd4415a Mon Sep 17 00:00:00 2001
From: "Jeff Washington (jwash)" <75863576+jeffwashington@users.noreply.github.com>
Date: Wed, 27 Oct 2021 11:18:27 -0500
Subject: [PATCH] FillerAccts: use variable cycle partitions (#20963)

---
 runtime/src/accounts_db.rs    |  16 ++----
 runtime/src/bank.rs           | 103 +++++++++++++++++++++++++++++++---
 runtime/src/serde_snapshot.rs |   2 +-
 3 files changed, 102 insertions(+), 19 deletions(-)

diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs
index cd6c51b03e..82dffb269f 100644
--- a/runtime/src/accounts_db.rs
+++ b/runtime/src/accounts_db.rs
@@ -52,7 +52,8 @@ use solana_measure::measure::Measure;
 use solana_rayon_threadlimit::get_thread_count;
 use solana_sdk::{
     account::{AccountSharedData, ReadableAccount},
-    clock::{BankId, Epoch, Slot, SlotCount},
+    clock::{BankId, Epoch, Slot},
+    epoch_schedule::EpochSchedule,
     genesis_config::ClusterType,
     hash::{Hash, Hasher},
     pubkey::Pubkey,
@@ -6766,7 +6767,7 @@ impl AccountsDb {
     /// The filler accounts are added to each slot in the snapshot after index generation.
     /// The accounts added in a slot are setup to have pubkeys such that rent will be collected from them before (or when?) their slot becomes an epoch old.
     /// Thus, the filler accounts are rewritten by rent and the old slot can be thrown away successfully.
-    pub fn maybe_add_filler_accounts(&self, ticks_per_slot: SlotCount) {
+    pub fn maybe_add_filler_accounts(&self, epoch_schedule: &EpochSchedule) {
         if self.filler_account_count == 0 {
             return;
         }
@@ -6792,8 +6793,6 @@ impl AccountsDb {
                 .skip(pass * per_pass)
                 .take(per_pass)
                 .collect::<Vec<_>>();
-            let slot_count_in_two_day =
-                crate::bank::Bank::slot_count_in_two_day_helper(ticks_per_slot);
             self.thread_pool.install(|| {
                 roots_in_this_pass.into_par_iter().for_each(|slot| {
                     let storage_maps: Vec<Arc<AccountStorageEntry>> = self
@@ -6804,13 +6803,10 @@ impl AccountsDb {
                         return;
                     }
 
-                    let partition = *crate::bank::Bank::get_partitions(
+                    let partition = crate::bank::Bank::variable_cycle_partition_from_previous_slot(
+                        epoch_schedule,
                         *slot,
-                        slot.saturating_sub(1),
-                        slot_count_in_two_day,
-                    )
-                    .last()
-                    .unwrap();
+                    );
                     let subrange = crate::bank::Bank::pubkey_range_from_partition(partition);
 
                     let idx = overall_index.fetch_add(1, Ordering::Relaxed);
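The deleted code derived a fixed two-day partition count from ticks_per_slot and took the last partition for the slot pair; the replacement asks the bank for the same variable-cycle partition that eager rent collection itself would use for this slot under the given epoch schedule. `pubkey_range_from_partition` then turns that partition into the pubkey subrange in which the filler accounts are created, which is what guarantees the normal rent pass rewrites them. Below is a loose sketch of the partition-to-range idea, illustrative only: the real `Bank::pubkey_range_from_partition` works on full 32-byte keys and treats the start index as exclusive except at the origin, which this toy version ignores.

```rust
// Illustrative only: map a rent partition to a u64 pubkey-prefix range.
// The real Bank::pubkey_range_from_partition in runtime/src/bank.rs is more
// careful (32-byte bounds, exclusive start index except at the origin).
type Partition = (u64, u64, u64); // (start_index, end_index, partition_count)

fn prefix_range((start_index, end_index, partition_count): Partition) -> (u64, u64) {
    assert!(partition_count > 0 && end_index < partition_count);
    let width = u64::MAX / partition_count; // prefix width of one partition
    let lo = width.saturating_mul(start_index);
    // in this simplified reading the range runs to the end of end_index's slice
    let hi = width.saturating_mul(end_index).saturating_add(width);
    (lo, hi)
}

fn main() {
    // (4, 5, 32): from the start of slice 4 to the end of slice 5 of 32
    println!("{:?}", prefix_range((4, 5, 32)));
}
```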
diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs
index a7f2b33a08..f490071de2 100644
--- a/runtime/src/bank.rs
+++ b/runtime/src/bank.rs
@@ -4550,6 +4550,32 @@ impl Bank {
         Self::get_partitions(self.slot(), self.parent_slot(), slot_count_in_two_day)
     }
 
+    pub fn variable_cycle_partition_from_previous_slot(
+        epoch_schedule: &EpochSchedule,
+        slot: Slot,
+    ) -> Partition {
+        // similar code to Bank::variable_cycle_partitions
+        let (current_epoch, current_slot_index) = epoch_schedule.get_epoch_and_slot_index(slot);
+        let (parent_epoch, mut parent_slot_index) =
+            epoch_schedule.get_epoch_and_slot_index(slot.saturating_sub(1));
+        let cycle_params = Self::rent_single_epoch_collection_cycle_params(
+            current_epoch,
+            epoch_schedule.get_slots_in_epoch(current_epoch),
+        );
+
+        if parent_epoch < current_epoch {
+            parent_slot_index = 0;
+        }
+
+        let generated_for_gapped_epochs = false;
+        Self::get_partition_from_slot_indexes(
+            cycle_params,
+            parent_slot_index,
+            current_slot_index,
+            generated_for_gapped_epochs,
+        )
+    }
+
     fn variable_cycle_partitions(&self) -> Vec<Partition> {
         let (current_epoch, current_slot_index) = self.get_epoch_and_slot_index(self.slot());
         let (parent_epoch, mut parent_slot_index) =
@@ -4600,6 +4626,20 @@ impl Bank {
         generated_for_gapped_epochs: bool,
     ) -> Partition {
         let cycle_params = self.determine_collection_cycle_params(epoch);
+        Self::get_partition_from_slot_indexes(
+            cycle_params,
+            start_slot_index,
+            end_slot_index,
+            generated_for_gapped_epochs,
+        )
+    }
+
+    pub fn get_partition_from_slot_indexes(
+        cycle_params: RentCollectionCycleParams,
+        start_slot_index: SlotIndex,
+        end_slot_index: SlotIndex,
+        generated_for_gapped_epochs: bool,
+    ) -> Partition {
         let (_, _, in_multi_epoch_cycle, _, _, partition_count) = cycle_params;
 
         // use common codepath for both very likely and very unlikely for the sake of minimized
@@ -4669,18 +4709,26 @@ impl Bank {
         self.do_partition_from_slot_indexes(start_slot_index, end_slot_index, epoch, true)
     }
 
+    pub fn rent_single_epoch_collection_cycle_params(
+        epoch: Epoch,
+        slot_count_per_epoch: SlotCount,
+    ) -> RentCollectionCycleParams {
+        (
+            epoch,
+            slot_count_per_epoch,
+            false,
+            0,
+            1,
+            slot_count_per_epoch,
+        )
+    }
+
     fn determine_collection_cycle_params(&self, epoch: Epoch) -> RentCollectionCycleParams {
         let slot_count_per_epoch = self.get_slots_in_epoch(epoch);
 
         if !self.use_multi_epoch_collection_cycle(epoch) {
-            (
-                epoch,
-                slot_count_per_epoch,
-                false,
-                0,
-                1,
-                slot_count_per_epoch,
-            )
+            // mnb should always go through this code path
+            Self::rent_single_epoch_collection_cycle_params(epoch, slot_count_per_epoch)
         } else {
             let epoch_count_in_cycle = self.slot_count_in_two_day() / slot_count_per_epoch;
             let partition_count = slot_count_per_epoch * epoch_count_in_cycle;
@@ -7484,6 +7532,26 @@ pub(crate) mod tests {
         assert_eq!(bank.collected_rent.load(Relaxed), rent_collected);
     }
 
+    fn test_rent_collection_partitions(bank: &Bank) -> Vec<Partition> {
+        let partitions = bank.rent_collection_partitions();
+        let slot = bank.slot();
+        if slot.saturating_sub(1) == bank.parent_slot() {
+            let partition = Bank::variable_cycle_partition_from_previous_slot(
+                bank.epoch_schedule(),
+                bank.slot(),
+            );
+            assert_eq!(
+                partitions.last().unwrap(),
+                &partition,
+                "slot: {}, slots per epoch: {}, partitions: {:?}",
+                bank.slot(),
+                bank.epoch_schedule().slots_per_epoch,
+                partitions
+            );
+        }
+        partitions
+    }
+
     #[test]
     fn test_rent_eager_across_epoch_without_gap() {
         let (genesis_config, _mint_keypair) = create_genesis_config(1);
@@ -7501,6 +7569,25 @@ pub(crate) mod tests {
         assert_eq!(bank.rent_collection_partitions(), vec![(0, 0, 64)]);
     }
 
+    #[test]
+    fn test_rent_eager_across_epoch_without_gap_mnb() {
+        solana_logger::setup();
+        let (mut genesis_config, _mint_keypair) = create_genesis_config(1);
+        genesis_config.cluster_type = ClusterType::MainnetBeta;
+
+        let mut bank = Arc::new(Bank::new_for_tests(&genesis_config));
+        assert_eq!(test_rent_collection_partitions(&bank), vec![(0, 0, 32)]);
+
+        bank = Arc::new(new_from_parent(&bank));
+        assert_eq!(test_rent_collection_partitions(&bank), vec![(0, 1, 32)]);
+        for _ in 2..32 {
+            bank = Arc::new(new_from_parent(&bank));
+        }
+        assert_eq!(test_rent_collection_partitions(&bank), vec![(30, 31, 32)]);
+        bank = Arc::new(new_from_parent(&bank));
+        assert_eq!(test_rent_collection_partitions(&bank), vec![(0, 0, 64)]);
+    }
+
     #[test]
     fn test_rent_eager_across_epoch_with_full_gap() {
         let (mut genesis_config, _mint_keypair) = create_genesis_config(1);
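To make the new static entry point concrete, here is a sketch of calling it the way `maybe_add_filler_accounts` now does, one slot at a time. It assumes the snippet sits inside the runtime crate's test module where `Bank` and `Partition` are in scope, and that `EpochSchedule::custom(slots_per_epoch, leader_schedule_slot_offset, warmup)` is the solana-sdk constructor; the expected value follows the same (0, 1, 32) .. (30, 31, 32) sequence the new mnb test above asserts.

```rust
use solana_sdk::epoch_schedule::EpochSchedule;

#[test]
fn demo_variable_cycle_partition_from_previous_slot() {
    // 32-slot epochs with no warmup, so slot 5 is slot_index 5 of epoch 0
    let epoch_schedule = EpochSchedule::custom(32, 32, false);
    let partition = Bank::variable_cycle_partition_from_previous_slot(&epoch_schedule, 5);
    // Consecutive slots inside one epoch yield
    // (parent_slot_index, current_slot_index, slots_per_epoch).
    assert_eq!(partition, (4, 5, 32));
}
```

At an epoch boundary, `parent_slot_index` is reset to 0, so the partition restarts at the front of the new, possibly larger, cycle; that is the (30, 31, 32) to (0, 0, 64) transition the test checks.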
diff --git a/runtime/src/serde_snapshot.rs b/runtime/src/serde_snapshot.rs
index 0ac7719b6c..72af80e1c6 100644
--- a/runtime/src/serde_snapshot.rs
+++ b/runtime/src/serde_snapshot.rs
@@ -548,7 +548,7 @@ where
         .unwrap();
     accounts_db.generate_index(limit_load_slot_count_from_snapshot, verify_index);
 
-    accounts_db.maybe_add_filler_accounts(genesis_config.ticks_per_slot());
+    accounts_db.maybe_add_filler_accounts(&genesis_config.epoch_schedule);
 
     handle.join().unwrap();
    measure_notify.stop();
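The serde_snapshot.rs change is the mechanical half: snapshot load now hands the accounts database the genesis epoch schedule instead of ticks_per_slot, so filler-account partitioning follows the bank's actual per-epoch rent cycle rather than a fixed two-day slot count. For reference, a field-by-field reading of the tuple built by `rent_single_epoch_collection_cycle_params`; the alias layout mirrors `RentCollectionCycleParams` in runtime/src/bank.rs, but the comments are interpretation, not source.

```rust
type Epoch = u64;
type SlotCount = u64;
type EpochCount = u64;
type PartitionsPerCycle = u64;

type RentCollectionCycleParams = (
    Epoch,              // epoch these params were computed for
    SlotCount,          // slot_count_per_epoch
    bool,               // multi_epoch_cycle: false on the single-epoch path
    Epoch,              // base_epoch: 0, only meaningful in multi-epoch cycles
    EpochCount,         // epoch_count_per_cycle: 1, one epoch per cycle
    PartitionsPerCycle, // partition_count == slot_count_per_epoch
);
```

With one partition per slot and a cycle exactly one epoch long, every filler account lands in some slot's collection partition and is rewritten within an epoch, which is the invariant the commit message's "variable cycle partitions" refers to.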