diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs index c8e51ad854..a39902a384 100644 --- a/runtime/src/accounts_db.rs +++ b/runtime/src/accounts_db.rs @@ -35,6 +35,7 @@ use crate::{ contains::Contains, pubkey_bins::PubkeyBinCalculator24, read_only_accounts_cache::ReadOnlyAccountsCache, + rent_collector::RentCollector, sorted_storages::SortedStorages, }; use blake3::traits::digest::Digest; @@ -50,6 +51,7 @@ use rayon::{prelude::*, ThreadPool}; use serde::{Deserialize, Serialize}; use solana_measure::measure::Measure; use solana_rayon_threadlimit::get_thread_count; +use solana_sdk::genesis_config::GenesisConfig; use solana_sdk::{ account::{AccountSharedData, ReadableAccount}, clock::{BankId, Epoch, Slot, SlotCount}, @@ -219,12 +221,13 @@ struct GenerateIndexTimings { pub insertion_time_us: u64, pub min_bin_size: usize, pub max_bin_size: usize, - #[allow(dead_code)] pub total_items: usize, pub storage_size_accounts_map_us: u64, pub storage_size_storages_us: u64, pub storage_size_accounts_map_flatten_us: u64, pub index_flush_us: u64, + pub rent_exempt: u64, + pub total_duplicates: u64, } #[derive(Default, Debug, PartialEq)] @@ -260,6 +263,17 @@ impl GenerateIndexTimings { i64 ), ("index_flush_us", self.index_flush_us as i64, i64), + ( + "total_not_rent_exempt_with_duplicates", + self.total_duplicates.saturating_sub(self.rent_exempt) as i64, + i64 + ), + ( + "total_items_with_duplicates", + self.total_duplicates as i64, + i64 + ), + ("total_items", self.total_items as i64, i64), ); } } @@ -6666,17 +6680,20 @@ impl AccountsDb { accounts_map } + /// return time_us, # accts rent exempt, total # accts fn generate_index_for_slot<'a>( &self, accounts_map: GenerateIndexAccountsMap<'a>, slot: &Slot, - ) -> u64 { + rent_collector: &RentCollector, + ) -> (u64, u64, u64) { if accounts_map.is_empty() { - return 0; + return (0, 0, 0); } let secondary = !self.account_indexes.is_empty(); + let mut rent_exempt = 0; let len = accounts_map.len(); let items = 
accounts_map.into_iter().map( |( @@ -6696,6 +6713,13 @@ impl AccountsDb { ); } + if rent_collector.no_rent(&pubkey, &stored_account, false) || { + let (_rent_due, exempt) = rent_collector.get_rent_due(&stored_account); + exempt + } { + rent_exempt += 1; + } + ( pubkey, AccountInfo { @@ -6718,7 +6742,7 @@ impl AccountsDb { if !dirty_pubkeys.is_empty() { self.uncleaned_pubkeys.insert(*slot, dirty_pubkeys); } - insert_us + (insert_us, rent_exempt, len as u64) } fn filler_unique_id_bytes() -> usize { @@ -6849,13 +6873,27 @@ impl AccountsDb { } #[allow(clippy::needless_collect)] - pub fn generate_index(&self, limit_load_slot_count_from_snapshot: Option<usize>, verify: bool) { + pub fn generate_index( + &self, + limit_load_slot_count_from_snapshot: Option<usize>, + verify: bool, + genesis_config: &GenesisConfig, + ) { let mut slots = self.storage.all_slots(); #[allow(clippy::stable_sort_primitive)] slots.sort(); if let Some(limit) = limit_load_slot_count_from_snapshot { slots.truncate(limit); // get rid of the newer slots and keep just the older } + let max_slot = slots.last().cloned().unwrap_or_default(); + let schedule = genesis_config.epoch_schedule; + let rent_collector = RentCollector::new( + schedule.get_epoch(max_slot), + &schedule, + genesis_config.slots_per_year(), + &genesis_config.rent, + ); + // pass == 0 always runs and generates the index // pass == 1 only runs if verify == true. 
// verify checks that all the expected items are in the accounts index and measures how long it takes to look them all up @@ -6870,6 +6908,8 @@ impl AccountsDb { let chunk_size = (outer_slots_len / 7) + 1; // approximately 400k slots in a snapshot let mut index_time = Measure::start("index"); let insertion_time_us = AtomicU64::new(0); + let rent_exempt = AtomicU64::new(0); + let total_duplicates = AtomicU64::new(0); let storage_info_timings = Mutex::new(GenerateIndexTimings::default()); let scan_time: u64 = slots .par_chunks(chunk_size) @@ -6898,7 +6938,11 @@ impl AccountsDb { let insert_us = if pass == 0 { // generate index - self.generate_index_for_slot(accounts_map, slot) + let (insert_us, rent_exempt_this_slot, total_this_slot) = + self.generate_index_for_slot(accounts_map, slot, &rent_collector); + rent_exempt.fetch_add(rent_exempt_this_slot, Ordering::Relaxed); + total_duplicates.fetch_add(total_this_slot, Ordering::Relaxed); + insert_us } else { // verify index matches expected and measure the time to get all items assert!(verify); @@ -6969,6 +7013,8 @@ impl AccountsDb { min_bin_size, max_bin_size, total_items, + rent_exempt: rent_exempt.load(Ordering::Relaxed), + total_duplicates: total_duplicates.load(Ordering::Relaxed), storage_size_accounts_map_us: storage_info_timings.storage_size_accounts_map_us, storage_size_accounts_map_flatten_us: storage_info_timings .storage_size_accounts_map_flatten_us, diff --git a/runtime/src/rent_collector.rs b/runtime/src/rent_collector.rs index 308660ccf3..b98e5f6f11 100644 --- a/runtime/src/rent_collector.rs +++ b/runtime/src/rent_collector.rs @@ -1,4 +1,5 @@ //! 
calculate and collect rent from Accounts +use log::*; use solana_sdk::{ account::{AccountSharedData, ReadableAccount, WritableAccount}, clock::Epoch, @@ -37,6 +38,11 @@ impl RentCollector { slots_per_year: f64, rent: &Rent, ) -> Self { + info!( + "creating RentCollector, epoch: {}, slots_per_year: {}, rent: {:?}", + epoch, slots_per_year, rent + ); + Self { epoch, epoch_schedule: *epoch_schedule, @@ -52,6 +58,36 @@ impl RentCollector { } } + /// true if it is easy to determine this account should not have rent collected from it + pub fn no_rent( + &self, + address: &Pubkey, + account: &impl ReadableAccount, + rent_for_sysvars: bool, + ) -> bool { + account.executable() // executable accounts must be rent-exempt balance + || account.rent_epoch() > self.epoch + || (!rent_for_sysvars && sysvar::check_id(account.owner())) + || *address == incinerator::id() + } + + /// returns the rent due for this account, and whether the account is rent exempt + pub fn get_rent_due(&self, account: &impl ReadableAccount) -> (u64, bool) { + let slots_elapsed: u64 = (account.rent_epoch()..=self.epoch) + .map(|epoch| self.epoch_schedule.get_slots_in_epoch(epoch + 1)) + .sum(); + + // avoid infinite rent in rust 1.45 + let years_elapsed = if self.slots_per_year != 0.0 { + slots_elapsed as f64 / self.slots_per_year + } else { + 0.0 + }; + + self.rent + .due(account.lamports(), account.data().len(), years_elapsed) + } + // updates this account's lamports and status and returns // the account rent collected, if any // This is NOT thread safe at some level. If we try to collect from the same account in parallel, we may collect twice. 
@@ -63,28 +99,15 @@ impl RentCollector { rent_for_sysvars: bool, filler_account_suffix: Option<&Pubkey>, ) -> u64 { - if account.executable() // executable accounts must be rent-exempt balance - || account.rent_epoch() > self.epoch - || (!rent_for_sysvars && sysvar::check_id(account.owner())) - || *address == incinerator::id() - || crate::accounts_db::AccountsDb::is_filler_account_helper(address, filler_account_suffix) + if self.no_rent(address, account, rent_for_sysvars) + || crate::accounts_db::AccountsDb::is_filler_account_helper( + address, + filler_account_suffix, + ) { 0 } else { - let slots_elapsed: u64 = (account.rent_epoch()..=self.epoch) - .map(|epoch| self.epoch_schedule.get_slots_in_epoch(epoch + 1)) - .sum(); - - // avoid infinite rent in rust 1.45 - let years_elapsed = if self.slots_per_year != 0.0 { - slots_elapsed as f64 / self.slots_per_year - } else { - 0.0 - }; - - let (rent_due, exempt) = - self.rent - .due(account.lamports(), account.data().len(), years_elapsed); + let (rent_due, exempt) = self.get_rent_due(account); if exempt || rent_due != 0 { if account.lamports() > rent_due { diff --git a/runtime/src/serde_snapshot.rs b/runtime/src/serde_snapshot.rs index 72af80e1c6..4860360970 100644 --- a/runtime/src/serde_snapshot.rs +++ b/runtime/src/serde_snapshot.rs @@ -547,7 +547,11 @@ where }) .unwrap(); - accounts_db.generate_index(limit_load_slot_count_from_snapshot, verify_index); + accounts_db.generate_index( + limit_load_slot_count_from_snapshot, + verify_index, + genesis_config, + ); accounts_db.maybe_add_filler_accounts(&genesis_config.epoch_schedule); handle.join().unwrap();