diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs
index 05caea76aa..88c79d445d 100644
--- a/runtime/src/accounts_db.rs
+++ b/runtime/src/accounts_db.rs
@@ -75,7 +75,7 @@ use {
         path::{Path, PathBuf},
         str::FromStr,
         sync::{
-            atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering},
+            atomic::{AtomicBool, AtomicU32, AtomicU64, AtomicUsize, Ordering},
             Arc, Condvar, Mutex, MutexGuard, RwLock,
         },
         thread::Builder,
@@ -353,8 +353,8 @@ impl<'a> MultiThreadProgress<'a> {
 }
 
 /// An offset into the AccountsDb::storage vector
-pub type AtomicAppendVecId = AtomicUsize;
-pub type AppendVecId = usize;
+pub type AtomicAppendVecId = AtomicU32;
+pub type AppendVecId = u32;
 
 pub type SnapshotStorage = Vec<Arc<AccountStorageEntry>>;
 pub type SnapshotStorages = Vec<SnapshotStorage>;
@@ -11255,6 +11255,58 @@ pub mod tests {
         }
     }
 
+    #[test]
+    #[should_panic(expected = "We've run out of storage ids!")]
+    fn test_wrapping_append_vec_id() {
+        let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
+        let zero_lamport_account =
+            AccountSharedData::new(0, 0, AccountSharedData::default().owner());
+
+        // set 'next' id to the max possible value
+        db.next_id.store(AppendVecId::MAX, Ordering::Release);
+        let slots = 3;
+        let keys = (0..slots).map(|_| Pubkey::new_unique()).collect::<Vec<_>>();
+        // write unique keys to successive slots
+        keys.iter().enumerate().for_each(|(slot, key)| {
+            let slot = slot as Slot;
+            db.store_uncached(slot, &[(key, &zero_lamport_account)]);
+            db.get_accounts_delta_hash(slot);
+            db.add_root(slot);
+        });
+        assert_eq!(slots - 1, db.next_id.load(Ordering::Acquire));
+        let ancestors = Ancestors::default();
+        keys.iter().for_each(|key| {
+            assert!(db.load_without_fixed_root(&ancestors, key).is_some());
+        });
+    }
+
+    #[test]
+    #[should_panic(expected = "We've run out of storage ids!")]
+    fn test_reuse_append_vec_id() {
+        solana_logger::setup();
+        let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
+        let zero_lamport_account =
+            AccountSharedData::new(0, 0, AccountSharedData::default().owner());
+
+        // set 'next' id to the max possible value
+        db.next_id.store(AppendVecId::MAX, Ordering::Release);
+        let slots = 3;
+        let keys = (0..slots).map(|_| Pubkey::new_unique()).collect::<Vec<_>>();
+        // write unique keys to successive slots
+        keys.iter().enumerate().for_each(|(slot, key)| {
+            let slot = slot as Slot;
+            db.store_uncached(slot, &[(key, &zero_lamport_account)]);
+            db.get_accounts_delta_hash(slot);
+            db.add_root(slot);
+            // reset next_id to what it was previously to cause us to re-use the same id
+            db.next_id.store(AppendVecId::MAX, Ordering::Release);
+        });
+        let ancestors = Ancestors::default();
+        keys.iter().for_each(|key| {
+            assert!(db.load_without_fixed_root(&ancestors, key).is_some());
+        });
+    }
+
     #[test]
     fn test_zero_lamport_new_root_not_cleaned() {
         let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
diff --git a/runtime/src/serde_snapshot.rs b/runtime/src/serde_snapshot.rs
index 5f427cd436..8fd66415d4 100644
--- a/runtime/src/serde_snapshot.rs
+++ b/runtime/src/serde_snapshot.rs
@@ -16,7 +16,7 @@ use {
         epoch_stakes::EpochStakes,
         hardened_unpack::UnpackedAppendVecMap,
         rent_collector::RentCollector,
-        serde_snapshot::future::SerializableStorage,
+        serde_snapshot::future::{AppendVecIdSerialized, SerializableStorage},
         stakes::Stakes,
     },
     bincode::{self, config::Options, Error},
@@ -468,7 +468,7 @@ where
             // rename the file to this new path.
             // **DEVELOPER NOTE:** Keep this check last so that it can short-circuit if
             // possible.
-            if storage_entry.id() == remapped_append_vec_id
+            if storage_entry.id() == remapped_append_vec_id as AppendVecIdSerialized
                 || std::fs::metadata(&remapped_append_vec_path).is_err()
             {
                 break (remapped_append_vec_id, remapped_append_vec_path);
@@ -479,7 +479,7 @@ where
             num_collisions.fetch_add(1, Ordering::Relaxed);
         };
         // Only rename the file if the new ID is actually different from the original.
-        if storage_entry.id() != remapped_append_vec_id {
+        if storage_entry.id() != remapped_append_vec_id as AppendVecIdSerialized {
             std::fs::rename(append_vec_path, &remapped_append_vec_path)?;
         }
 
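
A note on the two new tests (not part of the diff): they pin db.next_id at AppendVecId::MAX and expect the panic message "We've run out of storage ids!", but the allocator that actually raises that panic sits outside the hunks shown above. Below is a minimal sketch of how such a guard could look, assuming a fetch_add-based counter; next_append_vec_id here is a hypothetical stand-in, not the AccountsDb method itself.

    use std::sync::atomic::{AtomicU32, Ordering};

    type AppendVecId = u32;

    // Hypothetical guard: hand out the next id, and panic once the u32 space
    // is exhausted rather than wrapping to 0 and silently re-using ids.
    fn next_append_vec_id(next_id: &AtomicU32) -> AppendVecId {
        let id = next_id.fetch_add(1, Ordering::AcqRel);
        assert!(id != AppendVecId::MAX, "We've run out of storage ids!");
        id
    }

    fn main() {
        let next_id = AtomicU32::new(AppendVecId::MAX);
        next_append_vec_id(&next_id); // panics: the counter is at the end of the u32 range
    }

With the id narrowed from usize to u32 it becomes possible, at least in principle, to exhaust the range, and re-using an id would alias two different append vecs; under these semantics the first store_uncached in each test is expected to panic immediately, which is what the should_panic attributes encode.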
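On the serde_snapshot side, storage_entry.id() is read back from the snapshot wire format and typed as AppendVecIdSerialized, a separate alias that the on-disk format keeps; only the in-memory AppendVecId shrank to u32, so the two comparisons now convert the freshly remapped id with "as AppendVecIdSerialized". A small sketch of that type relationship follows; both aliases below are stand-ins, and the real width of AppendVecIdSerialized (defined in serde_snapshot::future, not shown in this diff) is assumed to be the wider, pre-change one.

    // Hypothetical stand-ins: the real AppendVecIdSerialized lives in
    // serde_snapshot::future; a wider integer is assumed here since the
    // on-disk format predates the u32 change.
    type AppendVecIdSerialized = u64;
    type AppendVecId = u32;

    // Convert the in-memory id to the serialized width before comparing,
    // mirroring "remapped_append_vec_id as AppendVecIdSerialized" above.
    fn ids_match(serialized_id: AppendVecIdSerialized, remapped_id: AppendVecId) -> bool {
        serialized_id == remapped_id as AppendVecIdSerialized
    }

    fn main() {
        let old_id: AppendVecIdSerialized = 7;
        let new_id: AppendVecId = 7;
        assert!(ids_match(old_id, new_id));
    }

Keeping the serialized alias at its old width means existing snapshots deserialize unchanged; only freshly allocated in-memory ids are constrained to the u32 range.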