Constrain memory ordering on AccountsDb::next_id (#21311)

This commit is contained in:
Brooks Prumo
2021-11-18 20:34:37 -06:00
committed by GitHub
parent f7152c889c
commit c24e30f689
2 changed files with 5 additions and 5 deletions

View File

@@ -1717,7 +1717,7 @@ impl AccountsDb {
AccountStorageEntry::new( AccountStorageEntry::new(
path, path,
slot, slot,
self.next_id.fetch_add(1, Ordering::Relaxed), self.next_id.fetch_add(1, Ordering::AcqRel),
size, size,
) )
} }
@@ -3729,7 +3729,7 @@ impl AccountsDb {
let ret = recycle_stores.remove_entry(i); let ret = recycle_stores.remove_entry(i);
drop(recycle_stores); drop(recycle_stores);
let old_id = ret.append_vec_id(); let old_id = ret.append_vec_id();
ret.recycle(slot, self.next_id.fetch_add(1, Ordering::Relaxed)); ret.recycle(slot, self.next_id.fetch_add(1, Ordering::AcqRel));
debug!( debug!(
"recycling store: {} {:?} old_id: {}", "recycling store: {} {:?} old_id: {}",
ret.append_vec_id(), ret.append_vec_id(),

View File

@@ -463,7 +463,7 @@ where
// Remap the AppendVec ID to handle any duplicate IDs that may have previously existed // Remap the AppendVec ID to handle any duplicate IDs that may have previously existed
// due to full snapshots and incremental snapshots generated from different nodes // due to full snapshots and incremental snapshots generated from different nodes
let (remapped_append_vec_id, remapped_append_vec_path) = loop { let (remapped_append_vec_id, remapped_append_vec_path) = loop {
let remapped_append_vec_id = next_append_vec_id.fetch_add(1, Ordering::Relaxed); let remapped_append_vec_id = next_append_vec_id.fetch_add(1, Ordering::AcqRel);
let remapped_file_name = AppendVec::file_name(*slot, remapped_append_vec_id); let remapped_file_name = AppendVec::file_name(*slot, remapped_append_vec_id);
let remapped_append_vec_path = let remapped_append_vec_path =
append_vec_path.parent().unwrap().join(&remapped_file_name); append_vec_path.parent().unwrap().join(&remapped_file_name);
@@ -512,7 +512,7 @@ where
"At least one storage entry must exist from deserializing stream" "At least one storage entry must exist from deserializing stream"
); );
let next_append_vec_id = next_append_vec_id.load(Ordering::Relaxed); let next_append_vec_id = next_append_vec_id.load(Ordering::Acquire);
let max_append_vec_id = next_append_vec_id - 1; let max_append_vec_id = next_append_vec_id - 1;
assert!( assert!(
max_append_vec_id <= AppendVecId::MAX / 2, max_append_vec_id <= AppendVecId::MAX / 2,
@@ -533,7 +533,7 @@ where
); );
accounts_db accounts_db
.next_id .next_id
.store(next_append_vec_id, Ordering::Relaxed); .store(next_append_vec_id, Ordering::Release);
accounts_db accounts_db
.write_version .write_version
.fetch_add(snapshot_version, Ordering::Release); .fetch_add(snapshot_version, Ordering::Release);