diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs
index 1c22489fde..9cc936cecf 100644
--- a/runtime/src/accounts_db.rs
+++ b/runtime/src/accounts_db.rs
@@ -1717,7 +1717,7 @@ impl AccountsDb {
         AccountStorageEntry::new(
             path,
             slot,
-            self.next_id.fetch_add(1, Ordering::Relaxed),
+            self.next_id.fetch_add(1, Ordering::AcqRel),
             size,
         )
     }
@@ -3729,7 +3729,7 @@ impl AccountsDb {
         let ret = recycle_stores.remove_entry(i);
         drop(recycle_stores);
         let old_id = ret.append_vec_id();
-        ret.recycle(slot, self.next_id.fetch_add(1, Ordering::Relaxed));
+        ret.recycle(slot, self.next_id.fetch_add(1, Ordering::AcqRel));
         debug!(
             "recycling store: {} {:?} old_id: {}",
             ret.append_vec_id(),
diff --git a/runtime/src/serde_snapshot.rs b/runtime/src/serde_snapshot.rs
index ef0ea8e5ac..40c92b2851 100644
--- a/runtime/src/serde_snapshot.rs
+++ b/runtime/src/serde_snapshot.rs
@@ -463,7 +463,7 @@ where
             // Remap the AppendVec ID to handle any duplicate IDs that may previously existed
             // due to full snapshots and incremental snapshots generated from different nodes
             let (remapped_append_vec_id, remapped_append_vec_path) = loop {
-                let remapped_append_vec_id = next_append_vec_id.fetch_add(1, Ordering::Relaxed);
+                let remapped_append_vec_id = next_append_vec_id.fetch_add(1, Ordering::AcqRel);
                 let remapped_file_name = AppendVec::file_name(*slot, remapped_append_vec_id);
                 let remapped_append_vec_path =
                     append_vec_path.parent().unwrap().join(&remapped_file_name);
@@ -512,7 +512,7 @@ where
         "At least one storage entry must exist from deserializing stream"
     );

-    let next_append_vec_id = next_append_vec_id.load(Ordering::Relaxed);
+    let next_append_vec_id = next_append_vec_id.load(Ordering::Acquire);
     let max_append_vec_id = next_append_vec_id - 1;
     assert!(
         max_append_vec_id <= AppendVecId::MAX / 2,
@@ -533,7 +533,7 @@ where
     );
     accounts_db
         .next_id
-        .store(next_append_vec_id, Ordering::Relaxed);
+        .store(next_append_vec_id, Ordering::Release);
     accounts_db
         .write_version
         .fetch_add(snapshot_version, Ordering::Release);
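
Note on the pattern this diff adopts: every Relaxed operation on the next_id / next_append_vec_id counter is replaced by the stronger fetch_add(AcqRel) for allocation, store(Release) for publishing the counter after snapshot deserialization, and load(Acquire) for reading it back. Below is a minimal, self-contained sketch of that pairing. It is not the AccountsDb code itself: main, the thread setup, and the assertion are illustrative; only the atomic calls and the name next_id mirror the diff.

use std::sync::atomic::{AtomicU32, Ordering};
use std::sync::Arc;
use std::thread;

fn main() {
    // Counter that hands out unique IDs, as AccountsDb::next_id does.
    let next_id = Arc::new(AtomicU32::new(0));

    // Concurrent allocators, standing in for new_storage_entry()/recycle().
    // fetch_add(AcqRel) both acquires prior writes and releases its own.
    let handles: Vec<_> = (0..4)
        .map(|_| {
            let next_id = Arc::clone(&next_id);
            thread::spawn(move || next_id.fetch_add(1, Ordering::AcqRel))
        })
        .collect();

    let ids: Vec<u32> = handles.into_iter().map(|h| h.join().unwrap()).collect();

    // Publishing side: a Release store makes the counter value (and the
    // writes before it) visible to any thread that later does an Acquire
    // load, mirroring the store/load pair in serde_snapshot.rs.
    next_id.store(ids.len() as u32, Ordering::Release);
    let observed = next_id.load(Ordering::Acquire);
    assert_eq!(observed as usize, ids.len());
    println!("allocated ids: {ids:?}, next_id now {observed}");
}

On the design choice: if uniqueness were the only requirement, Relaxed would already be sufficient, since atomic read-modify-write operations never lose updates. The stronger orderings additionally establish happens-before edges, so a load(Acquire) that sees the published counter also sees everything written before the matching store(Release) or fetch_add(AcqRel).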