Fixup atomic ordering on BucketStorage Header (#20005)

Lock-type operations should use `Ordering::Acquire`, and unlock-type
operations should use `Ordering::Release`, so set that here.

Additionally, change `unlock()` so it cannot fail.
This commit is contained in:
Brooks Prumo
2021-09-20 21:59:41 -05:00
committed by GitHub
parent 4895c69fea
commit afa04f7abd
2 changed files with 15 additions and 18 deletions

View File

@@ -240,7 +240,7 @@ impl<T: Clone + Copy> Bucket<T> {
if best_bucket.uid(ix) == 0 { if best_bucket.uid(ix) == 0 {
let elem_loc = elem.data_loc(current_bucket); let elem_loc = elem.data_loc(current_bucket);
if elem.num_slots > 0 { if elem.num_slots > 0 {
current_bucket.free(elem_loc, elem_uid).unwrap(); current_bucket.free(elem_loc, elem_uid);
} }
// elem: &mut IndexEntry = self.index.get_mut(elem_ix); // elem: &mut IndexEntry = self.index.get_mut(elem_ix);
elem.storage_offset = ix; elem.storage_offset = ix;
@@ -266,10 +266,10 @@ impl<T: Clone + Copy> Bucket<T> {
let data_bucket = &self.data[elem.data_bucket_ix() as usize]; let data_bucket = &self.data[elem.data_bucket_ix() as usize];
let loc = elem.data_loc(data_bucket); let loc = elem.data_loc(data_bucket);
//debug!( "DATA FREE {:?} {} {} {}", key, elem.data_location, data_bucket.capacity, elem_uid ); //debug!( "DATA FREE {:?} {} {} {}", key, elem.data_location, data_bucket.capacity, elem_uid );
data_bucket.free(loc, elem_uid).unwrap(); data_bucket.free(loc, elem_uid);
} }
//debug!("INDEX FREE {:?} {}", key, elem_uid); //debug!("INDEX FREE {:?} {}", key, elem_uid);
self.index.free(elem_ix, elem_uid).unwrap(); self.index.free(elem_ix, elem_uid);
} }
} }

View File

@@ -41,13 +41,10 @@ impl Header {
Ok(0) Ok(0)
== self == self
.lock .lock
.compare_exchange(0, uid, Ordering::Relaxed, Ordering::Relaxed) .compare_exchange(0, uid, Ordering::Acquire, Ordering::Relaxed)
} }
fn unlock(&self, uid: u64) -> bool { fn unlock(&self) -> u64 {
Ok(uid) self.lock.swap(0, Ordering::Release)
== self
.lock
.compare_exchange(uid, 0, Ordering::Relaxed, Ordering::Relaxed)
} }
fn uid(&self) -> u64 { fn uid(&self) -> u64 {
self.lock.load(Ordering::Relaxed) self.lock.load(Ordering::Relaxed)
@@ -68,7 +65,6 @@ pub struct BucketStorage {
#[derive(Debug)] #[derive(Debug)]
pub enum BucketStorageError { pub enum BucketStorageError {
AlreadyAllocated, AlreadyAllocated,
InvalidFree,
} }
impl Drop for BucketStorage { impl Drop for BucketStorage {
@@ -154,7 +150,7 @@ impl BucketStorage {
e e
} }
pub fn free(&self, ix: u64, uid: u64) -> Result<(), BucketStorageError> { pub fn free(&self, ix: u64, uid: u64) {
if ix >= self.num_cells() { if ix >= self.num_cells() {
panic!("free: bad index size"); panic!("free: bad index size");
} }
@@ -164,16 +160,17 @@ impl BucketStorage {
let ix = (ix * self.cell_size) as usize; let ix = (ix * self.cell_size) as usize;
//debug!("FREE {} {}", ix, uid); //debug!("FREE {} {}", ix, uid);
let hdr_slice: &[u8] = &self.mmap[ix..ix + std::mem::size_of::<Header>()]; let hdr_slice: &[u8] = &self.mmap[ix..ix + std::mem::size_of::<Header>()];
let mut e = Err(BucketStorageError::InvalidFree);
unsafe { unsafe {
let hdr = hdr_slice.as_ptr() as *const Header; let hdr = hdr_slice.as_ptr() as *const Header;
//debug!("FREE uid: {}", hdr.as_ref().unwrap().uid()); //debug!("FREE uid: {}", hdr.as_ref().unwrap().uid());
if hdr.as_ref().unwrap().unlock(uid) { let previous_uid = hdr.as_ref().unwrap().unlock();
assert_eq!(
previous_uid, uid,
"free: unlocked a header with a differet uid: {}",
previous_uid
);
self.used.fetch_sub(1, Ordering::Relaxed); self.used.fetch_sub(1, Ordering::Relaxed);
e = Ok(());
} }
};
e
} }
pub fn get<T: Sized>(&self, ix: u64) -> &T { pub fn get<T: Sized>(&self, ix: u64) -> &T {