rename capacity to capacity_pow2

Brooks Prumo
2021-09-15 16:02:47 -05:00
committed by Jeff Washington (jwash)
parent 8f7a9e8f0d
commit 4b72a5947a
3 changed files with 21 additions and 22 deletions


@@ -169,12 +169,12 @@ impl<T: Clone + Copy> Bucket<T> {
             elem.key = *key;
             elem.ref_count = ref_count;
             elem.data_location = 0;
-            elem.create_bucket_capacity = 0;
+            elem.create_bucket_capacity_pow2 = 0;
             elem.num_slots = 0;
             //debug!( "INDEX ALLOC {:?} {} {} {}", key, ii, index.capacity, elem_uid );
             return Ok(ii);
         }
-        Err(BucketMapError::IndexNoSpace(index.capacity))
+        Err(BucketMapError::IndexNoSpace(index.capacity_pow2))
     }

     pub fn addref(&mut self, key: &Pubkey) -> Option<RefCount> {
@@ -245,7 +245,7 @@ impl<T: Clone + Copy> Bucket<T> {
         } else {
             //need to move the allocation to a best fit spot
             let best_bucket = &self.data[best_fit_bucket as usize];
-            let cap_power = best_bucket.capacity;
+            let cap_power = best_bucket.capacity_pow2;
             let cap = best_bucket.num_cells();
             let pos = thread_rng().gen_range(0, cap);
             for i in pos..pos + self.index.max_search() {
@@ -257,7 +257,7 @@ impl<T: Clone + Copy> Bucket<T> {
                     }
                     // elem: &mut IndexEntry = self.index.get_mut(elem_ix);
                     elem.data_location = ix;
-                    elem.create_bucket_capacity = best_bucket.capacity;
+                    elem.create_bucket_capacity_pow2 = best_bucket.capacity_pow2;
                     elem.num_slots = data.len() as u64;
                     //debug!( "DATA ALLOC {:?} {} {} {}", key, elem.data_location, best_bucket.capacity, elem_uid );
                     if elem.num_slots > 0 {
@@ -287,7 +287,7 @@ impl<T: Clone + Copy> Bucket<T> {
     }

     pub fn grow_index(&mut self, sz: u8) {
-        if self.index.capacity == sz {
+        if self.index.capacity_pow2 == sz {
             let mut m = Measure::start("");
             //debug!("GROW_INDEX: {}", sz);
             let increment = 1;
@@ -299,7 +299,7 @@ impl<T: Clone + Copy> Bucket<T> {
                     Arc::clone(&self.drives),
                     1,
                     std::mem::size_of::<IndexEntry>() as u64,
-                    self.index.capacity + i, // * 2,
+                    self.index.capacity_pow2 + i, // * 2,
                     self.index.max_search,
                     Arc::clone(&self.stats.index),
                 );
@@ -335,7 +335,7 @@ impl<T: Clone + Copy> Bucket<T> {
                 }
             }
             m.stop();
-            let sz = 1 << self.index.capacity;
+            let sz = 1 << self.index.capacity_pow2;
             {
                 let mut max = self.stats.index.max_size.lock().unwrap();
                 *max = std::cmp::max(*max, sz);
@@ -360,7 +360,7 @@ impl<T: Clone + Copy> Bucket<T> {
                 ))
             }
         }
-        if self.data[sz.0 as usize].capacity == sz.1 {
+        if self.data[sz.0 as usize].capacity_pow2 == sz.1 {
             //debug!("GROW_DATA: {} {}", sz.0, sz.1);
             self.data[sz.0 as usize].grow();
         }
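
The renamed fields hold a base-2 exponent, not an element or byte count: the real size is always 1 << capacity_pow2, and grow_index only resizes when the exponent it is handed still matches the current one, so a stale IndexNoSpace error does not trigger a redundant second resize. A minimal sketch of that convention, using a hypothetical Index type rather than the repository's code:

// Sketch only: `Index`, `num_cells` and `grow_if_unchanged` are illustrative
// stand-ins for the exponent-based bookkeeping this rename makes explicit.
struct Index {
    capacity_pow2: u8, // log2 of the number of cells
}

impl Index {
    fn num_cells(&self) -> u64 {
        1u64 << self.capacity_pow2 // capacity_pow2 == 8 means 256 cells
    }

    // `observed_pow2` is the exponent reported with a failed allocation (the
    // value carried by BucketMapError::IndexNoSpace). If the index has grown
    // since that failure was recorded, the guard skips a redundant doubling.
    fn grow_if_unchanged(&mut self, observed_pow2: u8) {
        if self.capacity_pow2 == observed_pow2 {
            self.capacity_pow2 += 1; // one increment == one doubling
        }
    }
}

fn main() {
    let mut index = Index { capacity_pow2: 8 };
    assert_eq!(index.num_cells(), 256);
    index.grow_if_unchanged(8);
    assert_eq!(index.num_cells(), 512);
    index.grow_if_unchanged(8); // stale exponent: no further growth
    assert_eq!(index.num_cells(), 512);
}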

@@ -78,8 +78,7 @@ pub struct DataBucket {
     path: PathBuf,
     mmap: MmapMut,
     pub cell_size: u64,
-    //power of 2
-    pub capacity: u8,
+    pub capacity_pow2: u8,
     pub bytes: u64,
     pub used: AtomicU64,
     pub stats: Arc<BucketStats>,
@@ -103,21 +102,21 @@ impl DataBucket {
         drives: Arc<Vec<PathBuf>>,
         num_elems: u64,
         elem_size: u64,
-        capacity: u8,
+        capacity_pow2: u8,
         max_search: MaxSearch,
         mut stats: Arc<BucketStats>,
     ) -> Self {
         let cell_size = elem_size * num_elems + std::mem::size_of::<Header>() as u64;
-        let (mmap, path) = Self::new_map(&drives, cell_size as usize, capacity, &mut stats);
+        let (mmap, path) = Self::new_map(&drives, cell_size as usize, capacity_pow2, &mut stats);
         Self {
             path,
             mmap,
             drives,
             cell_size,
             used: AtomicU64::new(0),
-            capacity,
+            capacity_pow2,
             stats,
-            bytes: 1 << capacity,
+            bytes: 1 << capacity_pow2,
             max_search,
         }
     }
@@ -266,11 +265,11 @@ impl DataBucket {
     fn new_map(
         drives: &[PathBuf],
         cell_size: usize,
-        capacity: u8,
+        capacity_pow2: u8,
         stats: &mut Arc<BucketStats>,
     ) -> (MmapMut, PathBuf) {
         let mut m0 = Measure::start("");
-        let capacity = 1u64 << capacity;
+        let capacity = 1u64 << capacity_pow2;
         let r = thread_rng().gen_range(0, drives.len());
         let drive = &drives[r];
         let pos = format!("{}", thread_rng().gen_range(0, u128::MAX),);
@@ -322,7 +321,7 @@ impl DataBucket {
         let (new_map, new_file) = Self::new_map(
             &self.drives,
             self.cell_size as usize,
-            self.capacity + increment,
+            self.capacity_pow2 + increment,
             &mut self.stats,
         );
         (0..old_cap as usize).into_iter().for_each(|i| {
@@ -339,8 +338,8 @@ impl DataBucket {
         });
         self.mmap = new_map;
         self.path = new_file;
-        self.capacity += increment;
-        self.bytes = 1 << self.capacity;
+        self.capacity_pow2 += increment;
+        self.bytes = 1 << self.capacity_pow2;
         remove_file(old_file).unwrap();
         m.stop();
         let sz = self.bytes;
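
DataBucket keeps the same convention and caches the derived size: bytes is recomputed as 1 << capacity_pow2 whenever the exponent changes, and grow() simply adds increment to the exponent, so each increment doubles it. With the unit in the field name, the old //power of 2 comment on the struct becomes redundant, which is presumably why this commit drops it. A small sketch of the arithmetic, with helper functions invented for illustration (not the repository's API):

// Hypothetical helpers showing the exponent arithmetic behind DataBucket.
fn derived_size(capacity_pow2: u8) -> u64 {
    1u64 << capacity_pow2
}

// Growing by `increment` doublings is just addition on the exponent.
fn grown_pow2(capacity_pow2: u8, increment: u8) -> u8 {
    capacity_pow2 + increment
}

fn main() {
    let old = 8u8;
    let new = grown_pow2(old, 1);
    assert_eq!(derived_size(old), 256);
    assert_eq!(derived_size(new), 2 * derived_size(old)); // one increment doubles it
}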

@@ -15,8 +15,8 @@ pub struct IndexEntry {
     pub key: Pubkey, // can this be smaller if we have reduced the keys into buckets already?
     pub ref_count: RefCount, // can this be smaller? Do we ever need more than 4B refcounts?
     pub data_location: u64, // smaller? since these are variably sized, this could get tricky. well, actually accountinfo is not variable sized...
-    // if the bucket doubled, the index can be recomputed using create_bucket_capacity
-    pub create_bucket_capacity: u8, // see data_location
+    // if the bucket doubled, the index can be recomputed using create_bucket_capacity_pow2
+    pub create_bucket_capacity_pow2: u8, // see data_location
     pub num_slots: Slot, // can this be smaller? epoch size should ~ be the max len. this is the num elements in the slot list
 }

@@ -36,7 +36,7 @@ impl IndexEntry {
     // This function maps the original data location into an index in the current data bucket.
     // This is coupled with how we resize data buckets.
     pub fn data_loc(&self, bucket: &DataBucket) -> u64 {
-        self.data_location << (bucket.capacity - self.create_bucket_capacity)
+        self.data_location << (bucket.capacity_pow2 - self.create_bucket_capacity_pow2)
     }

     pub fn read_value<'a, T>(&self, bucket: &'a Bucket<T>) -> Option<(&'a [T], RefCount)> {
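
This last hunk is where the suffix matters most for readers: an IndexEntry remembers the exponent the data bucket had when the entry was written (create_bucket_capacity_pow2), and since every resize multiplies the bucket by a power of two, data_loc recovers the current cell index by shifting data_location left by the difference between the two exponents. A hedged worked example as a standalone function (not the repository's code):

// Illustration of the shift in IndexEntry::data_loc: each doubling since the
// entry was created shifts its cell index left by one bit.
fn data_loc(data_location: u64, created_pow2: u8, current_pow2: u8) -> u64 {
    data_location << (current_pow2 - created_pow2)
}

fn main() {
    // Written at cell 5 when the bucket held 2^8 cells; after two doublings
    // (2^10 cells) the same entry maps to cell 5 << 2 == 20.
    assert_eq!(data_loc(5, 8, 10), 20);
    // No growth since creation: the location is unchanged.
    assert_eq!(data_loc(5, 8, 8), 5);
}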