rename capacity to capacity_pow2
committed by Jeff Washington (jwash)
parent 8f7a9e8f0d
commit 4b72a5947a
@@ -169,12 +169,12 @@ impl<T: Clone + Copy> Bucket<T> {
                 elem.key = *key;
                 elem.ref_count = ref_count;
                 elem.data_location = 0;
-                elem.create_bucket_capacity = 0;
+                elem.create_bucket_capacity_pow2 = 0;
                 elem.num_slots = 0;
                 //debug!( "INDEX ALLOC {:?} {} {} {}", key, ii, index.capacity, elem_uid );
                 return Ok(ii);
             }
         }
-        Err(BucketMapError::IndexNoSpace(index.capacity))
+        Err(BucketMapError::IndexNoSpace(index.capacity_pow2))
     }

     pub fn addref(&mut self, key: &Pubkey) -> Option<RefCount> {
@@ -245,7 +245,7 @@ impl<T: Clone + Copy> Bucket<T> {
         } else {
             //need to move the allocation to a best fit spot
             let best_bucket = &self.data[best_fit_bucket as usize];
-            let cap_power = best_bucket.capacity;
+            let cap_power = best_bucket.capacity_pow2;
             let cap = best_bucket.num_cells();
             let pos = thread_rng().gen_range(0, cap);
             for i in pos..pos + self.index.max_search() {
@@ -257,7 +257,7 @@ impl<T: Clone + Copy> Bucket<T> {
                 }
                 // elem: &mut IndexEntry = self.index.get_mut(elem_ix);
                 elem.data_location = ix;
-                elem.create_bucket_capacity = best_bucket.capacity;
+                elem.create_bucket_capacity_pow2 = best_bucket.capacity_pow2;
                 elem.num_slots = data.len() as u64;
                 //debug!( "DATA ALLOC {:?} {} {} {}", key, elem.data_location, best_bucket.capacity, elem_uid );
                 if elem.num_slots > 0 {
@@ -287,7 +287,7 @@ impl<T: Clone + Copy> Bucket<T> {
     }

     pub fn grow_index(&mut self, sz: u8) {
-        if self.index.capacity == sz {
+        if self.index.capacity_pow2 == sz {
             let mut m = Measure::start("");
             //debug!("GROW_INDEX: {}", sz);
             let increment = 1;
@@ -299,7 +299,7 @@ impl<T: Clone + Copy> Bucket<T> {
                     Arc::clone(&self.drives),
                     1,
                     std::mem::size_of::<IndexEntry>() as u64,
-                    self.index.capacity + i, // * 2,
+                    self.index.capacity_pow2 + i, // * 2,
                     self.index.max_search,
                     Arc::clone(&self.stats.index),
                 );
@@ -335,7 +335,7 @@ impl<T: Clone + Copy> Bucket<T> {
                 }
             }
             m.stop();
-            let sz = 1 << self.index.capacity;
+            let sz = 1 << self.index.capacity_pow2;
             {
                 let mut max = self.stats.index.max_size.lock().unwrap();
                 *max = std::cmp::max(*max, sz);
@@ -360,7 +360,7 @@ impl<T: Clone + Copy> Bucket<T> {
                 ))
             }
         }
-        if self.data[sz.0 as usize].capacity == sz.1 {
+        if self.data[sz.0 as usize].capacity_pow2 == sz.1 {
             //debug!("GROW_DATA: {} {}", sz.0, sz.1);
             self.data[sz.0 as usize].grow();
         }
@@ -78,8 +78,7 @@ pub struct DataBucket {
     path: PathBuf,
     mmap: MmapMut,
     pub cell_size: u64,
-    //power of 2
-    pub capacity: u8,
+    pub capacity_pow2: u8,
     pub bytes: u64,
     pub used: AtomicU64,
     pub stats: Arc<BucketStats>,
@@ -103,21 +102,21 @@ impl DataBucket {
         drives: Arc<Vec<PathBuf>>,
         num_elems: u64,
         elem_size: u64,
-        capacity: u8,
+        capacity_pow2: u8,
         max_search: MaxSearch,
         mut stats: Arc<BucketStats>,
     ) -> Self {
         let cell_size = elem_size * num_elems + std::mem::size_of::<Header>() as u64;
-        let (mmap, path) = Self::new_map(&drives, cell_size as usize, capacity, &mut stats);
+        let (mmap, path) = Self::new_map(&drives, cell_size as usize, capacity_pow2, &mut stats);
         Self {
             path,
             mmap,
             drives,
             cell_size,
             used: AtomicU64::new(0),
-            capacity,
+            capacity_pow2,
             stats,
-            bytes: 1 << capacity,
+            bytes: 1 << capacity_pow2,
             max_search,
         }
     }
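Note: the constructor makes the unit of the renamed field explicit. `capacity_pow2` is not a size in cells or bytes but a log2 exponent, and the byte size is always derived from it as `1 << capacity_pow2`. A minimal standalone sketch of that convention (the `PowerOfTwoBuf` type is illustrative only, not part of the crate):

    // Illustrative sketch: capacity stored as a log2 exponent, bytes derived from it.
    struct PowerOfTwoBuf {
        capacity_pow2: u8, // log2 of the capacity
        bytes: u64,        // invariant: bytes == 1 << capacity_pow2
    }

    impl PowerOfTwoBuf {
        fn new(capacity_pow2: u8) -> Self {
            Self {
                capacity_pow2,
                bytes: 1u64 << capacity_pow2,
            }
        }
    }

    fn main() {
        let b = PowerOfTwoBuf::new(10);
        assert_eq!(b.bytes, 1024); // 2^10
    }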
@@ -266,11 +265,11 @@ impl DataBucket {
     fn new_map(
         drives: &[PathBuf],
         cell_size: usize,
-        capacity: u8,
+        capacity_pow2: u8,
         stats: &mut Arc<BucketStats>,
     ) -> (MmapMut, PathBuf) {
         let mut m0 = Measure::start("");
-        let capacity = 1u64 << capacity;
+        let capacity = 1u64 << capacity_pow2;
         let r = thread_rng().gen_range(0, drives.len());
         let drive = &drives[r];
         let pos = format!("{}", thread_rng().gen_range(0, u128::MAX),);
@@ -322,7 +321,7 @@ impl DataBucket {
         let (new_map, new_file) = Self::new_map(
             &self.drives,
             self.cell_size as usize,
-            self.capacity + increment,
+            self.capacity_pow2 + increment,
             &mut self.stats,
         );
         (0..old_cap as usize).into_iter().for_each(|i| {
@@ -339,8 +338,8 @@ impl DataBucket {
         });
         self.mmap = new_map;
         self.path = new_file;
-        self.capacity += increment;
-        self.bytes = 1 << self.capacity;
+        self.capacity_pow2 += increment;
+        self.bytes = 1 << self.capacity_pow2;
         remove_file(old_file).unwrap();
         m.stop();
         let sz = self.bytes;
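Note: `grow()` preserves the same invariant while resizing. It builds a new map sized for `capacity_pow2 + increment`, copies the old cells across, then bumps the stored exponent and recomputes `bytes`. A hedged sketch of the same grow-by-exponent pattern on a plain `Vec` (the crate works on an mmap and is more involved; the `Cells` type here is illustrative):

    // Illustrative sketch: double a buffer by incrementing its log2 exponent.
    struct Cells {
        capacity_pow2: u8,
        cells: Vec<u64>, // invariant: cells.len() == 1 << capacity_pow2
    }

    impl Cells {
        fn grow(&mut self, increment: u8) {
            let new_pow2 = self.capacity_pow2 + increment;
            let mut new_cells = vec![0u64; 1usize << new_pow2];
            // Copy the old contents into the front of the larger allocation,
            // mirroring the (0..old_cap).for_each copy loop in the diff above.
            new_cells[..self.cells.len()].copy_from_slice(&self.cells);
            self.cells = new_cells;
            self.capacity_pow2 = new_pow2;
        }
    }

    fn main() {
        let mut c = Cells { capacity_pow2: 2, cells: vec![1, 2, 3, 4] };
        c.grow(1); // 2^2 -> 2^3 cells
        assert_eq!(c.cells.len(), 8);
        assert_eq!(&c.cells[..4], &[1, 2, 3, 4]);
    }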
@@ -15,8 +15,8 @@ pub struct IndexEntry {
     pub key: Pubkey, // can this be smaller if we have reduced the keys into buckets already?
     pub ref_count: RefCount, // can this be smaller? Do we ever need more than 4B refcounts?
     pub data_location: u64, // smaller? since these are variably sized, this could get tricky. well, actually accountinfo is not variable sized...
-    // if the bucket doubled, the index can be recomputed using create_bucket_capacity
-    pub create_bucket_capacity: u8, // see data_location
+    // if the bucket doubled, the index can be recomputed using create_bucket_capacity_pow2
+    pub create_bucket_capacity_pow2: u8, // see data_location
     pub num_slots: Slot, // can this be smaller? epoch size should ~ be the max len. this is the num elements in the slot list
 }

@@ -36,7 +36,7 @@ impl IndexEntry {
     // This function maps the original data location into an index in the current data bucket.
     // This is coupled with how we resize data buckets.
     pub fn data_loc(&self, bucket: &DataBucket) -> u64 {
-        self.data_location << (bucket.capacity - self.create_bucket_capacity)
+        self.data_location << (bucket.capacity_pow2 - self.create_bucket_capacity_pow2)
     }

     pub fn read_value<'a, T>(&self, bucket: &'a Bucket<T>) -> Option<(&'a [T], RefCount)> {
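Note: the shift in `data_loc` is where both `*_pow2` fields pay off. Since every resize doubles the data bucket, an entry recorded when the exponent was `create_bucket_capacity_pow2` can be relocated in the current, larger bucket with a plain left shift. A small worked example of that arithmetic (the values and the free-standing function are illustrative, not the crate's API):

    // Illustrative: recompute a slot index after the bucket has doubled.
    fn data_loc(data_location: u64, bucket_capacity_pow2: u8, create_bucket_capacity_pow2: u8) -> u64 {
        data_location << (bucket_capacity_pow2 - create_bucket_capacity_pow2)
    }

    fn main() {
        // The entry was written at slot 5 when the bucket held 2^8 cells.
        // The bucket has since grown to 2^10 cells (doubled twice),
        // so the original slot maps to 5 << 2 = 20 in the current bucket.
        assert_eq!(data_loc(5, 10, 8), 20);
    }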