AcctIdx: metrics fixes (#20449)

author Jeff Washington (jwash) 2021-10-05 17:26:29 -05:00, committed by GitHub
parent 4267419349
commit d03bf2bbfe
3 changed files with 18 additions and 11 deletions

@@ -409,11 +409,14 @@ impl<T: Clone + Copy> Bucket<T> {
     /// grow a data bucket
     /// The application of the new bucket is deferred until the next write lock.
     pub fn grow_data(&self, data_index: u64, current_capacity_pow2: u8) {
-        let new_bucket = self.index.grow(
+        let new_bucket = BucketStorage::new_resized(
+            &self.drives,
+            self.index.max_search,
             self.data.get(data_index as usize),
             current_capacity_pow2 + 1,
             1 << data_index,
             Self::elem_size(),
+            &self.stats.data,
         );
         self.reallocated.add_reallocation();
         let mut items = self.reallocated.items.lock().unwrap();
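
A note on the intent here, as read from the diff (the commit title only says "metrics fixes"): grow_data resizes a *data* bucket, but the old path went through self.index.grow(..), a &self method on the *index* bucket's storage, which could only charge the resize to the index bucket's stats. Side by side, the call site before and after:

    // Before: the data-bucket resize was initiated through the index
    // bucket's storage, so its timing was billed to the index stats.
    let new_bucket = self.index.grow(
        self.data.get(data_index as usize),
        current_capacity_pow2 + 1,
        1 << data_index,
        Self::elem_size(),
    );

    // After: the caller supplies the drives, the search limit, and,
    // crucially for the metrics fix, the stats sink to charge.
    let new_bucket = BucketStorage::new_resized(
        &self.drives,
        self.index.max_search,
        self.data.get(data_index as usize),
        current_capacity_pow2 + 1,
        1 << data_index,
        Self::elem_size(),
        &self.stats.data,
    );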

@@ -332,21 +332,23 @@ impl BucketStorage {
         self.stats.resize_us.fetch_add(m.as_us(), Ordering::Relaxed);
     }
 
-    /// allocate a new bucket based on 'self', but copying data from 'bucket'
-    pub fn grow(
-        &self,
+    /// allocate a new bucket, copying data from 'bucket'
+    pub fn new_resized(
+        drives: &Arc<Vec<PathBuf>>,
+        max_search: MaxSearch,
         bucket: Option<&Self>,
         capacity_pow_2: u8,
         num_elems: u64,
         elem_size: u64,
+        stats: &Arc<BucketStats>,
     ) -> Self {
         let mut new_bucket = Self::new_with_capacity(
-            Arc::clone(&self.drives),
+            Arc::clone(drives),
             num_elems,
             elem_size,
             capacity_pow_2,
-            self.max_search,
-            Arc::clone(&self.stats),
+            max_search,
+            Arc::clone(stats),
         );
         if let Some(bucket) = bucket {
             new_bucket.copy_contents(bucket);
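
To make the refactor concrete, here is a minimal, self-contained sketch with invented, simplified types (resize_us is the only field kept from the real BucketStats; everything else is a stand-in): a &self method can only ever bill self.stats, while an associated function that accepts the stats sink lets the caller decide which bucket's metrics a resize is charged to.

    use std::sync::{
        atomic::{AtomicU64, Ordering},
        Arc,
    };

    #[derive(Default)]
    struct BucketStats {
        resize_us: AtomicU64, // time spent resizing, in microseconds
    }

    struct BucketStorage {
        stats: Arc<BucketStats>,
    }

    impl BucketStorage {
        // Associated function: the caller passes the stats to charge.
        fn new_resized(stats: &Arc<BucketStats>, resize_us: u64) -> Self {
            stats.resize_us.fetch_add(resize_us, Ordering::Relaxed);
            Self {
                stats: Arc::clone(stats),
            }
        }
    }

    fn main() {
        let index_stats = Arc::new(BucketStats::default());
        let data_stats = Arc::new(BucketStats::default());

        // A data-bucket resize is billed to the data stats, regardless
        // of which object initiated the grow.
        let _grown = BucketStorage::new_resized(&data_stats, 250);

        assert_eq!(index_stats.resize_us.load(Ordering::Relaxed), 0);
        assert_eq!(data_stats.resize_us.load(Ordering::Relaxed), 250);
    }

This mirrors the shape of the real change: grow_data now passes &self.stats.data for a data-bucket resize instead of implicitly using the index bucket's stats.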

@@ -709,13 +709,15 @@ impl<T: IndexValue> InMemAccountsIndex<T> {
             // it is possible that the item in the cache is marked as dirty while these updates are happening. That is ok.
             let m = Measure::start("flush_update");
             for (k, v) in updates.into_iter() {
+                if v.dirty() {
+                    continue; // marked dirty after we grabbed it above, so handle this the next time this bucket is flushed
+                }
                 if disk_resize.is_ok() {
-                    if v.dirty() {
-                        continue; // marked dirty after we grabbed it above, so handle this the next time this bucket is flushed
-                    }
-                    flush_entries_updated_on_disk += 1;
                     disk_resize =
                         disk.try_write(&k, (&v.slot_list.read().unwrap(), v.ref_count()));
+                    if disk_resize.is_ok() {
+                        flush_entries_updated_on_disk += 1;
+                    }
                 }
                 if disk_resize.is_err() {
                     // disk needs to resize, so mark all unprocessed items as dirty again so we pick them up after the resize
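
The second metrics fix: previously flush_entries_updated_on_disk was incremented *before* try_write, so an update whose write failed with a resize error was still counted; the dirty check also sat inside the is_ok branch, so it stopped applying once a resize was pending. The new code hoists the dirty skip above the write and counts only after a write succeeds. A minimal sketch of the count-on-success pattern, with invented stand-in types (the real try_write takes the slot list and ref count, not a flag):

    struct Disk;

    impl Disk {
        // Stand-in for the real try_write: `full` forces a resize error.
        fn try_write(&self, _key: &u64, full: bool) -> Result<(), ()> {
            if full { Err(()) } else { Ok(()) }
        }
    }

    fn main() {
        let disk = Disk;
        // (key, dirty) pairs standing in for the cached index entries.
        let updates: Vec<(u64, bool)> = vec![(1, false), (2, false), (3, true)];
        let mut disk_resize: Result<(), ()> = Ok(());
        let mut flush_entries_updated_on_disk = 0u64;

        for (k, dirty) in updates {
            if dirty {
                continue; // re-dirtied entries are handled on the next flush
            }
            if disk_resize.is_ok() {
                disk_resize = disk.try_write(&k, k == 2); // second write fails
                if disk_resize.is_ok() {
                    // Count only writes that actually landed; the old code
                    // incremented before the write, over-counting on failure.
                    flush_entries_updated_on_disk += 1;
                }
            }
        }

        assert_eq!(flush_entries_updated_on_disk, 1);
    }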