diff --git a/bucket_map/src/bucket.rs b/bucket_map/src/bucket.rs
index f6374ce065..97cf866b99 100644
--- a/bucket_map/src/bucket.rs
+++ b/bucket_map/src/bucket.rs
@@ -409,11 +409,14 @@ impl Bucket {
     /// grow a data bucket
     /// The application of the new bucket is deferred until the next write lock.
     pub fn grow_data(&self, data_index: u64, current_capacity_pow2: u8) {
-        let new_bucket = self.index.grow(
+        let new_bucket = BucketStorage::new_resized(
+            &self.drives,
+            self.index.max_search,
             self.data.get(data_index as usize),
             current_capacity_pow2 + 1,
             1 << data_index,
             Self::elem_size(),
+            &self.stats.data,
         );
         self.reallocated.add_reallocation();
         let mut items = self.reallocated.items.lock().unwrap();
diff --git a/bucket_map/src/bucket_storage.rs b/bucket_map/src/bucket_storage.rs
index 06f7078c95..811e00ff1a 100644
--- a/bucket_map/src/bucket_storage.rs
+++ b/bucket_map/src/bucket_storage.rs
@@ -332,21 +332,23 @@ impl BucketStorage {
         self.stats.resize_us.fetch_add(m.as_us(), Ordering::Relaxed);
     }
 
-    /// allocate a new bucket based on 'self', but copying data from 'bucket'
-    pub fn grow(
-        &self,
+    /// allocate a new bucket, copying data from 'bucket'
+    pub fn new_resized(
+        drives: &Arc<Vec<PathBuf>>,
+        max_search: MaxSearch,
         bucket: Option<&Self>,
         capacity_pow_2: u8,
         num_elems: u64,
         elem_size: u64,
+        stats: &Arc<BucketStats>,
     ) -> Self {
         let mut new_bucket = Self::new_with_capacity(
-            Arc::clone(&self.drives),
+            Arc::clone(drives),
             num_elems,
             elem_size,
             capacity_pow_2,
-            self.max_search,
-            Arc::clone(&self.stats),
+            max_search,
+            Arc::clone(stats),
         );
         if let Some(bucket) = bucket {
             new_bucket.copy_contents(bucket);
diff --git a/runtime/src/in_mem_accounts_index.rs b/runtime/src/in_mem_accounts_index.rs
index 167576cb14..43b622dd03 100644
--- a/runtime/src/in_mem_accounts_index.rs
+++ b/runtime/src/in_mem_accounts_index.rs
@@ -709,13 +709,15 @@ impl InMemAccountsIndex {
         // it is possible that the item in the cache is marked as dirty while these updates are happening. That is ok.
         let m = Measure::start("flush_update");
         for (k, v) in updates.into_iter() {
+            if v.dirty() {
+                continue; // marked dirty after we grabbed it above, so handle this the next time this bucket is flushed
+            }
             if disk_resize.is_ok() {
-                if v.dirty() {
-                    continue; // marked dirty after we grabbed it above, so handle this the next time this bucket is flushed
-                }
-                flush_entries_updated_on_disk += 1;
                 disk_resize = disk.try_write(&k, (&v.slot_list.read().unwrap(), v.ref_count()));
+                if disk_resize.is_ok() {
+                    flush_entries_updated_on_disk += 1;
+                }
             }
             if disk_resize.is_err() {
                 // disk needs to resize, so mark all unprocessed items as dirty again so we pick them up after the resize
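
Below is a minimal, self-contained sketch (not the Solana code itself) of the flush-loop ordering the in_mem_accounts_index.rs hunk moves to: entries re-marked dirty are skipped up front, flush_entries_updated_on_disk is only incremented after try_write succeeds, and anything left once the disk asks to resize is queued to be retried. Entry, Disk, and BucketMapError here are hypothetical stand-ins; the real code writes slot lists and ref counts keyed by pubkey.

```rust
#[derive(Debug)]
struct BucketMapError; // stand-in for the real resize error

struct Entry {
    key: u64,
    dirty: bool,
}

struct Disk {
    capacity: usize,
    written: Vec<u64>,
}

impl Disk {
    /// Pretend write that fails (asking for a resize) once capacity is hit.
    fn try_write(&mut self, key: u64) -> Result<(), BucketMapError> {
        if self.written.len() >= self.capacity {
            return Err(BucketMapError);
        }
        self.written.push(key);
        Ok(())
    }
}

/// Returns how many entries landed on disk and which keys must be retried
/// after the disk grows.
fn flush(updates: Vec<Entry>, disk: &mut Disk) -> (usize, Vec<u64>) {
    let mut flush_entries_updated_on_disk = 0;
    let mut disk_resize: Result<(), BucketMapError> = Ok(());
    let mut dirty_again = Vec::new();

    for v in updates {
        if v.dirty {
            // Marked dirty after it was collected; handle it on the next flush.
            continue;
        }
        if disk_resize.is_ok() {
            disk_resize = disk.try_write(v.key);
            if disk_resize.is_ok() {
                // Count the entry only after the write actually succeeded.
                flush_entries_updated_on_disk += 1;
            }
        }
        if disk_resize.is_err() {
            // Disk needs to grow; re-mark unprocessed items so they are
            // picked up after the resize.
            dirty_again.push(v.key);
        }
    }
    (flush_entries_updated_on_disk, dirty_again)
}

fn main() {
    let updates = (0..4)
        .map(|key| Entry { key, dirty: key == 1 }) // key 1 was re-dirtied
        .collect();
    let mut disk = Disk { capacity: 2, written: Vec::new() };
    let (flushed, retry) = flush(updates, &mut disk);
    // keys 0 and 2 fit, key 1 is skipped as dirty, key 3 hits the resize path
    assert_eq!(flushed, 2);
    assert_eq!(retry, vec![3]);
    println!("flushed {flushed}, retry {retry:?}");
}
```

Incrementing the counter only after a successful write keeps the stat honest when try_write returns the resize error partway through a batch, and hoisting the dirty check out of the is_ok branch means re-dirtied entries are deferred even while the loop is draining the remainder after a resize error.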