diff --git a/perf/src/recycler.rs b/perf/src/recycler.rs
index a8169ab556..4eff3dbae1 100644
--- a/perf/src/recycler.rs
+++ b/perf/src/recycler.rs
@@ -14,14 +14,19 @@ use {
 // cushion against *normal* variations in the workload while bounding the
 // number of redundant garbage collected objects after temporary bursts.
 const RECYCLER_SHRINK_SIZE: usize = 1024;
-// Lookback window for averaging number of garbage collected objects in terms
-// of number of allocations.
+
+// Lookback window for an exponential moving average of the number of garbage
+// collected objects, in terms of number of allocations. Given the window size
+// defined below, the decay factor has a half-life of 11356 allocations: a
+// sample of gc.len() taken 11356 allocations ago carries half the weight of
+// the most recent sample at the current allocation.
 const RECYCLER_SHRINK_WINDOW: usize = 16384;
 
 #[derive(Debug, Default)]
 struct RecyclerStats {
     total: AtomicUsize,
     reuse: AtomicUsize,
+    freed: AtomicUsize,
     max_gc: AtomicUsize,
 }
 
@@ -148,6 +153,10 @@ impl<T: Default + Reset> RecyclerX<T> {
         if gc.len() > RECYCLER_SHRINK_SIZE
             && self.size_factor.load(Ordering::Acquire) >= SIZE_FACTOR_AFTER_SHRINK
         {
+            self.stats.freed.fetch_add(
+                gc.len().saturating_sub(RECYCLER_SHRINK_SIZE),
+                Ordering::Relaxed,
+            );
             for mut x in gc.drain(RECYCLER_SHRINK_SIZE..) {
                 x.set_recycler(Weak::default());
             }
@@ -169,7 +178,7 @@ impl<T: Default + Reset> RecyclerX<T> {
         }
         let total = self.stats.total.load(Ordering::Relaxed);
         let reuse = self.stats.reuse.load(Ordering::Relaxed);
-        let freed = self.stats.total.fetch_add(1, Ordering::Relaxed);
+        let freed = self.stats.freed.load(Ordering::Relaxed);
         datapoint_debug!(
             "recycler",
             ("gc_len", len as i64, i64),
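
Note on the half-life figure: the 11356 in the new comment follows directly from the window size, since solving ((W - 1) / W)^h = 1/2 for W = 16384 gives h = ln 2 / -ln(1 - 1/W) ~= 11356. The sketch below is a standalone illustration of that arithmetic, not code from this patch; the per-allocation decay factor (W - 1) / W and the variable name `gc_len` are assumptions about the EMA form, which these hunks do not show.

```rust
// Standalone sketch: verify the half-life quoted in the new comment.
// Assumes the moving average decays by (W - 1) / W on each allocation, so a
// sample observed h allocations ago carries relative weight ((W - 1) / W)^h.

const RECYCLER_SHRINK_WINDOW: usize = 16384;

fn main() {
    let w = RECYCLER_SHRINK_WINDOW as f64;
    // Per-allocation decay factor of the running average (assumed form).
    let decay = (w - 1.0) / w;
    // Solve decay^h = 1/2 for h, the half-life in allocations.
    let half_life = (0.5f64).ln() / decay.ln();
    println!("half-life: {half_life:.1} allocations"); // prints ~11356.2

    // One EMA update per allocation, blending the current garbage-list
    // length (`gc_len`, a hypothetical sample) into the running average.
    let mut ema = 0.0_f64;
    let gc_len = 2048.0_f64;
    ema = decay * ema + (1.0 - decay) * gc_len;
    println!("ema after one sample: {ema:.4}");
}
```

For intuition, ln 2 * 16384 ~= 11356.5 gives nearly the same number, because -ln(1 - 1/W) ~= 1/W for large W; the comment's 11356 is the exact value rounded down.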