Datapoints overwhelm the metrics queue and blow up RAM usage. (#8272)

automerge
This commit is contained in:
anatoly yakovenko
2020-02-14 13:11:55 -06:00
committed by GitHub
parent c350543b46
commit 17fb8258e5
7 changed files with 58 additions and 31 deletions
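
The hunks below replace unconditional datapoint! calls on hot error paths with the error-level datapoint_error! variant. To make the failure mode in the commit title concrete, here is a minimal, std-only Rust sketch (not the solana_metrics implementation; all names and numbers are illustrative): a producer that emits a datapoint on every iteration of a hot loop fills whatever buffer sits between it and the metrics writer, so an unbounded queue grows without limit while a bounded queue simply drops the excess.

// Illustrative sketch only, not solana_metrics: a bounded metrics queue that
// drops datapoints when full instead of buffering them without limit.
use std::sync::mpsc::{sync_channel, TrySendError};
use std::thread;
use std::time::Duration;

struct DataPoint {
    name: &'static str,
    error: String,
}

fn main() {
    // At most 1024 points may be buffered; anything beyond that is dropped
    // rather than left to pile up in memory while the writer falls behind.
    let (tx, rx) = sync_channel::<DataPoint>(1024);

    // Stand-in for the thread that flushes points to the metrics backend.
    let writer = thread::spawn(move || {
        for point in rx {
            thread::sleep(Duration::from_millis(1)); // simulate slow I/O
            let _ = (point.name, point.error); // would be serialized and sent here
        }
    });

    // Stand-in for a hot validator loop that hits an error branch on every
    // iteration, the situation the commit title describes.
    let mut dropped = 0usize;
    for i in 0..100_000u64 {
        let point = DataPoint {
            name: "validator_process_entry_error",
            error: format!("entry {} had account lock conflicts with itself", i),
        };
        match tx.try_send(point) {
            Ok(()) => {}
            // Queue full: drop the point so memory stays bounded.
            Err(TrySendError::Full(_)) => dropped += 1,
            Err(TrySendError::Disconnected(_)) => break,
        }
    }
    drop(tx);
    writer.join().unwrap();
    println!("dropped {} datapoints to keep the queue bounded", dropped);
}

With an unbounded channel (std::sync::mpsc::channel) in place of sync_channel, the same producer would buffer nearly all 100,000 points in memory, which is the "blow up RAM usage" behavior the commit title describes.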

@@ -1,6 +1,5 @@
 use crate::erasure::ErasureConfig;
 use serde::{Deserialize, Serialize};
-use solana_metrics::datapoint;
 use solana_sdk::clock::Slot;
 use std::{collections::BTreeSet, ops::RangeBounds};
@@ -138,7 +137,7 @@ impl SlotMeta {
         // Should never happen
         if self.consumed > self.last_index + 1 {
-            datapoint!(
+            datapoint_error!(
                 "blockstore_error",
                 (
                     "error",

@@ -13,7 +13,7 @@ use log::*;
 use rand::{seq::SliceRandom, thread_rng};
 use rayon::{prelude::*, ThreadPool};
 use solana_measure::{measure::Measure, thread_mem_usage};
-use solana_metrics::{datapoint, datapoint_error, inc_new_counter_debug};
+use solana_metrics::{datapoint_error, inc_new_counter_debug};
 use solana_rayon_threadlimit::get_thread_count;
 use solana_runtime::{
     bank::{Bank, TransactionBalancesSet, TransactionProcessResult, TransactionResults},
@@ -201,7 +201,7 @@ fn process_entries_with_callback(
         if batches.is_empty() {
            // An entry has account lock conflicts with *itself*, which should not happen
            // if generated by a properly functioning leader
-            datapoint!(
+            datapoint_error!(
                 "validator_process_entry_error",
                 (
                     "error",