Use log levels for datapoint metrics (#4335)

* Use log levels for datapoint metrics

* address review comments

* fix cyclomatic complexity
Author: Pankaj Garg
Date: 2019-05-17 17:34:05 -07:00
Committed by: GitHub
Parent: 788290ad82
Commit: 9476fe5ce3

16 changed files with 163 additions and 123 deletions
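The change replaces the generic `datapoint!` macro (and one hand-rolled `solana_metrics::submit(point, level)` call) with level-specific variants: `datapoint_error!`, `datapoint_warn!`, `datapoint_info!`, and `datapoint_debug!`. Below is a minimal, self-contained sketch of how such wrappers can layer a log level over a single generic macro. The wrapper names match the diff, but the `log_enabled!` gate, the log-based output, and the `log`/`env_logger` dependencies are illustrative assumptions; the real `solana_metrics` macros build and submit metrics points rather than writing log lines.

// Sketch only: level-aware datapoint macros layered over one generic
// macro. Assumes the `log` and `env_logger` crates as dependencies;
// not the actual solana_metrics implementation.
macro_rules! datapoint {
    ($level:expr, $name:expr $(, ($field:expr, $value:expr, $ty:ident))* $(,)?) => {
        // Gate on the log level so filtered-out datapoints cost only
        // this check (an assumption about the intent of the change).
        if log::log_enabled!($level) {
            log::log!($level, "datapoint: {}", $name);
            $( log::log!($level, "  {}={:?} ({})", $field, $value, stringify!($ty)); )*
        }
    };
}

macro_rules! datapoint_error {
    ($name:expr $(, $field:tt)* $(,)?) => { datapoint!(log::Level::Error, $name $(, $field)*) };
}
macro_rules! datapoint_warn {
    ($name:expr $(, $field:tt)* $(,)?) => { datapoint!(log::Level::Warn, $name $(, $field)*) };
}
macro_rules! datapoint_info {
    ($name:expr $(, $field:tt)* $(,)?) => { datapoint!(log::Level::Info, $name $(, $field)*) };
}
macro_rules! datapoint_debug {
    ($name:expr $(, $field:tt)* $(,)?) => { datapoint!(log::Level::Debug, $name $(, $field)*) };
}

fn main() {
    env_logger::init(); // level filtering via RUST_LOG, e.g. RUST_LOG=info
    datapoint_info!("retransmit-stage", ("count", 42i64, i64));
    datapoint_debug!("cluster_info-repair", ("repair-slot", 7i64, i64));
}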

blocktree.rs

@@ -16,7 +16,7 @@ use hashbrown::HashMap;
 #[cfg(not(feature = "kvstore"))]
 use rocksdb;
-use solana_metrics::datapoint;
+use solana_metrics::{datapoint_error, datapoint_info};
 use solana_sdk::genesis_block::GenesisBlock;
 use solana_sdk::hash::Hash;
@@ -398,17 +398,14 @@ impl Blocktree {
         for (signal, slots) in self.completed_slots_senders.iter().zip(slots.into_iter()) {
             let res = signal.try_send(slots);
             if let Err(TrySendError::Full(_)) = res {
-                solana_metrics::submit(
-                    solana_metrics::influxdb::Point::new("blocktree_error")
-                        .add_field(
-                            "error",
-                            solana_metrics::influxdb::Value::String(
-                                "Unable to send newly completed slot because channel is full"
-                                    .to_string(),
-                            ),
-                        )
-                        .to_owned(),
-                    log::Level::Error,
+                datapoint_error!(
+                    "blocktree_error",
+                    (
+                        "error",
+                        "Unable to send newly completed slot because channel is full"
+                            .to_string(),
+                        String
+                    ),
                 );
             }
         }
@@ -1031,7 +1028,7 @@ fn should_insert_blob(
     // for the slot
     let last_index = slot.last_index;
     if blob_index >= last_index {
-        datapoint!(
+        datapoint_error!(
            "blocktree_error",
            (
                "error",
@@ -1048,7 +1045,7 @@ fn should_insert_blob(
     // Check that we do not receive a blob with "last_index" true, but index
     // less than our current received
     if blob.is_last_in_slot() && blob_index < slot.received {
-        datapoint!(
+        datapoint_error!(
            "blocktree_error",
            (
                "error",
@@ -1322,7 +1319,7 @@ fn try_erasure_recover(
     let (data_end_index, _) = erasure_meta.end_indexes();
     let submit_metrics = |attempted: bool, status: String| {
-        datapoint!(
+        datapoint_info!(
             "blocktree-erasure",
             ("slot", slot as i64, i64),
             ("start_index", start_index as i64, i64),

cluster_info.rs

@@ -29,7 +29,7 @@ use core::cmp;
 use hashbrown::HashMap;
 use rand::{thread_rng, Rng};
 use rayon::prelude::*;
-use solana_metrics::{datapoint, inc_new_counter_debug, inc_new_counter_error};
+use solana_metrics::{datapoint_debug, inc_new_counter_debug, inc_new_counter_error};
 use solana_netutil::{
     bind_in_range, bind_to, find_available_port_in_range, multi_bind_in_range, PortRange,
 };
@@ -850,7 +850,7 @@ impl ClusterInfo {
         let out = {
             match repair_request {
                 RepairType::Blob(slot, blob_index) => {
-                    datapoint!(
+                    datapoint_debug!(
                         "cluster_info-repair",
                         ("repair-slot", *slot, i64),
                         ("repair-ix", *blob_index, i64)
@@ -858,7 +858,7 @@ impl ClusterInfo {
                     self.window_index_request_bytes(*slot, *blob_index)?
                 }
                 RepairType::HighestBlob(slot, blob_index) => {
-                    datapoint!(
+                    datapoint_debug!(
                         "cluster_info-repair_highest",
                         ("repair-highest-slot", *slot, i64),
                         ("repair-highest-ix", *blob_index, i64)
@@ -866,7 +866,7 @@ impl ClusterInfo {
                     self.window_highest_index_request_bytes(*slot, *blob_index)?
                 }
                 RepairType::Orphan(slot) => {
-                    datapoint!("cluster_info-repair_orphan", ("repair-orphan", *slot, i64));
+                    datapoint_debug!("cluster_info-repair_orphan", ("repair-orphan", *slot, i64));
                     self.orphan_bytes(*slot)?
                 }
             }

locktower.rs

@@ -1,7 +1,7 @@
 use crate::bank_forks::BankForks;
 use crate::staking_utils;
 use hashbrown::{HashMap, HashSet};
-use solana_metrics::datapoint;
+use solana_metrics::datapoint_info;
 use solana_runtime::bank::Bank;
 use solana_sdk::account::Account;
 use solana_sdk::pubkey::Pubkey;
@@ -125,7 +125,7 @@ impl Locktower {
             vote_state.nth_recent_vote(0).map(|v| v.slot).unwrap_or(0) as i64
         );
         debug!("observed root {}", vote_state.root_slot.unwrap_or(0) as i64);
-        datapoint!(
+        datapoint_info!(
             "locktower-observed",
             (
                 "slot",
@@ -211,7 +211,7 @@ impl Locktower {
             self.epoch_stakes.epoch
         );
         self.epoch_stakes = EpochStakes::new_from_bank(bank, &self.epoch_stakes.delegate_id);
-        datapoint!(
+        datapoint_info!(
             "locktower-epoch",
             ("epoch", self.epoch_stakes.epoch, i64),
             ("self_staked", self.epoch_stakes.self_staked, i64),
@@ -223,7 +223,7 @@ impl Locktower {
     pub fn record_vote(&mut self, slot: u64) -> Option<u64> {
         let root_slot = self.lockouts.root_slot;
         self.lockouts.process_vote(&Vote { slot });
-        datapoint!(
+        datapoint_info!(
             "locktower-vote",
             ("latest", slot, i64),
             ("root", self.lockouts.root_slot.unwrap_or(0), i64)

repair_service.rs

@@ -6,7 +6,7 @@ use crate::blocktree::{Blocktree, CompletedSlotsReceiver, SlotMeta};
 use crate::cluster_info::ClusterInfo;
 use crate::result::Result;
 use crate::service::Service;
-use solana_metrics::datapoint;
+use solana_metrics::datapoint_info;
 use solana_runtime::epoch_schedule::EpochSchedule;
 use solana_sdk::pubkey::Pubkey;
 use std::collections::HashSet;
@@ -156,7 +156,7 @@ impl RepairService {
             for ((to, req), repair_request) in reqs {
                 if let Ok(local_addr) = repair_socket.local_addr() {
-                    datapoint!(
+                    datapoint_info!(
                         "repair_service",
                         ("repair_request", format!("{:?}", repair_request), String),
                         ("to", to.to_string(), String),

replay_stage.rs

@@ -14,7 +14,7 @@ use crate::result::{Error, Result};
 use crate::rpc_subscriptions::RpcSubscriptions;
 use crate::service::Service;
 use hashbrown::HashMap;
-use solana_metrics::{datapoint, inc_new_counter_error, inc_new_counter_info};
+use solana_metrics::{datapoint_warn, inc_new_counter_error, inc_new_counter_info};
 use solana_runtime::bank::Bank;
 use solana_sdk::hash::Hash;
 use solana_sdk::pubkey::Pubkey;
@@ -245,7 +245,7 @@ impl ReplayStage {
                 cluster_info.write().unwrap().set_leader(&next_leader);
                 if next_leader == *my_id && reached_leader_tick {
                     debug!("{} starting tpu for slot {}", my_id, poh_slot);
-                    datapoint!(
+                    datapoint_warn!(
                         "replay_stage-new_leader",
                         ("count", poh_slot, i64),
                         ("grace", grace_ticks, i64));
@@ -468,7 +468,7 @@ impl ReplayStage {
                 .unwrap_or(true)
             {
                 info!("validator fork confirmed {} {}", *slot, duration);
-                datapoint!("validator-confirmation", ("duration_ms", duration, i64));
+                datapoint_warn!("validator-confirmation", ("duration_ms", duration, i64));
                 false
             } else {
                 debug!(

retransmit_stage.rs

@@ -10,7 +10,7 @@ use crate::service::Service;
 use crate::staking_utils;
 use crate::streamer::BlobReceiver;
 use crate::window_service::{should_retransmit_and_persist, WindowService};
-use solana_metrics::{datapoint, inc_new_counter_error};
+use solana_metrics::{datapoint_info, inc_new_counter_error};
 use solana_runtime::epoch_schedule::EpochSchedule;
 use solana_sdk::hash::Hash;
 use std::net::UdpSocket;
@@ -34,7 +34,7 @@ fn retransmit(
         blobs.append(&mut nq);
     }
-    datapoint!("retransmit-stage", ("count", blobs.len(), i64));
+    datapoint_info!("retransmit-stage", ("count", blobs.len(), i64));
     let r_bank = bank_forks.read().unwrap().working_bank();
     let bank_epoch = r_bank.get_stakers_epoch(r_bank.slot());

sigverify_stage.rs

@@ -10,7 +10,7 @@ use crate::result::{Error, Result};
 use crate::service::Service;
 use crate::sigverify;
 use crate::streamer::{self, PacketReceiver};
-use solana_metrics::{datapoint, inc_new_counter_info};
+use solana_metrics::{datapoint_info, inc_new_counter_info};
 use solana_sdk::timing;
 use std::sync::mpsc::{Receiver, RecvTimeoutError, Sender};
 use std::sync::{Arc, Mutex};
@@ -95,7 +95,7 @@ impl SigVerifyStage {
             (len as f32 / total_time_s)
         );
-        datapoint!(
+        datapoint_info!(
            "sigverify_stage-total_verify_time",
            ("batch_len", batch_len, i64),
            ("len", len, i64),