caches staked nodes computed from vote-accounts (#13929)

behzad nouri
2020-12-17 21:22:50 +00:00
committed by GitHub
parent fd7d2f82ae
commit d6d76219b6
11 changed files with 367 additions and 78 deletions
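
Before this change, the broadcast, retransmit, and gossip paths recomputed the node-identity-to-stake map from the epoch's vote accounts on every call through solana_ledger::staking_utils. The diff below switches those call sites to accessors on Bank itself: staked_nodes() for the current epoch and epoch_staked_nodes(epoch) for a specific one, both served from a cache maintained by the runtime (epoch_staked_nodes evidently returns an Option, hence the stakes.map(Arc::new) at the call sites). What follows is a rough sketch of the caching idea only, with hypothetical names (StakedNodesCache, by_epoch) and a simplified signature, not the actual Bank internals:

use std::collections::HashMap;
use std::sync::{Arc, RwLock};

type Pubkey = [u8; 32]; // stand-in for solana_sdk::pubkey::Pubkey
type Epoch = u64;

#[derive(Default)]
struct StakedNodesCache {
    // epoch -> (node identity -> total delegated stake)
    by_epoch: RwLock<HashMap<Epoch, Arc<HashMap<Pubkey, u64>>>>,
}

impl StakedNodesCache {
    // Returns the staked-nodes map for `epoch`, computing it from the
    // epoch's vote accounts on first use and serving the cached Arc
    // afterwards.
    fn epoch_staked_nodes(
        &self,
        epoch: Epoch,
        vote_accounts: &[(Pubkey, u64)], // (node identity, delegated stake)
    ) -> Arc<HashMap<Pubkey, u64>> {
        if let Some(cached) = self.by_epoch.read().unwrap().get(&epoch) {
            return Arc::clone(cached);
        }
        // Cache miss: aggregate stake per node identity, as
        // staking_utils::staked_nodes_at_epoch used to do on every call.
        let mut staked_nodes: HashMap<Pubkey, u64> = HashMap::new();
        for (node, stake) in vote_accounts {
            *staked_nodes.entry(*node).or_insert(0) += *stake;
        }
        Arc::clone(
            self.by_epoch
                .write()
                .unwrap()
                .entry(epoch)
                .or_insert_with(|| Arc::new(staked_nodes)),
        )
    }
}

fn main() {
    let cache = StakedNodesCache::default();
    let votes = [([1u8; 32], 100), ([1u8; 32], 50), ([2u8; 32], 75)];
    let nodes = cache.epoch_staked_nodes(42, &votes);
    assert_eq!(nodes[&[1u8; 32]], 150);
    // The second lookup is served from the cache: same Arc, no recompute.
    assert!(Arc::ptr_eq(&nodes, &cache.epoch_staked_nodes(42, &votes)));
}

The second call for a given epoch returns the shared map without touching the vote accounts again, which is the cost this commit removes from the hot paths below.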

View File

@@ -17,7 +17,7 @@ use crossbeam_channel::{
     Receiver as CrossbeamReceiver, RecvTimeoutError as CrossbeamRecvTimeoutError,
     Sender as CrossbeamSender,
 };
-use solana_ledger::{blockstore::Blockstore, shred::Shred, staking_utils};
+use solana_ledger::{blockstore::Blockstore, shred::Shred};
 use solana_measure::measure::Measure;
 use solana_metrics::{inc_new_counter_error, inc_new_counter_info};
 use solana_runtime::bank::Bank;
@@ -306,7 +306,7 @@ impl BroadcastStage {
         for (_, bank) in retransmit_slots.iter() {
             let bank_epoch = bank.get_leader_schedule_epoch(bank.slot());
-            let stakes = staking_utils::staked_nodes_at_epoch(&bank, bank_epoch);
+            let stakes = bank.epoch_staked_nodes(bank_epoch);
             let stakes = stakes.map(Arc::new);
             let data_shreds = Arc::new(
                 blockstore

View File

@@ -102,7 +102,7 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun {
         blockstore_sender.send((data_shreds.clone(), None))?;
         // 4) Start broadcast step
         let bank_epoch = bank.get_leader_schedule_epoch(bank.slot());
-        let stakes = staking_utils::staked_nodes_at_epoch(&bank, bank_epoch);
+        let stakes = bank.epoch_staked_nodes(bank_epoch);
         let stakes = stakes.map(Arc::new);
         socket_sender.send(((stakes.clone(), data_shreds), None))?;
         if let Some((good_last_data_shred, bad_last_data_shred)) = last_shreds {

View File

@@ -213,7 +213,7 @@ impl StandardBroadcastRun {
         let mut get_leader_schedule_time = Measure::start("broadcast_get_leader_schedule");
         let bank_epoch = bank.get_leader_schedule_epoch(bank.slot());
-        let stakes = staking_utils::staked_nodes_at_epoch(&bank, bank_epoch);
+        let stakes = bank.epoch_staked_nodes(bank_epoch);
         let stakes = stakes.map(Arc::new);
         // Broadcast the last shred of the interrupted slot if necessary

View File

@@ -39,7 +39,6 @@ use itertools::Itertools;
 use rayon::prelude::*;
 use rayon::{ThreadPool, ThreadPoolBuilder};
 use serde::ser::Serialize;
-use solana_ledger::staking_utils;
 use solana_measure::measure::Measure;
 use solana_measure::thread_mem_usage;
 use solana_metrics::{inc_new_counter_debug, inc_new_counter_error};
@@ -1834,7 +1833,7 @@ impl ClusterInfo {
         let stakes: HashMap<_, _> = match bank_forks {
             Some(ref bank_forks) => {
-                staking_utils::staked_nodes(&bank_forks.read().unwrap().working_bank())
+                bank_forks.read().unwrap().working_bank().staked_nodes()
             }
             None => HashMap::new(),
         };
@@ -2492,7 +2491,7 @@ impl ClusterInfo {
                 let epoch = bank.epoch();
                 let epoch_schedule = bank.epoch_schedule();
                 epoch_time_ms = epoch_schedule.get_slots_in_epoch(epoch) * DEFAULT_MS_PER_SLOT;
-                staking_utils::staked_nodes(&bank)
+                bank.staked_nodes()
             }
             None => {
                 inc_new_counter_info!("cluster_info-purge-no_working_bank", 1);

View File

@@ -19,7 +19,6 @@ use solana_ledger::shred::{get_shred_slot_index_type, ShredFetchStats};
 use solana_ledger::{
     blockstore::{Blockstore, CompletedSlotsReceiver},
     leader_schedule_cache::LeaderScheduleCache,
-    staking_utils,
 };
 use solana_measure::measure::Measure;
 use solana_metrics::inc_new_counter_error;
@@ -278,7 +277,7 @@ fn retransmit(
     drop(r_epoch_stakes_cache);
     let mut w_epoch_stakes_cache = epoch_stakes_cache.write().unwrap();
     if w_epoch_stakes_cache.epoch != bank_epoch {
-        let stakes = staking_utils::staked_nodes_at_epoch(&r_bank, bank_epoch);
+        let stakes = r_bank.epoch_staked_nodes(bank_epoch);
         let stakes = stakes.map(Arc::new);
         w_epoch_stakes_cache.stakes = stakes;
         w_epoch_stakes_cache.epoch = bank_epoch;
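
Note that the retransmit path still layers its own per-epoch cache over the bank accessor: the read guard is dropped, the write guard re-checks the epoch, and the freshly fetched stakes are stored alongside it, so the bank is consulted only when the epoch actually changes.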