add metrics for startup (#17913) (#17975)

* add metrics for startup

    * roll timings up higher

    * fix test

    * fix duplicate

    (cherry picked from commit 471b34132e)

    # Conflicts:
    #       ledger/src/bank_forks_utils.rs
    #       runtime/src/snapshot_utils.rs
    Conflicts arise because #17778 is not present on this branch.

Co-authored-by: Jeff Washington (jwash) <75863576+jeffwashington@users.noreply.github.com>
This commit is contained in:
mergify[bot]
2021-06-16 01:40:56 +00:00
committed by GitHub
parent fe5fed1182
commit 61573756f8
4 changed files with 67 additions and 23 deletions

View File

@@ -24,6 +24,7 @@ use solana_runtime::{
bank_forks::BankForks,
bank_utils,
commitment::VOTE_THRESHOLD_SIZE,
snapshot_utils::BankFromArchiveTimings,
transaction_batch::TransactionBatch,
vote_account::ArcVoteAccount,
vote_sender_types::ReplayVoteSender,
@@ -418,6 +419,7 @@ pub fn process_blockstore(
&recyclers,
None,
cache_block_meta_sender,
BankFromArchiveTimings::default(),
)
}
@@ -429,6 +431,7 @@ pub(crate) fn process_blockstore_from_root(
recyclers: &VerifyRecyclers,
transaction_status_sender: Option<&TransactionStatusSender>,
cache_block_meta_sender: Option<&CacheBlockMetaSender>,
timings: BankFromArchiveTimings,
) -> BlockstoreProcessorResult {
do_process_blockstore_from_root(
blockstore,
@@ -437,6 +440,7 @@ pub(crate) fn process_blockstore_from_root(
recyclers,
transaction_status_sender,
cache_block_meta_sender,
timings,
)
}
@@ -447,6 +451,7 @@ fn do_process_blockstore_from_root(
recyclers: &VerifyRecyclers,
transaction_status_sender: Option<&TransactionStatusSender>,
cache_block_meta_sender: Option<&CacheBlockMetaSender>,
timings: BankFromArchiveTimings,
) -> BlockstoreProcessorResult {
info!("processing ledger from slot {}...", bank.slot());
@@ -526,18 +531,45 @@ fn do_process_blockstore_from_root(
}
let bank_forks = BankForks::new_from_banks(&initial_forks, root);
let processing_time = now.elapsed();
let debug_verify = false;
let mut time_cap = Measure::start("capitalization");
// We might be promptly restarted after bad capitalization was detected while creating newer snapshot.
// In that case, we're most likely restored from the last good snapshot and replayed up to this root.
// So again check here for the bad capitalization to avoid to continue until the next snapshot creation.
if !bank_forks
.root_bank()
.calculate_and_verify_capitalization(debug_verify)
{
return Err(BlockstoreProcessorError::RootBankWithMismatchedCapitalization(root));
}
time_cap.stop();
datapoint_info!(
"process_blockstore_from_root",
("total_time_us", now.elapsed().as_micros(), i64),
("total_time_us", processing_time.as_micros(), i64),
("frozen_banks", bank_forks.frozen_banks().len(), i64),
("slot", bank_forks.root(), i64),
("forks", initial_forks.len(), i64)
("forks", initial_forks.len(), i64),
("calculate_capitalization_us", time_cap.as_us(), i64),
("untar_us", timings.untar_us, i64),
(
"rebuild_bank_from_snapshots_us",
timings.rebuild_bank_from_snapshots_us,
i64
),
(
"verify_snapshot_bank_us",
timings.verify_snapshot_bank_us,
i64
),
);
info!("ledger processing timing: {:?}", timing);
info!(
"ledger processed in {}. root slot is {}, {} fork{} at {}, with {} frozen bank{}",
HumanTime::from(chrono::Duration::from_std(now.elapsed()).unwrap())
HumanTime::from(chrono::Duration::from_std(processing_time).unwrap())
.to_text_en(Accuracy::Precise, Tense::Present),
bank_forks.root(),
initial_forks.len(),
@@ -555,17 +587,6 @@ fn do_process_blockstore_from_root(
);
assert!(bank_forks.active_banks().is_empty());
let debug_verify = false;
// We might be promptly restarted after bad capitalization was detected while creating newer snapshot.
// In that case, we're most likely restored from the last good snapshot and replayed up to this root.
// So again check here for the bad capitalization to avoid to continue until the next snapshot creation.
if !bank_forks
.root_bank()
.calculate_and_verify_capitalization(debug_verify)
{
return Err(BlockstoreProcessorError::RootBankWithMismatchedCapitalization(root));
}
Ok((bank_forks, leader_schedule_cache))
}
@@ -2894,9 +2915,16 @@ pub mod tests {
bank1.squash();
// Test process_blockstore_from_root() from slot 1 onwards
let (bank_forks, _leader_schedule) =
do_process_blockstore_from_root(&blockstore, bank1, &opts, &recyclers, None, None)
.unwrap();
let (bank_forks, _leader_schedule) = do_process_blockstore_from_root(
&blockstore,
bank1,
&opts,
&recyclers,
None,
None,
BankFromArchiveTimings::default(),
)
.unwrap();
assert_eq!(frozen_bank_slots(&bank_forks), vec![5, 6]);
assert_eq!(bank_forks.working_bank().slot(), 6);