Revert shred fs (#9712)

* Revert "Untar is called for shred archives that do not exist. (#9565)"
  This reverts commit 729cb5eec6.

* Revert "Dont insert shred payload into rocksdb (#9366)"
  This reverts commit 5ed39de8c5.
@@ -238,7 +238,7 @@ impl CrdsGossipPull {
             if now > r.wallclock().checked_add(timeout).unwrap_or_else(|| 0)
                 || now + timeout < r.wallclock()
             {
-                inc_new_counter_info!(
+                inc_new_counter_warn!(
                     "cluster_info-gossip_pull_response_value_timeout",
                     1
                 );
@@ -250,7 +250,7 @@ impl CrdsGossipPull {
             // Before discarding this value, check if a ContactInfo for the owner
             // exists in the table. If it doesn't, that implies that this value can be discarded
             if crds.lookup(&CrdsValueLabel::ContactInfo(owner)).is_none() {
-                inc_new_counter_info!(
+                inc_new_counter_warn!(
                     "cluster_info-gossip_pull_response_value_timeout",
                     1
                 );
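Note on the condition being instrumented: it flags a value as expired when it is either older than `timeout` or dated more than `timeout` into the future, and `checked_add(timeout).unwrap_or_else(|| 0)` makes a `u64` overflow of `wallclock + timeout` count as expired rather than panicking. A minimal standalone sketch of the same check (hypothetical `is_timed_out` helper over plain `u64`s, not Solana's actual API):

// Two-sided staleness check: overflow of wallclock + timeout saturates to 0,
// so such a value is always treated as timed out.
fn is_timed_out(now: u64, wallclock: u64, timeout: u64) -> bool {
    now > wallclock.checked_add(timeout).unwrap_or_else(|| 0)
        || now + timeout < wallclock
}

fn main() {
    assert!(is_timed_out(2_000, 500, 1_000)); // too old
    assert!(is_timed_out(500, 2_000, 1_000)); // too far in the future
    assert!(!is_timed_out(1_200, 1_000, 1_000)); // within the window
    assert!(is_timed_out(1, u64::MAX - 1, 2)); // wallclock + timeout overflows
}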
@@ -131,6 +131,7 @@ impl LedgerCleanupService {
         while let Ok(new_root) = new_root_receiver.try_recv() {
             root = new_root;
         }

         if root - *last_purge_slot > purge_interval {
             let disk_utilization_pre = blockstore.storage_size();
             info!(
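Note: the `while let Ok(...) = try_recv()` loop above drains every root queued on the channel without blocking, so the purge decision only ever looks at the newest root. A tiny self-contained sketch of the pattern (plain `std::sync::mpsc`, illustrative values):

use std::sync::mpsc::channel;

fn main() {
    let (sender, new_root_receiver) = channel();
    for slot in vec![10u64, 11, 12] {
        sender.send(slot).unwrap();
    }

    // try_recv() empties the channel without blocking; `root` keeps the
    // most recent value the sender pushed.
    let mut root = 0u64;
    while let Ok(new_root) = new_root_receiver.try_recv() {
        root = new_root;
    }
    assert_eq!(root, 12); // only the newest root matters for cleanup
}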
@@ -354,7 +354,7 @@ impl PohRecorder {
     pub fn tick(&mut self) {
         let now = Instant::now();
         let poh_entry = self.poh.lock().unwrap().tick();
-        inc_new_counter_info!(
+        inc_new_counter_warn!(
             "poh_recorder-tick_lock_contention",
             timing::duration_as_us(&now.elapsed()) as usize
         );
@@ -364,7 +364,7 @@ impl PohRecorder {
         trace!("tick_height {}", self.tick_height);

         if self.leader_first_tick_height.is_none() {
-            inc_new_counter_info!(
+            inc_new_counter_warn!(
                 "poh_recorder-tick_overhead",
                 timing::duration_as_us(&now.elapsed()) as usize
             );
@@ -380,7 +380,7 @@ impl PohRecorder {
             self.tick_cache.push((entry, self.tick_height));
             let _ = self.flush_cache(true);
         }
-        inc_new_counter_info!(
+        inc_new_counter_warn!(
            "poh_recorder-tick_overhead",
            timing::duration_as_us(&now.elapsed()) as usize
        );
@@ -409,13 +409,13 @@ impl PohRecorder {
         {
             let now = Instant::now();
             let mut poh_lock = self.poh.lock().unwrap();
-            inc_new_counter_info!(
+            inc_new_counter_warn!(
                 "poh_recorder-record_lock_contention",
                 timing::duration_as_us(&now.elapsed()) as usize
             );
             let now = Instant::now();
             let res = poh_lock.record(mixin);
-            inc_new_counter_info!(
+            inc_new_counter_warn!(
                 "poh_recorder-record_ms",
                 timing::duration_as_us(&now.elapsed()) as usize
             );
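Note: both `tick()` and `record()` use the same instrumentation shape: one timer around acquiring the PoH mutex (lock contention) and a second around the work done while holding it. A standalone sketch of that pattern, with `eprintln!` standing in for Solana's `inc_new_counter_warn!` metrics macro:

use std::sync::Mutex;
use std::time::Instant;

fn main() {
    let poh = Mutex::new(0u64);

    // Time how long it takes to acquire the lock (contention).
    let now = Instant::now();
    let mut guard = poh.lock().unwrap();
    eprintln!("lock_contention_us={}", now.elapsed().as_micros());

    // Time the work done while holding it.
    let now = Instant::now();
    *guard += 1; // stand-in for poh.tick() / poh.record(mixin)
    eprintln!("record_us={}", now.elapsed().as_micros());
}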
@@ -262,7 +262,7 @@ impl RepairService {
         } else if slot_meta.consumed == slot_meta.received {
             vec![RepairType::HighestShred(slot, slot_meta.received)]
         } else {
-            let reqs = blockstore.find_missing_data_indexes_ts(
+            let reqs = blockstore.find_missing_data_indexes(
                 slot,
                 slot_meta.first_shred_timestamp,
                 slot_meta.consumed,
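Note: this branch asks the blockstore for the shred indexes in `[consumed, received)` that have not arrived yet; the revert only swaps the `_ts` variant back to `find_missing_data_indexes`. A rough sketch of what such a scan computes (hypothetical free function over a sorted slice of received indexes, not the actual `Blockstore` implementation):

// Every index in [start, end) with no shred, capped at max_missing results.
fn find_missing(received: &[u64], start: u64, end: u64, max_missing: usize) -> Vec<u64> {
    let mut missing = Vec::new();
    for idx in start..end {
        if missing.len() >= max_missing {
            break;
        }
        if received.binary_search(&idx).is_err() {
            missing.push(idx);
        }
    }
    missing
}

fn main() {
    // consumed = 3, received = 9: shreds 4 and 7 never arrived.
    let received = [3u64, 5, 6, 8];
    assert_eq!(find_missing(&received, 3, 9, 10), vec![4, 7]);
}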
@@ -1,5 +1,4 @@
 use crate::cluster_info::{ClusterInfo, MAX_SNAPSHOT_HASHES};
-use solana_ledger::blockstore::Blockstore;
 use solana_ledger::{snapshot_package::AccountsPackageReceiver, snapshot_utils};
 use solana_sdk::{clock::Slot, hash::Hash};
 use std::{
@@ -22,7 +21,6 @@ impl SnapshotPackagerService {
         starting_snapshot_hash: Option<(Slot, Hash)>,
         exit: &Arc<AtomicBool>,
         cluster_info: &Arc<ClusterInfo>,
-        blockstore: Option<Arc<Blockstore>>,
     ) -> Self {
         let exit = exit.clone();
         let cluster_info = cluster_info.clone();
@@ -59,9 +57,6 @@ impl SnapshotPackagerService {
                         }
                         cluster_info.push_snapshot_hashes(hashes.clone());
                     }
-                    if let Some(ref blockstore) = blockstore {
-                        let _ = blockstore.tar_shreds(snapshot_package.root);
-                    }
                 }
                 Err(RecvTimeoutError::Disconnected) => break,
                 Err(RecvTimeoutError::Timeout) => (),
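Note: the surrounding loop follows the usual service shape: block on the package channel with a timeout so the `exit` flag can be rechecked periodically, treat `Timeout` as a no-op, and stop on `Disconnected`. A self-contained sketch of that loop (illustrative names, plain `std` types, not Solana's actual service):

use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::{channel, RecvTimeoutError};
use std::sync::Arc;
use std::thread;
use std::time::Duration;

fn main() {
    let exit = Arc::new(AtomicBool::new(false));
    let (sender, receiver) = channel::<u64>();

    let exit_clone = exit.clone();
    let handle = thread::spawn(move || loop {
        if exit_clone.load(Ordering::Relaxed) {
            break; // service asked to shut down
        }
        match receiver.recv_timeout(Duration::from_millis(100)) {
            Ok(package) => println!("processing package {}", package),
            Err(RecvTimeoutError::Disconnected) => break, // sender hung up
            Err(RecvTimeoutError::Timeout) => (),         // recheck exit flag
        }
    });

    sender.send(1).unwrap();
    drop(sender); // disconnect: the loop's Disconnected arm breaks
    handle.join().unwrap();
}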
@@ -291,7 +291,6 @@ pub mod tests {
         Blockstore::open_with_signal(&blockstore_path)
             .expect("Expected to successfully open ledger");
         let blockstore = Arc::new(blockstore);

         let bank = bank_forks.working_bank();
         let (exit, poh_recorder, poh_service, _entry_receiver) =
             create_test_recorder(&bank, &blockstore, None);
@@ -198,10 +198,6 @@ impl Validator {
         let bank_info = &bank_forks_info[0];
         let bank = bank_forks[bank_info.bank_slot].clone();

-        blockstore
-            .reconcile_shreds(Some(&leader_schedule_cache))
-            .expect("Expected to successfully reconcile shreds");
-
         info!("Starting validator from slot {}", bank.slot());
         {
             let hard_forks: Vec<_> = bank.hard_forks().read().unwrap().iter().copied().collect();
@@ -376,13 +372,8 @@ impl Validator {
             if config.snapshot_config.is_some() {
                 // Start a snapshot packaging service
                 let (sender, receiver) = channel();
-                let snapshot_packager_service = SnapshotPackagerService::new(
-                    receiver,
-                    snapshot_hash,
-                    &exit,
-                    &cluster_info,
-                    Some(blockstore.clone()),
-                );
+                let snapshot_packager_service =
+                    SnapshotPackagerService::new(receiver, snapshot_hash, &exit, &cluster_info);
                 (Some(snapshot_packager_service), Some(sender))
             } else {
                 (None, None)
@@ -321,7 +321,7 @@ mod tests {
         let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(ContactInfo::default()));

         let snapshot_packager_service =
-            SnapshotPackagerService::new(receiver, None, &exit, &cluster_info, None);
+            SnapshotPackagerService::new(receiver, None, &exit, &cluster_info);

         // Close the channel so that the package service will exit after reading all the
         // packages off the channel