Extract tower storage details from Tower struct

Michael Vines
2021-07-20 22:25:13 -07:00
parent ca37873e16
commit 397801a2d8
12 changed files with 425 additions and 388 deletions
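In short: Tower no longer owns its on-disk paths (ledger_path/path/tmp_path); persistence now goes through a new TowerStorage trait, with FileTowerStorage reproducing the previous tower-<pubkey>.bin format. The call shape changes roughly as follows (a sketch distilled from the hunks below, assuming a tower, keypair, and storage are in scope):

// Before this commit: Tower carried its paths and did its own file I/O.
tower.save(&identity_keypair)?;
let tower = Tower::restore(ledger_path, &node_pubkey)?;
// After: the storage backend is injected.
let tower_storage = FileTowerStorage::new(ledger_path.to_path_buf());
tower.save(&tower_storage, &identity_keypair)?;
let tower = Tower::restore(&tower_storage, &node_pubkey)?;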

View File

@@ -1,39 +1,40 @@
use crate::{
heaviest_subtree_fork_choice::HeaviestSubtreeForkChoice,
latest_validator_votes_for_frozen_banks::LatestValidatorVotesForFrozenBanks,
progress_map::{LockoutIntervals, ProgressMap},
};
use chrono::prelude::*;
use solana_ledger::{ancestor_iterator::AncestorIterator, blockstore::Blockstore, blockstore_db};
use solana_measure::measure::Measure;
use solana_runtime::{
bank::Bank, bank_forks::BankForks, commitment::VOTE_THRESHOLD_SIZE,
vote_account::ArcVoteAccount,
};
use solana_sdk::{
clock::{Slot, UnixTimestamp},
hash::Hash,
instruction::Instruction,
pubkey::Pubkey,
signature::{Keypair, Signature, Signer},
slot_history::{Check, SlotHistory},
};
use solana_vote_program::{
vote_instruction,
vote_state::{BlockTimestamp, Lockout, Vote, VoteState, MAX_LOCKOUT_HISTORY},
};
use std::{
cmp::Ordering,
collections::{HashMap, HashSet},
fs::{self, File},
io::BufReader,
ops::{
Bound::{Included, Unbounded},
Deref,
},
path::{Path, PathBuf},
};
use {
crate::{
heaviest_subtree_fork_choice::HeaviestSubtreeForkChoice,
latest_validator_votes_for_frozen_banks::LatestValidatorVotesForFrozenBanks,
progress_map::{LockoutIntervals, ProgressMap},
},
chrono::prelude::*,
solana_ledger::{ancestor_iterator::AncestorIterator, blockstore::Blockstore, blockstore_db},
solana_runtime::{
bank::Bank, bank_forks::BankForks, commitment::VOTE_THRESHOLD_SIZE,
vote_account::ArcVoteAccount,
},
solana_sdk::{
clock::{Slot, UnixTimestamp},
hash::Hash,
instruction::Instruction,
pubkey::Pubkey,
signature::{Keypair, Signature, Signer},
slot_history::{Check, SlotHistory},
},
solana_vote_program::{
vote_instruction,
vote_state::{BlockTimestamp, Lockout, Vote, VoteState, MAX_LOCKOUT_HISTORY},
},
std::{
cmp::Ordering,
collections::{HashMap, HashSet},
fs::{self, File},
io::BufReader,
ops::{
Bound::{Included, Unbounded},
Deref,
},
path::PathBuf,
},
thiserror::Error,
};
use thiserror::Error;
#[derive(PartialEq, Clone, Debug, AbiExample)]
pub enum SwitchForkDecision {
@@ -119,12 +120,6 @@ pub struct Tower {
last_vote_tx_blockhash: Hash,
last_timestamp: BlockTimestamp,
#[serde(skip)]
pub(crate) ledger_path: PathBuf,
#[serde(skip)]
path: PathBuf,
#[serde(skip)]
tmp_path: PathBuf, // used before atomic fs::rename()
#[serde(skip)]
// Restored last voted slot which cannot be found in SlotHistory at replayed root
// (This is a special field for slashing-free validator restart with edge cases).
// This could be emptied after some time; but left intact indefinitely for easier
@@ -146,9 +141,6 @@ impl Default for Tower {
last_vote: Vote::default(),
last_timestamp: BlockTimestamp::default(),
last_vote_tx_blockhash: Hash::default(),
ledger_path: PathBuf::default(),
path: PathBuf::default(),
tmp_path: PathBuf::default(),
stray_restored_slot: Option::default(),
last_switch_threshold_check: Option::default(),
};
@@ -164,25 +156,15 @@ impl Tower {
vote_account_pubkey: &Pubkey,
root: Slot,
bank: &Bank,
ledger_path: &Path,
) -> Self {
let mut tower = Tower {
ledger_path: ledger_path.into(),
node_pubkey: *node_pubkey,
..Tower::default()
};
tower.set_identity(*node_pubkey);
tower.initialize_lockouts_from_bank(vote_account_pubkey, root, bank);
tower
}
fn set_identity(&mut self, node_pubkey: Pubkey) {
let path = Self::get_filename(&self.ledger_path, &node_pubkey);
let tmp_path = Self::get_tmp_filename(&path);
self.node_pubkey = node_pubkey;
self.path = path;
self.tmp_path = tmp_path;
}
#[cfg(test)]
pub fn new_for_tests(threshold_depth: usize, threshold_size: f64) -> Self {
Self {
@@ -194,7 +176,6 @@ impl Tower {
pub fn new_from_bankforks(
bank_forks: &BankForks,
ledger_path: &Path,
node_pubkey: &Pubkey,
vote_account: &Pubkey,
) -> Self {
@@ -216,7 +197,7 @@
)
.clone();
Self::new(node_pubkey, vote_account, root, &heaviest_bank, ledger_path)
Self::new(node_pubkey, vote_account, root, &heaviest_bank)
}
pub(crate) fn collect_vote_lockouts<F>(
@@ -1195,71 +1176,15 @@ impl Tower {
self.vote_state.root_slot = Some(root);
}
pub fn get_filename(path: &Path, node_pubkey: &Pubkey) -> PathBuf {
path.join(format!("tower-{}", node_pubkey))
.with_extension("bin")
}
fn get_tmp_filename(path: &Path) -> PathBuf {
path.with_extension("bin.new")
}
pub fn save(&self, node_keypair: &Keypair) -> Result<()> {
let mut measure = Measure::start("tower_save-ms");
if self.node_pubkey != node_keypair.pubkey() {
return Err(TowerError::WrongTower(format!(
"node_pubkey is {:?} but found tower for {:?}",
node_keypair.pubkey(),
self.node_pubkey
)));
}
let filename = &self.path;
let new_filename = &self.tmp_path;
{
// overwrite anything that already exists
let mut file = File::create(&new_filename)?;
let saved_tower = SavedTower::new(self, node_keypair)?;
bincode::serialize_into(&mut file, &saved_tower)?;
// file.sync_all() hurts performance; pipeline sync-ing and submitting votes to the cluster!
}
trace!("persisted votes: {:?}", self.voted_slots());
fs::rename(&new_filename, &filename)?;
// self.path.parent().sync_all() hurts performance the same as the sync above
measure.stop();
inc_new_counter_info!("tower_save-ms", measure.as_ms() as usize);
pub fn save(&self, tower_storage: &dyn TowerStorage, node_keypair: &Keypair) -> Result<()> {
let saved_tower = SavedTower::new(self, node_keypair)?;
tower_storage.store(&saved_tower)?;
Ok(())
}
pub fn restore(ledger_path: &Path, node_pubkey: &Pubkey) -> Result<Self> {
let filename = Self::get_filename(ledger_path, node_pubkey);
// Ensure the parent dir exists here, because restore() always precedes save()
fs::create_dir_all(&filename.parent().unwrap())?;
let file = File::open(&filename)?;
let mut stream = BufReader::new(file);
let saved_tower: SavedTower = bincode::deserialize_from(&mut stream)?;
if !saved_tower.verify(node_pubkey) {
return Err(TowerError::InvalidSignature);
}
let mut tower = saved_tower.deserialize()?;
tower.ledger_path = ledger_path.into();
tower.path = filename;
tower.tmp_path = Self::get_tmp_filename(&tower.path);
// check that the tower actually belongs to this node
if &tower.node_pubkey != node_pubkey {
return Err(TowerError::WrongTower(format!(
"node_pubkey is {:?} but found tower for {:?}",
node_pubkey, tower.node_pubkey
)));
}
Ok(tower)
pub fn restore(tower_storage: &dyn TowerStorage, node_pubkey: &Pubkey) -> Result<Self> {
let saved_tower = tower_storage.load(node_pubkey)?;
saved_tower.try_into_tower(node_pubkey)
}
}
@@ -1300,26 +1225,104 @@ impl TowerError {
}
}
pub trait TowerStorage: Sync + Send {
fn load(&self, node_pubkey: &Pubkey) -> Result<SavedTower>;
fn store(&self, saved_tower: &SavedTower) -> Result<()>;
}
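Any backend that can round-trip a SavedTower can implement this trait. A minimal in-memory sketch for tests (illustrative only; MemoryTowerStorage is hypothetical and not part of this commit):

// Hypothetical test-only backend; assumes TowerError's IoError(std::io::Error) variant.
#[derive(Default)]
pub struct MemoryTowerStorage {
    saved: std::sync::RwLock<Option<SavedTower>>,
}

impl TowerStorage for MemoryTowerStorage {
    fn load(&self, _node_pubkey: &Pubkey) -> Result<SavedTower> {
        // Report "not found" through the existing error type
        self.saved.read().unwrap().clone().ok_or_else(|| {
            TowerError::IoError(std::io::Error::new(
                std::io::ErrorKind::NotFound,
                "no tower saved",
            ))
        })
    }

    fn store(&self, saved_tower: &SavedTower) -> Result<()> {
        *self.saved.write().unwrap() = Some(saved_tower.clone());
        Ok(())
    }
}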
#[derive(Debug, Default, Clone, PartialEq)]
pub struct FileTowerStorage {
pub tower_path: PathBuf,
}
impl FileTowerStorage {
pub fn new(tower_path: PathBuf) -> Self {
Self { tower_path }
}
pub fn filename(&self, node_pubkey: &Pubkey) -> PathBuf {
self.tower_path
.join(format!("tower-{}", node_pubkey))
.with_extension("bin")
}
}
impl TowerStorage for FileTowerStorage {
fn load(&self, node_pubkey: &Pubkey) -> Result<SavedTower> {
let filename = self.filename(node_pubkey);
trace!("load {}", filename.display());
// Ensure the parent dir exists here, because restore() always precedes save()
fs::create_dir_all(&filename.parent().unwrap())?;
let file = File::open(&filename)?;
let mut stream = BufReader::new(file);
bincode::deserialize_from(&mut stream).map_err(|e| e.into())
}
fn store(&self, saved_tower: &SavedTower) -> Result<()> {
let filename = self.filename(&saved_tower.node_pubkey);
trace!("store: {}", filename.display());
let new_filename = filename.with_extension("bin.new");
{
// overwrite anything that already exists
let mut file = File::create(&new_filename)?;
bincode::serialize_into(&mut file, saved_tower)?;
// file.sync_all() hurts performance; pipeline sync-ing and submitting votes to the cluster!
}
fs::rename(&new_filename, &filename)?;
// self.path.parent().sync_all() hurts performance the same as the sync above
Ok(())
}
}
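Putting the pieces together, a usage sketch of the new API (this mirrors the updated tests below; assumes ledger_path, tower, and identity_keypair are in scope):

// store() writes tower-<pubkey>.bin.new first, then atomically renames it over tower-<pubkey>.bin.
let tower_storage = FileTowerStorage::new(ledger_path.to_path_buf());
tower.save(&tower_storage, &identity_keypair)?;
let restored = Tower::restore(&tower_storage, &identity_keypair.pubkey())?;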
#[frozen_abi(digest = "Gaxfwvx5MArn52mKZQgzHmDCyn5YfCuTHvp5Et3rFfpp")]
#[derive(Default, Clone, Serialize, Deserialize, Debug, PartialEq, AbiExample)]
pub struct SavedTower {
signature: Signature,
data: Vec<u8>,
#[serde(skip)]
node_pubkey: Pubkey,
}
impl SavedTower {
pub fn new<T: Signer>(tower: &Tower, keypair: &T) -> Result<Self> {
let node_pubkey = keypair.pubkey();
if tower.node_pubkey != node_pubkey {
return Err(TowerError::WrongTower(format!(
"node_pubkey is {:?} but found tower for {:?}",
node_pubkey, tower.node_pubkey
)));
}
let data = bincode::serialize(tower)?;
let signature = keypair.sign_message(&data);
Ok(Self { signature, data })
Ok(Self {
signature,
data,
node_pubkey,
})
}
pub fn verify(&self, pubkey: &Pubkey) -> bool {
self.signature.verify(pubkey.as_ref(), &self.data)
}
pub fn deserialize(&self) -> Result<Tower> {
bincode::deserialize(&self.data).map_err(|e| e.into())
}
pub fn try_into_tower(self, node_pubkey: &Pubkey) -> Result<Tower> {
// This method assumes that `self` was just deserialized
assert_eq!(self.node_pubkey, Pubkey::default());
if !self.signature.verify(node_pubkey.as_ref(), &self.data) {
return Err(TowerError::InvalidSignature);
}
bincode::deserialize(&self.data)
.map_err(|e| e.into())
.and_then(|tower: Tower| {
if tower.node_pubkey != *node_pubkey {
return Err(TowerError::WrongTower(format!(
"node_pubkey is {:?} but found tower for {:?}",
node_pubkey, tower.node_pubkey
)));
}
Ok(tower)
})
}
}
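Note the assert in try_into_tower(): node_pubkey is #[serde(skip)], so it equals Pubkey::default() only on a freshly deserialized value; calling it on a SavedTower straight out of SavedTower::new() would trip the assert. A sketch of the intended flow (error conversions via ? assumed):

let saved = SavedTower::new(&tower, &identity_keypair)?; // signs bincode(tower) with the node key
let bytes = bincode::serialize(&saved)?;
let decoded: SavedTower = bincode::deserialize(&bytes)?; // node_pubkey decodes as Pubkey::default()
let tower = decoded.try_into_tower(&identity_keypair.pubkey())?; // InvalidSignature / WrongTower on mismatch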
@@ -2518,20 +2521,20 @@ pub mod test {
F: Fn(&mut Tower, &Pubkey),
G: Fn(&PathBuf),
{
let dir = TempDir::new().unwrap();
let tower_path = TempDir::new().unwrap();
let identity_keypair = Arc::new(Keypair::new());
let node_pubkey = identity_keypair.pubkey();
// Use values that will not match the default derived from BankForks
let mut tower = Tower::new_for_tests(10, 0.9);
tower.ledger_path = dir.path().to_path_buf();
tower.path = Tower::get_filename(&tower.ledger_path, &identity_keypair.pubkey());
tower.tmp_path = Tower::get_tmp_filename(&tower.path);
let tower_storage = FileTowerStorage::new(tower_path.path().to_path_buf());
modify_original(&mut tower, &identity_keypair.pubkey());
modify_original(&mut tower, &node_pubkey);
tower.save(&identity_keypair).unwrap();
modify_serialized(&tower.path);
let loaded = Tower::restore(dir.path(), &identity_keypair.pubkey());
tower.save(&tower_storage, &identity_keypair).unwrap();
modify_serialized(&tower_storage.filename(&node_pubkey));
let loaded = Tower::restore(&tower_storage, &node_pubkey);
(tower, loaded)
}
@@ -2760,8 +2763,9 @@ pub mod test {
fn test_load_tower_wrong_identity() {
let identity_keypair = Arc::new(Keypair::new());
let tower = Tower::default();
let tower_storage = FileTowerStorage::default();
assert_matches!(
tower.save(&identity_keypair),
tower.save(&tower_storage, &identity_keypair),
Err(TowerError::WrongTower(_))
)
}

View File

@@ -1,70 +1,73 @@
//! The `replay_stage` replays transactions broadcast by the leader.
use crate::{
ancestor_hashes_service::AncestorHashesReplayUpdateSender,
broadcast_stage::RetransmitSlotsSender,
cache_block_meta_service::CacheBlockMetaSender,
cluster_info_vote_listener::{
GossipDuplicateConfirmedSlotsReceiver, GossipVerifiedVoteHashReceiver, VoteTracker,
use {
crate::{
ancestor_hashes_service::AncestorHashesReplayUpdateSender,
broadcast_stage::RetransmitSlotsSender,
cache_block_meta_service::CacheBlockMetaSender,
cluster_info_vote_listener::{
GossipDuplicateConfirmedSlotsReceiver, GossipVerifiedVoteHashReceiver, VoteTracker,
},
cluster_slot_state_verifier::*,
cluster_slots::ClusterSlots,
cluster_slots_service::ClusterSlotsUpdateSender,
commitment_service::{AggregateCommitmentService, CommitmentAggregationData},
consensus::{
ComputedBankState, Stake, SwitchForkDecision, Tower, TowerStorage, VotedStakes,
SWITCH_FORK_THRESHOLD,
},
fork_choice::{ForkChoice, SelectVoteAndResetForkResult},
heaviest_subtree_fork_choice::HeaviestSubtreeForkChoice,
latest_validator_votes_for_frozen_banks::LatestValidatorVotesForFrozenBanks,
progress_map::{ForkProgress, ProgressMap, PropagatedStats},
repair_service::DuplicateSlotsResetReceiver,
rewards_recorder_service::RewardsRecorderSender,
unfrozen_gossip_verified_vote_hashes::UnfrozenGossipVerifiedVoteHashes,
voting_service::VoteOp,
window_service::DuplicateSlotReceiver,
},
cluster_slot_state_verifier::*,
cluster_slots::ClusterSlots,
cluster_slots_service::ClusterSlotsUpdateSender,
commitment_service::{AggregateCommitmentService, CommitmentAggregationData},
consensus::{
ComputedBankState, Stake, SwitchForkDecision, Tower, VotedStakes, SWITCH_FORK_THRESHOLD,
},
solana_client::rpc_response::SlotUpdate,
solana_entry::entry::VerifyRecyclers,
solana_gossip::cluster_info::ClusterInfo,
solana_ledger::{
block_error::BlockError,
blockstore::Blockstore,
blockstore_processor::{self, BlockstoreProcessorError, TransactionStatusSender},
leader_schedule_cache::LeaderScheduleCache,
},
fork_choice::{ForkChoice, SelectVoteAndResetForkResult},
heaviest_subtree_fork_choice::HeaviestSubtreeForkChoice,
latest_validator_votes_for_frozen_banks::LatestValidatorVotesForFrozenBanks,
progress_map::{ForkProgress, ProgressMap, PropagatedStats},
repair_service::DuplicateSlotsResetReceiver,
rewards_recorder_service::RewardsRecorderSender,
unfrozen_gossip_verified_vote_hashes::UnfrozenGossipVerifiedVoteHashes,
voting_service::VoteOp,
window_service::DuplicateSlotReceiver,
};
use solana_client::rpc_response::SlotUpdate;
use solana_entry::entry::VerifyRecyclers;
use solana_gossip::cluster_info::ClusterInfo;
use solana_ledger::{
block_error::BlockError,
blockstore::Blockstore,
blockstore_processor::{self, BlockstoreProcessorError, TransactionStatusSender},
leader_schedule_cache::LeaderScheduleCache,
};
use solana_measure::measure::Measure;
use solana_metrics::inc_new_counter_info;
use solana_poh::poh_recorder::{PohRecorder, GRACE_TICKS_FACTOR, MAX_GRACE_SLOTS};
use solana_rpc::{
optimistically_confirmed_bank_tracker::{BankNotification, BankNotificationSender},
rpc_subscriptions::RpcSubscriptions,
};
use solana_runtime::{
accounts_background_service::AbsRequestSender, bank::Bank, bank::ExecuteTimings,
bank_forks::BankForks, commitment::BlockCommitmentCache, vote_sender_types::ReplayVoteSender,
};
use solana_sdk::{
clock::{BankId, Slot, MAX_PROCESSING_AGE, NUM_CONSECUTIVE_LEADER_SLOTS},
genesis_config::ClusterType,
hash::Hash,
pubkey::Pubkey,
signature::Signature,
signature::{Keypair, Signer},
timing::timestamp,
transaction::Transaction,
};
use solana_vote_program::vote_state::Vote;
use std::{
collections::{HashMap, HashSet},
result,
sync::{
atomic::{AtomicBool, Ordering},
mpsc::{Receiver, RecvTimeoutError, Sender},
Arc, Mutex, RwLock,
},
thread::{self, Builder, JoinHandle},
time::{Duration, Instant},
};
solana_measure::measure::Measure,
solana_metrics::inc_new_counter_info,
solana_poh::poh_recorder::{PohRecorder, GRACE_TICKS_FACTOR, MAX_GRACE_SLOTS},
solana_rpc::{
optimistically_confirmed_bank_tracker::{BankNotification, BankNotificationSender},
rpc_subscriptions::RpcSubscriptions,
},
solana_runtime::{
accounts_background_service::AbsRequestSender, bank::Bank, bank::ExecuteTimings,
bank_forks::BankForks, commitment::BlockCommitmentCache,
vote_sender_types::ReplayVoteSender,
},
solana_sdk::{
clock::{BankId, Slot, MAX_PROCESSING_AGE, NUM_CONSECUTIVE_LEADER_SLOTS},
genesis_config::ClusterType,
hash::Hash,
pubkey::Pubkey,
signature::Signature,
signature::{Keypair, Signer},
timing::timestamp,
transaction::Transaction,
},
solana_vote_program::vote_state::Vote,
std::{
collections::{HashMap, HashSet},
result,
sync::{
atomic::{AtomicBool, Ordering},
mpsc::{Receiver, RecvTimeoutError, Sender},
Arc, Mutex, RwLock,
},
thread::{self, Builder, JoinHandle},
time::{Duration, Instant},
},
};
pub const MAX_ENTRY_RECV_PER_ITER: usize = 512;
@@ -128,6 +131,7 @@ pub struct ReplayStageConfig {
pub bank_notification_sender: Option<BankNotificationSender>,
pub wait_for_vote_to_start_leader: bool,
pub ancestor_hashes_replay_update_sender: AncestorHashesReplayUpdateSender,
pub tower_storage: Arc<dyn TowerStorage>,
}
#[derive(Default)]
@@ -336,6 +340,7 @@ impl ReplayStage {
bank_notification_sender,
wait_for_vote_to_start_leader,
ancestor_hashes_replay_update_sender,
tower_storage,
} = config;
trace!("replay stage");
@@ -593,6 +598,7 @@ impl ReplayStage {
switch_fork_decision,
&bank_forks,
&mut tower,
tower_storage.as_ref(),
&mut progress,
&vote_account,
&identity_keypair,
@@ -648,7 +654,7 @@ impl ReplayStage {
my_pubkey = identity_keypair.pubkey();
// Load the new identity's tower
tower = Tower::restore(&tower.ledger_path, &my_pubkey)
tower = Tower::restore(tower_storage.as_ref(), &my_pubkey)
.and_then(|restored_tower| {
let root_bank = bank_forks.read().unwrap().root_bank();
let slot_history = root_bank.get_slot_history();
@@ -1482,6 +1488,7 @@ impl ReplayStage {
switch_fork_decision: &SwitchForkDecision,
bank_forks: &Arc<RwLock<BankForks>>,
tower: &mut Tower,
tower_storage: &dyn TowerStorage,
progress: &mut ProgressMap,
vote_account_pubkey: &Pubkey,
identity_keypair: &Keypair,
@@ -1509,9 +1516,14 @@ impl ReplayStage {
trace!("handle votable bank {}", bank.slot());
let new_root = tower.record_bank_vote(bank, vote_account_pubkey);
if let Err(err) = tower.save(identity_keypair) {
error!("Unable to save tower: {:?}", err);
std::process::exit(1);
{
let mut measure = Measure::start("tower_save-ms");
if let Err(err) = tower.save(tower_storage, identity_keypair) {
error!("Unable to save tower: {:?}", err);
std::process::exit(1);
}
measure.stop();
inc_new_counter_info!("tower_save-ms", measure.as_ms() as usize);
}
if let Some(new_root) = new_root {
@@ -2877,7 +2889,6 @@ pub mod tests {
let my_vote_pubkey = my_keypairs.vote_keypair.pubkey();
let tower = Tower::new_from_bankforks(
&bank_forks.read().unwrap(),
blockstore.ledger_path(),
&cluster_info.id(),
&my_vote_pubkey,
);

View File

@@ -11,7 +11,7 @@ use crate::{
},
cluster_slots::ClusterSlots,
completed_data_sets_service::CompletedDataSetsSender,
consensus::Tower,
consensus::{Tower, TowerStorage},
cost_model::CostModel,
cost_update_service::CostUpdateService,
ledger_cleanup_service::LedgerCleanupService,
@@ -114,6 +114,7 @@ impl Tvu {
rpc_subscriptions: &Arc<RpcSubscriptions>,
poh_recorder: &Arc<Mutex<PohRecorder>>,
tower: Tower,
tower_storage: Arc<dyn TowerStorage>,
leader_schedule_cache: &Arc<LeaderScheduleCache>,
exit: &Arc<AtomicBool>,
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
@@ -277,6 +278,7 @@ impl Tvu {
bank_notification_sender,
wait_for_vote_to_start_leader: tvu_config.wait_for_vote_to_start_leader,
ancestor_hashes_replay_update_sender,
tower_storage,
};
let (voting_sender, voting_receiver) = channel();
@@ -449,6 +451,7 @@ pub mod tests {
)),
&poh_recorder,
tower,
Arc::new(crate::consensus::FileTowerStorage::default()),
&leader_schedule_cache,
&exit,
block_commitment_cache,

View File

@@ -5,7 +5,7 @@ use crate::{
cache_block_meta_service::{CacheBlockMetaSender, CacheBlockMetaService},
cluster_info_vote_listener::VoteTracker,
completed_data_sets_service::CompletedDataSetsService,
consensus::{reconcile_blockstore_roots_with_tower, Tower},
consensus::{reconcile_blockstore_roots_with_tower, FileTowerStorage, Tower, TowerStorage},
cost_model::CostModel,
rewards_recorder_service::{RewardsRecorderSender, RewardsRecorderService},
sample_performance_service::SamplePerformanceService,
@@ -90,7 +90,6 @@ use std::{
const MAX_COMPLETED_DATA_SETS_IN_CHANNEL: usize = 100_000;
const WAIT_FOR_SUPERMAJORITY_THRESHOLD_PERCENT: u64 = 90;
#[derive(Debug)]
pub struct ValidatorConfig {
pub dev_halt_at_slot: Option<Slot>,
pub expected_genesis_hash: Option<Hash>,
@@ -124,7 +123,7 @@ pub struct ValidatorConfig {
pub wal_recovery_mode: Option<BlockstoreRecoveryMode>,
pub poh_verify: bool, // Perform PoH verification during blockstore processing at boot
pub require_tower: bool,
pub tower_path: Option<PathBuf>,
pub tower_storage: Arc<dyn TowerStorage>,
pub debug_keys: Option<Arc<HashSet<Pubkey>>>,
pub contact_debug_interval: u64,
pub contact_save_interval: u64,
@@ -181,7 +180,7 @@ impl Default for ValidatorConfig {
wal_recovery_mode: None,
poh_verify: true,
require_tower: false,
tower_path: None,
tower_storage: Arc::new(FileTowerStorage::new(PathBuf::default())),
debug_keys: None,
contact_debug_interval: DEFAULT_CONTACT_DEBUG_INTERVAL_MILLIS,
contact_save_interval: DEFAULT_CONTACT_SAVE_INTERVAL_MILLIS,
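Because tower_storage is now an ordinary field, embedders can swap the backend at construction time; a hypothetical example (the path is illustrative only):

let config = ValidatorConfig {
    tower_storage: Arc::new(FileTowerStorage::new(PathBuf::from("/mnt/validator/tower"))),
    ..ValidatorConfig::default()
};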
@@ -715,6 +714,7 @@ impl Validator {
&rpc_subscriptions,
&poh_recorder,
tower,
config.tower_storage.clone(),
&leader_schedule_cache,
&exit,
block_commitment_cache,
@@ -949,7 +949,6 @@ fn post_process_restored_tower(
validator_identity: &Pubkey,
vote_account: &Pubkey,
config: &ValidatorConfig,
tower_path: &Path,
bank_forks: &BankForks,
) -> Tower {
let mut should_require_tower = config.require_tower;
@@ -1028,7 +1027,6 @@
Tower::new_from_bankforks(
bank_forks,
tower_path,
validator_identity,
vote_account,
)
@@ -1096,9 +1094,7 @@ fn new_banks_from_ledger(
.expect("Failed to open ledger database");
blockstore.set_no_compaction(config.no_rocksdb_compaction);
let tower_path = config.tower_path.as_deref().unwrap_or(ledger_path);
let restored_tower = Tower::restore(tower_path, validator_identity);
let restored_tower = Tower::restore(config.tower_storage.as_ref(), validator_identity);
if let Ok(tower) = &restored_tower {
reconcile_blockstore_roots_with_tower(tower, &blockstore).unwrap_or_else(|err| {
error!("Failed to reconcile blockstore with tower: {:?}", err);
@@ -1219,7 +1215,6 @@ fn new_banks_from_ledger(
validator_identity,
vote_account,
config,
tower_path,
&bank_forks,
);