Send votes from banking stage to vote listener (#11434)

*  Send votes from banking stage to vote listener

Co-authored-by: Carl <carl@solana.com>

Author: carllin
Date: 2020-08-07 11:21:35 -07:00
Committed by: GitHub
Commit: 7e25130529 (parent b7c2681903)
23 changed files with 268 additions and 156 deletions
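
The change threads one new crossbeam channel from the banking stage (TPU side) into the cluster-info vote listener, so votes observed while producing a block reach vote tracking the same way replayed votes already do: validator.rs creates the channel, the sender is cloned into the replay stage and into the banking stage, and the receiver ends up in the vote listener. Below is a minimal sketch of that wiring, assuming simplified stand-in types; the real `ReplayVoteSender`/`ReplayVoteReceiver` aliases live in `solana_runtime::vote_sender_types` and carry parsed vote transactions rather than the toy payload used here.

```rust
use crossbeam_channel::{unbounded, Receiver, Sender};

// Toy payload standing in for a replayed/committed vote: (voter, slot).
type ReplayedVote = (String, u64);
type ReplayVoteSender = Sender<ReplayedVote>;
type ReplayVoteReceiver = Receiver<ReplayedVote>;

struct BankingStage {
    gossip_vote_sender: ReplayVoteSender,
}

struct ClusterInfoVoteListener {
    replay_votes_receiver: ReplayVoteReceiver,
}

fn main() {
    // validator.rs creates one channel up front...
    let (replay_vote_sender, replay_vote_receiver) = unbounded::<ReplayedVote>();

    // ...the sender is cloned into the replay stage (TVU) and, with this commit,
    // also handed to the banking stage inside the TPU...
    let banking = BankingStage {
        gossip_vote_sender: replay_vote_sender.clone(),
    };

    // ...while the receiver goes to the cluster-info vote listener.
    let listener = ClusterInfoVoteListener {
        replay_votes_receiver: replay_vote_receiver,
    };

    // A vote committed by the banking stage is now visible to the listener.
    banking.gossip_vote_sender.send(("validator-a".into(), 42)).unwrap();
    let (who, slot) = listener.replay_votes_receiver.recv().unwrap();
    println!("vote from {who} for slot {slot}");
}
```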


@ -24,7 +24,9 @@ use solana_perf::{
use solana_runtime::{
accounts_db::ErrorCounters,
bank::{Bank, TransactionBalancesSet, TransactionProcessResult},
bank_utils,
transaction_batch::TransactionBatch,
vote_sender_types::ReplayVoteSender,
};
use solana_sdk::{
clock::{
@ -81,6 +83,7 @@ impl BankingStage {
verified_receiver: CrossbeamReceiver<Vec<Packets>>,
verified_vote_receiver: CrossbeamReceiver<Vec<Packets>>,
transaction_status_sender: Option<TransactionStatusSender>,
gossip_vote_sender: ReplayVoteSender,
) -> Self {
Self::new_num_threads(
cluster_info,
@ -89,6 +92,7 @@ impl BankingStage {
verified_vote_receiver,
Self::num_threads(),
transaction_status_sender,
gossip_vote_sender,
)
}
@ -99,6 +103,7 @@ impl BankingStage {
verified_vote_receiver: CrossbeamReceiver<Vec<Packets>>,
num_threads: u32,
transaction_status_sender: Option<TransactionStatusSender>,
gossip_vote_sender: ReplayVoteSender,
) -> Self {
let batch_limit = TOTAL_BUFFERED_PACKETS / ((num_threads - 1) as usize * PACKETS_PER_BATCH);
// Single thread to generate entries from many banks.
@ -119,6 +124,7 @@ impl BankingStage {
let cluster_info = cluster_info.clone();
let mut recv_start = Instant::now();
let transaction_status_sender = transaction_status_sender.clone();
let gossip_vote_sender = gossip_vote_sender.clone();
Builder::new()
.name("solana-banking-stage-tx".to_string())
.spawn(move || {
@ -132,7 +138,8 @@ impl BankingStage {
enable_forwarding,
i,
batch_limit,
transaction_status_sender.clone(),
transaction_status_sender,
gossip_vote_sender,
);
})
.unwrap()
@ -168,6 +175,7 @@ impl BankingStage {
buffered_packets: &mut Vec<PacketsAndOffsets>,
batch_limit: usize,
transaction_status_sender: Option<TransactionStatusSender>,
gossip_vote_sender: &ReplayVoteSender,
) -> UnprocessedPackets {
let mut unprocessed_packets = vec![];
let mut rebuffered_packets = 0;
@ -199,6 +207,7 @@ impl BankingStage {
&msgs,
unprocessed_indexes.to_owned(),
transaction_status_sender.clone(),
gossip_vote_sender,
);
new_tx_count += processed;
@ -283,6 +292,7 @@ impl BankingStage {
)
}
#[allow(clippy::too_many_arguments)]
fn process_buffered_packets(
my_pubkey: &Pubkey,
socket: &std::net::UdpSocket,
@ -292,6 +302,7 @@ impl BankingStage {
enable_forwarding: bool,
batch_limit: usize,
transaction_status_sender: Option<TransactionStatusSender>,
gossip_vote_sender: &ReplayVoteSender,
) -> BufferedPacketsDecision {
let (leader_at_slot_offset, poh_has_bank, would_be_leader) = {
let poh = poh_recorder.lock().unwrap();
@ -319,6 +330,7 @@ impl BankingStage {
buffered_packets,
batch_limit,
transaction_status_sender,
gossip_vote_sender,
);
buffered_packets.append(&mut unprocessed);
}
@ -352,6 +364,7 @@ impl BankingStage {
decision
}
#[allow(clippy::too_many_arguments)]
pub fn process_loop(
my_pubkey: Pubkey,
verified_receiver: &CrossbeamReceiver<Vec<Packets>>,
@ -362,6 +375,7 @@ impl BankingStage {
id: u32,
batch_limit: usize,
transaction_status_sender: Option<TransactionStatusSender>,
gossip_vote_sender: ReplayVoteSender,
) {
let socket = UdpSocket::bind("0.0.0.0:0").unwrap();
let mut buffered_packets = vec![];
@ -376,6 +390,7 @@ impl BankingStage {
enable_forwarding,
batch_limit,
transaction_status_sender.clone(),
&gossip_vote_sender,
);
if decision == BufferedPacketsDecision::Hold {
// If we are waiting on a new bank,
@ -403,6 +418,7 @@ impl BankingStage {
id,
batch_limit,
transaction_status_sender.clone(),
&gossip_vote_sender,
) {
Err(RecvTimeoutError::Timeout) => (),
Err(RecvTimeoutError::Disconnected) => break,
@ -501,6 +517,7 @@ impl BankingStage {
poh: &Arc<Mutex<PohRecorder>>,
batch: &TransactionBatch,
transaction_status_sender: Option<TransactionStatusSender>,
gossip_vote_sender: &ReplayVoteSender,
) -> (Result<usize, PohRecorderError>, Vec<usize>) {
let mut load_execute_time = Measure::start("load_execute_time");
// Use a shorter maximum age when adding transactions into the pipeline. This will reduce
@ -533,24 +550,23 @@ impl BankingStage {
let num_to_commit = num_to_commit.unwrap();
if num_to_commit != 0 {
let transaction_statuses = bank
.commit_transactions(
txs,
None,
&mut loaded_accounts,
&results,
tx_count,
signature_count,
)
.processing_results;
let tx_results = bank.commit_transactions(
txs,
None,
&mut loaded_accounts,
&results,
tx_count,
signature_count,
);
bank_utils::find_and_send_votes(txs, &tx_results, Some(gossip_vote_sender));
if let Some(sender) = transaction_status_sender {
let post_balances = bank.collect_balances(batch);
send_transaction_status_batch(
bank.clone(),
batch.transactions(),
batch.iteration_order_vec(),
transaction_statuses,
tx_results.processing_results,
TransactionBalancesSet::new(pre_balances, post_balances),
sender,
);
@ -578,6 +594,7 @@ impl BankingStage {
poh: &Arc<Mutex<PohRecorder>>,
chunk_offset: usize,
transaction_status_sender: Option<TransactionStatusSender>,
gossip_vote_sender: &ReplayVoteSender,
) -> (Result<usize, PohRecorderError>, Vec<usize>) {
let mut lock_time = Measure::start("lock_time");
// Once accounts are locked, other threads cannot encode transactions that will modify the
@ -590,6 +607,7 @@ impl BankingStage {
poh,
&batch,
transaction_status_sender,
gossip_vote_sender,
);
retryable_txs.iter_mut().for_each(|x| *x += chunk_offset);
@ -618,6 +636,7 @@ impl BankingStage {
transactions: &[Transaction],
poh: &Arc<Mutex<PohRecorder>>,
transaction_status_sender: Option<TransactionStatusSender>,
gossip_vote_sender: &ReplayVoteSender,
) -> (usize, Vec<usize>) {
let mut chunk_start = 0;
let mut unprocessed_txs = vec![];
@ -633,6 +652,7 @@ impl BankingStage {
poh,
chunk_start,
transaction_status_sender.clone(),
gossip_vote_sender,
);
trace!("process_transactions result: {:?}", result);
@ -764,6 +784,7 @@ impl BankingStage {
msgs: &Packets,
packet_indexes: Vec<usize>,
transaction_status_sender: Option<TransactionStatusSender>,
gossip_vote_sender: &ReplayVoteSender,
) -> (usize, usize, Vec<usize>) {
let (transactions, transaction_to_packet_indexes) =
Self::transactions_from_packets(msgs, &packet_indexes);
@ -775,8 +796,13 @@ impl BankingStage {
let tx_len = transactions.len();
let (processed, unprocessed_tx_indexes) =
Self::process_transactions(bank, &transactions, poh, transaction_status_sender);
let (processed, unprocessed_tx_indexes) = Self::process_transactions(
bank,
&transactions,
poh,
transaction_status_sender,
gossip_vote_sender,
);
let unprocessed_tx_count = unprocessed_tx_indexes.len();
@ -846,6 +872,7 @@ impl BankingStage {
.collect()
}
#[allow(clippy::too_many_arguments)]
/// Process the incoming packets
pub fn process_packets(
my_pubkey: &Pubkey,
@ -856,6 +883,7 @@ impl BankingStage {
id: u32,
batch_limit: usize,
transaction_status_sender: Option<TransactionStatusSender>,
gossip_vote_sender: &ReplayVoteSender,
) -> Result<UnprocessedPackets, RecvTimeoutError> {
let mut recv_time = Measure::start("process_packets_recv");
let mms = verified_receiver.recv_timeout(recv_timeout)?;
@ -898,6 +926,7 @@ impl BankingStage {
&msgs,
packet_indexes,
transaction_status_sender.clone(),
gossip_vote_sender,
);
new_tx_count += processed;
@ -1045,6 +1074,7 @@ mod tests {
let bank = Arc::new(Bank::new(&genesis_config));
let (verified_sender, verified_receiver) = unbounded();
let (vote_sender, vote_receiver) = unbounded();
let (gossip_vote_sender, _gossip_vote_receiver) = unbounded();
let ledger_path = get_tmp_ledger_path!();
{
let blockstore = Arc::new(
@ -1061,6 +1091,7 @@ mod tests {
verified_receiver,
vote_receiver,
None,
gossip_vote_sender,
);
drop(verified_sender);
drop(vote_sender);
@ -1095,12 +1126,15 @@ mod tests {
create_test_recorder(&bank, &blockstore, Some(poh_config));
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = Arc::new(cluster_info);
let (gossip_vote_sender, _gossip_vote_receiver) = unbounded();
let banking_stage = BankingStage::new(
&cluster_info,
&poh_recorder,
verified_receiver,
vote_receiver,
None,
gossip_vote_sender,
);
trace!("sending bank");
drop(verified_sender);
@ -1158,12 +1192,15 @@ mod tests {
create_test_recorder(&bank, &blockstore, Some(poh_config));
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
let cluster_info = Arc::new(cluster_info);
let (gossip_vote_sender, _gossip_vote_receiver) = unbounded();
let banking_stage = BankingStage::new(
&cluster_info,
&poh_recorder,
verified_receiver,
vote_receiver,
None,
gossip_vote_sender,
);
// fund another account so we can send 2 good transactions in a single batch.
@ -1284,6 +1321,8 @@ mod tests {
let (vote_sender, vote_receiver) = unbounded();
let ledger_path = get_tmp_ledger_path!();
{
let (gossip_vote_sender, _gossip_vote_receiver) = unbounded();
let entry_receiver = {
// start a banking_stage to eat verified receiver
let bank = Arc::new(Bank::new(&genesis_config));
@ -1306,6 +1345,7 @@ mod tests {
vote_receiver,
2,
None,
gossip_vote_sender,
);
// wait for banking_stage to eat the packets
@ -1683,6 +1723,7 @@ mod tests {
let poh_recorder = Arc::new(Mutex::new(poh_recorder));
poh_recorder.lock().unwrap().set_working_bank(working_bank);
let (gossip_vote_sender, _gossip_vote_receiver) = unbounded();
BankingStage::process_and_record_transactions(
&bank,
@ -1690,6 +1731,7 @@ mod tests {
&poh_recorder,
0,
None,
&gossip_vote_sender,
)
.0
.unwrap();
@ -1726,6 +1768,7 @@ mod tests {
&poh_recorder,
0,
None,
&gossip_vote_sender,
)
.0,
Err(PohRecorderError::MaxHeightReached)
@ -1777,12 +1820,15 @@ mod tests {
poh_recorder.lock().unwrap().set_working_bank(working_bank);
let (gossip_vote_sender, _gossip_vote_receiver) = unbounded();
let (result, unprocessed) = BankingStage::process_and_record_transactions(
&bank,
&transactions,
&poh_recorder,
0,
None,
&gossip_vote_sender,
);
assert!(result.is_ok());
@ -1866,8 +1912,16 @@ mod tests {
// record
let poh_recorder = Arc::new(Mutex::new(poh_recorder));
let (gossip_vote_sender, _gossip_vote_receiver) = unbounded();
let (processed_transactions_count, mut retryable_txs) =
BankingStage::process_transactions(&bank, &transactions, &poh_recorder, None);
BankingStage::process_transactions(
&bank,
&transactions,
&poh_recorder,
None,
&gossip_vote_sender,
);
assert_eq!(processed_transactions_count, 0,);
@ -1944,12 +1998,15 @@ mod tests {
&Arc::new(AtomicBool::new(false)),
);
let (gossip_vote_sender, _gossip_vote_receiver) = unbounded();
let _ = BankingStage::process_and_record_transactions(
&bank,
&transactions,
&poh_recorder,
0,
Some(transaction_status_sender),
&gossip_vote_sender,
);
transaction_status_service.join().unwrap();
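
The banking-stage hunks above are where the new votes originate: the commit path now keeps the full `tx_results` returned by `commit_transactions` and calls `bank_utils::find_and_send_votes(txs, &tx_results, Some(gossip_vote_sender))` before handing `tx_results.processing_results` to the transaction-status sender. A rough sketch of what such a helper plausibly does, using stand-in types rather than the real `solana_runtime::bank_utils` implementation:

```rust
use crossbeam_channel::{unbounded, Sender};

// Stand-in types for illustration; the real code works on sanitized
// transactions and the results returned by commit_transactions.
struct Transaction {
    is_vote: bool,
    voter: String,
    slot: u64,
}
struct ProcessingResult {
    committed: bool,
}
type ReplayVoteSender = Sender<(String, u64)>;

// Hypothetical sketch: walk the just-committed batch, pick out the vote
// transactions, and push them into the channel the banking stage was given.
fn find_and_send_votes(
    txs: &[Transaction],
    results: &[ProcessingResult],
    vote_sender: Option<&ReplayVoteSender>,
) {
    if let Some(sender) = vote_sender {
        for (tx, res) in txs.iter().zip(results) {
            if res.committed && tx.is_vote {
                // A send failure only means the listener went away; ignore it.
                let _ = sender.send((tx.voter.clone(), tx.slot));
            }
        }
    }
}

fn main() {
    let (sender, receiver) = unbounded();
    let txs = vec![
        Transaction { is_vote: true, voter: "validator-a".into(), slot: 42 },
        Transaction { is_vote: false, voter: String::new(), slot: 42 },
    ];
    let results = vec![
        ProcessingResult { committed: true },
        ProcessingResult { committed: true },
    ];
    find_and_send_votes(&txs, &results, Some(&sender));
    assert_eq!(receiver.try_iter().count(), 1); // only the vote was forwarded
}
```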


@ -15,10 +15,7 @@ use crossbeam_channel::{
};
use itertools::izip;
use log::*;
use solana_ledger::{
blockstore::Blockstore,
blockstore_processor::{ReplayVotesReceiver, ReplayedVote},
};
use solana_ledger::blockstore::Blockstore;
use solana_metrics::inc_new_counter_debug;
use solana_perf::packet::{self, Packets};
use solana_runtime::{
@ -26,6 +23,7 @@ use solana_runtime::{
bank_forks::BankForks,
epoch_stakes::{EpochAuthorizedVoters, EpochStakes},
stakes::Stakes,
vote_sender_types::{ReplayVoteReceiver, ReplayedVote},
};
use solana_sdk::{
clock::{Epoch, Slot},
@ -248,7 +246,7 @@ impl ClusterInfoVoteListener {
bank_forks: Arc<RwLock<BankForks>>,
subscriptions: Arc<RpcSubscriptions>,
verified_vote_sender: VerifiedVoteSender,
replay_votes_receiver: ReplayVotesReceiver,
replay_votes_receiver: ReplayVoteReceiver,
blockstore: Arc<Blockstore>,
) -> Self {
let exit_ = exit.clone();
@ -420,7 +418,7 @@ impl ClusterInfoVoteListener {
bank_forks: Arc<RwLock<BankForks>>,
subscriptions: Arc<RpcSubscriptions>,
verified_vote_sender: VerifiedVoteSender,
replay_votes_receiver: ReplayVotesReceiver,
replay_votes_receiver: ReplayVoteReceiver,
blockstore: Arc<Blockstore>,
) -> Result<()> {
let mut optimistic_confirmation_verifier =
@ -478,7 +476,7 @@ impl ClusterInfoVoteListener {
root_bank: &Bank,
subscriptions: &RpcSubscriptions,
verified_vote_sender: &VerifiedVoteSender,
replay_votes_receiver: &ReplayVotesReceiver,
replay_votes_receiver: &ReplayVoteReceiver,
) -> Result<Vec<(Slot, Hash)>> {
Self::get_and_process_votes(
gossip_vote_txs_receiver,
@ -496,7 +494,7 @@ impl ClusterInfoVoteListener {
root_bank: &Bank,
subscriptions: &RpcSubscriptions,
verified_vote_sender: &VerifiedVoteSender,
replay_votes_receiver: &ReplayVotesReceiver,
replay_votes_receiver: &ReplayVoteReceiver,
) -> Result<Vec<(Slot, Hash)>> {
let mut sel = Select::new();
sel.recv(gossip_vote_txs_receiver);
@ -772,12 +770,12 @@ impl ClusterInfoVoteListener {
#[cfg(test)]
mod tests {
use super::*;
use solana_ledger::blockstore_processor::ReplayVotesSender;
use solana_perf::packet;
use solana_runtime::{
bank::Bank,
commitment::BlockCommitmentCache,
genesis_utils::{self, GenesisConfigInfo, ValidatorVoteKeypairs},
vote_sender_types::ReplayVoteSender,
};
use solana_sdk::{
hash::Hash,
@ -1040,7 +1038,7 @@ mod tests {
validator_voting_keypairs: &[ValidatorVoteKeypairs],
switch_proof_hash: Option<Hash>,
votes_sender: &VerifiedVoteTransactionsSender,
replay_votes_sender: &ReplayVotesSender,
replay_votes_sender: &ReplayVoteSender,
) {
validator_voting_keypairs.iter().for_each(|keypairs| {
let node_keypair = &keypairs.node_keypair;
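
On the listener side the hunks above are mostly a rename (`ReplayVotesReceiver`/`ReplayVotesSender` become `ReplayVoteReceiver`/`ReplayVoteSender`, now exported from `solana_runtime::vote_sender_types`), while the processing loop keeps the same shape: it selects over the gossip vote-transaction channel and the replay/banking vote channel and folds both into vote tracking. A minimal sketch of that select-then-drain pattern, with toy payload types:

```rust
use crossbeam_channel::{unbounded, Receiver, Select};

type GossipVoteTx = String;        // stand-in for a verified vote transaction
type ReplayedVote = (String, u64); // stand-in for (voter, slot)

// Hypothetical sketch of waiting on whichever channel is ready first,
// mirroring the Select::new()/sel.recv(..) pattern visible in the hunk above.
fn get_and_process_votes(
    gossip_vote_txs_receiver: &Receiver<Vec<GossipVoteTx>>,
    replay_votes_receiver: &Receiver<ReplayedVote>,
) {
    let mut sel = Select::new();
    sel.recv(gossip_vote_txs_receiver);
    sel.recv(replay_votes_receiver);
    let _ready = sel.ready(); // block until either side has data

    // Drain both sides without blocking; both feed the same vote tracking.
    for txs in gossip_vote_txs_receiver.try_iter() {
        println!("got {} gossip vote transactions", txs.len());
    }
    for (voter, slot) in replay_votes_receiver.try_iter() {
        println!("got replayed/banking vote from {voter} for slot {slot}");
    }
}

fn main() {
    let (gossip_tx, gossip_rx) = unbounded();
    let (replay_tx, replay_rx) = unbounded();
    gossip_tx.send(vec!["vote-tx".to_string()]).unwrap();
    replay_tx.send(("validator-a".to_string(), 7)).unwrap();
    get_and_process_votes(&gossip_rx, &replay_rx);
}
```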


@ -21,9 +21,7 @@ use crate::{
use solana_ledger::{
block_error::BlockError,
blockstore::Blockstore,
blockstore_processor::{
self, BlockstoreProcessorError, ReplayVotesSender, TransactionStatusSender,
},
blockstore_processor::{self, BlockstoreProcessorError, TransactionStatusSender},
entry::VerifyRecyclers,
leader_schedule_cache::LeaderScheduleCache,
};
@ -31,7 +29,7 @@ use solana_measure::{measure::Measure, thread_mem_usage};
use solana_metrics::inc_new_counter_info;
use solana_runtime::{
bank::Bank, bank_forks::BankForks, commitment::BlockCommitmentCache,
snapshot_package::AccountsPackageSender,
snapshot_package::AccountsPackageSender, vote_sender_types::ReplayVoteSender,
};
use solana_sdk::{
clock::{Slot, NUM_CONSECUTIVE_LEADER_SLOTS},
@ -223,7 +221,7 @@ impl ReplayStage {
cluster_slots: Arc<ClusterSlots>,
retransmit_slots_sender: RetransmitSlotsSender,
duplicate_slots_reset_receiver: DuplicateSlotsResetReceiver,
replay_votes_sender: ReplayVotesSender,
replay_vote_sender: ReplayVoteSender,
) -> Self {
let ReplayStageConfig {
my_pubkey,
@ -344,7 +342,7 @@ impl ReplayStage {
&verify_recyclers,
&mut heaviest_subtree_fork_choice,
&subscriptions,
&replay_votes_sender,
&replay_vote_sender,
);
replay_active_banks_time.stop();
Self::report_memory(&allocated, "replay_active_banks", start);
@ -940,7 +938,7 @@ impl ReplayStage {
blockstore: &Blockstore,
bank_progress: &mut ForkProgress,
transaction_status_sender: Option<TransactionStatusSender>,
replay_votes_sender: &ReplayVotesSender,
replay_vote_sender: &ReplayVoteSender,
verify_recyclers: &VerifyRecyclers,
) -> result::Result<usize, BlockstoreProcessorError> {
let tx_count_before = bank_progress.replay_progress.num_txs;
@ -951,7 +949,7 @@ impl ReplayStage {
&mut bank_progress.replay_progress,
false,
transaction_status_sender,
Some(replay_votes_sender),
Some(replay_vote_sender),
None,
verify_recyclers,
);
@ -1214,7 +1212,7 @@ impl ReplayStage {
verify_recyclers: &VerifyRecyclers,
heaviest_subtree_fork_choice: &mut HeaviestSubtreeForkChoice,
subscriptions: &Arc<RpcSubscriptions>,
replay_votes_sender: &ReplayVotesSender,
replay_vote_sender: &ReplayVoteSender,
) -> bool {
let mut did_complete_bank = false;
let mut tx_count = 0;
@ -1260,7 +1258,7 @@ impl ReplayStage {
&blockstore,
bank_progress,
transaction_status_sender.clone(),
replay_votes_sender,
replay_vote_sender,
verify_recyclers,
);
match replay_result {
@ -2417,7 +2415,7 @@ pub(crate) mod tests {
F: Fn(&Keypair, Arc<Bank>) -> Vec<Shred>,
{
let ledger_path = get_tmp_ledger_path!();
let (replay_votes_sender, _replay_votes_receiver) = unbounded();
let (replay_vote_sender, _replay_vote_receiver) = unbounded();
let res = {
let blockstore = Arc::new(
Blockstore::open(&ledger_path)
@ -2442,8 +2440,8 @@ pub(crate) mod tests {
&blockstore,
&mut bank0_progress,
None,
&replay_votes_sender,
&VerifyRecyclers::default(),
&replay_vote_sender,
&&VerifyRecyclers::default(),
);
// Check that the erroring bank was marked as dead in the progress map
@ -2606,7 +2604,7 @@ pub(crate) mod tests {
blockstore.set_roots(&[slot]).unwrap();
let (transaction_status_sender, transaction_status_receiver) = unbounded();
let (replay_votes_sender, _replay_votes_receiver) = unbounded();
let (replay_vote_sender, _replay_vote_receiver) = unbounded();
let transaction_status_service = TransactionStatusService::new(
transaction_status_receiver,
blockstore,
@ -2620,7 +2618,7 @@ pub(crate) mod tests {
&entries,
true,
Some(transaction_status_sender),
Some(&replay_votes_sender),
Some(&replay_vote_sender),
);
transaction_status_service.join().unwrap();


@ -13,11 +13,11 @@ use crate::{
sigverify_stage::SigVerifyStage,
};
use crossbeam_channel::unbounded;
use solana_ledger::{
blockstore::Blockstore,
blockstore_processor::{ReplayVotesReceiver, TransactionStatusSender},
use solana_ledger::{blockstore::Blockstore, blockstore_processor::TransactionStatusSender};
use solana_runtime::{
bank_forks::BankForks,
vote_sender_types::{ReplayVoteReceiver, ReplayVoteSender},
};
use solana_runtime::bank_forks::BankForks;
use std::{
net::UdpSocket,
sync::{
@ -55,7 +55,8 @@ impl Tpu {
vote_tracker: Arc<VoteTracker>,
bank_forks: Arc<RwLock<BankForks>>,
verified_vote_sender: VerifiedVoteSender,
replay_votes_receiver: ReplayVotesReceiver,
replay_vote_receiver: ReplayVoteReceiver,
replay_vote_sender: ReplayVoteSender,
) -> Self {
let (packet_sender, packet_receiver) = channel();
let fetch_stage = FetchStage::new_with_sender(
@ -82,7 +83,7 @@ impl Tpu {
bank_forks,
subscriptions.clone(),
verified_vote_sender,
replay_votes_receiver,
replay_vote_receiver,
blockstore.clone(),
);
@ -92,6 +93,7 @@ impl Tpu {
verified_receiver,
verified_vote_packets_receiver,
transaction_status_sender,
replay_vote_sender,
);
let broadcast_stage = broadcast_type.new_broadcast_stage(


@ -57,7 +57,7 @@ impl TransactionStatusService {
} = write_transaction_status_receiver.recv_timeout(Duration::from_secs(1))?;
let slot = bank.slot();
for (((transaction, (status, hash_age_kind)), pre_balances), post_balances) in
for ((((_, transaction), (status, hash_age_kind)), pre_balances), post_balances) in
OrderedIterator::new(&transactions, iteration_order.as_deref())
.zip(statuses)
.zip(balances.pre_balances)
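
The only change in the hunk above is one extra layer of destructuring: the iterator apparently now yields `(index, transaction)` pairs (the index coming from the iteration order), so the loop binds `(_, transaction)` where it previously bound `transaction` directly. A tiny self-contained illustration of how the chained `.zip()` calls produce that nested tuple pattern, using `enumerate()` as a stand-in for `OrderedIterator`:

```rust
fn main() {
    let transactions = vec!["tx0", "tx1"];
    let statuses = vec![Ok::<(), ()>(()), Err(())];
    let pre_balances = vec![vec![10u64], vec![20]];
    let post_balances = vec![vec![9u64], vec![21]];

    for (((indexed_tx, status), pre), post) in transactions
        .iter()
        .enumerate() // stands in for an iterator yielding (index, transaction)
        .zip(statuses)
        .zip(pre_balances)
        .zip(post_balances)
    {
        let (_, transaction) = indexed_tx; // ignore the index, keep the transaction
        println!("{transaction}: {status:?}, balances {pre:?} -> {post:?}");
    }
}
```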


@ -21,12 +21,12 @@ use crate::{
use crossbeam_channel::unbounded;
use solana_ledger::{
blockstore::{Blockstore, CompletedSlotsReceiver},
blockstore_processor::{ReplayVotesSender, TransactionStatusSender},
blockstore_processor::TransactionStatusSender,
leader_schedule_cache::LeaderScheduleCache,
};
use solana_runtime::{
bank_forks::BankForks, commitment::BlockCommitmentCache,
snapshot_package::AccountsPackageSender,
snapshot_package::AccountsPackageSender, vote_sender_types::ReplayVoteSender,
};
use solana_sdk::{
pubkey::Pubkey,
@ -98,7 +98,7 @@ impl Tvu {
vote_tracker: Arc<VoteTracker>,
retransmit_slots_sender: RetransmitSlotsSender,
verified_vote_receiver: VerifiedVoteReceiver,
replay_votes_sender: ReplayVotesSender,
replay_vote_sender: ReplayVoteSender,
tvu_config: TvuConfig,
) -> Self {
let keypair: Arc<Keypair> = cluster_info.keypair.clone();
@ -199,7 +199,7 @@ impl Tvu {
cluster_slots,
retransmit_slots_sender,
duplicate_slots_reset_receiver,
replay_votes_sender,
replay_vote_sender,
);
let ledger_cleanup_service = tvu_config.max_ledger_shreds.map(|max_ledger_shreds| {
@ -285,7 +285,7 @@ pub mod tests {
let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default()));
let (retransmit_slots_sender, _retransmit_slots_receiver) = unbounded();
let (_verified_vote_sender, verified_vote_receiver) = unbounded();
let (replay_votes_sender, _replay_votes_receiver) = unbounded();
let (replay_vote_sender, _replay_vote_receiver) = unbounded();
let bank_forks = Arc::new(RwLock::new(bank_forks));
let tvu = Tvu::new(
&vote_keypair.pubkey(),
@ -319,7 +319,7 @@ pub mod tests {
Arc::new(VoteTracker::new(&bank)),
retransmit_slots_sender,
verified_vote_receiver,
replay_votes_sender,
replay_vote_sender,
TvuConfig::default(),
);
exit.store(true, Ordering::Relaxed);


@ -28,7 +28,7 @@ use solana_ledger::{
bank_forks_utils,
blockstore::{Blockstore, CompletedSlotsReceiver, PurgeType},
blockstore_db::BlockstoreRecoveryMode,
blockstore_processor::{self, ReplayVotesSender, TransactionStatusSender},
blockstore_processor::{self, TransactionStatusSender},
create_new_tmp_ledger,
leader_schedule::FixedSchedule,
leader_schedule_cache::LeaderScheduleCache,
@ -224,7 +224,7 @@ impl Validator {
validator_exit.register_exit(Box::new(move || exit_.store(true, Ordering::Relaxed)));
let validator_exit = Arc::new(RwLock::new(Some(validator_exit)));
let (replay_votes_sender, replay_votes_receiver) = unbounded();
let (replay_vote_sender, replay_vote_receiver) = unbounded();
let (
genesis_config,
bank_forks,
@ -239,7 +239,7 @@ impl Validator {
rewards_recorder_sender,
rewards_recorder_service,
},
) = new_banks_from_ledger(config, ledger_path, poh_verify, &exit, &replay_votes_sender);
) = new_banks_from_ledger(config, ledger_path, poh_verify, &exit);
let leader_schedule_cache = Arc::new(leader_schedule_cache);
let bank = bank_forks.working_bank();
@ -465,7 +465,7 @@ impl Validator {
vote_tracker.clone(),
retransmit_slots_sender,
verified_vote_receiver,
replay_votes_sender,
replay_vote_sender.clone(),
TvuConfig {
max_ledger_shreds: config.max_ledger_shreds,
halt_on_trusted_validators_accounts_hash_mismatch: config
@ -493,7 +493,8 @@ impl Validator {
vote_tracker,
bank_forks,
verified_vote_sender,
replay_votes_receiver,
replay_vote_receiver,
replay_vote_sender,
);
datapoint_info!("validator-new", ("id", id.to_string(), String));
@ -587,7 +588,6 @@ fn new_banks_from_ledger(
ledger_path: &Path,
poh_verify: bool,
exit: &Arc<AtomicBool>,
replay_votes_sender: &ReplayVotesSender,
) -> (
GenesisConfig,
BankForks,
@ -649,7 +649,6 @@ fn new_banks_from_ledger(
transaction_history_services
.transaction_status_sender
.clone(),
Some(&replay_votes_sender),
)
.unwrap_or_else(|err| {
error!("Failed to load ledger: {:?}", err);