* chore: cargo +nightly clippy --fix -Z unstable-options

  (cherry picked from commit 6514096a67)

  # Conflicts:
  #  core/src/banking_stage.rs
  #  core/src/cost_model.rs
  #  core/src/cost_tracker.rs
  #  core/src/execute_cost_table.rs
  #  core/src/replay_stage.rs
  #  core/src/tvu.rs
  #  ledger-tool/src/main.rs
  #  programs/bpf_loader/build.rs
  #  rbpf-cli/src/main.rs
  #  sdk/cargo-build-bpf/src/main.rs
  #  sdk/cargo-test-bpf/src/main.rs
  #  sdk/src/secp256k1_instruction.rs

* chore: cargo fmt

  (cherry picked from commit 789f33e8db)

* Updates BPF program assert_instruction_count tests.

  (cherry picked from commit c1e03f3410)

  # Conflicts:
  #  programs/bpf/tests/programs.rs

* Resolve conflicts

Co-authored-by: Alexander Meißner <AlexanderMeissner@gmx.net>
Co-authored-by: Michael Vines <mvines@gmail.com>
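Most of the changes below are mechanical applications of clippy's `needless_borrow` lint: when a binding is already a reference, taking another `&` at the call site creates a double reference that the compiler immediately coerces back down, so the extra borrow is dropped. A minimal, self-contained sketch of the pattern (the `Packet` / `packet_size` names are invented for illustration and are not taken from this repository):

    struct Packet { size: usize }

    fn packet_size(packet: &Packet) -> usize {
        packet.size
    }

    fn main() {
        let packet = Packet { size: 42 };
        let packet_ref: &Packet = &packet;
        // Before the fix: `packet_ref` is already `&Packet`, so `&packet_ref`
        // creates a needless `&&Packet` that is auto-dereferenced at the call site.
        let a = packet_size(&packet_ref);
        // After `cargo clippy --fix` (needless_borrow): pass the reference through.
        let b = packet_size(packet_ref);
        assert_eq!(a, b);
    }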
@@ -148,7 +148,7 @@ impl AccountsHashVerifier {
 for (slot, hash) in hashes.iter() {
 slot_to_hash.insert(*slot, *hash);
 }
-if Self::should_halt(&cluster_info, trusted_validators, &mut slot_to_hash) {
+if Self::should_halt(cluster_info, trusted_validators, &mut slot_to_hash) {
 exit.store(true, Ordering::Relaxed);
 }
 }
@@ -352,9 +352,9 @@ impl BankingStage {
 // We've hit the end of this slot, no need to perform more processing,
 // just filter the remaining packets for the invalid (e.g. too old) ones
 let new_unprocessed_indexes = Self::filter_unprocessed_packets(
-&bank,
-&msgs,
-&original_unprocessed_indexes,
+bank,
+msgs,
+original_unprocessed_indexes,
 my_pubkey,
 *next_leader,
 );
@@ -369,8 +369,8 @@ impl BankingStage {
 Self::process_packets_transactions(
 &bank,
 &bank_creation_time,
-&recorder,
-&msgs,
+recorder,
+msgs,
 original_unprocessed_indexes.to_owned(),
 transaction_status_sender.clone(),
 gossip_vote_sender,
@@ -403,7 +403,7 @@ impl BankingStage {
 // `original_unprocessed_indexes` must have remaining packets to process
 // if not yet processed.
 assert!(Self::packet_has_more_unprocessed_transactions(
-&original_unprocessed_indexes
+original_unprocessed_indexes
 ));
 true
 }
@@ -597,7 +597,7 @@ impl BankingStage {
 let decision = Self::process_buffered_packets(
 &my_pubkey,
 &socket,
-&poh_recorder,
+poh_recorder,
 cluster_info,
 &mut buffered_packets,
 enable_forwarding,
@@ -627,8 +627,8 @@ impl BankingStage {

 match Self::process_packets(
 &my_pubkey,
-&verified_receiver,
-&poh_recorder,
+verified_receiver,
+poh_recorder,
 recv_start,
 recv_timeout,
 id,
@@ -738,7 +738,7 @@ impl BankingStage {
 let mut mint_decimals: HashMap<Pubkey, u8> = HashMap::new();

 let pre_token_balances = if transaction_status_sender.is_some() {
-collect_token_balances(&bank, &batch, &mut mint_decimals)
+collect_token_balances(bank, batch, &mut mint_decimals)
 } else {
 vec![]
 };
@@ -798,7 +798,7 @@ impl BankingStage {
 if let Some(transaction_status_sender) = transaction_status_sender {
 let txs = batch.transactions_iter().cloned().collect();
 let post_balances = bank.collect_balances(batch);
-let post_token_balances = collect_token_balances(&bank, &batch, &mut mint_decimals);
+let post_token_balances = collect_token_balances(bank, batch, &mut mint_decimals);
 transaction_status_sender.send_transaction_status_batch(
 bank.clone(),
 txs,
@@ -1249,7 +1249,7 @@ impl BankingStage {
 &bank,
 &msgs,
 &packet_indexes,
-&my_pubkey,
+my_pubkey,
 next_leader,
 );
 Self::push_unprocessed(
@@ -2449,7 +2449,7 @@ mod tests {
 Receiver<WorkingBankEntry>,
 JoinHandle<()>,
 ) {
-Blockstore::destroy(&ledger_path).unwrap();
+Blockstore::destroy(ledger_path).unwrap();
 let genesis_config_info = create_slow_genesis_config(10_000);
 let GenesisConfigInfo {
 genesis_config,
@@ -2457,8 +2457,8 @@ mod tests {
 ..
 } = &genesis_config_info;
 let blockstore =
-Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger");
-let bank = Arc::new(Bank::new_no_wallclock_throttle(&genesis_config));
+Blockstore::open(ledger_path).expect("Expected to be able to open database ledger");
+let bank = Arc::new(Bank::new_no_wallclock_throttle(genesis_config));
 let exit = Arc::new(AtomicBool::default());
 let (poh_recorder, entry_receiver, record_receiver) = PohRecorder::new(
 bank.tick_height(),
@@ -2479,9 +2479,9 @@ mod tests {
 let pubkey1 = solana_sdk::pubkey::new_rand();
 let pubkey2 = solana_sdk::pubkey::new_rand();
 let transactions = vec![
-system_transaction::transfer(&mint_keypair, &pubkey0, 1, genesis_config.hash()),
-system_transaction::transfer(&mint_keypair, &pubkey1, 1, genesis_config.hash()),
-system_transaction::transfer(&mint_keypair, &pubkey2, 1, genesis_config.hash()),
+system_transaction::transfer(mint_keypair, &pubkey0, 1, genesis_config.hash()),
+system_transaction::transfer(mint_keypair, &pubkey1, 1, genesis_config.hash()),
+system_transaction::transfer(mint_keypair, &pubkey2, 1, genesis_config.hash()),
 ];
 let poh_simulator = simulate_poh(record_receiver, &poh_recorder);
@@ -408,7 +408,7 @@ pub fn broadcast_shreds(
 let packets: Vec<_> = shreds
 .iter()
 .map(|shred| {
-let broadcast_index = weighted_best(&peers_and_stakes, shred.seed());
+let broadcast_index = weighted_best(peers_and_stakes, shred.seed());

 (&shred.payload, &peers[broadcast_index].tvu)
 })
@@ -429,7 +429,7 @@ pub fn broadcast_shreds(
 send_mmsg_time.stop();
 transmit_stats.send_mmsg_elapsed += send_mmsg_time.as_us();

-let num_live_peers = num_live_peers(&peers);
+let num_live_peers = num_live_peers(peers);
 update_peer_stats(
 num_live_peers,
 broadcast_len as i64 + 1,
@@ -212,9 +212,9 @@ impl BroadcastRun for BroadcastDuplicatesRun {
 .collect();
 stakes.sort_by(|(l_key, l_stake), (r_key, r_stake)| {
 if r_stake == l_stake {
-l_key.cmp(&r_key)
+l_key.cmp(r_key)
 } else {
-r_stake.cmp(&l_stake)
+r_stake.cmp(l_stake)
 }
 });
@@ -161,7 +161,7 @@ impl StandardBroadcastRun {
 ) -> Result<()> {
 let (bsend, brecv) = channel();
 let (ssend, srecv) = channel();
-self.process_receive_results(&blockstore, &ssend, &bsend, receive_results)?;
+self.process_receive_results(blockstore, &ssend, &bsend, receive_results)?;
 let srecv = Arc::new(Mutex::new(srecv));
 let brecv = Arc::new(Mutex::new(brecv));
 //data
@@ -110,7 +110,7 @@ impl VoteTracker {
 epoch_schedule: *root_bank.epoch_schedule(),
 ..VoteTracker::default()
 };
-vote_tracker.progress_with_new_root_bank(&root_bank);
+vote_tracker.progress_with_new_root_bank(root_bank);
 assert_eq!(
 *vote_tracker.leader_schedule_epoch.read().unwrap(),
 root_bank.get_leader_schedule_epoch(root_bank.slot())
@@ -603,7 +603,7 @@ impl ClusterInfoVoteListener {
 if slot == last_vote_slot {
 let vote_accounts = Stakes::vote_accounts(epoch_stakes.stakes());
 let stake = vote_accounts
-.get(&vote_pubkey)
+.get(vote_pubkey)
 .map(|(stake, _)| *stake)
 .unwrap_or_default();
 let total_stake = epoch_stakes.total_stake();
@@ -692,7 +692,7 @@ impl ClusterInfoVoteListener {
 // voters trying to make votes for slots earlier than the epoch for
 // which they are authorized
 let actual_authorized_voter =
-vote_tracker.get_authorized_voter(&vote_pubkey, *last_vote_slot);
+vote_tracker.get_authorized_voter(vote_pubkey, *last_vote_slot);

 if actual_authorized_voter.is_none() {
 return false;
@@ -700,7 +700,7 @@ impl ClusterInfoVoteListener {

 // Voting without the correct authorized pubkey, dump the vote
 if !VoteTracker::vote_contains_authorized_voter(
-&gossip_tx,
+gossip_tx,
 &actual_authorized_voter.unwrap(),
 ) {
 return false;
@@ -738,7 +738,7 @@ impl ClusterInfoVoteListener {
 Self::track_new_votes_and_notify_confirmations(
 vote,
 &vote_pubkey,
-&vote_tracker,
+vote_tracker,
 root_bank,
 subscriptions,
 verified_vote_sender,
@@ -192,7 +192,7 @@ fn get_cluster_duplicate_confirmed_hash<'a>(
 slot, gossip_duplicate_confirmed_hash, local_duplicate_confirmed_hash
 );
 }
-Some(&local_frozen_hash)
+Some(local_frozen_hash)
 }
 (Some(local_frozen_hash), None) => Some(local_frozen_hash),
 _ => gossip_duplicate_confirmed_hash,
@@ -352,15 +352,15 @@ mod tests {
 if *a <= root {
 let mut expected = BlockCommitment::default();
 expected.increase_rooted_stake(lamports);
-assert_eq!(*commitment.get(&a).unwrap(), expected);
+assert_eq!(*commitment.get(a).unwrap(), expected);
 } else if i <= 4 {
 let mut expected = BlockCommitment::default();
 expected.increase_confirmation_stake(2, lamports);
-assert_eq!(*commitment.get(&a).unwrap(), expected);
+assert_eq!(*commitment.get(a).unwrap(), expected);
 } else if i <= 6 {
 let mut expected = BlockCommitment::default();
 expected.increase_confirmation_stake(1, lamports);
-assert_eq!(*commitment.get(&a).unwrap(), expected);
+assert_eq!(*commitment.get(a).unwrap(), expected);
 }
 }
 assert_eq!(rooted_stake[0], (root, lamports));
@@ -164,7 +164,7 @@ impl Tower {
 bank: &Bank,
 path: &Path,
 ) -> Self {
-let path = Self::get_filename(&path, node_pubkey);
+let path = Self::get_filename(path, node_pubkey);
 let tmp_path = Self::get_tmp_filename(&path);
 let mut tower = Self {
 node_pubkey: *node_pubkey,
@@ -205,8 +205,8 @@ impl Tower {
 crate::replay_stage::ReplayStage::initialize_progress_and_fork_choice(
 root_bank.deref(),
 bank_forks.frozen_banks().values().cloned().collect(),
-&my_pubkey,
-&vote_account,
+my_pubkey,
+vote_account,
 );
 let root = root_bank.slot();

@@ -218,13 +218,7 @@ impl Tower {
 )
 .clone();

-Self::new(
-&my_pubkey,
-&vote_account,
-root,
-&heaviest_bank,
-&ledger_path,
-)
+Self::new(my_pubkey, vote_account, root, &heaviest_bank, ledger_path)
 }

 pub(crate) fn collect_vote_lockouts<F>(
@@ -736,7 +730,7 @@ impl Tower {
 // finding any lockout intervals in the `lockout_intervals` tree
 // for this bank that contain `last_vote`.
 let lockout_intervals = &progress
-.get(&candidate_slot)
+.get(candidate_slot)
 .unwrap()
 .fork_stats
 .lockout_intervals;
@@ -1328,7 +1322,7 @@ pub fn reconcile_blockstore_roots_with_tower(
 if last_blockstore_root < tower_root {
 // Ensure tower_root itself to exist and be marked as rooted in the blockstore
 // in addition to its ancestors.
-let new_roots: Vec<_> = AncestorIterator::new_inclusive(tower_root, &blockstore)
+let new_roots: Vec<_> = AncestorIterator::new_inclusive(tower_root, blockstore)
 .take_while(|current| match current.cmp(&last_blockstore_root) {
 Ordering::Greater => true,
 Ordering::Equal => false,
@@ -1490,7 +1484,7 @@ pub mod test {
 tower: &mut Tower,
 ) -> Vec<HeaviestForkFailures> {
 // Try to simulate the vote
-let my_keypairs = self.validator_keypairs.get(&my_pubkey).unwrap();
+let my_keypairs = self.validator_keypairs.get(my_pubkey).unwrap();
 let my_vote_pubkey = my_keypairs.vote_keypair.pubkey();
 let ancestors = self.bank_forks.read().unwrap().ancestors();
 let mut frozen_banks: Vec<_> = self
@@ -1503,7 +1497,7 @@ pub mod test {
 .collect();

 let _ = ReplayStage::compute_bank_stats(
-&my_pubkey,
+my_pubkey,
 &ancestors,
 &mut frozen_banks,
 tower,
@@ -1582,9 +1576,9 @@ pub mod test {
 .filter_map(|slot| {
 let mut fork_tip_parent = tr(slot - 1);
 fork_tip_parent.push_front(tr(slot));
-self.fill_bank_forks(fork_tip_parent, &cluster_votes);
+self.fill_bank_forks(fork_tip_parent, cluster_votes);
 if votes_to_simulate.contains(&slot) {
-Some((slot, self.simulate_vote(slot, &my_pubkey, tower)))
+Some((slot, self.simulate_vote(slot, my_pubkey, tower)))
 } else {
 None
 }
@@ -1627,7 +1621,7 @@ pub mod test {
 fork_tip_parent.push_front(tr(start_slot + i));
 self.fill_bank_forks(fork_tip_parent, cluster_votes);
 if self
-.simulate_vote(i + start_slot, &my_pubkey, tower)
+.simulate_vote(i + start_slot, my_pubkey, tower)
 .is_empty()
 {
 cluster_votes
@@ -2850,7 +2844,7 @@ pub mod test {

 tower.save(&identity_keypair).unwrap();
 modify_serialized(&tower.path);
-let loaded = Tower::restore(&dir.path(), &identity_keypair.pubkey());
+let loaded = Tower::restore(dir.path(), &identity_keypair.pubkey());

 (tower, loaded)
 }
@@ -34,7 +34,7 @@ impl FetchStage {
 tpu_forwards_sockets,
 exit,
 &sender,
-&poh_recorder,
+poh_recorder,
 coalesce_ms,
 ),
 receiver,
@@ -54,8 +54,8 @@ impl FetchStage {
 tx_sockets,
 tpu_forwards_sockets,
 exit,
-&sender,
-&poh_recorder,
+sender,
+poh_recorder,
 coalesce_ms,
 )
 }
@@ -108,7 +108,7 @@ impl FetchStage {
 let tpu_threads = sockets.into_iter().map(|socket| {
 streamer::receiver(
 socket,
-&exit,
+exit,
 sender.clone(),
 recycler.clone(),
 "fetch_stage",
@@ -121,7 +121,7 @@ impl FetchStage {
 let tpu_forwards_threads = tpu_forwards_sockets.into_iter().map(|socket| {
 streamer::receiver(
 socket,
-&exit,
+exit,
 forward_sender.clone(),
 recycler.clone(),
 "fetch_forward_stage",
@@ -457,7 +457,7 @@ impl HeaviestSubtreeForkChoice {

 pub fn is_duplicate_confirmed(&self, slot_hash_key: &SlotHashKey) -> Option<bool> {
 self.fork_infos
-.get(&slot_hash_key)
+.get(slot_hash_key)
 .map(|fork_info| fork_info.is_duplicate_confirmed())
 }

@@ -472,7 +472,7 @@ impl HeaviestSubtreeForkChoice {
 /// Returns false if the node or any of its ancestors have been marked as duplicate
 pub fn is_candidate(&self, slot_hash_key: &SlotHashKey) -> Option<bool> {
 self.fork_infos
-.get(&slot_hash_key)
+.get(slot_hash_key)
 .map(|fork_info| fork_info.is_candidate())
 }

@@ -585,7 +585,7 @@ impl HeaviestSubtreeForkChoice {
 for child_key in &fork_info.children {
 let child_fork_info = self
 .fork_infos
-.get(&child_key)
+.get(child_key)
 .expect("Child must exist in fork_info map");
 let child_stake_voted_subtree = child_fork_info.stake_voted_subtree;
 is_duplicate_confirmed |= child_fork_info.is_duplicate_confirmed;
@@ -770,7 +770,7 @@ impl HeaviestSubtreeForkChoice {
 let epoch = epoch_schedule.get_epoch(new_vote_slot_hash.0);
 let stake_update = epoch_stakes
 .get(&epoch)
-.map(|epoch_stakes| epoch_stakes.vote_account_stake(&pubkey))
+.map(|epoch_stakes| epoch_stakes.vote_account_stake(pubkey))
 .unwrap_or(0);

 update_operations
@@ -896,7 +896,7 @@ impl TreeDiff for HeaviestSubtreeForkChoice {

 fn children(&self, slot_hash_key: &SlotHashKey) -> Option<&[SlotHashKey]> {
 self.fork_infos
-.get(&slot_hash_key)
+.get(slot_hash_key)
 .map(|fork_info| &fork_info.children[..])
 }
 }
@@ -1497,7 +1497,7 @@ mod test {
 .chain(std::iter::once(&duplicate_leaves_descended_from_4[1]))
 {
 assert!(heaviest_subtree_fork_choice
-.children(&duplicate_leaf)
+.children(duplicate_leaf)
 .unwrap()
 .is_empty(),);
 }
@@ -3116,11 +3116,11 @@ mod test {
 let slot = slot_hash_key.0;
 if slot <= duplicate_confirmed_slot {
 assert!(heaviest_subtree_fork_choice
-.is_duplicate_confirmed(&slot_hash_key)
+.is_duplicate_confirmed(slot_hash_key)
 .unwrap());
 } else {
 assert!(!heaviest_subtree_fork_choice
-.is_duplicate_confirmed(&slot_hash_key)
+.is_duplicate_confirmed(slot_hash_key)
 .unwrap());
 }
 assert!(heaviest_subtree_fork_choice
@@ -3139,7 +3139,7 @@ mod test {
 // 1) Be duplicate confirmed
 // 2) Have no invalid ancestors
 assert!(heaviest_subtree_fork_choice
-.is_duplicate_confirmed(&slot_hash_key)
+.is_duplicate_confirmed(slot_hash_key)
 .unwrap());
 assert!(heaviest_subtree_fork_choice
 .latest_invalid_ancestor(slot_hash_key)
@@ -3149,7 +3149,7 @@ mod test {
 // 1) Not be duplicate confirmed
 // 2) Should have an invalid ancestor == `invalid_descendant_slot`
 assert!(!heaviest_subtree_fork_choice
-.is_duplicate_confirmed(&slot_hash_key)
+.is_duplicate_confirmed(slot_hash_key)
 .unwrap());
 assert_eq!(
 heaviest_subtree_fork_choice
@@ -3162,7 +3162,7 @@ mod test {
 // 1) Not be duplicate confirmed
 // 2) Should not have an invalid ancestor
 assert!(!heaviest_subtree_fork_choice
-.is_duplicate_confirmed(&slot_hash_key)
+.is_duplicate_confirmed(slot_hash_key)
 .unwrap());
 assert!(heaviest_subtree_fork_choice
 .latest_invalid_ancestor(slot_hash_key)
@@ -3186,7 +3186,7 @@ mod test {
 // 1) Be duplicate confirmed
 // 2) Have no invalid ancestors
 assert!(heaviest_subtree_fork_choice
-.is_duplicate_confirmed(&slot_hash_key)
+.is_duplicate_confirmed(slot_hash_key)
 .unwrap());
 assert!(heaviest_subtree_fork_choice
 .latest_invalid_ancestor(slot_hash_key)
@@ -3196,7 +3196,7 @@ mod test {
 // 1) Not be duplicate confirmed
 // 2) Should have an invalid ancestor == `invalid_descendant_slot`
 assert!(!heaviest_subtree_fork_choice
-.is_duplicate_confirmed(&slot_hash_key)
+.is_duplicate_confirmed(slot_hash_key)
 .unwrap());
 assert_eq!(
 heaviest_subtree_fork_choice
@@ -3209,7 +3209,7 @@ mod test {
 // 1) Not be duplicate confirmed
 // 2) Should not have an invalid ancestor
 assert!(!heaviest_subtree_fork_choice
-.is_duplicate_confirmed(&slot_hash_key)
+.is_duplicate_confirmed(slot_hash_key)
 .unwrap());
 assert!(heaviest_subtree_fork_choice
 .latest_invalid_ancestor(slot_hash_key)
@@ -3223,7 +3223,7 @@ mod test {
 heaviest_subtree_fork_choice.mark_fork_valid_candidate(&last_duplicate_confirmed_key);
 for slot_hash_key in heaviest_subtree_fork_choice.fork_infos.keys() {
 assert!(heaviest_subtree_fork_choice
-.is_duplicate_confirmed(&slot_hash_key)
+.is_duplicate_confirmed(slot_hash_key)
 .unwrap());
 assert!(heaviest_subtree_fork_choice
 .latest_invalid_ancestor(slot_hash_key)
@@ -187,7 +187,7 @@ impl LedgerCleanupService {
 *last_purge_slot = root;

 let (slots_to_clean, purge_first_slot, lowest_cleanup_slot, total_shreds) =
-Self::find_slots_to_clean(&blockstore, root, max_ledger_shreds);
+Self::find_slots_to_clean(blockstore, root, max_ledger_shreds);

 if slots_to_clean {
 let purge_complete = Arc::new(AtomicBool::new(false));
@@ -36,7 +36,7 @@ impl OptimisticConfirmationVerifier {
 .into_iter()
 .filter(|(optimistic_slot, optimistic_hash)| {
 (*optimistic_slot == root && *optimistic_hash != root_bank.hash())
-|| (!root_ancestors.contains_key(&optimistic_slot) &&
+|| (!root_ancestors.contains_key(optimistic_slot) &&
 // In this second part of the `and`, we account for the possibility that
 // there was some other root `rootX` set in BankForks where:
 //
@@ -271,7 +271,7 @@ impl PropagatedStats {
 pub fn add_node_pubkey(&mut self, node_pubkey: &Pubkey, bank: &Bank) {
 if !self.propagated_node_ids.contains(node_pubkey) {
 let node_vote_accounts = bank
-.epoch_vote_accounts_for_node_id(&node_pubkey)
+.epoch_vote_accounts_for_node_id(node_pubkey)
 .map(|v| &v.vote_accounts);

 if let Some(node_vote_accounts) = node_vote_accounts {
@@ -224,7 +224,7 @@ impl RepairService {

 add_votes_elapsed = Measure::start("add_votes");
 repair_weight.add_votes(
-&blockstore,
+blockstore,
 slot_to_vote_pubkeys.into_iter(),
 root_bank.epoch_stakes_map(),
 root_bank.epoch_schedule(),
@@ -272,7 +272,7 @@ impl RepairService {
 let mut outstanding_requests = outstanding_requests.write().unwrap();
 repairs.into_iter().for_each(|repair_request| {
 if let Ok((to, req)) = serve_repair.repair_request(
-&cluster_slots,
+cluster_slots,
 repair_request,
 &mut cache,
 &mut repair_stats,
@@ -488,7 +488,7 @@ impl RepairService {
 repair_validators,
 );
 if let Some((repair_pubkey, repair_addr)) = status.repair_pubkey_and_addr {
-let repairs = Self::generate_duplicate_repairs_for_slot(&blockstore, *slot);
+let repairs = Self::generate_duplicate_repairs_for_slot(blockstore, *slot);

 if let Some(repairs) = repairs {
 let mut outstanding_requests = outstanding_requests.write().unwrap();
@@ -530,7 +530,7 @@ impl RepairService {
 nonce: Nonce,
 ) -> Result<()> {
 let req =
-serve_repair.map_repair_request(&repair_type, repair_pubkey, repair_stats, nonce)?;
+serve_repair.map_repair_request(repair_type, repair_pubkey, repair_stats, nonce)?;
 repair_socket.send_to(&req, to)?;
 Ok(())
 }
@@ -495,7 +495,7 @@ impl RepairWeight {
 for ((slot, _), _) in all_slots {
 *self
 .slot_to_tree
-.get_mut(&slot)
+.get_mut(slot)
 .expect("Nodes in tree must exist in `self.slot_to_tree`") = root2;
 }
 }
@@ -521,9 +521,9 @@ impl RepairWeight {
 fn sort_by_stake_weight_slot(slot_stake_voted: &mut Vec<(Slot, u64)>) {
 slot_stake_voted.sort_by(|(slot, stake_voted), (slot_, stake_voted_)| {
 if stake_voted == stake_voted_ {
-slot.cmp(&slot_)
+slot.cmp(slot_)
 } else {
-stake_voted.cmp(&stake_voted_).reverse()
+stake_voted.cmp(stake_voted_).reverse()
 }
 });
 }
@@ -757,7 +757,7 @@ mod test {
 );

 for slot in &[8, 10, 11] {
-assert_eq!(*repair_weight.slot_to_tree.get(&slot).unwrap(), 8);
+assert_eq!(*repair_weight.slot_to_tree.get(slot).unwrap(), 8);
 }
 for slot in 0..=1 {
 assert_eq!(*repair_weight.slot_to_tree.get(&slot).unwrap(), 0);
@@ -772,7 +772,7 @@ mod test {
 );

 for slot in &[8, 10, 11] {
-assert_eq!(*repair_weight.slot_to_tree.get(&slot).unwrap(), 0);
+assert_eq!(*repair_weight.slot_to_tree.get(slot).unwrap(), 0);
 }
 assert_eq!(repair_weight.trees.len(), 1);
 assert!(repair_weight.trees.contains_key(&0));
@@ -1088,10 +1088,10 @@ mod test {
 let purged_slots = vec![0, 1, 2, 4, 8, 10];
 let mut expected_unrooted_len = 0;
 for purged_slot in &purged_slots {
-assert!(!repair_weight.slot_to_tree.contains_key(&purged_slot));
-assert!(!repair_weight.trees.contains_key(&purged_slot));
+assert!(!repair_weight.slot_to_tree.contains_key(purged_slot));
+assert!(!repair_weight.trees.contains_key(purged_slot));
 if *purged_slot > 3 {
-assert!(repair_weight.unrooted_slots.contains(&purged_slot));
+assert!(repair_weight.unrooted_slots.contains(purged_slot));
 expected_unrooted_len += 1;
 }
 }
@@ -101,7 +101,7 @@ pub fn get_best_repair_shreds<'a>(
 let new_repairs = RepairService::generate_repairs_for_slot(
 blockstore,
 slot,
-&slot_meta,
+slot_meta,
 max_repairs - repairs.len(),
 );
 repairs.extend(new_repairs);
@@ -560,7 +560,7 @@ impl ReplayStage {
 }

 Self::handle_votable_bank(
-&vote_bank,
+vote_bank,
 &poh_recorder,
 switch_fork_decision,
 &bank_forks,
@@ -751,12 +751,7 @@ impl ReplayStage {
 )
 };

-Self::initialize_progress_and_fork_choice(
-&root_bank,
-frozen_banks,
-&my_pubkey,
-&vote_account,
-)
+Self::initialize_progress_and_fork_choice(&root_bank, frozen_banks, my_pubkey, vote_account)
 }

 pub(crate) fn initialize_progress_and_fork_choice(
@@ -774,14 +769,7 @@ impl ReplayStage {
 let prev_leader_slot = progress.get_bank_prev_leader_slot(bank);
 progress.insert(
 bank.slot(),
-ForkProgress::new_from_bank(
-bank,
-&my_pubkey,
-&vote_account,
-prev_leader_slot,
-0,
-0,
-),
+ForkProgress::new_from_bank(bank, my_pubkey, vote_account, prev_leader_slot, 0, 0),
 );
 }
 let root = root_bank.slot();
@@ -878,7 +866,7 @@ impl ReplayStage {
 .expect("must exist based on earlier check")
 {
 descendants
-.get_mut(&a)
+.get_mut(a)
 .expect("If exists in ancestor map must exist in descendants map")
 .retain(|d| *d != slot && !slot_descendants.contains(d));
 }
@@ -888,9 +876,9 @@ impl ReplayStage {

 // Purge all the descendants of this slot from both maps
 for descendant in slot_descendants {
-ancestors.remove(&descendant).expect("must exist");
+ancestors.remove(descendant).expect("must exist");
 descendants
-.remove(&descendant)
+.remove(descendant)
 .expect("must exist based on earlier check");
 }
 descendants
@@ -1348,7 +1336,7 @@ impl ReplayStage {
 );
 Self::handle_new_root(
 new_root,
-&bank_forks,
+bank_forks,
 progress,
 accounts_background_request_sender,
 highest_confirmed_root,
@@ -1454,7 +1442,7 @@ impl ReplayStage {
 let vote_ix = switch_fork_decision
 .to_vote_instruction(
 vote,
-&vote_account_pubkey,
+vote_account_pubkey,
 &authorized_voter_keypair.pubkey(),
 )
 .expect("Switch threshold failure should not lead to voting");
@@ -1606,9 +1594,9 @@ impl ReplayStage {
 leader_schedule_cache: &LeaderScheduleCache,
 ) {
 let next_leader_slot = leader_schedule_cache.next_leader_slot(
-&my_pubkey,
+my_pubkey,
 bank.slot(),
-&bank,
+bank,
 Some(blockstore),
 GRACE_TICKS_FACTOR * MAX_GRACE_SLOTS,
 );
@@ -1685,7 +1673,7 @@ impl ReplayStage {
 let bank_progress = &mut progress.entry(bank.slot()).or_insert_with(|| {
 ForkProgress::new_from_bank(
 &bank,
-&my_pubkey,
+my_pubkey,
 vote_account,
 prev_leader_slot,
 num_blocks_on_fork,
@@ -1696,7 +1684,7 @@ impl ReplayStage {
 let root_slot = bank_forks.read().unwrap().root();
 let replay_result = Self::replay_blockstore_into_bank(
 &bank,
-&blockstore,
+blockstore,
 bank_progress,
 transaction_status_sender,
 replay_vote_sender,
@@ -1776,7 +1764,7 @@ impl ReplayStage {
 );
 }
 }
-Self::record_rewards(&bank, &rewards_recorder_sender);
+Self::record_rewards(&bank, rewards_recorder_sender);
 } else {
 trace!(
 "bank {} not completed tick_height: {}, max_tick_height: {}",
@@ -1820,14 +1808,14 @@ impl ReplayStage {
 my_vote_pubkey,
 bank_slot,
 bank.vote_accounts().into_iter(),
-&ancestors,
+ancestors,
 |slot| progress.get_hash(slot),
 latest_validator_votes_for_frozen_banks,
 );
 // Notify any listeners of the votes found in this newly computed
 // bank
 heaviest_subtree_fork_choice.compute_bank_stats(
-&bank,
+bank,
 tower,
 latest_validator_votes_for_frozen_banks,
 );
@@ -1984,9 +1972,9 @@ impl ReplayStage {
 let selected_fork = {
 let switch_fork_decision = tower.check_switch_threshold(
 heaviest_bank.slot(),
-&ancestors,
-&descendants,
-&progress,
+ancestors,
+descendants,
+progress,
 heaviest_bank.total_epoch_stake(),
 heaviest_bank
 .epoch_vote_accounts(heaviest_bank.epoch())
@@ -2232,7 +2220,7 @@ impl ReplayStage {
 .contains(vote_pubkey);
 leader_propagated_stats.add_vote_pubkey(
 *vote_pubkey,
-leader_bank.epoch_vote_account_stake(&vote_pubkey),
+leader_bank.epoch_vote_account_stake(vote_pubkey),
 );
 !exists
 });
@@ -2704,7 +2692,7 @@ mod tests {
 &bank1,
 bank1.collector_id(),
 validator_node_to_vote_keys
-.get(&bank1.collector_id())
+.get(bank1.collector_id())
 .unwrap(),
 Some(0),
 0,
@@ -2961,7 +2949,7 @@ mod tests {
 &bad_hash,
 hashes_per_tick.saturating_sub(1),
 vec![system_transaction::transfer(
-&genesis_keypair,
+genesis_keypair,
 &keypair2.pubkey(),
 2,
 blockhash,
@@ -3078,8 +3066,7 @@ mod tests {
 let mut entries =
 entry::create_ticks(bank.ticks_per_slot(), hashes_per_tick, blockhash);
 let last_entry_hash = entries.last().unwrap().hash;
-let tx =
-system_transaction::transfer(&genesis_keypair, &keypair.pubkey(), 2, blockhash);
+let tx = system_transaction::transfer(genesis_keypair, &keypair.pubkey(), 2, blockhash);
 let trailing_entry = entry::next_entry(&last_entry_hash, 1, vec![tx]);
 entries.push(trailing_entry);
 entries_to_test_shreds(entries, slot, slot.saturating_sub(1), true, 0)
@@ -3159,7 +3146,7 @@ mod tests {
 &mut bank0_progress,
 None,
 &replay_vote_sender,
-&&VerifyRecyclers::default(),
+&VerifyRecyclers::default(),
 );

 let subscriptions = Arc::new(RpcSubscriptions::new(
@@ -3199,12 +3186,12 @@ mod tests {
 #[test]
 fn test_replay_commitment_cache() {
 fn leader_vote(vote_slot: Slot, bank: &Arc<Bank>, pubkey: &Pubkey) {
-let mut leader_vote_account = bank.get_account(&pubkey).unwrap();
+let mut leader_vote_account = bank.get_account(pubkey).unwrap();
 let mut vote_state = VoteState::from(&leader_vote_account).unwrap();
 vote_state.process_slot_vote_unchecked(vote_slot);
 let versioned = VoteStateVersions::new_current(vote_state);
 VoteState::to(&versioned, &mut leader_vote_account).unwrap();
-bank.store_account(&pubkey, &leader_vote_account);
+bank.store_account(pubkey, &leader_vote_account);
 }

 let leader_pubkey = solana_sdk::pubkey::new_rand();
@@ -3741,7 +3728,7 @@ mod tests {
 success_index: usize,
 ) {
 let stake = 10_000;
-let (bank_forks, _, _) = initialize_state(&all_keypairs, stake);
+let (bank_forks, _, _) = initialize_state(all_keypairs, stake);
 let root_bank = bank_forks.root_bank();
 let mut propagated_stats = PropagatedStats {
 total_epoch_stake: stake * all_keypairs.len() as u64,
@@ -4375,7 +4362,7 @@ mod tests {
 ));
 assert!(check_map_eq(
 &descendants,
-&bank_forks.read().unwrap().descendants()
+bank_forks.read().unwrap().descendants()
 ));

 // Try to purge the root
@@ -4514,7 +4501,7 @@ mod tests {

 // Record the vote for 4
 tower.record_bank_vote(
-&bank_forks.read().unwrap().get(4).unwrap(),
+bank_forks.read().unwrap().get(4).unwrap(),
 &Pubkey::default(),
 );

@@ -4714,7 +4701,7 @@ mod tests {
 &cluster_info,
 refresh_bank,
 &poh_recorder,
-Tower::last_voted_slot_in_bank(&refresh_bank, &my_vote_pubkey).unwrap(),
+Tower::last_voted_slot_in_bank(refresh_bank, &my_vote_pubkey).unwrap(),
 &my_vote_pubkey,
 &my_vote_keypair,
 &mut voted_signatures,
@@ -4894,12 +4881,12 @@ mod tests {
 progress,
 &VoteTracker::default(),
 &ClusterSlots::default(),
-&bank_forks,
+bank_forks,
 heaviest_subtree_fork_choice,
 latest_validator_votes_for_frozen_banks,
 );
 let (heaviest_bank, heaviest_bank_on_same_fork) = heaviest_subtree_fork_choice
-.select_forks(&frozen_banks, &tower, &progress, &ancestors, bank_forks);
+.select_forks(&frozen_banks, tower, progress, ancestors, bank_forks);
 assert!(heaviest_bank_on_same_fork.is_none());
 let SelectVoteAndResetForkResult {
 vote_bank,
@@ -4908,8 +4895,8 @@ mod tests {
 } = ReplayStage::select_vote_and_reset_forks(
 &heaviest_bank,
 heaviest_bank_on_same_fork.as_ref(),
-&ancestors,
-&descendants,
+ancestors,
+descendants,
 progress,
 tower,
 latest_validator_votes_for_frozen_banks,
@@ -171,7 +171,7 @@ impl ServeRepair {
 Self::run_window_request(
 recycler,
 from,
-&from_addr,
+from_addr,
 blockstore,
 &me.read().unwrap().my_info,
 *slot,
@@ -186,7 +186,7 @@ impl ServeRepair {
 (
 Self::run_highest_window_request(
 recycler,
-&from_addr,
+from_addr,
 blockstore,
 *slot,
 *highest_index,
@@ -200,7 +200,7 @@ impl ServeRepair {
 (
 Self::run_orphan(
 recycler,
-&from_addr,
+from_addr,
 blockstore,
 *slot,
 MAX_ORPHAN_REPAIR_RESPONSES,
@@ -256,7 +256,7 @@ impl ServeRepair {

 let mut time = Measure::start("repair::handle_packets");
 for reqs in reqs_v {
-Self::handle_packets(obj, &recycler, blockstore, reqs, response_sender, stats);
+Self::handle_packets(obj, recycler, blockstore, reqs, response_sender, stats);
 }
 time.stop();
 if total_packets >= *max_packets {
@@ -411,7 +411,7 @@ impl ServeRepair {
 let (repair_peers, weighted_index) = match cache.entry(slot) {
 Entry::Occupied(entry) => entry.into_mut(),
 Entry::Vacant(entry) => {
-let repair_peers = self.repair_peers(&repair_validators, slot);
+let repair_peers = self.repair_peers(repair_validators, slot);
 if repair_peers.is_empty() {
 return Err(Error::from(ClusterInfoError::NoPeers));
 }
@@ -28,7 +28,7 @@ impl ServeRepairService {
 );
 let t_receiver = streamer::receiver(
 serve_repair_socket.clone(),
-&exit,
+exit,
 request_sender,
 Recycler::default(),
 "serve_repair_receiver",
@@ -145,7 +145,7 @@ impl ShredFetchStage {
 .map(|s| {
 streamer::receiver(
 s,
-&exit,
+exit,
 packet_sender.clone(),
 recycler.clone(),
 "packet_modifier",
@@ -174,7 +174,7 @@ impl ShredFetchStage {

 let (mut tvu_threads, tvu_filter) = Self::packet_modifier(
 sockets,
-&exit,
+exit,
 sender.clone(),
 recycler.clone(),
 bank_forks.clone(),
@@ -184,7 +184,7 @@ impl ShredFetchStage {

 let (tvu_forwards_threads, fwd_thread_hdl) = Self::packet_modifier(
 forward_sockets,
-&exit,
+exit,
 sender.clone(),
 recycler.clone(),
 bank_forks.clone(),
@@ -194,7 +194,7 @@ impl ShredFetchStage {

 let (repair_receiver, repair_handler) = Self::packet_modifier(
 vec![repair_socket],
-&exit,
+exit,
 sender.clone(),
 recycler,
 bank_forks,
@@ -74,9 +74,9 @@ impl Tpu {
 let fetch_stage = FetchStage::new_with_sender(
 transactions_sockets,
 tpu_forwards_sockets,
-&exit,
+exit,
 &packet_sender,
-&poh_recorder,
+poh_recorder,
 tpu_coalesce_ms,
 );
 let (verified_sender, verified_receiver) = unbounded();
@@ -88,10 +88,10 @@ impl Tpu {

 let (verified_vote_packets_sender, verified_vote_packets_receiver) = unbounded();
 let cluster_info_vote_listener = ClusterInfoVoteListener::new(
-&exit,
+exit,
 cluster_info.clone(),
 verified_vote_packets_sender,
-&poh_recorder,
+poh_recorder,
 vote_tracker,
 bank_forks,
 subscriptions.clone(),
@@ -104,7 +104,7 @@ impl Tpu {
 );

 let banking_stage = BankingStage::new(
-&cluster_info,
+cluster_info,
 poh_recorder,
 verified_receiver,
 verified_vote_packets_receiver,
@@ -117,7 +117,7 @@ impl Tpu {
 cluster_info.clone(),
 entry_receiver,
 retransmit_slots_receiver,
-&exit,
+exit,
 blockstore,
 shred_version,
 );
@@ -152,7 +152,7 @@ impl Tvu {
 repair_socket.clone(),
 &fetch_sender,
 Some(bank_forks.clone()),
-&exit,
+exit,
 );

 let (verified_sender, verified_receiver) = unbounded();
@@ -172,7 +172,7 @@ impl Tvu {
 bank_forks.clone(),
 leader_schedule_cache,
 blockstore.clone(),
-&cluster_info,
+cluster_info,
 Arc::new(retransmit_sockets),
 repair_socket,
 verified_receiver,
@@ -212,7 +212,7 @@ impl Tvu {
 accounts_hash_receiver,
 pending_snapshot_package,
 exit,
-&cluster_info,
+cluster_info,
 tvu_config.trusted_validators.clone(),
 tvu_config.halt_on_trusted_validators_accounts_hash_mismatch,
 tvu_config.accounts_hash_fault_injection_slots,
@@ -300,7 +300,7 @@ impl Tvu {
 ledger_cleanup_slot_receiver,
 blockstore.clone(),
 max_ledger_shreds,
-&exit,
+exit,
 compaction_interval,
 max_compaction_jitter,
 )
@@ -308,7 +308,7 @@ impl Tvu {

 let accounts_background_service = AccountsBackgroundService::new(
 bank_forks.clone(),
-&exit,
+exit,
 accounts_background_request_handler,
 tvu_config.accounts_db_caching_enabled,
 tvu_config.test_hash_calculation,
@@ -116,7 +116,7 @@ mod tests {
 if *unfrozen_vote_slot >= frozen_vote_slot {
 let vote_hashes_map = unfrozen_gossip_verified_vote_hashes
 .votes_per_slot
-.get(&unfrozen_vote_slot)
+.get(unfrozen_vote_slot)
 .unwrap();
 assert_eq!(vote_hashes_map.len(), num_duplicate_hashes);
 for pubkey_votes in vote_hashes_map.values() {
@@ -959,7 +959,7 @@ fn post_process_restored_tower(
 })
 .unwrap_or_else(|err| {
 let voting_has_been_active =
-active_vote_account_exists_in_bank(&bank_forks.working_bank(), &vote_account);
+active_vote_account_exists_in_bank(&bank_forks.working_bank(), vote_account);
 if !err.is_file_missing() {
 datapoint_error!(
 "tower_error",
@@ -992,10 +992,10 @@ fn post_process_restored_tower(
 }

 Tower::new_from_bankforks(
-&bank_forks,
+bank_forks,
 tower_path,
-&validator_identity,
-&vote_account,
+validator_identity,
+vote_account,
 )
 })
 }
@@ -1063,9 +1063,9 @@ fn new_banks_from_ledger(

 let tower_path = config.tower_path.as_deref().unwrap_or(ledger_path);

-let restored_tower = Tower::restore(tower_path, &validator_identity);
+let restored_tower = Tower::restore(tower_path, validator_identity);
 if let Ok(tower) = &restored_tower {
-reconcile_blockstore_roots_with_tower(&tower, &blockstore).unwrap_or_else(|err| {
+reconcile_blockstore_roots_with_tower(tower, &blockstore).unwrap_or_else(|err| {
 error!("Failed to reconcile blockstore with tower: {:?}", err);
 abort()
 });
@@ -1167,7 +1167,7 @@ fn new_banks_from_ledger(
 None,
 &snapshot_config.snapshot_package_output_path,
 snapshot_config.archive_format,
-Some(&bank_forks.root_bank().get_thread_pool()),
+Some(bank_forks.root_bank().get_thread_pool()),
 snapshot_config.maximum_snapshots_to_retain,
 )
 .unwrap_or_else(|err| {
@@ -1179,9 +1179,9 @@ fn new_banks_from_ledger(

 let tower = post_process_restored_tower(
 restored_tower,
-&validator_identity,
-&vote_account,
-&config,
+validator_identity,
+vote_account,
+config,
 tower_path,
 &bank_forks,
 );
@@ -1386,7 +1386,7 @@ fn wait_for_supermajority(
 );
 }

-let gossip_stake_percent = get_stake_percent_in_gossip(&bank, &cluster_info, i % 10 == 0);
+let gossip_stake_percent = get_stake_percent_in_gossip(bank, cluster_info, i % 10 == 0);

 if gossip_stake_percent >= WAIT_FOR_SUPERMAJORITY_THRESHOLD_PERCENT {
 break;
@@ -134,7 +134,7 @@ fn verify_repair(
 .map(|repair_meta| {
 outstanding_requests.register_response(
 repair_meta.nonce,
-&shred,
+shred,
 solana_sdk::timing::timestamp(),
 )
 })
@@ -153,7 +153,7 @@ fn prune_shreds_invalid_repair(
 let mut outstanding_requests = outstanding_requests.write().unwrap();
 shreds.retain(|shred| {
 let should_keep = (
-verify_repair(&mut outstanding_requests, &shred, &repair_infos[i]),
+verify_repair(&mut outstanding_requests, shred, &repair_infos[i]),
 i += 1,
 )
 .0;
@@ -630,7 +630,7 @@ mod test {
 keypair: &Arc<Keypair>,
 ) -> Vec<Shred> {
 let shredder = Shredder::new(slot, parent, keypair.clone(), 0, 0).unwrap();
-shredder.entries_to_shreds(&entries, true, 0).0
+shredder.entries_to_shreds(entries, true, 0).0
 }

 #[test]