Resolve nightly-2021-10-05 clippy complaints

This commit is contained in:
Michael Vines
2021-10-05 22:24:48 -07:00
parent eb4ce3dfed
commit 7027d56064
53 changed files with 229 additions and 293 deletions

View File

@@ -4,7 +4,6 @@ use {
itertools::Itertools,
solana_entry::entry::Entry,
solana_ledger::shred::Shredder,
solana_runtime::blockhash_queue::BlockhashQueue,
solana_sdk::{
hash::Hash,
signature::{Keypair, Signature, Signer},
@@ -26,11 +25,6 @@ pub struct BroadcastDuplicatesConfig {
#[derive(Clone)]
pub(super) struct BroadcastDuplicatesRun {
config: BroadcastDuplicatesConfig,
// Local queue for broadcast to track which duplicate blockhashes we've sent
duplicate_queue: BlockhashQueue,
// Buffer for duplicate entries
duplicate_entries_buffer: Vec<Entry>,
last_duplicate_entry_hash: Hash,
current_slot: Slot,
next_shred_index: u32,
shred_version: u16,
@@ -50,10 +44,7 @@ impl BroadcastDuplicatesRun {
));
Self {
config,
duplicate_queue: BlockhashQueue::default(),
duplicate_entries_buffer: vec![],
next_shred_index: u32::MAX,
last_duplicate_entry_hash: Hash::default(),
shred_version,
current_slot: 0,
recent_blockhash: None,

View File

@@ -32,7 +32,6 @@ pub struct StandardBroadcastRun {
last_datapoint_submit: Arc<AtomicInterval>,
num_batches: usize,
cluster_nodes_cache: Arc<ClusterNodesCache<BroadcastStage>>,
last_peer_update: Arc<AtomicInterval>,
}
impl StandardBroadcastRun {
@@ -52,7 +51,6 @@ impl StandardBroadcastRun {
last_datapoint_submit: Arc::default(),
num_batches: 0,
cluster_nodes_cache,
last_peer_update: Arc::new(AtomicInterval::default()),
}
}

View File

@@ -1313,7 +1313,7 @@ pub mod test {
}
VoteState::serialize(
&VoteStateVersions::new_current(vote_state),
&mut account.data_as_mut_slice(),
account.data_as_mut_slice(),
)
.expect("serialize state");
(

View File

@@ -3535,7 +3535,7 @@ pub mod tests {
let bank1 = Bank::new_from_parent(&bank0, &Pubkey::default(), 1);
bank_forks.write().unwrap().insert(bank1);
let bank1 = bank_forks.read().unwrap().get(1).cloned().unwrap();
let mut bank1_progress = progress
let bank1_progress = progress
.entry(bank1.slot())
.or_insert_with(|| ForkProgress::new(bank1.last_blockhash(), None, None, 0, 0));
let shreds = shred_to_insert(
@@ -3548,7 +3548,7 @@ pub mod tests {
let res = ReplayStage::replay_blockstore_into_bank(
&bank1,
&blockstore,
&mut bank1_progress,
bank1_progress,
None,
&replay_vote_sender,
&VerifyRecyclers::default(),
@@ -3923,7 +3923,7 @@ pub mod tests {
.values()
.cloned()
.collect();
let mut heaviest_subtree_fork_choice = &mut vote_simulator.heaviest_subtree_fork_choice;
let heaviest_subtree_fork_choice = &mut vote_simulator.heaviest_subtree_fork_choice;
let mut latest_validator_votes_for_frozen_banks =
LatestValidatorVotesForFrozenBanks::default();
let ancestors = vote_simulator.bank_forks.read().unwrap().ancestors();
@@ -3938,7 +3938,7 @@ pub mod tests {
&VoteTracker::default(),
&ClusterSlots::default(),
&vote_simulator.bank_forks,
&mut heaviest_subtree_fork_choice,
heaviest_subtree_fork_choice,
&mut latest_validator_votes_for_frozen_banks,
);

View File

@@ -95,9 +95,9 @@ impl ShredFetchStage {
}
}
stats.shred_count += p.packets.len();
p.packets.iter_mut().for_each(|mut packet| {
p.packets.iter_mut().for_each(|packet| {
Self::process_packet(
&mut packet,
packet,
&mut shreds_received,
&mut stats,
last_root,