Clippy cleanup for all targets and nightly rust (also support 1.44.0) (#10445)
* address warnings from 'rustup run beta cargo clippy --workspace'; minor refactoring in:
  - cli/src/cli.rs
  - cli/src/offline/blockhash_query.rs
  - logger/src/lib.rs
  - runtime/src/accounts_db.rs
  Expect some performance improvement in AccountsDB::clean_accounts().
* address warnings from 'rustup run beta cargo clippy --workspace --tests'
* address warnings from 'rustup run nightly cargo clippy --workspace --all-targets'
* rustfmt
* fix warning stragglers
* properly fix clippy warnings in test_vote_subscribe(): replace ref-to-Arc with plain ref parameters where the Arc is not cloned
* Remove lock around JsonRpcRequestProcessor (#10417) automerge
* make the ancestors parameter optional to avoid forcing construction of empty hash maps

Co-authored-by: Greg Fitzgerald <greg@solana.com>
Committed by: GitHub
Parent: fa3a6c5584
Commit: e23340d89e
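
The recurring refactor in this change is passing plain references instead of Arc handles wherever the callee never clones or stores the Arc. A minimal standalone sketch of the idea (the VoteTracker shape here is illustrative, not the real struct):

    use std::sync::Arc;

    struct VoteTracker {
        votes_seen: usize,
    }

    // Before: the signature demands an Arc even though the function only reads.
    fn summarize_owned(tracker: Arc<VoteTracker>) -> usize {
        tracker.votes_seen
    }

    // After: borrow the inner value; Arc<VoteTracker> derefs to &VoteTracker,
    // so call sites change from `tracker.clone()` to `&tracker`.
    fn summarize(tracker: &VoteTracker) -> usize {
        tracker.votes_seen
    }

    fn main() {
        let tracker = Arc::new(VoteTracker { votes_seen: 3 });
        assert_eq!(summarize_owned(tracker.clone()), 3);
        assert_eq!(summarize(&tracker), 3);
    }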
@@ -438,7 +438,7 @@ impl ClusterInfo {
     pub fn update_contact_info<F>(&self, modify: F)
     where
-        F: FnOnce(&mut ContactInfo) -> (),
+        F: FnOnce(&mut ContactInfo),
     {
         let my_id = self.id();
         modify(&mut self.my_contact_info.write().unwrap());
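
This hunk addresses clippy's unused_unit lint: a closure bound written `FnOnce(..) -> ()` already returns unit, so the `-> ()` adds nothing. A small hedged sketch with made-up names:

    // Both bounds accept exactly the same closures; clippy flags the first form.
    fn apply_verbose<F: FnOnce(&mut String) -> ()>(s: &mut String, f: F) {
        f(s);
    }

    fn apply<F: FnOnce(&mut String)>(s: &mut String, f: F) {
        f(s);
    }

    fn main() {
        let mut s = String::from("contact");
        apply_verbose(&mut s, |s| s.push('!'));
        apply(&mut s, |s| s.push('?'));
        assert_eq!(s, "contact!?");
    }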
@@ -1917,19 +1917,18 @@ impl ClusterInfo {
             .into_iter()
             .filter_map(|(from, prune_set)| {
                 inc_new_counter_debug!("cluster_info-push_message-prunes", prune_set.len());
-                me.lookup_contact_info(&from, |ci| ci.clone())
-                    .and_then(|ci| {
-                        let mut prune_msg = PruneData {
-                            pubkey: self_id,
-                            prunes: prune_set.into_iter().collect(),
-                            signature: Signature::default(),
-                            destination: from,
-                            wallclock: timestamp(),
-                        };
-                        prune_msg.sign(&me.keypair);
-                        let rsp = Protocol::PruneMessage(self_id, prune_msg);
-                        Some((ci.gossip, rsp))
-                    })
+                me.lookup_contact_info(&from, |ci| ci.clone()).map(|ci| {
+                    let mut prune_msg = PruneData {
+                        pubkey: self_id,
+                        prunes: prune_set.into_iter().collect(),
+                        signature: Signature::default(),
+                        destination: from,
+                        wallclock: timestamp(),
+                    };
+                    prune_msg.sign(&me.keypair);
+                    let rsp = Protocol::PruneMessage(self_id, prune_msg);
+                    (ci.gossip, rsp)
+                })
             })
             .collect();
         if rsp.is_empty() {
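
Here a closure passed to and_then always returns Some(..), so map expresses the same transformation without the wrapper; clippy reports this pattern. A self-contained sketch (the address and message are placeholders):

    fn main() {
        let gossip_addr: Option<&str> = Some("127.0.0.1:8001");

        // Before: and_then plus an unconditional Some(..).
        let msg_a = gossip_addr.and_then(|addr| Some(format!("prune -> {}", addr)));

        // After: map says the same thing directly.
        let msg_b = gossip_addr.map(|addr| format!("prune -> {}", addr));

        assert_eq!(msg_a, msg_b);
    }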
@@ -2932,7 +2931,7 @@ mod tests {
         assert_eq!(slots.len(), 1);
         assert!(since.is_some());

-        let (slots, since2) = cluster_info.get_epoch_slots_since(since.clone());
+        let (slots, since2) = cluster_info.get_epoch_slots_since(since);
         assert!(slots.is_empty());
         assert_eq!(since2, since);
     }
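
The `since` value here is Copy (an Option of a small integer in this sketch), so `.clone()` is just a more verbose copy and the original stays usable afterwards. Illustrative only; get_epoch_slots_since below is a stub, not the real method:

    fn get_epoch_slots_since(since: Option<u64>) -> (Vec<u64>, Option<u64>) {
        // Stub standing in for the real lookup.
        (Vec::new(), since)
    }

    fn main() {
        let since: Option<u64> = Some(42);
        // Passing `since` by value copies it; `since.clone()` did the same work
        // while suggesting a deep clone was needed.
        let (slots, since2) = get_epoch_slots_since(since);
        assert!(slots.is_empty());
        assert_eq!(since2, since);
    }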
@@ -385,7 +385,7 @@ impl ClusterInfoVoteListener {
                 &vote_txs_receiver,
                 &vote_tracker,
                 root_bank.slot(),
-                subscriptions.clone(),
+                &subscriptions,
                 epoch_stakes,
             ) {
                 match e {
@@ -404,9 +404,9 @@ impl ClusterInfoVoteListener {
     #[cfg(test)]
     pub fn get_and_process_votes_for_tests(
         vote_txs_receiver: &VerifiedVoteTransactionsReceiver,
-        vote_tracker: &Arc<VoteTracker>,
+        vote_tracker: &VoteTracker,
         last_root: Slot,
-        subscriptions: Arc<RpcSubscriptions>,
+        subscriptions: &RpcSubscriptions,
     ) -> Result<()> {
         Self::get_and_process_votes(
             vote_txs_receiver,
@@ -419,9 +419,9 @@ impl ClusterInfoVoteListener {

     fn get_and_process_votes(
         vote_txs_receiver: &VerifiedVoteTransactionsReceiver,
-        vote_tracker: &Arc<VoteTracker>,
+        vote_tracker: &VoteTracker,
         last_root: Slot,
-        subscriptions: Arc<RpcSubscriptions>,
+        subscriptions: &RpcSubscriptions,
         epoch_stakes: Option<&EpochStakes>,
     ) -> Result<()> {
         let timer = Duration::from_millis(200);
@@ -443,7 +443,7 @@ impl ClusterInfoVoteListener {
         vote_tracker: &VoteTracker,
         vote_txs: Vec<Transaction>,
         root: Slot,
-        subscriptions: Arc<RpcSubscriptions>,
+        subscriptions: &RpcSubscriptions,
         epoch_stakes: Option<&EpochStakes>,
     ) {
         let mut diff: HashMap<Slot, HashSet<Arc<Pubkey>>> = HashMap::new();
@@ -574,7 +574,7 @@ impl ClusterInfoVoteListener {
     fn notify_for_stake_change(
         current_stake: u64,
         previous_stake: u64,
-        subscriptions: &Arc<RpcSubscriptions>,
+        subscriptions: &RpcSubscriptions,
         epoch_stakes: Option<&EpochStakes>,
         slot: Slot,
     ) {
@@ -804,7 +804,7 @@ mod tests {
             &votes_receiver,
             &vote_tracker,
             0,
-            subscriptions,
+            &subscriptions,
             None,
         )
         .unwrap();
@@ -854,7 +854,7 @@ mod tests {
             &votes_receiver,
             &vote_tracker,
             0,
-            subscriptions,
+            &subscriptions,
             None,
         )
         .unwrap();
@@ -974,13 +974,7 @@ mod tests {
             &validator0_keypairs.vote_keypair,
         )];

-        ClusterInfoVoteListener::process_votes(
-            &vote_tracker,
-            vote_tx,
-            0,
-            subscriptions.clone(),
-            None,
-        );
+        ClusterInfoVoteListener::process_votes(&vote_tracker, vote_tx, 0, &subscriptions, None);
         let ref_count = Arc::strong_count(
             &vote_tracker
                 .keys
@@ -1031,7 +1025,7 @@ mod tests {
             })
             .collect();

-        ClusterInfoVoteListener::process_votes(&vote_tracker, vote_txs, 0, subscriptions, None);
+        ClusterInfoVoteListener::process_votes(&vote_tracker, vote_txs, 0, &subscriptions, None);

         let ref_count = Arc::strong_count(
             &vote_tracker
@@ -165,7 +165,7 @@ impl Tower {
                 let key = all_pubkeys.get_or_insert(&key);
                 lockout_intervals
                     .entry(vote.expiration_slot())
-                    .or_insert_with(|| vec![])
+                    .or_insert_with(Vec::new)
                     .push((vote.slot, key));
             }

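
`|| vec![]` is a closure whose only job is to call Vec::new, so the function itself can be passed instead (clippy's redundant_closure). A hedged sketch with a simplified lockout_intervals map:

    use std::collections::HashMap;

    fn main() {
        // Keyed by expiration slot; values are (voted slot, validator name) pairs.
        let mut lockout_intervals: HashMap<u64, Vec<(u64, &str)>> = HashMap::new();

        // `.or_insert_with(|| vec![])` and `.or_insert_with(Vec::new)` behave the
        // same; the latter drops the needless closure.
        lockout_intervals
            .entry(100)
            .or_insert_with(Vec::new)
            .push((10, "validator-a"));

        assert_eq!(lockout_intervals[&100], vec![(10, "validator-a")]);
    }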
@@ -152,7 +152,7 @@ impl CrdsGossipPush {
         let new_value = crds.new_versioned(now, value);
         let value_hash = new_value.value_hash;
         if let Some((_, ref mut received_set)) = self.received_cache.get_mut(&value_hash) {
-            received_set.insert(from.clone());
+            received_set.insert(*from);
             return Err(CrdsGossipError::PushMessageAlreadyReceived);
         }
         let old = crds.insert_versioned(new_value);
@@ -160,7 +160,7 @@ impl CrdsGossipPush {
             return Err(CrdsGossipError::PushMessageOldVersion);
         }
         let mut received_set = HashSet::new();
-        received_set.insert(from.clone());
+        received_set.insert(*from);
         self.push_messages.insert(label, value_hash);
         self.received_cache.insert(value_hash, (now, received_set));
         Ok(old.ok().and_then(|opt| opt))
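
Pubkeys are Copy, so dereferencing the borrowed key copies it; `from.clone()` compiles to the same copy but reads like an expensive operation (clippy's clone_on_copy). Sketch with a stand-in 4-byte key type rather than the real 32-byte Pubkey:

    use std::collections::HashSet;

    #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
    struct Pubkey([u8; 4]); // stand-in for the real 32-byte pubkey

    fn main() {
        let from = &Pubkey([1, 2, 3, 4]);
        let mut received_set = HashSet::new();

        // `*from` copies the key; `from.clone()` did the same thing more noisily.
        received_set.insert(*from);

        assert!(received_set.contains(from));
    }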
@@ -459,7 +459,7 @@ mod test {
     fn test_keys_and_values() {
         let v = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::default()));
         assert_eq!(v.wallclock(), 0);
-        let key = v.clone().contact_info().unwrap().id;
+        let key = v.contact_info().unwrap().id;
         assert_eq!(v.label(), CrdsValueLabel::ContactInfo(key));

         let v = CrdsValue::new_unsigned(CrdsData::Vote(
@@ -467,7 +467,7 @@ mod test {
             Vote::new(&Pubkey::default(), test_tx(), 0),
         ));
         assert_eq!(v.wallclock(), 0);
-        let key = v.clone().vote().unwrap().from;
+        let key = v.vote().unwrap().from;
         assert_eq!(v.label(), CrdsValueLabel::Vote(0, key));

         let v = CrdsValue::new_unsigned(CrdsData::LowestSlot(
@@ -475,7 +475,7 @@ mod test {
             LowestSlot::new(Pubkey::default(), 0, 0),
         ));
         assert_eq!(v.wallclock(), 0);
-        let key = v.clone().lowest_slot().unwrap().from;
+        let key = v.lowest_slot().unwrap().from;
         assert_eq!(v.label(), CrdsValueLabel::LowestSlot(key));
     }

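
These test hunks drop a clone of the whole CrdsValue that was made only to read one field out of the temporary, the kind of clone the redundant_clone lint (run here via nightly clippy) catches. A simplified sketch; the struct shapes are invented for illustration:

    #[derive(Clone, Copy, PartialEq, Debug)]
    struct ContactInfo {
        id: u32,
    }

    #[derive(Clone)]
    struct CrdsValue {
        contact_info: ContactInfo,
    }

    impl CrdsValue {
        // Borrowing accessor: no ownership of the value is needed to read the id.
        fn contact_info(&self) -> Option<&ContactInfo> {
            Some(&self.contact_info)
        }
    }

    fn main() {
        let v = CrdsValue {
            contact_info: ContactInfo { id: 7 },
        };
        // Before: `v.clone().contact_info().unwrap().id` cloned all of `v` first.
        let key = v.contact_info().unwrap().id;
        assert_eq!(key, 7);
    }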
@@ -262,7 +262,7 @@ fn make_gossip_node(
         cluster_info.set_entrypoint(ContactInfo::new_gossip_entry_point(entrypoint));
     }
     let cluster_info = Arc::new(cluster_info);
-    let gossip_service = GossipService::new(&cluster_info.clone(), None, gossip_socket, &exit);
+    let gossip_service = GossipService::new(&cluster_info, None, gossip_socket, &exit);
     (gossip_service, ip_echo, cluster_info)
 }

@@ -721,7 +721,7 @@ mod tests {
         assert_eq!(poh_recorder.tick_height, 5);
         assert!(poh_recorder.working_bank.is_none());
         let mut num_entries = 0;
-        while let Ok(_) = entry_receiver.try_recv() {
+        while entry_receiver.try_recv().is_ok() {
            num_entries += 1;
         }
         assert_eq!(num_entries, 3);
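
`while let Ok(_) = rx.try_recv()` binds nothing, so clippy suggests the equivalent `.is_ok()` loop condition (redundant_pattern_matching). Runnable sketch with a plain mpsc channel instead of the real entry receiver:

    use std::sync::mpsc::channel;

    fn main() {
        let (sender, entry_receiver) = channel();
        for entry in 0..3 {
            sender.send(entry).unwrap();
        }
        drop(sender); // close the channel so try_recv() eventually errors

        let mut num_entries = 0;
        while entry_receiver.try_recv().is_ok() {
            num_entries += 1;
        }
        assert_eq!(num_entries, 3);
    }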
@@ -1409,7 +1409,7 @@ mod tests {
         for _ in 0..(bank.ticks_per_slot() * 2) {
             poh_recorder.tick();
         }
-        poh_recorder.set_bank(&bank.clone());
+        poh_recorder.set_bank(&bank);
         assert_eq!(Some(false), bank.check_hash_age(&genesis_hash, 1));
     }
 }

@@ -1289,11 +1289,11 @@ impl ReplayStage {
         let newly_voted_pubkeys = slot_vote_tracker
             .as_ref()
             .and_then(|slot_vote_tracker| slot_vote_tracker.write().unwrap().get_updates())
-            .unwrap_or_else(|| vec![]);
+            .unwrap_or_else(Vec::new);

         let cluster_slot_pubkeys = cluster_slot_pubkeys
             .map(|v| v.read().unwrap().keys().cloned().collect())
-            .unwrap_or_else(|| vec![]);
+            .unwrap_or_else(Vec::new);

         Self::update_fork_propagated_threshold_from_votes(
             progress,

@@ -431,7 +431,7 @@ impl RetransmitStage {
             epoch_schedule,
             duplicate_slots_reset_sender,
         };
-        let leader_schedule_cache = leader_schedule_cache.clone();
+        let leader_schedule_cache_clone = leader_schedule_cache.clone();
         let window_service = WindowService::new(
             blockstore,
             cluster_info.clone(),
@@ -440,7 +440,7 @@ impl RetransmitStage {
             repair_socket,
             exit,
             repair_info,
-            &leader_schedule_cache.clone(),
+            leader_schedule_cache,
             move |id, shred, working_bank, last_root| {
                 let is_connected = cfg
                     .as_ref()
@@ -449,7 +449,7 @@ impl RetransmitStage {
                 let rv = should_retransmit_and_persist(
                     shred,
                     working_bank,
-                    &leader_schedule_cache,
+                    &leader_schedule_cache_clone,
                     id,
                     last_root,
                     shred_version,

@@ -1171,7 +1171,7 @@ impl RpcSol for RpcSolImpl {
                     leader_schedule.get_slot_leaders().iter().enumerate()
                 {
                     let pubkey = pubkey.to_string();
-                    map.entry(pubkey).or_insert_with(|| vec![]).push(slot_index);
+                    map.entry(pubkey).or_insert_with(Vec::new).push(slot_index);
                 }
                 map
             },
@@ -1314,7 +1314,7 @@ impl RpcSol for RpcSolImpl {
         let faucet_addr = meta.config.faucet_addr.ok_or_else(Error::invalid_request)?;
         let pubkey = verify_pubkey(pubkey_str)?;

-        let blockhash = meta.bank(commitment.clone())?.confirmed_last_blockhash().0;
+        let blockhash = meta.bank(commitment)?.confirmed_last_blockhash().0;
         let transaction = request_airdrop_transaction(&faucet_addr, &pubkey, lamports, blockhash)
             .map_err(|err| {
                 info!("request_airdrop_transaction failed: {:?}", err);

@@ -387,7 +387,7 @@ mod tests {
     };

     fn process_transaction_and_notify(
-        bank_forks: &Arc<RwLock<BankForks>>,
+        bank_forks: &RwLock<BankForks>,
         tx: &Transaction,
         subscriptions: &RpcSubscriptions,
         current_slot: Slot,
@@ -921,13 +921,11 @@ mod tests {
         });

         // Process votes and check they were notified.
-        // FIX-ME-BETTER-LATER - clone below is required for testcase to pass
-        #[allow(clippy::redundant_clone)]
         ClusterInfoVoteListener::get_and_process_votes_for_tests(
             &votes_receiver,
             &vote_tracker,
             0,
-            rpc.subscriptions.clone(),
+            &rpc.subscriptions,
         )
         .unwrap();

@@ -115,7 +115,7 @@ pub struct ValidatorExit {
 }

 impl ValidatorExit {
-    pub fn register_exit(&mut self, exit: Box<dyn FnOnce() -> () + Send + Sync>) {
+    pub fn register_exit(&mut self, exit: Box<dyn FnOnce() + Send + Sync>) {
         self.exits.push(exit);
     }

@@ -125,7 +125,7 @@ fn run_insert<F>(
     metrics: &mut BlockstoreInsertionMetrics,
 ) -> Result<()>
 where
-    F: Fn(Shred) -> (),
+    F: Fn(Shred),
 {
     let timer = Duration::from_millis(200);
     let (mut shreds, mut repair_infos) = shred_receiver.recv_timeout(timer)?;
@@ -503,8 +503,8 @@ impl WindowService {

 fn should_exit_on_error<F, H>(e: Error, handle_timeout: &mut F, handle_error: &H) -> bool
 where
-    F: FnMut() -> (),
-    H: Fn() -> (),
+    F: FnMut(),
+    H: Fn(),
 {
     match e {
         Error::CrossbeamRecvTimeoutError(RecvTimeoutError::Disconnected) => true,