Clippy cleanup for all targets and nightly rust (also support 1.44.0) (#10445)

* address warnings from 'rustup run beta cargo clippy --workspace'

minor refactoring in:
- cli/src/cli.rs
- cli/src/offline/blockhash_query.rs
- logger/src/lib.rs
- runtime/src/accounts_db.rs

expect some performance improvement in AccountsDB::clean_accounts()
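
For context, a hedged sketch (ordinary Rust, not lifted from the diff) of the recurring patterns this commit fixes: dropping needless `.into_iter()` on ranges, replacing `|| vec![]` closures with `Vec::new`, and taking slices instead of `&Vec<T>`:

    use std::collections::HashMap;

    // Prefer `&[T]` over `&Vec<T>` in parameters (clippy's ptr_arg lint).
    fn sum(xs: &[u64]) -> u64 {
        xs.iter().sum()
    }

    fn main() {
        // A range is already an iterator, so `.into_iter()` is redundant:
        // `(0..4).into_iter().map(..)` becomes just `(0..4).map(..)`.
        let squares: Vec<u64> = (0..4u64).map(|i| i * i).collect();

        // `.or_insert_with(|| vec![])` becomes `.or_insert_with(Vec::new)`.
        let mut map: HashMap<&str, Vec<u64>> = HashMap::new();
        map.entry("squares").or_insert_with(Vec::new).push(squares[2]);

        assert_eq!(sum(&squares), 14); // &Vec<u64> coerces to &[u64]
    }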

* address warnings from 'rustup run beta cargo clippy --workspace --tests'

* address warnings from 'rustup run nightly cargo clippy --workspace --all-targets'

* rustfmt

* fix warning stragglers

* properly fix clippy warnings in test_vote_subscribe()
replace ref-to-Arc with plain ref parameters where the Arc is not cloned
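
For illustration, a minimal sketch of that pattern with a stand-in type (not the actual core type): when a function only reads through the Arc and never clones or stores it, a plain reference parameter is enough, and existing call sites still compile via deref coercion:

    use std::sync::Arc;

    struct Subscriptions; // stand-in type for illustration

    // Before: forces every caller to hold an Arc, even though the
    // function never clones it.
    fn notify_before(subs: &Arc<Subscriptions>) {
        let _ = subs;
    }

    // After: a plain reference suffices; `&Arc<T>` coerces to `&T`.
    fn notify_after(subs: &Subscriptions) {
        let _ = subs;
    }

    fn main() {
        let subs = Arc::new(Subscriptions);
        notify_before(&subs);
        notify_after(&subs); // deref coercion: &Arc<Subscriptions> -> &Subscriptions
    }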

* Remove lock around JsonRpcRequestProcessor (#10417)

automerge

* make ancestors parameter optional to avoid forcing construction of empty hash maps
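
A minimal sketch of the idea with hypothetical names (not the real AccountsDB signature): accepting `Option<&HashMap<..>>` lets callers that have no ancestors pass `None` instead of building an empty map just to satisfy the parameter:

    use std::collections::HashMap;

    type Slot = u64;

    // Before (hypothetical signature): callers had to construct a map
    // even when there were no ancestors to pass.
    fn load_before(ancestors: &HashMap<Slot, usize>) -> bool {
        !ancestors.is_empty()
    }

    // After (hypothetical signature): `None` means "no ancestors".
    fn load_after(ancestors: Option<&HashMap<Slot, usize>>) -> bool {
        ancestors.map_or(false, |a| !a.is_empty())
    }

    fn main() {
        assert!(!load_before(&HashMap::new())); // empty map built only to satisfy the API
        assert!(!load_after(None)); // no allocation needed
    }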

Co-authored-by: Greg Fitzgerald <greg@solana.com>
Author: Kristofer Peterson
Date: 2020-06-09 01:38:14 +01:00
Committed by: GitHub
Parent: fa3a6c5584
Commit: e23340d89e
63 changed files with 258 additions and 308 deletions


@@ -109,7 +109,6 @@ fn make_accounts_txs(txes: usize, mint_keypair: &Keypair, hash: Hash) -> Vec<Tra
fn make_programs_txs(txes: usize, hash: Hash) -> Vec<Transaction> {
let progs = 4;
(0..txes)
.into_iter()
.map(|_| {
let mut instructions = vec![];
let from_key = Keypair::new();
@@ -181,7 +180,7 @@ fn bench_banking(bencher: &mut Bencher, tx_type: TransactionType) {
assert!(r.is_ok(), "sanity parallel execution");
}
bank.clear_signatures();
let verified: Vec<_> = to_packets_chunked(&transactions.clone(), PACKETS_PER_BATCH);
let verified: Vec<_> = to_packets_chunked(&transactions, PACKETS_PER_BATCH);
let ledger_path = get_tmp_ledger_path!();
{
let blockstore = Arc::new(
@@ -207,7 +206,7 @@ fn bench_banking(bencher: &mut Bencher, tx_type: TransactionType) {
// If it is dropped before poh_service, then poh_service will error when
// calling send() on the channel.
let signal_receiver = Arc::new(signal_receiver);
let signal_receiver2 = signal_receiver.clone();
let signal_receiver2 = signal_receiver;
bencher.iter(move || {
let now = Instant::now();
let mut sent = 0;
@@ -262,7 +261,7 @@ fn simulate_process_entries(
mint_keypair: &Keypair,
mut tx_vector: Vec<Transaction>,
genesis_config: &GenesisConfig,
keypairs: &Vec<Keypair>,
keypairs: &[Keypair],
initial_lamports: u64,
num_accounts: usize,
) {
@@ -288,7 +287,7 @@ fn simulate_process_entries(
hash: next_hash(&bank.last_blockhash(), 1, &tx_vector),
transactions: tx_vector,
};
process_entries(&bank, &vec![entry], randomize_txs, None).unwrap();
process_entries(&bank, &[entry], randomize_txs, None).unwrap();
}
fn bench_process_entries(randomize_txs: bool, bencher: &mut Bencher) {


@@ -1,6 +1,4 @@
#![feature(test)]
use rand;
extern crate solana_ledger;
extern crate test;


@@ -22,7 +22,7 @@ fn broadcast_shreds_bench(bencher: &mut Bencher) {
solana_logger::setup();
let leader_pubkey = Pubkey::new_rand();
let leader_info = Node::new_localhost_with_pubkey(&leader_pubkey);
let cluster_info = ClusterInfo::new_with_invalid_keypair(leader_info.info.clone());
let cluster_info = ClusterInfo::new_with_invalid_keypair(leader_info.info);
let socket = UdpSocket::bind("0.0.0.0:0").unwrap();
const NUM_SHREDS: usize = 32;
@@ -37,7 +37,7 @@ fn broadcast_shreds_bench(bencher: &mut Bencher) {
}
let stakes = Arc::new(stakes);
let cluster_info = Arc::new(cluster_info);
let (peers, peers_and_stakes) = get_broadcast_peers(&cluster_info, Some(stakes.clone()));
let (peers, peers_and_stakes) = get_broadcast_peers(&cluster_info, Some(stakes));
let shreds = Arc::new(shreds);
let last_datapoint = Arc::new(AtomicU64::new(0));
bencher.iter(move || {


@@ -14,7 +14,6 @@ const NUM_ENTRIES: usize = 800;
fn bench_poh_verify_ticks(bencher: &mut Bencher) {
let zero = Hash::default();
let mut cur_hash = hash(&zero.as_ref());
let start = *&cur_hash;
let mut ticks: Vec<Entry> = Vec::with_capacity(NUM_ENTRIES);
for _ in 0..NUM_ENTRIES {
@@ -22,7 +21,7 @@ fn bench_poh_verify_ticks(bencher: &mut Bencher) {
}
bencher.iter(|| {
ticks.verify(&start);
ticks.verify(&cur_hash);
})
}
@@ -30,7 +29,6 @@ fn bench_poh_verify_ticks(bencher: &mut Bencher) {
fn bench_poh_verify_transaction_entries(bencher: &mut Bencher) {
let zero = Hash::default();
let mut cur_hash = hash(&zero.as_ref());
let start = *&cur_hash;
let keypair1 = Keypair::new();
let pubkey1 = keypair1.pubkey();
@@ -42,6 +40,6 @@ fn bench_poh_verify_transaction_entries(bencher: &mut Bencher) {
}
bencher.iter(|| {
ticks.verify(&start);
ticks.verify(&cur_hash);
})
}


@@ -65,7 +65,10 @@ fn bench_retransmitter(bencher: &mut Bencher) {
let tx = test_tx();
const NUM_PACKETS: usize = 50;
let chunk_size = NUM_PACKETS / (4 * NUM_THREADS);
let batches = to_packets_chunked(&vec![tx; NUM_PACKETS], chunk_size);
let batches = to_packets_chunked(
&std::iter::repeat(tx).take(NUM_PACKETS).collect::<Vec<_>>(),
chunk_size,
);
info!("batches: {}", batches.len());
let retransmitter_handles = retransmitter(
@@ -80,7 +83,6 @@ fn bench_retransmitter(bencher: &mut Bencher) {
bencher.iter(move || {
let peer_sockets1 = peer_sockets.clone();
let handles: Vec<_> = (0..NUM_PEERS)
.into_iter()
.map(|p| {
let peer_sockets2 = peer_sockets1.clone();
let total2 = total.clone();


@@ -37,16 +37,14 @@ fn bench_sigverify_stage(bencher: &mut Bencher) {
let from_keypair = Keypair::new();
let to_keypair = Keypair::new();
let txs: Vec<_> = (0..len)
.into_iter()
.map(|_| {
let amount = thread_rng().gen();
let tx = system_transaction::transfer(
system_transaction::transfer(
&from_keypair,
&to_keypair.pubkey(),
amount,
Hash::default(),
);
tx
)
})
.collect();
to_packets_chunked(&txs, chunk_size)


@@ -438,7 +438,7 @@ impl ClusterInfo {
pub fn update_contact_info<F>(&self, modify: F)
where
F: FnOnce(&mut ContactInfo) -> (),
F: FnOnce(&mut ContactInfo),
{
let my_id = self.id();
modify(&mut self.my_contact_info.write().unwrap());
@@ -1917,19 +1917,18 @@ impl ClusterInfo {
.into_iter()
.filter_map(|(from, prune_set)| {
inc_new_counter_debug!("cluster_info-push_message-prunes", prune_set.len());
me.lookup_contact_info(&from, |ci| ci.clone())
.and_then(|ci| {
let mut prune_msg = PruneData {
pubkey: self_id,
prunes: prune_set.into_iter().collect(),
signature: Signature::default(),
destination: from,
wallclock: timestamp(),
};
prune_msg.sign(&me.keypair);
let rsp = Protocol::PruneMessage(self_id, prune_msg);
Some((ci.gossip, rsp))
})
me.lookup_contact_info(&from, |ci| ci.clone()).map(|ci| {
let mut prune_msg = PruneData {
pubkey: self_id,
prunes: prune_set.into_iter().collect(),
signature: Signature::default(),
destination: from,
wallclock: timestamp(),
};
prune_msg.sign(&me.keypair);
let rsp = Protocol::PruneMessage(self_id, prune_msg);
(ci.gossip, rsp)
})
})
.collect();
if rsp.is_empty() {
@@ -2932,7 +2931,7 @@ mod tests {
assert_eq!(slots.len(), 1);
assert!(since.is_some());
let (slots, since2) = cluster_info.get_epoch_slots_since(since.clone());
let (slots, since2) = cluster_info.get_epoch_slots_since(since);
assert!(slots.is_empty());
assert_eq!(since2, since);
}


@@ -385,7 +385,7 @@ impl ClusterInfoVoteListener {
&vote_txs_receiver,
&vote_tracker,
root_bank.slot(),
subscriptions.clone(),
&subscriptions,
epoch_stakes,
) {
match e {
@@ -404,9 +404,9 @@ impl ClusterInfoVoteListener {
#[cfg(test)]
pub fn get_and_process_votes_for_tests(
vote_txs_receiver: &VerifiedVoteTransactionsReceiver,
vote_tracker: &Arc<VoteTracker>,
vote_tracker: &VoteTracker,
last_root: Slot,
subscriptions: Arc<RpcSubscriptions>,
subscriptions: &RpcSubscriptions,
) -> Result<()> {
Self::get_and_process_votes(
vote_txs_receiver,
@@ -419,9 +419,9 @@ impl ClusterInfoVoteListener {
fn get_and_process_votes(
vote_txs_receiver: &VerifiedVoteTransactionsReceiver,
vote_tracker: &Arc<VoteTracker>,
vote_tracker: &VoteTracker,
last_root: Slot,
subscriptions: Arc<RpcSubscriptions>,
subscriptions: &RpcSubscriptions,
epoch_stakes: Option<&EpochStakes>,
) -> Result<()> {
let timer = Duration::from_millis(200);
@@ -443,7 +443,7 @@ impl ClusterInfoVoteListener {
vote_tracker: &VoteTracker,
vote_txs: Vec<Transaction>,
root: Slot,
subscriptions: Arc<RpcSubscriptions>,
subscriptions: &RpcSubscriptions,
epoch_stakes: Option<&EpochStakes>,
) {
let mut diff: HashMap<Slot, HashSet<Arc<Pubkey>>> = HashMap::new();
@@ -574,7 +574,7 @@ impl ClusterInfoVoteListener {
fn notify_for_stake_change(
current_stake: u64,
previous_stake: u64,
subscriptions: &Arc<RpcSubscriptions>,
subscriptions: &RpcSubscriptions,
epoch_stakes: Option<&EpochStakes>,
slot: Slot,
) {
@@ -804,7 +804,7 @@ mod tests {
&votes_receiver,
&vote_tracker,
0,
subscriptions,
&subscriptions,
None,
)
.unwrap();
@@ -854,7 +854,7 @@ mod tests {
&votes_receiver,
&vote_tracker,
0,
subscriptions,
&subscriptions,
None,
)
.unwrap();
@@ -974,13 +974,7 @@ mod tests {
&validator0_keypairs.vote_keypair,
)];
ClusterInfoVoteListener::process_votes(
&vote_tracker,
vote_tx,
0,
subscriptions.clone(),
None,
);
ClusterInfoVoteListener::process_votes(&vote_tracker, vote_tx, 0, &subscriptions, None);
let ref_count = Arc::strong_count(
&vote_tracker
.keys
@@ -1031,7 +1025,7 @@ mod tests {
})
.collect();
ClusterInfoVoteListener::process_votes(&vote_tracker, vote_txs, 0, subscriptions, None);
ClusterInfoVoteListener::process_votes(&vote_tracker, vote_txs, 0, &subscriptions, None);
let ref_count = Arc::strong_count(
&vote_tracker


@@ -165,7 +165,7 @@ impl Tower {
let key = all_pubkeys.get_or_insert(&key);
lockout_intervals
.entry(vote.expiration_slot())
.or_insert_with(|| vec![])
.or_insert_with(Vec::new)
.push((vote.slot, key));
}


@@ -152,7 +152,7 @@ impl CrdsGossipPush {
let new_value = crds.new_versioned(now, value);
let value_hash = new_value.value_hash;
if let Some((_, ref mut received_set)) = self.received_cache.get_mut(&value_hash) {
received_set.insert(from.clone());
received_set.insert(*from);
return Err(CrdsGossipError::PushMessageAlreadyReceived);
}
let old = crds.insert_versioned(new_value);
@@ -160,7 +160,7 @@ impl CrdsGossipPush {
return Err(CrdsGossipError::PushMessageOldVersion);
}
let mut received_set = HashSet::new();
received_set.insert(from.clone());
received_set.insert(*from);
self.push_messages.insert(label, value_hash);
self.received_cache.insert(value_hash, (now, received_set));
Ok(old.ok().and_then(|opt| opt))


@@ -459,7 +459,7 @@ mod test {
fn test_keys_and_values() {
let v = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::default()));
assert_eq!(v.wallclock(), 0);
let key = v.clone().contact_info().unwrap().id;
let key = v.contact_info().unwrap().id;
assert_eq!(v.label(), CrdsValueLabel::ContactInfo(key));
let v = CrdsValue::new_unsigned(CrdsData::Vote(
@@ -467,7 +467,7 @@ mod test {
Vote::new(&Pubkey::default(), test_tx(), 0),
));
assert_eq!(v.wallclock(), 0);
let key = v.clone().vote().unwrap().from;
let key = v.vote().unwrap().from;
assert_eq!(v.label(), CrdsValueLabel::Vote(0, key));
let v = CrdsValue::new_unsigned(CrdsData::LowestSlot(
@@ -475,7 +475,7 @@ mod test {
LowestSlot::new(Pubkey::default(), 0, 0),
));
assert_eq!(v.wallclock(), 0);
let key = v.clone().lowest_slot().unwrap().from;
let key = v.lowest_slot().unwrap().from;
assert_eq!(v.label(), CrdsValueLabel::LowestSlot(key));
}


@@ -262,7 +262,7 @@ fn make_gossip_node(
cluster_info.set_entrypoint(ContactInfo::new_gossip_entry_point(entrypoint));
}
let cluster_info = Arc::new(cluster_info);
let gossip_service = GossipService::new(&cluster_info.clone(), None, gossip_socket, &exit);
let gossip_service = GossipService::new(&cluster_info, None, gossip_socket, &exit);
(gossip_service, ip_echo, cluster_info)
}


@@ -721,7 +721,7 @@ mod tests {
assert_eq!(poh_recorder.tick_height, 5);
assert!(poh_recorder.working_bank.is_none());
let mut num_entries = 0;
while let Ok(_) = entry_receiver.try_recv() {
while entry_receiver.try_recv().is_ok() {
num_entries += 1;
}
assert_eq!(num_entries, 3);
@@ -1409,7 +1409,7 @@ mod tests {
for _ in 0..(bank.ticks_per_slot() * 2) {
poh_recorder.tick();
}
poh_recorder.set_bank(&bank.clone());
poh_recorder.set_bank(&bank);
assert_eq!(Some(false), bank.check_hash_age(&genesis_hash, 1));
}
}


@@ -1289,11 +1289,11 @@ impl ReplayStage {
let newly_voted_pubkeys = slot_vote_tracker
.as_ref()
.and_then(|slot_vote_tracker| slot_vote_tracker.write().unwrap().get_updates())
.unwrap_or_else(|| vec![]);
.unwrap_or_else(Vec::new);
let cluster_slot_pubkeys = cluster_slot_pubkeys
.map(|v| v.read().unwrap().keys().cloned().collect())
.unwrap_or_else(|| vec![]);
.unwrap_or_else(Vec::new);
Self::update_fork_propagated_threshold_from_votes(
progress,


@@ -431,7 +431,7 @@ impl RetransmitStage {
epoch_schedule,
duplicate_slots_reset_sender,
};
let leader_schedule_cache = leader_schedule_cache.clone();
let leader_schedule_cache_clone = leader_schedule_cache.clone();
let window_service = WindowService::new(
blockstore,
cluster_info.clone(),
@@ -440,7 +440,7 @@ impl RetransmitStage {
repair_socket,
exit,
repair_info,
&leader_schedule_cache.clone(),
leader_schedule_cache,
move |id, shred, working_bank, last_root| {
let is_connected = cfg
.as_ref()
@@ -449,7 +449,7 @@ impl RetransmitStage {
let rv = should_retransmit_and_persist(
shred,
working_bank,
&leader_schedule_cache,
&leader_schedule_cache_clone,
id,
last_root,
shred_version,


@@ -1171,7 +1171,7 @@ impl RpcSol for RpcSolImpl {
leader_schedule.get_slot_leaders().iter().enumerate()
{
let pubkey = pubkey.to_string();
map.entry(pubkey).or_insert_with(|| vec![]).push(slot_index);
map.entry(pubkey).or_insert_with(Vec::new).push(slot_index);
}
map
},
@@ -1314,7 +1314,7 @@ impl RpcSol for RpcSolImpl {
let faucet_addr = meta.config.faucet_addr.ok_or_else(Error::invalid_request)?;
let pubkey = verify_pubkey(pubkey_str)?;
let blockhash = meta.bank(commitment.clone())?.confirmed_last_blockhash().0;
let blockhash = meta.bank(commitment)?.confirmed_last_blockhash().0;
let transaction = request_airdrop_transaction(&faucet_addr, &pubkey, lamports, blockhash)
.map_err(|err| {
info!("request_airdrop_transaction failed: {:?}", err);


@@ -387,7 +387,7 @@ mod tests {
};
fn process_transaction_and_notify(
bank_forks: &Arc<RwLock<BankForks>>,
bank_forks: &RwLock<BankForks>,
tx: &Transaction,
subscriptions: &RpcSubscriptions,
current_slot: Slot,
@@ -921,13 +921,11 @@ mod tests {
});
// Process votes and check they were notified.
// FIX-ME-BETTER-LATER - clone below is required for testcase to pass
#[allow(clippy::redundant_clone)]
ClusterInfoVoteListener::get_and_process_votes_for_tests(
&votes_receiver,
&vote_tracker,
0,
rpc.subscriptions.clone(),
&rpc.subscriptions,
)
.unwrap();


@@ -115,7 +115,7 @@ pub struct ValidatorExit {
}
impl ValidatorExit {
pub fn register_exit(&mut self, exit: Box<dyn FnOnce() -> () + Send + Sync>) {
pub fn register_exit(&mut self, exit: Box<dyn FnOnce() + Send + Sync>) {
self.exits.push(exit);
}


@@ -125,7 +125,7 @@ fn run_insert<F>(
metrics: &mut BlockstoreInsertionMetrics,
) -> Result<()>
where
F: Fn(Shred) -> (),
F: Fn(Shred),
{
let timer = Duration::from_millis(200);
let (mut shreds, mut repair_infos) = shred_receiver.recv_timeout(timer)?;
@@ -503,8 +503,8 @@ impl WindowService {
fn should_exit_on_error<F, H>(e: Error, handle_timeout: &mut F, handle_error: &H) -> bool
where
F: FnMut() -> (),
H: Fn() -> (),
F: FnMut(),
H: Fn(),
{
match e {
Error::CrossbeamRecvTimeoutError(RecvTimeoutError::Disconnected) => true,


@@ -33,7 +33,7 @@ fn test_node(exit: &Arc<AtomicBool>) -> (Arc<ClusterInfo>, GossipService, UdpSoc
/// tests that actually use this function are below
fn run_gossip_topo<F>(num: usize, topo: F)
where
F: Fn(&Vec<(Arc<ClusterInfo>, GossipService, UdpSocket)>) -> (),
F: Fn(&Vec<(Arc<ClusterInfo>, GossipService, UdpSocket)>),
{
let exit = Arc::new(AtomicBool::new(false));
let listen: Vec<_> = (0..num).map(|_| test_node(&exit)).collect();