diff --git a/banking-bench/src/main.rs b/banking-bench/src/main.rs index 824237da88..ddbacb7572 100644 --- a/banking-bench/src/main.rs +++ b/banking-bench/src/main.rs @@ -209,7 +209,7 @@ fn main() { bank.clear_signatures(); } - let mut verified: Vec<_> = to_packets_chunked(&transactions.clone(), packets_per_chunk); + let mut verified: Vec<_> = to_packets_chunked(&transactions, packets_per_chunk); let ledger_path = get_tmp_ledger_path!(); { let blockstore = Arc::new( diff --git a/cli/src/cli.rs b/cli/src/cli.rs index d86e3f2e1c..f78fd50218 100644 --- a/cli/src/cli.rs +++ b/cli/src/cli.rs @@ -754,25 +754,18 @@ pub fn parse_command( ("airdrop", Some(matches)) => { let faucet_port = matches .value_of("faucet_port") - .unwrap() + .ok_or_else(|| CliError::BadParameter("Missing faucet port".to_string()))? .parse() - .or_else(|err| { - Err(CliError::BadParameter(format!( - "Invalid faucet port: {}", - err - ))) - })?; + .map_err(|err| CliError::BadParameter(format!("Invalid faucet port: {}", err)))?; - let faucet_host = if let Some(faucet_host) = matches.value_of("faucet_host") { - Some(solana_net_utils::parse_host(faucet_host).or_else(|err| { - Err(CliError::BadParameter(format!( - "Invalid faucet host: {}", - err - ))) - })?) - } else { - None - }; + let faucet_host = matches + .value_of("faucet_host") + .map(|faucet_host| { + solana_net_utils::parse_host(faucet_host).map_err(|err| { + CliError::BadParameter(format!("Invalid faucet host: {}", err)) + }) + }) + .transpose()?; let pubkey = pubkey_of_signer(matches, "to", wallet_manager)?; let signers = if pubkey.is_some() { vec![] diff --git a/cli/src/cluster_query.rs b/cli/src/cluster_query.rs index 274f327b91..75ad5ee6b3 100644 --- a/cli/src/cluster_query.rs +++ b/cli/src/cluster_query.rs @@ -659,7 +659,7 @@ pub fn process_get_epoch_info( commitment_config: CommitmentConfig, ) -> ProcessResult { let epoch_info: CliEpochInfo = rpc_client - .get_epoch_info_with_commitment(commitment_config.clone())? + .get_epoch_info_with_commitment(commitment_config)? 
.into(); Ok(config.output_format.formatted_string(&epoch_info)) } @@ -673,7 +673,7 @@ pub fn process_get_slot( rpc_client: &RpcClient, commitment_config: CommitmentConfig, ) -> ProcessResult { - let slot = rpc_client.get_slot_with_commitment(commitment_config.clone())?; + let slot = rpc_client.get_slot_with_commitment(commitment_config)?; Ok(slot.to_string()) } @@ -681,7 +681,7 @@ pub fn process_get_epoch( rpc_client: &RpcClient, commitment_config: CommitmentConfig, ) -> ProcessResult { - let epoch_info = rpc_client.get_epoch_info_with_commitment(commitment_config.clone())?; + let epoch_info = rpc_client.get_epoch_info_with_commitment(commitment_config)?; Ok(epoch_info.epoch.to_string()) } @@ -868,7 +868,7 @@ pub fn process_supply( commitment_config: CommitmentConfig, print_accounts: bool, ) -> ProcessResult { - let supply_response = rpc_client.supply_with_commitment(commitment_config.clone())?; + let supply_response = rpc_client.supply_with_commitment(commitment_config)?; let mut supply: CliSupply = supply_response.value.into(); supply.print_accounts = print_accounts; Ok(config.output_format.formatted_string(&supply)) @@ -878,7 +878,7 @@ pub fn process_total_supply( rpc_client: &RpcClient, commitment_config: CommitmentConfig, ) -> ProcessResult { - let total_supply = rpc_client.total_supply_with_commitment(commitment_config.clone())?; + let total_supply = rpc_client.total_supply_with_commitment(commitment_config)?; Ok(format!("{} SOL", lamports_to_sol(total_supply))) } @@ -886,8 +886,7 @@ pub fn process_get_transaction_count( rpc_client: &RpcClient, commitment_config: CommitmentConfig, ) -> ProcessResult { - let transaction_count = - rpc_client.get_transaction_count_with_commitment(commitment_config.clone())?; + let transaction_count = rpc_client.get_transaction_count_with_commitment(commitment_config)?; Ok(transaction_count.to_string()) } @@ -952,10 +951,8 @@ pub fn process_ping( Ok(signature) => { let transaction_sent = Instant::now(); loop { - let signature_status = rpc_client.get_signature_status_with_commitment( - &signature, - commitment_config.clone(), - )?; + let signature_status = rpc_client + .get_signature_status_with_commitment(&signature, commitment_config)?; let elapsed_time = Instant::now().duration_since(transaction_sent); if let Some(transaction_status) = signature_status { match transaction_status { diff --git a/cli/src/offline/blockhash_query.rs b/cli/src/offline/blockhash_query.rs index b6ee58a57d..10d39fe671 100644 --- a/cli/src/offline/blockhash_query.rs +++ b/cli/src/offline/blockhash_query.rs @@ -35,16 +35,11 @@ impl Source { Ok(res) } Self::NonceAccount(ref pubkey) => { - let res = nonce::get_account(rpc_client, pubkey) - .and_then(|ref a| nonce::data_from_account(a)) - .and_then(|d| { - if d.blockhash == *blockhash { - Ok(Some(d.fee_calculator)) - } else { - Ok(None) - } - })?; - Ok(res) + let res = nonce::get_account(rpc_client, pubkey)?; + let res = nonce::data_from_account(&res)?; + Ok(Some(res) + .filter(|d| d.blockhash == *blockhash) + .map(|d| d.fee_calculator)) } } } diff --git a/client/src/rpc_client.rs b/client/src/rpc_client.rs index 58c86bb6ca..c3f7754652 100644 --- a/client/src/rpc_client.rs +++ b/client/src/rpc_client.rs @@ -664,7 +664,7 @@ impl RpcClient { ) -> ClientResult { let now = Instant::now(); loop { - match self.get_balance_with_commitment(&pubkey, commitment_config.clone()) { + match self.get_balance_with_commitment(&pubkey, commitment_config) { Ok(bal) => { return Ok(bal.value); } @@ -699,8 +699,7 @@ impl RpcClient { ) -> Option { const 
LAST: usize = 30; for run in 0..LAST { - let balance_result = - self.poll_get_balance_with_commitment(pubkey, commitment_config.clone()); + let balance_result = self.poll_get_balance_with_commitment(pubkey, commitment_config); if expected_balance.is_none() { return balance_result.ok(); } @@ -734,7 +733,7 @@ impl RpcClient { let now = Instant::now(); loop { if let Ok(Some(_)) = - self.get_signature_status_with_commitment(&signature, commitment_config.clone()) + self.get_signature_status_with_commitment(&signature, commitment_config) { break; } diff --git a/client/src/rpc_request.rs b/client/src/rpc_request.rs index 253b5aef53..d637aaf954 100644 --- a/client/src/rpc_request.rs +++ b/client/src/rpc_request.rs @@ -136,12 +136,12 @@ mod tests { fn test_build_request_json() { let test_request = RpcRequest::GetAccountInfo; let addr = json!("deadbeefXjn8o3yroDHxUtKsZZgoy4GPkPPXfouKNHhx"); - let request = test_request.build_request_json(1, json!([addr.clone()])); + let request = test_request.build_request_json(1, json!([addr])); assert_eq!(request["method"], "getAccountInfo"); assert_eq!(request["params"], json!([addr])); let test_request = RpcRequest::GetBalance; - let request = test_request.build_request_json(1, json!([addr.clone()])); + let request = test_request.build_request_json(1, json!([addr])); assert_eq!(request["method"], "getBalance"); let test_request = RpcRequest::GetEpochInfo; @@ -186,13 +186,12 @@ mod tests { // Test request with CommitmentConfig and no params let test_request = RpcRequest::GetRecentBlockhash; - let request = test_request.build_request_json(1, json!([commitment_config.clone()])); + let request = test_request.build_request_json(1, json!([commitment_config])); assert_eq!(request["params"], json!([commitment_config.clone()])); // Test request with CommitmentConfig and params let test_request = RpcRequest::GetBalance; - let request = - test_request.build_request_json(1, json!([addr.clone(), commitment_config.clone()])); + let request = test_request.build_request_json(1, json!([addr, commitment_config])); assert_eq!(request["params"], json!([addr, commitment_config])); } } diff --git a/core/benches/banking_stage.rs b/core/benches/banking_stage.rs index 9336b37049..3e2b09fee7 100644 --- a/core/benches/banking_stage.rs +++ b/core/benches/banking_stage.rs @@ -109,7 +109,6 @@ fn make_accounts_txs(txes: usize, mint_keypair: &Keypair, hash: Hash) -> Vec Vec { let progs = 4; (0..txes) - .into_iter() .map(|_| { let mut instructions = vec![]; let from_key = Keypair::new(); @@ -181,7 +180,7 @@ fn bench_banking(bencher: &mut Bencher, tx_type: TransactionType) { assert!(r.is_ok(), "sanity parallel execution"); } bank.clear_signatures(); - let verified: Vec<_> = to_packets_chunked(&transactions.clone(), PACKETS_PER_BATCH); + let verified: Vec<_> = to_packets_chunked(&transactions, PACKETS_PER_BATCH); let ledger_path = get_tmp_ledger_path!(); { let blockstore = Arc::new( @@ -207,7 +206,7 @@ fn bench_banking(bencher: &mut Bencher, tx_type: TransactionType) { // If it is dropped before poh_service, then poh_service will error when // calling send() on the channel. 
let signal_receiver = Arc::new(signal_receiver); - let signal_receiver2 = signal_receiver.clone(); + let signal_receiver2 = signal_receiver; bencher.iter(move || { let now = Instant::now(); let mut sent = 0; @@ -262,7 +261,7 @@ fn simulate_process_entries( mint_keypair: &Keypair, mut tx_vector: Vec, genesis_config: &GenesisConfig, - keypairs: &Vec, + keypairs: &[Keypair], initial_lamports: u64, num_accounts: usize, ) { @@ -288,7 +287,7 @@ fn simulate_process_entries( hash: next_hash(&bank.last_blockhash(), 1, &tx_vector), transactions: tx_vector, }; - process_entries(&bank, &vec![entry], randomize_txs, None).unwrap(); + process_entries(&bank, &[entry], randomize_txs, None).unwrap(); } fn bench_process_entries(randomize_txs: bool, bencher: &mut Bencher) { diff --git a/core/benches/blockstore.rs b/core/benches/blockstore.rs index aac755a952..99daf07695 100644 --- a/core/benches/blockstore.rs +++ b/core/benches/blockstore.rs @@ -1,6 +1,4 @@ #![feature(test)] -use rand; - extern crate solana_ledger; extern crate test; diff --git a/core/benches/cluster_info.rs b/core/benches/cluster_info.rs index 3e7cbb3abb..697db76e81 100644 --- a/core/benches/cluster_info.rs +++ b/core/benches/cluster_info.rs @@ -22,7 +22,7 @@ fn broadcast_shreds_bench(bencher: &mut Bencher) { solana_logger::setup(); let leader_pubkey = Pubkey::new_rand(); let leader_info = Node::new_localhost_with_pubkey(&leader_pubkey); - let cluster_info = ClusterInfo::new_with_invalid_keypair(leader_info.info.clone()); + let cluster_info = ClusterInfo::new_with_invalid_keypair(leader_info.info); let socket = UdpSocket::bind("0.0.0.0:0").unwrap(); const NUM_SHREDS: usize = 32; @@ -37,7 +37,7 @@ fn broadcast_shreds_bench(bencher: &mut Bencher) { } let stakes = Arc::new(stakes); let cluster_info = Arc::new(cluster_info); - let (peers, peers_and_stakes) = get_broadcast_peers(&cluster_info, Some(stakes.clone())); + let (peers, peers_and_stakes) = get_broadcast_peers(&cluster_info, Some(stakes)); let shreds = Arc::new(shreds); let last_datapoint = Arc::new(AtomicU64::new(0)); bencher.iter(move || { diff --git a/core/benches/poh_verify.rs b/core/benches/poh_verify.rs index ba8ab239cc..f8d3d6bdd1 100644 --- a/core/benches/poh_verify.rs +++ b/core/benches/poh_verify.rs @@ -14,7 +14,6 @@ const NUM_ENTRIES: usize = 800; fn bench_poh_verify_ticks(bencher: &mut Bencher) { let zero = Hash::default(); let mut cur_hash = hash(&zero.as_ref()); - let start = *&cur_hash; let mut ticks: Vec = Vec::with_capacity(NUM_ENTRIES); for _ in 0..NUM_ENTRIES { @@ -22,7 +21,7 @@ fn bench_poh_verify_ticks(bencher: &mut Bencher) { } bencher.iter(|| { - ticks.verify(&start); + ticks.verify(&cur_hash); }) } @@ -30,7 +29,6 @@ fn bench_poh_verify_ticks(bencher: &mut Bencher) { fn bench_poh_verify_transaction_entries(bencher: &mut Bencher) { let zero = Hash::default(); let mut cur_hash = hash(&zero.as_ref()); - let start = *&cur_hash; let keypair1 = Keypair::new(); let pubkey1 = keypair1.pubkey(); @@ -42,6 +40,6 @@ fn bench_poh_verify_transaction_entries(bencher: &mut Bencher) { } bencher.iter(|| { - ticks.verify(&start); + ticks.verify(&cur_hash); }) } diff --git a/core/benches/retransmit_stage.rs b/core/benches/retransmit_stage.rs index 699100dceb..cee923b382 100644 --- a/core/benches/retransmit_stage.rs +++ b/core/benches/retransmit_stage.rs @@ -65,7 +65,10 @@ fn bench_retransmitter(bencher: &mut Bencher) { let tx = test_tx(); const NUM_PACKETS: usize = 50; let chunk_size = NUM_PACKETS / (4 * NUM_THREADS); - let batches = to_packets_chunked(&vec![tx; NUM_PACKETS], 
chunk_size);
+    let batches = to_packets_chunked(
+        &std::iter::repeat(tx).take(NUM_PACKETS).collect::<Vec<_>>(),
+        chunk_size,
+    );
     info!("batches: {}", batches.len());
     let retransmitter_handles = retransmitter(
@@ -80,7 +83,6 @@ fn bench_retransmitter(bencher: &mut Bencher) {
     bencher.iter(move || {
         let peer_sockets1 = peer_sockets.clone();
         let handles: Vec<_> = (0..NUM_PEERS)
-            .into_iter()
             .map(|p| {
                 let peer_sockets2 = peer_sockets1.clone();
                 let total2 = total.clone();
diff --git a/core/benches/sigverify_stage.rs b/core/benches/sigverify_stage.rs
index 2974f2a754..435a420179 100644
--- a/core/benches/sigverify_stage.rs
+++ b/core/benches/sigverify_stage.rs
@@ -37,16 +37,14 @@ fn bench_sigverify_stage(bencher: &mut Bencher) {
     let from_keypair = Keypair::new();
     let to_keypair = Keypair::new();
     let txs: Vec<_> = (0..len)
-        .into_iter()
         .map(|_| {
             let amount = thread_rng().gen();
-            let tx = system_transaction::transfer(
+            system_transaction::transfer(
                 &from_keypair,
                 &to_keypair.pubkey(),
                 amount,
                 Hash::default(),
-            );
-            tx
+            )
         })
         .collect();
     to_packets_chunked(&txs, chunk_size)
diff --git a/core/src/cluster_info.rs b/core/src/cluster_info.rs
index f894b4cb79..a10d621d63 100644
--- a/core/src/cluster_info.rs
+++ b/core/src/cluster_info.rs
@@ -438,7 +438,7 @@ impl ClusterInfo {
     pub fn update_contact_info<F>(&self, modify: F)
     where
-        F: FnOnce(&mut ContactInfo) -> (),
+        F: FnOnce(&mut ContactInfo),
     {
         let my_id = self.id();
         modify(&mut self.my_contact_info.write().unwrap());
@@ -1917,19 +1917,18 @@ impl ClusterInfo {
             .into_iter()
             .filter_map(|(from, prune_set)| {
                 inc_new_counter_debug!("cluster_info-push_message-prunes", prune_set.len());
-                me.lookup_contact_info(&from, |ci| ci.clone())
-                    .and_then(|ci| {
-                        let mut prune_msg = PruneData {
-                            pubkey: self_id,
-                            prunes: prune_set.into_iter().collect(),
-                            signature: Signature::default(),
-                            destination: from,
-                            wallclock: timestamp(),
-                        };
-                        prune_msg.sign(&me.keypair);
-                        let rsp = Protocol::PruneMessage(self_id, prune_msg);
-                        Some((ci.gossip, rsp))
-                    })
+                me.lookup_contact_info(&from, |ci| ci.clone()).map(|ci| {
+                    let mut prune_msg = PruneData {
+                        pubkey: self_id,
+                        prunes: prune_set.into_iter().collect(),
+                        signature: Signature::default(),
+                        destination: from,
+                        wallclock: timestamp(),
+                    };
+                    prune_msg.sign(&me.keypair);
+                    let rsp = Protocol::PruneMessage(self_id, prune_msg);
+                    (ci.gossip, rsp)
+                })
             })
             .collect();
         if rsp.is_empty() {
@@ -2932,7 +2931,7 @@ mod tests {
         assert_eq!(slots.len(), 1);
         assert!(since.is_some());
-        let (slots, since2) = cluster_info.get_epoch_slots_since(since.clone());
+        let (slots, since2) = cluster_info.get_epoch_slots_since(since);
         assert!(slots.is_empty());
         assert_eq!(since2, since);
     }
diff --git a/core/src/cluster_info_vote_listener.rs b/core/src/cluster_info_vote_listener.rs
index fc1f1f3c88..90ca0dd633 100644
--- a/core/src/cluster_info_vote_listener.rs
+++ b/core/src/cluster_info_vote_listener.rs
@@ -385,7 +385,7 @@ impl ClusterInfoVoteListener {
                 &vote_txs_receiver,
                 &vote_tracker,
                 root_bank.slot(),
-                subscriptions.clone(),
+                &subscriptions,
                 epoch_stakes,
             ) {
                 match e {
@@ -404,9 +404,9 @@ impl ClusterInfoVoteListener {
     #[cfg(test)]
     pub fn get_and_process_votes_for_tests(
         vote_txs_receiver: &VerifiedVoteTransactionsReceiver,
-        vote_tracker: &Arc<VoteTracker>,
+        vote_tracker: &VoteTracker,
         last_root: Slot,
-        subscriptions: Arc<RpcSubscriptions>,
+        subscriptions: &RpcSubscriptions,
     ) -> Result<()> {
         Self::get_and_process_votes(
             vote_txs_receiver,
@@ -419,9 +419,9 @@ impl ClusterInfoVoteListener {
     fn get_and_process_votes(
         vote_txs_receiver:
&VerifiedVoteTransactionsReceiver, - vote_tracker: &Arc, + vote_tracker: &VoteTracker, last_root: Slot, - subscriptions: Arc, + subscriptions: &RpcSubscriptions, epoch_stakes: Option<&EpochStakes>, ) -> Result<()> { let timer = Duration::from_millis(200); @@ -443,7 +443,7 @@ impl ClusterInfoVoteListener { vote_tracker: &VoteTracker, vote_txs: Vec, root: Slot, - subscriptions: Arc, + subscriptions: &RpcSubscriptions, epoch_stakes: Option<&EpochStakes>, ) { let mut diff: HashMap>> = HashMap::new(); @@ -574,7 +574,7 @@ impl ClusterInfoVoteListener { fn notify_for_stake_change( current_stake: u64, previous_stake: u64, - subscriptions: &Arc, + subscriptions: &RpcSubscriptions, epoch_stakes: Option<&EpochStakes>, slot: Slot, ) { @@ -804,7 +804,7 @@ mod tests { &votes_receiver, &vote_tracker, 0, - subscriptions, + &subscriptions, None, ) .unwrap(); @@ -854,7 +854,7 @@ mod tests { &votes_receiver, &vote_tracker, 0, - subscriptions, + &subscriptions, None, ) .unwrap(); @@ -974,13 +974,7 @@ mod tests { &validator0_keypairs.vote_keypair, )]; - ClusterInfoVoteListener::process_votes( - &vote_tracker, - vote_tx, - 0, - subscriptions.clone(), - None, - ); + ClusterInfoVoteListener::process_votes(&vote_tracker, vote_tx, 0, &subscriptions, None); let ref_count = Arc::strong_count( &vote_tracker .keys @@ -1031,7 +1025,7 @@ mod tests { }) .collect(); - ClusterInfoVoteListener::process_votes(&vote_tracker, vote_txs, 0, subscriptions, None); + ClusterInfoVoteListener::process_votes(&vote_tracker, vote_txs, 0, &subscriptions, None); let ref_count = Arc::strong_count( &vote_tracker diff --git a/core/src/consensus.rs b/core/src/consensus.rs index f1d3164581..ce7ac5836e 100644 --- a/core/src/consensus.rs +++ b/core/src/consensus.rs @@ -165,7 +165,7 @@ impl Tower { let key = all_pubkeys.get_or_insert(&key); lockout_intervals .entry(vote.expiration_slot()) - .or_insert_with(|| vec![]) + .or_insert_with(Vec::new) .push((vote.slot, key)); } diff --git a/core/src/crds_gossip_push.rs b/core/src/crds_gossip_push.rs index 453c9f32ca..f9877d0d1b 100644 --- a/core/src/crds_gossip_push.rs +++ b/core/src/crds_gossip_push.rs @@ -152,7 +152,7 @@ impl CrdsGossipPush { let new_value = crds.new_versioned(now, value); let value_hash = new_value.value_hash; if let Some((_, ref mut received_set)) = self.received_cache.get_mut(&value_hash) { - received_set.insert(from.clone()); + received_set.insert(*from); return Err(CrdsGossipError::PushMessageAlreadyReceived); } let old = crds.insert_versioned(new_value); @@ -160,7 +160,7 @@ impl CrdsGossipPush { return Err(CrdsGossipError::PushMessageOldVersion); } let mut received_set = HashSet::new(); - received_set.insert(from.clone()); + received_set.insert(*from); self.push_messages.insert(label, value_hash); self.received_cache.insert(value_hash, (now, received_set)); Ok(old.ok().and_then(|opt| opt)) diff --git a/core/src/crds_value.rs b/core/src/crds_value.rs index f6ef6c83c2..880655a674 100644 --- a/core/src/crds_value.rs +++ b/core/src/crds_value.rs @@ -459,7 +459,7 @@ mod test { fn test_keys_and_values() { let v = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::default())); assert_eq!(v.wallclock(), 0); - let key = v.clone().contact_info().unwrap().id; + let key = v.contact_info().unwrap().id; assert_eq!(v.label(), CrdsValueLabel::ContactInfo(key)); let v = CrdsValue::new_unsigned(CrdsData::Vote( @@ -467,7 +467,7 @@ mod test { Vote::new(&Pubkey::default(), test_tx(), 0), )); assert_eq!(v.wallclock(), 0); - let key = v.clone().vote().unwrap().from; + let key = 
v.vote().unwrap().from; assert_eq!(v.label(), CrdsValueLabel::Vote(0, key)); let v = CrdsValue::new_unsigned(CrdsData::LowestSlot( @@ -475,7 +475,7 @@ mod test { LowestSlot::new(Pubkey::default(), 0, 0), )); assert_eq!(v.wallclock(), 0); - let key = v.clone().lowest_slot().unwrap().from; + let key = v.lowest_slot().unwrap().from; assert_eq!(v.label(), CrdsValueLabel::LowestSlot(key)); } diff --git a/core/src/gossip_service.rs b/core/src/gossip_service.rs index c80cdb5bc1..843d28dd06 100644 --- a/core/src/gossip_service.rs +++ b/core/src/gossip_service.rs @@ -262,7 +262,7 @@ fn make_gossip_node( cluster_info.set_entrypoint(ContactInfo::new_gossip_entry_point(entrypoint)); } let cluster_info = Arc::new(cluster_info); - let gossip_service = GossipService::new(&cluster_info.clone(), None, gossip_socket, &exit); + let gossip_service = GossipService::new(&cluster_info, None, gossip_socket, &exit); (gossip_service, ip_echo, cluster_info) } diff --git a/core/src/poh_recorder.rs b/core/src/poh_recorder.rs index 2c871c6011..9a2ea5e30c 100644 --- a/core/src/poh_recorder.rs +++ b/core/src/poh_recorder.rs @@ -721,7 +721,7 @@ mod tests { assert_eq!(poh_recorder.tick_height, 5); assert!(poh_recorder.working_bank.is_none()); let mut num_entries = 0; - while let Ok(_) = entry_receiver.try_recv() { + while entry_receiver.try_recv().is_ok() { num_entries += 1; } assert_eq!(num_entries, 3); @@ -1409,7 +1409,7 @@ mod tests { for _ in 0..(bank.ticks_per_slot() * 2) { poh_recorder.tick(); } - poh_recorder.set_bank(&bank.clone()); + poh_recorder.set_bank(&bank); assert_eq!(Some(false), bank.check_hash_age(&genesis_hash, 1)); } } diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs index b8d81735ab..c4063d6b7a 100644 --- a/core/src/replay_stage.rs +++ b/core/src/replay_stage.rs @@ -1289,11 +1289,11 @@ impl ReplayStage { let newly_voted_pubkeys = slot_vote_tracker .as_ref() .and_then(|slot_vote_tracker| slot_vote_tracker.write().unwrap().get_updates()) - .unwrap_or_else(|| vec![]); + .unwrap_or_else(Vec::new); let cluster_slot_pubkeys = cluster_slot_pubkeys .map(|v| v.read().unwrap().keys().cloned().collect()) - .unwrap_or_else(|| vec![]); + .unwrap_or_else(Vec::new); Self::update_fork_propagated_threshold_from_votes( progress, diff --git a/core/src/retransmit_stage.rs b/core/src/retransmit_stage.rs index 1023414185..ad978f0049 100644 --- a/core/src/retransmit_stage.rs +++ b/core/src/retransmit_stage.rs @@ -431,7 +431,7 @@ impl RetransmitStage { epoch_schedule, duplicate_slots_reset_sender, }; - let leader_schedule_cache = leader_schedule_cache.clone(); + let leader_schedule_cache_clone = leader_schedule_cache.clone(); let window_service = WindowService::new( blockstore, cluster_info.clone(), @@ -440,7 +440,7 @@ impl RetransmitStage { repair_socket, exit, repair_info, - &leader_schedule_cache.clone(), + leader_schedule_cache, move |id, shred, working_bank, last_root| { let is_connected = cfg .as_ref() @@ -449,7 +449,7 @@ impl RetransmitStage { let rv = should_retransmit_and_persist( shred, working_bank, - &leader_schedule_cache, + &leader_schedule_cache_clone, id, last_root, shred_version, diff --git a/core/src/rpc.rs b/core/src/rpc.rs index 6b166660d5..f33282602c 100644 --- a/core/src/rpc.rs +++ b/core/src/rpc.rs @@ -1171,7 +1171,7 @@ impl RpcSol for RpcSolImpl { leader_schedule.get_slot_leaders().iter().enumerate() { let pubkey = pubkey.to_string(); - map.entry(pubkey).or_insert_with(|| vec![]).push(slot_index); + map.entry(pubkey).or_insert_with(Vec::new).push(slot_index); } map }, @@ -1314,7 +1314,7 
+1314,7 @@ impl RpcSol for RpcSolImpl {
         let faucet_addr = meta.config.faucet_addr.ok_or_else(Error::invalid_request)?;
         let pubkey = verify_pubkey(pubkey_str)?;
-        let blockhash = meta.bank(commitment.clone())?.confirmed_last_blockhash().0;
+        let blockhash = meta.bank(commitment)?.confirmed_last_blockhash().0;
         let transaction = request_airdrop_transaction(&faucet_addr, &pubkey, lamports, blockhash)
             .map_err(|err| {
                 info!("request_airdrop_transaction failed: {:?}", err);
diff --git a/core/src/rpc_pubsub.rs b/core/src/rpc_pubsub.rs
index 80c429f69d..ec2fe518d2 100644
--- a/core/src/rpc_pubsub.rs
+++ b/core/src/rpc_pubsub.rs
@@ -387,7 +387,7 @@ mod tests {
     };
     fn process_transaction_and_notify(
-        bank_forks: &Arc<RwLock<BankForks>>,
+        bank_forks: &RwLock<BankForks>,
         tx: &Transaction,
         subscriptions: &RpcSubscriptions,
         current_slot: Slot,
@@ -921,13 +921,11 @@ mod tests {
         });
         // Process votes and check they were notified.
-        // FIX-ME-BETTER-LATER - clone below is required for testcase to pass
-        #[allow(clippy::redundant_clone)]
         ClusterInfoVoteListener::get_and_process_votes_for_tests(
             &votes_receiver,
             &vote_tracker,
             0,
-            rpc.subscriptions.clone(),
+            &rpc.subscriptions,
         )
         .unwrap();
diff --git a/core/src/validator.rs b/core/src/validator.rs
index 5e4e1466fd..2e5c30ac3b 100644
--- a/core/src/validator.rs
+++ b/core/src/validator.rs
@@ -115,7 +115,7 @@ pub struct ValidatorExit {
 }
 impl ValidatorExit {
-    pub fn register_exit(&mut self, exit: Box<dyn FnOnce() -> () + Send + Sync>) {
+    pub fn register_exit(&mut self, exit: Box<dyn FnOnce() + Send + Sync>) {
         self.exits.push(exit);
     }
diff --git a/core/src/window_service.rs b/core/src/window_service.rs
index 16628ab7ce..6ea5819f07 100644
--- a/core/src/window_service.rs
+++ b/core/src/window_service.rs
@@ -125,7 +125,7 @@ fn run_insert<F>(
     metrics: &mut BlockstoreInsertionMetrics,
 ) -> Result<()>
 where
-    F: Fn(Shred) -> (),
+    F: Fn(Shred),
 {
     let timer = Duration::from_millis(200);
     let (mut shreds, mut repair_infos) = shred_receiver.recv_timeout(timer)?;
@@ -503,8 +503,8 @@ impl WindowService {
 fn should_exit_on_error<F, H>(e: Error, handle_timeout: &mut F, handle_error: &H) -> bool
 where
-    F: FnMut() -> (),
-    H: Fn() -> (),
+    F: FnMut(),
+    H: Fn(),
 {
     match e {
         Error::CrossbeamRecvTimeoutError(RecvTimeoutError::Disconnected) => true,
diff --git a/core/tests/gossip.rs b/core/tests/gossip.rs
index fd0d05049c..c9011f5789 100644
--- a/core/tests/gossip.rs
+++ b/core/tests/gossip.rs
@@ -33,7 +33,7 @@ fn test_node(exit: &Arc<AtomicBool>) -> (Arc<ClusterInfo>, GossipService, UdpSoc
 /// tests that actually use this function are below
 fn run_gossip_topo<F>(num: usize, topo: F)
 where
-    F: Fn(&Vec<(Arc<ClusterInfo>, GossipService, UdpSocket)>) -> (),
+    F: Fn(&Vec<(Arc<ClusterInfo>, GossipService, UdpSocket)>),
 {
     let exit = Arc::new(AtomicBool::new(false));
     let listen: Vec<_> = (0..num).map(|_| test_node(&exit)).collect();
diff --git a/faucet/src/faucet.rs b/faucet/src/faucet.rs
index 522fff597c..cb9b99d2fc 100644
--- a/faucet/src/faucet.rs
+++ b/faucet/src/faucet.rs
@@ -142,22 +142,22 @@ impl Faucet {
         }
     }
     pub fn process_faucet_request(&mut self, bytes: &BytesMut) -> Result<Bytes, io::Error> {
-        let req: FaucetRequest = deserialize(bytes).or_else(|err| {
-            Err(io::Error::new(
+        let req: FaucetRequest = deserialize(bytes).map_err(|err| {
+            io::Error::new(
                 io::ErrorKind::Other,
                 format!("deserialize packet in faucet: {:?}", err),
-            ))
+            )
         })?;
         info!("Airdrop transaction requested...{:?}", req);
         let res = self.build_airdrop_transaction(req);
         match res {
             Ok(tx) => {
-                let response_vec = bincode::serialize(&tx).or_else(|err| {
-                    Err(io::Error::new(
+                let response_vec = bincode::serialize(&tx).map_err(|err| {
+                    io::Error::new(
                        io::ErrorKind::Other,
                        format!("deserialize packet in faucet: {:?}", err),
-                    ))
+                    )
                 })?;
                 let mut response_vec_with_length = vec![0; 2];
@@ -205,12 +205,12 @@ pub fn request_airdrop_transaction(
     // Read length of transaction
     let mut buffer = [0; 2];
-    stream.read_exact(&mut buffer).or_else(|err| {
+    stream.read_exact(&mut buffer).map_err(|err| {
         info!(
             "request_airdrop_transaction: buffer length read_exact error: {:?}",
             err
         );
-        Err(Error::new(ErrorKind::Other, "Airdrop failed"))
+        Error::new(ErrorKind::Other, "Airdrop failed")
     })?;
     let transaction_length = LittleEndian::read_u16(&buffer) as usize;
     if transaction_length >= PACKET_DATA_SIZE {
@@ -226,19 +226,19 @@ pub fn request_airdrop_transaction(
     // Read the transaction
     let mut buffer = Vec::new();
     buffer.resize(transaction_length, 0);
-    stream.read_exact(&mut buffer).or_else(|err| {
+    stream.read_exact(&mut buffer).map_err(|err| {
         info!(
             "request_airdrop_transaction: buffer read_exact error: {:?}",
             err
         );
-        Err(Error::new(ErrorKind::Other, "Airdrop failed"))
+        Error::new(ErrorKind::Other, "Airdrop failed")
     })?;
-    let transaction: Transaction = deserialize(&buffer).or_else(|err| {
-        Err(Error::new(
+    let transaction: Transaction = deserialize(&buffer).map_err(|err| {
+        Error::new(
             ErrorKind::Other,
             format!("request_airdrop_transaction deserialize failure: {:?}", err),
-        ))
+        )
     })?;
     Ok(transaction)
 }
diff --git a/genesis/src/main.rs b/genesis/src/main.rs
index 1d277d8eed..5ef1bd0369 100644
--- a/genesis/src/main.rs
+++ b/genesis/src/main.rs
@@ -452,7 +452,7 @@ fn main() -> Result<(), Box<dyn error::Error>> {
     );
     let native_instruction_processors =
-        solana_genesis_programs::get_programs(operating_mode, 0).unwrap_or_else(|| vec![]);
+        solana_genesis_programs::get_programs(operating_mode, 0).unwrap_or_else(Vec::new);
     let inflation = solana_genesis_programs::get_inflation(operating_mode, 0).unwrap();
     let mut genesis_config = GenesisConfig {
diff --git a/install/src/lib.rs b/install/src/lib.rs
index 298676ba2e..443c21c39f 100644
--- a/install/src/lib.rs
+++ b/install/src/lib.rs
@@ -255,7 +255,7 @@ pub fn main() -> Result<(), String> {
             let program_arguments = matches
                 .values_of("program_arguments")
                 .map(Iterator::collect)
-                .unwrap_or_else(|| vec![]);
+                .unwrap_or_else(Vec::new);
             command::run(config_file, program_name, program_arguments)
         }
diff --git a/ledger/benches/sigverify_shreds.rs b/ledger/benches/sigverify_shreds.rs
index f4244606b7..116e1fd77a 100644
--- a/ledger/benches/sigverify_shreds.rs
+++ b/ledger/benches/sigverify_shreds.rs
@@ -20,7 +20,7 @@ fn bench_sigverify_shreds_sign_gpu(bencher: &mut Bencher) {
     let mut packets = Packets::default();
     packets.packets.set_pinnable();
-    let slot = 0xdeadc0de;
+    let slot = 0xdead_c0de;
     // need to pin explicitly since the resize will not cause re-allocation
     packets.packets.reserve_and_pin(NUM_PACKETS);
     packets.packets.resize(NUM_PACKETS, Packet::default());
@@ -54,7 +54,7 @@ fn bench_sigverify_shreds_sign_cpu(bencher: &mut Bencher) {
     let mut packets = Packets::default();
-    let slot = 0xdeadc0de;
+    let slot = 0xdead_c0de;
     packets.packets.resize(NUM_PACKETS, Packet::default());
     for p in packets.packets.iter_mut() {
         let shred = Shred::new_from_data(
diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs
index 78efd4398c..25ec18765f 100644
--- a/ledger/src/blockstore.rs
+++ b/ledger/src/blockstore.rs
@@ -621,7 +621,7 @@ impl Blockstore {
         metrics: &mut BlockstoreInsertionMetrics,
     ) -> Result<()>
     where
-        F: Fn(Shred) -> (),
+        F: Fn(Shred),
     {
         let mut
total_start = Measure::start("Total elapsed");
         let mut start = Measure::start("Blockstore lock");
@@ -918,7 +918,7 @@ impl Blockstore {
         is_recovered: bool,
     ) -> bool
     where
-        F: Fn(Shred) -> (),
+        F: Fn(Shred),
     {
         let slot = shred.slot();
         let shred_index = u64::from(shred.index());
@@ -1533,7 +1533,7 @@ impl Blockstore {
             let blockhash = get_last_hash(slot_entries.iter())
                 .unwrap_or_else(|| panic!("Rooted slot {:?} must have blockhash", slot));
-            let rewards = self.rewards_cf.get(slot)?.unwrap_or_else(|| vec![]);
+            let rewards = self.rewards_cf.get(slot)?.unwrap_or_else(Vec::new);
             let block = ConfirmedBlock {
                 previous_blockhash: previous_blockhash.to_string(),
@@ -1743,7 +1743,7 @@ impl Blockstore {
             "blockstore-rpc-api",
             ("method", "get_confirmed_transaction".to_string(), String)
         );
-        if let Some((slot, status)) = self.get_transaction_status(signature.clone())? {
+        if let Some((slot, status)) = self.get_transaction_status(signature)? {
             let transaction = self.find_transaction_in_slot(slot, signature)?
                 .expect("Transaction to exist in slot entries if it exists in statuses and hasn't been cleaned up");
             let encoding = encoding.unwrap_or(TransactionEncoding::Json);
@@ -4948,7 +4948,7 @@ pub mod tests {
         // Insert will fail, slot < root
         blockstore
-            .insert_shreds(shreds1.clone()[..].to_vec(), None, false)
+            .insert_shreds(shreds1[..].to_vec(), None, false)
             .unwrap();
         assert!(blockstore.get_data_shred(1, 0).unwrap().is_none());
@@ -5229,7 +5229,7 @@ pub mod tests {
             stakes.insert(keypair.pubkey(), (1 + i as u64, Account::default()));
         }
         let slot_duration = Duration::from_millis(400);
-        let block_time_slot_3 = blockstore.get_block_time(3, slot_duration.clone(), &stakes);
+        let block_time_slot_3 = blockstore.get_block_time(3, slot_duration, &stakes);
         let mut total_stake = 0;
         let mut expected_time: u64 = (0..6)
@@ -5246,7 +5246,7 @@ pub mod tests {
         assert_eq!(block_time_slot_3.unwrap().unwrap() as u64, expected_time);
         assert_eq!(
             blockstore
-                .get_block_time(8, slot_duration.clone(), &stakes)
+                .get_block_time(8, slot_duration, &stakes)
                 .unwrap()
                 .unwrap() as u64,
             expected_time + 2 // At 400ms block duration, 5 slots == 2sec
diff --git a/ledger/src/blockstore_processor.rs b/ledger/src/blockstore_processor.rs
index 5469fb2076..affd584405 100644
--- a/ledger/src/blockstore_processor.rs
+++ b/ledger/src/blockstore_processor.rs
@@ -257,7 +257,7 @@ pub enum BlockstoreProcessorError {
 }
 /// Callback for accessing bank state while processing the blockstore
-pub type ProcessCallback = Arc<dyn Fn(&Bank) -> () + Sync + Send>;
+pub type ProcessCallback = Arc<dyn Fn(&Bank) + Sync + Send>;
 #[derive(Default, Clone)]
 pub struct ProcessOptions {
diff --git a/ledger/src/sigverify_shreds.rs b/ledger/src/sigverify_shreds.rs
index c2d0f8258c..2f15b86606 100644
--- a/ledger/src/sigverify_shreds.rs
+++ b/ledger/src/sigverify_shreds.rs
@@ -133,7 +133,7 @@ fn slot_key_data_for_gpu<
             let key = slot_keys.get(slot).unwrap();
             keys_to_slots
                 .entry(*key)
-                .or_insert_with(|| vec![])
+                .or_insert_with(Vec::new)
                 .push(*slot);
         }
     }
diff --git a/ledger/tests/shred.rs b/ledger/tests/shred.rs
index 9494729b5b..911c41014a 100644
--- a/ledger/tests/shred.rs
+++ b/ledger/tests/shred.rs
@@ -177,7 +177,7 @@ fn sort_data_coding_into_fec_sets(
         data_slot_and_index.insert(key);
         let fec_entry = fec_data
             .entry(shred.common_header.fec_set_index)
-            .or_insert_with(|| vec![]);
+            .or_insert_with(Vec::new);
         fec_entry.push(shred);
     }
     for shred in coding_shreds {
@@ -188,7 +188,7 @@ fn sort_data_coding_into_fec_sets(
         coding_slot_and_index.insert(key);
         let fec_entry = fec_coding
            .entry(shred.common_header.fec_set_index)
-            .or_insert_with(|| vec![]);
+            .or_insert_with(Vec::new);
         fec_entry.push(shred);
     }
 }
diff --git a/local-cluster/tests/local_cluster.rs b/local-cluster/tests/local_cluster.rs
index ff096da535..386ad73472 100644
--- a/local-cluster/tests/local_cluster.rs
+++ b/local-cluster/tests/local_cluster.rs
@@ -213,8 +213,8 @@ fn run_cluster_partition(
     on_partition_start: E,
     on_partition_resolved: F,
 ) where
-    E: Fn(&mut LocalCluster) -> (),
-    F: Fn(&mut LocalCluster) -> (),
+    E: Fn(&mut LocalCluster),
+    F: Fn(&mut LocalCluster),
 {
     solana_logger::setup();
     info!("PARTITION_TEST!");
diff --git a/logger/src/lib.rs b/logger/src/lib.rs
index 2c456fd292..2e044a01ce 100644
--- a/logger/src/lib.rs
+++ b/logger/src/lib.rs
@@ -23,10 +23,8 @@ impl log::Log for LoggerShim {
 }
 fn replace_logger(logger: env_logger::Logger) {
-    let max_level = logger.filter();
-    log::set_max_level(max_level);
-    let mut rw = LOGGER.write().unwrap();
-    std::mem::replace(&mut *rw, logger);
+    log::set_max_level(logger.filter());
+    *LOGGER.write().unwrap() = logger;
     let _ = log::set_boxed_logger(Box::new(LoggerShim {}));
 }
diff --git a/metrics/src/metrics.rs b/metrics/src/metrics.rs
index c77f56c7a5..ee0a37bd29 100644
--- a/metrics/src/metrics.rs
+++ b/metrics/src/metrics.rs
@@ -332,9 +332,8 @@ lazy_static! {
 }
 pub fn set_host_id(host_id: String) {
-    let mut rw = HOST_ID.write().unwrap();
     info!("host id: {}", host_id);
-    std::mem::replace(&mut *rw, host_id);
+    *HOST_ID.write().unwrap() = host_id;
 }
 /// Submits a new point from any thread.  Note that points are internally queued
diff --git a/net-utils/src/ip_echo_server.rs b/net-utils/src/ip_echo_server.rs
index 07f904ff14..2e9f7cce2b 100644
--- a/net-utils/src/ip_echo_server.rs
+++ b/net-utils/src/ip_echo_server.rs
@@ -85,11 +85,11 @@ pub fn ip_echo_server(tcp: std::net::TcpListener) -> IpEchoServer {
                 bincode::deserialize::<IpEchoServerMessage>(&data[4..])
                     .map(Some)
-                    .or_else(|err| {
-                        Err(io::Error::new(
+                    .map_err(|err| {
+                        io::Error::new(
                             io::ErrorKind::Other,
                             format!("Failed to deserialize IpEchoServerMessage: {:?}", err),
-                        ))
+                        )
                     })
             })
             .and_then(move |maybe_msg| {
diff --git a/net-utils/src/lib.rs b/net-utils/src/lib.rs
index abeed62f28..e18f86a3e7 100644
--- a/net-utils/src/lib.rs
+++ b/net-utils/src/lib.rs
@@ -372,7 +372,7 @@ pub fn bind_to(ip_addr: IpAddr, port: u16, reuseaddr: bool) -> io::Result io::Result {
diff --git a/perf/benches/sigverify.rs b/perf/benches/sigverify.rs
index d943b38c80..fd2bea1ad0 100644
--- a/perf/benches/sigverify.rs
+++ b/perf/benches/sigverify.rs
@@ -13,7 +13,7 @@ fn bench_sigverify(bencher: &mut Bencher) {
     let tx = test_tx();
     // generate packet vector
-    let batches = to_packets(&vec![tx; 128]);
+    let batches = to_packets(&std::iter::repeat(tx).take(128).collect::<Vec<_>>());
     let recycler = Recycler::default();
     let recycler_out = Recycler::default();
@@ -28,7 +28,7 @@ fn bench_get_offsets(bencher: &mut Bencher) {
     let tx = test_tx();
     // generate packet vector
-    let batches = to_packets(&vec![tx; 1024]);
+    let batches = to_packets(&std::iter::repeat(tx).take(1024).collect::<Vec<_>>());
     let recycler = Recycler::default();
     // verify packets
diff --git a/programs/budget/src/budget_expr.rs b/programs/budget/src/budget_expr.rs
index 413f7ae35d..f1d5fb1be4 100644
--- a/programs/budget/src/budget_expr.rs
+++ b/programs/budget/src/budget_expr.rs
@@ -7,7 +7,6 @@ use chrono::prelude::*;
 use serde_derive::{Deserialize, Serialize};
 use solana_sdk::hash::Hash;
 use solana_sdk::pubkey::Pubkey;
-use std::mem;
 /// The types of events a payment plan can process.
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)] @@ -256,7 +255,7 @@ impl BudgetExpr { _ => None, }; if let Some(expr) = new_expr { - mem::replace(self, *expr); + *self = *expr; } } } diff --git a/runtime/benches/accounts.rs b/runtime/benches/accounts.rs index e5a4a7ab38..cae712849b 100644 --- a/runtime/benches/accounts.rs +++ b/runtime/benches/accounts.rs @@ -14,7 +14,7 @@ fn deposit_many(bank: &Bank, pubkeys: &mut Vec, num: usize) { for t in 0..num { let pubkey = Pubkey::new_rand(); let account = Account::new((t + 1) as u64, 0, &Account::default().owner); - pubkeys.push(pubkey.clone()); + pubkeys.push(pubkey); assert!(bank.get_account(&pubkey).is_none()); bank.deposit(&pubkey, (t + 1) as u64); assert_eq!(bank.get_account(&pubkey).unwrap(), account); @@ -48,7 +48,7 @@ fn test_accounts_squash(bencher: &mut Bencher) { &[], )); let mut pubkeys: Vec = vec![]; - deposit_many(&bank1, &mut pubkeys, 250000); + deposit_many(&bank1, &mut pubkeys, 250_000); bank1.freeze(); // Measures the performance of the squash operation. diff --git a/runtime/benches/accounts_index.rs b/runtime/benches/accounts_index.rs index 033ffd4cc2..8106005928 100644 --- a/runtime/benches/accounts_index.rs +++ b/runtime/benches/accounts_index.rs @@ -10,18 +10,15 @@ use test::Bencher; #[bench] fn bench_accounts_index(bencher: &mut Bencher) { const NUM_PUBKEYS: usize = 10_000; - let pubkeys: Vec<_> = (0..NUM_PUBKEYS) - .into_iter() - .map(|_| Pubkey::new_rand()) - .collect(); + let pubkeys: Vec<_> = (0..NUM_PUBKEYS).map(|_| Pubkey::new_rand()).collect(); const NUM_FORKS: u64 = 16; let mut reclaims = vec![]; let mut index = AccountsIndex::::default(); for f in 0..NUM_FORKS { - for _p in 0..NUM_PUBKEYS { - index.insert(f, &pubkeys[_p], AccountInfo::default(), &mut reclaims); + for pubkey in pubkeys.iter().take(NUM_PUBKEYS) { + index.insert(f, pubkey, AccountInfo::default(), &mut reclaims); } } diff --git a/runtime/benches/append_vec.rs b/runtime/benches/append_vec.rs index e701216a91..5eba3a827c 100644 --- a/runtime/benches/append_vec.rs +++ b/runtime/benches/append_vec.rs @@ -32,7 +32,6 @@ fn append_vec_append(bencher: &mut Bencher) { fn add_test_accounts(vec: &AppendVec, size: usize) -> Vec<(usize, usize)> { (0..size) - .into_iter() .filter_map(|sample| { let (meta, account) = create_test_account(sample); vec.append_account(meta, &account, Hash::default()) @@ -92,7 +91,7 @@ fn append_vec_concurrent_append_read(bencher: &mut Bencher) { bencher.iter(|| { let len = indexes.lock().unwrap().len(); let random_index: usize = thread_rng().gen_range(0, len); - let (sample, pos) = indexes.lock().unwrap().get(random_index).unwrap().clone(); + let (sample, pos) = *indexes.lock().unwrap().get(random_index).unwrap(); let (account, _next) = vec.get_account(pos).unwrap(); let (_meta, test) = create_test_account(sample); assert_eq!(account.data, test.data.as_slice()); @@ -112,12 +111,7 @@ fn append_vec_concurrent_read_append(bencher: &mut Bencher) { continue; } let random_index: usize = thread_rng().gen_range(0, len + 1); - let (sample, pos) = indexes1 - .lock() - .unwrap() - .get(random_index % len) - .unwrap() - .clone(); + let (sample, pos) = *indexes1.lock().unwrap().get(random_index % len).unwrap(); let (account, _next) = vec1.get_account(pos).unwrap(); let (_meta, test) = create_test_account(sample); assert_eq!(account.data, test.data.as_slice()); diff --git a/runtime/benches/bank.rs b/runtime/benches/bank.rs index c2f8753e0a..0dcbe94710 100644 --- a/runtime/benches/bank.rs +++ b/runtime/benches/bank.rs @@ -19,13 +19,13 @@ 
use std::{sync::Arc, thread::sleep, time::Duration};
 use test::Bencher;
 const BUILTIN_PROGRAM_ID: [u8; 32] = [
-    098, 117, 105, 108, 116, 105, 110, 095, 112, 114, 111, 103, 114, 097, 109, 095, 105, 100, 0, 0,
-    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    98, 117, 105, 108, 116, 105, 110, 95, 112, 114, 111, 103, 114, 97, 109, 95, 105, 100, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 ];
 const NOOP_PROGRAM_ID: [u8; 32] = [
-    098, 117, 105, 108, 116, 105, 110, 095, 112, 114, 111, 103, 114, 097, 109, 095, 105, 100, 0, 0,
-    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
+    98, 117, 105, 108, 116, 105, 110, 95, 112, 114, 111, 103, 114, 97, 109, 95, 105, 100, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
 ];
 fn process_instruction(
@@ -43,13 +43,12 @@ pub fn create_builtin_transactions(
     let program_id = Pubkey::new(&BUILTIN_PROGRAM_ID);
     (0..4096)
-        .into_iter()
         .map(|_| {
             // Seed the signer account
             let rando0 = Keypair::new();
             bank_client
                 .transfer(10_000, &mint_keypair, &rando0.pubkey())
-                .expect(&format!("{}:{}", line!(), file!()));
+                .unwrap_or_else(|_| panic!("{}:{}", line!(), file!()));
             let instruction = create_invoke_instruction(rando0.pubkey(), program_id, &1u8);
             let (blockhash, _fee_calculator) = bank_client.get_recent_blockhash().unwrap();
@@ -65,13 +64,12 @@ pub fn create_native_loader_transactions(
     let program_id = Pubkey::new(&NOOP_PROGRAM_ID);
     (0..4096)
-        .into_iter()
         .map(|_| {
             // Seed the signer account
             let rando0 = Keypair::new();
             bank_client
                 .transfer(10_000, &mint_keypair, &rando0.pubkey())
-                .expect(&format!("{}:{}", line!(), file!()));
+                .unwrap_or_else(|_| panic!("{}:{}", line!(), file!()));
             let instruction = create_invoke_instruction(rando0.pubkey(), program_id, &1u8);
             let (blockhash, _fee_calculator) = bank_client.get_recent_blockhash().unwrap();
@@ -80,13 +78,13 @@ pub fn create_native_loader_transactions(
         .collect()
 }
-fn sync_bencher(bank: &Arc<Bank>, _bank_client: &BankClient, transactions: &Vec<Transaction>) {
+fn sync_bencher(bank: &Arc<Bank>, _bank_client: &BankClient, transactions: &[Transaction]) {
     let results = bank.process_transactions(&transactions);
     assert!(results.iter().all(Result::is_ok));
 }
-fn async_bencher(bank: &Arc<Bank>, bank_client: &BankClient, transactions: &Vec<Transaction>) {
-    for transaction in transactions.clone() {
+fn async_bencher(bank: &Arc<Bank>, bank_client: &BankClient, transactions: &[Transaction]) {
+    for transaction in transactions.to_owned() {
         bank_client.async_send_transaction(transaction).unwrap();
     }
     for _ in 0..1_000_000_000_u64 {
@@ -98,23 +96,23 @@ fn async_bencher(bank: &Arc<Bank>, bank_client: &BankClient, transactions: &Vec<
         }
         sleep(Duration::from_nanos(1));
     }
-    if !bank
+    if bank
         .get_signature_status(&transactions.last().unwrap().signatures.get(0).unwrap())
         .unwrap()
-        .is_ok()
+        .is_err()
     {
         error!(
             "transaction failed: {:?}",
             bank.get_signature_status(&transactions.last().unwrap().signatures.get(0).unwrap())
                 .unwrap()
         );
-        assert!(false);
+        panic!();
     }
 }
 fn do_bench_transactions(
     bencher: &mut Bencher,
-    bench_work: &dyn Fn(&Arc<Bank>, &BankClient, &Vec<Transaction>),
+    bench_work: &dyn Fn(&Arc<Bank>, &BankClient, &[Transaction]),
     create_transactions: &dyn Fn(&BankClient, &Keypair) -> Vec<Transaction>,
 ) {
     solana_logger::setup();
diff --git a/runtime/benches/bloom.rs b/runtime/benches/bloom.rs
index 9b227a3b10..b961fb8ee1 100644
--- a/runtime/benches/bloom.rs
+++ b/runtime/benches/bloom.rs
@@ -47,10 +47,7 @@ fn bench_sigs_bloom(bencher: &mut Bencher) {
     // https://hur.st/bloomfilter/?n=1000000&p=1.0E-8&m=&k=
     let blockhash = hash(Hash::default().as_ref());
     // info!("blockhash = {:?}", blockhash);
-    let keys = (0..27)
-        .into_iter()
-
.map(|i| blockhash.hash_at_index(i)) - .collect(); + let keys = (0..27).map(|i| blockhash.hash_at_index(i)).collect(); let mut sigs: Bloom = Bloom::new(38_340_234, keys); let mut id = blockhash; diff --git a/runtime/benches/status_cache.rs b/runtime/benches/status_cache.rs index 36c1edbabd..b6ab4dd467 100644 --- a/runtime/benches/status_cache.rs +++ b/runtime/benches/status_cache.rs @@ -30,6 +30,6 @@ fn test_statuscache_serialize(bencher: &mut Bencher) { } } bencher.iter(|| { - let _ = serialize(&status_cache.slot_deltas(&vec![0])).unwrap(); + let _ = serialize(&status_cache.slot_deltas(&[0])).unwrap(); }); } diff --git a/runtime/src/accounts.rs b/runtime/src/accounts.rs index 0bfa675333..06bc928d1b 100644 --- a/runtime/src/accounts.rs +++ b/runtime/src/accounts.rs @@ -153,12 +153,12 @@ impl Accounts { } let (account, rent) = AccountsDB::load(storage, ancestors, accounts_index, key) - .and_then(|(mut account, _)| { + .map(|(mut account, _)| { if message.is_writable(i) && !account.executable { let rent_due = rent_collector.update(&key, &mut account); - Some((account, rent_due)) + (account, rent_due) } else { - Some((account, 0)) + (account, 0) } }) .unwrap_or_default(); diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs index d844bfacc4..64b3da28c2 100644 --- a/runtime/src/accounts_db.rs +++ b/runtime/src/accounts_db.rs @@ -617,7 +617,6 @@ impl AccountsDB { pub fn clean_accounts(&self) { self.report_store_stats(); - let no_ancestors = HashMap::new(); let mut accounts_scan = Measure::start("accounts_scan"); let accounts_index = self.accounts_index.read().unwrap(); let pubkeys: Vec = accounts_index.account_maps.keys().cloned().collect(); @@ -628,7 +627,7 @@ impl AccountsDB { let mut purges_in_root = Vec::new(); let mut purges = HashMap::new(); for pubkey in pubkeys { - if let Some((list, index)) = accounts_index.get(pubkey, &no_ancestors) { + if let Some((list, index)) = accounts_index.get(pubkey, None) { let (slot, account_info) = &list[index]; if account_info.lamports == 0 { purges.insert(*pubkey, accounts_index.would_purge(pubkey)); @@ -641,16 +640,11 @@ impl AccountsDB { }) .reduce( || (HashMap::new(), Vec::new()), - |m1, m2| { + |mut m1, m2| { // Collapse down the hashmaps/vecs into one. 
- let x = m2.0.iter().fold(m1.0, |mut acc, (k, vs)| { - acc.insert(k.clone(), vs.clone()); - acc - }); - let mut y = vec![]; - y.extend(m1.1); - y.extend(m2.1); - (x, y) + m1.0.extend(m2.0); + m1.1.extend(m2.1); + m1 }, ); @@ -806,7 +800,6 @@ impl AccountsDB { } let alive_accounts: Vec<_> = { - let no_ancestors = HashMap::new(); let accounts_index = self.accounts_index.read().unwrap(); stored_accounts .iter() @@ -819,7 +812,7 @@ impl AccountsDB { (store_id, offset), _write_version, )| { - if let Some((list, _)) = accounts_index.get(pubkey, &no_ancestors) { + if let Some((list, _)) = accounts_index.get(pubkey, None) { list.iter() .any(|(_slot, i)| i.store_id == *store_id && i.offset == *offset) } else { @@ -927,7 +920,7 @@ impl AccountsDB { pub fn scan_accounts(&self, ancestors: &Ancestors, scan_func: F) -> A where - F: Fn(&mut A, Option<(&Pubkey, Account, Slot)>) -> (), + F: Fn(&mut A, Option<(&Pubkey, Account, Slot)>), A: Default, { let mut collector = A::default(); @@ -946,7 +939,7 @@ impl AccountsDB { pub fn range_scan_accounts(&self, ancestors: &Ancestors, range: R, scan_func: F) -> A where - F: Fn(&mut A, Option<(&Pubkey, Account, Slot)>) -> (), + F: Fn(&mut A, Option<(&Pubkey, Account, Slot)>), A: Default, R: RangeBounds, { @@ -968,7 +961,7 @@ impl AccountsDB { // PERF: Sequentially read each storage entry in parallel pub fn scan_account_storage(&self, slot: Slot, scan_func: F) -> Vec where - F: Fn(&StoredAccount, AppendVecId, &mut B) -> () + Send + Sync, + F: Fn(&StoredAccount, AppendVecId, &mut B) + Send + Sync, B: Send + Default, { let storage_maps: Vec> = self @@ -1020,7 +1013,7 @@ impl AccountsDB { accounts_index: &AccountsIndex, pubkey: &Pubkey, ) -> Option<(Account, Slot)> { - let (lock, index) = accounts_index.get(pubkey, ancestors)?; + let (lock, index) = accounts_index.get(pubkey, Some(ancestors))?; let slot = lock[index].0; //TODO: thread this as a ref if let Some(slot_storage) = storage.0.get(&slot) { @@ -1037,7 +1030,7 @@ impl AccountsDB { #[cfg(test)] fn load_account_hash(&self, ancestors: &Ancestors, pubkey: &Pubkey) -> Hash { let accounts_index = self.accounts_index.read().unwrap(); - let (lock, index) = accounts_index.get(pubkey, ancestors).unwrap(); + let (lock, index) = accounts_index.get(pubkey, Some(ancestors)).unwrap(); let slot = lock[index].0; let storage = self.storage.read().unwrap(); let slot_storage = storage.0.get(&slot).unwrap(); @@ -1449,7 +1442,7 @@ impl AccountsDB { let hashes: Vec<_> = keys .par_iter() .filter_map(|pubkey| { - if let Some((list, index)) = accounts_index.get(pubkey, ancestors) { + if let Some((list, index)) = accounts_index.get(pubkey, Some(ancestors)) { let (slot, account_info) = &list[index]; if account_info.lamports != 0 { storage @@ -1839,7 +1832,7 @@ impl AccountsDB { }; let entry = accum .entry(stored_account.meta.pubkey) - .or_insert_with(|| vec![]); + .or_insert_with(Vec::new); entry.push((stored_account.meta.write_version, account_info)); }, ); @@ -1847,7 +1840,7 @@ impl AccountsDB { let mut accounts_map: HashMap> = HashMap::new(); for accumulator_entry in accumulator.iter() { for (pubkey, storage_entry) in accumulator_entry { - let entry = accounts_map.entry(*pubkey).or_insert_with(|| vec![]); + let entry = accounts_map.entry(*pubkey).or_insert_with(Vec::new); entry.extend(storage_entry.iter().cloned()); } } @@ -2118,7 +2111,7 @@ pub mod tests { .accounts_index .read() .unwrap() - .get(&key, &ancestors) + .get(&key, Some(&ancestors)) .is_some()); assert_load_account(&db, unrooted_slot, key, 1); @@ -2139,7 +2132,7 @@ pub mod 
tests { .accounts_index .read() .unwrap() - .get(&key, &ancestors) + .get(&key, Some(&ancestors)) .is_none()); // Test we can store for the same slot again and get the right information @@ -2188,14 +2181,14 @@ pub mod tests { for t in 0..num { let pubkey = Pubkey::new_rand(); let account = Account::new((t + 1) as u64, space, &Account::default().owner); - pubkeys.push(pubkey.clone()); + pubkeys.push(pubkey); assert!(accounts.load_slow(&ancestors, &pubkey).is_none()); accounts.store(slot, &[(&pubkey, &account)]); } for t in 0..num_vote { let pubkey = Pubkey::new_rand(); let account = Account::new((num + t + 1) as u64, space, &solana_vote_program::id()); - pubkeys.push(pubkey.clone()); + pubkeys.push(pubkey); let ancestors = vec![(slot, 0)].into_iter().collect(); assert!(accounts.load_slow(&ancestors, &pubkey).is_none()); accounts.store(slot, &[(&pubkey, &account)]); @@ -2435,7 +2428,7 @@ pub mod tests { let ancestors = vec![(0, 0)].into_iter().collect(); let id = { let index = accounts.accounts_index.read().unwrap(); - let (list, idx) = index.get(&pubkey, &ancestors).unwrap(); + let (list, idx) = index.get(&pubkey, Some(&ancestors)).unwrap(); list[idx].1.store_id }; accounts.add_root(1); diff --git a/runtime/src/accounts_index.rs b/runtime/src/accounts_index.rs index 110721023e..e07b462fda 100644 --- a/runtime/src/accounts_index.rs +++ b/runtime/src/accounts_index.rs @@ -24,29 +24,29 @@ pub struct AccountsIndex { impl<'a, T: 'a + Clone> AccountsIndex { fn do_scan_accounts(&self, ancestors: &Ancestors, mut func: F, iter: I) where - F: FnMut(&Pubkey, (&T, Slot)) -> (), + F: FnMut(&Pubkey, (&T, Slot)), I: Iterator)>, { for (pubkey, list) in iter { let list_r = &list.1.read().unwrap(); - if let Some(index) = self.latest_slot(ancestors, &list_r) { + if let Some(index) = self.latest_slot(Some(ancestors), &list_r) { func(pubkey, (&list_r[index].1, list_r[index].0)); } } } /// call func with every pubkey and index visible from a given set of ancestors - pub fn scan_accounts(&self, ancestors: &Ancestors, func: F) + pub(crate) fn scan_accounts(&self, ancestors: &Ancestors, func: F) where - F: FnMut(&Pubkey, (&T, Slot)) -> (), + F: FnMut(&Pubkey, (&T, Slot)), { self.do_scan_accounts(ancestors, func, self.account_maps.iter()); } /// call func with every pubkey and index visible from a given set of ancestors with range - pub fn range_scan_accounts(&self, ancestors: &Ancestors, range: R, func: F) + pub(crate) fn range_scan_accounts(&self, ancestors: &Ancestors, range: R, func: F) where - F: FnMut(&Pubkey, (&T, Slot)) -> (), + F: FnMut(&Pubkey, (&T, Slot)), R: RangeBounds, { self.do_scan_accounts(ancestors, func, self.account_maps.range(range)); @@ -76,11 +76,14 @@ impl<'a, T: 'a + Clone> AccountsIndex { // find the latest slot and T in a slice for a given ancestor // returns index into 'slice' if found, None if not. - fn latest_slot(&self, ancestors: &Ancestors, slice: SlotSlice) -> Option { + fn latest_slot(&self, ancestors: Option<&Ancestors>, slice: SlotSlice) -> Option { let mut max = 0; let mut rv = None; for (i, (slot, _t)) in slice.iter().rev().enumerate() { - if *slot >= max && (ancestors.contains_key(slot) || self.is_root(*slot)) { + if *slot >= max + && (ancestors.map_or(false, |ancestors| ancestors.contains_key(slot)) + || self.is_root(*slot)) + { rv = Some((slice.len() - 1) - i); max = *slot; } @@ -90,10 +93,10 @@ impl<'a, T: 'a + Clone> AccountsIndex { /// Get an account /// The latest account that appears in `ancestors` or `roots` is returned. 
- pub fn get( + pub(crate) fn get( &self, pubkey: &Pubkey, - ancestors: &Ancestors, + ancestors: Option<&Ancestors>, ) -> Option<(RwLockReadGuard>, usize)> { self.account_maps.get(pubkey).and_then(|list| { let list_r = list.1.read().unwrap(); @@ -245,7 +248,8 @@ mod tests { let key = Keypair::new(); let index = AccountsIndex::::default(); let ancestors = HashMap::new(); - assert!(index.get(&key.pubkey(), &ancestors).is_none()); + assert!(index.get(&key.pubkey(), Some(&ancestors)).is_none()); + assert!(index.get(&key.pubkey(), None).is_none()); let mut num = 0; index.scan_accounts(&ancestors, |_pubkey, _index| num += 1); @@ -261,7 +265,8 @@ mod tests { assert!(gc.is_empty()); let ancestors = HashMap::new(); - assert!(index.get(&key.pubkey(), &ancestors).is_none()); + assert!(index.get(&key.pubkey(), Some(&ancestors)).is_none()); + assert!(index.get(&key.pubkey(), None).is_none()); let mut num = 0; index.scan_accounts(&ancestors, |_pubkey, _index| num += 1); @@ -277,7 +282,7 @@ mod tests { assert!(gc.is_empty()); let ancestors = vec![(1, 1)].into_iter().collect(); - assert!(index.get(&key.pubkey(), &ancestors).is_none()); + assert!(index.get(&key.pubkey(), Some(&ancestors)).is_none()); let mut num = 0; index.scan_accounts(&ancestors, |_pubkey, _index| num += 1); @@ -293,7 +298,7 @@ mod tests { assert!(gc.is_empty()); let ancestors = vec![(0, 0)].into_iter().collect(); - let (list, idx) = index.get(&key.pubkey(), &ancestors).unwrap(); + let (list, idx) = index.get(&key.pubkey(), Some(&ancestors)).unwrap(); assert_eq!(list[idx], (0, true)); let mut num = 0; @@ -324,9 +329,8 @@ mod tests { index.insert(0, &key.pubkey(), true, &mut gc); assert!(gc.is_empty()); - let ancestors = vec![].into_iter().collect(); index.add_root(0); - let (list, idx) = index.get(&key.pubkey(), &ancestors).unwrap(); + let (list, idx) = index.get(&key.pubkey(), None).unwrap(); assert_eq!(list[idx], (0, true)); } @@ -369,14 +373,14 @@ mod tests { let mut gc = Vec::new(); index.insert(0, &key.pubkey(), true, &mut gc); assert!(gc.is_empty()); - let (list, idx) = index.get(&key.pubkey(), &ancestors).unwrap(); + let (list, idx) = index.get(&key.pubkey(), Some(&ancestors)).unwrap(); assert_eq!(list[idx], (0, true)); drop(list); let mut gc = Vec::new(); index.insert(0, &key.pubkey(), false, &mut gc); assert_eq!(gc, vec![(0, true)]); - let (list, idx) = index.get(&key.pubkey(), &ancestors).unwrap(); + let (list, idx) = index.get(&key.pubkey(), Some(&ancestors)).unwrap(); assert_eq!(list[idx], (0, false)); } @@ -391,10 +395,10 @@ mod tests { assert!(gc.is_empty()); index.insert(1, &key.pubkey(), false, &mut gc); assert!(gc.is_empty()); - let (list, idx) = index.get(&key.pubkey(), &ancestors).unwrap(); + let (list, idx) = index.get(&key.pubkey(), Some(&ancestors)).unwrap(); assert_eq!(list[idx], (0, true)); let ancestors = vec![(1, 0)].into_iter().collect(); - let (list, idx) = index.get(&key.pubkey(), &ancestors).unwrap(); + let (list, idx) = index.get(&key.pubkey(), Some(&ancestors)).unwrap(); assert_eq!(list[idx], (1, false)); } @@ -413,13 +417,12 @@ mod tests { index.add_root(3); index.insert(4, &key.pubkey(), true, &mut gc); assert_eq!(gc, vec![(0, true), (1, false), (2, true)]); - let ancestors = vec![].into_iter().collect(); - let (list, idx) = index.get(&key.pubkey(), &ancestors).unwrap(); + let (list, idx) = index.get(&key.pubkey(), None).unwrap(); assert_eq!(list[idx], (3, true)); let mut num = 0; let mut found_key = false; - index.scan_accounts(&ancestors, |pubkey, _index| { + index.scan_accounts(&Ancestors::new(), 
             if pubkey == &key.pubkey() {
                 found_key = true;
                 assert_eq!(_index, (&true, 3));
diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs
index 891b5ced0f..9e1708daa2 100644
--- a/runtime/src/bank.rs
+++ b/runtime/src/bank.rs
@@ -150,7 +150,7 @@ impl StatusCacheRc {
     }
 }
 
-pub type EnteredEpochCallback = Box<dyn Fn(&mut Bank) -> () + Sync + Send>;
+pub type EnteredEpochCallback = Box<dyn Fn(&mut Bank) + Sync + Send>;
 pub type TransactionProcessResult = (Result<()>, Option<HashAgeKind>);
 
 pub struct TransactionResults {
@@ -3854,7 +3854,7 @@ mod tests {
     impl Bank {
         fn slots_by_pubkey(&self, pubkey: &Pubkey, ancestors: &Ancestors) -> Vec<Slot> {
             let accounts_index = self.rc.accounts.accounts_db.accounts_index.read().unwrap();
-            let (accounts, _) = accounts_index.get(&pubkey, &ancestors).unwrap();
+            let (accounts, _) = accounts_index.get(&pubkey, Some(&ancestors)).unwrap();
             accounts
                 .iter()
                 .map(|(slot, _)| *slot)
@@ -4988,7 +4988,7 @@ mod tests {
         let (genesis_config, mint_keypair) = create_genesis_config(2_000);
         let bank0 = Arc::new(Bank::new(&genesis_config));
         let initial_state = bank0.hash_internal_state();
-        let bank1 = Bank::new_from_parent(&bank0.clone(), &Pubkey::default(), 1);
+        let bank1 = Bank::new_from_parent(&bank0, &Pubkey::default(), 1);
         assert_ne!(bank1.hash_internal_state(), initial_state);
 
         info!("transfer bank1");
diff --git a/runtime/src/bloom.rs b/runtime/src/bloom.rs
index 3739fe099d..c6dbd42bc5 100644
--- a/runtime/src/bloom.rs
+++ b/runtime/src/bloom.rs
@@ -42,15 +42,20 @@ impl<T: BloomHashIndex> Bloom<T> {
         let keys: Vec<u64> = (0..num_keys).map(|_| rand::thread_rng().gen()).collect();
         Self::new(num_bits, keys)
     }
-    pub fn num_bits(num_items: f64, false_rate: f64) -> f64 {
+    fn num_bits(num_items: f64, false_rate: f64) -> f64 {
         let n = num_items;
         let p = false_rate;
         ((n * p.ln()) / (1f64 / 2f64.powf(2f64.ln())).ln()).ceil()
     }
-    pub fn num_keys(num_bits: f64, num_items: f64) -> f64 {
+    fn num_keys(num_bits: f64, num_items: f64) -> f64 {
         let n = num_items;
         let m = num_bits;
-        1f64.max(((m / n) * 2f64.ln()).round())
+        // infinity as usize is zero in rust 1.43 but 2^64-1 in rust 1.45; ensure it's zero here
+        if n == 0.0 {
+            0.0
+        } else {
+            1f64.max(((m / n) * 2f64.ln()).round())
+        }
     }
     fn pos(&self, key: &T, k: u64) -> u64 {
         key.hash_at_index(k) % self.bits.len()
diff --git a/runtime/src/legacy_system_instruction_processor0.rs b/runtime/src/legacy_system_instruction_processor0.rs
index dc3693a354..5237a3cd7c 100644
--- a/runtime/src/legacy_system_instruction_processor0.rs
+++ b/runtime/src/legacy_system_instruction_processor0.rs
@@ -930,7 +930,7 @@ mod tests {
 
     fn with_create_zero_lamport<F>(callback: F)
     where
-        F: Fn(&Bank) -> (),
+        F: Fn(&Bank),
     {
         solana_logger::setup();
 
diff --git a/runtime/src/nonce_utils.rs b/runtime/src/nonce_utils.rs
index 70e6c5218c..9b5cd47d62 100644
--- a/runtime/src/nonce_utils.rs
+++ b/runtime/src/nonce_utils.rs
@@ -206,7 +206,7 @@ mod tests {
     fn verify_nonce_ok() {
         with_test_keyed_account(42, true, |nonce_account| {
             let mut signers = HashSet::new();
-            signers.insert(nonce_account.signer_key().unwrap().clone());
+            signers.insert(nonce_account.signer_key().unwrap());
             let state: State = nonce_account.state().unwrap();
             // New is in Uninitialzed state
             assert_eq!(state, State::Uninitialized);
@@ -236,7 +236,7 @@ mod tests {
     fn verify_nonce_bad_query_hash_fail() {
         with_test_keyed_account(42, true, |nonce_account| {
             let mut signers = HashSet::new();
-            signers.insert(nonce_account.signer_key().unwrap().clone());
+            signers.insert(nonce_account.signer_key().unwrap());
             let state: State = nonce_account.state().unwrap();
             // New is in Uninitialzed state
             assert_eq!(state, State::Uninitialized);
diff --git a/runtime/src/rent_collector.rs b/runtime/src/rent_collector.rs
index 6d95a45795..e71a1575cf 100644
--- a/runtime/src/rent_collector.rs
+++ b/runtime/src/rent_collector.rs
@@ -48,11 +48,16 @@ impl RentCollector {
                 .map(|epoch| self.epoch_schedule.get_slots_in_epoch(epoch + 1))
                 .sum();
 
-            let (rent_due, exempt) = self.rent.due(
-                account.lamports,
-                account.data.len(),
-                slots_elapsed as f64 / self.slots_per_year,
-            );
+            // avoid infinite rent in rust 1.45
+            let years_elapsed = if self.slots_per_year != 0.0 {
+                slots_elapsed as f64 / self.slots_per_year
+            } else {
+                0.0
+            };
+
+            let (rent_due, exempt) =
+                self.rent
+                    .due(account.lamports, account.data.len(), years_elapsed);
 
             if exempt || rent_due != 0 {
                 if account.lamports > rent_due {
diff --git a/runtime/src/status_cache.rs b/runtime/src/status_cache.rs
index d27674785a..ffeff7db1c 100644
--- a/runtime/src/status_cache.rs
+++ b/runtime/src/status_cache.rs
@@ -271,7 +271,7 @@ impl<T: Serialize + Clone> StatusCache<T> {
             .or_insert((slot, sig_index, HashMap::new()));
         sig_map.0 = std::cmp::max(slot, sig_map.0);
 
-        let sig_forks = sig_map.2.entry(sig_slice).or_insert_with(|| vec![]);
+        let sig_forks = sig_map.2.entry(sig_slice).or_insert_with(Vec::new);
         sig_forks.push((slot, res.clone()));
         let slot_deltas = self.slot_deltas.entry(slot).or_default();
         let mut fork_entry = slot_deltas.lock().unwrap();
diff --git a/runtime/src/system_instruction_processor.rs b/runtime/src/system_instruction_processor.rs
index 2f64ab0a09..fdc632f2ae 100644
--- a/runtime/src/system_instruction_processor.rs
+++ b/runtime/src/system_instruction_processor.rs
@@ -933,7 +933,7 @@ mod tests {
 
     fn with_create_zero_lamport<F>(callback: F)
     where
-        F: Fn(&Bank) -> (),
+        F: Fn(&Bank),
     {
         solana_logger::setup();
 
diff --git a/sdk/benches/short_vec.rs b/sdk/benches/short_vec.rs
index 69b823bebb..c1e687edf3 100644
--- a/sdk/benches/short_vec.rs
+++ b/sdk/benches/short_vec.rs
@@ -8,14 +8,14 @@ use test::Bencher;
 // Return a ShortVec with 127 bytes
 fn create_encoded_short_vec() -> Vec<u8> {
     let mut bytes = vec![127];
-    bytes.extend_from_slice(&vec![0u8; 127]);
+    bytes.extend_from_slice(&[0u8; 127]);
     bytes
 }
 
 // Return a Vec with 127 bytes
 fn create_encoded_vec() -> Vec<u8> {
     let mut bytes = vec![127, 0, 0, 0, 0, 0, 0, 0];
-    bytes.extend_from_slice(&vec![0u8; 127]);
+    bytes.extend_from_slice(&[0u8; 127]);
     bytes
 }
 
diff --git a/sdk/benches/slot_history.rs b/sdk/benches/slot_history.rs
index dd4e0fe050..7c95f10ebc 100644
--- a/sdk/benches/slot_history.rs
+++ b/sdk/benches/slot_history.rs
@@ -22,7 +22,7 @@ fn bench_slot_history_add_new(b: &mut Bencher) {
     b.iter(|| {
         for _ in 0..5 {
             slot_history.add(slot);
-            slot += 100000;
+            slot += 100_000;
         }
     });
 }
diff --git a/sdk/src/abi_example.rs b/sdk/src/abi_example.rs
index 043d0b9925..8f4c369a16 100644
--- a/sdk/src/abi_example.rs
+++ b/sdk/src/abi_example.rs
@@ -298,7 +298,7 @@ impl<T: AbiExample> AbiExample for Box<T> {
     }
 }
 
-impl<T> AbiExample for Box<dyn Fn(&mut T) -> () + Sync + Send> {
+impl<T> AbiExample for Box<dyn Fn(&mut T) + Sync + Send> {
     fn example() -> Self {
         info!("AbiExample for (Box<T>): {}", type_name::<Self>());
         Box::new(move |_t: &mut T| {})
diff --git a/sdk/src/nonce/account.rs b/sdk/src/nonce/account.rs
index aaac8a7434..925ef5f029 100644
--- a/sdk/src/nonce/account.rs
+++ b/sdk/src/nonce/account.rs
@@ -211,7 +211,7 @@ mod test {
             ..nonce::state::Data::default()
         };
         let mut signers = HashSet::new();
-        signers.insert(keyed_account.signer_key().unwrap().clone());
+        signers.insert(*keyed_account.signer_key().unwrap());
         let state = AccountUtilsState::<Versions>::state(keyed_account)
             .unwrap()
             .convert_to_current();
@@ -326,7 +326,7 @@ mod test {
         let min_lamports = rent.minimum_balance(State::size());
         with_test_keyed_account(min_lamports + 42, true, |keyed_account| {
             let mut signers = HashSet::new();
-            signers.insert(keyed_account.signer_key().unwrap().clone());
+            signers.insert(*keyed_account.signer_key().unwrap());
             let recent_blockhashes = create_test_recent_blockhashes(0);
             let authorized = *keyed_account.unsigned_key();
             keyed_account
@@ -347,7 +347,7 @@ mod test {
         let min_lamports = rent.minimum_balance(State::size());
         with_test_keyed_account(min_lamports + 42, true, |keyed_account| {
             let mut signers = HashSet::new();
-            signers.insert(keyed_account.signer_key().unwrap().clone());
+            signers.insert(*keyed_account.signer_key().unwrap());
             let recent_blockhashes = create_test_recent_blockhashes(63);
             let authorized = *keyed_account.unsigned_key();
             keyed_account
@@ -367,7 +367,7 @@ mod test {
         let min_lamports = rent.minimum_balance(State::size());
         with_test_keyed_account(min_lamports + 42, true, |keyed_account| {
             let mut signers = HashSet::new();
-            signers.insert(keyed_account.signer_key().unwrap().clone());
+            signers.insert(*keyed_account.signer_key().unwrap());
             let recent_blockhashes = create_test_recent_blockhashes(63);
             let result = keyed_account.advance_nonce_account(&recent_blockhashes, &signers);
             assert_eq!(result, Err(NonceError::BadAccountState.into()));
@@ -384,14 +384,14 @@ mod test {
        with_test_keyed_account(min_lamports + 42, true, |nonce_account| {
            with_test_keyed_account(42, true, |nonce_authority| {
                let mut signers = HashSet::new();
-                signers.insert(nonce_account.signer_key().unwrap().clone());
+                signers.insert(*nonce_account.signer_key().unwrap());
                let recent_blockhashes = create_test_recent_blockhashes(63);
                let authorized = *nonce_authority.unsigned_key();
                nonce_account
                    .initialize_nonce_account(&authorized, &recent_blockhashes, &rent)
                    .unwrap();
                let mut signers = HashSet::new();
-                signers.insert(nonce_authority.signer_key().unwrap().clone());
+                signers.insert(*nonce_authority.signer_key().unwrap());
                let recent_blockhashes = create_test_recent_blockhashes(31);
                let result = nonce_account.advance_nonce_account(&recent_blockhashes, &signers);
                assert_eq!(result, Ok(()));
@@ -409,7 +409,7 @@ mod test {
        with_test_keyed_account(min_lamports + 42, true, |nonce_account| {
            with_test_keyed_account(42, false, |nonce_authority| {
                let mut signers = HashSet::new();
-                signers.insert(nonce_account.signer_key().unwrap().clone());
+                signers.insert(*nonce_account.signer_key().unwrap());
                let recent_blockhashes = create_test_recent_blockhashes(63);
                let authorized = *nonce_authority.unsigned_key();
                nonce_account
@@ -435,7 +435,7 @@ mod test {
        assert_eq!(state, State::Uninitialized);
        with_test_keyed_account(42, false, |to_keyed| {
            let mut signers = HashSet::new();
-            signers.insert(nonce_keyed.signer_key().unwrap().clone());
+            signers.insert(*nonce_keyed.signer_key().unwrap());
            let recent_blockhashes = create_test_recent_blockhashes(0);
            let withdraw_lamports = nonce_keyed.account.borrow().lamports;
            let expect_nonce_lamports =
@@ -506,7 +506,7 @@ mod test {
        assert_eq!(state, State::Uninitialized);
        with_test_keyed_account(42, false, |to_keyed| {
            let mut signers = HashSet::new();
-            signers.insert(nonce_keyed.signer_key().unwrap().clone());
+            signers.insert(*nonce_keyed.signer_key().unwrap());
            let recent_blockhashes = create_test_recent_blockhashes(0);
            let lamports = nonce_keyed.account.borrow().lamports + 1;
            let result = nonce_keyed.withdraw_nonce_account(
@@ -531,7 +531,7 @@
         with_test_keyed_account(min_lamports + 42, true, |nonce_keyed| {
             with_test_keyed_account(42, false, |to_keyed| {
                 let mut signers = HashSet::new();
-                signers.insert(nonce_keyed.signer_key().unwrap().clone());
+                signers.insert(*nonce_keyed.signer_key().unwrap());
                 let recent_blockhashes = create_test_recent_blockhashes(0);
                 let withdraw_lamports = nonce_keyed.account.borrow().lamports / 2;
                 let nonce_expect_lamports =
@@ -584,7 +584,7 @@ mod test {
         let min_lamports = rent.minimum_balance(State::size());
         with_test_keyed_account(min_lamports + 42, true, |nonce_keyed| {
             let mut signers = HashSet::new();
-            signers.insert(nonce_keyed.signer_key().unwrap().clone());
+            signers.insert(*nonce_keyed.signer_key().unwrap());
             let recent_blockhashes = create_test_recent_blockhashes(31);
             let authority = *nonce_keyed.unsigned_key();
             nonce_keyed
@@ -659,7 +659,7 @@ mod test {
                 .unwrap();
             with_test_keyed_account(42, false, |to_keyed| {
                 let mut signers = HashSet::new();
-                signers.insert(nonce_keyed.signer_key().unwrap().clone());
+                signers.insert(*nonce_keyed.signer_key().unwrap());
                 let withdraw_lamports = nonce_keyed.account.borrow().lamports;
                 let result = nonce_keyed.withdraw_nonce_account(
                     withdraw_lamports,
@@ -689,7 +689,7 @@ mod test {
             with_test_keyed_account(42, false, |to_keyed| {
                 let recent_blockhashes = create_test_recent_blockhashes(63);
                 let mut signers = HashSet::new();
-                signers.insert(nonce_keyed.signer_key().unwrap().clone());
+                signers.insert(*nonce_keyed.signer_key().unwrap());
                 let withdraw_lamports = nonce_keyed.account.borrow().lamports + 1;
                 let result = nonce_keyed.withdraw_nonce_account(
                     withdraw_lamports,
@@ -719,7 +719,7 @@ mod test {
             with_test_keyed_account(42, false, |to_keyed| {
                 let recent_blockhashes = create_test_recent_blockhashes(63);
                 let mut signers = HashSet::new();
-                signers.insert(nonce_keyed.signer_key().unwrap().clone());
+                signers.insert(*nonce_keyed.signer_key().unwrap());
                 let withdraw_lamports = nonce_keyed.account.borrow().lamports - min_lamports + 1;
                 let result = nonce_keyed.withdraw_nonce_account(
                     withdraw_lamports,
@@ -746,7 +746,7 @@ mod test {
             .convert_to_current();
         assert_eq!(state, State::Uninitialized);
         let mut signers = HashSet::new();
-        signers.insert(keyed_account.signer_key().unwrap().clone());
+        signers.insert(*keyed_account.signer_key().unwrap());
         let recent_blockhashes = create_test_recent_blockhashes(0);
         let authority = *keyed_account.unsigned_key();
         let result =
@@ -773,7 +773,7 @@ mod test {
         let min_lamports = rent.minimum_balance(State::size());
         with_test_keyed_account(min_lamports + 42, true, |keyed_account| {
             let mut signers = HashSet::new();
-            signers.insert(keyed_account.signer_key().unwrap().clone());
+            signers.insert(*keyed_account.signer_key().unwrap());
             let recent_blockhashes = RecentBlockhashes::from_iter(vec![].into_iter());
             let authorized = *keyed_account.unsigned_key();
             let result =
@@ -827,7 +827,7 @@ mod test {
         let min_lamports = rent.minimum_balance(State::size());
         with_test_keyed_account(min_lamports + 42, true, |nonce_account| {
             let mut signers = HashSet::new();
-            signers.insert(nonce_account.signer_key().unwrap().clone());
+            signers.insert(*nonce_account.signer_key().unwrap());
             let recent_blockhashes = create_test_recent_blockhashes(31);
             let authorized = *nonce_account.unsigned_key();
             nonce_account
@@ -857,7 +857,7 @@ mod test {
         let min_lamports = rent.minimum_balance(State::size());
         with_test_keyed_account(min_lamports + 42, true, |nonce_account| {
             let mut signers = HashSet::new();
-            signers.insert(nonce_account.signer_key().unwrap().clone());
+            signers.insert(*nonce_account.signer_key().unwrap());
             let result = nonce_account.authorize_nonce_account(&Pubkey::default(), &signers);
             assert_eq!(result, Err(NonceError::BadAccountState.into()));
         })
@@ -872,7 +872,7 @@ mod test {
         let min_lamports = rent.minimum_balance(State::size());
         with_test_keyed_account(min_lamports + 42, true, |nonce_account| {
             let mut signers = HashSet::new();
-            signers.insert(nonce_account.signer_key().unwrap().clone());
+            signers.insert(*nonce_account.signer_key().unwrap());
             let recent_blockhashes = create_test_recent_blockhashes(31);
             let authorized = &Pubkey::default().clone();
             nonce_account
diff --git a/validator/src/main.rs b/validator/src/main.rs
index 8ea03eb58e..220405aaa6 100644
--- a/validator/src/main.rs
+++ b/validator/src/main.rs
@@ -152,12 +152,7 @@ fn start_gossip_node(
     let cluster_info = Arc::new(cluster_info);
     let gossip_exit_flag = Arc::new(AtomicBool::new(false));
 
-    let gossip_service = GossipService::new(
-        &cluster_info.clone(),
-        None,
-        gossip_socket,
-        &gossip_exit_flag,
-    );
+    let gossip_service = GossipService::new(&cluster_info, None, gossip_socket, &gossip_exit_flag);
 
     (cluster_info, gossip_exit_flag, gossip_service)
 }
@@ -955,7 +950,7 @@ pub fn main() {
         .collect();
 
     let snapshot_interval_slots = value_t_or_exit!(matches, "snapshot_interval_slots", u64);
-    let snapshot_path = ledger_path.clone().join("snapshot");
+    let snapshot_path = ledger_path.join("snapshot");
     fs::create_dir_all(&snapshot_path).unwrap_or_else(|err| {
         eprintln!(
             "Failed to create snapshots directory {:?}: {}",
@@ -1235,7 +1230,7 @@ pub fn main() {
                 Ok(())
             }
         })
-        .and_then(|_| {
+        .map(|_| {
             if !validator_config.voting_disabled && !no_check_vote_account {
                 check_vote_account(
                     &rpc_client,
@@ -1254,7 +1249,6 @@ pub fn main() {
                     exit(1);
                 });
             }
-            Ok(())
         });
 
     if result.is_ok() {
diff --git a/watchtower/src/main.rs b/watchtower/src/main.rs
index 1372fb91cb..664985072f 100644
--- a/watchtower/src/main.rs
+++ b/watchtower/src/main.rs
@@ -125,7 +125,7 @@ fn get_config() -> Config {
     let json_rpc_url =
         value_t!(matches, "json_rpc_url", String).unwrap_or_else(|_| config.json_rpc_url);
     let validator_identity_pubkeys: Vec<_> = pubkeys_of(&matches, "validator_identities")
-        .unwrap_or_else(|| vec![])
+        .unwrap_or_else(Vec::new)
        .into_iter()
        .map(|i| i.to_string())
        .collect();
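A minimal sketch (not from the patch) of why the runtime/src/accounts_index.rs hunks change `get` and `latest_slot` to take `Option<&Ancestors>`: passing `None` now means "consider only rooted slots", which is why the tests above drop their empty `vec![].into_iter().collect()` ancestor maps. The types below are simplified stand-ins for the runtime's real ones, not the crate's API.

use std::collections::{HashMap, HashSet};

type Slot = u64;
type Ancestors = HashMap<Slot, usize>;

fn is_visible(ancestors: Option<&Ancestors>, roots: &HashSet<Slot>, slot: Slot) -> bool {
    // Same shape as the patched check in `AccountsIndex::latest_slot`: a slot
    // qualifies if it is in the ancestor set (when one is given) or is a root.
    ancestors.map_or(false, |ancestors| ancestors.contains_key(&slot)) || roots.contains(&slot)
}

fn main() {
    let roots: HashSet<Slot> = vec![3].into_iter().collect();
    let ancestors: Ancestors = vec![(5, 0)].into_iter().collect();
    assert!(is_visible(None, &roots, 3)); // rooted slot, no ancestors needed
    assert!(is_visible(Some(&ancestors), &roots, 5)); // visible via ancestors
    assert!(!is_visible(None, &roots, 5)); // not a root and no ancestors given
    println!("ok");
}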
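An illustrative sketch (not part of the patch) of the bloom-filter guard in runtime/src/bloom.rs: `m / n` is `f64::INFINITY` when `num_items` is zero, and casting that to an integer with `as` changed behavior across toolchains (saturating float-to-int casts landed in Rust 1.45), so the zero case is handled explicitly. The function body is copied from the added lines; the asserts are mine.

fn num_keys(num_bits: f64, num_items: f64) -> f64 {
    let (n, m) = (num_items, num_bits);
    if n == 0.0 {
        0.0
    } else {
        1f64.max(((m / n) * 2f64.ln()).round())
    }
}

fn main() {
    assert_eq!(num_keys(1000.0, 0.0), 0.0); // no items: no hash keys
    assert_eq!(num_keys(1000.0, 100.0), 7.0); // roughly (m / n) * ln 2
    // Without the guard, the zero-item case would produce f64::INFINITY, which
    // saturates to u64::MAX when later cast with `as` on Rust 1.45 and newer.
    println!("{}", f64::INFINITY as u64);
}

The rent_collector.rs hunk applies the same reasoning: guarding `slots_per_year == 0.0` keeps `years_elapsed` finite so the rent calculation cannot blow up under the newer cast semantics.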
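A hedged sketch (not from the patch) of the repeated `signers.insert(*...signer_key().unwrap())` test changes in sdk/src/nonce/account.rs: `Pubkey` is a `Copy` type, so dereferencing the `&Pubkey` copies it into the set and the `.clone()` call is unnecessary (the pattern clippy's `clone_on_copy` lint warns about). `MiniPubkey` below is a hypothetical stand-in, not the SDK type.

use std::collections::HashSet;

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct MiniPubkey([u8; 4]);

fn signer_key() -> Option<&'static MiniPubkey> {
    // Stand-in for KeyedAccount::signer_key(), which returns Option<&Pubkey>.
    static KEY: MiniPubkey = MiniPubkey([1, 2, 3, 4]);
    Some(&KEY)
}

fn main() {
    let mut signers = HashSet::new();
    // Before: signers.insert(signer_key().unwrap().clone());
    // After: dereference the shared reference, copying the value into the set.
    signers.insert(*signer_key().unwrap());
    assert!(signers.contains(&MiniPubkey([1, 2, 3, 4])));
}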
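A small sketch (not part of the patch) of the `Vec::new` substitutions in runtime/src/status_cache.rs and watchtower/src/main.rs: any function item with a matching signature can stand in for a closure, so `Vec::new` replaces `|| vec![]` directly, which is the spelling clippy suggests for closures that merely call a function.

use std::collections::HashMap;

fn main() {
    // Same pattern as the status_cache change: create the entry's Vec lazily.
    let mut forks: HashMap<&str, Vec<u64>> = HashMap::new();
    forks.entry("sig").or_insert_with(Vec::new).push(42);
    assert_eq!(forks["sig"], vec![42]);

    // Same pattern as the watchtower change: default to an empty Vec.
    let maybe: Option<Vec<u64>> = None;
    let values = maybe.unwrap_or_else(Vec::new);
    assert!(values.is_empty());
}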
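A hedged sketch (not from the patch) of the `.and_then` to `.map` switch in validator/src/main.rs: once the final closure no longer returns `Ok(())`, `Result::map` is the right combinator, since `and_then` is only needed when the closure itself produces a `Result`. The `check` helper below is hypothetical; only the combinator choice mirrors the diff.

fn check(healthy: bool) -> Result<(), String> {
    if healthy {
        Ok(())
    } else {
        Err("rpc unhealthy".to_string())
    }
}

fn main() {
    // `map` wraps the closure's plain `()` back into `Ok(())`.
    let result: Result<(), String> = check(true).map(|_| {
        // side-effecting follow-up work that returns ()
        println!("vote account looks fine");
    });
    assert!(result.is_ok());

    // With `and_then`, the closure would have to return a `Result` itself.
    let chained: Result<(), String> = check(true).and_then(|_| check(false));
    assert!(chained.is_err());
}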