diff --git a/core/src/broadcast_stage/broadcast_metrics.rs b/core/src/broadcast_stage/broadcast_metrics.rs
index af821de38c..8f58cfd4d4 100644
--- a/core/src/broadcast_stage/broadcast_metrics.rs
+++ b/core/src/broadcast_stage/broadcast_metrics.rs
@@ -96,7 +96,7 @@ impl BroadcastStats for InsertShredsStats {
     }
 }
 
-// Tracks metrics of type `T` acrosss multiple threads
+// Tracks metrics of type `T` across multiple threads
 #[derive(Default)]
 pub(crate) struct BatchCounter {
     // The number of batches processed across all threads so far
diff --git a/core/src/consensus.rs b/core/src/consensus.rs
index 6641af5343..cf95840911 100644
--- a/core/src/consensus.rs
+++ b/core/src/consensus.rs
@@ -1245,7 +1245,7 @@ pub mod test {
         // The other two validators voted at slots 46, 47, which
         // will only both show up in slot 48, at which point
         // 2/5 > SWITCH_FORK_THRESHOLD of the stake has voted
-        // on another fork, so switching should suceed
+        // on another fork, so switching should succeed
         let votes_to_simulate = (46..=48).collect();
         let results = vote_simulator.create_and_vote_new_branch(
             45,
@@ -1377,9 +1377,8 @@ pub mod test {
         pubkey_votes.sort();
         assert_eq!(pubkey_votes, account_latest_votes);
 
-        // Each acccount has 1 vote in it. After simulating a vote in collect_vote_lockouts,
+        // Each account has 1 vote in it. After simulating a vote in collect_vote_lockouts,
         // the account will have 2 votes, with lockout 2 + 4 = 6. So expected weight for
-        // two acccounts is 2 * 6 = 12
         assert_eq!(bank_weight, 12)
     }
 
diff --git a/core/src/contact_info.rs b/core/src/contact_info.rs
index dfe66054c7..962b92b8b6 100644
--- a/core/src/contact_info.rs
+++ b/core/src/contact_info.rs
@@ -203,7 +203,7 @@ impl ContactInfo {
     }
 
     /// port must not be 0
-    /// ip must be specified and not mulitcast
+    /// ip must be specified and not multicast
     /// loopback ip is only allowed in tests
     pub fn is_valid_address(addr: &SocketAddr) -> bool {
         (addr.port() != 0) && Self::is_valid_ip(addr.ip())
diff --git a/core/src/crds.rs b/core/src/crds.rs
index 1f6cfe3c6f..37c02f794b 100644
--- a/core/src/crds.rs
+++ b/core/src/crds.rs
@@ -156,7 +156,7 @@ impl Crds {
         }
     }
 
-    /// Update the timestamp's of all the labels that are assosciated with Pubkey
+    /// Update the timestamp's of all the labels that are associated with Pubkey
     pub fn update_record_timestamp(&mut self, pubkey: &Pubkey, now: u64) {
         for label in &CrdsValue::record_labels(pubkey) {
             self.update_label_timestamp(label, now);
diff --git a/core/src/progress_map.rs b/core/src/progress_map.rs
index cdb867c17e..cb59609475 100644
--- a/core/src/progress_map.rs
+++ b/core/src/progress_map.rs
@@ -415,7 +415,7 @@ mod test {
         assert!(stats.propagated_validators.contains(&vote_pubkey));
         assert_eq!(stats.propagated_validators_stake, 1);
 
-        // Addding another pubkey should succeed
+        // Adding another pubkey should succeed
         vote_pubkey = Pubkey::new_rand();
         stats.add_vote_pubkey(&vote_pubkey, &mut all_pubkeys, 2);
         assert!(stats.propagated_validators.contains(&vote_pubkey));
@@ -475,7 +475,7 @@ mod test {
             staked_vote_accounts as u64
         );
 
-        // Addding another pubkey with same vote accounts should succeed, but stake
+        // Adding another pubkey with same vote accounts should succeed, but stake
         // shouldn't increase
         node_pubkey = Pubkey::new_rand();
         stats.add_node_pubkey_internal(
@@ -494,7 +494,7 @@ mod test {
             3
         );
 
-        // Addding another pubkey with different vote accounts should succeed
+        // Adding another pubkey with different vote accounts should succeed
         // and increase stake
         node_pubkey = Pubkey::new_rand();
         let vote_account_pubkeys: Vec<_> = std::iter::repeat_with(Pubkey::new_rand)
diff --git a/core/src/repair_service.rs b/core/src/repair_service.rs
index 14d7ef3a67..dc7d594a0a 100644
--- a/core/src/repair_service.rs
+++ b/core/src/repair_service.rs
@@ -1052,7 +1052,7 @@ mod test {
         // Should not be able to find signature for slot 9 for the tx
        assert!(bank9.get_signature_status(&vote_tx.signatures[0]).is_none());
 
-        // Getting balance should return the old balance (acounts were cleared)
+        // Getting balance should return the old balance (accounts were cleared)
         assert_eq!(
             bank9.get_balance(&keypairs.node_keypair.pubkey()),
             old_balance
diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs
index 58fe4d936e..69ef5276e4 100644
--- a/core/src/replay_stage.rs
+++ b/core/src/replay_stage.rs
@@ -1373,7 +1373,7 @@ impl ReplayStage {
         // 2) The best "selected" bank is on a different fork,
         //    switch_threshold fails
         // 3) The best "selected" bank is on a different fork,
-        //    switch_threshold succceeds
+        //    switch_threshold succeeds
         let mut failure_reasons = vec![];
         let selected_fork = {
             let switch_fork_decision = tower.check_switch_threshold(
@@ -1551,9 +1551,9 @@ impl ReplayStage {
 
         // Remove the vote/node pubkeys that we already know voted for this
         // slot. These vote accounts/validator identities are safe to drop
-        // because they don't to be ported back any further because earler
+        // because they don't need to be ported back any further because earlier
         // parents must have:
-        // 1) Also recorded these pubkeyss already, or
+        // 1) Also recorded these pubkeys already, or
         // 2) Already reached the propagation threshold, in which case
         // they no longer need to track the set of propagated validators
         newly_voted_pubkeys.retain(|vote_pubkey| {
@@ -1941,7 +1941,7 @@ pub(crate) mod tests {
             .get(2 * NUM_CONSECUTIVE_LEADER_SLOTS)
             .is_some());
 
-        // // There are 20 equally staked acccounts, of which 3 have built
+        // // There are 20 equally staked accounts, of which 3 have built
         // banks above or at bank 1. Because 3/20 < SUPERMINORITY_THRESHOLD,
         // we should see 3 validators in bank 1's propagated_validator set.
         let expected_leader_slots = vec![
@@ -3388,7 +3388,7 @@ pub(crate) mod tests {
         let mut parent_slot = 3;
 
         // Set up the progress map to show that the last leader slot of 4 is 3,
-        // which means 3 and 4 are consecutiive leader slots
+        // which means 3 and 4 are consecutive leader slots
         progress_map.insert(
             3,
             ForkProgress::new(
@@ -3519,7 +3519,7 @@ pub(crate) mod tests {
         );
 
         // Result should be equivalent to removing slot from BankForks
-        // and regeneratinig the `ancestor` `descendant` maps
+        // and regenerating the `ancestor` `descendant` maps
         for d in slot_2_descendants {
             bank_forks.write().unwrap().remove(d);
         }
diff --git a/core/src/snapshot_packager_service.rs b/core/src/snapshot_packager_service.rs
index 7457449165..7f67407340 100644
--- a/core/src/snapshot_packager_service.rs
+++ b/core/src/snapshot_packager_service.rs
@@ -179,7 +179,7 @@ mod tests {
         // Make tarball from packageable snapshot
         snapshot_utils::archive_snapshot_package(&snapshot_package).unwrap();
 
-        // before we compare, stick an empty status_cache in this dir so that the package comparision works
+        // before we compare, stick an empty status_cache in this dir so that the package comparison works
         // This is needed since the status_cache is added by the packager and is not collected from
         // the source dir for snapshots
         let dummy_slot_deltas: Vec = vec![];
diff --git a/core/src/validator.rs b/core/src/validator.rs
index d4b1bc8dc1..f112a6f65f 100644
--- a/core/src/validator.rs
+++ b/core/src/validator.rs
@@ -312,7 +312,7 @@ impl Validator {
         );
 
         if config.dev_halt_at_slot.is_some() {
-            // Simulate a confirmed root to avoid RPC errors with CommitmentmentConfig::max() and
+            // Simulate a confirmed root to avoid RPC errors with CommitmentConfig::max() and
             // to ensure RPC endpoints like getConfirmedBlock, which require a confirmed root, work
             block_commitment_cache
                 .write()
diff --git a/core/tests/bank_forks.rs b/core/tests/bank_forks.rs
index f614aadb61..8077664803 100644
--- a/core/tests/bank_forks.rs
+++ b/core/tests/bank_forks.rs
@@ -170,7 +170,7 @@ mod tests {
 
     #[test]
     fn test_bank_forks_snapshot_n() {
-        // create banks upto slot 4 and create 1 new account in each bank. test that bank 4 snapshots
+        // create banks up to slot 4 and create 1 new account in each bank. test that bank 4 snapshots
         // and restores correctly
         run_bank_forks_snapshot_n(
             4,
@@ -333,7 +333,7 @@ mod tests {
 
         // Check the archive we cached the state for earlier was generated correctly
 
-        // before we compare, stick an empty status_cache in this dir so that the package comparision works
+        // before we compare, stick an empty status_cache in this dir so that the package comparison works
         // This is needed since the status_cache is added by the packager and is not collected from
         // the source dir for snapshots
         let dummy_slot_deltas: Vec = vec![];
@@ -401,7 +401,7 @@ mod tests {
 
     #[test]
     fn test_bank_forks_status_cache_snapshot_n() {
-        // create banks upto slot (MAX_CACHE_ENTRIES * 2) + 1 while transferring 1 lamport into 2 different accounts each time
+        // create banks up to slot (MAX_CACHE_ENTRIES * 2) + 1 while transferring 1 lamport into 2 different accounts each time
         // this is done to ensure the AccountStorageEntries keep getting cleaned up as the root moves
         // ahead. Also tests the status_cache purge and status cache snapshotting.
         // Makes sure that the last bank is restored correctly
diff --git a/core/tests/fork-selection.rs b/core/tests/fork-selection.rs
index 66fb20b761..b9eebfb0ea 100644
--- a/core/tests/fork-selection.rs
+++ b/core/tests/fork-selection.rs
@@ -495,7 +495,7 @@ fn test_no_partitions() {
 /// * num_partitions - 1 to 100 partitions
 /// * fail_rate - 0 to 1.0 rate of packet receive failure
 /// * delay_count - number of forks to observe before voting
-/// * parasite_rate - number of parasite nodes that vote oposite the greedy choice
+/// * parasite_rate - number of parasite nodes that vote opposite the greedy choice
 fn test_with_partitions(
     num_partitions: usize,
     fail_rate: f64,
diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs
index 12bcce8a88..84ce077da8 100644
--- a/ledger/src/blockstore.rs
+++ b/ledger/src/blockstore.rs
@@ -1176,7 +1176,7 @@ impl Blockstore {
                 buffer_offset += shred_len;
                 last_index = index;
                 // All shreds are of the same length.
-                // Let's check if we have scope to accomodate another shred
+                // Let's check if we have scope to accommodate another shred
                 // If not, let's break right away, as it'll save on 1 DB read
                 if buffer.len().saturating_sub(buffer_offset) < shred_len {
                     break;
@@ -2123,7 +2123,7 @@ impl Blockstore {
             .expect("fetch from DuplicateSlots column family failed")
     }
 
-    // `new_shred` is asssumed to have slot and index equal to the given slot and index.
+    // `new_shred` is assumed to have slot and index equal to the given slot and index.
     // Returns the existing shred if `new_shred` is not equal to the existing shred at the
     // given slot and index as this implies the leader generated two different shreds with
     // the same slot and index
@@ -2674,7 +2674,7 @@ pub fn create_new_ledger(
     }
 
     // ensure the genesis archive can be unpacked and it is under
-    // max_genesis_archive_unpacked_size, immedately after creating it above.
+    // max_genesis_archive_unpacked_size, immediately after creating it above.
     {
         let temp_dir = tempfile::TempDir::new().unwrap();
         // unpack into a temp dir, while completely discarding the unpacked files
diff --git a/ledger/src/blockstore_processor.rs b/ledger/src/blockstore_processor.rs
index b6c156c564..f5a1a4ef3f 100644
--- a/ledger/src/blockstore_processor.rs
+++ b/ledger/src/blockstore_processor.rs
@@ -1482,7 +1482,7 @@ pub mod tests {
         let rooted_slots: Vec<_> = (0..=last_slot).collect();
         blockstore.set_roots(&rooted_slots).unwrap();
 
-        // Set a root on the next slot of the confrimed epoch
+        // Set a root on the next slot of the confirmed epoch
         blockstore.set_roots(&[last_slot + 1]).unwrap();
 
         // Check that we can properly restart the ledger / leader scheduler doesn't fail
diff --git a/ledger/src/entry.rs b/ledger/src/entry.rs
index a34f749298..2ce5acbf7b 100644
--- a/ledger/src/entry.rs
+++ b/ledger/src/entry.rs
@@ -738,7 +738,7 @@ mod tests {
         let tx0 = system_transaction::transfer(&keypair, &keypair.pubkey(), 0, zero);
         let tx1 = system_transaction::transfer(&keypair, &keypair.pubkey(), 1, zero);
 
-        // Verify entry with 2 transctions
+        // Verify entry with 2 transactions
         let mut e0 = vec![Entry::new(&zero, 0, vec![tx0, tx1])];
         assert!(e0.verify(&zero));
 
diff --git a/ledger/src/rooted_slot_iterator.rs b/ledger/src/rooted_slot_iterator.rs
index 202a1b8b67..9bd3b77b73 100644
--- a/ledger/src/rooted_slot_iterator.rs
+++ b/ledger/src/rooted_slot_iterator.rs
@@ -51,7 +51,7 @@ impl<'a> Iterator for RootedSlotIterator<'a> {
             .map(|r| {
                 self.blockstore
                     .meta(r)
-                    .expect("Database failure, couldnt fetch SlotMeta")
+                    .expect("Database failure, couldn't fetch SlotMeta")
             })
             .unwrap_or(None);
 
diff --git a/ledger/src/shred.rs b/ledger/src/shred.rs
index 8a658ee5d3..b6bf970454 100644
--- a/ledger/src/shred.rs
+++ b/ledger/src/shred.rs
@@ -1584,7 +1584,7 @@ pub mod tests {
         });
 
         coding_shreds.iter().enumerate().for_each(|(i, s)| {
-            // There'll be half the number of coding shreds, as FEC rate is 0.5
+            // There will be half the number of coding shreds, as FEC rate is 0.5
             // So multiply i with 2
             let expected_fec_set_index =
                 start_index + ((i * 2 / max_per_block) * max_per_block) as u32;
diff --git a/ledger/tests/shred.rs b/ledger/tests/shred.rs
index 911c41014a..23fc4559d8 100644
--- a/ledger/tests/shred.rs
+++ b/ledger/tests/shred.rs
@@ -143,7 +143,7 @@ fn test_multi_fec_block_different_size_coding() {
 
     // Necessary in order to ensure the last shred in the slot
     // is part of the recovered set, and that the below `index`
-    // cacluation in the loop is correct
+    // calculation in the loop is correct
     assert!(fec_data_shreds.len() % 2 == 0);
     for (i, recovered_shred) in recovered_data.into_iter().enumerate() {
         let index = first_data_index + (i * 2) + 1;
diff --git a/local-cluster/src/cluster_tests.rs b/local-cluster/src/cluster_tests.rs
index b385c9b36e..091ead9484 100644
--- a/local-cluster/src/cluster_tests.rs
+++ b/local-cluster/src/cluster_tests.rs
@@ -1,5 +1,5 @@
 use log::*;
-/// Cluster independant integration tests
+/// Cluster independent integration tests
 ///
 /// All tests must start from an entry point and a funding keypair and
 /// discover the rest of the network.
diff --git a/local-cluster/tests/local_cluster.rs b/local-cluster/tests/local_cluster.rs
index 3254cbab6b..a72d15399c 100644
--- a/local-cluster/tests/local_cluster.rs
+++ b/local-cluster/tests/local_cluster.rs
@@ -76,7 +76,7 @@ fn test_ledger_cleanup_service() {
             .slot_meta_iterator(0)
            .unwrap()
            .for_each(|_| slots += 1);
-        // with 3 nodes upto 3 slots can be in progress and not complete so max slots in blockstore should be upto 103
+        // with 3 nodes up to 3 slots can be in progress and not complete so max slots in blockstore should be up to 103
         assert!(slots <= 103, "got {}", slots);
     }
 }
@@ -363,7 +363,7 @@ fn test_kill_heaviest_partition() {
     // This test:
     // 1) Spins up four partitions, the heaviest being the first with more stake
     // 2) Schedules the other validators for sufficient slots in the schedule
-    // so that they will still be locked out of voting for the major partitoin
+    // so that they will still be locked out of voting for the major partition
     // when the partition resolves
     // 3) Kills the most staked partition. Validators are locked out, but should all
     // eventually choose the major partition
diff --git a/merkle-tree/src/merkle_tree.rs b/merkle-tree/src/merkle_tree.rs
index 7b13b2aa91..4b39d3f5df 100644
--- a/merkle-tree/src/merkle_tree.rs
+++ b/merkle-tree/src/merkle_tree.rs
@@ -81,7 +81,7 @@ impl MerkleTree {
         // this cause the total nodes number increased by tree height, we use this
         // condition as the max nodes consuming case.
         // n is current leaf nodes number
-        // asuming n-1 is a full balanced binary tree, n-1 tree nodes number will be
+        // assuming n-1 is a full balanced binary tree, n-1 tree nodes number will be
         // 2(n-1) - 1, n tree height is closed to log2(n) + 1
         // so the max nodes number is 2(n-1) - 1 + log2(n) + 1, finally we can use
         // 2n + log2(n+1) as a safe capacity value.
diff --git a/remote-wallet/src/remote_wallet.rs b/remote-wallet/src/remote_wallet.rs
index 3cf4485ca0..4f92fd8cf4 100644
--- a/remote-wallet/src/remote_wallet.rs
+++ b/remote-wallet/src/remote_wallet.rs
@@ -81,7 +81,7 @@ impl From for SignerError {
     }
 }
 
-/// Collection of conntected RemoteWallets
+/// Collection of connected RemoteWallets
 pub struct RemoteWalletManager {
     usb: Arc>,
     devices: RwLock>,
diff --git a/runtime/src/accounts.rs b/runtime/src/accounts.rs
index 06bc928d1b..c006db6ac1 100644
--- a/runtime/src/accounts.rs
+++ b/runtime/src/accounts.rs
@@ -60,7 +60,7 @@ pub type TransactionLoaders = Vec>;
 pub type TransactionLoadResult = (TransactionAccounts, TransactionLoaders, TransactionRent);
 
 pub enum AccountAddressFilter {
-    Exclude, // exclude all addresses matching the fiter
+    Exclude, // exclude all addresses matching the filter
     Include, // only include addresses matching the filter
 }
 
diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs
index 1b070f0d1d..49e7ccba0a 100644
--- a/runtime/src/accounts_db.rs
+++ b/runtime/src/accounts_db.rs
@@ -748,7 +748,7 @@ impl AccountsDB {
         }
     }
 
-    // Atomicallly process reclaims and new dead_slots in this thread, gauranteeing
+    // Atomically process reclaims and new dead_slots in this thread, guaranteeing
     // complete data removal for slots in reclaims.
     fn handle_reclaims_ensure_cleanup(&self, reclaims: SlotSlice) {
         let mut dead_accounts = Measure::start("reclaims::remove_dead_accounts");
@@ -3683,7 +3683,7 @@ pub mod tests {
         let mut current_slot = 0;
         let accounts = AccountsDB::new_single();
 
-        // create intermidiate updates to purged_pubkey1 so that
+        // create intermediate updates to purged_pubkey1 so that
         // generate_index must add slots as root last at once
         current_slot += 1;
         accounts.store(current_slot, &[(&pubkey, &account)]);
diff --git a/runtime/src/append_vec.rs b/runtime/src/append_vec.rs
index 283c21be18..82a3b3b06f 100644
--- a/runtime/src/append_vec.rs
+++ b/runtime/src/append_vec.rs
@@ -696,7 +696,7 @@ pub mod tests {
         account.set_data_len_unsafe(crafted_data_len);
         assert_eq!(account.meta.data_len, crafted_data_len);
 
-        // Reload accoutns and observe crafted_data_len
+        // Reload accounts and observe crafted_data_len
         let accounts = av.accounts(0);
         let account = accounts.first().unwrap();
         assert_eq!(account.meta.data_len, crafted_data_len);
@@ -763,7 +763,7 @@ pub mod tests {
         {
             let executable_bool: &bool = &account.account_meta.executable;
             // Depending on use, *executable_bool can be truthy or falsy due to direct memory manipulation
-            // assert_eq! thinks *exeutable_bool is equal to false but the if condition thinks it's not, contradictly.
+            // assert_eq! thinks *executable_bool is equal to false but the if condition thinks it's not, contradictorily.
             assert_eq!(*executable_bool, false);
             const FALSE: bool = false; // keep clippy happy
             if *executable_bool == FALSE {
diff --git a/runtime/src/bank_forks.rs b/runtime/src/bank_forks.rs
index 65ecbb85b2..13e7796930 100644
--- a/runtime/src/bank_forks.rs
+++ b/runtime/src/bank_forks.rs
@@ -1,4 +1,4 @@
-//! The `bank_forks` module implments BankForks a DAG of checkpointed Banks
+//! The `bank_forks` module implements BankForks a DAG of checkpointed Banks
 
 use crate::snapshot_package::{AccountsPackageSendError, AccountsPackageSender};
 use crate::snapshot_utils::{self, SnapshotError};
diff --git a/runtime/src/legacy_system_instruction_processor0.rs b/runtime/src/legacy_system_instruction_processor0.rs
index f5f29605fe..7413605966 100644
--- a/runtime/src/legacy_system_instruction_processor0.rs
+++ b/runtime/src/legacy_system_instruction_processor0.rs
@@ -464,7 +464,7 @@ mod tests {
 
     #[test]
     fn test_create_with_zero_lamports() {
-        // create account with zero lamports tranferred
+        // create account with zero lamports transferred
         let new_owner = Pubkey::new(&[9; 32]);
         let from = Pubkey::new_rand();
         let from_account = Account::new_ref(100, 1, &Pubkey::new_rand()); // not from system account
diff --git a/runtime/src/message_processor.rs b/runtime/src/message_processor.rs
index 5754499055..d7e9872534 100644
--- a/runtime/src/message_processor.rs
+++ b/runtime/src/message_processor.rs
@@ -861,7 +861,7 @@ mod tests {
                 .lamports(1, 2)
                 .verify(),
             Err(InstructionError::ExecutableLamportChange),
-            "owner should not be able to add lamports once makred executable"
+            "owner should not be able to add lamports once marked executable"
         );
         assert_eq!(
             Change::new(&owner, &owner)
@@ -1045,7 +1045,7 @@ mod tests {
                 .data(vec![0], vec![0, 0])
                 .verify(),
             Ok(()),
-            "system program should be able to change acount data size"
+            "system program should be able to change account data size"
         );
     }
 
diff --git a/sdk/src/abi_digester.rs b/sdk/src/abi_digester.rs
index 1fb5ab5342..2efb11b262 100644
--- a/sdk/src/abi_digester.rs
+++ b/sdk/src/abi_digester.rs
@@ -62,7 +62,7 @@ impl AbiDigester {
         }
     }
 
-    // must create separate instances because we can't pass the single instnace to
+    // must create separate instances because we can't pass the single instance to
     // `.serialize()` multiple times
     pub fn create_new(&self) -> Self {
         Self {
diff --git a/sdk/src/slot_history.rs b/sdk/src/slot_history.rs
index bdee200a45..695669aafd 100644
--- a/sdk/src/slot_history.rs
+++ b/sdk/src/slot_history.rs
@@ -83,7 +83,7 @@ mod tests {
     #[test]
     fn slot_history_test1() {
         solana_logger::setup();
-        // should be divisable by 64 since the clear logic works on blocks
+        // should be divisible by 64 since the clear logic works on blocks
         assert_eq!(MAX_ENTRIES % 64, 0);
         let mut slot_history = SlotHistory::default();
         info!("add 2");