Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com>
This commit is contained in:
Greg Fitzgerald
2020-06-17 21:54:52 -06:00
committed by GitHub
parent 5b9cd72d8f
commit 0550b893b0
29 changed files with 45 additions and 46 deletions

View File

@ -96,7 +96,7 @@ impl BroadcastStats for InsertShredsStats {
}
}
// Tracks metrics of type `T` acrosss multiple threads
// Tracks metrics of type `T` across multiple threads
#[derive(Default)]
pub(crate) struct BatchCounter<T: BroadcastStats + Default> {
// The number of batches processed across all threads so far

View File

@ -1245,7 +1245,7 @@ pub mod test {
// The other two validators voted at slots 46, 47, which
// will only both show up in slot 48, at which point
// 2/5 > SWITCH_FORK_THRESHOLD of the stake has voted
// on another fork, so switching should suceed
// on another fork, so switching should succeed
let votes_to_simulate = (46..=48).collect();
let results = vote_simulator.create_and_vote_new_branch(
45,
@ -1377,9 +1377,8 @@ pub mod test {
pubkey_votes.sort();
assert_eq!(pubkey_votes, account_latest_votes);
// Each acccount has 1 vote in it. After simulating a vote in collect_vote_lockouts,
// Each account has 1 vote in it. After simulating a vote in collect_vote_lockouts,
// the account will have 2 votes, with lockout 2 + 4 = 6. So expected weight for
// two accounts is 2 * 6 = 12
assert_eq!(bank_weight, 12)
}

View File

@ -203,7 +203,7 @@ impl ContactInfo {
}
/// port must not be 0
/// ip must be specified and not mulitcast
/// ip must be specified and not multicast
/// loopback ip is only allowed in tests
pub fn is_valid_address(addr: &SocketAddr) -> bool {
(addr.port() != 0) && Self::is_valid_ip(addr.ip())

View File

@ -156,7 +156,7 @@ impl Crds {
}
}
/// Update the timestamp's of all the labels that are assosciated with Pubkey
/// Update the timestamps of all the labels that are associated with Pubkey
pub fn update_record_timestamp(&mut self, pubkey: &Pubkey, now: u64) {
for label in &CrdsValue::record_labels(pubkey) {
self.update_label_timestamp(label, now);

View File

@ -415,7 +415,7 @@ mod test {
assert!(stats.propagated_validators.contains(&vote_pubkey));
assert_eq!(stats.propagated_validators_stake, 1);
// Addding another pubkey should succeed
// Adding another pubkey should succeed
vote_pubkey = Pubkey::new_rand();
stats.add_vote_pubkey(&vote_pubkey, &mut all_pubkeys, 2);
assert!(stats.propagated_validators.contains(&vote_pubkey));
@ -475,7 +475,7 @@ mod test {
staked_vote_accounts as u64
);
// Addding another pubkey with same vote accounts should succeed, but stake
// Adding another pubkey with same vote accounts should succeed, but stake
// shouldn't increase
node_pubkey = Pubkey::new_rand();
stats.add_node_pubkey_internal(
@ -494,7 +494,7 @@ mod test {
3
);
// Addding another pubkey with different vote accounts should succeed
// Adding another pubkey with different vote accounts should succeed
// and increase stake
node_pubkey = Pubkey::new_rand();
let vote_account_pubkeys: Vec<_> = std::iter::repeat_with(Pubkey::new_rand)

View File

@ -1052,7 +1052,7 @@ mod test {
// Should not be able to find signature for slot 9 for the tx
assert!(bank9.get_signature_status(&vote_tx.signatures[0]).is_none());
// Getting balance should return the old balance (acounts were cleared)
// Getting balance should return the old balance (accounts were cleared)
assert_eq!(
bank9.get_balance(&keypairs.node_keypair.pubkey()),
old_balance

View File

@ -1373,7 +1373,7 @@ impl ReplayStage {
// 2) The best "selected" bank is on a different fork,
// switch_threshold fails
// 3) The best "selected" bank is on a different fork,
// switch_threshold succceeds
// switch_threshold succeeds
let mut failure_reasons = vec![];
let selected_fork = {
let switch_fork_decision = tower.check_switch_threshold(
@ -1551,9 +1551,9 @@ impl ReplayStage {
// Remove the vote/node pubkeys that we already know voted for this
// slot. These vote accounts/validator identities are safe to drop
// because they don't to be ported back any further because earler
// because they don't need to be ported back any further because earlier
// parents must have:
// 1) Also recorded these pubkeyss already, or
// 1) Also recorded these pubkeys already, or
// 2) Already reached the propagation threshold, in which case
// they no longer need to track the set of propagated validators
newly_voted_pubkeys.retain(|vote_pubkey| {
@ -1941,7 +1941,7 @@ pub(crate) mod tests {
.get(2 * NUM_CONSECUTIVE_LEADER_SLOTS)
.is_some());
// // There are 20 equally staked acccounts, of which 3 have built
// // There are 20 equally staked accounts, of which 3 have built
// banks above or at bank 1. Because 3/20 < SUPERMINORITY_THRESHOLD,
// we should see 3 validators in bank 1's propagated_validator set.
let expected_leader_slots = vec![
@ -3388,7 +3388,7 @@ pub(crate) mod tests {
let mut parent_slot = 3;
// Set up the progress map to show that the last leader slot of 4 is 3,
// which means 3 and 4 are consecutiive leader slots
// which means 3 and 4 are consecutive leader slots
progress_map.insert(
3,
ForkProgress::new(
@ -3519,7 +3519,7 @@ pub(crate) mod tests {
);
// Result should be equivalent to removing slot from BankForks
// and regeneratinig the `ancestor` `descendant` maps
// and regenerating the `ancestor` `descendant` maps
for d in slot_2_descendants {
bank_forks.write().unwrap().remove(d);
}

View File

@ -179,7 +179,7 @@ mod tests {
// Make tarball from packageable snapshot
snapshot_utils::archive_snapshot_package(&snapshot_package).unwrap();
// before we compare, stick an empty status_cache in this dir so that the package comparision works
// before we compare, stick an empty status_cache in this dir so that the package comparison works
// This is needed since the status_cache is added by the packager and is not collected from
// the source dir for snapshots
let dummy_slot_deltas: Vec<BankSlotDelta> = vec![];

View File

@ -312,7 +312,7 @@ impl Validator {
);
if config.dev_halt_at_slot.is_some() {
// Simulate a confirmed root to avoid RPC errors with CommitmentmentConfig::max() and
// Simulate a confirmed root to avoid RPC errors with CommitmentConfig::max() and
// to ensure RPC endpoints like getConfirmedBlock, which require a confirmed root, work
block_commitment_cache
.write()

View File

@ -170,7 +170,7 @@ mod tests {
#[test]
fn test_bank_forks_snapshot_n() {
// create banks upto slot 4 and create 1 new account in each bank. test that bank 4 snapshots
// create banks up to slot 4 and create 1 new account in each bank. test that bank 4 snapshots
// and restores correctly
run_bank_forks_snapshot_n(
4,
@ -333,7 +333,7 @@ mod tests {
// Check the archive we cached the state for earlier was generated correctly
// before we compare, stick an empty status_cache in this dir so that the package comparision works
// before we compare, stick an empty status_cache in this dir so that the package comparison works
// This is needed since the status_cache is added by the packager and is not collected from
// the source dir for snapshots
let dummy_slot_deltas: Vec<BankSlotDelta> = vec![];
@ -401,7 +401,7 @@ mod tests {
#[test]
fn test_bank_forks_status_cache_snapshot_n() {
// create banks upto slot (MAX_CACHE_ENTRIES * 2) + 1 while transferring 1 lamport into 2 different accounts each time
// create banks up to slot (MAX_CACHE_ENTRIES * 2) + 1 while transferring 1 lamport into 2 different accounts each time
// this is done to ensure the AccountStorageEntries keep getting cleaned up as the root moves
// ahead. Also tests the status_cache purge and status cache snapshotting.
// Makes sure that the last bank is restored correctly

View File

@ -495,7 +495,7 @@ fn test_no_partitions() {
/// * num_partitions - 1 to 100 partitions
/// * fail_rate - 0 to 1.0 rate of packet receive failure
/// * delay_count - number of forks to observe before voting
/// * parasite_rate - number of parasite nodes that vote oposite the greedy choice
/// * parasite_rate - number of parasite nodes that vote opposite the greedy choice
fn test_with_partitions(
num_partitions: usize,
fail_rate: f64,