Upgrade Rust to 1.52.0 (#17096)
* Upgrade Rust to 1.52.0: update nightly_version to the newly pushed Docker image and fix clippy lint errors. 1.52 comes with grcov 0.8.0, so include this version in the script.
* Upgrade to Rust 1.52.1.
* Disable Serum in downstream projects until it is upgraded to Rust 1.52.1.
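Nearly all of the diff below is one mechanical clippy fix: asserting a bool against a literal (`assert_eq!(x, true)`), which the new toolchain's clippy flags (the `bool_assert_comparison` lint; the commit message only says "clippy lint errors", so naming the lint is my reading). A minimal sketch of the before/after pattern:

```rust
fn main() {
    let done = true;
    let failed = false;

    // Before: flagged by clippy when a bool is compared to a literal.
    // assert_eq!(done, true);
    // assert_eq!(failed, false);

    // After: assert the bool (or its negation) directly. Equivalent,
    // and reads more naturally.
    assert!(done);
    assert!(!failed);
}
```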
@@ -1547,7 +1547,7 @@ mod tests {
             .collect();
         trace!("done");
         assert_eq!(entries.len(), genesis_config.ticks_per_slot as usize);
-        assert_eq!(entries.verify(&start_hash), true);
+        assert!(entries.verify(&start_hash));
         assert_eq!(entries[entries.len() - 1].hash, bank.last_blockhash());
         banking_stage.join().unwrap();
     }
@@ -1656,7 +1656,7 @@ mod tests {
                 .map(|(_bank, (entry, _tick_height))| entry)
                 .collect();
 
-            assert_eq!(entries.verify(&blockhash), true);
+            assert!(entries.verify(&blockhash));
            if !entries.is_empty() {
                 blockhash = entries.last().unwrap().hash;
                 for entry in entries {
@@ -2124,7 +2124,7 @@ mod tests {
         }
         trace!("done ticking");
 
-        assert_eq!(done, true);
+        assert!(done);
 
         let transactions = vec![system_transaction::transfer(
             &mint_keypair,
@@ -2369,6 +2369,7 @@ impl ClusterInfo {
         }
     }
 
+    #[allow(clippy::needless_collect)]
     fn handle_batch_push_messages(
         &self,
         messages: Vec<(Pubkey, Vec<CrdsValue>)>,
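The `#[allow(clippy::needless_collect)]` added above (and on the tests below) silences a lint, new with this toolchain, that fires when an iterator is collected into a `Vec` that is only iterated again afterwards. In these gossip paths the intermediate collection is evidently intentional, so the lint is allowed rather than rewritten. A stand-alone sketch, with hypothetical names, of the pattern the lint targets and the rewrite it suggests:

```rust
// Hypothetical example of the clippy::needless_collect pattern.
fn sum_of_lengths(words: &[&str]) -> usize {
    // Flagged: collecting only to iterate again allocates needlessly.
    // let lengths: Vec<usize> = words.iter().map(|w| w.len()).collect();
    // lengths.into_iter().sum()

    // Suggested rewrite: stay with the iterator.
    words.iter().map(|w| w.len()).sum()
}

fn main() {
    assert_eq!(sum_of_lengths(&["a", "bc"]), 3);
}
```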
@@ -3229,6 +3230,7 @@ mod tests {
     }
 
     #[test]
+    #[allow(clippy::needless_collect)]
     fn test_handle_ping_messages() {
         let mut rng = rand::thread_rng();
         let this_node = Arc::new(Keypair::new());
@@ -3583,7 +3585,7 @@ mod tests {
             .unwrap()
             .new_push_messages(cluster_info.drain_push_queue(), timestamp());
         // there should be some pushes ready
-        assert_eq!(push_messages.is_empty(), false);
+        assert!(!push_messages.is_empty());
         push_messages
             .values()
             .for_each(|v| v.par_iter().for_each(|v| assert!(v.verify())));
@@ -3934,6 +3936,7 @@ mod tests {
     }
 
     #[test]
+    #[allow(clippy::needless_collect)]
     fn test_split_messages_packet_size() {
         // Test that if a value is smaller than payload size but too large to be wrapped in a vec
         // that it is still dropped
@@ -3967,9 +3970,10 @@ mod tests {
         let expected_len = (NUM_VALUES + num_values_per_payload - 1) / num_values_per_payload;
         let msgs = vec![value; NUM_VALUES as usize];
 
-        let split: Vec<_> =
-            ClusterInfo::split_gossip_messages(PUSH_MESSAGE_MAX_PAYLOAD_SIZE, msgs).collect();
-        assert!(split.len() as u64 <= expected_len);
+        assert!(
+            ClusterInfo::split_gossip_messages(PUSH_MESSAGE_MAX_PAYLOAD_SIZE, msgs).count() as u64
+                <= expected_len
+        );
     }
 
     #[test]
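Where only the length of the collected result was used, the diff takes clippy's suggestion instead of allowing the lint: `Iterator::count` consumes the iterator without building a `Vec`. A tiny self-contained sketch of the same rewrite:

```rust
fn main() {
    // Before: let v: Vec<_> = iter.collect(); v.len()
    // After: iter.count() gives the same number without allocating.
    let n_even_squares = (0..10u64).map(|x| x * x).filter(|x| x % 2 == 0).count();
    assert_eq!(n_even_squares, 5); // 0, 4, 16, 36, 64
}
```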
@@ -91,6 +91,8 @@ pub type Stake = u64;
 pub type VotedStakes = HashMap<Slot, Stake>;
 pub type PubkeyVotes = Vec<(Pubkey, Slot)>;
 
+// lint warning "bank_weight is never read"
+#[allow(dead_code)]
 pub(crate) struct ComputedBankState {
     pub voted_stakes: VotedStakes,
     pub total_stake: Stake,
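The new attribute answers the warning quoted in the comment: `bank_weight` is written but never read, which the upgraded toolchain reports through the `dead_code` lint. Rather than delete the field, the struct opts out. An illustrative example (hypothetical struct, not from this crate) of the warning being silenced:

```rust
// Without the attribute, rustc warns that `weight` is never read.
#[allow(dead_code)]
struct ComputedState {
    total: u64,
    weight: u64, // written during construction, read nowhere
}

fn main() {
    let s = ComputedState { total: 7, weight: 0 };
    println!("total = {}", s.total);
}
```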
@@ -1476,11 +1476,13 @@ mod test {
         }
     }
     fn run_test_mask(mask_bits: u32) {
-        let masks: Vec<_> = (0..2u64.pow(mask_bits))
-            .map(|seed| CrdsFilter::compute_mask(seed, mask_bits))
-            .dedup()
-            .collect();
-        assert_eq!(masks.len(), 2u64.pow(mask_bits) as usize)
+        assert_eq!(
+            (0..2u64.pow(mask_bits))
+                .map(|seed| CrdsFilter::compute_mask(seed, mask_bits))
+                .dedup()
+                .count(),
+            2u64.pow(mask_bits) as usize
+        )
     }
 
     #[test]
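Same `collect`-then-`len` to `count` rewrite as in the gossip split test, here running through itertools' `dedup`, which this test already uses. A sketch (assuming `itertools` as a dependency on the example's side, as the crate itself has):

```rust
use itertools::Itertools;

fn main() {
    // dedup() collapses consecutive duplicates; count() measures the
    // result without materializing an intermediate Vec.
    let distinct_runs = [1, 1, 2, 3, 3, 3, 4].iter().dedup().count();
    assert_eq!(distinct_runs, 4);
}
```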
@@ -382,7 +382,7 @@ mod tests {
         assert_eq!(slots.to_slots(1), vec![1, 2, 10]);
         assert_eq!(slots.to_slots(2), vec![2, 10]);
         assert_eq!(slots.to_slots(3), vec![10]);
-        assert_eq!(slots.to_slots(11).is_empty(), true);
+        assert!(slots.to_slots(11).is_empty());
     }
     #[test]
     fn test_epoch_slots_compressed() {
@@ -452,7 +452,7 @@ mod tests {
         assert_eq!(slots.wallclock, 1);
         assert_eq!(slots.to_slots(0), range);
         assert_eq!(slots.to_slots(4999), vec![4999]);
-        assert_eq!(slots.to_slots(5000).is_empty(), true);
+        assert!(slots.to_slots(5000).is_empty());
     }
     #[test]
     fn test_epoch_slots_fill_sparce_range() {
@@ -342,19 +342,19 @@ mod tests {
         let spy_ref = Arc::new(cluster_info);
 
         let (met_criteria, secs, _, tvu_peers) = spy(spy_ref.clone(), None, Some(1), None, None);
-        assert_eq!(met_criteria, false);
+        assert!(!met_criteria);
         assert_eq!(secs, 1);
         assert_eq!(tvu_peers, spy_ref.tvu_peers());
 
         // Find num_nodes
         let (met_criteria, _, _, _) = spy(spy_ref.clone(), Some(1), None, None, None);
-        assert_eq!(met_criteria, true);
+        assert!(met_criteria);
         let (met_criteria, _, _, _) = spy(spy_ref.clone(), Some(2), None, None, None);
-        assert_eq!(met_criteria, true);
+        assert!(met_criteria);
 
         // Find specific node by pubkey
         let (met_criteria, _, _, _) = spy(spy_ref.clone(), None, None, Some(peer0), None);
-        assert_eq!(met_criteria, true);
+        assert!(met_criteria);
         let (met_criteria, _, _, _) = spy(
             spy_ref.clone(),
             None,
@@ -362,13 +362,13 @@ mod tests {
             Some(solana_sdk::pubkey::new_rand()),
             None,
         );
-        assert_eq!(met_criteria, false);
+        assert!(!met_criteria);
 
         // Find num_nodes *and* specific node by pubkey
         let (met_criteria, _, _, _) = spy(spy_ref.clone(), Some(1), None, Some(peer0), None);
-        assert_eq!(met_criteria, true);
+        assert!(met_criteria);
         let (met_criteria, _, _, _) = spy(spy_ref.clone(), Some(3), Some(0), Some(peer0), None);
-        assert_eq!(met_criteria, false);
+        assert!(!met_criteria);
         let (met_criteria, _, _, _) = spy(
             spy_ref.clone(),
             Some(1),
@@ -376,12 +376,12 @@ mod tests {
             Some(solana_sdk::pubkey::new_rand()),
             None,
         );
-        assert_eq!(met_criteria, false);
+        assert!(!met_criteria);
 
         // Find specific node by gossip address
         let (met_criteria, _, _, _) =
             spy(spy_ref.clone(), None, None, None, Some(&peer0_info.gossip));
-        assert_eq!(met_criteria, true);
+        assert!(met_criteria);
 
         let (met_criteria, _, _, _) = spy(
             spy_ref,
@@ -390,6 +390,6 @@ mod tests {
             None,
             Some(&"1.1.1.1:1234".parse().unwrap()),
         );
-        assert_eq!(met_criteria, false);
+        assert!(!met_criteria);
     }
 }
@@ -2685,7 +2685,7 @@ mod test {
         let mut tower = Tower::new_for_tests(10, 0.9);
         tower.record_vote(1, Hash::default());
 
-        assert_eq!(tower.is_stray_last_vote(), false);
+        assert!(!tower.is_stray_last_vote());
         assert_eq!(
             heaviest_subtree_fork_choice.heaviest_slot_on_same_voted_fork(&tower),
             Some((2, Hash::default()))
@@ -2700,7 +2700,7 @@ mod test {
             .adjust_lockouts_after_replay(0, &slot_history)
             .unwrap();
 
-        assert_eq!(tower.is_stray_last_vote(), true);
+        assert!(tower.is_stray_last_vote());
         assert_eq!(
             heaviest_subtree_fork_choice.heaviest_slot_on_same_voted_fork(&tower),
             Some((2, Hash::default()))
@@ -2712,7 +2712,7 @@ mod test {
             .adjust_lockouts_after_replay(0, &slot_history)
             .unwrap();
 
-        assert_eq!(tower.is_stray_last_vote(), true);
+        assert!(tower.is_stray_last_vote());
         assert_eq!(
             heaviest_subtree_fork_choice.heaviest_slot_on_same_voted_fork(&tower),
             None
@@ -1,4 +1,4 @@
-#![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(specialization))]
+#![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(min_specialization))]
 #![allow(clippy::integer_arithmetic)]
 //! The `solana` library implements the Solana high-performance blockchain architecture.
 //! It includes a full Rust implementation of the architecture (see
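The gate change above reflects the nightly feature split: full `specialization` is known to be unsound, and newer compilers steer code toward the `min_specialization` subset, which only accepts "always applicable" specializing impls. A nightly-only sketch (not from this codebase) of what the subset still permits, a concrete impl specializing a blanket default:

```rust
#![feature(min_specialization)] // requires a nightly toolchain

trait Describe {
    fn describe(&self) -> String;
}

impl<T> Describe for T {
    default fn describe(&self) -> String {
        "something".to_string()
    }
}

// Fully concrete, hence "always applicable": accepted by min_specialization.
impl Describe for u32 {
    fn describe(&self) -> String {
        format!("the u32 {}", self)
    }
}

fn main() {
    assert_eq!(5u32.describe(), "the u32 5");
    assert_eq!('x'.describe(), "something");
}
```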
@@ -1413,7 +1413,7 @@ mod tests {
 
         let bootstrap_validator_id = leader_schedule_cache.slot_leader_at(0, None).unwrap();
 
-        assert_eq!(poh_recorder.reached_leader_tick(0), true);
+        assert!(poh_recorder.reached_leader_tick(0));
 
         let grace_ticks = bank.ticks_per_slot() * MAX_GRACE_SLOTS;
         let new_tick_height = NUM_CONSECUTIVE_LEADER_SLOTS * bank.ticks_per_slot();
@@ -1475,11 +1475,11 @@ mod tests {
         );
 
         // Test that with no next leader slot, we don't reach the leader slot
-        assert_eq!(poh_recorder.reached_leader_slot().0, false);
+        assert!(!poh_recorder.reached_leader_slot().0);
 
         // Test that with no next leader slot in reset(), we don't reach the leader slot
         poh_recorder.reset(bank.last_blockhash(), 0, None);
-        assert_eq!(poh_recorder.reached_leader_slot().0, false);
+        assert!(!poh_recorder.reached_leader_slot().0);
 
         // Provide a leader slot one slot down
         poh_recorder.reset(bank.last_blockhash(), 0, Some((2, 2)));
@@ -1507,13 +1507,13 @@ mod tests {
             .unwrap();
 
         // Test that we don't reach the leader slot because of grace ticks
-        assert_eq!(poh_recorder.reached_leader_slot().0, false);
+        assert!(!poh_recorder.reached_leader_slot().0);
 
         // reset poh now. we should immediately be leader
         poh_recorder.reset(bank.last_blockhash(), 1, Some((2, 2)));
         let (reached_leader_slot, grace_ticks, leader_slot, ..) =
             poh_recorder.reached_leader_slot();
-        assert_eq!(reached_leader_slot, true);
+        assert!(reached_leader_slot);
         assert_eq!(grace_ticks, 0);
         assert_eq!(leader_slot, 2);
 
@@ -1527,7 +1527,7 @@ mod tests {
         }
 
         // We are not the leader yet, as expected
-        assert_eq!(poh_recorder.reached_leader_slot().0, false);
+        assert!(!poh_recorder.reached_leader_slot().0);
 
         // Send the grace ticks
         for _ in 0..bank.ticks_per_slot() / GRACE_TICKS_FACTOR {
@@ -1537,7 +1537,7 @@ mod tests {
         // We should be the leader now
         let (reached_leader_slot, grace_ticks, leader_slot, ..) =
             poh_recorder.reached_leader_slot();
-        assert_eq!(reached_leader_slot, true);
+        assert!(reached_leader_slot);
         assert_eq!(grace_ticks, bank.ticks_per_slot() / GRACE_TICKS_FACTOR);
         assert_eq!(leader_slot, 3);
 
@@ -1551,13 +1551,13 @@ mod tests {
         }
 
         // We are not the leader yet, as expected
-        assert_eq!(poh_recorder.reached_leader_slot().0, false);
+        assert!(!poh_recorder.reached_leader_slot().0);
         poh_recorder.reset(bank.last_blockhash(), 3, Some((4, 4)));
 
         // without sending more ticks, we should be leader now
         let (reached_leader_slot, grace_ticks, leader_slot, ..) =
             poh_recorder.reached_leader_slot();
-        assert_eq!(reached_leader_slot, true);
+        assert!(reached_leader_slot);
         assert_eq!(grace_ticks, 0);
         assert_eq!(leader_slot, 4);
 
@@ -1575,7 +1575,7 @@ mod tests {
         // We are overdue to lead
         let (reached_leader_slot, grace_ticks, leader_slot, ..) =
             poh_recorder.reached_leader_slot();
-        assert_eq!(reached_leader_slot, true);
+        assert!(reached_leader_slot);
         assert_eq!(grace_ticks, overshoot_factor * bank.ticks_per_slot());
         assert_eq!(leader_slot, 9);
     }
@@ -1605,47 +1605,29 @@ mod tests {
         );
 
         // Test that with no leader slot, we don't reach the leader tick
-        assert_eq!(
-            poh_recorder.would_be_leader(2 * bank.ticks_per_slot()),
-            false
-        );
+        assert!(!poh_recorder.would_be_leader(2 * bank.ticks_per_slot()));
 
         poh_recorder.reset(bank.last_blockhash(), 0, None);
 
-        assert_eq!(
-            poh_recorder.would_be_leader(2 * bank.ticks_per_slot()),
-            false
-        );
+        assert!(!poh_recorder.would_be_leader(2 * bank.ticks_per_slot()));
 
         // We reset with leader slot after 3 slots
         let bank_slot = bank.slot() + 3;
         poh_recorder.reset(bank.last_blockhash(), 0, Some((bank_slot, bank_slot)));
 
         // Test that the node won't be leader in next 2 slots
-        assert_eq!(
-            poh_recorder.would_be_leader(2 * bank.ticks_per_slot()),
-            false
-        );
+        assert!(!poh_recorder.would_be_leader(2 * bank.ticks_per_slot()));
 
         // Test that the node will be leader in next 3 slots
-        assert_eq!(
-            poh_recorder.would_be_leader(3 * bank.ticks_per_slot()),
-            true
-        );
+        assert!(poh_recorder.would_be_leader(3 * bank.ticks_per_slot()));
 
-        assert_eq!(
-            poh_recorder.would_be_leader(2 * bank.ticks_per_slot()),
-            false
-        );
+        assert!(!poh_recorder.would_be_leader(2 * bank.ticks_per_slot()));
 
         // Move the bank up a slot (so that max_tick_height > slot 0's tick_height)
         let bank = Arc::new(Bank::new_from_parent(&bank, &Pubkey::default(), 1));
         // If we set the working bank, the node should be leader within next 2 slots
         poh_recorder.set_bank(&bank);
-        assert_eq!(
-            poh_recorder.would_be_leader(2 * bank.ticks_per_slot()),
-            true
-        );
+        assert!(poh_recorder.would_be_leader(2 * bank.ticks_per_slot()));
     }
 }
@@ -739,7 +739,7 @@ mod tests {
         let mut packets = Packets::new(vec![]);
         solana_streamer::packet::recv_from(&mut packets, &me_retransmit, 1).unwrap();
         assert_eq!(packets.packets.len(), 1);
-        assert_eq!(packets.packets[0].meta.repair, false);
+        assert!(!packets.packets[0].meta.repair);
 
         let mut repair = packet.clone();
         repair.meta.repair = true;
@@ -752,7 +752,7 @@ mod tests {
         let mut packets = Packets::new(vec![]);
         solana_streamer::packet::recv_from(&mut packets, &me_retransmit, 1).unwrap();
         assert_eq!(packets.packets.len(), 1);
-        assert_eq!(packets.packets[0].meta.repair, false);
+        assert!(!packets.packets[0].meta.repair);
     }
 
     #[test]
@@ -153,7 +153,7 @@ pub mod tests {
         batch[0].packets[1].meta.size = shred.payload.len();
 
         let rv = verifier.verify_batch(batch);
-        assert_eq!(rv[0].packets[0].meta.discard, false);
-        assert_eq!(rv[0].packets[1].meta.discard, true);
+        assert!(!rv[0].packets[0].meta.discard);
+        assert!(rv[0].packets[1].meta.discard);
     }
 }
@@ -105,7 +105,7 @@ mod tests {
         // Only the nonempty packet had a timestamp greater than 1
        let (new_update_version, updates) = verified_vote_packets.get_latest_votes(1);
         assert_eq!(new_update_version, 2);
-        assert_eq!(updates.packets.is_empty(), false);
+        assert!(!updates.packets.is_empty());
 
         // If the given timestamp is greater than all timestamps in any update,
         // returned timestamp should be the same as the given timestamp, and
@@ -671,63 +671,94 @@ mod test {
         let mut shreds = local_entries_to_shred(&[Entry::default()], 0, 0, &leader_keypair);
 
         // with a Bank for slot 0, shred continues
-        assert_eq!(
-            should_retransmit_and_persist(&shreds[0], Some(bank.clone()), &cache, &me_id, 0, 0),
-            true
-        );
+        assert!(should_retransmit_and_persist(
+            &shreds[0],
+            Some(bank.clone()),
+            &cache,
+            &me_id,
+            0,
+            0
+        ));
         // with the wrong shred_version, shred gets thrown out
-        assert_eq!(
-            should_retransmit_and_persist(&shreds[0], Some(bank.clone()), &cache, &me_id, 0, 1),
-            false
-        );
+        assert!(!should_retransmit_and_persist(
+            &shreds[0],
+            Some(bank.clone()),
+            &cache,
+            &me_id,
+            0,
+            1
+        ));
 
         // If it's a coding shred, test that slot >= root
         let (common, coding) = Shredder::new_coding_shred_header(5, 5, 5, 6, 6, 0);
         let mut coding_shred =
             Shred::new_empty_from_header(common, DataShredHeader::default(), coding);
         Shredder::sign_shred(&leader_keypair, &mut coding_shred);
-        assert_eq!(
-            should_retransmit_and_persist(&coding_shred, Some(bank.clone()), &cache, &me_id, 0, 0),
-            true
-        );
-        assert_eq!(
-            should_retransmit_and_persist(&coding_shred, Some(bank.clone()), &cache, &me_id, 5, 0),
-            true
-        );
-        assert_eq!(
-            should_retransmit_and_persist(&coding_shred, Some(bank.clone()), &cache, &me_id, 6, 0),
-            false
-        );
+        assert!(should_retransmit_and_persist(
+            &coding_shred,
+            Some(bank.clone()),
+            &cache,
+            &me_id,
+            0,
+            0
+        ));
+        assert!(should_retransmit_and_persist(
+            &coding_shred,
+            Some(bank.clone()),
+            &cache,
+            &me_id,
+            5,
+            0
+        ));
+        assert!(!should_retransmit_and_persist(
+            &coding_shred,
+            Some(bank.clone()),
+            &cache,
+            &me_id,
+            6,
+            0
+        ));
 
         // with a Bank and no idea who leader is, shred gets thrown out
         shreds[0].set_slot(MINIMUM_SLOTS_PER_EPOCH as u64 * 3);
-        assert_eq!(
-            should_retransmit_and_persist(&shreds[0], Some(bank.clone()), &cache, &me_id, 0, 0),
-            false
-        );
+        assert!(!should_retransmit_and_persist(
+            &shreds[0],
+            Some(bank.clone()),
+            &cache,
+            &me_id,
+            0,
+            0
+        ));
 
         // with a shred where shred.slot() == root, shred gets thrown out
         let slot = MINIMUM_SLOTS_PER_EPOCH as u64 * 3;
         let shreds = local_entries_to_shred(&[Entry::default()], slot, slot - 1, &leader_keypair);
-        assert_eq!(
-            should_retransmit_and_persist(&shreds[0], Some(bank.clone()), &cache, &me_id, slot, 0),
-            false
-        );
+        assert!(!should_retransmit_and_persist(
+            &shreds[0],
+            Some(bank.clone()),
+            &cache,
+            &me_id,
+            slot,
+            0
+        ));
 
         // with a shred where shred.parent() < root, shred gets thrown out
         let slot = MINIMUM_SLOTS_PER_EPOCH as u64 * 3;
         let shreds =
             local_entries_to_shred(&[Entry::default()], slot + 1, slot - 1, &leader_keypair);
-        assert_eq!(
-            should_retransmit_and_persist(&shreds[0], Some(bank), &cache, &me_id, slot, 0),
-            false
-        );
+        assert!(!should_retransmit_and_persist(
+            &shreds[0],
+            Some(bank),
+            &cache,
+            &me_id,
+            slot,
+            0
+        ));
 
         // if the shred came back from me, it doesn't continue, whether or not I have a bank
-        assert_eq!(
-            should_retransmit_and_persist(&shreds[0], None, &cache, &me_id, 0, 0),
-            false
-        );
+        assert!(!should_retransmit_and_persist(
+            &shreds[0], None, &cache, &me_id, 0, 0
+        ));
     }
 
     #[test]