Clippy
@@ -1171,8 +1171,10 @@ mod tests {
             Blockstore::open(&ledger_path)
                 .expect("Expected to be able to open database ledger"),
         );
-        let mut poh_config = PohConfig::default();
-        poh_config.target_tick_count = Some(bank.max_tick_height() + num_extra_ticks);
+        let poh_config = PohConfig {
+            target_tick_count: Some(bank.max_tick_height() + num_extra_ticks),
+            ..PohConfig::default()
+        };
         let (exit, poh_recorder, poh_service, entry_receiver) =
             create_test_recorder(&bank, &blockstore, Some(poh_config));
         let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
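This hunk (and the many like it below) is the fix for Clippy's field_reassign_with_default lint: instead of creating a default value and then reassigning fields one at a time, the value is built in a single expression with struct-update syntax. A minimal sketch of the pattern, using a hypothetical Config type in place of PohConfig:

    // Sketch only: Config stands in for PohConfig and similar types below.
    #[derive(Default)]
    struct Config {
        target_tick_count: Option<u64>,
        ticks_per_slot: u64,
    }

    fn main() {
        // Clippy flags: `let mut config = Config::default();` followed by
        // `config.target_tick_count = Some(64);`
        // Struct-update syntax builds the value in one expression instead,
        // filling the remaining fields from Default:
        let config = Config {
            target_tick_count: Some(64),
            ..Config::default()
        };
        assert_eq!(config.target_tick_count, Some(64));
        assert_eq!(config.ticks_per_slot, 0);
    }
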
@@ -1236,9 +1238,12 @@ mod tests {
             Blockstore::open(&ledger_path)
                 .expect("Expected to be able to open database ledger"),
         );
-        let mut poh_config = PohConfig::default();
-        // limit tick count to avoid clearing working_bank at PohRecord then PohRecorderError(MaxHeightReached) at BankingStage
-        poh_config.target_tick_count = Some(bank.max_tick_height() - 1);
+        let poh_config = PohConfig {
+            // limit tick count to avoid clearing working_bank at PohRecord then
+            // PohRecorderError(MaxHeightReached) at BankingStage
+            target_tick_count: Some(bank.max_tick_height() - 1),
+            ..PohConfig::default()
+        };
         let (exit, poh_recorder, poh_service, entry_receiver) =
             create_test_recorder(&bank, &blockstore, Some(poh_config));
         let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
@@ -1381,9 +1386,12 @@ mod tests {
             Blockstore::open(&ledger_path)
                 .expect("Expected to be able to open database ledger"),
         );
-        let mut poh_config = PohConfig::default();
-        // limit tick count to avoid clearing working_bank at PohRecord then PohRecorderError(MaxHeightReached) at BankingStage
-        poh_config.target_tick_count = Some(bank.max_tick_height() - 1);
+        let poh_config = PohConfig {
+            // limit tick count to avoid clearing working_bank at
+            // PohRecord then PohRecorderError(MaxHeightReached) at BankingStage
+            target_tick_count: Some(bank.max_tick_height() - 1),
+            ..PohConfig::default()
+        };
         let (exit, poh_recorder, poh_service, entry_receiver) =
             create_test_recorder(&bank, &blockstore, Some(poh_config));
         let cluster_info =
@@ -1973,7 +1981,7 @@ mod tests {

         assert_eq!(processed_transactions_count, 0,);

-        retryable_txs.sort();
+        retryable_txs.sort_unstable();
         let expected: Vec<usize> = (0..transactions.len()).collect();
         assert_eq!(retryable_txs, expected);
     }

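The sort() → sort_unstable() edits here and below address clippy::stable_sort_primitive: for primitive elements, equal values are indistinguishable, so a stable sort buys nothing, while sort_unstable is typically faster and avoids the stable sort's allocation. (One call site further down keeps sort() under an #[allow(clippy::stable_sort_primitive)] instead.) A minimal sketch:

    fn main() {
        let mut slots: Vec<u64> = vec![5, 3, 3, 9, 1];
        // For primitives, stable ordering of equal elements is unobservable,
        // so the unstable sort is the better default.
        slots.sort_unstable();
        slots.dedup();
        assert_eq!(slots, [1, 3, 5, 9]);
    }
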
@@ -1,4 +1,5 @@
 //! A stage to broadcast data from a leader node to validators
+#![allow(clippy::rc_buffer)]
 use self::{
     broadcast_fake_shreds_run::BroadcastFakeShredsRun, broadcast_metrics::*,
     fail_entry_verification_broadcast_run::FailEntryVerificationBroadcastRun,
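The added #![allow(clippy::rc_buffer)] opts this module out of the rc_buffer lint, which fires on types like the Arc<Vec<Shred>> parameter seen later in this diff; the lint's suggestion (Arc<[T]>) would presumably be a larger refactor than this Clippy pass intends. A sketch of what the lint flags, with a hypothetical function:

    use std::sync::Arc;

    // clippy::rc_buffer flags Arc<Vec<T>> parameters: the Vec adds a second
    // level of indirection, and Arc<[T]> would usually serve instead.
    #[allow(clippy::rc_buffer)]
    fn broadcast(shreds: Arc<Vec<u8>>) -> usize {
        shreds.len()
    }

    fn main() {
        assert_eq!(broadcast(Arc::new(vec![1, 2, 3])), 3);
    }
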
@@ -518,8 +519,10 @@ pub mod test {

     #[test]
     fn test_num_live_peers() {
-        let mut ci = ContactInfo::default();
-        ci.wallclock = std::u64::MAX;
+        let mut ci = ContactInfo {
+            wallclock: std::u64::MAX,
+            ..ContactInfo::default()
+        };
         assert_eq!(num_live_peers(&[ci.clone()]), 1);
         ci.wallclock = timestamp() - 1;
         assert_eq!(num_live_peers(&[ci.clone()]), 2);

@@ -270,10 +270,9 @@ mod test {
             }

             assert!(slot_broadcast_stats.lock().unwrap().0.get(&slot).is_none());
-            let (returned_count, returned_slot, returned_instant) = receiver.recv().unwrap();
+            let (returned_count, returned_slot, _returned_instant) = receiver.recv().unwrap();
             assert_eq!(returned_count, num_threads);
             assert_eq!(returned_slot, slot);
-            assert_eq!(returned_instant, returned_instant);
         }
     }
 }

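This hunk drops assert_eq!(returned_instant, returned_instant), which compares a value against itself (Clippy's eq_op lint) and can never fail; with the assertion gone, the binding becomes unused and gets the conventional underscore prefix. A sketch:

    use std::time::Instant;

    fn main() {
        // Unused binding prefixed with `_` once the tautological
        // assert_eq!(x, x) — flagged by clippy::eq_op — is removed.
        let (count, slot, _instant) = (4usize, 2u64, Instant::now());
        assert_eq!(count, 4);
        assert_eq!(slot, 2);
    }
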
@@ -1,3 +1,5 @@
+#![allow(clippy::rc_buffer)]
+
 use super::{
     broadcast_utils::{self, ReceiveResults},
     *,
@@ -284,7 +286,7 @@ impl StandardBroadcastRun {
         blockstore: &Arc<Blockstore>,
         shreds: Arc<Vec<Shred>>,
         broadcast_shred_batch_info: Option<BroadcastShredBatchInfo>,
-    ) -> Result<()> {
+    ) {
         // Insert shreds into blockstore
         let insert_shreds_start = Instant::now();
         // The first shred is inserted synchronously
@@ -302,7 +304,6 @@ impl StandardBroadcastRun {
             num_shreds: shreds.len(),
         };
         self.update_insertion_metrics(&new_insert_shreds_stats, &broadcast_shred_batch_info);
-        Ok(())
     }

     fn update_insertion_metrics(
@@ -438,7 +439,8 @@ impl BroadcastRun for StandardBroadcastRun {
         blockstore: &Arc<Blockstore>,
     ) -> Result<()> {
         let (shreds, slot_start_ts) = receiver.lock().unwrap().recv()?;
-        self.insert(blockstore, shreds, slot_start_ts)
+        self.insert(blockstore, shreds, slot_start_ts);
+        Ok(())
     }
 }

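The insert() change above matches Clippy's unnecessary_wraps lint: a function that can only ever return Ok(()) gains nothing from a Result return type, so it now returns () directly, and the one caller that is genuinely fallible adds an explicit Ok(()) after the call. The same reshape recurs in ReplayStage below. A hedged sketch on a hypothetical helper:

    #[derive(Debug)]
    struct Error;
    type Result<T> = std::result::Result<T, Error>;

    // Before: `fn insert(..) -> Result<()>` that always returned Ok(()).
    // After: the infallible body returns () directly.
    fn insert(buf: &mut Vec<u8>, byte: u8) {
        buf.push(byte);
    }

    // A caller that keeps a Result signature now ends with Ok(()).
    fn run(buf: &mut Vec<u8>) -> Result<()> {
        insert(buf, 7);
        Ok(())
    }

    fn main() {
        let mut buf = Vec::new();
        run(&mut buf).unwrap();
        assert_eq!(buf, [7]);
    }
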
@@ -884,7 +884,7 @@ impl ClusterInfo {
                 ))
             })
             .collect();
-        current_slots.sort();
+        current_slots.sort_unstable();
         let min_slot: Slot = current_slots
             .iter()
             .map(|((_, s), _)| *s)
@@ -4139,8 +4139,10 @@ mod tests {

     #[test]
     fn test_protocol_sanitize() {
-        let mut pd = PruneData::default();
-        pd.wallclock = MAX_WALLCLOCK;
+        let pd = PruneData {
+            wallclock: MAX_WALLCLOCK,
+            ..PruneData::default()
+        };
         let msg = Protocol::PruneMessage(Pubkey::default(), pd);
         assert_eq!(msg.sanitize(), Err(SanitizeError::ValueOutOfBounds));
     }

@@ -125,6 +125,7 @@ impl ClusterSlotsService {
         while let Ok(mut more) = completed_slots_receiver.try_recv() {
             slots.append(&mut more);
         }
+        #[allow(clippy::stable_sort_primitive)]
         slots.sort();
         if !slots.is_empty() {
             cluster_info.push_epoch_slots(&slots);
@@ -163,7 +164,7 @@ impl ClusterSlotsService {
         while let Ok(mut more) = completed_slots_receiver.try_recv() {
             slots.append(&mut more);
         }
-        slots.sort();
+        slots.sort_unstable();
         slots.dedup();
         if !slots.is_empty() {
             cluster_info.push_epoch_slots(&slots);

@@ -1574,9 +1574,11 @@ pub mod test {
     fn gen_stakes(stake_votes: &[(u64, &[u64])]) -> Vec<(Pubkey, (u64, ArcVoteAccount))> {
         let mut stakes = vec![];
         for (lamports, votes) in stake_votes {
-            let mut account = Account::default();
-            account.data = vec![0; VoteState::size_of()];
-            account.lamports = *lamports;
+            let mut account = Account {
+                data: vec![0; VoteState::size_of()],
+                lamports: *lamports,
+                ..Account::default()
+            };
             let mut vote_state = VoteState::default();
             for slot in *votes {
                 vote_state.process_slot_vote_unchecked(*slot);
@@ -2059,7 +2061,7 @@ pub mod test {
     #[test]
     fn test_check_vote_threshold_without_votes() {
         let tower = Tower::new_for_tests(1, 0.67);
-        let stakes = vec![(0, 1 as Stake)].into_iter().collect();
+        let stakes = vec![(0, 1)].into_iter().collect();
         assert!(tower.check_vote_stake_threshold(0, &stakes, 2));
     }

@@ -2069,7 +2071,7 @@ pub mod test {
         let mut tower = Tower::new_for_tests(4, 0.67);
         let mut stakes = HashMap::new();
         for i in 0..(MAX_LOCKOUT_HISTORY as u64 + 1) {
-            stakes.insert(i, 1 as Stake);
+            stakes.insert(i, 1);
             tower.record_vote(i, Hash::default());
         }
         assert!(!tower.check_vote_stake_threshold(MAX_LOCKOUT_HISTORY as u64 + 1, &stakes, 2,));
@@ -2078,7 +2080,7 @@ pub mod test {
     #[test]
     fn test_is_slot_confirmed_not_enough_stake_failure() {
         let tower = Tower::new_for_tests(1, 0.67);
-        let stakes = vec![(0, 1 as Stake)].into_iter().collect();
+        let stakes = vec![(0, 1)].into_iter().collect();
         assert!(!tower.is_slot_confirmed(0, &stakes, 2));
     }

@@ -2092,7 +2094,7 @@ pub mod test {
     #[test]
     fn test_is_slot_confirmed_pass() {
         let tower = Tower::new_for_tests(1, 0.67);
-        let stakes = vec![(0, 2 as Stake)].into_iter().collect();
+        let stakes = vec![(0, 2)].into_iter().collect();
         assert!(tower.is_slot_confirmed(0, &stakes, 2));
     }

@@ -2205,14 +2207,14 @@ pub mod test {
     #[test]
     fn test_check_vote_threshold_below_threshold() {
         let mut tower = Tower::new_for_tests(1, 0.67);
-        let stakes = vec![(0, 1 as Stake)].into_iter().collect();
+        let stakes = vec![(0, 1)].into_iter().collect();
         tower.record_vote(0, Hash::default());
         assert!(!tower.check_vote_stake_threshold(1, &stakes, 2));
     }
     #[test]
     fn test_check_vote_threshold_above_threshold() {
         let mut tower = Tower::new_for_tests(1, 0.67);
-        let stakes = vec![(0, 2 as Stake)].into_iter().collect();
+        let stakes = vec![(0, 2)].into_iter().collect();
         tower.record_vote(0, Hash::default());
         assert!(tower.check_vote_stake_threshold(1, &stakes, 2));
     }
@@ -2220,7 +2222,7 @@ pub mod test {
     #[test]
     fn test_check_vote_threshold_above_threshold_after_pop() {
         let mut tower = Tower::new_for_tests(1, 0.67);
-        let stakes = vec![(0, 2 as Stake)].into_iter().collect();
+        let stakes = vec![(0, 2)].into_iter().collect();
         tower.record_vote(0, Hash::default());
         tower.record_vote(1, Hash::default());
         tower.record_vote(2, Hash::default());
@@ -2239,7 +2241,7 @@ pub mod test {
     fn test_check_vote_threshold_lockouts_not_updated() {
         solana_logger::setup();
         let mut tower = Tower::new_for_tests(1, 0.67);
-        let stakes = vec![(0, 1 as Stake), (1, 2 as Stake)].into_iter().collect();
+        let stakes = vec![(0, 1), (1, 2)].into_iter().collect();
         tower.record_vote(0, Hash::default());
         tower.record_vote(1, Hash::default());
         tower.record_vote(2, Hash::default());
@@ -2249,8 +2251,10 @@ pub mod test {
     #[test]
     fn test_stake_is_updated_for_entire_branch() {
         let mut voted_stakes = HashMap::new();
-        let mut account = Account::default();
-        account.lamports = 1;
+        let account = Account {
+            lamports: 1,
+            ..Account::default()
+        };
         let set: HashSet<u64> = vec![0u64, 1u64].into_iter().collect();
         let ancestors: HashMap<u64, HashSet<u64>> = [(2u64, set)].iter().cloned().collect();
         Tower::update_ancestor_voted_stakes(&mut voted_stakes, 2, account.lamports, &ancestors);

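The `1 as Stake` → `1` edits above (and the later `0 as usize`, `150 as u64`, and `0 as u8` ones) follow clippy::unnecessary_cast: the literal's type is already fixed by context, so the cast is noise. A sketch, assuming Stake is an integer alias as these tests suggest:

    use std::collections::HashMap;

    type Stake = u64; // assumption: Stake aliases u64, as the tests imply

    fn main() {
        let mut stakes: HashMap<u64, Stake> = HashMap::new();
        // `1 as Stake` is redundant; the map's value type already fixes it.
        stakes.insert(0, 1);
        assert_eq!(stakes[&0], 1);
    }
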
@@ -304,8 +304,10 @@ mod test {

     #[test]
     fn test_prune_errors() {
-        let mut crds_gossip = CrdsGossip::default();
-        crds_gossip.id = Pubkey::new(&[0; 32]);
+        let mut crds_gossip = CrdsGossip {
+            id: Pubkey::new(&[0; 32]),
+            ..CrdsGossip::default()
+        };
         let id = crds_gossip.id;
         let ci = ContactInfo::new_localhost(&Pubkey::new(&[1; 32]), 0);
         let prune_pubkey = Pubkey::new(&[2; 32]);

@@ -337,10 +337,7 @@ impl CrdsGossipPull {
         for r in responses {
             let owner = r.label().pubkey();
             // Check if the crds value is older than the msg_timeout
-            if now
-                > r.wallclock()
-                    .checked_add(self.msg_timeout)
-                    .unwrap_or_else(|| 0)
+            if now > r.wallclock().checked_add(self.msg_timeout).unwrap_or(0)
                 || now + self.msg_timeout < r.wallclock()
             {
                 match &r.label() {
@@ -350,7 +347,7 @@ impl CrdsGossipPull {
                     let timeout = *timeouts
                         .get(&owner)
                         .unwrap_or_else(|| timeouts.get(&Pubkey::default()).unwrap());
-                    if now > r.wallclock().checked_add(timeout).unwrap_or_else(|| 0)
+                    if now > r.wallclock().checked_add(timeout).unwrap_or(0)
                         || now + timeout < r.wallclock()
                     {
                         stats.timeout_count += 1;

@@ -175,12 +175,7 @@ impl CrdsGossipPush {
         now: u64,
     ) -> Result<Option<VersionedCrdsValue>, CrdsGossipError> {
         self.num_total += 1;
-        if now
-            > value
-                .wallclock()
-                .checked_add(self.msg_timeout)
-                .unwrap_or_else(|| 0)
-        {
+        if now > value.wallclock().checked_add(self.msg_timeout).unwrap_or(0) {
             return Err(CrdsGossipError::PushMessageTimeout);
         }
         if now + self.msg_timeout < value.wallclock() {
@@ -208,7 +203,7 @@ impl CrdsGossipPush {
     /// push pull responses
     pub fn push_pull_responses(&mut self, values: Vec<(CrdsValueLabel, Hash, u64)>, now: u64) {
         for (label, value_hash, wc) in values {
-            if now > wc.checked_add(self.msg_timeout).unwrap_or_else(|| 0) {
+            if now > wc.checked_add(self.msg_timeout).unwrap_or(0) {
                 continue;
             }
             self.push_messages.insert(label, value_hash);

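The unwrap_or_else(|| 0) → unwrap_or(0) edits above are Clippy's unnecessary_lazy_evaluations fix: a closure only pays off when the default is expensive to compute, so a plain constant can be passed eagerly. (The nearby unwrap_or_else(|| timeouts.get(..).unwrap()) keeps its closure because it does real work.) A sketch:

    fn main() {
        let wallclock: u64 = u64::MAX;
        let timeout: u64 = 5;
        // Eager default: producing 0 costs nothing, so no closure is needed.
        let deadline = wallclock.checked_add(timeout).unwrap_or(0);
        assert_eq!(deadline, 0); // overflow falls back to the default
    }
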
@@ -116,7 +116,7 @@ impl Uncompressed {
     pub fn to_slots(&self, min_slot: Slot) -> Vec<Slot> {
         let mut rv = vec![];
         let start = if min_slot < self.first_slot {
-            0 as usize
+            0
         } else {
             (min_slot - self.first_slot) as usize
         };

@@ -1228,8 +1228,10 @@ mod tests {
             init_ticks + bank.ticks_per_slot()
         );

-        let mut parent_meta = SlotMeta::default();
-        parent_meta.received = 1;
+        let parent_meta = SlotMeta {
+            received: 1,
+            ..SlotMeta::default()
+        };
         poh_recorder
             .blockstore
             .put_meta_bytes(0, &serialize(&parent_meta).unwrap())

@@ -736,7 +736,7 @@ mod test {
         let num_slots = 2;

         // Create some shreds
-        let (mut shreds, _) = make_many_slot_entries(0, num_slots as u64, 150 as u64);
+        let (mut shreds, _) = make_many_slot_entries(0, num_slots as u64, 150);
         let num_shreds = shreds.len() as u64;
         let num_shreds_per_slot = num_shreds / num_slots;

@@ -852,9 +852,10 @@ mod test {
         // sides of the range)
         for start in 0..slots.len() {
             for end in start..slots.len() {
-                let mut repair_slot_range = RepairSlotRange::default();
-                repair_slot_range.start = slots[start];
-                repair_slot_range.end = slots[end];
+                let repair_slot_range = RepairSlotRange {
+                    start: slots[start],
+                    end: slots[end],
+                };
                 let expected: Vec<RepairType> = (repair_slot_range.start
                     ..=repair_slot_range.end)
                     .map(|slot_index| {
@@ -907,9 +908,7 @@ mod test {
             RepairType::HighestShred(end, 0),
         ];

-        let mut repair_slot_range = RepairSlotRange::default();
-        repair_slot_range.start = 2;
-        repair_slot_range.end = end;
+        let repair_slot_range = RepairSlotRange { start: 2, end };

         assert_eq!(
             RepairService::generate_repairs_in_range(

@@ -455,7 +455,7 @@ impl ReplayStage {
                     &mut heaviest_subtree_fork_choice,
                     &cache_block_time_sender,
                     &bank_notification_sender,
-                )?;
+                );
             };
             voting_time.stop();

@@ -1047,7 +1047,7 @@ impl ReplayStage {
         heaviest_subtree_fork_choice: &mut HeaviestSubtreeForkChoice,
         cache_block_time_sender: &Option<CacheBlockTimeSender>,
         bank_notification_sender: &Option<BankNotificationSender>,
-    ) -> Result<()> {
+    ) {
         if bank.is_empty() {
             inc_new_counter_info!("replay_stage-voted_empty_bank", 1);
         }
@@ -1130,7 +1130,6 @@ impl ReplayStage {
             tower_index,
             switch_fork_decision,
         );
-        Ok(())
     }

     fn push_vote(

@@ -1,4 +1,5 @@
 //! The `retransmit_stage` retransmits shreds between validators
+#![allow(clippy::rc_buffer)]

 use crate::shred_fetch_stage::ShredFetchStage;
 use crate::shred_fetch_stage::ShredFetchStats;

@@ -4511,8 +4511,10 @@ pub mod tests {
         let ledger_path = get_tmp_ledger_path!();
         let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
         let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default()));
-        let mut config = JsonRpcConfig::default();
-        config.enable_validator_exit = true;
+        let config = JsonRpcConfig {
+            enable_validator_exit: true,
+            ..JsonRpcConfig::default()
+        };
         let bank_forks = new_bank_forks().0;
         let cluster_info = Arc::new(ClusterInfo::default());
         let tpu_address = cluster_info.my_contact_info().tpu;
@@ -4601,8 +4603,10 @@ pub mod tests {
             CommitmentSlots::new_from_slot(bank_forks.read().unwrap().highest_slot()),
         )));

-        let mut config = JsonRpcConfig::default();
-        config.enable_validator_exit = true;
+        let config = JsonRpcConfig {
+            enable_validator_exit: true,
+            ..JsonRpcConfig::default()
+        };
         let cluster_info = Arc::new(ClusterInfo::default());
         let tpu_address = cluster_info.my_contact_info().tpu;
         let (request_processor, receiver) = JsonRpcRequestProcessor::new(

@@ -534,8 +534,10 @@ mod tests {
             .get(current_slot)
             .unwrap()
             .process_transaction(tx)?;
-        let mut commitment_slots = CommitmentSlots::default();
-        commitment_slots.slot = current_slot;
+        let commitment_slots = CommitmentSlots {
+            slot: current_slot,
+            ..CommitmentSlots::default()
+        };
         subscriptions.notify_subscribers(commitment_slots);
         Ok(())
     }
@@ -1018,8 +1020,10 @@ mod tests {
             .unwrap()
             .process_transaction(&tx)
             .unwrap();
-        let mut commitment_slots = CommitmentSlots::default();
-        commitment_slots.slot = 1;
+        let commitment_slots = CommitmentSlots {
+            slot: 1,
+            ..CommitmentSlots::default()
+        };
         rpc.subscriptions.notify_subscribers(commitment_slots);

         let commitment_slots = CommitmentSlots {

@@ -973,7 +973,7 @@ impl RpcSubscriptions {
     }

     pub fn notify_roots(&self, mut rooted_slots: Vec<Slot>) {
-        rooted_slots.sort();
+        rooted_slots.sort_unstable();
         rooted_slots.into_iter().for_each(|root| {
             self.enqueue_notification(NotificationEntry::Root(root));
         });
@@ -1359,8 +1359,8 @@ pub(crate) mod tests {
         let (create_sub, _id_receiver, create_recv) = Subscriber::new_test("accountNotification");
         let (close_sub, _id_receiver, close_recv) = Subscriber::new_test("accountNotification");

-        let create_sub_id = SubscriptionId::Number(0 as u64);
-        let close_sub_id = SubscriptionId::Number(1 as u64);
+        let create_sub_id = SubscriptionId::Number(0);
+        let close_sub_id = SubscriptionId::Number(1);

         let exit = Arc::new(AtomicBool::new(false));
         let subscriptions = RpcSubscriptions::new(
@@ -1404,8 +1404,10 @@ pub(crate) mod tests {
             .unwrap()
             .process_transaction(&tx)
             .unwrap();
-        let mut commitment_slots = CommitmentSlots::default();
-        commitment_slots.slot = 1;
+        let commitment_slots = CommitmentSlots {
+            slot: 1,
+            ..CommitmentSlots::default()
+        };
         subscriptions.notify_subscribers(commitment_slots);
         let (response, _) = robust_poll_or_panic(create_recv);
         let expected = json!({
@@ -1513,7 +1515,7 @@ pub(crate) mod tests {

         let (subscriber, _id_receiver, transport_receiver) =
             Subscriber::new_test("programNotification");
-        let sub_id = SubscriptionId::Number(0 as u64);
+        let sub_id = SubscriptionId::Number(0);
         let exit = Arc::new(AtomicBool::new(false));
         let optimistically_confirmed_bank =
             OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks);
@@ -1659,7 +1661,7 @@ pub(crate) mod tests {
                 commitment: Some(CommitmentConfig::recent()),
                 enable_received_notification: Some(false),
             }),
-            SubscriptionId::Number(1 as u64),
+            SubscriptionId::Number(1),
             past_bank_sub1,
         );
         subscriptions.add_signature_subscription(
@@ -1668,7 +1670,7 @@ pub(crate) mod tests {
                 commitment: Some(CommitmentConfig::root()),
                 enable_received_notification: Some(false),
             }),
-            SubscriptionId::Number(2 as u64),
+            SubscriptionId::Number(2),
             past_bank_sub2,
         );
         subscriptions.add_signature_subscription(
@@ -1677,7 +1679,7 @@ pub(crate) mod tests {
                 commitment: Some(CommitmentConfig::recent()),
                 enable_received_notification: Some(false),
             }),
-            SubscriptionId::Number(3 as u64),
+            SubscriptionId::Number(3),
             processed_sub,
         );
         subscriptions.add_signature_subscription(
@@ -1686,7 +1688,7 @@ pub(crate) mod tests {
                 commitment: Some(CommitmentConfig::recent()),
                 enable_received_notification: Some(false),
             }),
-            SubscriptionId::Number(4 as u64),
+            SubscriptionId::Number(4),
             Subscriber::new_test("signatureNotification").0,
         );
         // Add a subscription that gets `received` notifications
@@ -1696,7 +1698,7 @@ pub(crate) mod tests {
                 commitment: Some(CommitmentConfig::recent()),
                 enable_received_notification: Some(true),
             }),
-            SubscriptionId::Number(5 as u64),
+            SubscriptionId::Number(5),
             processed_sub3,
         );

@@ -1789,7 +1791,7 @@ pub(crate) mod tests {
     fn test_check_slot_subscribe() {
         let (subscriber, _id_receiver, transport_receiver) =
             Subscriber::new_test("slotNotification");
-        let sub_id = SubscriptionId::Number(0 as u64);
+        let sub_id = SubscriptionId::Number(0);
         let exit = Arc::new(AtomicBool::new(false));
         let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
         let bank = Bank::new(&genesis_config);
@@ -1840,7 +1842,7 @@ pub(crate) mod tests {
     fn test_check_root_subscribe() {
         let (subscriber, _id_receiver, mut transport_receiver) =
             Subscriber::new_test("rootNotification");
-        let sub_id = SubscriptionId::Number(0 as u64);
+        let sub_id = SubscriptionId::Number(0);
         let exit = Arc::new(AtomicBool::new(false));
         let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
         let bank = Bank::new(&genesis_config);
@@ -1976,7 +1978,7 @@ pub(crate) mod tests {
            ))),
            optimistically_confirmed_bank.clone(),
        ));
-        let sub_id0 = SubscriptionId::Number(0 as u64);
+        let sub_id0 = SubscriptionId::Number(0);
         subscriptions.add_account_subscription(
             alice.pubkey(),
             Some(RpcAccountInfoConfig {
@@ -2057,7 +2059,7 @@ pub(crate) mod tests {
         assert_eq!(serde_json::to_string(&expected).unwrap(), response);
         subscriptions.remove_account_subscription(&sub_id0);

-        let sub_id1 = SubscriptionId::Number(1 as u64);
+        let sub_id1 = SubscriptionId::Number(1);
         subscriptions.add_account_subscription(
             alice.pubkey(),
             Some(RpcAccountInfoConfig {

@@ -708,11 +708,15 @@ mod tests {
             nonce,
         );
         assert!(rv.is_none());
-        let mut common_header = ShredCommonHeader::default();
-        common_header.slot = slot;
-        common_header.index = 1;
-        let mut data_header = DataShredHeader::default();
-        data_header.parent_offset = 1;
+        let common_header = ShredCommonHeader {
+            slot,
+            index: 1,
+            ..ShredCommonHeader::default()
+        };
+        let data_header = DataShredHeader {
+            parent_offset: 1,
+            ..DataShredHeader::default()
+        };
         let shred_info = Shred::new_empty_from_header(
             common_header,
             data_header,

@@ -63,8 +63,8 @@ mod tests {
     #[test]
     fn test_get_latest_votes() {
         let pubkey = solana_sdk::pubkey::new_rand();
-        let label1 = CrdsValueLabel::Vote(0 as u8, pubkey);
-        let label2 = CrdsValueLabel::Vote(1 as u8, pubkey);
+        let label1 = CrdsValueLabel::Vote(0, pubkey);
+        let label2 = CrdsValueLabel::Vote(1, pubkey);
         let mut verified_vote_packets = VerifiedVotePackets(HashMap::new());

         let data = Packet {
@@ -107,8 +107,8 @@ mod tests {
     fn test_get_and_process_vote_packets() {
         let (s, r) = unbounded();
         let pubkey = solana_sdk::pubkey::new_rand();
-        let label1 = CrdsValueLabel::Vote(0 as u8, pubkey);
-        let label2 = CrdsValueLabel::Vote(1 as u8, pubkey);
+        let label1 = CrdsValueLabel::Vote(0, pubkey);
+        let label2 = CrdsValueLabel::Vote(1, pubkey);
         let mut update_version = 0;
         s.send(vec![(label1.clone(), Packets::default())]).unwrap();
         s.send(vec![(label2.clone(), Packets::default())]).unwrap();