committed by GitHub
parent 1da1667920
commit 58ef02f02b
@@ -267,7 +267,7 @@ mod tests {
     };

     bank_forks
-        .generate_accounts_package(slot, &vec![], &package_sender)
+        .generate_accounts_package(slot, &[], &package_sender)
         .unwrap();

     if slot == saved_slot as u64 {
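Note on the hunk above: `&vec![]` heap-allocates a `Vec` only to borrow it as a slice, while the empty slice literal `&[]` passes the same argument with no allocation (the pattern clippy's `useless_vec` lint flags). A minimal sketch of the idea, using a hypothetical `total` helper rather than the real `generate_accounts_package` signature:

    // Hypothetical helper that only needs to read a slice, not own a Vec.
    fn total(lamports: &[u64]) -> u64 {
        lamports.iter().sum()
    }

    fn main() {
        // Before: `&vec![]` allocates a Vec just to borrow it as a slice.
        assert_eq!(total(&vec![]), 0);
        // After: an empty slice literal, no allocation.
        assert_eq!(total(&[]), 0);
    }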
@@ -79,7 +79,7 @@ fn test_rpc_client() {
     assert!(confirmed_tx);

     assert_eq!(client.get_balance(&bob_pubkey).unwrap(), 20);
-    assert_eq!(client.get_balance(&alice.pubkey()).unwrap(), 999980);
+    assert_eq!(client.get_balance(&alice.pubkey()).unwrap(), 999_980);

     server.close().unwrap();
     remove_dir_all(ledger_path).unwrap();
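This hunk is purely a readability change, likely driven by clippy's `unreadable_literal` lint: digit separators do not change the value.

    fn main() {
        // The underscore is purely visual; both literals are the same number.
        assert_eq!(999980, 999_980);
    }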
@@ -65,6 +65,7 @@ fn retransmit(
     shred
 }

+#[allow(clippy::type_complexity)]
 fn run_simulation(stakes: &[u64], fanout: usize) {
     let num_threads = num_threads();
     // set timeout to 5 minutes
@@ -91,7 +92,7 @@ fn run_simulation(stakes: &[u64], fanout: usize) {
     let range: Vec<_> = (1..=stakes.len()).collect();
     let chunk_size = (stakes.len() + num_threads - 1) / num_threads;
     range.chunks(chunk_size).for_each(|chunk| {
-        chunk.into_iter().for_each(|i| {
+        chunk.iter().for_each(|i| {
             //distribute neighbors across threads to maximize parallel compute
             let batch_ix = *i as usize % batches.len();
             let node = ContactInfo::new_localhost(&Pubkey::new_rand(), 0);
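Context for the change above: `chunk` is a shared slice produced by `chunks()`, so `into_iter()` on it already iterates by reference; clippy's `into_iter_on_ref` lint asks for the explicit `iter()`. A small stand-alone sketch (names are illustrative, not the test's real data):

    fn main() {
        let range: Vec<usize> = (1..=8).collect();
        // `chunks` yields `&[usize]`; on a shared slice, `into_iter()` and `iter()`
        // are the same iterator over `&usize`, so `iter()` states the intent plainly.
        range.chunks(3).for_each(|chunk| {
            chunk.iter().for_each(|i| {
                let _batch_ix = *i % 3;
            });
        });
    }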
@@ -121,16 +122,15 @@ fn run_simulation(stakes: &[u64], fanout: usize) {
                 &stakes_and_index,
                 seed,
             );
-            let peers = shuffled_stakes_and_indexes
+            shuffled_stakes_and_indexes
                 .into_iter()
                 .map(|(_, i)| peers[i].clone())
-                .collect();
-            peers
+                .collect()
         })
         .collect();

     // create some "shreds".
-    (0..shreds_len).into_iter().for_each(|i| {
+    (0..shreds_len).for_each(|i| {
         let broadcast_table = &shuffled_peers[i];
         find_insert_shred(&broadcast_table[0].id, i as i32, &mut batches);
     });
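Two small cleanups in this hunk: binding the collected value and returning the binding on the next line is clippy's `let_and_return` pattern, so the closure can end with the `collect()` expression itself; and `(0..shreds_len)` is already an iterator, so its `.into_iter()` is dropped (see the note after the `create_towers` hunk below). A rough sketch of the first, under assumed types:

    fn main() {
        let names = vec!["alice", "bob"];

        // Before (let_and_return): bind the result, then return the binding.
        let upper: Vec<String> = {
            let v: Vec<String> = names.iter().map(|s| s.to_uppercase()).collect();
            v
        };

        // After: the block/closure ends with the `collect()` expression directly.
        let upper2: Vec<String> = names.iter().map(|s| s.to_uppercase()).collect();

        assert_eq!(upper, upper2);
    }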
@@ -93,7 +93,7 @@ fn star_network_create(num: usize) -> Network {
         .collect();
     let mut node = CrdsGossip::default();
     let id = entry.label().pubkey();
-    node.crds.insert(entry.clone(), 0).unwrap();
+    node.crds.insert(entry, 0).unwrap();
     node.set_self(&id);
     network.insert(id, Node::new(Arc::new(Mutex::new(node))));
     Network::new(network)
@@ -106,7 +106,7 @@ fn rstar_network_create(num: usize) -> Network {
     )));
     let mut origin = CrdsGossip::default();
     let id = entry.label().pubkey();
-    origin.crds.insert(entry.clone(), 0).unwrap();
+    origin.crds.insert(entry, 0).unwrap();
     origin.set_self(&id);
     let mut network: HashMap<_, _> = (1..num)
         .map(|_| {
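The `entry.clone()` changes in the two hunks above follow the same reasoning: `entry` is not read again after the `crds.insert(...)` call, so it can be moved in rather than cloned first (the case clippy's `redundant_clone` lint targets). A minimal sketch with a plain `HashMap` standing in for the crds table:

    use std::collections::HashMap;

    fn main() {
        let mut crds: HashMap<String, usize> = HashMap::new();
        let entry = String::from("contact-info");
        // `entry` is not used after this call, so move it into the map on its last use
        // instead of cloning it.
        crds.insert(entry, 0);
        assert_eq!(crds.len(), 1);
    }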
@@ -144,7 +144,7 @@ fn ring_network_create(num: usize) -> Network {
     for k in 0..keys.len() {
         let start_info = {
             let start = &network[&keys[k]];
-            let start_id = start.lock().unwrap().id.clone();
+            let start_id = start.lock().unwrap().id;
             start
                 .lock()
                 .unwrap()
@@ -183,7 +183,7 @@ fn connected_staked_network_create(stakes: &[u64]) -> Network {
         .iter()
         .map(|k| {
             let start = &network[k].lock().unwrap();
-            let start_id = start.id.clone();
+            let start_id = start.id;
             let start_label = CrdsValueLabel::ContactInfo(start_id);
             start.crds.lookup(&start_label).unwrap().clone()
         })
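In these two hunks `id` is a `Pubkey`, which is `Copy`, so calling `.clone()` on it is redundant: a plain read already copies the value (clippy's `clone_on_copy`). Sketch with a hypothetical `Copy` struct in place of `Pubkey`:

    #[derive(Clone, Copy, Debug, PartialEq)]
    struct Id([u8; 4]);

    fn main() {
        let start = Id([1, 2, 3, 4]);
        // For a `Copy` type, assignment already copies the value; `.clone()` adds nothing.
        let start_id = start;
        assert_eq!(start_id, start);
    }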
@@ -448,7 +448,7 @@ fn network_run_pull(
                     .unwrap();
                 bytes += serialized_size(&rsp).unwrap() as usize;
                 msgs += rsp.len();
-                network.get(&from).map(|node| {
+                if let Some(node) = network.get(&from) {
                     node.lock()
                         .unwrap()
                         .mark_pull_request_creation_time(&from, now);
@@ -456,7 +456,7 @@ fn network_run_pull(
                         .lock()
                         .unwrap()
                         .process_pull_response(&from, &timeouts, rsp, now);
-                });
+                }
                 (bytes, msgs, overhead)
             })
             .collect();
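The two hunks above rewrite `network.get(&from).map(|node| { ... })`, where the closure runs only for its side effects, as `if let Some(node) = network.get(&from) { ... }`; this is what clippy's `option_map_unit_fn` lint suggests, and the same rewrite appears again in the `Tower` hunk below. A small sketch over a plain `HashMap`:

    use std::collections::HashMap;

    fn main() {
        let mut nodes: HashMap<&str, u32> = HashMap::new();
        nodes.insert("node-a", 0);

        // `map` used only for side effects hides the control flow; `if let` states it directly.
        if let Some(count) = nodes.get_mut("node-a") {
            *count += 1;
        }

        assert_eq!(nodes["node-a"], 1);
    }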
@@ -175,9 +175,9 @@ impl Tower {
             }
         }
         let trunk = self.votes.get(self.converge_depth).cloned();
-        trunk.map(|t| {
+        if let Some(t) = trunk {
             self.delayed_votes.retain(|v| v.fork.id > t.fork.id);
-        });
+        }
     }
     pub fn pop_best_votes(
         &mut self,
@@ -361,6 +361,7 @@ fn test_is_trunk_of_4() {
     assert!(!b2.is_trunk_of(&b1, &tree));
 }
 #[test]
+#[allow(clippy::cognitive_complexity)]
 fn test_push_vote() {
     let tree = HashMap::new();
     let bmap = HashMap::new();
@@ -396,7 +397,7 @@ fn test_push_vote() {
     assert_eq!(tower.votes[0].lockout, 2);

     let b1 = Fork { id: 1, base: 1 };
-    let vote = Vote::new(b1.clone(), 8);
+    let vote = Vote::new(b1, 8);
     assert!(!tower.push_vote(vote, &tree, &bmap));

     let vote = Vote::new(b0.clone(), 8);
@@ -408,7 +409,7 @@ fn test_push_vote() {
     assert_eq!(tower.votes[2].lockout, 8);
     assert_eq!(tower.votes[3].lockout, 16);

-    let vote = Vote::new(b0.clone(), 10);
+    let vote = Vote::new(b0, 10);
     assert!(tower.push_vote(vote, &tree, &bmap));
     assert_eq!(tower.votes.len(), 2);
     assert_eq!(tower.votes[0].lockout, 2);
@@ -417,7 +418,6 @@ fn test_push_vote() {

 fn create_towers(sz: usize, height: usize, delay_count: usize) -> Vec<Tower> {
     (0..sz)
-        .into_iter()
         .map(|_| Tower::new(32, height, delay_count))
         .collect()
 }
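As in the `run_simulation` hunk earlier, a `Range` such as `(0..sz)` is already an `Iterator`, so the extra `.into_iter()` is an identity conversion (flagged by clippy as `identity_conversion`/`useless_conversion`, depending on the clippy version); dropping it changes nothing:

    fn main() {
        // A Range is already an Iterator, so `.into_iter()` is an identity conversion.
        let a: Vec<usize> = (0..4).into_iter().map(|i| i * 2).collect();
        let b: Vec<usize> = (0..4).map(|i| i * 2).collect();
        assert_eq!(a, b);
    }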
@@ -438,7 +438,7 @@ fn calc_fork_depth(fork_tree: &HashMap<usize, Fork>, id: usize) -> usize {
 /// map of `fork id` to `tower count`
 /// This map contains the number of nodes that have the fork as an ancestor.
 /// The fork with the highest count that is the newest is the cluster "trunk".
-fn calc_fork_map(towers: &Vec<Tower>, fork_tree: &HashMap<usize, Fork>) -> HashMap<usize, usize> {
+fn calc_fork_map(towers: &[Tower], fork_tree: &HashMap<usize, Fork>) -> HashMap<usize, usize> {
     let mut lca_map: HashMap<usize, usize> = HashMap::new();
     for tower in towers {
         let mut start = tower.last_fork();
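This hunk and the next change parameters from `&Vec<Tower>` to `&[Tower]`: a `&Vec<T>` argument forces callers to hold an actual `Vec` while offering nothing a slice does not, which is what clippy's `ptr_arg` lint points out. A sketch of the signature change, with a trivial stand-in for the real `Tower`:

    struct Tower {
        last_fork: usize,
    }

    // `&[Tower]` accepts a borrowed Vec, an array, or a sub-slice equally well.
    fn count_at_fork(towers: &[Tower], fork: usize) -> usize {
        towers.iter().filter(|t| t.last_fork == fork).count()
    }

    fn main() {
        let towers = vec![
            Tower { last_fork: 1 },
            Tower { last_fork: 2 },
            Tower { last_fork: 1 },
        ];
        // `&Vec<Tower>` coerces to `&[Tower]` at the call site, so callers don't change.
        assert_eq!(count_at_fork(&towers, 1), 2);
    }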
@@ -460,7 +460,7 @@ fn calc_newest_trunk(bmap: &HashMap<usize, usize>) -> (usize, usize) {
     data.last().map(|v| (*v.0, *v.1)).unwrap()
 }
 /// how common is the latest fork of all the nodes
-fn calc_tip_converged(towers: &Vec<Tower>, bmap: &HashMap<usize, usize>) -> usize {
+fn calc_tip_converged(towers: &[Tower], bmap: &HashMap<usize, usize>) -> usize {
     let sum: usize = towers
         .iter()
         .map(|n| *bmap.get(&n.last_fork().id).unwrap_or(&0))
@@ -81,18 +81,15 @@ mod tests {

 impl CpuStats {
     fn update(&self) {
-        match self.sys.cpu_load_aggregate() {
-            Ok(cpu) => {
-                std::thread::sleep(Duration::from_millis(400));
-                let cpu_new = CpuStatsInner::from(cpu.done().unwrap());
-                *self.stats.write().unwrap() = cpu_new;
-            }
-            _ => (),
+        if let Ok(cpu) = self.sys.cpu_load_aggregate() {
+            std::thread::sleep(Duration::from_millis(400));
+            let cpu_new = CpuStatsInner::from(cpu.done().unwrap());
+            *self.stats.write().unwrap() = cpu_new;
         }
     }

     fn get_stats(&self) -> CpuStatsInner {
-        self.stats.read().unwrap().clone()
+        *self.stats.read().unwrap()
     }
 }
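Two things happen in the hunk above. A `match` whose only interesting arm is `Ok(cpu)` with a `_ => ()` fallback collapses into `if let Ok(cpu) = ...` (clippy's `single_match`), and `get_stats` dereferences the read guard instead of cloning, which works provided `CpuStatsInner` is `Copy`. A condensed sketch of both, using stand-in types and assuming the `Copy` bound:

    use std::sync::RwLock;

    #[derive(Clone, Copy, Default)]
    struct StatsInner {
        load: f64,
    }

    struct Stats {
        inner: RwLock<StatsInner>,
    }

    impl Stats {
        fn update(&self, sample: Result<f64, ()>) {
            // Single interesting arm: `if let` replaces `match ... { Ok(v) => ..., _ => () }`.
            if let Ok(load) = sample {
                *self.inner.write().unwrap() = StatsInner { load };
            }
        }

        fn get(&self) -> StatsInner {
            // `StatsInner` is `Copy`, so dereferencing the read guard copies it out; no clone needed.
            *self.inner.read().unwrap()
        }
    }

    fn main() {
        let stats = Stats { inner: RwLock::new(StatsInner::default()) };
        stats.update(Ok(0.5));
        assert_eq!(stats.get().load, 0.5);
    }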
@@ -118,7 +115,7 @@ mod tests {
             .unwrap();

         Self {
-            cpu_stats: cpu_stats.clone(),
+            cpu_stats,
             t_cleanup,
         }
     }
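The last hunk drops the `.clone()` because the local `cpu_stats` is not used after the struct is built, so it can be moved in, and since the field and variable share a name the field-init shorthand applies. Sketch with a hypothetical `Monitor` struct, not the real one:

    use std::sync::Arc;

    struct CpuStats;

    struct Monitor {
        cpu_stats: Arc<CpuStats>,
    }

    fn main() {
        let cpu_stats = Arc::new(CpuStats);
        // The local isn't used after construction, so it is moved in (no clone), and the
        // matching name allows struct field-init shorthand.
        let monitor = Monitor { cpu_stats };
        assert_eq!(Arc::strong_count(&monitor.cpu_stats), 1);
    }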