From 59f2a478b71d3c27f12b1b9fd45543e53504b34e Mon Sep 17 00:00:00 2001
From: Pankaj Garg
Date: Thu, 21 Mar 2019 19:51:20 +0000
Subject: [PATCH] v0.12 specific stability changes

---
 core/src/banking_stage.rs         | 10 +---------
 core/src/leader_schedule_utils.rs | 15 +++++++++------
 net/remote/remote-client.sh       |  1 +
 3 files changed, 11 insertions(+), 15 deletions(-)

diff --git a/core/src/banking_stage.rs b/core/src/banking_stage.rs
index 0a1fdf653e..fc52626fc1 100644
--- a/core/src/banking_stage.rs
+++ b/core/src/banking_stage.rs
@@ -27,13 +27,9 @@ use std::sync::{Arc, Mutex, RwLock};
 use std::thread::{self, Builder, JoinHandle};
 use std::time::Duration;
 use std::time::Instant;
-use sys_info;
 
 pub type UnprocessedPackets = Vec<(SharedPackets, usize)>; // `usize` is the index of the first unprocessed packet in `SharedPackets`
 
-// number of threads is 1 until mt bank is ready
-pub const NUM_THREADS: u32 = 10;
-
 /// Stores the stage's thread handle and output receiver.
 pub struct BankingStage {
     bank_thread_hdls: Vec<JoinHandle<UnprocessedPackets>>,
@@ -57,7 +53,7 @@ impl BankingStage {
         // Single thread to compute confirmation
         let lcs_handle = LeaderConfirmationService::start(&poh_recorder, exit.clone());
         // Many banks that process transactions in parallel.
-        let mut bank_thread_hdls: Vec<JoinHandle<UnprocessedPackets>> = (0..Self::num_threads())
+        let mut bank_thread_hdls: Vec<JoinHandle<UnprocessedPackets>> = (0..4)
             .map(|_| {
                 let verified_receiver = verified_receiver.clone();
                 let poh_recorder = poh_recorder.clone();
@@ -189,10 +185,6 @@ impl BankingStage {
         }
     }
 
-    pub fn num_threads() -> u32 {
-        sys_info::cpu_num().unwrap_or(NUM_THREADS)
-    }
-
     /// Convert the transactions from a blob of binary data to a vector of transactions
     fn deserialize_transactions(p: &Packets) -> Vec<Option<Transaction>> {
         p.packets
diff --git a/core/src/leader_schedule_utils.rs b/core/src/leader_schedule_utils.rs
index 4fac35d600..5e9e4b6491 100644
--- a/core/src/leader_schedule_utils.rs
+++ b/core/src/leader_schedule_utils.rs
@@ -44,10 +44,9 @@ pub fn slot_leader_at(slot: u64, bank: &Bank) -> Option<Pubkey> {
 }
 
 /// Return the next slot after the given current_slot that the given node will be leader
-pub fn next_leader_slot(pubkey: &Pubkey, current_slot: u64, bank: &Bank) -> Option<u64> {
-    let (epoch, slot_index) = bank.get_epoch_and_slot_index(current_slot + 1);
-
-    if let Some(leader_schedule) = leader_schedule(epoch, bank) {
+pub fn next_leader_slot(pubkey: &Pubkey, mut current_slot: u64, bank: &Bank) -> Option<u64> {
+    let (mut epoch, mut start_index) = bank.get_epoch_and_slot_index(current_slot + 1);
+    while let Some(leader_schedule) = leader_schedule(epoch, bank) {
         // clippy thinks I should do this:
         // for (i, <item>) in leader_schedule
         //  .iter()
@@ -57,11 +56,15 @@ pub fn next_leader_slot(pubkey: &Pubkey, current_slot: u64, bank: &Bank) -> Opti
         //
         // but leader_schedule doesn't implement Iter...
         #[allow(clippy::needless_range_loop)]
-        for i in slot_index..bank.get_slots_in_epoch(epoch) {
+        for i in start_index..bank.get_slots_in_epoch(epoch) {
+            current_slot += 1;
             if *pubkey == leader_schedule[i] {
-                return Some(current_slot + 1 + (i - slot_index) as u64);
+                return Some(current_slot);
             }
         }
+
+        epoch += 1;
+        start_index = 0;
     }
     None
 }
diff --git a/net/remote/remote-client.sh b/net/remote/remote-client.sh
index 23a0631079..28c8473067 100755
--- a/net/remote/remote-client.sh
+++ b/net/remote/remote-client.sh
@@ -57,6 +57,7 @@ clientCommand="\
         --duration 7500 \
         --sustained \
         --threads $threadCount \
+        --tx_count 10000 \
       "
 
       tmux new -s solana-bench-tps -d "
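
A note on the `next_leader_slot` hunk: the old code consulted the leader schedule only for the epoch containing `current_slot + 1`, so a node whose next leader slot fell in a later epoch got `None` back. The patched version walks forward epoch by epoch until `leader_schedule(epoch, bank)` stops returning a schedule. The following is a minimal standalone sketch of that control flow, not the crate's actual API: `SLOTS_PER_EPOCH`, `schedule_for`, the free-function `get_epoch_and_slot_index`, and the bare `u64` pubkey are illustrative stand-ins for the `Bank`-backed plumbing in the real code.

// Sketch only: assumes fixed-length epochs and an in-memory
// schedule table in place of the Bank.
type Pubkey = u64;

const SLOTS_PER_EPOCH: u64 = 4;

// Stand-in for `bank.get_epoch_and_slot_index(slot)`.
fn get_epoch_and_slot_index(slot: u64) -> (u64, usize) {
    (slot / SLOTS_PER_EPOCH, (slot % SLOTS_PER_EPOCH) as usize)
}

// Stand-in for `leader_schedule(epoch, bank)`: returns None once we
// run past the schedules we know about, which terminates the search.
fn schedule_for(epoch: u64, epochs: &[Vec<Pubkey>]) -> Option<&Vec<Pubkey>> {
    epochs.get(epoch as usize)
}

fn next_leader_slot(pubkey: Pubkey, mut current_slot: u64, epochs: &[Vec<Pubkey>]) -> Option<u64> {
    let (mut epoch, mut start_index) = get_epoch_and_slot_index(current_slot + 1);
    while let Some(schedule) = schedule_for(epoch, epochs) {
        for i in start_index..schedule.len() {
            // Advance one slot per entry examined, so `current_slot`
            // always names the slot at schedule index `i`.
            current_slot += 1;
            if pubkey == schedule[i] {
                return Some(current_slot);
            }
        }
        // Not a leader anywhere in this epoch: roll into the next
        // epoch and restart the scan from slot index 0.
        epoch += 1;
        start_index = 0;
    }
    None
}

fn main() {
    // Two 4-slot epochs. Node 7 first leads at epoch 1, index 1,
    // i.e. slot 5 -- an answer the pre-patch single-epoch scan,
    // searching from slot 0, could never return.
    let epochs = vec![vec![1, 2, 3, 4], vec![5, 7, 5, 7]];
    assert_eq!(next_leader_slot(7, 0, &epochs), Some(5));
}

Incrementing `current_slot` inside the inner loop (rather than recomputing it from `i - start_index` as the deleted `return` did) keeps the slot counter correct across epoch rollovers, since each epoch's scan resumes from index 0 while the counter keeps running.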