diff --git a/src/active_stakers.rs b/src/active_stakers.rs
index 135d30fa7e..df676c115d 100644
--- a/src/active_stakers.rs
+++ b/src/active_stakers.rs
@@ -1,9 +1,10 @@
+use crate::leader_schedule::LeaderSchedule;
 use solana_runtime::bank::Bank;
 use solana_sdk::pubkey::Pubkey;
-use solana_sdk::timing::{DEFAULT_SLOTS_PER_EPOCH, DEFAULT_TICKS_PER_SLOT};
+use solana_sdk::timing::DEFAULT_SLOTS_PER_EPOCH;
 use solana_sdk::vote_program::VoteState;
 
-pub const DEFAULT_ACTIVE_WINDOW_TICK_LENGTH: u64 = DEFAULT_SLOTS_PER_EPOCH * DEFAULT_TICKS_PER_SLOT;
+pub const DEFAULT_ACTIVE_WINDOW_NUM_SLOTS: u64 = DEFAULT_SLOTS_PER_EPOCH;
 
 // Return true if the latest vote is between the lower and upper bounds (inclusive)
 fn is_active_staker(vote_state: &VoteState, lower_bound: u64, upper_bound: u64) -> bool {
@@ -56,7 +57,11 @@ impl ActiveStakers {
     }
 
     pub fn new(bank: &Bank) -> Self {
-        Self::new_with_bounds(bank, DEFAULT_ACTIVE_WINDOW_TICK_LENGTH, bank.tick_height())
+        Self::new_with_bounds(
+            bank,
+            DEFAULT_ACTIVE_WINDOW_NUM_SLOTS,
+            LeaderSchedule::tick_height_to_slot(bank.tick_height(), bank.ticks_per_slot()),
+        )
     }
 
     pub fn sorted_stakes(&self) -> Vec<(Pubkey, u64)> {
diff --git a/src/banking_stage.rs b/src/banking_stage.rs
index f45297f0d1..f094b1745b 100644
--- a/src/banking_stage.rs
+++ b/src/banking_stage.rs
@@ -59,7 +59,6 @@ impl BankingStage {
             max_tick_height,
         };
 
-        let ticks_per_slot = max_tick_height - bank.tick_height();
         let poh_recorder = PohRecorder::new(bank.tick_height(), *last_entry_id);
 
         // Single thread to generate entries from many banks.
@@ -75,12 +74,8 @@ impl BankingStage {
             .expect("failed to send leader to poh_service");
 
         // Single thread to compute confirmation
-        let leader_confirmation_service = LeaderConfirmationService::new(
-            bank.clone(),
-            leader_id,
-            poh_exit.clone(),
-            ticks_per_slot,
-        );
+        let leader_confirmation_service =
+            LeaderConfirmationService::new(bank.clone(), leader_id, poh_exit.clone());
 
         // Many banks that process transactions in parallel.
         let bank_thread_hdls: Vec<JoinHandle<()>> = (0..Self::num_threads())
diff --git a/src/fullnode.rs b/src/fullnode.rs
index d3518b6e73..1ce539ff1e 100644
--- a/src/fullnode.rs
+++ b/src/fullnode.rs
@@ -605,8 +605,7 @@ mod tests {
         // epoch, check that the same leader knows to shut down and restart as a leader again.
         let ticks_per_slot = 5;
         let slots_per_epoch = 2;
-        let ticks_per_epoch = slots_per_epoch * ticks_per_slot;
-        let active_window_length = 10 * ticks_per_epoch;
+        let active_window_length = 10;
         let leader_scheduler_config =
             LeaderSchedulerConfig::new(ticks_per_slot, slots_per_epoch, active_window_length);
@@ -660,11 +659,8 @@ mod tests {
         let mut fullnode_config = FullnodeConfig::default();
         let ticks_per_slot = 16;
         let slots_per_epoch = 2;
-        fullnode_config.leader_scheduler_config = LeaderSchedulerConfig::new(
-            ticks_per_slot,
-            slots_per_epoch,
-            ticks_per_slot * slots_per_epoch,
-        );
+        fullnode_config.leader_scheduler_config =
+            LeaderSchedulerConfig::new(ticks_per_slot, slots_per_epoch, slots_per_epoch);
         let blocktree_config = &fullnode_config.ledger_config();
 
         // Create the leader and validator nodes
@@ -675,9 +671,9 @@ mod tests {
             &bootstrap_leader_keypair,
             &validator_keypair,
             0,
-            // Generate enough ticks for two epochs to flush the bootstrap_leader's vote at
+            // Generate enough ticks for an epoch to flush the bootstrap_leader's vote at
             // tick_height = 0 from the leader scheduler's active window
-            ticks_per_slot * slots_per_epoch * 2,
+            ticks_per_slot * slots_per_epoch,
             "test_wrong_role_transition",
             &blocktree_config,
         );
@@ -741,11 +737,8 @@ mod tests {
         let leader_keypair = Arc::new(Keypair::new());
         let validator_keypair = Arc::new(Keypair::new());
         let mut fullnode_config = FullnodeConfig::default();
-        fullnode_config.leader_scheduler_config = LeaderSchedulerConfig::new(
-            ticks_per_slot,
-            slots_per_epoch,
-            ticks_per_slot * slots_per_epoch,
-        );
+        fullnode_config.leader_scheduler_config =
+            LeaderSchedulerConfig::new(ticks_per_slot, slots_per_epoch, slots_per_epoch);
         let (leader_node, validator_node, validator_ledger_path, ledger_initial_len, last_id) =
             setup_leader_validator(
                 &leader_keypair,
@@ -835,6 +828,8 @@ mod tests {
         test_name: &str,
         blocktree_config: &BlocktreeConfig,
     ) -> (Node, Node, String, u64, Hash) {
+        info!("validator: {}", validator_keypair.pubkey());
+        info!("leader: {}", leader_keypair.pubkey());
         // Make a leader identity
         let leader_node = Node::new_localhost_with_pubkey(leader_keypair.pubkey());
@@ -857,7 +852,7 @@ mod tests {
             validator_keypair,
             &mint_keypair,
             10,
-            1,
+            0,
             &last_entry_id,
             &last_id,
             num_ending_ticks,
diff --git a/src/leader_confirmation_service.rs b/src/leader_confirmation_service.rs
index 9c537336d9..2623fd0f18 100644
--- a/src/leader_confirmation_service.rs
+++ b/src/leader_confirmation_service.rs
@@ -30,7 +30,6 @@ impl LeaderConfirmationService {
         bank: &Arc<Bank>,
         leader_id: Pubkey,
         last_valid_validator_timestamp: u64,
-        ticks_per_slot: u64,
     ) -> result::Result<u64, ConfirmationError> {
         let mut total_stake = 0;
@@ -49,7 +48,12 @@
                     .votes
                     .back()
                     // A vote for a slot is like a vote for the last tick in that slot
-                    .map(|vote| ((vote.slot_height + 1) * ticks_per_slot - 1, validator_stake))
+                    .map(|vote| {
+                        (
+                            (vote.slot_height + 1) * bank.ticks_per_slot() - 1,
+                            validator_stake,
+                        )
+                    })
             })
             .collect();
@@ -80,14 +84,10 @@ impl LeaderConfirmationService {
         bank: &Arc<Bank>,
         leader_id: Pubkey,
         last_valid_validator_timestamp: &mut u64,
-        ticks_per_slot: u64,
     ) {
-        if let Ok(super_majority_timestamp) = Self::get_last_supermajority_timestamp(
-            bank,
-            leader_id,
-            *last_valid_validator_timestamp,
-            ticks_per_slot,
-        ) {
+        if let Ok(super_majority_timestamp) =
+            Self::get_last_supermajority_timestamp(bank, leader_id, *last_valid_validator_timestamp)
+        {
             let now = timing::timestamp();
             let confirmation_ms = now - super_majority_timestamp;
@@ -105,12 +105,7 @@ impl LeaderConfirmationService {
     }
 
     /// Create a new LeaderConfirmationService for computing confirmation.
-    pub fn new(
-        bank: Arc<Bank>,
-        leader_id: Pubkey,
-        exit: Arc<AtomicBool>,
-        ticks_per_slot: u64,
-    ) -> Self {
+    pub fn new(bank: Arc<Bank>, leader_id: Pubkey, exit: Arc<AtomicBool>) -> Self {
         let thread_hdl = Builder::new()
             .name("solana-leader-confirmation-service".to_string())
             .spawn(move || {
@@ -123,7 +118,6 @@ impl LeaderConfirmationService {
                         &bank,
                         leader_id,
                         &mut last_valid_validator_timestamp,
-                        ticks_per_slot,
                     );
                     sleep(Duration::from_millis(COMPUTE_CONFIRMATION_MS));
                 }
@@ -153,8 +147,6 @@ pub mod tests {
     use solana_sdk::signature::{Keypair, KeypairUtil};
     use solana_sdk::vote_transaction::VoteTransaction;
     use std::sync::Arc;
-    use std::thread::sleep;
-    use std::time::Duration;
 
     #[test]
     fn test_compute_confirmation() {
@@ -163,12 +155,10 @@
         let (genesis_block, mint_keypair) = GenesisBlock::new(1234);
         let bank = Arc::new(Bank::new(&genesis_block));
         // generate 10 validators, but only vote for the first 6 validators
-        let ids: Vec<_> = (0..10)
+        let ids: Vec<_> = (0..10 * bank.ticks_per_slot())
             .map(|i| {
                 let last_id = hash(&serialize(&i).unwrap()); // Unique hash
                 bank.register_tick(&last_id);
-                // sleep to get a different timestamp in the bank
-                sleep(Duration::from_millis(1));
                 last_id
             })
             .collect();
@@ -201,7 +191,6 @@ pub mod tests {
             &bank,
             genesis_block.bootstrap_leader_id,
             &mut last_confirmation_time,
-            1,
         );
 
         // Get another validator to vote, so we now have 2/3 consensus
@@ -213,7 +202,6 @@ pub mod tests {
             &bank,
             genesis_block.bootstrap_leader_id,
            &mut last_confirmation_time,
-            1,
        );
        assert!(last_confirmation_time > 0);
    }
diff --git a/src/leader_schedule.rs b/src/leader_schedule.rs
index 99ee08cb31..468aa65e44 100644
--- a/src/leader_schedule.rs
+++ b/src/leader_schedule.rs
@@ -39,6 +39,10 @@ impl LeaderSchedule {
             bank.slots_per_epoch(),
         )
     }
+
+    pub fn tick_height_to_slot(tick_height: u64, ticks_per_slot: u64) -> u64 {
+        tick_height / ticks_per_slot
+    }
 }
 
 impl Index<u64> for LeaderSchedule {
diff --git a/src/leader_scheduler.rs b/src/leader_scheduler.rs
index c1bf6dc35a..50393a1baf 100644
--- a/src/leader_scheduler.rs
+++ b/src/leader_scheduler.rs
@@ -1,7 +1,7 @@
 //! The `leader_scheduler` module implements a structure and functions for tracking and
 //! managing the schedule for leader rotation
 
-use crate::active_stakers::{ActiveStakers, DEFAULT_ACTIVE_WINDOW_TICK_LENGTH};
+use crate::active_stakers::{ActiveStakers, DEFAULT_ACTIVE_WINDOW_NUM_SLOTS};
 use crate::entry::{create_ticks, next_entry_mut, Entry};
 use crate::voting_keypair::VotingKeypair;
 use bincode::serialize;
@@ -22,17 +22,17 @@ pub struct LeaderSchedulerConfig {
     pub slots_per_epoch: u64,
 
     // The tick length of the acceptable window for determining live validators
-    pub active_window_tick_length: u64,
+    pub active_window_length: u64,
 }
 
 // Used to toggle leader rotation in fullnode so that tests that don't
 // need leader rotation don't break
 impl LeaderSchedulerConfig {
-    pub fn new(ticks_per_slot: u64, slots_per_epoch: u64, active_window_tick_length: u64) -> Self {
+    pub fn new(ticks_per_slot: u64, slots_per_epoch: u64, active_window_length: u64) -> Self {
         LeaderSchedulerConfig {
             ticks_per_slot,
             slots_per_epoch,
-            active_window_tick_length,
+            active_window_length,
         }
     }
 }
@@ -42,7 +42,7 @@ impl Default for LeaderSchedulerConfig {
         Self {
             ticks_per_slot: DEFAULT_TICKS_PER_SLOT,
             slots_per_epoch: DEFAULT_SLOTS_PER_EPOCH,
-            active_window_tick_length: DEFAULT_ACTIVE_WINDOW_TICK_LENGTH,
+            active_window_length: DEFAULT_ACTIVE_WINDOW_NUM_SLOTS,
         }
     }
 }
@@ -56,9 +56,9 @@ pub struct LeaderScheduler {
     // This value must be divisible by ticks_per_slot
     pub ticks_per_epoch: u64,
 
-    // The length of time in ticks for which a vote qualifies a candidate for leader
+    // The number of slots for which a vote qualifies a candidate for leader
     // selection
-    active_window_tick_length: u64,
+    active_window_length: u64,
 
     // Round-robin ordering of the validators for the current epoch at epoch_schedule[0], and the
     // previous epoch at epoch_schedule[1]
@@ -89,18 +89,18 @@ impl LeaderScheduler {
     pub fn new(config: &LeaderSchedulerConfig) -> Self {
         let ticks_per_slot = config.ticks_per_slot;
         let ticks_per_epoch = config.ticks_per_slot * config.slots_per_epoch;
-        let active_window_tick_length = config.active_window_tick_length;
+        let active_window_length = config.active_window_length;
 
         // Enforced invariants
         assert!(ticks_per_slot > 0);
         assert!(ticks_per_epoch >= ticks_per_slot);
         assert!(ticks_per_epoch % ticks_per_slot == 0);
-        assert!(active_window_tick_length > 0);
+        assert!(active_window_length > 0);
 
         LeaderScheduler {
             ticks_per_slot,
             ticks_per_epoch,
-            active_window_tick_length,
+            active_window_length,
             seed: 0,
             epoch_schedule: [Vec::new(), Vec::new()],
             current_epoch: 0,
@@ -228,9 +228,9 @@ impl LeaderScheduler {
         }
 
         self.seed = Self::calculate_seed(tick_height);
+        let slot = self.tick_height_to_slot(tick_height);
         let ranked_active_set =
-            ActiveStakers::new_with_bounds(&bank, self.active_window_tick_length, tick_height)
-                .sorted_stakes();
+            ActiveStakers::new_with_bounds(&bank, self.active_window_length, slot).sorted_stakes();
 
         if ranked_active_set.is_empty() {
             info!(
@@ -346,7 +346,7 @@ pub fn make_active_set_entries(
     active_keypair: &Arc<Keypair>,
     token_source: &Keypair,
     stake: u64,
-    tick_height_to_vote_on: u64,
+    slot_height_to_vote_on: u64,
     last_entry_id: &Hash,
     last_tick_id: &Hash,
     num_ending_ticks: u64,
@@ -372,7 +372,7 @@
     // 3) Create vote entry
     let vote_tx =
-        VoteTransaction::new_vote(&voting_keypair, tick_height_to_vote_on, *last_tick_id, 0);
+        VoteTransaction::new_vote(&voting_keypair, slot_height_to_vote_on, *last_tick_id, 0);
     let vote_entry = next_entry_mut(&mut last_entry_id, 1, vec![vote_tx]);
 
     // 4) Create the ending empty ticks
@@ -389,20 +389,17 @@ pub mod tests {
     use hashbrown::HashSet;
     use solana_sdk::genesis_block::{GenesisBlock, BOOTSTRAP_LEADER_TOKENS};
 
-    fn run_scheduler_test(num_validators: usize, ticks_per_slot: u64, ticks_per_epoch: u64) {
+    fn run_scheduler_test(num_validators: u64, ticks_per_slot: u64, slots_per_epoch: u64) {
         info!(
             "run_scheduler_test({}, {}, {})",
-            num_validators, ticks_per_slot, ticks_per_epoch
+            num_validators, ticks_per_slot, slots_per_epoch
         );
         // Allow the validators to be in the active window for the entire test
-        let active_window_tick_length = ticks_per_epoch;
+        let active_window_length = slots_per_epoch;
 
         // Set up the LeaderScheduler struct
-        let leader_scheduler_config = LeaderSchedulerConfig::new(
-            ticks_per_slot,
-            ticks_per_epoch / ticks_per_slot,
-            active_window_tick_length,
-        );
+        let leader_scheduler_config =
+            LeaderSchedulerConfig::new(ticks_per_slot, slots_per_epoch, active_window_length);
 
         // Create the bank and validators, which are inserted in order of account balance
         let num_vote_account_tokens = 1;
@@ -425,13 +422,13 @@
                 .unwrap();
 
             // Vote to make the validator part of the active set for the entire test
-            // (we made the active_window_tick_length large enough at the beginning of the test)
+            // (we made the active_window_length large enough at the beginning of the test)
             new_vote_account_with_vote(
                 &new_validator,
                 &voting_keypair,
                 &bank,
                 num_vote_account_tokens as u64,
-                ticks_per_epoch,
+                slots_per_epoch,
             );
         }
 
@@ -441,17 +438,14 @@
         leader_scheduler.generate_schedule(0, &bank);
 
         // The leader outside of the newly generated schedule window:
-        // (0, ticks_per_epoch]
+        // (0, slots_per_epoch]
         assert_eq!(
             leader_scheduler.get_leader_for_slot(0),
             Some(genesis_block.bootstrap_leader_id)
         );
-        assert_eq!(
-            leader_scheduler
-                .get_leader_for_slot(leader_scheduler.tick_height_to_slot(ticks_per_epoch)),
-            None
-        );
+        assert_eq!(leader_scheduler.get_leader_for_slot(slots_per_epoch), None);
+        let ticks_per_epoch = slots_per_epoch * ticks_per_slot;
         // Generate schedule for second epoch. This schedule won't be used but the schedule for
         // the third epoch cannot be generated without an existing schedule for the second epoch
         leader_scheduler.generate_schedule(ticks_per_epoch, &bank);
@@ -464,7 +458,7 @@
         // For the next ticks_per_epoch entries, call get_leader_for_slot every
         // ticks_per_slot entries, and the next leader should be the next validator
         // in order of stake
-        let num_slots = ticks_per_epoch / ticks_per_slot;
+        let num_slots = slots_per_epoch;
         let mut start_leader_index = None;
         for i in 0..num_slots {
             let tick_height = 2 * ticks_per_epoch + i * ticks_per_slot;
@@ -489,7 +483,7 @@
             }
 
             let expected_leader =
-                validators[(start_leader_index.unwrap() + i as usize) % num_validators];
+                validators[((start_leader_index.unwrap() as u64 + i) % num_validators) as usize];
             assert_eq!(current_leader, expected_leader);
             assert_eq!(
                 slot,
@@ -610,32 +604,28 @@
         // is selected once
         let mut num_validators = 100;
         let mut ticks_per_slot = 100;
-        let mut ticks_per_epoch = ticks_per_slot * num_validators as u64;
-        run_scheduler_test(num_validators, ticks_per_slot, ticks_per_epoch);
+        run_scheduler_test(num_validators, ticks_per_slot, num_validators);
 
         // Test when there are fewer validators than
         // ticks_per_epoch / ticks_per_slot, so each validator
         // is selected multiple times
         num_validators = 3;
         ticks_per_slot = 100;
-        ticks_per_epoch = 1000;
-        run_scheduler_test(num_validators, ticks_per_slot, ticks_per_epoch);
+        run_scheduler_test(num_validators, ticks_per_slot, 10);
 
         // Test when there are more validators than
         // ticks_per_epoch / ticks_per_slot, so each validator
         // may not be selected
         num_validators = 10;
         ticks_per_slot = 100;
-        ticks_per_epoch = 200;
-        run_scheduler_test(num_validators, ticks_per_slot, ticks_per_epoch);
+        run_scheduler_test(num_validators, ticks_per_slot, 2);
 
         // Test when ticks_per_epoch == ticks_per_slot,
         // only one validator should be selected
         num_validators = 10;
         ticks_per_slot = 2;
-        ticks_per_epoch = 2;
-        run_scheduler_test(num_validators, ticks_per_slot, ticks_per_epoch);
+        run_scheduler_test(num_validators, ticks_per_slot, 1);
     }
 
     #[test]
@@ -650,16 +640,16 @@
        // is the cause of validators being truncated later)
        let ticks_per_slot = 100;
        let slots_per_epoch = num_validators;
-        let active_window_tick_length = ticks_per_slot * slots_per_epoch;
+        let active_window_length = slots_per_epoch;
 
        // Create the bank and validators
        let (genesis_block, mint_keypair) = GenesisBlock::new(
            ((((num_validators + 1) / 2) * (num_validators + 1))
                + (num_vote_account_tokens * num_validators)) as u64,
        );
        let bank = Bank::new(&genesis_block);
        let leader_scheduler_config =
-            LeaderSchedulerConfig::new(ticks_per_slot, slots_per_epoch, active_window_tick_length);
+            LeaderSchedulerConfig::new(ticks_per_slot, slots_per_epoch, active_window_length);
        let mut leader_scheduler = LeaderScheduler::new_with_bank(&leader_scheduler_config, &bank);
 
        let mut validators = vec![];
@@ -679,11 +669,11 @@
                .unwrap();
 
            // Create a vote account and push a vote
-            let tick_height = (i + 2) * active_window_tick_length - 1;
+            let tick_height = (i + 2) * active_window_length - 1;
            new_vote_account_with_vote(&new_validator, &voting_keypair, &bank, 1, tick_height);
        }
 
-        // Generate schedule every active_window_tick_length entries and check that
+        // Generate schedule every active_window_length entries and check that
        // validators are falling out of the rotation as they fall out of the
        // active set
        trace!("bootstrap_leader_id: {}", genesis_block.bootstrap_leader_id);
@@ -695,7 +685,8 @@
        assert_eq!(leader_scheduler.current_epoch, 0);
        for i in 0..=num_validators {
            info!("i === {}", i);
-            leader_scheduler.generate_schedule((i + 1) * active_window_tick_length, &bank);
+            leader_scheduler
+                .generate_schedule((i + 1) * ticks_per_slot * active_window_length, &bank);
            assert_eq!(leader_scheduler.current_epoch, i + 1);
            if i == 0 {
                assert_eq!(
@@ -718,14 +709,14 @@
        let ticks_per_slot = 100;
        let slots_per_epoch = 2;
        let ticks_per_epoch = ticks_per_slot * slots_per_epoch;
-        let active_window_tick_length = 1;
+        let active_window_length = 1;
 
        // Check that the generate_schedule() function is being called by the
        // update_tick_height() function at the correct entry heights.
        let (genesis_block, _) = GenesisBlock::new(10_000);
        let bank = Bank::new(&genesis_block);
        let leader_scheduler_config =
-            LeaderSchedulerConfig::new(ticks_per_slot, slots_per_epoch, active_window_tick_length);
+            LeaderSchedulerConfig::new(ticks_per_slot, slots_per_epoch, active_window_length);
        let mut leader_scheduler = LeaderScheduler::new_with_bank(&leader_scheduler_config, &bank);
        info!(
            "bootstrap_leader_id: {:?}",
@@ -840,10 +831,10 @@
        // Check actual arguments for LeaderScheduler
        let ticks_per_slot = 100;
        let slots_per_epoch = 2;
-        let active_window_tick_length = 1;
+        let active_window_length = 1;
 
        let leader_scheduler_config =
-            LeaderSchedulerConfig::new(ticks_per_slot, slots_per_epoch, active_window_tick_length);
+            LeaderSchedulerConfig::new(ticks_per_slot, slots_per_epoch, active_window_length);
 
        let leader_scheduler = LeaderScheduler::new(&leader_scheduler_config);
@@ -858,7 +849,7 @@
        let bootstrap_leader_keypair = Arc::new(Keypair::new());
        let bootstrap_leader_id = bootstrap_leader_keypair.pubkey();
        let ticks_per_slot = 100;
-        let active_window_tick_length = slots_per_epoch * ticks_per_slot;
+        let active_window_length = slots_per_epoch;
 
        // Create mint and bank
        let (genesis_block, mint_keypair) =
@@ -908,11 +899,11 @@
            &voting_keypair,
            &bank,
            vote_account_tokens as u64,
-            initial_vote_height,
+            0,
        );
 
        let leader_scheduler_config =
-            LeaderSchedulerConfig::new(ticks_per_slot, slots_per_epoch, active_window_tick_length);
+            LeaderSchedulerConfig::new(ticks_per_slot, slots_per_epoch, active_window_length);
        let mut leader_scheduler = LeaderScheduler::new(&leader_scheduler_config);
 
        leader_scheduler.generate_schedule(0, &bank);
@@ -921,8 +912,8 @@
 
        // Make sure the validator, not the leader is selected on the first slot of the
        // next epoch
-        leader_scheduler.generate_schedule(1, &bank);
-        assert_eq!(leader_scheduler.current_epoch, 0);
+        leader_scheduler.generate_schedule(ticks_per_slot * slots_per_epoch, &bank);
+        assert_eq!(leader_scheduler.current_epoch, 1);
        if add_validator {
            assert_eq!(leader_scheduler.epoch_schedule[0][0], validator_id);
        } else {
diff --git a/src/replay_stage.rs b/src/replay_stage.rs
index bebfa47a79..c46aba7a7d 100644
--- a/src/replay_stage.rs
+++ b/src/replay_stage.rs
@@ -88,10 +88,13 @@ impl ReplayStage {
        );
 
        let num_ticks = bank.tick_height();
-        let mut num_ticks_to_next_vote = leader_scheduler
-            .read()
-            .unwrap()
-            .num_ticks_left_in_slot(num_ticks);
+        let (mut num_ticks_to_next_vote, slot_height) = {
+            let rl = leader_scheduler.read().unwrap();
+            (
+                rl.num_ticks_left_in_slot(num_ticks),
+                rl.tick_height_to_slot(num_ticks),
+            )
+        };
        for (i, entry) in entries.iter().enumerate() {
            inc_new_counter_info!("replicate-stage_bank-tick", bank.tick_height() as usize);
@@ -138,12 +141,8 @@
                }
                if let Some(voting_keypair) = voting_keypair {
                    let keypair = voting_keypair.as_ref();
-                    let vote = VoteTransaction::new_vote(
-                        keypair,
-                        bank.tick_height(),
-                        bank.last_id(),
-                        0,
-                    );
+                    let vote =
+                        VoteTransaction::new_vote(keypair, slot_height, bank.last_id(), 0);
                    cluster_info.write().unwrap().push_vote(vote);
                }
            }
@@ -427,7 +426,7 @@
            &my_keypair,
            &mint_keypair,
            100,
-            ticks_per_slot, // add a vote for tick_height = ticks_per_slot
+            1, // add a vote for slot_height = 1 (the slot starting at tick_height = ticks_per_slot)
            &last_entry_id,
            &last_id,
            num_ending_ticks,
@@ -578,7 +577,7 @@
        );
 
        let keypair = voting_keypair.as_ref();
-        let vote = VoteTransaction::new_vote(keypair, bank.tick_height(), bank.last_id(), 0);
+        let vote = VoteTransaction::new_vote(keypair, 0, bank.last_id(), 0);
        cluster_info_me.write().unwrap().push_vote(vote);
 
        info!("Send ReplayStage an entry, should see it on the ledger writer receiver");
@@ -704,7 +703,7 @@
        );
 
        let keypair = voting_keypair.as_ref();
-        let vote = VoteTransaction::new_vote(keypair, bank.tick_height(), bank.last_id(), 0);
+        let vote = VoteTransaction::new_vote(keypair, 0, bank.last_id(), 0);
        cluster_info_me.write().unwrap().push_vote(vote);
 
        // Send enough ticks to trigger leader rotation
diff --git a/tests/multinode.rs b/tests/multinode.rs
index c2a4f94342..0d02692163 100644
--- a/tests/multinode.rs
+++ b/tests/multinode.rs
@@ -929,7 +929,7 @@ fn test_leader_to_validator_transition() {
        // Setup window length to exclude the genesis bootstrap leader vote at tick height 0, so
        // that when the leader schedule is recomputed for epoch 1 only the validator vote at tick
        // height 1 will be considered.
-        ticks_per_slot,
+        1,
    );
    let blocktree_config = fullnode_config.ledger_config();
@@ -957,7 +957,7 @@
        &validator_keypair,
        &mint_keypair,
        100,
-        ticks_per_slot,
+        1,
        &last_entry_id,
        &last_id,
        0,
@@ -1040,7 +1040,7 @@ fn test_leader_validator_basic() {
    fullnode_config.leader_scheduler_config = LeaderSchedulerConfig::new(
        ticks_per_slot,
        1, // 1 slot per epoch
-        ticks_per_slot,
+        1,
    );
    let blocktree_config = fullnode_config.ledger_config();
@@ -1066,7 +1066,7 @@
        &validator_keypair,
        &mint_keypair,
        100,
-        1,
+        0,
        &last_entry_id,
        &last_id,
        0,
@@ -1182,10 +1182,9 @@ fn test_dropped_handoff_recovery() {
    // Create the common leader scheduling configuration
    let slots_per_epoch = (N + 1) as u64;
    let ticks_per_slot = 5;
-    let ticks_per_epoch = slots_per_epoch * ticks_per_slot;
    let mut fullnode_config = FullnodeConfig::default();
    fullnode_config.leader_scheduler_config =
-        LeaderSchedulerConfig::new(ticks_per_slot, slots_per_epoch, ticks_per_epoch);
+        LeaderSchedulerConfig::new(ticks_per_slot, slots_per_epoch, slots_per_epoch);
    let blocktree_config = fullnode_config.ledger_config();
 
    // Make a common mint and a genesis entry for both leader + validator's ledgers
@@ -1221,7 +1220,7 @@
        &next_leader_keypair,
        &mint_keypair,
        100,
-        ticks_per_slot,
+        1,
        &last_entry_id,
        &last_id,
        0,
@@ -1353,10 +1352,9 @@ fn test_full_leader_validator_network() {
    // Create the common leader scheduling configuration
    let slots_per_epoch = (N + 1) as u64;
    let ticks_per_slot = 5;
-    let ticks_per_epoch = slots_per_epoch * ticks_per_slot;
    let mut fullnode_config = FullnodeConfig::default();
    fullnode_config.leader_scheduler_config =
-        LeaderSchedulerConfig::new(ticks_per_slot, slots_per_epoch, ticks_per_epoch * 3);
+        LeaderSchedulerConfig::new(ticks_per_slot, slots_per_epoch, slots_per_epoch * 3);
 
    // Create the bootstrap leader node information
    let bootstrap_leader_keypair = Arc::new(Keypair::new());
@@ -1403,7 +1401,7 @@
            node_keypair,
            &mint_keypair,
            100,
-            1,
+            0,
            &last_entry_id,
            &last_id,
            0,
@@ -1790,9 +1788,7 @@ fn test_fullnode_rotate(
    */
    let blocktree_config = fullnode_config.ledger_config();
 
-    fullnode_config
-        .leader_scheduler_config
-        .active_window_tick_length = std::u64::MAX;
+    fullnode_config.leader_scheduler_config.active_window_length = std::u64::MAX;
 
    // Create the leader node information
    let leader_keypair = Arc::new(Keypair::new());
@@ -1839,7 +1835,7 @@
        &validator_keypair,
        &mint_keypair,
        100,
-        1,
+        0,
        &last_entry_id,
        &last_id,
        0,
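
Reviewer note: the substance of this change is that the active window and vote heights are now denominated in slots rather than ticks, with `LeaderSchedule::tick_height_to_slot` as the single conversion point. A minimal standalone sketch of that arithmetic follows; it is illustrative only, and `is_in_active_window` is a hypothetical helper, not part of this patch (the in-tree equivalents are `tick_height_to_slot` and the inclusive bounds check in `is_active_staker`).

fn tick_height_to_slot(tick_height: u64, ticks_per_slot: u64) -> u64 {
    // A slot is a fixed-width run of ticks, so the conversion is plain integer division.
    tick_height / ticks_per_slot
}

// Hypothetical helper: a vote keeps a staker active if it landed within the
// last `active_window_num_slots` slots, bounds inclusive.
fn is_in_active_window(vote_slot: u64, current_slot: u64, active_window_num_slots: u64) -> bool {
    let lower_bound = current_slot.saturating_sub(active_window_num_slots);
    vote_slot >= lower_bound && vote_slot <= current_slot
}

fn main() {
    // e.g. with 8 ticks per slot, tick 17 lands in slot 2
    assert_eq!(tick_height_to_slot(17, 8), 2);
    // a vote one slot back survives a one-slot window...
    assert!(is_in_active_window(9, 10, 1));
    // ...but an older vote falls out
    assert!(!is_in_active_window(3, 10, 1));
}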