leader_scheduler: replace older terminology with ticks, slots and epochs

Michael Vines
2019-02-07 09:31:09 -08:00
committed by Grimes
parent cdb2a7bef3
commit 1f0b3f954a
7 changed files with 226 additions and 334 deletions

View File

@ -370,7 +370,7 @@ mod test {
// Mock the tick height to look like the tick height right after a leader transition
leader_scheduler.set_leader_schedule(vec![leader_keypair.pubkey()]);
let start_tick_height = 0;
let max_tick_height = start_tick_height + leader_scheduler.seed_rotation_interval;
let max_tick_height = start_tick_height + leader_scheduler.ticks_per_epoch;
let entry_height = 2 * start_tick_height;
let leader_scheduler = Arc::new(RwLock::new(leader_scheduler));

View File

@ -55,7 +55,7 @@ pub fn repair(
// 1) The replay stage hasn't caught up to the "consumed" entries we sent,
// in which case it will eventually catch up
//
// 2) We are on the border between seed_rotation_intervals, so the
// 2) We are on the border between epochs, so the
// schedule won't be known until the entry on that cusp is received
// by the replay stage (which comes after this stage). Hence, the next
// leader at the beginning of that next epoch will not know they are the

View File

@ -615,15 +615,12 @@ mod tests {
// Once the bootstrap leader hits the second epoch, because there are no other choices in
// the active set, this leader will remain the leader in the second epoch. In the second
// epoch, check that the same leader knows to shut down and restart as a leader again.
let leader_rotation_interval = 5;
let num_slots_per_epoch = 2;
let seed_rotation_interval = num_slots_per_epoch * leader_rotation_interval;
let active_window_length = 10 * seed_rotation_interval;
let leader_scheduler_config = LeaderSchedulerConfig::new(
leader_rotation_interval,
seed_rotation_interval,
active_window_length,
);
let ticks_per_slot = 5;
let slots_per_epoch = 2;
let ticks_per_epoch = slots_per_epoch * ticks_per_slot;
let active_window_length = 10 * ticks_per_epoch;
let leader_scheduler_config =
LeaderSchedulerConfig::new(ticks_per_slot, slots_per_epoch, active_window_length);
let bootstrap_leader_keypair = Arc::new(bootstrap_leader_keypair);
let voting_keypair = VotingKeypair::new_local(&bootstrap_leader_keypair);
@ -646,10 +643,7 @@ mod tests {
// cluster it will continue to be the leader
assert_eq!(
rotation_receiver.recv().unwrap(),
(
FullnodeReturnType::LeaderToLeaderRotation,
leader_rotation_interval
)
(FullnodeReturnType::LeaderToLeaderRotation, ticks_per_slot)
);
bootstrap_leader_exit();
}
@ -659,11 +653,12 @@ mod tests {
solana_logger::setup();
let mut fullnode_config = FullnodeConfig::default();
let leader_rotation_interval = 16;
let ticks_per_slot = 16;
let slots_per_epoch = 2;
fullnode_config.leader_scheduler_config = LeaderSchedulerConfig::new(
leader_rotation_interval,
leader_rotation_interval * 2,
leader_rotation_interval * 2,
ticks_per_slot,
slots_per_epoch,
ticks_per_slot * slots_per_epoch,
);
// Create the leader and validator nodes
@ -676,7 +671,7 @@ mod tests {
0,
// Generate enough ticks for two epochs to flush the bootstrap_leader's vote at
// tick_height = 0 from the leader scheduler's active window
leader_rotation_interval * 4,
ticks_per_slot * 4,
"test_wrong_role_transition",
);
let bootstrap_leader_info = bootstrap_leader_node.info.clone();
@ -749,13 +744,14 @@ mod tests {
info!("validator: {:?}", validator_info.id);
// Set the leader scheduler for the validator
let leader_rotation_interval = 10;
let ticks_per_slot = 10;
let slots_per_epoch = 4;
let mut fullnode_config = FullnodeConfig::default();
fullnode_config.leader_scheduler_config = LeaderSchedulerConfig::new(
leader_rotation_interval,
leader_rotation_interval * 4,
leader_rotation_interval * 4,
ticks_per_slot,
slots_per_epoch,
ticks_per_slot * slots_per_epoch,
);
let voting_keypair = VotingKeypair::new_local(&validator_keypair);
@ -769,12 +765,7 @@ mod tests {
&fullnode_config,
);
let blobs_to_send = fullnode_config
.leader_scheduler_config
.seed_rotation_interval
+ fullnode_config
.leader_scheduler_config
.leader_rotation_interval;
let blobs_to_send = slots_per_epoch * ticks_per_slot + ticks_per_slot;
// Send blobs to the validator from our mock leader
let t_responder = {
@ -817,12 +808,7 @@ mod tests {
let (bank, entry_height, _, _, _, _) =
new_bank_from_ledger(&validator_ledger_path, &LeaderSchedulerConfig::default());
assert!(
bank.tick_height()
>= fullnode_config
.leader_scheduler_config
.seed_rotation_interval
);
assert!(bank.tick_height() >= bank.leader_scheduler.read().unwrap().ticks_per_epoch);
assert!(entry_height >= ledger_initial_len);
@ -851,13 +837,14 @@ mod tests {
let leader_node_info = leader_node.info.clone();
// Set the leader scheduler for the validator
let leader_rotation_interval = 5;
let ticks_per_slot = 5;
let slots_per_epoch = 2;
let mut fullnode_config = FullnodeConfig::default();
fullnode_config.leader_scheduler_config = LeaderSchedulerConfig::new(
leader_rotation_interval,
leader_rotation_interval * 2,
leader_rotation_interval * 2,
ticks_per_slot,
slots_per_epoch,
ticks_per_slot * slots_per_epoch,
);
let voting_keypair = VotingKeypair::new_local(&leader_keypair);
@ -895,17 +882,14 @@ mod tests {
let (rotation_sender, rotation_receiver) = channel();
let leader_exit = leader.run(Some(rotation_sender));
let expected_rotations = vec![
(FullnodeReturnType::LeaderToLeaderRotation, ticks_per_slot),
(
FullnodeReturnType::LeaderToLeaderRotation,
leader_rotation_interval,
),
(
FullnodeReturnType::LeaderToLeaderRotation,
2 * leader_rotation_interval,
2 * ticks_per_slot,
),
(
FullnodeReturnType::LeaderToValidatorRotation,
3 * leader_rotation_interval,
3 * ticks_per_slot,
),
];

View File

@ -23,34 +23,25 @@ pub const DEFAULT_TICKS_PER_SLOT: u64 = 8;
*/
pub const DEFAULT_TICKS_PER_SLOT: u64 = 64; // TODO: DEFAULT_TICKS_PER_SLOT = 8 causes instability in the integration tests.
pub const DEFAULT_SLOTS_PER_EPOCH: u64 = 64;
pub const DEFAULT_SEED_ROTATION_INTERVAL: u64 = DEFAULT_SLOTS_PER_EPOCH * DEFAULT_TICKS_PER_SLOT;
pub const DEFAULT_ACTIVE_WINDOW_LENGTH: u64 = DEFAULT_SEED_ROTATION_INTERVAL;
pub const DEFAULT_ACTIVE_WINDOW_TICK_LENGTH: u64 = DEFAULT_SLOTS_PER_EPOCH * DEFAULT_TICKS_PER_SLOT;
#[derive(Clone)]
pub struct LeaderSchedulerConfig {
// The interval at which to rotate the leader, should be much less than
// seed_rotation_interval
pub leader_rotation_interval: u64,
pub ticks_per_slot: u64,
pub slots_per_epoch: u64,
// The interval at which to generate the seed used for ranking the validators
pub seed_rotation_interval: u64,
// The length of the acceptable window for determining live validators
pub active_window_length: u64,
// The tick length of the acceptable window for determining live validators
pub active_window_tick_length: u64,
}
// Used to toggle leader rotation in fullnode so that tests that don't
// need leader rotation don't break
impl LeaderSchedulerConfig {
pub fn new(
leader_rotation_interval: u64,
seed_rotation_interval: u64,
active_window_length: u64,
) -> Self {
pub fn new(ticks_per_slot: u64, slots_per_epoch: u64, active_window_tick_length: u64) -> Self {
LeaderSchedulerConfig {
leader_rotation_interval,
seed_rotation_interval,
active_window_length,
ticks_per_slot,
slots_per_epoch,
active_window_tick_length,
}
}
}
@ -58,9 +49,9 @@ impl LeaderSchedulerConfig {
impl Default for LeaderSchedulerConfig {
fn default() -> Self {
Self {
leader_rotation_interval: DEFAULT_TICKS_PER_SLOT,
seed_rotation_interval: DEFAULT_SEED_ROTATION_INTERVAL,
active_window_length: DEFAULT_ACTIVE_WINDOW_LENGTH,
ticks_per_slot: DEFAULT_TICKS_PER_SLOT,
slots_per_epoch: DEFAULT_SLOTS_PER_EPOCH,
active_window_tick_length: DEFAULT_ACTIVE_WINDOW_TICK_LENGTH,
}
}
}
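As a quick orientation for the renamed API, a minimal usage sketch follows. It is illustrative only: the crate/module path and the concrete numbers are assumptions, while the constructor signature and the derived `ticks_per_epoch` field come from the patch itself.

```rust
// Illustrative only. The module path is assumed; adjust to wherever
// LeaderSchedulerConfig / LeaderScheduler live in your tree.
use solana::leader_scheduler::{LeaderScheduler, LeaderSchedulerConfig};

fn example_config() {
    let ticks_per_slot = 8;
    let slots_per_epoch = 10;
    // The active window is now expressed directly in ticks.
    let active_window_tick_length = ticks_per_slot * slots_per_epoch;

    let config =
        LeaderSchedulerConfig::new(ticks_per_slot, slots_per_epoch, active_window_tick_length);
    let scheduler = LeaderScheduler::new(&config);

    // ticks_per_epoch is derived; it is no longer passed in as a separate
    // seed_rotation_interval argument.
    assert_eq!(scheduler.ticks_per_epoch, ticks_per_slot * slots_per_epoch);
}
```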
@ -68,15 +59,15 @@ impl Default for LeaderSchedulerConfig {
#[derive(Clone, Debug)]
pub struct LeaderScheduler {
// A leader slot duration in ticks
pub leader_rotation_interval: u64,
pub ticks_per_slot: u64,
// Duration of an epoch (one or more slots) in ticks.
// This value must be divisible by leader_rotation_interval
pub seed_rotation_interval: u64,
// This value must be divisible by ticks_per_slot
pub ticks_per_epoch: u64,
// The length of time in ticks for which a vote qualifies a candidate for leader
// selection
pub active_window_length: u64,
active_window_tick_length: u64,
// Round-robin ordering of the validators for the current epoch at epoch_schedule[0], and the
// previous epoch at epoch_schedule[1]
@ -92,31 +83,33 @@ pub struct LeaderScheduler {
// The LeaderScheduler implements a schedule for leaders as follows:
//
// 1) After the first seed is generated, this signals the beginning of actual leader rotation.
// From this point on, every seed_rotation_interval PoH counts we generate the seed based
// From this point on, every ticks_per_epoch PoH counts we generate the seed based
// on the PoH height, and use it to do a weighted sample from the set
// of validators based on current stake weight. This gets you the bootstrap leader A for
// the next leader_rotation_interval PoH counts. On the same PoH count we generate the seed,
// the next ticks_per_slot PoH counts. On the same PoH count we generate the seed,
// we also order the validators based on their current stake weight, and starting
// from leader A, we then pick the next leader sequentially every leader_rotation_interval
// from leader A, we then pick the next leader sequentially every ticks_per_slot
// PoH counts based on this fixed ordering, so the next
// seed_rotation_interval / leader_rotation_interval leaders are determined.
// ticks_per_epoch / ticks_per_slot leaders are determined.
//
// 2) When we hit the next epoch-boundary PoH height, step 1) is executed again to
// calculate the leader schedule for the upcoming seed_rotation_interval PoH counts.
// calculate the leader schedule for the upcoming ticks_per_epoch PoH counts.
impl LeaderScheduler {
pub fn new(config: &LeaderSchedulerConfig) -> Self {
let leader_rotation_interval = config.leader_rotation_interval;
let seed_rotation_interval = config.seed_rotation_interval;
let active_window_length = config.active_window_length;
let ticks_per_slot = config.ticks_per_slot;
let ticks_per_epoch = config.ticks_per_slot * config.slots_per_epoch;
let active_window_tick_length = config.active_window_tick_length;
// Enforced invariants
assert!(seed_rotation_interval >= leader_rotation_interval);
assert!(seed_rotation_interval % leader_rotation_interval == 0);
assert!(ticks_per_slot > 0);
assert!(ticks_per_epoch >= ticks_per_slot);
assert!(ticks_per_epoch % ticks_per_slot == 0);
assert!(active_window_tick_length > 0);
LeaderScheduler {
leader_rotation_interval,
seed_rotation_interval,
active_window_length,
ticks_per_slot,
ticks_per_epoch,
active_window_tick_length,
seed: 0,
epoch_schedule: [Vec::new(), Vec::new()],
current_epoch: 0,
@ -124,17 +117,17 @@ impl LeaderScheduler {
}
pub fn tick_height_to_slot(&self, tick_height: u64) -> u64 {
tick_height / self.leader_rotation_interval
tick_height / self.ticks_per_slot
}
fn tick_height_to_epoch(&self, tick_height: u64) -> u64 {
tick_height / self.seed_rotation_interval
tick_height / self.ticks_per_epoch
}
// Returns the number of ticks remaining from the specified tick_height to the end of the
// current slot
pub fn num_ticks_left_in_slot(&self, tick_height: u64) -> u64 {
self.leader_rotation_interval - tick_height % self.leader_rotation_interval - 1
self.ticks_per_slot - tick_height % self.ticks_per_slot - 1
}
// Inform the leader scheduler about the current tick height of the cluster. It may generate a
@ -154,7 +147,7 @@ impl LeaderScheduler {
// the current slot, so that generally the schedule applies to the range [slot N tick 1,
// slot N+1 tick 0). The schedule is shifted right 1 tick from the slot rotation interval so that
// the next leader is always known *before* a rotation occurs
if tick_height == 0 || tick_height % self.seed_rotation_interval == 1 {
if tick_height == 0 || tick_height % self.ticks_per_epoch == 1 {
self.generate_schedule(tick_height, bank);
}
}
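A short worked example of the tick arithmetic and the shifted-by-one regeneration rule above (the numbers are chosen purely for illustration):

```rust
// With ticks_per_slot = 10 and ticks_per_epoch = 20:
//   tick_height_to_slot(25)    == 25 / 10          == 2
//   tick_height_to_epoch(25)   == 25 / 20          == 1
//   num_ticks_left_in_slot(25) == 10 - 25 % 10 - 1 == 4
// update_tick_height() regenerates the schedule at ticks 0, 1, 21, 41, ...
// (tick_height % ticks_per_epoch == 1), i.e. one tick after each epoch border,
// so the incoming epoch's leaders are known before its next slot rotation.
```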
@ -162,7 +155,7 @@ impl LeaderScheduler {
// Returns the leader for the requested slot, or None if the slot is out of the schedule bounds
pub fn get_leader_for_slot(&self, slot: u64) -> Option<Pubkey> {
trace!("get_leader_for_slot: slot {}", slot);
let tick_height = slot * self.leader_rotation_interval;
let tick_height = slot * self.ticks_per_slot;
let epoch = self.tick_height_to_epoch(tick_height);
trace!(
"get_leader_for_slot: tick_height={} slot={} epoch={} (ce={})",
@ -191,8 +184,8 @@ impl LeaderScheduler {
panic!("leader_schedule is empty"); // Should never happen
}
let first_tick_in_epoch = epoch * self.seed_rotation_interval;
let slot_index = (tick_height - first_tick_in_epoch) / self.leader_rotation_interval;
let first_tick_in_epoch = epoch * self.ticks_per_epoch;
let slot_index = (tick_height - first_tick_in_epoch) / self.ticks_per_slot;
// Round robin through each node in the schedule
Some(schedule[slot_index as usize % schedule.len()])
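The `slot_index` arithmetic above implements the round-robin described in the module comment. A simplified, self-contained sketch (illustrative, not the patch's exact code, with a plain index standing in for a `Pubkey`):

```rust
// Simplified illustration of the round-robin lookup: the slot's position
// within its epoch selects an entry of the ranked schedule, wrapping around.
fn leader_index_for_tick(
    tick_height: u64,
    ticks_per_slot: u64,
    ticks_per_epoch: u64,
    schedule_len: usize,
) -> usize {
    let first_tick_in_epoch = (tick_height / ticks_per_epoch) * ticks_per_epoch;
    let slot_index = (tick_height - first_tick_in_epoch) / ticks_per_slot;
    slot_index as usize % schedule_len
}

// e.g. ticks_per_slot = 100, ticks_per_epoch = 1_000, schedule_len = 4:
// the epoch's ten slots are served by schedule entries 0,1,2,3,0,1,2,3,0,1.
```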
@ -204,7 +197,7 @@ impl LeaderScheduler {
// process_transaction(), case VoteInstruction::RegisterAccount), we can use a vector.
fn get_active_set(&mut self, tick_height: u64, bank: &Bank) -> HashSet<Pubkey> {
let upper_bound = tick_height;
let lower_bound = tick_height.saturating_sub(self.active_window_length);
let lower_bound = tick_height.saturating_sub(self.active_window_tick_length);
trace!(
"get_active_set: vote bounds ({}, {})",
lower_bound,
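For intuition about the voting window bounds computed above (the window length below is illustrative):

```rust
// With active_window_tick_length = 1_000:
//   at tick_height 2_500: lower_bound = 2_500 - 1_000 = 1_500, upper_bound = 2_500;
//   at tick_height   800: the lower bound saturates to 0, so every vote seen
//                         so far still counts toward the active set.
```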
@ -303,8 +296,7 @@ impl LeaderScheduler {
let next_slot_leader = validator_rankings[0];
if last_slot_leader == next_slot_leader {
let slots_per_epoch =
self.seed_rotation_interval / self.leader_rotation_interval;
let slots_per_epoch = self.ticks_per_epoch / self.ticks_per_slot;
if slots_per_epoch == 1 {
// If there is only one slot per epoch, and the same leader as the last slot
// of the previous epoch was chosen, then pick the next leader in the
@ -326,7 +318,7 @@ impl LeaderScheduler {
trace!(
"generate_schedule: schedule for ticks ({}, {}): {:?} ",
tick_height,
tick_height + self.seed_rotation_interval,
tick_height + self.ticks_per_epoch,
self.epoch_schedule[0]
);
}
@ -490,23 +482,19 @@ pub mod tests {
bank.process_transaction(&new_vote_tx).unwrap();
}
fn run_scheduler_test(
num_validators: usize,
leader_rotation_interval: u64,
seed_rotation_interval: u64,
) {
fn run_scheduler_test(num_validators: usize, ticks_per_slot: u64, ticks_per_epoch: u64) {
info!(
"run_scheduler_test({}, {}, {})",
num_validators, leader_rotation_interval, seed_rotation_interval
num_validators, ticks_per_slot, ticks_per_epoch
);
// Allow the validators to be in the active window for the entire test
let active_window_length = seed_rotation_interval;
let active_window_tick_length = ticks_per_epoch;
// Set up the LeaderScheduler struct
let leader_scheduler_config = LeaderSchedulerConfig::new(
leader_rotation_interval,
seed_rotation_interval,
active_window_length,
ticks_per_slot,
ticks_per_epoch / ticks_per_slot,
active_window_tick_length,
);
// Create the bank and validators, which are inserted in order of account balance
@ -537,11 +525,11 @@ pub mod tests {
);
// Vote to make the validator part of the active set for the entire test
// (we made the active_window_length large enough at the beginning of the test)
// (we made the active_window_tick_length large enough at the beginning of the test)
push_vote(
&voting_keypair,
&bank,
seed_rotation_interval,
ticks_per_epoch,
genesis_block.last_id(),
);
}
@ -551,7 +539,7 @@ pub mod tests {
leader_scheduler.generate_schedule(0, &bank);
// The leader outside of the newly generated schedule window:
// (0, seed_rotation_interval]
// (0, ticks_per_epoch]
info!("yyy");
assert_eq!(
leader_scheduler.get_leader_for_slot(0),
@ -560,7 +548,7 @@ pub mod tests {
info!("xxxx");
assert_eq!(
leader_scheduler
.get_leader_for_slot(leader_scheduler.tick_height_to_slot(seed_rotation_interval)),
.get_leader_for_slot(leader_scheduler.tick_height_to_slot(ticks_per_epoch)),
None
);
@ -571,15 +559,15 @@ pub mod tests {
// Generate schedule for third epoch to ensure the bootstrap leader will not be added to
// the schedule, as the bootstrap leader did not vote in the second epoch but all other
// validators did
leader_scheduler.generate_schedule(seed_rotation_interval + 1, &bank);
leader_scheduler.generate_schedule(ticks_per_epoch + 1, &bank);
// For the next seed_rotation_interval entries, call get_leader_for_slot every
// leader_rotation_interval entries, and the next leader should be the next validator
// For the next ticks_per_epoch entries, call get_leader_for_slot every
// ticks_per_slot entries, and the next leader should be the next validator
// in order of stake
let num_slots = seed_rotation_interval / leader_rotation_interval;
let num_slots = ticks_per_epoch / ticks_per_slot;
let mut start_leader_index = None;
for i in 0..num_slots {
let tick_height = 2 * seed_rotation_interval + i * leader_rotation_interval;
let tick_height = 2 * ticks_per_epoch + i * ticks_per_slot;
info!("iteration {}: tick_height={}", i, tick_height);
let slot = leader_scheduler.tick_height_to_slot(tick_height);
let current_leader = leader_scheduler
@ -605,11 +593,11 @@ pub mod tests {
assert_eq!(current_leader, expected_leader);
assert_eq!(
slot,
leader_scheduler.tick_height_to_slot(2 * seed_rotation_interval) + i
leader_scheduler.tick_height_to_slot(2 * ticks_per_epoch) + i
);
assert_eq!(
slot,
leader_scheduler.tick_height_to_slot(tick_height + leader_rotation_interval - 1)
leader_scheduler.tick_height_to_slot(tick_height + ticks_per_slot - 1)
);
assert_eq!(
leader_scheduler.get_leader_for_slot(slot),
@ -620,7 +608,7 @@ pub mod tests {
#[test]
fn test_num_ticks_left_in_slot() {
let leader_scheduler = LeaderScheduler::new(&LeaderSchedulerConfig::new(10, 20, 0));
let leader_scheduler = LeaderScheduler::new(&LeaderSchedulerConfig::new(10, 2, 1));
assert_eq!(leader_scheduler.num_ticks_left_in_slot(0), 9);
assert_eq!(leader_scheduler.num_ticks_left_in_slot(1), 8);
@ -638,8 +626,8 @@ pub mod tests {
solana_logger::setup();
let leader_id = Keypair::new().pubkey();
let active_window_length = 1000;
let leader_scheduler_config = LeaderSchedulerConfig::new(100, 100, active_window_length);
let active_window_tick_length = 1000;
let leader_scheduler_config = LeaderSchedulerConfig::new(100, 1, active_window_tick_length);
let (genesis_block, mint_keypair) = GenesisBlock::new_with_leader(10000, leader_id, 500);
let bank = Bank::new_with_leader_scheduler_config(&genesis_block, &leader_scheduler_config);
@ -677,7 +665,7 @@ pub mod tests {
);
}
// Insert a bunch of votes at height "start_height + active_window_length"
// Insert a bunch of votes at height "start_height + active_window_tick_length"
let num_new_ids = 10;
let mut new_ids = HashSet::new();
for _ in 0..num_new_ids {
@ -701,7 +689,7 @@ pub mod tests {
push_vote(
&voting_keypair,
&bank,
start_height + active_window_length + 1,
start_height + active_window_tick_length + 1,
genesis_block.last_id(),
);
}
@ -716,26 +704,27 @@ pub mod tests {
assert_eq!(result, bootstrap_ids);
let result =
leader_scheduler.get_active_set(active_window_length + start_height - 1, &bank);
assert_eq!(result, old_ids);
let result = leader_scheduler.get_active_set(active_window_length + start_height, &bank);
leader_scheduler.get_active_set(active_window_tick_length + start_height - 1, &bank);
assert_eq!(result, old_ids);
let result =
leader_scheduler.get_active_set(active_window_length + start_height + 1, &bank);
leader_scheduler.get_active_set(active_window_tick_length + start_height, &bank);
assert_eq!(result, old_ids);
let result =
leader_scheduler.get_active_set(active_window_tick_length + start_height + 1, &bank);
assert_eq!(result, new_ids);
let result =
leader_scheduler.get_active_set(2 * active_window_length + start_height, &bank);
leader_scheduler.get_active_set(2 * active_window_tick_length + start_height, &bank);
assert_eq!(result, new_ids);
let result =
leader_scheduler.get_active_set(2 * active_window_length + start_height + 1, &bank);
let result = leader_scheduler
.get_active_set(2 * active_window_tick_length + start_height + 1, &bank);
assert_eq!(result, new_ids);
let result =
leader_scheduler.get_active_set(2 * active_window_length + start_height + 2, &bank);
let result = leader_scheduler
.get_active_set(2 * active_window_tick_length + start_height + 2, &bank);
assert!(result.is_empty());
}
@ -885,52 +874,36 @@ pub mod tests {
fn test_scheduler_basic() {
solana_logger::setup();
// Test when the number of validators equals
// seed_rotation_interval / leader_rotation_interval, so each validator
// ticks_per_epoch / ticks_per_slot, so each validator
// is selected once
let mut num_validators = 100;
let mut leader_rotation_interval = 100;
let mut seed_rotation_interval = leader_rotation_interval * num_validators as u64;
let mut ticks_per_slot = 100;
let mut ticks_per_epoch = ticks_per_slot * num_validators as u64;
run_scheduler_test(
num_validators,
leader_rotation_interval,
seed_rotation_interval,
);
run_scheduler_test(num_validators, ticks_per_slot, ticks_per_epoch);
// Test when there are fewer validators than
// seed_rotation_interval / leader_rotation_interval, so each validator
// ticks_per_epoch / ticks_per_slot, so each validator
// is selected multiple times
num_validators = 3;
leader_rotation_interval = 100;
seed_rotation_interval = 1000;
run_scheduler_test(
num_validators,
leader_rotation_interval,
seed_rotation_interval,
);
ticks_per_slot = 100;
ticks_per_epoch = 1000;
run_scheduler_test(num_validators, ticks_per_slot, ticks_per_epoch);
// Test when there are fewer number of validators than
// seed_rotation_interval / leader_rotation_interval, so each validator
// ticks_per_epoch / ticks_per_slot, so each validator
// may not be selected
num_validators = 10;
leader_rotation_interval = 100;
seed_rotation_interval = 200;
run_scheduler_test(
num_validators,
leader_rotation_interval,
seed_rotation_interval,
);
ticks_per_slot = 100;
ticks_per_epoch = 200;
run_scheduler_test(num_validators, ticks_per_slot, ticks_per_epoch);
// Test when seed_rotation_interval == leader_rotation_interval,
// Test when ticks_per_epoch == ticks_per_slot,
// only one validator should be selected
num_validators = 10;
leader_rotation_interval = 2;
seed_rotation_interval = 2;
run_scheduler_test(
num_validators,
leader_rotation_interval,
seed_rotation_interval,
);
ticks_per_slot = 2;
ticks_per_epoch = 2;
run_scheduler_test(num_validators, ticks_per_slot, ticks_per_epoch);
}
#[test]
@ -940,18 +913,15 @@ pub mod tests {
let num_validators = 10;
let num_vote_account_tokens = 1;
// Make sure seed_rotation_interval is big enough so we select all the
// Make sure ticks_per_epoch is big enough so we select all the
// validators as part of the schedule each time (we need to check the active window
// is the cause of validators being truncated later)
let leader_rotation_interval = 100;
let seed_rotation_interval = leader_rotation_interval * num_validators;
let active_window_length = seed_rotation_interval;
let ticks_per_slot = 100;
let slots_per_epoch = num_validators;
let active_window_tick_length = ticks_per_slot * slots_per_epoch;
let leader_scheduler_config = LeaderSchedulerConfig::new(
leader_rotation_interval,
seed_rotation_interval,
active_window_length,
);
let leader_scheduler_config =
LeaderSchedulerConfig::new(ticks_per_slot, slots_per_epoch, active_window_tick_length);
// Create the bank and validators
let (genesis_block, mint_keypair) = GenesisBlock::new(
@ -988,12 +958,12 @@ pub mod tests {
push_vote(
&voting_keypair,
&bank,
(i + 2) * active_window_length - 1,
(i + 2) * active_window_tick_length - 1,
genesis_block.last_id(),
);
}
// Generate schedule every active_window_length entries and check that
// Generate schedule every active_window_tick_length entries and check that
// validators are falling out of the rotation as they fall out of the
// active set
let mut leader_scheduler = bank.leader_scheduler.write().unwrap();
@ -1006,7 +976,7 @@ pub mod tests {
assert_eq!(leader_scheduler.current_epoch, 1);
for i in 0..=num_validators {
info!("i === {}", i);
leader_scheduler.generate_schedule((i + 1) * active_window_length, &bank);
leader_scheduler.generate_schedule((i + 1) * active_window_tick_length, &bank);
assert_eq!(leader_scheduler.current_epoch, i + 2);
if i == 0 {
assert_eq!(
@ -1026,9 +996,9 @@ pub mod tests {
fn test_multiple_vote() {
let leader_keypair = Arc::new(Keypair::new());
let leader_id = leader_keypair.pubkey();
let active_window_length = 1000;
let active_window_tick_length = 1000;
let (genesis_block, _mint_keypair) = GenesisBlock::new_with_leader(10000, leader_id, 500);
let leader_scheduler_config = LeaderSchedulerConfig::new(100, 100, active_window_length);
let leader_scheduler_config = LeaderSchedulerConfig::new(100, 1, active_window_tick_length);
let bank = Bank::new_with_leader_scheduler_config(&genesis_block, &leader_scheduler_config);
// Bootstrap leader should be in the active set even without explicit votes
@ -1037,10 +1007,10 @@ pub mod tests {
let result = leader_scheduler.get_active_set(0, &bank);
assert_eq!(result, to_hashset_owned(&vec![leader_id]));
let result = leader_scheduler.get_active_set(active_window_length, &bank);
let result = leader_scheduler.get_active_set(active_window_tick_length, &bank);
assert_eq!(result, to_hashset_owned(&vec![leader_id]));
let result = leader_scheduler.get_active_set(active_window_length + 1, &bank);
let result = leader_scheduler.get_active_set(active_window_tick_length + 1, &bank);
assert!(result.is_empty());
}
@ -1062,10 +1032,10 @@ pub mod tests {
{
let mut leader_scheduler = bank.leader_scheduler.write().unwrap();
let result = leader_scheduler.get_active_set(active_window_length + 1, &bank);
let result = leader_scheduler.get_active_set(active_window_tick_length + 1, &bank);
assert_eq!(result, to_hashset_owned(&vec![leader_id]));
let result = leader_scheduler.get_active_set(active_window_length + 2, &bank);
let result = leader_scheduler.get_active_set(active_window_tick_length + 2, &bank);
assert!(result.is_empty());
}
@ -1074,10 +1044,10 @@ pub mod tests {
{
let mut leader_scheduler = bank.leader_scheduler.write().unwrap();
let result = leader_scheduler.get_active_set(active_window_length + 2, &bank);
let result = leader_scheduler.get_active_set(active_window_tick_length + 2, &bank);
assert_eq!(result, to_hashset_owned(&vec![leader_id]));
let result = leader_scheduler.get_active_set(active_window_length + 3, &bank);
let result = leader_scheduler.get_active_set(active_window_tick_length + 3, &bank);
assert!(result.is_empty());
}
}
@ -1086,15 +1056,13 @@ pub mod tests {
fn test_update_tick_height() {
solana_logger::setup();
let leader_rotation_interval = 100;
let seed_rotation_interval = 2 * leader_rotation_interval;
let active_window_length = 1;
let ticks_per_slot = 100;
let slots_per_epoch = 2;
let ticks_per_epoch = ticks_per_slot * slots_per_epoch;
let active_window_tick_length = 1;
let leader_scheduler_config = LeaderSchedulerConfig::new(
leader_rotation_interval,
seed_rotation_interval,
active_window_length,
);
let leader_scheduler_config =
LeaderSchedulerConfig::new(ticks_per_slot, slots_per_epoch, active_window_tick_length);
// Check that the generate_schedule() function is being called by the
// update_tick_height() function at the correct entry heights.
@ -1129,10 +1097,10 @@ pub mod tests {
//
for tick_height in &[
1,
leader_rotation_interval,
leader_rotation_interval + 1,
seed_rotation_interval - 1,
seed_rotation_interval,
ticks_per_slot,
ticks_per_slot + 1,
ticks_per_epoch - 1,
ticks_per_epoch,
] {
info!("Checking tick_height {}", *tick_height);
leader_scheduler.update_tick_height(*tick_height, &bank);
@ -1163,11 +1131,11 @@ pub mod tests {
// Check various tick heights in epoch 1, and tick 0 of epoch 2
//
for tick_height in &[
seed_rotation_interval + 1,
seed_rotation_interval + leader_rotation_interval,
seed_rotation_interval + leader_rotation_interval + 1,
seed_rotation_interval + seed_rotation_interval - 1,
seed_rotation_interval + seed_rotation_interval,
ticks_per_epoch + 1,
ticks_per_epoch + ticks_per_slot,
ticks_per_epoch + ticks_per_slot + 1,
ticks_per_epoch + ticks_per_epoch - 1,
ticks_per_epoch + ticks_per_epoch,
] {
info!("Checking tick_height {}", *tick_height);
leader_scheduler.update_tick_height(*tick_height, &bank);
@ -1205,50 +1173,37 @@ pub mod tests {
let leader_scheduler = LeaderScheduler::new(&leader_scheduler_config);
assert_eq!(leader_scheduler.ticks_per_slot, DEFAULT_TICKS_PER_SLOT);
assert_eq!(
leader_scheduler.leader_rotation_interval,
DEFAULT_TICKS_PER_SLOT
);
assert_eq!(
leader_scheduler.seed_rotation_interval,
DEFAULT_SEED_ROTATION_INTERVAL
leader_scheduler.ticks_per_epoch,
DEFAULT_TICKS_PER_SLOT * DEFAULT_SLOTS_PER_EPOCH
);
// Check actual arguments for LeaderScheduler
let leader_rotation_interval = 100;
let seed_rotation_interval = 200;
let active_window_length = 1;
let ticks_per_slot = 100;
let slots_per_epoch = 2;
let active_window_tick_length = 1;
let leader_scheduler_config = LeaderSchedulerConfig::new(
leader_rotation_interval,
seed_rotation_interval,
active_window_length,
);
let leader_scheduler_config =
LeaderSchedulerConfig::new(ticks_per_slot, slots_per_epoch, active_window_tick_length);
let leader_scheduler = LeaderScheduler::new(&leader_scheduler_config);
assert_eq!(leader_scheduler.ticks_per_slot, ticks_per_slot);
assert_eq!(
leader_scheduler.leader_rotation_interval,
leader_rotation_interval
);
assert_eq!(
leader_scheduler.seed_rotation_interval,
seed_rotation_interval
leader_scheduler.ticks_per_epoch,
ticks_per_slot * slots_per_epoch
);
}
fn run_consecutive_leader_test(num_slots_per_epoch: u64, add_validator: bool) {
fn run_consecutive_leader_test(slots_per_epoch: u64, add_validator: bool) {
let bootstrap_leader_keypair = Arc::new(Keypair::new());
let bootstrap_leader_id = bootstrap_leader_keypair.pubkey();
let leader_rotation_interval = 100;
let seed_rotation_interval = num_slots_per_epoch * leader_rotation_interval;
let active_window_length = seed_rotation_interval;
let ticks_per_slot = 100;
let active_window_tick_length = slots_per_epoch * ticks_per_slot;
let leader_scheduler_config = LeaderSchedulerConfig::new(
leader_rotation_interval,
seed_rotation_interval,
active_window_length,
);
let leader_scheduler_config =
LeaderSchedulerConfig::new(ticks_per_slot, slots_per_epoch, active_window_tick_length);
// Create mint and bank
let (genesis_block, mint_keypair) =

View File

@ -100,11 +100,7 @@ impl ReplayStage {
inc_new_counter_info!("replicate-stage_bank-tick", bank.tick_height() as usize);
if entry.is_tick() {
if num_ticks_to_next_vote == 0 {
num_ticks_to_next_vote = bank
.leader_scheduler
.read()
.unwrap()
.leader_rotation_interval;
num_ticks_to_next_vote = bank.leader_scheduler.read().unwrap().ticks_per_slot;
}
num_ticks_to_next_vote -= 1;
}
@ -205,8 +201,7 @@ impl ReplayStage {
let tick_height = bank.tick_height();
let leader_scheduler = bank.leader_scheduler.read().unwrap();
let current_slot = leader_scheduler.tick_height_to_slot(tick_height + 1);
let first_tick_in_current_slot =
current_slot * leader_scheduler.leader_rotation_interval;
let first_tick_in_current_slot = current_slot * leader_scheduler.ticks_per_slot;
(
current_slot,
first_tick_in_current_slot
@ -274,11 +269,8 @@ impl ReplayStage {
}
current_slot += 1;
max_tick_height_for_slot += bank
.leader_scheduler
.read()
.unwrap()
.leader_rotation_interval;
max_tick_height_for_slot +=
bank.leader_scheduler.read().unwrap().ticks_per_slot;
last_leader_id = leader_id;
}
}
@ -383,19 +375,15 @@ mod test {
info!("old_leader_id: {:?}", old_leader_id);
// Set up the LeaderScheduler so that my_id becomes the leader for epoch 1
let leader_rotation_interval = 16;
let leader_scheduler_config = LeaderSchedulerConfig::new(
leader_rotation_interval,
leader_rotation_interval,
leader_rotation_interval,
);
let ticks_per_slot = 16;
let leader_scheduler_config = LeaderSchedulerConfig::new(ticks_per_slot, 1, ticks_per_slot);
let my_keypair = Arc::new(my_keypair);
let (active_set_entries, voting_keypair) = make_active_set_entries(
&my_keypair,
&mint_keypair,
100,
leader_rotation_interval, // add a vote for tick_height = leader_rotation_interval
ticks_per_slot, // add a vote for tick_height = ticks_per_slot
&last_id,
&last_id,
0,
@ -438,7 +426,7 @@ mod test {
l_receiver,
);
let total_entries_to_send = 2 * leader_rotation_interval as usize - 2;
let total_entries_to_send = 2 * ticks_per_slot as usize - 2;
let mut entries_to_send = vec![];
while entries_to_send.len() < total_entries_to_send {
let entry = Entry::new(&mut last_id, 0, 1, vec![]);
@ -457,7 +445,7 @@ mod test {
info!("Wait for replay_stage to exit and check return value is correct");
assert_eq!(
Some(TvuReturnType::LeaderRotation(
2 * leader_rotation_interval - 1,
2 * ticks_per_slot - 1,
expected_entry_height,
expected_last_id,
)),
@ -607,18 +595,11 @@ mod test {
.unwrap();
}
// Set up the LeaderScheduler so that this node becomes the leader at
// tick height = ticks_per_slot * slots_per_epoch
let leader_rotation_interval = 10;
let num_bootstrap_slots = 2;
let bootstrap_height = num_bootstrap_slots * leader_rotation_interval;
let leader_scheduler_config = LeaderSchedulerConfig::new(
leader_rotation_interval,
leader_rotation_interval * 2,
bootstrap_height,
);
let ticks_per_slot = 10;
let slots_per_epoch = 2;
let active_window_tick_length = ticks_per_slot * slots_per_epoch;
let leader_scheduler_config =
LeaderSchedulerConfig::new(ticks_per_slot, slots_per_epoch, active_window_tick_length);
// Set up the cluster info
let cluster_info_me = Arc::new(RwLock::new(ClusterInfo::new(my_node.info.clone())));
@ -658,14 +639,15 @@ mod test {
cluster_info_me.write().unwrap().push_vote(vote);
// Send enough ticks to trigger leader rotation
let total_entries_to_send = (bootstrap_height - initial_tick_height) as usize;
let total_entries_to_send = (active_window_tick_length - initial_tick_height) as usize;
let num_hashes = 1;
// Add the entries that weren't ticks to the rotation tick height to get the
// total expected entry height
let expected_entry_height =
bootstrap_height + initial_non_tick_height + active_set_entries_len;
let leader_rotation_index = (bootstrap_height - initial_tick_height - 1) as usize;
active_window_tick_length + initial_non_tick_height + active_set_entries_len;
let leader_rotation_index =
(active_window_tick_length - initial_tick_height - 1) as usize;
let mut expected_last_id = Hash::default();
for i in 0..total_entries_to_send {
let entry = Entry::new(&mut last_id, 0, num_hashes, vec![]);
@ -691,7 +673,7 @@ mod test {
// Wait for replay_stage to exit and check return value is correct
assert_eq!(
Some(TvuReturnType::LeaderRotation(
bootstrap_height,
active_window_tick_length,
expected_entry_height,
expected_last_id,
)),

View File

@ -140,7 +140,7 @@ impl WindowUtil for Window {
// 1) The replay stage hasn't caught up to the "consumed" entries we sent,
// in which case it will eventually catch up
//
// 2) We are on the border between seed_rotation_intervals, so the
// 2) We are on the border between epochs, so the
// schedule won't be known until the entry on that cusp is received
// by the replay stage (which comes after this stage). Hence, the next
// leader at the beginning of that next epoch will not know they are the

View File

@ -130,10 +130,8 @@ fn test_multi_node_ledger_window() -> result::Result<()> {
let fullnode_config = FullnodeConfig::default();
info!(
"leader_rotation_interval: {}",
fullnode_config
.leader_scheduler_config
.leader_rotation_interval
"ticks_per_slot: {}",
fullnode_config.leader_scheduler_config.ticks_per_slot
);
// Write some into leader's ledger, this should populate the leader's window
@ -143,11 +141,7 @@ fn test_multi_node_ledger_window() -> result::Result<()> {
let db_ledger = DbLedger::open(&leader_ledger_path).unwrap();
let entries = solana::entry::create_ticks(
fullnode_config
.leader_scheduler_config
.leader_rotation_interval
- last_entry_height
- 2,
fullnode_config.leader_scheduler_config.ticks_per_slot - last_entry_height - 2,
last_entry_id,
);
db_ledger
@ -931,14 +925,14 @@ fn test_leader_to_validator_transition() {
let leader_info = leader_node.info.clone();
let mut fullnode_config = FullnodeConfig::default();
let leader_rotation_interval = 5;
let ticks_per_slot = 5;
fullnode_config.leader_scheduler_config = LeaderSchedulerConfig::new(
leader_rotation_interval,
leader_rotation_interval,
ticks_per_slot,
1,
// Setup window length to exclude the genesis bootstrap leader vote at tick height 0, so
// that when the leader schedule is recomputed for epoch 1 only the validator vote at tick
// height 1 will be considered.
leader_rotation_interval,
ticks_per_slot,
);
// Initialize the leader ledger. Make a mint and a genesis entry
@ -958,7 +952,7 @@ fn test_leader_to_validator_transition() {
&validator_keypair,
&mint_keypair,
100,
leader_rotation_interval,
ticks_per_slot,
&last_id,
&last_id,
0,
@ -993,13 +987,10 @@ fn test_leader_to_validator_transition() {
// slot 0 -> slot 1: bootstrap leader remains the leader
// slot 1 -> slot 2: bootstrap leader to the validator
let expected_rotations = vec![
(
FullnodeReturnType::LeaderToLeaderRotation,
leader_rotation_interval,
),
(FullnodeReturnType::LeaderToLeaderRotation, ticks_per_slot),
(
FullnodeReturnType::LeaderToValidatorRotation,
2 * leader_rotation_interval,
2 * ticks_per_slot,
),
];
@ -1020,10 +1011,7 @@ fn test_leader_to_validator_transition() {
assert_eq!(
bank.tick_height(),
2 * fullnode_config
.leader_scheduler_config
.leader_rotation_interval
- 1
2 * fullnode_config.leader_scheduler_config.ticks_per_slot - 1
);
remove_dir_all(leader_ledger_path).unwrap();
}
@ -1083,11 +1071,11 @@ fn test_leader_validator_basic() {
// Create the leader scheduler config
let mut fullnode_config = FullnodeConfig::default();
let leader_rotation_interval = 5;
let ticks_per_slot = 5;
fullnode_config.leader_scheduler_config = LeaderSchedulerConfig::new(
leader_rotation_interval,
leader_rotation_interval, // 1 slot per epoch
leader_rotation_interval,
ticks_per_slot,
1, // 1 slot per epoch
ticks_per_slot,
);
// Start the validator node
@ -1121,10 +1109,7 @@ fn test_leader_validator_basic() {
info!("Waiting for slot 0 -> slot 1: bootstrap leader will remain the leader");
assert_eq!(
leader_rotation_receiver.recv().unwrap(),
(
FullnodeReturnType::LeaderToLeaderRotation,
leader_rotation_interval,
)
(FullnodeReturnType::LeaderToLeaderRotation, ticks_per_slot)
);
info!("Waiting for slot 1 -> slot 2: bootstrap leader becomes a validator");
@ -1132,7 +1117,7 @@ fn test_leader_validator_basic() {
leader_rotation_receiver.recv().unwrap(),
(
FullnodeReturnType::LeaderToValidatorRotation,
leader_rotation_interval * 2,
ticks_per_slot * 2,
)
);
@ -1141,7 +1126,7 @@ fn test_leader_validator_basic() {
validator_rotation_receiver.recv().unwrap(),
(
FullnodeReturnType::ValidatorToLeaderRotation,
leader_rotation_interval * 2,
ticks_per_slot * 2,
)
);
@ -1155,7 +1140,7 @@ fn test_leader_validator_basic() {
let validator_entries: Vec<Entry> = read_ledger(&validator_ledger_path);
let leader_entries = read_ledger(&leader_ledger_path);
assert!(leader_entries.len() as u64 >= leader_rotation_interval);
assert!(leader_entries.len() as u64 >= ticks_per_slot);
for (v, l) in validator_entries.iter().zip(leader_entries) {
assert_eq!(*v, l);
@ -1182,15 +1167,12 @@ fn test_dropped_handoff_recovery() {
let bootstrap_leader_info = bootstrap_leader_node.info.clone();
// Create the common leader scheduling configuration
let num_slots_per_epoch = (N + 1) as u64;
let leader_rotation_interval = 5;
let seed_rotation_interval = num_slots_per_epoch * leader_rotation_interval;
let slots_per_epoch = (N + 1) as u64;
let ticks_per_slot = 5;
let ticks_per_epoch = slots_per_epoch * ticks_per_slot;
let mut fullnode_config = FullnodeConfig::default();
fullnode_config.leader_scheduler_config = LeaderSchedulerConfig::new(
leader_rotation_interval,
seed_rotation_interval,
seed_rotation_interval,
);
fullnode_config.leader_scheduler_config =
LeaderSchedulerConfig::new(ticks_per_slot, slots_per_epoch, ticks_per_epoch);
// Make a common mint and a genesis entry for both leader + validator's ledgers
let num_ending_ticks = 1;
@ -1218,7 +1200,7 @@ fn test_dropped_handoff_recovery() {
&next_leader_keypair,
&mint_keypair,
100,
leader_rotation_interval,
ticks_per_slot,
&last_id,
&last_id,
0,
@ -1399,15 +1381,12 @@ fn test_full_leader_validator_network() {
}
// Create the common leader scheduling configuration
let num_slots_per_epoch = (N + 1) as u64;
let leader_rotation_interval = 5;
let seed_rotation_interval = num_slots_per_epoch * leader_rotation_interval;
let slots_per_epoch = (N + 1) as u64;
let ticks_per_slot = 5;
let ticks_per_epoch = slots_per_epoch * ticks_per_slot;
let mut fullnode_config = FullnodeConfig::default();
fullnode_config.leader_scheduler_config = LeaderSchedulerConfig::new(
leader_rotation_interval,
seed_rotation_interval,
seed_rotation_interval,
);
fullnode_config.leader_scheduler_config =
LeaderSchedulerConfig::new(ticks_per_slot, slots_per_epoch, ticks_per_epoch);
let mut nodes = vec![];
@ -1538,13 +1517,7 @@ fn test_full_leader_validator_network() {
}
let shortest = shortest.unwrap();
assert!(
shortest
>= fullnode_config
.leader_scheduler_config
.leader_rotation_interval
* 3,
);
assert!(shortest >= fullnode_config.leader_scheduler_config.ticks_per_slot * 3);
for path in ledger_paths {
DbLedger::destroy(&path).expect("Expected successful database destruction");
@ -1598,18 +1571,16 @@ fn test_broadcast_last_tick() {
})
.collect();
let leader_rotation_interval = 40;
let seed_rotation_interval = 2 * leader_rotation_interval;
let ticks_per_slot = 40;
let slots_per_epoch = 2;
let ticks_per_epoch = slots_per_epoch * ticks_per_slot;
// Start up the bootstrap leader fullnode
let bootstrap_leader_keypair = Arc::new(bootstrap_leader_keypair);
let voting_keypair = VotingKeypair::new_local(&bootstrap_leader_keypair);
let mut fullnode_config = FullnodeConfig::default();
fullnode_config.leader_scheduler_config = LeaderSchedulerConfig::new(
leader_rotation_interval,
seed_rotation_interval,
seed_rotation_interval,
);
fullnode_config.leader_scheduler_config =
LeaderSchedulerConfig::new(ticks_per_slot, slots_per_epoch, ticks_per_epoch);
let bootstrap_leader = Fullnode::new(
bootstrap_leader_node,
&bootstrap_leader_keypair,
@ -1638,8 +1609,8 @@ fn test_broadcast_last_tick() {
info!("Shutting down the leader...");
bootstrap_leader_exit();
// Index of the last tick must be at least leader_rotation_interval - 1
let last_tick_entry_index = leader_rotation_interval as usize - 2;
// Index of the last tick must be at least ticks_per_slot - 1
let last_tick_entry_index = ticks_per_slot as usize - 2;
let entries = read_ledger(&bootstrap_leader_ledger_path);
assert!(entries.len() >= last_tick_entry_index + 1);
let expected_last_tick = &entries[last_tick_entry_index];