Limit leader schedule search space (#8468) (#8485)

automerge
commit 100a11f061
parent 6d431b8210
Author: mergify[bot]
Date: 2020-02-26 15:49:31 -08:00
Committed by: GitHub
4 changed files with 54 additions and 23 deletions
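The diff below bounds how far LeaderScheduleCache::next_leader_slot scans when collecting a node's upcoming run of consecutive leader slots: the method gains a max_slot_range parameter, and PohRecorder, ReplayStage, and Validator now pass GRACE_TICKS_FACTOR * MAX_GRACE_SLOTS (2 * 2 = 4 slots), while the updated tests pass std::u64::MAX to keep the old unbounded behavior. The standalone Rust sketch here only illustrates the bounded-search idea; the function name, slot list, and values are hypothetical stand-ins, not the actual LeaderScheduleCache code.

// Sketch: scan a node's leader slots for the first upcoming contiguous run,
// stopping early once the run reaches `max_slot_range` slots (this mirrors
// the early return added in this commit; it is not the real implementation).
fn next_leader_range(leader_slots: &[u64], current_slot: u64, max_slot_range: u64) -> Option<(u64, u64)> {
    let mut first_slot = None;
    let mut last_slot = current_slot;
    for &slot in leader_slots.iter().filter(|&&s| s > current_slot) {
        match first_slot {
            // The run is already `max_slot_range` slots long: stop searching.
            Some(first) if slot - first + 1 >= max_slot_range => return Some((first, slot)),
            // A gap ends the contiguous run: return what was found so far.
            Some(first) if slot != last_slot + 1 => return Some((first, last_slot)),
            Some(_) => {}
            None => first_slot = Some(slot),
        }
        last_slot = slot;
    }
    first_slot.map(|first| (first, last_slot))
}

fn main() {
    // Hypothetical schedule: this node leads slots 10 through 17.
    let slots: Vec<u64> = (10..18).collect();
    // With GRACE_TICKS_FACTOR * MAX_GRACE_SLOTS = 2 * 2 = 4, the 8-slot run is
    // truncated to its first 4 slots.
    assert_eq!(next_leader_range(&slots, 5, 4), Some((10, 13)));
    // std::u64::MAX (as the updated tests pass) keeps the old unbounded behavior.
    assert_eq!(next_leader_range(&slots, 5, std::u64::MAX), Some((10, 17)));
    println!("bounded leader slot search ok");
}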

View File

@@ -28,8 +28,8 @@ use std::sync::{Arc, Mutex};
 use std::time::Instant;
 use thiserror::Error;
-const GRACE_TICKS_FACTOR: u64 = 2;
-const MAX_GRACE_SLOTS: u64 = 2;
+pub const GRACE_TICKS_FACTOR: u64 = 2;
+pub const MAX_GRACE_SLOTS: u64 = 2;
 #[derive(Error, Debug, Clone)]
 pub enum PohRecorderError {
@@ -85,6 +85,7 @@ impl PohRecorder {
             bank.slot(),
             &bank,
             Some(&self.blockstore),
+            GRACE_TICKS_FACTOR * MAX_GRACE_SLOTS,
         );
         assert_eq!(self.ticks_per_slot, bank.ticks_per_slot());
         let (leader_first_tick_height, leader_last_tick_height, grace_ticks) =

View File

@@ -4,7 +4,7 @@ use crate::{
     cluster_info::ClusterInfo,
     commitment::{AggregateCommitmentService, BlockCommitmentCache, CommitmentAggregationData},
     consensus::{StakeLockout, Tower},
-    poh_recorder::PohRecorder,
+    poh_recorder::{PohRecorder, GRACE_TICKS_FACTOR, MAX_GRACE_SLOTS},
     result::Result,
     rewards_recorder_service::RewardsRecorderSender,
     rpc_subscriptions::RpcSubscriptions,
@@ -690,6 +690,7 @@ impl ReplayStage {
             bank.slot(),
             &bank,
             Some(blockstore),
+            GRACE_TICKS_FACTOR * MAX_GRACE_SLOTS,
         );
         poh_recorder
             .lock()

View File

@@ -6,7 +6,7 @@ use crate::{
     commitment::BlockCommitmentCache,
     contact_info::ContactInfo,
     gossip_service::{discover_cluster, GossipService},
-    poh_recorder::PohRecorder,
+    poh_recorder::{PohRecorder, GRACE_TICKS_FACTOR, MAX_GRACE_SLOTS},
     poh_service::PohService,
     rewards_recorder_service::RewardsRecorderService,
     rpc::JsonRpcConfig,
@@ -308,7 +308,13 @@ impl Validator {
             bank.tick_height(),
             bank.last_blockhash(),
             bank.slot(),
-            leader_schedule_cache.next_leader_slot(&id, bank.slot(), &bank, Some(&blockstore)),
+            leader_schedule_cache.next_leader_slot(
+                &id,
+                bank.slot(),
+                &bank,
+                Some(&blockstore),
+                GRACE_TICKS_FACTOR * MAX_GRACE_SLOTS,
+            ),
             bank.ticks_per_slot(),
             &id,
             &blockstore,

View File

@@ -106,6 +106,7 @@ impl LeaderScheduleCache {
         mut current_slot: Slot,
         bank: &Bank,
         blockstore: Option<&Blockstore>,
+        max_slot_range: u64,
     ) -> Option<(Slot, Slot)> {
         let (mut epoch, mut start_index) = bank.get_epoch_and_slot_index(current_slot + 1);
         let mut first_slot = None;
@@ -141,9 +142,14 @@
                         }
                     }
-                    if first_slot.is_none() {
+                    if let Some(first_slot) = first_slot {
+                        if current_slot - first_slot + 1 >= max_slot_range {
+                            return Some((first_slot, current_slot));
+                        }
+                    } else {
                         first_slot = Some(current_slot);
                     }
                     last_slot = current_slot;
                 } else if first_slot.is_some() {
                     return Some((first_slot.unwrap(), last_slot));
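For intuition, a hypothetical trace of the new early return (the slot numbers are illustrative, not from the diff), assuming the caller passes GRACE_TICKS_FACTOR * MAX_GRACE_SLOTS = 4:

// first_slot = Some(10), max_slot_range = 4
// current_slot = 11: 11 - 10 + 1 = 2 < 4, keep scanning
// current_slot = 12: 12 - 10 + 1 = 3 < 4, keep scanning
// current_slot = 13: 13 - 10 + 1 = 4 >= 4, return Some((10, 13))
// i.e. an inclusive range of exactly four slots, where the old code kept
// scanning to the end of the node's consecutive leader run.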
@@ -264,6 +270,7 @@ mod tests {
         staking_utils::tests::setup_vote_and_stake_accounts,
     };
     use solana_runtime::bank::Bank;
+    use solana_sdk::clock::NUM_CONSECUTIVE_LEADER_SLOTS;
     use solana_sdk::epoch_schedule::{
         EpochSchedule, DEFAULT_LEADER_SCHEDULE_SLOT_OFFSET, DEFAULT_SLOTS_PER_EPOCH,
         MINIMUM_SLOTS_PER_EPOCH,
@@ -395,11 +402,11 @@
             pubkey
         );
         assert_eq!(
-            cache.next_leader_slot(&pubkey, 0, &bank, None),
+            cache.next_leader_slot(&pubkey, 0, &bank, None, std::u64::MAX),
             Some((1, 863999))
         );
         assert_eq!(
-            cache.next_leader_slot(&pubkey, 1, &bank, None),
+            cache.next_leader_slot(&pubkey, 1, &bank, None, std::u64::MAX),
             Some((2, 863999))
         );
         assert_eq!(
@@ -407,7 +414,8 @@
                 &pubkey,
                 2 * genesis_config.epoch_schedule.slots_per_epoch - 1, // no schedule generated for epoch 2
                 &bank,
-                None
+                None,
+                std::u64::MAX
             ),
             None
         );
@@ -417,7 +425,8 @@
                 &Pubkey::new_rand(), // not in leader_schedule
                 0,
                 &bank,
-                None
+                None,
+                std::u64::MAX
             ),
             None
         );
@@ -450,7 +459,7 @@
         // Check that the next leader slot after 0 is slot 1
         assert_eq!(
             cache
-                .next_leader_slot(&pubkey, 0, &bank, Some(&blockstore))
+                .next_leader_slot(&pubkey, 0, &bank, Some(&blockstore), std::u64::MAX)
                 .unwrap()
                 .0,
             1
@@ -462,7 +471,7 @@
         blockstore.insert_shreds(shreds, None, false).unwrap();
         assert_eq!(
             cache
-                .next_leader_slot(&pubkey, 0, &bank, Some(&blockstore))
+                .next_leader_slot(&pubkey, 0, &bank, Some(&blockstore), std::u64::MAX)
                 .unwrap()
                 .0,
             1
@@ -475,7 +484,7 @@
         blockstore.insert_shreds(shreds, None, false).unwrap();
         assert_eq!(
             cache
-                .next_leader_slot(&pubkey, 0, &bank, Some(&blockstore))
+                .next_leader_slot(&pubkey, 0, &bank, Some(&blockstore), std::u64::MAX)
                 .unwrap()
                 .0,
             3
@@ -487,7 +496,8 @@
                 &pubkey,
                 2 * genesis_config.epoch_schedule.slots_per_epoch - 1, // no schedule generated for epoch 2
                 &bank,
-                Some(&blockstore)
+                Some(&blockstore),
+                std::u64::MAX
             ),
             None
         );
@@ -497,7 +507,8 @@
                 &Pubkey::new_rand(), // not in leader_schedule
                 0,
                 &bank,
-                Some(&blockstore)
+                Some(&blockstore),
+                std::u64::MAX
             ),
             None
         );
@@ -553,17 +564,29 @@
         // If the max root isn't set, we'll get None
         assert!(cache
-            .next_leader_slot(&node_pubkey, 0, &bank, None)
+            .next_leader_slot(&node_pubkey, 0, &bank, None, std::u64::MAX)
             .is_none());
         cache.set_root(&bank);
-        assert_eq!(
-            cache
-                .next_leader_slot(&node_pubkey, 0, &bank, None)
-                .unwrap()
-                .0,
-            expected_slot
-        );
+        let res = cache
+            .next_leader_slot(&node_pubkey, 0, &bank, None, std::u64::MAX)
+            .unwrap();
+        assert_eq!(res.0, expected_slot);
+        assert!(res.1 >= expected_slot + NUM_CONSECUTIVE_LEADER_SLOTS - 1);
+        let res = cache
+            .next_leader_slot(
+                &node_pubkey,
+                0,
+                &bank,
+                None,
+                NUM_CONSECUTIVE_LEADER_SLOTS - 1,
+            )
+            .unwrap();
+        assert_eq!(res.0, expected_slot);
+        assert_eq!(res.1, expected_slot + NUM_CONSECUTIVE_LEADER_SLOTS - 2);
     }
     #[test]