Speed up local cluster partitioning tests (#11177)
* Fix long local cluster partition tests by skipping slot warmup

Co-authored-by: Carl <carl@solana.com>
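The change rests on two knobs that show up throughout the diff: validator vote and stake accounts can now be created directly in genesis, and the test cluster can disable epoch warmup. A rough sketch of what disabling warmup does, using `EpochSchedule::custom` as it appears in the diff (method names assumed from the solana_sdk version in this tree):

```rust
use solana_sdk::epoch_schedule::EpochSchedule;

fn main() {
    let slots_per_epoch = 2048;
    // With warmup enabled, early epochs are short and grow toward
    // `slots_per_epoch`, so a test has to wait through several epochs
    // before `slots_in_epoch` reaches its steady-state value.
    let with_warmup = EpochSchedule::custom(slots_per_epoch, slots_per_epoch, true);
    // With warmup disabled (what `skip_warmup_slots: true` requests below),
    // every epoch is full-length from slot 0.
    let without_warmup = EpochSchedule::custom(slots_per_epoch, slots_per_epoch, false);

    println!("epoch 0 with warmup:    {}", with_warmup.get_slots_in_epoch(0));
    println!("epoch 0 without warmup: {}", without_warmup.get_slots_in_epoch(0));
}
```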
@@ -16,11 +16,10 @@ use solana_ledger::{
 use solana_sdk::{
     client::SyncClient,
     clock::{
-        self, Slot, DEFAULT_MS_PER_SLOT, DEFAULT_TICKS_PER_SECOND, DEFAULT_TICKS_PER_SLOT,
-        NUM_CONSECUTIVE_LEADER_SLOTS,
+        self, Slot, DEFAULT_TICKS_PER_SECOND, DEFAULT_TICKS_PER_SLOT, NUM_CONSECUTIVE_LEADER_SLOTS,
     },
     commitment_config::CommitmentConfig,
-    epoch_schedule::{EpochSchedule, MINIMUM_SLOTS_PER_EPOCH},
+    epoch_schedule::MINIMUM_SLOTS_PER_EPOCH,
     hash::Hash,
     poh_config::PohConfig,
     pubkey::Pubkey,
@@ -172,11 +171,6 @@ pub fn verify_ledger_ticks(ledger_path: &Path, ticks_per_slot: usize) {
     }
 }
 
-pub fn time_until_nth_epoch(epoch: u64, slots_per_epoch: u64, stakers_slot_offset: u64) -> u64 {
-    let epoch_schedule = EpochSchedule::custom(slots_per_epoch, stakers_slot_offset, true);
-    epoch_schedule.get_last_slot_in_epoch(epoch) * DEFAULT_MS_PER_SLOT
-}
-
 pub fn sleep_n_epochs(
     num_epochs: f64,
     config: &PohConfig,
@@ -11,9 +11,9 @@ use solana_core::{
     gossip_service::discover_cluster,
     validator::{Validator, ValidatorConfig},
 };
-use solana_ledger::{
-    create_new_tmp_ledger,
-    genesis_utils::{create_genesis_config_with_leader, GenesisConfigInfo},
+use solana_ledger::create_new_tmp_ledger;
+use solana_runtime::genesis_utils::{
+    create_genesis_config_with_vote_accounts, GenesisConfigInfo, ValidatorVoteKeypairs,
 };
 use solana_sdk::{
     client::SyncClient,
@@ -49,8 +49,12 @@ pub struct ClusterConfig {
     pub validator_configs: Vec<ValidatorConfig>,
     /// Number of nodes that are unstaked and not voting (a.k.a listening)
     pub num_listeners: u64,
-    /// The specific pubkeys of each node if specified
-    pub validator_keys: Option<Vec<Arc<Keypair>>>,
+    /// List of tuples (pubkeys, in_genesis) of each node if specified. If
+    /// `in_genesis` == true, the validator's vote and stake accounts
+    // will be inserted into the genesis block instead of warming up through
+    // creating the vote accounts. The first validator (bootstrap leader) automatically
+    // is assumed to be `in_genesis` == true.
+    pub validator_keys: Option<Vec<(Arc<Keypair>, bool)>>,
     /// The stakes of each node
     pub node_stakes: Vec<u64>,
     /// The total lamports available to the cluster
@@ -58,6 +62,7 @@ pub struct ClusterConfig {
     pub ticks_per_slot: u64,
     pub slots_per_epoch: u64,
     pub stakers_slot_offset: u64,
+    pub skip_warmup_slots: bool,
     pub native_instruction_processors: Vec<(String, Pubkey)>,
     pub operating_mode: OperatingMode,
     pub poh_config: PohConfig,
@@ -77,6 +82,7 @@ impl Default for ClusterConfig {
             native_instruction_processors: vec![],
             operating_mode: OperatingMode::Development,
             poh_config: PohConfig::default(),
+            skip_warmup_slots: false,
         }
     }
 }
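Together, the new `validator_keys: Option<Vec<(Arc<Keypair>, bool)>>` and `skip_warmup_slots` fields let a test declare its validators up front, place their vote and stake accounts straight into genesis, and start with full-length epochs. A minimal sketch of how a test might fill these fields; the stake and lamport values are illustrative only, and `ClusterConfig`/`ValidatorConfig` from local_cluster.rs are assumed to be in scope:

```rust
use std::sync::Arc;
use solana_sdk::signature::Keypair;

// Sketch only: builds a config in the shape the new fields expect.
fn partition_test_config(num_nodes: usize) -> ClusterConfig {
    let keys: Vec<(Arc<Keypair>, bool)> = (0..num_nodes)
        // `true` marks every validator as `in_genesis`, so its vote and
        // stake accounts are created in the genesis config instead of
        // being funded and warmed up after the cluster boots.
        .map(|_| (Arc::new(Keypair::new()), true))
        .collect();

    ClusterConfig {
        validator_keys: Some(keys),
        node_stakes: vec![100; num_nodes],
        cluster_lamports: 100 * num_nodes as u64 * 2,
        validator_configs: vec![ValidatorConfig::default(); num_nodes],
        // Skip the epoch warmup period so slots_per_epoch is reached
        // immediately and the test does not stall waiting for it.
        skip_warmup_slots: true,
        ..ClusterConfig::default()
    }
}
```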
@@ -108,32 +114,57 @@ impl LocalCluster {
 
     pub fn new(config: &ClusterConfig) -> Self {
         assert_eq!(config.validator_configs.len(), config.node_stakes.len());
-        let validator_keys = {
+        let mut validator_keys = {
             if let Some(ref keys) = config.validator_keys {
                 assert_eq!(config.validator_configs.len(), keys.len());
                 keys.clone()
             } else {
-                iter::repeat_with(|| Arc::new(Keypair::new()))
+                iter::repeat_with(|| (Arc::new(Keypair::new()), false))
                     .take(config.validator_configs.len())
                     .collect()
             }
         };
 
-        let leader_keypair = &validator_keys[0];
+        // Bootstrap leader should always be in genesis block
+        validator_keys[0].1 = true;
+        let (keys_in_genesis, stakes_in_genesis): (Vec<ValidatorVoteKeypairs>, Vec<u64>) =
+            validator_keys
+                .iter()
+                .zip(&config.node_stakes)
+                .filter_map(|((node_keypair, in_genesis), stake)| {
+                    if *in_genesis {
+                        Some((
+                            ValidatorVoteKeypairs {
+                                node_keypair: node_keypair.clone(),
+                                vote_keypair: Arc::new(Keypair::new()),
+                                stake_keypair: Arc::new(Keypair::new()),
+                            },
+                            stake,
+                        ))
+                    } else {
+                        None
+                    }
+                })
+                .unzip();
+        let leader_keypair = &keys_in_genesis[0].node_keypair;
+        let leader_vote_keypair = &keys_in_genesis[0].vote_keypair;
         let leader_pubkey = leader_keypair.pubkey();
         let leader_node = Node::new_localhost_with_pubkey(&leader_pubkey);
         let GenesisConfigInfo {
             mut genesis_config,
             mint_keypair,
-            voting_keypair,
-        } = create_genesis_config_with_leader(
+            ..
+        } = create_genesis_config_with_vote_accounts(
             config.cluster_lamports,
-            &leader_pubkey,
-            config.node_stakes[0],
+            &keys_in_genesis,
+            stakes_in_genesis,
         );
         genesis_config.ticks_per_slot = config.ticks_per_slot;
-        genesis_config.epoch_schedule =
-            EpochSchedule::custom(config.slots_per_epoch, config.stakers_slot_offset, true);
+        genesis_config.epoch_schedule = EpochSchedule::custom(
+            config.slots_per_epoch,
+            config.stakers_slot_offset,
+            !config.skip_warmup_slots,
+        );
         genesis_config.operating_mode = config.operating_mode;
         genesis_config.poh_config = config.poh_config.clone();
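The heart of the new `LocalCluster::new` is splitting the `(keypair, in_genesis)` list into the validators that go into genesis (paired with their stakes) and the rest, via `filter_map` plus `unzip`. A self-contained sketch of that pattern, with plain strings standing in for keypairs and `ValidatorVoteKeypairs`:

```rust
fn main() {
    // (validator name, in_genesis), mirroring validator_keys in the diff,
    // zipped with the per-node stakes from config.node_stakes.
    let validators = vec![("bootstrap", true), ("v1", true), ("v2", false)];
    let stakes = vec![100u64, 50, 25];

    // Keep only the in-genesis validators, then split the surviving
    // (name, stake) pairs into two parallel vectors, just like
    // (keys_in_genesis, stakes_in_genesis) above.
    let (names_in_genesis, stakes_in_genesis): (Vec<&str>, Vec<u64>) = validators
        .iter()
        .zip(&stakes)
        .filter_map(|((name, in_genesis), stake)| {
            if *in_genesis {
                Some((*name, *stake))
            } else {
                None
            }
        })
        .unzip();

    assert_eq!(names_in_genesis, vec!["bootstrap", "v1"]);
    assert_eq!(stakes_in_genesis, vec![100, 50]);
}
```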
@@ -167,7 +198,6 @@ impl LocalCluster {
 
         let (leader_ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_config);
         let leader_contact_info = leader_node.info.clone();
-        let leader_voting_keypair = Arc::new(voting_keypair);
         let mut leader_config = config.validator_configs[0].clone();
         leader_config.rpc_ports = Some((
             leader_node.info.rpc.port(),
@@ -176,10 +206,10 @@ impl LocalCluster {
         leader_config.account_paths = vec![leader_ledger_path.join("accounts")];
         let leader_server = Validator::new(
             leader_node,
-            &leader_keypair,
+            leader_keypair,
             &leader_ledger_path,
-            &leader_voting_keypair.pubkey(),
-            vec![leader_voting_keypair.clone()],
+            &leader_vote_keypair.pubkey(),
+            vec![leader_vote_keypair.clone()],
             None,
             true,
             &leader_config,
@@ -188,7 +218,7 @@ impl LocalCluster {
         let mut validators = HashMap::new();
         let leader_info = ValidatorInfo {
             keypair: leader_keypair.clone(),
-            voting_keypair: leader_voting_keypair,
+            voting_keypair: leader_vote_keypair.clone(),
             ledger_path: leader_ledger_path,
             contact_info: leader_contact_info.clone(),
         };
@@ -208,12 +238,21 @@ impl LocalCluster {
             genesis_config,
         };
 
-        for (stake, validator_config, key) in izip!(
+        let node_pubkey_to_vote_key: HashMap<Pubkey, Arc<Keypair>> = keys_in_genesis
+            .into_iter()
+            .map(|keypairs| (keypairs.node_keypair.pubkey(), keypairs.vote_keypair))
+            .collect();
+        for (stake, validator_config, (key, _)) in izip!(
             (&config.node_stakes[1..]).iter(),
             config.validator_configs[1..].iter(),
             validator_keys[1..].iter(),
         ) {
-            cluster.add_validator(validator_config, *stake, key.clone());
+            cluster.add_validator(
+                validator_config,
+                *stake,
+                key.clone(),
+                node_pubkey_to_vote_key.get(&key.pubkey()).cloned(),
+            );
         }
 
         let listener_config = ValidatorConfig {
@@ -221,7 +260,7 @@ impl LocalCluster {
             ..config.validator_configs[0].clone()
         };
         (0..config.num_listeners).for_each(|_| {
-            cluster.add_validator(&listener_config, 0, Arc::new(Keypair::new()));
+            cluster.add_validator(&listener_config, 0, Arc::new(Keypair::new()), None);
         });
 
         discover_cluster(
@@ -257,6 +296,7 @@ impl LocalCluster {
         validator_config: &ValidatorConfig,
         stake: u64,
         validator_keypair: Arc<Keypair>,
+        mut voting_keypair: Option<Arc<Keypair>>,
     ) -> Pubkey {
         let client = create_client(
             self.entry_point_info.client_facing_addr(),
@@ -264,7 +304,10 @@ impl LocalCluster {
         );
 
         // Must have enough tokens to fund vote account and set delegate
-        let voting_keypair = Keypair::new();
+        let should_create_vote_pubkey = voting_keypair.is_none();
+        if voting_keypair.is_none() {
+            voting_keypair = Some(Arc::new(Keypair::new()));
+        }
         let validator_pubkey = validator_keypair.pubkey();
         let validator_node = Node::new_localhost_with_pubkey(&validator_keypair.pubkey());
         let contact_info = validator_node.info.clone();
@@ -275,24 +318,25 @@ impl LocalCluster {
             info!("listener {} ", validator_pubkey,);
         } else {
             // Give the validator some lamports to setup vote accounts
-            let validator_balance = Self::transfer_with_client(
-                &client,
-                &self.funding_keypair,
-                &validator_pubkey,
-                stake * 2 + 2,
-            );
-            info!(
-                "validator {} balance {}",
-                validator_pubkey, validator_balance
-            );
-
-            Self::setup_vote_and_stake_accounts(
-                &client,
-                &voting_keypair,
-                &validator_keypair,
-                stake,
-            )
-            .unwrap();
+            if should_create_vote_pubkey {
+                let validator_balance = Self::transfer_with_client(
+                    &client,
+                    &self.funding_keypair,
+                    &validator_pubkey,
+                    stake * 2 + 2,
+                );
+                info!(
+                    "validator {} balance {}",
+                    validator_pubkey, validator_balance
+                );
+                Self::setup_vote_and_stake_accounts(
+                    &client,
+                    voting_keypair.as_ref().unwrap(),
+                    &validator_keypair,
+                    stake,
+                )
+                .unwrap();
+            }
         }
 
         let mut config = validator_config.clone();
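`add_validator` now takes the vote keypair as an `Option`: callers that pre-registered the validator in genesis pass `Some(vote_keypair)` and skip the on-chain vote and stake account setup, while `None` keeps the old behavior. A minimal sketch of that control flow, with a placeholder `Keypair` type rather than the real solana_sdk one:

```rust
use std::sync::Arc;

// Placeholder standing in for solana_sdk's Keypair in this sketch.
struct Keypair;

fn add_validator(mut voting_keypair: Option<Arc<Keypair>>) {
    // Remember whether the caller supplied a vote keypair before filling
    // in a default; only a freshly generated one needs the on-chain setup.
    let should_create_vote_pubkey = voting_keypair.is_none();
    if voting_keypair.is_none() {
        voting_keypair = Some(Arc::new(Keypair));
    }
    let _voting_keypair = voting_keypair.unwrap();

    if should_create_vote_pubkey {
        // Old path: fund the validator and create its vote and stake
        // accounts via transactions (slow).
        println!("creating vote/stake accounts on-chain");
    } else {
        // New path: accounts already exist in genesis, nothing to do.
        println!("vote account pre-registered in genesis");
    }
}

fn main() {
    add_validator(None); // legacy behavior
    add_validator(Some(Arc::new(Keypair))); // genesis-registered validator
}
```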
@@ -300,8 +344,8 @@ impl LocalCluster {
             validator_node.info.rpc.port(),
             validator_node.info.rpc_pubsub.port(),
         ));
-        let voting_keypair = Arc::new(voting_keypair);
         config.account_paths = vec![ledger_path.join("accounts")];
+        let voting_keypair = voting_keypair.unwrap();
         let validator_server = Validator::new(
             validator_node,
             &validator_keypair,
@@ -417,6 +461,10 @@ impl LocalCluster {
     ) -> Result<()> {
         let vote_account_pubkey = vote_account.pubkey();
+        let node_pubkey = from_account.pubkey();
+        info!(
+            "setup_vote_and_stake_accounts: {}, {}",
+            node_pubkey, vote_account_pubkey
+        );
         let stake_account_keypair = Keypair::new();
         let stake_account_pubkey = stake_account_keypair.pubkey();
 
@@ -229,7 +229,6 @@ fn run_cluster_partition<E, F>(
         .collect();
     assert_eq!(node_stakes.len(), num_nodes);
     let cluster_lamports = node_stakes.iter().sum::<u64>() * 2;
-    let partition_start_epoch = 2;
    let enable_partition = Arc::new(AtomicBool::new(true));
     let mut validator_config = ValidatorConfig::default();
     validator_config.enable_partition = Some(enable_partition.clone());
@@ -243,7 +242,7 @@ fn run_cluster_partition<E, F>(
     assert_eq!(validator_keys.len(), num_nodes);
     let num_slots_per_rotation = leader_schedule.num_slots() as u64;
     let fixed_schedule = FixedSchedule {
-        start_epoch: partition_start_epoch,
+        start_epoch: 0,
         leader_schedule: Arc::new(leader_schedule),
     };
     validator_config.fixed_leader_schedule = Some(fixed_schedule);
@@ -261,11 +260,20 @@ fn run_cluster_partition<E, F>(
         }
     };
 
+    let slots_per_epoch = 2048;
     let config = ClusterConfig {
         cluster_lamports,
         node_stakes,
         validator_configs: vec![validator_config; num_nodes],
-        validator_keys: Some(validator_keys),
+        validator_keys: Some(
+            validator_keys
+                .into_iter()
+                .zip(iter::repeat_with(|| true))
+                .collect(),
+        ),
+        slots_per_epoch,
+        stakers_slot_offset: slots_per_epoch,
+        skip_warmup_slots: true,
         ..ClusterConfig::default()
     };
 
@@ -275,33 +283,29 @@ fn run_cluster_partition<E, F>(
     );
     let mut cluster = LocalCluster::new(&config);
 
     info!("PARTITION_TEST spend_and_verify_all_nodes(), ensure all nodes are caught up");
     cluster_tests::spend_and_verify_all_nodes(
         &cluster.entry_point_info,
         &cluster.funding_keypair,
         num_nodes,
         HashSet::new(),
     );
 
     let cluster_nodes = discover_cluster(&cluster.entry_point_info.gossip, num_nodes).unwrap();
 
     // Check epochs have correct number of slots
     info!("PARTITION_TEST sleeping until partition starting condition",);
-    loop {
-        let mut reached_epoch = true;
-        for node in &cluster_nodes {
-            let node_client = RpcClient::new_socket(node.rpc);
-            if let Ok(epoch_info) = node_client.get_epoch_info() {
-                info!("slots_per_epoch: {:?}", epoch_info);
-                if epoch_info.slots_in_epoch <= (1 << VOTE_THRESHOLD_DEPTH) {
-                    reached_epoch = false;
-                    break;
-                }
-            } else {
-                reached_epoch = false;
-            }
-        }
-
-        if reached_epoch {
-            info!("PARTITION_TEST start partition");
-            enable_partition.store(false, Ordering::Relaxed);
-            on_partition_start(&mut cluster);
-            break;
-        } else {
-            sleep(Duration::from_millis(100));
-        }
+    for node in &cluster_nodes {
+        let node_client = RpcClient::new_socket(node.rpc);
+        let epoch_info = node_client.get_epoch_info().unwrap();
+        info!("slots_per_epoch: {:?}", epoch_info);
+        assert_eq!(epoch_info.slots_in_epoch, slots_per_epoch);
     }
 
+    info!("PARTITION_TEST start partition");
+    enable_partition.store(false, Ordering::Relaxed);
+    on_partition_start(&mut cluster);
+
     sleep(Duration::from_millis(leader_schedule_time));
 
     info!("PARTITION_TEST remove partition");
@@ -472,7 +476,6 @@ fn run_kill_partition_switch_threshold<F>(
 }
 
 #[test]
-#[ignore]
 #[serial]
 fn test_kill_partition_switch_threshold_no_progress() {
     let max_switch_threshold_failure_pct = 1.0 - 2.0 * SWITCH_FORK_THRESHOLD;
@@ -501,9 +504,8 @@ fn test_kill_partition_switch_threshold_no_progress() {
 }
 
 #[test]
-#[ignore]
 #[serial]
-fn test_kill_partition_switch_threshold() {
+fn test_kill_partition_switch_threshold_progress() {
     let max_switch_threshold_failure_pct = 1.0 - 2.0 * SWITCH_FORK_THRESHOLD;
     let total_stake = 10_000;
 
@@ -760,7 +762,7 @@ fn test_frozen_account_from_genesis() {
         Arc::new(solana_sdk::signature::keypair_from_seed(&[0u8; 32]).unwrap());
 
     let config = ClusterConfig {
-        validator_keys: Some(vec![validator_identity.clone()]),
+        validator_keys: Some(vec![(validator_identity.clone(), true)]),
         node_stakes: vec![100; 1],
        cluster_lamports: 1_000,
         validator_configs: vec![
@@ -788,7 +790,7 @@ fn test_frozen_account_from_snapshot() {
     snapshot_test_config.validator_config.frozen_accounts = vec![validator_identity.pubkey()];
 
     let config = ClusterConfig {
-        validator_keys: Some(vec![validator_identity.clone()]),
+        validator_keys: Some(vec![(validator_identity.clone(), true)]),
         node_stakes: vec![100; 1],
         cluster_lamports: 1_000,
         validator_configs: vec![snapshot_test_config.validator_config.clone()],
@@ -864,6 +866,7 @@ fn test_consistency_halt() {
         &validator_snapshot_test_config.validator_config,
         validator_stake as u64,
         Arc::new(Keypair::new()),
+        None,
     );
     let num_nodes = 2;
     assert_eq!(
@@ -958,6 +961,7 @@ fn test_snapshot_download() {
             &validator_snapshot_test_config.validator_config,
             stake,
             Arc::new(Keypair::new()),
+            None,
         );
     }
 
@@ -1095,6 +1099,7 @@ fn test_snapshots_blockstore_floor() {
         &validator_snapshot_test_config.validator_config,
         validator_stake,
         Arc::new(Keypair::new()),
+        None,
     );
     let all_pubkeys = cluster.get_node_pubkeys();
     let validator_id = all_pubkeys