Speed up local cluster partitioning tests (#11177)

* Fix long-running local cluster partition tests by skipping slot warmup

Co-authored-by: Carl <carl@solana.com>
Author: carllin
Date:   2020-07-23 18:50:42 -07:00
Committed by: GitHub
parent 125a36abbc
commit 6578ad7d08
14 changed files with 221 additions and 197 deletions
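
The speedup comes from two changes visible in the diffs below: a test can now stake its validators directly in the genesis config (so there are no vote- and stake-account setup transactions to confirm, and no stake activation to wait out), and it can disable the epoch schedule's warmup period so every epoch has a fixed length from slot zero. A minimal sketch of the second knob, using `EpochSchedule::custom` just as the diff does (its third argument enables warmup epochs; the concrete slot counts assume `MINIMUM_SLOTS_PER_EPOCH` is 32, as in `solana_sdk` at the time):

```rust
use solana_sdk::epoch_schedule::EpochSchedule;

fn main() {
    // Warmup enabled: early epochs start at MINIMUM_SLOTS_PER_EPOCH slots
    // and double in length until they reach slots_per_epoch.
    let warmup = EpochSchedule::custom(64, 64, true);
    // Warmup disabled: every epoch is exactly slots_per_epoch long.
    let flat = EpochSchedule::custom(64, 64, false);

    assert_eq!(warmup.get_slots_in_epoch(0), 32);
    assert_eq!(flat.get_slots_in_epoch(0), 64);
    assert_eq!(flat.get_last_slot_in_epoch(2), 3 * 64 - 1);
}
```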


@@ -16,11 +16,10 @@ use solana_ledger::{
 use solana_sdk::{
     client::SyncClient,
     clock::{
-        self, Slot, DEFAULT_MS_PER_SLOT, DEFAULT_TICKS_PER_SECOND, DEFAULT_TICKS_PER_SLOT,
-        NUM_CONSECUTIVE_LEADER_SLOTS,
+        self, Slot, DEFAULT_TICKS_PER_SECOND, DEFAULT_TICKS_PER_SLOT, NUM_CONSECUTIVE_LEADER_SLOTS,
     },
     commitment_config::CommitmentConfig,
-    epoch_schedule::{EpochSchedule, MINIMUM_SLOTS_PER_EPOCH},
+    epoch_schedule::MINIMUM_SLOTS_PER_EPOCH,
     hash::Hash,
     poh_config::PohConfig,
     pubkey::Pubkey,
@@ -172,11 +171,6 @@ pub fn verify_ledger_ticks(ledger_path: &Path, ticks_per_slot: usize) {
     }
 }
 
-pub fn time_until_nth_epoch(epoch: u64, slots_per_epoch: u64, stakers_slot_offset: u64) -> u64 {
-    let epoch_schedule = EpochSchedule::custom(slots_per_epoch, stakers_slot_offset, true);
-    epoch_schedule.get_last_slot_in_epoch(epoch) * DEFAULT_MS_PER_SLOT
-}
-
 pub fn sleep_n_epochs(
     num_epochs: f64,
     config: &PohConfig,


@@ -11,9 +11,9 @@ use solana_core::{
     gossip_service::discover_cluster,
     validator::{Validator, ValidatorConfig},
 };
-use solana_ledger::{
-    create_new_tmp_ledger,
-    genesis_utils::{create_genesis_config_with_leader, GenesisConfigInfo},
-};
+use solana_ledger::create_new_tmp_ledger;
+use solana_runtime::genesis_utils::{
+    create_genesis_config_with_vote_accounts, GenesisConfigInfo, ValidatorVoteKeypairs,
+};
 use solana_sdk::{
     client::SyncClient,
@@ -49,8 +49,12 @@ pub struct ClusterConfig {
     pub validator_configs: Vec<ValidatorConfig>,
     /// Number of nodes that are unstaked and not voting (a.k.a listening)
     pub num_listeners: u64,
-    /// The specific pubkeys of each node if specified
-    pub validator_keys: Option<Vec<Arc<Keypair>>>,
+    /// List of tuples (keypair, in_genesis) for each node if specified. If
+    /// `in_genesis` == true, the validator's vote and stake accounts will be
+    /// inserted into the genesis block instead of being warmed up through
+    /// vote-account creation transactions. The first validator (the bootstrap
+    /// leader) is always assumed to have `in_genesis` == true.
+    pub validator_keys: Option<Vec<(Arc<Keypair>, bool)>>,
     /// The stakes of each node
     pub node_stakes: Vec<u64>,
     /// The total lamports available to the cluster
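
To make the new field's shape concrete, here is a hypothetical value for a three-node test (the mix of flags is illustrative, not from the diff): the first two validators are staked in genesis and active immediately, while the third starts unstaked and goes through the usual vote-account setup path.

```rust
use std::sync::Arc;
use solana_sdk::signature::Keypair;

fn main() {
    let validator_keys: Option<Vec<(Arc<Keypair>, bool)>> = Some(vec![
        (Arc::new(Keypair::new()), true),  // bootstrap leader: forced in_genesis anyway
        (Arc::new(Keypair::new()), true),  // staked in genesis, active at epoch 0
        (Arc::new(Keypair::new()), false), // funded and staked later via transactions
    ]);
    assert_eq!(validator_keys.unwrap().len(), 3);
}
```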
@@ -58,6 +62,7 @@ pub struct ClusterConfig {
     pub ticks_per_slot: u64,
     pub slots_per_epoch: u64,
     pub stakers_slot_offset: u64,
+    pub skip_warmup_slots: bool,
     pub native_instruction_processors: Vec<(String, Pubkey)>,
     pub operating_mode: OperatingMode,
     pub poh_config: PohConfig,
@@ -77,6 +82,7 @@ impl Default for ClusterConfig {
             native_instruction_processors: vec![],
             operating_mode: OperatingMode::Development,
             poh_config: PohConfig::default(),
+            skip_warmup_slots: false,
         }
     }
 }
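
The default keeps warmup on, so existing tests are unaffected. A hypothetical test setup that opts in via struct-update syntax (the stake and lamport values are illustrative; the config would then be handed to `LocalCluster::new`):

```rust
// Sketch: a partition test that wants full stake at slot 0 and
// fixed-length epochs. Everything else keeps its default.
let config = ClusterConfig {
    cluster_lamports: 10_000,
    node_stakes: vec![100; 3],
    validator_configs: vec![ValidatorConfig::default(); 3],
    skip_warmup_slots: true,
    ..ClusterConfig::default()
};
```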
@@ -108,32 +114,57 @@ impl LocalCluster {
     pub fn new(config: &ClusterConfig) -> Self {
         assert_eq!(config.validator_configs.len(), config.node_stakes.len());
-        let validator_keys = {
+        let mut validator_keys = {
             if let Some(ref keys) = config.validator_keys {
                 assert_eq!(config.validator_configs.len(), keys.len());
                 keys.clone()
             } else {
-                iter::repeat_with(|| Arc::new(Keypair::new()))
+                iter::repeat_with(|| (Arc::new(Keypair::new()), false))
                     .take(config.validator_configs.len())
                     .collect()
             }
         };
-        let leader_keypair = &validator_keys[0];
+
+        // Bootstrap leader should always be in genesis block
+        validator_keys[0].1 = true;
+        let (keys_in_genesis, stakes_in_genesis): (Vec<ValidatorVoteKeypairs>, Vec<u64>) =
+            validator_keys
+                .iter()
+                .zip(&config.node_stakes)
+                .filter_map(|((node_keypair, in_genesis), stake)| {
+                    if *in_genesis {
+                        Some((
+                            ValidatorVoteKeypairs {
+                                node_keypair: node_keypair.clone(),
+                                vote_keypair: Arc::new(Keypair::new()),
+                                stake_keypair: Arc::new(Keypair::new()),
+                            },
+                            stake,
+                        ))
+                    } else {
+                        None
+                    }
+                })
+                .unzip();
+        let leader_keypair = &keys_in_genesis[0].node_keypair;
+        let leader_vote_keypair = &keys_in_genesis[0].vote_keypair;
         let leader_pubkey = leader_keypair.pubkey();
         let leader_node = Node::new_localhost_with_pubkey(&leader_pubkey);
         let GenesisConfigInfo {
             mut genesis_config,
             mint_keypair,
-            voting_keypair,
-        } = create_genesis_config_with_leader(
+            ..
+        } = create_genesis_config_with_vote_accounts(
             config.cluster_lamports,
-            &leader_pubkey,
-            config.node_stakes[0],
+            &keys_in_genesis,
+            stakes_in_genesis,
         );
         genesis_config.ticks_per_slot = config.ticks_per_slot;
-        genesis_config.epoch_schedule =
-            EpochSchedule::custom(config.slots_per_epoch, config.stakers_slot_offset, true);
+        genesis_config.epoch_schedule = EpochSchedule::custom(
+            config.slots_per_epoch,
+            config.stakers_slot_offset,
+            !config.skip_warmup_slots,
+        );
         genesis_config.operating_mode = config.operating_mode;
         genesis_config.poh_config = config.poh_config.clone();
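
The `filter_map`/`unzip` above is the heart of the change: only validators flagged `in_genesis` contribute keypairs and stakes to `create_genesis_config_with_vote_accounts`; the rest stay unstaked until `add_validator` funds them. The same splitting pattern on plain data, with illustrative names, just to make the shapes visible:

```rust
fn main() {
    // (name, in_genesis, stake) triples standing in for the keypair
    // tuples the constructor builds above.
    let nodes = [("leader", true, 100u64), ("v1", false, 50), ("v2", true, 75)];

    // Keep only the in-genesis nodes and split them into parallel vectors.
    let (in_genesis, stakes): (Vec<&str>, Vec<u64>) = nodes
        .iter()
        .filter_map(|&(name, flagged, stake)| flagged.then(|| (name, stake)))
        .unzip();

    assert_eq!(in_genesis, ["leader", "v2"]);
    assert_eq!(stakes, [100, 75]);
}
```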
@@ -167,7 +198,6 @@ impl LocalCluster {
         let (leader_ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_config);
         let leader_contact_info = leader_node.info.clone();
-        let leader_voting_keypair = Arc::new(voting_keypair);
         let mut leader_config = config.validator_configs[0].clone();
         leader_config.rpc_ports = Some((
             leader_node.info.rpc.port(),
@@ -176,10 +206,10 @@
         leader_config.account_paths = vec![leader_ledger_path.join("accounts")];
         let leader_server = Validator::new(
             leader_node,
-            &leader_keypair,
+            leader_keypair,
             &leader_ledger_path,
-            &leader_voting_keypair.pubkey(),
-            vec![leader_voting_keypair.clone()],
+            &leader_vote_keypair.pubkey(),
+            vec![leader_vote_keypair.clone()],
             None,
             true,
             &leader_config,
@@ -188,7 +218,7 @@
         let mut validators = HashMap::new();
         let leader_info = ValidatorInfo {
             keypair: leader_keypair.clone(),
-            voting_keypair: leader_voting_keypair,
+            voting_keypair: leader_vote_keypair.clone(),
             ledger_path: leader_ledger_path,
             contact_info: leader_contact_info.clone(),
         };
@@ -208,12 +238,21 @@
             genesis_config,
         };
 
-        for (stake, validator_config, key) in izip!(
+        let node_pubkey_to_vote_key: HashMap<Pubkey, Arc<Keypair>> = keys_in_genesis
+            .into_iter()
+            .map(|keypairs| (keypairs.node_keypair.pubkey(), keypairs.vote_keypair))
+            .collect();
+        for (stake, validator_config, (key, _)) in izip!(
             (&config.node_stakes[1..]).iter(),
             config.validator_configs[1..].iter(),
             validator_keys[1..].iter(),
         ) {
-            cluster.add_validator(validator_config, *stake, key.clone());
+            cluster.add_validator(
+                validator_config,
+                *stake,
+                key.clone(),
+                node_pubkey_to_vote_key.get(&key.pubkey()).cloned(),
+            );
         }
 
         let listener_config = ValidatorConfig {
@@ -221,7 +260,7 @@
             ..config.validator_configs[0].clone()
         };
         (0..config.num_listeners).for_each(|_| {
-            cluster.add_validator(&listener_config, 0, Arc::new(Keypair::new()));
+            cluster.add_validator(&listener_config, 0, Arc::new(Keypair::new()), None);
         });
 
         discover_cluster(
@@ -257,6 +296,7 @@ impl LocalCluster {
         validator_config: &ValidatorConfig,
         stake: u64,
         validator_keypair: Arc<Keypair>,
+        mut voting_keypair: Option<Arc<Keypair>>,
     ) -> Pubkey {
         let client = create_client(
             self.entry_point_info.client_facing_addr(),
@@ -264,7 +304,10 @@
         );
 
         // Must have enough tokens to fund vote account and set delegate
-        let voting_keypair = Keypair::new();
+        let should_create_vote_pubkey = voting_keypair.is_none();
+        if voting_keypair.is_none() {
+            voting_keypair = Some(Arc::new(Keypair::new()));
+        }
         let validator_pubkey = validator_keypair.pubkey();
         let validator_node = Node::new_localhost_with_pubkey(&validator_keypair.pubkey());
         let contact_info = validator_node.info.clone();
@@ -275,24 +318,25 @@
             info!("listener {} ", validator_pubkey,);
         } else {
             // Give the validator some lamports to setup vote accounts
-            let validator_balance = Self::transfer_with_client(
-                &client,
-                &self.funding_keypair,
-                &validator_pubkey,
-                stake * 2 + 2,
-            );
-            info!(
-                "validator {} balance {}",
-                validator_pubkey, validator_balance
-            );
-            Self::setup_vote_and_stake_accounts(
-                &client,
-                &voting_keypair,
-                &validator_keypair,
-                stake,
-            )
-            .unwrap();
+            if should_create_vote_pubkey {
+                let validator_balance = Self::transfer_with_client(
+                    &client,
+                    &self.funding_keypair,
+                    &validator_pubkey,
+                    stake * 2 + 2,
+                );
+                info!(
+                    "validator {} balance {}",
+                    validator_pubkey, validator_balance
+                );
+                Self::setup_vote_and_stake_accounts(
+                    &client,
+                    voting_keypair.as_ref().unwrap(),
+                    &validator_keypair,
+                    stake,
+                )
+                .unwrap();
+            }
         }
 
         let mut config = validator_config.clone();
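
The two branches correspond to the two call shapes in `LocalCluster::new` above. In sketch form (a fragment, not a compilable test; `cluster`, the configs, and the keypairs are assumed to be in scope):

```rust
// Vote account already created in genesis: pass it through and skip the
// transfer + setup_vote_and_stake_accounts transactions entirely.
cluster.add_validator(&validator_config, stake, node_key.clone(), Some(vote_key.clone()));

// No pre-existing vote account (e.g. a listener): add_validator makes a
// fresh keypair and, for staked nodes, runs the on-chain setup.
cluster.add_validator(&listener_config, 0, Arc::new(Keypair::new()), None);
```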
@@ -300,8 +344,8 @@ impl LocalCluster {
             validator_node.info.rpc.port(),
             validator_node.info.rpc_pubsub.port(),
         ));
-        let voting_keypair = Arc::new(voting_keypair);
         config.account_paths = vec![ledger_path.join("accounts")];
+        let voting_keypair = voting_keypair.unwrap();
         let validator_server = Validator::new(
             validator_node,
             &validator_keypair,
@@ -417,6 +461,10 @@
     ) -> Result<()> {
         let vote_account_pubkey = vote_account.pubkey();
         let node_pubkey = from_account.pubkey();
+        info!(
+            "setup_vote_and_stake_accounts: {}, {}",
+            node_pubkey, vote_account_pubkey
+        );
         let stake_account_keypair = Keypair::new();
         let stake_account_pubkey = stake_account_keypair.pubkey();