Make minimum warmup period 32 slots long (#4031)

* Make minimum warmup period 32 slots long

* PR fixes
carllin authored 2019-04-29 15:26:52 -07:00, committed by GitHub
parent bae0aadafa
commit 73f250f03a
8 changed files with 69 additions and 42 deletions
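
Every hunk below imports MINIMUM_SLOT_LENGTH from solana_runtime::bank, but the bank.rs hunk itself is not shown in this view (only 7 of the 8 changed files appear). A minimal sketch of the constant that the PR title and these imports imply, assuming it is defined in runtime/src/bank.rs:

    // runtime/src/bank.rs (assumed location; this hunk is not shown here).
    // Per the PR title, the shortest warmup epoch is 32 slots long;
    // warmup epochs then grow toward the configured slots_per_epoch.
    pub const MINIMUM_SLOT_LENGTH: usize = 32;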

View File

@@ -10,6 +10,7 @@ use crate::gossip_service::discover_nodes;
 use crate::locktower::VOTE_THRESHOLD_DEPTH;
 use crate::poh_service::PohServiceConfig;
 use solana_client::thin_client::create_client;
+use solana_runtime::bank::MINIMUM_SLOT_LENGTH;
 use solana_sdk::client::SyncClient;
 use solana_sdk::hash::Hash;
 use solana_sdk::signature::{Keypair, KeypairUtil, Signature};
@@ -147,21 +148,23 @@ pub fn kill_entry_and_spend_and_verify_rest(
     entry_point_info: &ContactInfo,
     funding_keypair: &Keypair,
     nodes: usize,
+    slot_millis: u64,
 ) {
     solana_logger::setup();
     let cluster_nodes = discover_nodes(&entry_point_info.gossip, nodes).unwrap();
     assert!(cluster_nodes.len() >= nodes);
     let client = create_client(entry_point_info.client_facing_addr(), FULLNODE_PORT_RANGE);
+    let first_two_epoch_slots = MINIMUM_SLOT_LENGTH * 3;
     info!("sleeping for 2 leader fortnights");
     sleep(Duration::from_millis(
-        SLOT_MILLIS * NUM_CONSECUTIVE_LEADER_SLOTS * 2,
+        slot_millis * first_two_epoch_slots as u64,
     ));
-    info!("done sleeping for 2 fortnights");
+    info!("done sleeping for first 2 warmup epochs");
     info!("killing entry point");
     assert!(client.fullnode_exit().unwrap());
-    info!("sleeping for 2 leader fortnights");
+    info!("sleeping for some time");
     sleep(Duration::from_millis(
-        SLOT_MILLIS * NUM_CONSECUTIVE_LEADER_SLOTS,
+        slot_millis * NUM_CONSECUTIVE_LEADER_SLOTS,
     ));
     info!("done sleeping for 2 fortnights");
     for ingress_node in &cluster_nodes {
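
A note on the new first_two_epoch_slots value: assuming warmup epoch lengths double from the minimum, epoch 0 spans 32 slots and epoch 1 spans 64, so waiting out the first two epochs means waiting MINIMUM_SLOT_LENGTH * 3 slots. A sketch of that arithmetic:

    // Sketch, assuming epoch lengths double from MINIMUM_SLOT_LENGTH
    // during warmup (32-slot minimum per the PR title):
    const MINIMUM_SLOT_LENGTH: usize = 32;
    let epoch_0 = MINIMUM_SLOT_LENGTH; // 32 slots
    let epoch_1 = MINIMUM_SLOT_LENGTH * 2; // 64 slots
    assert_eq!(epoch_0 + epoch_1, MINIMUM_SLOT_LENGTH * 3); // 96 slots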

View File

@@ -140,7 +140,7 @@ mod tests {
     use super::*;
     use crate::blocktree::tests::make_slot_entries;
     use crate::voting_keypair::tests::new_vote_account;
-    use solana_runtime::bank::{Bank, EpochSchedule};
+    use solana_runtime::bank::{Bank, EpochSchedule, MINIMUM_SLOT_LENGTH};
     use solana_sdk::genesis_block::{GenesisBlock, BOOTSTRAP_LEADER_LAMPORTS};
     use solana_sdk::signature::{Keypair, KeypairUtil};
     use std::sync::mpsc::channel;
@@ -151,11 +151,9 @@ mod tests {
     #[test]
     fn test_slot_leader_at_else_compute() {
-        let slots_per_epoch = 10;
-        let epoch_schedule = EpochSchedule::new(slots_per_epoch, slots_per_epoch / 2, true);
-        let cache = LeaderScheduleCache::new(epoch_schedule);
         let (genesis_block, _mint_keypair) = GenesisBlock::new(2);
         let bank = Bank::new(&genesis_block);
+        let cache = LeaderScheduleCache::new_from_bank(&bank);
         // Nothing in the cache, should return None
         assert!(cache.slot_leader_at(bank.slot()).is_none());
@@ -195,7 +193,7 @@ mod tests {
     }
     fn run_thread_race() {
-        let slots_per_epoch = 10;
+        let slots_per_epoch = MINIMUM_SLOT_LENGTH as u64;
         let epoch_schedule = EpochSchedule::new(slots_per_epoch, slots_per_epoch / 2, true);
         let cache = Arc::new(LeaderScheduleCache::new(epoch_schedule));
         let (genesis_block, _mint_keypair) = GenesisBlock::new(2);
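
Both tests previously built schedules with a slots_per_epoch of 10, which falls below the new 32-slot floor, so they now pin the epoch length to the constant. A minimal sketch of the updated setup, using the EpochSchedule::new(slots_per_epoch, stakers_slot_offset, warmup) call seen above (the parameter names are assumptions):

    use solana_runtime::bank::{EpochSchedule, MINIMUM_SLOT_LENGTH};

    // With warmup enabled, epochs ramp up from MINIMUM_SLOT_LENGTH,
    // so an epoch length below the minimum is no longer meaningful.
    let slots_per_epoch = MINIMUM_SLOT_LENGTH as u64;
    let _schedule = EpochSchedule::new(slots_per_epoch, slots_per_epoch / 2, true);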

View File

@@ -449,6 +449,7 @@ impl Drop for LocalCluster {
 #[cfg(test)]
 mod test {
     use super::*;
+    use solana_runtime::bank::MINIMUM_SLOT_LENGTH;
     #[test]
     fn test_local_cluster_start_and_exit() {
@@ -472,7 +473,7 @@ mod test {
             node_stakes: vec![3; NUM_NODES],
             cluster_lamports: 100,
             ticks_per_slot: 16,
-            slots_per_epoch: 16,
+            slots_per_epoch: MINIMUM_SLOT_LENGTH as u64,
             ..ClusterConfig::default()
         };
         let cluster = LocalCluster::new(&config);

View File

@@ -2,7 +2,7 @@
 //! "ticks", a measure of time in the PoH stream
 use crate::poh_recorder::PohRecorder;
 use crate::service::Service;
-use solana_sdk::timing::NUM_TICKS_PER_SECOND;
+use solana_sdk::timing::{self, NUM_TICKS_PER_SECOND};
 use std::sync::atomic::{AtomicBool, Ordering};
 use std::sync::mpsc::SyncSender;
 use std::sync::{Arc, Mutex};
@@ -28,6 +28,15 @@ impl Default for PohServiceConfig {
     }
 }
+impl PohServiceConfig {
+    pub fn ticks_to_ms(&self, num_ticks: u64) -> u64 {
+        match self {
+            PohServiceConfig::Sleep(d) => timing::duration_as_ms(d) * num_ticks,
+            _ => panic!("Unsupported tick config"),
+        }
+    }
+}
 pub struct PohService {
     tick_producer: JoinHandle<()>,
 }
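
A hedged usage sketch for the new helper: for a Sleep-based config, ticks_to_ms scales the per-tick sleep duration by a tick count, which is how the cluster tests below turn ticks_per_slot into a slot length in milliseconds (the values here are illustrative):

    use solana::poh_service::PohServiceConfig;
    use std::time::Duration;

    // A 10 ms tick and 16 ticks per slot give a 160 ms slot.
    let tick_config = PohServiceConfig::Sleep(Duration::from_millis(10));
    assert_eq!(tick_config.ticks_to_ms(16), 160);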

View File

@@ -255,7 +255,7 @@ mod test {
     use crate::packet::{index_blobs, Blob};
     use crate::service::Service;
     use crate::streamer::{blob_receiver, responder};
-    use solana_runtime::bank::Bank;
+    use solana_runtime::bank::{Bank, MINIMUM_SLOT_LENGTH};
     use solana_sdk::genesis_block::GenesisBlock;
     use solana_sdk::hash::Hash;
     use std::fs::remove_dir_all;
@@ -320,8 +320,8 @@ mod test {
         );
         // with a Bank and no idea who leader is, we keep the blobs (for now)
-        // TODO: persistr in blocktree that we didn't know who the leader was at the time?
-        blob.set_slot(100);
+        // TODO: persist in blocktree that we didn't know who the leader was at the time?
+        blob.set_slot(MINIMUM_SLOT_LENGTH as u64 * 3);
         assert_eq!(
             should_retransmit_and_persist(&blob, Some(&bank), Some(&cache), &me_id),
             true
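
Why the blob moves from the hardcoded slot 100 to MINIMUM_SLOT_LENGTH * 3: the test wants a slot whose leader the cache cannot determine, and with 32-slot warmup epochs the first two epochs cover slots 0 through 95, so slot 96 presumably sits in an epoch whose leader schedule a fresh bank has not yet computed:

    // Sketch of the slot choice, assuming a 32-slot minimum:
    const MINIMUM_SLOT_LENGTH: u64 = 32;
    let unknown_leader_slot = MINIMUM_SLOT_LENGTH * 3; // first slot past the two warmup epochs
    assert_eq!(unknown_leader_slot, 96);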

View File

@@ -6,6 +6,7 @@ use solana::fullnode::FullnodeConfig;
 use solana::gossip_service::discover_nodes;
 use solana::local_cluster::{ClusterConfig, LocalCluster};
 use solana::poh_service::PohServiceConfig;
+use solana_runtime::bank::MINIMUM_SLOT_LENGTH;
 use solana_sdk::timing;
 use std::time::Duration;
@@ -96,7 +97,7 @@ fn test_leader_failure_4() {
     let config = ClusterConfig {
         cluster_lamports: 10_000,
         node_stakes: vec![100; 4],
-        fullnode_config,
+        fullnode_config: fullnode_config.clone(),
         ..ClusterConfig::default()
     };
     let local = LocalCluster::new(&config);
@@ -104,6 +105,9 @@ fn test_leader_failure_4() {
         &local.entry_point_info,
         &local.funding_keypair,
         num_nodes,
+        fullnode_config
+            .tick_config
+            .ticks_to_ms(config.ticks_per_slot as u64),
     );
 }
 #[test]
@@ -111,8 +115,8 @@ fn test_two_unbalanced_stakes() {
     solana_logger::setup();
     let mut fullnode_config = FullnodeConfig::default();
     let num_ticks_per_second = 100;
-    let num_ticks_per_slot = 160;
-    let num_slots_per_epoch = 16;
+    let num_ticks_per_slot = 40;
+    let num_slots_per_epoch = MINIMUM_SLOT_LENGTH as u64;
     fullnode_config.tick_config =
         PohServiceConfig::Sleep(Duration::from_millis(100 / num_ticks_per_second));
     fullnode_config.rpc_config.enable_fullnode_exit = true;
@@ -124,13 +128,13 @@ fn test_two_unbalanced_stakes() {
         slots_per_epoch: num_slots_per_epoch,
         ..ClusterConfig::default()
     });
+    cluster_tests::sleep_n_epochs(
+        10.0,
+        &fullnode_config.tick_config,
+        num_ticks_per_slot,
+        num_slots_per_epoch,
+    );
     cluster.close_preserve_ledgers();
     let leader_id = cluster.entry_point_info.id;
     let leader_ledger = cluster.fullnode_infos[&leader_id].ledger_path.clone();
@@ -163,7 +167,7 @@ fn test_forwarding() {
 #[test]
 fn test_restart_node() {
     let fullnode_config = FullnodeConfig::default();
-    let slots_per_epoch = 8;
+    let slots_per_epoch = MINIMUM_SLOT_LENGTH as u64;
     let ticks_per_slot = 16;
     let mut cluster = LocalCluster::new(&ClusterConfig {
         node_stakes: vec![3],
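
The sleep_n_epochs helper called in test_two_unbalanced_stakes is not part of this view; a hypothetical sketch of its shape, assuming it converts an epoch count into a millisecond sleep through the tick config (the body and name suffix are illustrative, not the actual implementation):

    use solana::poh_service::PohServiceConfig;
    use std::thread::sleep;
    use std::time::Duration;

    // Hypothetical: mirrors the call site's (num_epochs, tick_config,
    // ticks_per_slot, slots_per_epoch) argument list.
    fn sleep_n_epochs_sketch(
        num_epochs: f64,
        config: &PohServiceConfig,
        ticks_per_slot: u64,
        slots_per_epoch: u64,
    ) {
        let num_ticks = (num_epochs * ticks_per_slot as f64 * slots_per_epoch as f64) as u64;
        sleep(Duration::from_millis(config.ticks_to_ms(num_ticks)));
    }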

View File

@@ -16,6 +16,7 @@ use solana::storage_stage::StorageState;
 use solana::storage_stage::STORAGE_ROTATE_TEST_COUNT;
 use solana::streamer;
 use solana::tvu::{Sockets, Tvu};
+use solana_runtime::bank::MINIMUM_SLOT_LENGTH;
 use solana_sdk::genesis_block::GenesisBlock;
 use solana_sdk::signature::{Keypair, KeypairUtil};
 use solana_sdk::system_transaction;
@@ -78,7 +79,7 @@ fn test_replay() {
     let (mut genesis_block, mint_keypair) =
         GenesisBlock::new_with_leader(total_balance, &leader.info.id, leader_balance);
     genesis_block.ticks_per_slot = 160;
-    genesis_block.slots_per_epoch = 16;
+    genesis_block.slots_per_epoch = MINIMUM_SLOT_LENGTH as u64;
     let (blocktree_path, blockhash) = create_new_tmp_ledger!(&genesis_block);
     let tvu_addr = target1.info.tvu;