More fullnode to validator renaming (#6337)

Greg Fitzgerald
2019-10-11 13:30:52 -06:00
committed by GitHub
parent 5650231df3
commit 322fcea6e5
17 changed files with 88 additions and 87 deletions

View File

@@ -114,17 +114,17 @@ pub fn send_many_transactions(
     expected_balances
 }

-pub fn fullnode_exit(entry_point_info: &ContactInfo, nodes: usize) {
+pub fn validator_exit(entry_point_info: &ContactInfo, nodes: usize) {
     let (cluster_nodes, _) = discover_cluster(&entry_point_info.gossip, nodes).unwrap();
     assert!(cluster_nodes.len() >= nodes);
     for node in &cluster_nodes {
         let client = create_client(node.client_facing_addr(), VALIDATOR_PORT_RANGE);
-        assert!(client.fullnode_exit().unwrap());
+        assert!(client.validator_exit().unwrap());
     }
     sleep(Duration::from_millis(DEFAULT_SLOT_MILLIS));
     for node in &cluster_nodes {
         let client = create_client(node.client_facing_addr(), VALIDATOR_PORT_RANGE);
-        assert!(client.fullnode_exit().is_err());
+        assert!(client.validator_exit().is_err());
     }
 }
@@ -198,7 +198,7 @@ pub fn kill_entry_and_spend_and_verify_rest(
     ));
     info!("done sleeping for first 2 warmup epochs");
     info!("killing entry point: {}", entry_point_info.id);
-    assert!(client.fullnode_exit().unwrap());
+    assert!(client.validator_exit().unwrap());
     info!("sleeping for some time");
     sleep(Duration::from_millis(
         slot_millis * NUM_CONSECUTIVE_LEADER_SLOTS,
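For orientation, the renamed validator_exit helper above boils down to a simple client-side pattern: ask a node to exit over RPC, wait roughly a slot, then confirm its RPC endpoint is gone. Below is a minimal sketch of that pattern for a single node; the module paths, the function name, and the standalone shape are assumptions for illustration and are not part of this commit.

// Sketch only: mirrors cluster_tests::validator_exit above; import paths are assumed.
use solana_client::thin_client::create_client;
use solana_core::{cluster_info::VALIDATOR_PORT_RANGE, contact_info::ContactInfo};
use std::{thread::sleep, time::Duration};

fn request_exit_and_verify(node: &ContactInfo, slot_millis: u64) {
    let client = create_client(node.client_facing_addr(), VALIDATOR_PORT_RANGE);
    // Honored only when the node runs with rpc_config.enable_validator_exit = true.
    assert!(client.validator_exit().unwrap());
    // Give the node time to shut down; afterwards the same request should fail.
    sleep(Duration::from_millis(slot_millis));
    let client = create_client(node.client_facing_addr(), VALIDATOR_PORT_RANGE);
    assert!(client.validator_exit().is_err());
}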

View File

@@ -58,7 +58,7 @@ impl ReplicatorInfo {
 #[derive(Clone, Debug)]
 pub struct ClusterConfig {
-    /// The fullnode config that should be applied to every node in the cluster
+    /// The validator config that should be applied to every node in the cluster
     pub validator_configs: Vec<ValidatorConfig>,
     /// Number of replicators in the cluster
     /// Note- replicators will timeout if ticks_per_slot is much larger than the default 8
@@ -100,9 +100,9 @@ pub struct LocalCluster {
     pub funding_keypair: Keypair,
     /// Entry point from which the rest of the network can be discovered
     pub entry_point_info: ContactInfo,
-    pub fullnode_infos: HashMap<Pubkey, ClusterValidatorInfo>,
+    pub validator_infos: HashMap<Pubkey, ClusterValidatorInfo>,
     pub listener_infos: HashMap<Pubkey, ClusterValidatorInfo>,
-    fullnodes: HashMap<Pubkey, Validator>,
+    validators: HashMap<Pubkey, Validator>,
     pub genesis_block: GenesisBlock,
     replicators: Vec<Replicator>,
     pub replicator_infos: HashMap<Pubkey, ReplicatorInfo>,
@@ -190,9 +190,9 @@ impl LocalCluster {
             &config.validator_configs[0],
         );
-        let mut fullnodes = HashMap::new();
-        let mut fullnode_infos = HashMap::new();
-        fullnodes.insert(leader_pubkey, leader_server);
+        let mut validators = HashMap::new();
+        let mut validator_infos = HashMap::new();
+        validators.insert(leader_pubkey, leader_server);
         let leader_info = ValidatorInfo {
             keypair: leader_keypair,
             voting_keypair: leader_voting_keypair,
@@ -204,15 +204,15 @@ impl LocalCluster {
         let cluster_leader =
             ClusterValidatorInfo::new(leader_info, config.validator_configs[0].clone());
-        fullnode_infos.insert(leader_pubkey, cluster_leader);
+        validator_infos.insert(leader_pubkey, cluster_leader);

         let mut cluster = Self {
             funding_keypair: mint_keypair,
             entry_point_info: leader_contact_info,
-            fullnodes,
+            validators,
             replicators: vec![],
             genesis_block,
-            fullnode_infos,
+            validator_infos,
             replicator_infos: HashMap::new(),
             listener_infos: HashMap::new(),
         };
@@ -250,14 +250,14 @@ impl LocalCluster {
     }

     pub fn exit(&mut self) {
-        for node in self.fullnodes.values_mut() {
+        for node in self.validators.values_mut() {
             node.exit();
         }
     }

     pub fn close_preserve_ledgers(&mut self) {
         self.exit();
-        for (_, node) in self.fullnodes.drain() {
+        for (_, node) in self.validators.drain() {
             node.join().unwrap();
         }
@@ -322,7 +322,7 @@ impl LocalCluster {
             &validator_config,
         );
-        self.fullnodes
+        self.validators
             .insert(validator_keypair.pubkey(), validator_server);
         let validator_pubkey = validator_keypair.pubkey();
         let validator_info = ClusterValidatorInfo::new(
@@ -339,7 +339,8 @@ impl LocalCluster {
         if validator_config.voting_disabled {
             self.listener_infos.insert(validator_pubkey, validator_info);
         } else {
-            self.fullnode_infos.insert(validator_pubkey, validator_info);
+            self.validator_infos
+                .insert(validator_pubkey, validator_info);
         }
     }
@@ -384,7 +385,7 @@ impl LocalCluster {
     fn close(&mut self) {
         self.close_preserve_ledgers();
         for ledger_path in self
-            .fullnode_infos
+            .validator_infos
             .values()
             .map(|f| &f.info.ledger_path)
             .chain(self.replicator_infos.values().map(|info| &info.ledger_path))
@@ -567,11 +568,11 @@ impl LocalCluster {
 impl Cluster for LocalCluster {
     fn get_node_pubkeys(&self) -> Vec<Pubkey> {
-        self.fullnodes.keys().cloned().collect()
+        self.validators.keys().cloned().collect()
     }

     fn get_validator_client(&self, pubkey: &Pubkey) -> Option<ThinClient> {
-        self.fullnode_infos.get(pubkey).map(|f| {
+        self.validator_infos.get(pubkey).map(|f| {
             create_client(
                 f.info.contact_info.client_facing_addr(),
                 VALIDATOR_PORT_RANGE,
@@ -580,13 +581,13 @@ impl Cluster for LocalCluster {
     }

     fn exit_node(&mut self, pubkey: &Pubkey) -> ClusterValidatorInfo {
-        let mut node = self.fullnodes.remove(&pubkey).unwrap();
+        let mut node = self.validators.remove(&pubkey).unwrap();

-        // Shut down the fullnode
+        // Shut down the validator
         node.exit();
         node.join().unwrap();

-        self.fullnode_infos.remove(&pubkey).unwrap()
+        self.validator_infos.remove(&pubkey).unwrap()
     }

     fn restart_node(&mut self, pubkey: &Pubkey, mut cluster_validator_info: ClusterValidatorInfo) {
@@ -604,22 +605,22 @@ impl Cluster for LocalCluster {
         };

         // Restart the node
-        let fullnode_info = &cluster_validator_info.info;
+        let validator_info = &cluster_validator_info.info;
         let restarted_node = Validator::new(
             node,
-            &fullnode_info.keypair,
-            &fullnode_info.ledger_path,
-            &fullnode_info.voting_keypair.pubkey(),
-            &fullnode_info.voting_keypair,
-            &fullnode_info.storage_keypair,
+            &validator_info.keypair,
+            &validator_info.ledger_path,
+            &validator_info.voting_keypair.pubkey(),
+            &validator_info.voting_keypair,
+            &validator_info.storage_keypair,
             entry_point_info,
             true,
             &cluster_validator_info.config,
         );

-        self.fullnodes.insert(*pubkey, restarted_node);
-        self.fullnode_infos.insert(*pubkey, cluster_validator_info);
+        self.validators.insert(*pubkey, restarted_node);
+        self.validator_infos.insert(*pubkey, cluster_validator_info);
     }

     fn exit_restart_node(&mut self, pubkey: &Pubkey, validator_config: ValidatorConfig) {
@@ -646,7 +647,7 @@ mod test {
         solana_logger::setup();
         let num_nodes = 1;
         let cluster = LocalCluster::new_with_equal_stakes(num_nodes, 100, 3);
-        assert_eq!(cluster.fullnodes.len(), num_nodes);
+        assert_eq!(cluster.validators.len(), num_nodes);
         assert_eq!(cluster.replicators.len(), 0);
     }
@@ -654,7 +655,7 @@ mod test {
     fn test_local_cluster_start_and_exit_with_config() {
         solana_logger::setup();
         let mut validator_config = ValidatorConfig::default();
-        validator_config.rpc_config.enable_fullnode_exit = true;
+        validator_config.rpc_config.enable_validator_exit = true;
         validator_config.storage_slots_per_turn = SLOTS_PER_TURN_TEST;
         const NUM_NODES: usize = 1;
         let num_replicators = 1;
@@ -668,7 +669,7 @@ mod test {
             ..ClusterConfig::default()
         };
         let cluster = LocalCluster::new(&config);
-        assert_eq!(cluster.fullnodes.len(), NUM_NODES);
+        assert_eq!(cluster.validators.len(), NUM_NODES);
         assert_eq!(cluster.replicators.len(), num_replicators);
     }
 }
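A usage note on this file: the public Cluster trait methods keep their names; only the backing fields of LocalCluster were renamed. Below is a minimal sketch of driving those methods from a test, with the node count, stake values, import paths, and the wrapper function name chosen for illustration rather than taken from this commit.

// Sketch only: exercises the trait methods whose internals now use `validators` / `validator_infos`.
use solana_local_cluster::{cluster::Cluster, local_cluster::LocalCluster};

fn bounce_one_node() {
    let mut cluster = LocalCluster::new_with_equal_stakes(3, 10_000, 100);
    let pubkeys = cluster.get_node_pubkeys(); // keys of the renamed `validators` map
    let client = cluster.get_validator_client(&pubkeys[0]); // looked up in `validator_infos`
    assert!(client.is_some());
    let info = cluster.exit_node(&pubkeys[0]); // removes the node from `validators` and joins it
    cluster.restart_node(&pubkeys[0], info); // re-inserts into `validators` and `validator_infos`
}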

View File

@@ -52,7 +52,7 @@ fn test_ledger_cleanup_service() {
     );
     cluster.close_preserve_ledgers();
     //check everyone's ledgers and make sure only ~100 slots are stored
-    for (_, info) in &cluster.fullnode_infos {
+    for (_, info) in &cluster.validator_infos {
         let mut slots = 0;
         let blocktree = Blocktree::open(&info.info.ledger_path).unwrap();
         blocktree
@@ -130,22 +130,22 @@ fn test_spend_and_verify_all_nodes_env_num_nodes() {
 #[allow(unused_attributes)]
 #[test]
 #[should_panic]
-fn test_fullnode_exit_default_config_should_panic() {
+fn test_validator_exit_default_config_should_panic() {
     solana_logger::setup();
-    error!("test_fullnode_exit_default_config_should_panic");
+    error!("test_validator_exit_default_config_should_panic");
     let num_nodes = 2;
     let local = LocalCluster::new_with_equal_stakes(num_nodes, 10_000, 100);
-    cluster_tests::fullnode_exit(&local.entry_point_info, num_nodes);
+    cluster_tests::validator_exit(&local.entry_point_info, num_nodes);
 }

 #[test]
 #[serial]
-fn test_fullnode_exit_2() {
+fn test_validator_exit_2() {
     solana_logger::setup();
-    error!("test_fullnode_exit_2");
+    error!("test_validator_exit_2");
     let num_nodes = 2;
     let mut validator_config = ValidatorConfig::default();
-    validator_config.rpc_config.enable_fullnode_exit = true;
+    validator_config.rpc_config.enable_validator_exit = true;
     let config = ClusterConfig {
         cluster_lamports: 10_000,
         node_stakes: vec![100; num_nodes],
@@ -153,7 +153,7 @@ fn test_fullnode_exit_2() {
         ..ClusterConfig::default()
     };
     let local = LocalCluster::new(&config);
-    cluster_tests::fullnode_exit(&local.entry_point_info, num_nodes);
+    cluster_tests::validator_exit(&local.entry_point_info, num_nodes);
 }

 // Cluster needs a supermajority to remain, so the minimum size for this test is 4
@@ -164,7 +164,7 @@ fn test_leader_failure_4() {
error!("test_leader_failure_4");
let num_nodes = 4;
let mut validator_config = ValidatorConfig::default();
validator_config.rpc_config.enable_fullnode_exit = true;
validator_config.rpc_config.enable_validator_exit = true;
let config = ClusterConfig {
cluster_lamports: 10_000,
node_stakes: vec![100; 4],
@@ -189,7 +189,7 @@ fn test_two_unbalanced_stakes() {
     let num_ticks_per_slot = 10;
     let num_slots_per_epoch = MINIMUM_SLOTS_PER_EPOCH as u64;
-    validator_config.rpc_config.enable_fullnode_exit = true;
+    validator_config.rpc_config.enable_validator_exit = true;
     let mut cluster = LocalCluster::new(&ClusterConfig {
         node_stakes: vec![999_990, 3],
         cluster_lamports: 1_000_000,
@@ -208,7 +208,7 @@ fn test_two_unbalanced_stakes() {
     );
     cluster.close_preserve_ledgers();
     let leader_pubkey = cluster.entry_point_info.id;
-    let leader_ledger = cluster.fullnode_infos[&leader_pubkey]
+    let leader_ledger = cluster.validator_infos[&leader_pubkey]
         .info
         .ledger_path
         .clone();
@@ -430,7 +430,7 @@ fn test_snapshots_blocktree_floor() {
     // Check the validator ledger doesn't contain any slots < slot_floor
     cluster.close_preserve_ledgers();
-    let validator_ledger_path = &cluster.fullnode_infos[&validator_id];
+    let validator_ledger_path = &cluster.validator_infos[&validator_id];
     let blocktree = Blocktree::open(&validator_ledger_path.info.ledger_path).unwrap();

     // Skip the zeroth slot in blocktree that the ledger is initialized with
@@ -490,7 +490,7 @@ fn test_snapshots_restart_validity() {
         let tar = snapshot_utils::get_snapshot_tar_path(&snapshot_package_output_path);
         wait_for_next_snapshot(&cluster, &tar);

-        // Create new account paths since fullnode exit is not guaranteed to cleanup RPC threads,
+        // Create new account paths since validator exit is not guaranteed to cleanup RPC threads,
         // which may delete the old accounts on exit at any point
         let (new_account_storage_dirs, new_account_storage_paths) =
             generate_account_paths(num_account_paths);
@@ -566,7 +566,7 @@ fn test_faulty_node(faulty_node_type: BroadcastStageType) {
     );

     let corrupt_node = cluster
-        .fullnode_infos
+        .validator_infos
         .iter()
         .find(|(_, v)| v.config.broadcast_stage_type == faulty_node_type)
         .unwrap()
@@ -616,7 +616,7 @@ fn run_repairman_catchup(num_repairmen: u64) {
     // their root could actually be much less than 31. This is why we give a num_root_buffer_slots buffer.
     let stakers_slot_offset = num_slots_per_epoch + num_root_buffer_slots;

-    validator_config.rpc_config.enable_fullnode_exit = true;
+    validator_config.rpc_config.enable_validator_exit = true;

     let lamports_per_repairman = 1000;
@@ -740,7 +740,7 @@ fn setup_snapshot_validator_config(
     // Create the validator config
     let mut validator_config = ValidatorConfig::default();
-    validator_config.rpc_config.enable_fullnode_exit = true;
+    validator_config.rpc_config.enable_validator_exit = true;
     validator_config.snapshot_config = Some(snapshot_config);
     validator_config.account_paths = Some(account_storage_paths);
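Finally, a consolidated sketch of what an exit test looks like after this commit, stitched together from the hunks above. It mirrors test_validator_exit_2; the test name, the import paths, and the validator_configs assignment (which falls outside the hunks shown) are assumptions rather than code from this commit.

// Sketch only: the renamed config flag, helper, and types in one place; module paths assumed.
use solana_core::validator::ValidatorConfig;
use solana_local_cluster::{
    cluster_tests,
    local_cluster::{ClusterConfig, LocalCluster},
};

#[test]
fn validator_exit_smoke() {
    let num_nodes = 2;
    let mut validator_config = ValidatorConfig::default();
    // Renamed from enable_fullnode_exit; without it, client.validator_exit() is rejected.
    validator_config.rpc_config.enable_validator_exit = true;
    let config = ClusterConfig {
        cluster_lamports: 10_000,
        node_stakes: vec![100; num_nodes],
        // Assumed: one config per node, relying on ValidatorConfig being Clone.
        validator_configs: vec![validator_config; num_nodes],
        ..ClusterConfig::default()
    };
    let local = LocalCluster::new(&config);
    cluster_tests::validator_exit(&local.entry_point_info, num_nodes);
}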