Refactor LocalCluster and add support for listener nodes (#3790)

Author: Sagar Dhawan
Date: 2019-04-15 15:27:45 -07:00 (committed by GitHub)
Parent: 80f3568062
Commit: 3fcf03ff3e
6 changed files with 202 additions and 139 deletions
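The test changes below replace LocalCluster's positional constructors (new, new_with_config, new_with_tick_config) with a single LocalCluster::new(&ClusterConfig), keep a new_with_equal_stakes convenience for the simple equal-stake case, and add a num_listeners option for spinning up unstaked listener nodes. The ClusterConfig definition itself lives in one of the other changed files and is not part of this excerpt; the sketch below is inferred from the call sites in the diff, with field types, ordering, and default values assumed rather than taken from the source:

use solana::fullnode::FullnodeConfig;

// Rough sketch of ClusterConfig as implied by the call sites in this diff.
// Field types, ordering, and the Default values are assumptions, not the
// actual definition from local_cluster.rs.
#[derive(Clone)]
pub struct ClusterConfig {
    /// Per-node configuration applied to every validator in the cluster
    pub fullnode_config: FullnodeConfig,
    /// Number of unstaked "listener" nodes that join gossip but hold no stake
    pub num_listeners: u64,
    /// Stake assigned to each validator; the vector length sets the validator count
    pub node_stakes: Vec<u64>,
    /// Total lamports available to the cluster's funding keypair
    pub cluster_lamports: u64,
    pub ticks_per_slot: u64,
    pub slots_per_epoch: u64,
}

impl Default for ClusterConfig {
    fn default() -> Self {
        Self {
            fullnode_config: FullnodeConfig::default(),
            num_listeners: 0,
            node_stakes: vec![100],
            cluster_lamports: 10_000,
            ticks_per_slot: 8,   // assumed default
            slots_per_epoch: 64, // assumed default
        }
    }
}

Because every test now builds the config with struct-update syntax (..ClusterConfig::default()), new knobs such as num_listeners can be added without touching existing call sites.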


@@ -4,7 +4,7 @@ use solana::cluster::Cluster;
 use solana::cluster_tests;
 use solana::fullnode::FullnodeConfig;
 use solana::gossip_service::discover_nodes;
-use solana::local_cluster::LocalCluster;
+use solana::local_cluster::{ClusterConfig, LocalCluster};
 use solana::poh_service::PohServiceConfig;
 use solana_sdk::timing;
 use std::time::Duration;
@@ -13,7 +13,7 @@ use std::time::Duration;
 fn test_spend_and_verify_all_nodes_1() {
     solana_logger::setup();
     let num_nodes = 1;
-    let local = LocalCluster::new(num_nodes, 10_000, 100);
+    let local = LocalCluster::new_with_equal_stakes(num_nodes, 10_000, 100);
     cluster_tests::spend_and_verify_all_nodes(
         &local.entry_point_info,
         &local.funding_keypair,
@@ -25,7 +25,7 @@ fn test_spend_and_verify_all_nodes_1() {
 fn test_spend_and_verify_all_nodes_2() {
     solana_logger::setup();
     let num_nodes = 2;
-    let local = LocalCluster::new(num_nodes, 10_000, 100);
+    let local = LocalCluster::new_with_equal_stakes(num_nodes, 10_000, 100);
     cluster_tests::spend_and_verify_all_nodes(
         &local.entry_point_info,
         &local.funding_keypair,
@@ -37,7 +37,7 @@ fn test_spend_and_verify_all_nodes_2() {
 fn test_spend_and_verify_all_nodes_3() {
     solana_logger::setup();
     let num_nodes = 3;
-    let local = LocalCluster::new(num_nodes, 10_000, 100);
+    let local = LocalCluster::new_with_equal_stakes(num_nodes, 10_000, 100);
     cluster_tests::spend_and_verify_all_nodes(
         &local.entry_point_info,
         &local.funding_keypair,
@@ -50,7 +50,7 @@ fn test_spend_and_verify_all_nodes_3() {
 fn test_fullnode_exit_default_config_should_panic() {
     solana_logger::setup();
     let num_nodes = 2;
-    let local = LocalCluster::new(num_nodes, 10_000, 100);
+    let local = LocalCluster::new_with_equal_stakes(num_nodes, 10_000, 100);
     cluster_tests::fullnode_exit(&local.entry_point_info, num_nodes);
 }
@@ -60,7 +60,13 @@ fn test_fullnode_exit_2() {
     let num_nodes = 2;
     let mut fullnode_config = FullnodeConfig::default();
     fullnode_config.rpc_config.enable_fullnode_exit = true;
-    let local = LocalCluster::new_with_config(&[100; 2], 10_000, &fullnode_config, &[]);
+    let config = ClusterConfig {
+        cluster_lamports: 10_000,
+        node_stakes: vec![100; 2],
+        fullnode_config,
+        ..ClusterConfig::default()
+    };
+    let local = LocalCluster::new(&config);
     cluster_tests::fullnode_exit(&local.entry_point_info, num_nodes);
 }
@@ -71,7 +77,13 @@ fn test_leader_failure_4() {
     let num_nodes = 4;
     let mut fullnode_config = FullnodeConfig::default();
     fullnode_config.rpc_config.enable_fullnode_exit = true;
-    let local = LocalCluster::new_with_config(&[100; 4], 10_000, &fullnode_config, &[]);
+    let config = ClusterConfig {
+        cluster_lamports: 10_000,
+        node_stakes: vec![100; 4],
+        fullnode_config,
+        ..ClusterConfig::default()
+    };
+    let local = LocalCluster::new(&config);
     cluster_tests::kill_entry_and_spend_and_verify_rest(
         &local.entry_point_info,
         &local.funding_keypair,
@@ -87,14 +99,14 @@ fn test_two_unbalanced_stakes() {
     fullnode_config.tick_config =
         PohServiceConfig::Sleep(Duration::from_millis(100 / num_ticks_per_second));
     fullnode_config.rpc_config.enable_fullnode_exit = true;
-    let mut cluster = LocalCluster::new_with_tick_config(
-        &[999_990, 3],
-        1_000_000,
-        &fullnode_config,
-        num_ticks_per_slot,
-        num_slots_per_epoch,
-        &[],
-    );
+    let mut cluster = LocalCluster::new(&ClusterConfig {
+        node_stakes: vec![999_990, 3],
+        cluster_lamports: 1_000_000,
+        fullnode_config: fullnode_config.clone(),
+        ticks_per_slot: num_ticks_per_slot,
+        slots_per_epoch: num_slots_per_epoch,
+        ..ClusterConfig::default()
+    });
     cluster_tests::sleep_n_epochs(
         10.0,
         &fullnode_config.tick_config,
@@ -113,8 +125,12 @@ fn test_two_unbalanced_stakes() {
 fn test_forwarding() {
     // Set up a cluster where one node is never the leader, so all txs sent to this node
     // will have to be forwarded in order to be confirmed
-    let fullnode_config = FullnodeConfig::default();
-    let cluster = LocalCluster::new_with_config(&[999_990, 3], 2_000_000, &fullnode_config, &[]);
+    let config = ClusterConfig {
+        node_stakes: vec![999_990, 3],
+        cluster_lamports: 2_000_000,
+        ..ClusterConfig::default()
+    };
+    let cluster = LocalCluster::new(&config);
     let cluster_nodes = discover_nodes(&cluster.entry_point_info.gossip, 2).unwrap();
     assert!(cluster_nodes.len() >= 2);
@@ -132,14 +148,14 @@ fn test_restart_node() {
     let fullnode_config = FullnodeConfig::default();
     let slots_per_epoch = 8;
     let ticks_per_slot = 16;
-    let mut cluster = LocalCluster::new_with_tick_config(
-        &[3],
-        100,
-        &fullnode_config,
+    let mut cluster = LocalCluster::new(&ClusterConfig {
+        node_stakes: vec![3],
+        cluster_lamports: 100,
+        fullnode_config: fullnode_config.clone(),
         ticks_per_slot,
         slots_per_epoch,
-        &[],
-    );
+        ..ClusterConfig::default()
+    });
     let nodes = cluster.get_node_ids();
     cluster_tests::sleep_n_epochs(
         1.0,
@@ -156,3 +172,16 @@ fn test_restart_node() {
     );
     cluster_tests::send_many_transactions(&cluster.entry_point_info, &cluster.funding_keypair, 1);
 }
+
+#[test]
+fn test_listener_startup() {
+    let config = ClusterConfig {
+        node_stakes: vec![100; 1],
+        cluster_lamports: 1_000,
+        num_listeners: 3,
+        ..ClusterConfig::default()
+    };
+    let cluster = LocalCluster::new(&config);
+    let cluster_nodes = discover_nodes(&cluster.entry_point_info.gossip, 4).unwrap();
+    assert_eq!(cluster_nodes.len(), 4);
+}
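For context, the simple tests above switched from LocalCluster::new(num_nodes, cluster_lamports, lamports_per_node) to new_with_equal_stakes with the same arguments, which plausibly just fills a ClusterConfig with one equal stake per node. A hypothetical sketch (the actual implementation lives in local_cluster.rs, which is not shown in this excerpt):

impl LocalCluster {
    // Hypothetical convenience wrapper; argument names and the exact body are guesses.
    pub fn new_with_equal_stakes(
        num_nodes: usize,
        cluster_lamports: u64,
        lamports_per_node: u64,
    ) -> Self {
        // One equal stake entry per requested node, everything else defaulted.
        let stakes = vec![lamports_per_node; num_nodes];
        Self::new(&ClusterConfig {
            node_stakes: stakes,
            cluster_lamports,
            ..ClusterConfig::default()
        })
    }
}

In test_listener_startup, discover_nodes is asked to wait for 4 gossip entries because the single staked validator and the 3 unstaked listeners all announce themselves over gossip.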