Release builds for local cluster tests (#5891)
* Release builds for test
* Remove setting thread count in local cluster
* Increase timeout
* Move local cluster to separate job
* Extract out local cluster test from bench-tps
* Make local cluster inaccessible from outside crate
* Update test-stable.sh to exclude local_cluster in stable, include it in local-cluster CI job
* Move bench-exchange to local cluster
* Remove local cluster from coverage
@@ -1,14 +1,40 @@
-pub mod cluster;
-pub mod cluster_tests;
-pub mod local_cluster;
+#[cfg(test)]
+mod cluster;
+#[cfg(test)]
+mod cluster_tests;
+#[cfg(test)]
+mod local_cluster;
+#[cfg(test)]
+mod tests;
+
+#[macro_use]
+#[cfg(test)]
+extern crate log;
+
+#[cfg(test)]
+extern crate solana_bench_exchange;
+
+#[cfg(test)]
+extern crate solana_bench_tps;
+
+#[macro_use]
+#[cfg(test)]
+extern crate solana_core;
+
+#[cfg(test)]
+extern crate solana_drone;
+
+#[macro_use]
+#[cfg(test)]
+extern crate solana_exchange_program;
+
+#[macro_use]
+#[cfg(test)]
+extern crate solana_move_loader_program;
+
+#[macro_use]
+#[cfg(test)]
+extern crate solana_storage_program;
+
+#[cfg(test)]
+extern crate tempfile;
@@ -10,7 +10,6 @@ use solana_core::{
     service::Service,
     validator::{Validator, ValidatorConfig},
 };
-use solana_rayon_threadlimit::set_thread_count;
 use solana_sdk::{
     client::SyncClient,
     clock::DEFAULT_TICKS_PER_SLOT,
@@ -117,8 +116,6 @@ impl LocalCluster {
     }
 
     pub fn new(config: &ClusterConfig) -> Self {
-        set_thread_count(1);
-
         assert_eq!(config.validator_configs.len(), config.node_stakes.len());
         let leader_keypair = Arc::new(Keypair::new());
         let leader_pubkey = leader_keypair.pubkey();
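The lib.rs change above is what makes the cluster machinery "inaccessible from outside crate": `pub mod` becomes a private `mod`, and every module and heavyweight dependency is gated behind `#[cfg(test)]`, so they compile only under `cargo test` and never appear in release builds of dependent crates. `#[macro_use]` is the 2015-edition way to import a crate's macros (`info!`, `error!`, `solana_exchange_program!`, ...). A minimal sketch of the pattern, assuming `log` is listed under `[dev-dependencies]` (module name `cluster_smoke` is hypothetical):

#[macro_use]
#[cfg(test)]
extern crate log; // macro import is crate-wide, but only in test builds

#[cfg(test)]
mod cluster_smoke {
    // Private, test-only module: never part of the crate's public API, and
    // not compiled into release builds of crates that depend on this one.
    #[test]
    fn smoke() {
        info!("built and run only under cargo test");
    }
}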
local_cluster/src/tests.rs (new file, 4 lines)
@@ -0,0 +1,4 @@
mod bench_exchange;
mod bench_tps;
mod local_cluster;
mod replicator;
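Read together with the lib.rs hunk, these `mod` declarations wire the relocated tests into a test-only module tree; each name maps to a sibling file under `src/tests/`:

// Module layout produced by this commit (all of it behind cfg(test)):
//
//   local_cluster/src/lib.rs                   -> #[cfg(test)] mod tests;
//   local_cluster/src/tests.rs                 -> mod bench_exchange; mod bench_tps; ...
//   local_cluster/src/tests/bench_exchange.rs
//   local_cluster/src/tests/bench_tps.rs
//   local_cluster/src/tests/local_cluster.rs
//   local_cluster/src/tests/replicator.rs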
local_cluster/src/tests/bench_exchange.rs (new file, 100 lines)
@@ -0,0 +1,100 @@
use crate::local_cluster::{ClusterConfig, LocalCluster};
use solana_bench_exchange::bench::{airdrop_lamports, do_bench_exchange, Config};
use solana_core::gossip_service::{discover_cluster, get_multi_client};
use solana_core::validator::ValidatorConfig;
use solana_drone::drone::run_local_drone;
use solana_exchange_api::exchange_processor::process_instruction;
use solana_exchange_api::id;
use solana_runtime::bank::Bank;
use solana_runtime::bank_client::BankClient;
use solana_sdk::genesis_block::create_genesis_block;
use solana_sdk::signature::{Keypair, KeypairUtil};
use std::process::exit;
use std::sync::mpsc::channel;
use std::time::Duration;

#[test]
fn test_exchange_local_cluster() {
    solana_logger::setup();

    const NUM_NODES: usize = 1;

    let mut config = Config::default();
    config.identity = Keypair::new();
    config.duration = Duration::from_secs(1);
    config.fund_amount = 100_000;
    config.threads = 1;
    config.transfer_delay = 20; // 15
    config.batch_size = 100; // 1000;
    config.chunk_size = 10; // 200;
    config.account_groups = 1; // 10;
    let Config {
        fund_amount,
        batch_size,
        account_groups,
        ..
    } = config;
    let accounts_in_groups = batch_size * account_groups;

    let cluster = LocalCluster::new(&ClusterConfig {
        node_stakes: vec![100_000; NUM_NODES],
        cluster_lamports: 100_000_000_000_000,
        validator_configs: vec![ValidatorConfig::default(); NUM_NODES],
        native_instruction_processors: [solana_exchange_program!()].to_vec(),
        ..ClusterConfig::default()
    });

    let drone_keypair = Keypair::new();
    cluster.transfer(
        &cluster.funding_keypair,
        &drone_keypair.pubkey(),
        2_000_000_000_000,
    );

    let (addr_sender, addr_receiver) = channel();
    run_local_drone(drone_keypair, addr_sender, Some(1_000_000_000_000));
    let drone_addr = addr_receiver.recv_timeout(Duration::from_secs(2)).unwrap();

    info!("Connecting to the cluster");
    let (nodes, _) =
        discover_cluster(&cluster.entry_point_info.gossip, NUM_NODES).unwrap_or_else(|err| {
            error!("Failed to discover {} nodes: {:?}", NUM_NODES, err);
            exit(1);
        });

    let (client, num_clients) = get_multi_client(&nodes);

    info!("clients: {}", num_clients);
    assert!(num_clients >= NUM_NODES);

    const NUM_SIGNERS: u64 = 2;
    airdrop_lamports(
        &client,
        &drone_addr,
        &config.identity,
        fund_amount * (accounts_in_groups + 1) as u64 * NUM_SIGNERS,
    );

    do_bench_exchange(vec![client], config);
}

#[test]
fn test_exchange_bank_client() {
    solana_logger::setup();
    let (genesis_block, identity) = create_genesis_block(100_000_000_000_000);
    let mut bank = Bank::new(&genesis_block);
    bank.add_instruction_processor(id(), process_instruction);
    let clients = vec![BankClient::new(bank)];

    let mut config = Config::default();
    config.identity = identity;
    config.duration = Duration::from_secs(1);
    config.fund_amount = 100_000;
    config.threads = 1;
    config.transfer_delay = 20; // 0;
    config.batch_size = 100; // 1500;
    config.chunk_size = 10; // 1500;
    config.account_groups = 1; // 50;

    do_bench_exchange(clients, config);
}
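Both new test files use the same hand-off to learn the drone's address: the test passes the sender half of an `std::sync::mpsc` channel into `run_local_drone`, the drone reports back the socket address it actually bound, and the test blocks on `recv_timeout` so a drone that never starts fails the test quickly instead of hanging. A self-contained sketch of that pattern using only the standard library (`run_local_service` is a hypothetical stand-in for `run_local_drone`):

use std::net::{SocketAddr, TcpListener};
use std::sync::mpsc::{channel, Sender};
use std::thread;
use std::time::Duration;

// Hypothetical stand-in for run_local_drone: bind an OS-assigned port and
// report the address actually bound back through the channel before serving.
fn run_local_service(addr_sender: Sender<SocketAddr>) {
    thread::spawn(move || {
        let listener = TcpListener::bind("127.0.0.1:0").unwrap();
        addr_sender.send(listener.local_addr().unwrap()).unwrap();
        for _stream in listener.incoming() {
            // handle requests here
        }
    });
}

fn main() {
    let (addr_sender, addr_receiver) = channel();
    run_local_service(addr_sender);
    // Same shape as the tests: fail fast if the service never comes up.
    let addr = addr_receiver.recv_timeout(Duration::from_secs(2)).unwrap();
    println!("service listening on {}", addr);
}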
local_cluster/src/tests/bench_tps.rs (new file, 76 lines)
@@ -0,0 +1,76 @@
use crate::local_cluster::{ClusterConfig, LocalCluster};
use serial_test_derive::serial;
use solana_bench_tps::bench::{do_bench_tps, generate_and_fund_keypairs};
use solana_bench_tps::cli::Config;
use solana_client::thin_client::create_client;
use solana_core::cluster_info::FULLNODE_PORT_RANGE;
use solana_core::validator::ValidatorConfig;
use solana_drone::drone::run_local_drone;
use solana_move_loader_program;
use solana_sdk::signature::{Keypair, KeypairUtil};
use std::sync::mpsc::channel;
use std::time::Duration;

fn test_bench_tps_local_cluster(config: Config) {
    solana_logger::setup();
    const NUM_NODES: usize = 1;
    let cluster = LocalCluster::new(&ClusterConfig {
        node_stakes: vec![999_990; NUM_NODES],
        cluster_lamports: 200_000_000,
        validator_configs: vec![ValidatorConfig::default(); NUM_NODES],
        native_instruction_processors: vec![solana_move_loader_program!()],
        ..ClusterConfig::default()
    });

    let drone_keypair = Keypair::new();
    cluster.transfer(
        &cluster.funding_keypair,
        &drone_keypair.pubkey(),
        100_000_000,
    );

    let client = create_client(
        (cluster.entry_point_info.rpc, cluster.entry_point_info.tpu),
        FULLNODE_PORT_RANGE,
    );

    let (addr_sender, addr_receiver) = channel();
    run_local_drone(drone_keypair, addr_sender, None);
    let drone_addr = addr_receiver.recv_timeout(Duration::from_secs(2)).unwrap();

    let lamports_per_account = 100;

    let (keypairs, move_keypairs, _keypair_balance) = generate_and_fund_keypairs(
        &client,
        Some(drone_addr),
        &config.id,
        config.tx_count,
        lamports_per_account,
        config.use_move,
    )
    .unwrap();

    let total = do_bench_tps(vec![client], config, keypairs, 0, move_keypairs);
    assert!(total > 100);
}

#[test]
#[serial]
fn test_bench_tps_local_cluster_solana() {
    let mut config = Config::default();
    config.tx_count = 100;
    config.duration = Duration::from_secs(10);

    test_bench_tps_local_cluster(config);
}

#[test]
#[serial]
fn test_bench_tps_local_cluster_move() {
    let mut config = Config::default();
    config.tx_count = 100;
    config.duration = Duration::from_secs(20);
    config.use_move = true;

    test_bench_tps_local_cluster(config);
}
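The `#[serial]` attribute from the `serial_test_derive` crate keeps these cluster tests from running concurrently; cargo's default multi-threaded test harness would otherwise have several local clusters fighting over ports and CPU. Roughly, the attribute has the effect of this hand-rolled sketch (a simplification using a const `Mutex`, which needs a current Rust toolchain; the real macro is more robust):

use std::sync::Mutex;

// Process-wide lock shared by every test that must not overlap.
static CLUSTER_TEST_LOCK: Mutex<()> = Mutex::new(());

#[test]
fn serialized_test_a() {
    // Ignore poisoning so one failed test doesn't wedge the rest.
    let _guard = CLUSTER_TEST_LOCK.lock().unwrap_or_else(|e| e.into_inner());
    // ... start a cluster, run the benchmark, tear it down ...
}

#[test]
fn serialized_test_b() {
    let _guard = CLUSTER_TEST_LOCK.lock().unwrap_or_else(|e| e.into_inner());
    // ... cannot run while serialized_test_a holds the lock ...
}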
local_cluster/src/tests/local_cluster.rs (new file, 751 lines)
@@ -0,0 +1,751 @@
use crate::{
    cluster::Cluster,
    cluster_tests,
    local_cluster::{ClusterConfig, LocalCluster},
};
use log::*;
use serial_test_derive::serial;
use solana_core::{
    bank_forks::SnapshotConfig, blocktree::Blocktree, broadcast_stage::BroadcastStageType,
    gossip_service::discover_cluster, snapshot_utils, validator::ValidatorConfig,
};
use solana_runtime::{
    accounts_db::AccountsDB,
    epoch_schedule::{EpochSchedule, MINIMUM_SLOTS_PER_EPOCH},
};
use solana_sdk::{client::SyncClient, clock, poh_config::PohConfig};
use std::path::{Path, PathBuf};
use std::{
    collections::{HashMap, HashSet},
    fs,
    thread::sleep,
    time::Duration,
};
use tempfile::TempDir;

#[test]
#[serial]
fn test_ledger_cleanup_service() {
    solana_logger::setup();
    error!("test_ledger_cleanup_service");
    let num_nodes = 3;
    let mut validator_config = ValidatorConfig::default();
    validator_config.max_ledger_slots = Some(100);
    let config = ClusterConfig {
        cluster_lamports: 10_000,
        poh_config: PohConfig::new_sleep(Duration::from_millis(50)),
        node_stakes: vec![100; num_nodes],
        validator_configs: vec![validator_config.clone(); num_nodes],
        ..ClusterConfig::default()
    };
    let mut cluster = LocalCluster::new(&config);
    // 200ms per slot * 100 slots = 20 seconds, so sleep a little longer than that.
    sleep(Duration::from_secs(60));

    cluster_tests::spend_and_verify_all_nodes(
        &cluster.entry_point_info,
        &cluster.funding_keypair,
        num_nodes,
        HashSet::new(),
    );
    cluster.close_preserve_ledgers();
    // Check everyone's ledgers and make sure only ~100 slots are stored
    for (_, info) in &cluster.fullnode_infos {
        let mut slots = 0;
        let blocktree = Blocktree::open(&info.info.ledger_path).unwrap();
        blocktree
            .slot_meta_iterator(0)
            .unwrap()
            .for_each(|_| slots += 1);
        // With 3 nodes, up to 3 slots can be in progress and not complete, so the max
        // number of slots in the blocktree should be up to 103
        assert!(slots <= 103, "got {}", slots);
    }
}

#[test]
#[serial]
fn test_spend_and_verify_all_nodes_1() {
    solana_logger::setup();
    error!("test_spend_and_verify_all_nodes_1");
    let num_nodes = 1;
    let local = LocalCluster::new_with_equal_stakes(num_nodes, 10_000, 100);
    cluster_tests::spend_and_verify_all_nodes(
        &local.entry_point_info,
        &local.funding_keypair,
        num_nodes,
        HashSet::new(),
    );
}

#[test]
#[serial]
fn test_spend_and_verify_all_nodes_2() {
    solana_logger::setup();
    error!("test_spend_and_verify_all_nodes_2");
    let num_nodes = 2;
    let local = LocalCluster::new_with_equal_stakes(num_nodes, 10_000, 100);
    cluster_tests::spend_and_verify_all_nodes(
        &local.entry_point_info,
        &local.funding_keypair,
        num_nodes,
        HashSet::new(),
    );
}

#[test]
#[serial]
fn test_spend_and_verify_all_nodes_3() {
    solana_logger::setup();
    error!("test_spend_and_verify_all_nodes_3");
    let num_nodes = 3;
    let local = LocalCluster::new_with_equal_stakes(num_nodes, 10_000, 100);
    cluster_tests::spend_and_verify_all_nodes(
        &local.entry_point_info,
        &local.funding_keypair,
        num_nodes,
        HashSet::new(),
    );
}

#[test]
#[allow(unused_attributes)]
#[ignore]
fn test_spend_and_verify_all_nodes_env_num_nodes() {
    solana_logger::setup();
    let num_nodes: usize = std::env::var("NUM_NODES")
        .expect("please set environment variable NUM_NODES")
        .parse()
        .expect("could not parse NUM_NODES as a number");
    let local = LocalCluster::new_with_equal_stakes(num_nodes, 10_000, 100);
    cluster_tests::spend_and_verify_all_nodes(
        &local.entry_point_info,
        &local.funding_keypair,
        num_nodes,
        HashSet::new(),
    );
}

#[allow(unused_attributes)]
#[test]
#[should_panic]
fn test_fullnode_exit_default_config_should_panic() {
    solana_logger::setup();
    error!("test_fullnode_exit_default_config_should_panic");
    let num_nodes = 2;
    let local = LocalCluster::new_with_equal_stakes(num_nodes, 10_000, 100);
    cluster_tests::fullnode_exit(&local.entry_point_info, num_nodes);
}

#[test]
#[serial]
fn test_fullnode_exit_2() {
    solana_logger::setup();
    error!("test_fullnode_exit_2");
    let num_nodes = 2;
    let mut validator_config = ValidatorConfig::default();
    validator_config.rpc_config.enable_fullnode_exit = true;
    let config = ClusterConfig {
        cluster_lamports: 10_000,
        node_stakes: vec![100; num_nodes],
        validator_configs: vec![validator_config.clone(); num_nodes],
        ..ClusterConfig::default()
    };
    let local = LocalCluster::new(&config);
    cluster_tests::fullnode_exit(&local.entry_point_info, num_nodes);
}

// Cluster needs a supermajority to remain, so the minimum size for this test is 4
#[test]
#[serial]
fn test_leader_failure_4() {
    solana_logger::setup();
    error!("test_leader_failure_4");
    let num_nodes = 4;
    let mut validator_config = ValidatorConfig::default();
    validator_config.rpc_config.enable_fullnode_exit = true;
    let config = ClusterConfig {
        cluster_lamports: 10_000,
        node_stakes: vec![100; 4],
        validator_configs: vec![validator_config.clone(); num_nodes],
        ..ClusterConfig::default()
    };
    let local = LocalCluster::new(&config);
    cluster_tests::kill_entry_and_spend_and_verify_rest(
        &local.entry_point_info,
        &local.funding_keypair,
        num_nodes,
        config.ticks_per_slot * config.poh_config.target_tick_duration.as_millis() as u64,
    );
}
#[test]
#[serial]
fn test_two_unbalanced_stakes() {
    solana_logger::setup();
    error!("test_two_unbalanced_stakes");
    let mut validator_config = ValidatorConfig::default();
    let num_ticks_per_second = 100;
    let num_ticks_per_slot = 10;
    let num_slots_per_epoch = MINIMUM_SLOTS_PER_EPOCH as u64;

    validator_config.rpc_config.enable_fullnode_exit = true;
    let mut cluster = LocalCluster::new(&ClusterConfig {
        node_stakes: vec![999_990, 3],
        cluster_lamports: 1_000_000,
        validator_configs: vec![validator_config.clone(); 2],
        ticks_per_slot: num_ticks_per_slot,
        slots_per_epoch: num_slots_per_epoch,
        poh_config: PohConfig::new_sleep(Duration::from_millis(1000 / num_ticks_per_second)),
        ..ClusterConfig::default()
    });

    cluster_tests::sleep_n_epochs(
        10.0,
        &cluster.genesis_block.poh_config,
        num_ticks_per_slot,
        num_slots_per_epoch,
    );
    cluster.close_preserve_ledgers();
    let leader_pubkey = cluster.entry_point_info.id;
    let leader_ledger = cluster.fullnode_infos[&leader_pubkey]
        .info
        .ledger_path
        .clone();
    cluster_tests::verify_ledger_ticks(&leader_ledger, num_ticks_per_slot as usize);
}

#[test]
#[serial]
fn test_forwarding() {
    // Set up a cluster where one node is never the leader, so all txs sent to this node
    // will have to be forwarded in order to be confirmed
    let config = ClusterConfig {
        node_stakes: vec![999_990, 3],
        cluster_lamports: 2_000_000,
        validator_configs: vec![ValidatorConfig::default(); 2],
        ..ClusterConfig::default()
    };
    let cluster = LocalCluster::new(&config);

    let (cluster_nodes, _) = discover_cluster(&cluster.entry_point_info.gossip, 2).unwrap();
    assert!(cluster_nodes.len() >= 2);

    let leader_pubkey = cluster.entry_point_info.id;

    let validator_info = cluster_nodes
        .iter()
        .find(|c| c.id != leader_pubkey)
        .unwrap();

    // Confirm that transactions were forwarded to and processed by the leader.
    cluster_tests::send_many_transactions(&validator_info, &cluster.funding_keypair, 10, 20);
}

#[test]
#[serial]
fn test_restart_node() {
    solana_logger::setup();
    error!("test_restart_node");
    let slots_per_epoch = MINIMUM_SLOTS_PER_EPOCH as u64;
    let ticks_per_slot = 16;
    let validator_config = ValidatorConfig::default();
    let mut cluster = LocalCluster::new(&ClusterConfig {
        node_stakes: vec![3],
        cluster_lamports: 100,
        validator_configs: vec![validator_config.clone()],
        ticks_per_slot,
        slots_per_epoch,
        ..ClusterConfig::default()
    });
    let nodes = cluster.get_node_pubkeys();
    cluster_tests::sleep_n_epochs(
        1.0,
        &cluster.genesis_block.poh_config,
        clock::DEFAULT_TICKS_PER_SLOT,
        slots_per_epoch,
    );
    cluster.exit_restart_node(&nodes[0], validator_config);
    cluster_tests::sleep_n_epochs(
        0.5,
        &cluster.genesis_block.poh_config,
        clock::DEFAULT_TICKS_PER_SLOT,
        slots_per_epoch,
    );
    cluster_tests::send_many_transactions(
        &cluster.entry_point_info,
        &cluster.funding_keypair,
        10,
        1,
    );
}

#[test]
#[serial]
fn test_listener_startup() {
    let config = ClusterConfig {
        node_stakes: vec![100; 1],
        cluster_lamports: 1_000,
        num_listeners: 3,
        validator_configs: vec![ValidatorConfig::default(); 1],
        ..ClusterConfig::default()
    };
    let cluster = LocalCluster::new(&config);
    let (cluster_nodes, _) = discover_cluster(&cluster.entry_point_info.gossip, 4).unwrap();
    assert_eq!(cluster_nodes.len(), 4);
}

#[allow(unused_attributes)]
#[test]
#[serial]
fn test_snapshot_restart_locktower() {
    // First set up the cluster with 2 nodes
    let snapshot_interval_slots = 10;
    let num_account_paths = 4;

    let leader_snapshot_test_config =
        setup_snapshot_validator_config(snapshot_interval_slots, num_account_paths);
    let validator_snapshot_test_config =
        setup_snapshot_validator_config(snapshot_interval_slots, num_account_paths);

    let config = ClusterConfig {
        node_stakes: vec![10000, 10],
        cluster_lamports: 100000,
        validator_configs: vec![
            leader_snapshot_test_config.validator_config.clone(),
            validator_snapshot_test_config.validator_config.clone(),
        ],
        ..ClusterConfig::default()
    };

    let mut cluster = LocalCluster::new(&config);

    // Let the nodes run for a while, then stop one of the validators
    sleep(Duration::from_millis(5000));
    let all_pubkeys = cluster.get_node_pubkeys();
    let validator_id = all_pubkeys
        .into_iter()
        .find(|x| *x != cluster.entry_point_info.id)
        .unwrap();
    let validator_info = cluster.exit_node(&validator_id);

    // Get slot after which this was generated
    let snapshot_package_output_path = &leader_snapshot_test_config
        .validator_config
        .snapshot_config
        .as_ref()
        .unwrap()
        .snapshot_package_output_path;
    let tar = snapshot_utils::get_snapshot_tar_path(&snapshot_package_output_path);
    wait_for_next_snapshot(&cluster, &tar);

    // Copy tar to validator's snapshot output directory
    let validator_tar_path =
        snapshot_utils::get_snapshot_tar_path(&validator_snapshot_test_config.snapshot_output_path);
    fs::hard_link(tar, &validator_tar_path).unwrap();

    // Restart validator from snapshot, the validator's locktower state in this snapshot
    // will contain slots < the root bank of the snapshot. Validator should not panic.
    cluster.restart_node(&validator_id, validator_info);

    // Test cluster can still make progress and get confirmations in locktower
    cluster_tests::spend_and_verify_all_nodes(
        &cluster.entry_point_info,
        &cluster.funding_keypair,
        1,
        HashSet::new(),
    );
}

#[test]
#[serial]
fn test_snapshots_blocktree_floor() {
    // First set up the cluster with 1 snapshotting leader
    let snapshot_interval_slots = 10;
    let num_account_paths = 4;

    let leader_snapshot_test_config =
        setup_snapshot_validator_config(snapshot_interval_slots, num_account_paths);
    let validator_snapshot_test_config =
        setup_snapshot_validator_config(snapshot_interval_slots, num_account_paths);

    let snapshot_package_output_path = &leader_snapshot_test_config
        .validator_config
        .snapshot_config
        .as_ref()
        .unwrap()
        .snapshot_package_output_path;

    let config = ClusterConfig {
        node_stakes: vec![10000],
        cluster_lamports: 100000,
        validator_configs: vec![leader_snapshot_test_config.validator_config.clone()],
        ..ClusterConfig::default()
    };

    let mut cluster = LocalCluster::new(&config);

    trace!("Waiting for snapshot tar to be generated with slot",);

    let tar = snapshot_utils::get_snapshot_tar_path(&snapshot_package_output_path);
    loop {
        if tar.exists() {
            trace!("snapshot tar exists");
            break;
        }
        sleep(Duration::from_millis(5000));
    }

    // Copy tar to validator's snapshot output directory
    let validator_tar_path =
        snapshot_utils::get_snapshot_tar_path(&validator_snapshot_test_config.snapshot_output_path);
    fs::hard_link(tar, &validator_tar_path).unwrap();
    let slot_floor = snapshot_utils::bank_slot_from_archive(&validator_tar_path).unwrap();

    // Start up a new node from a snapshot
    let validator_stake = 5;
    cluster.add_validator(
        &validator_snapshot_test_config.validator_config,
        validator_stake,
    );
    let all_pubkeys = cluster.get_node_pubkeys();
    let validator_id = all_pubkeys
        .into_iter()
        .find(|x| *x != cluster.entry_point_info.id)
        .unwrap();
    let validator_client = cluster.get_validator_client(&validator_id).unwrap();
    let mut current_slot = 0;

    // Let this validator run a while with repair
    let target_slot = slot_floor + 40;
    while current_slot <= target_slot {
        trace!("current_slot: {}", current_slot);
        if let Ok(slot) = validator_client.get_slot() {
            current_slot = slot;
        } else {
            continue;
        }
        sleep(Duration::from_secs(1));
    }

    // Check the validator ledger doesn't contain any slots < slot_floor
    cluster.close_preserve_ledgers();
    let validator_ledger_path = &cluster.fullnode_infos[&validator_id];
    let blocktree = Blocktree::open(&validator_ledger_path.info.ledger_path).unwrap();

    // Skip the zeroth slot in blocktree that the ledger is initialized with
    let (first_slot, _) = blocktree.slot_meta_iterator(1).unwrap().next().unwrap();

    assert_eq!(first_slot, slot_floor);
}

#[test]
#[serial]
fn test_snapshots_restart_validity() {
    solana_logger::setup();
    let snapshot_interval_slots = 10;
    let num_account_paths = 4;
    let mut snapshot_test_config =
        setup_snapshot_validator_config(snapshot_interval_slots, num_account_paths);
    let snapshot_package_output_path = &snapshot_test_config
        .validator_config
        .snapshot_config
        .as_ref()
        .unwrap()
        .snapshot_package_output_path;

    // Set up the cluster with 1 snapshotting validator
    let mut all_account_storage_dirs = vec![vec![]];

    std::mem::swap(
        &mut all_account_storage_dirs[0],
        &mut snapshot_test_config.account_storage_dirs,
    );

    let config = ClusterConfig {
        node_stakes: vec![10000],
        cluster_lamports: 100000,
        validator_configs: vec![snapshot_test_config.validator_config.clone()],
        ..ClusterConfig::default()
    };

    // Create and reboot the node from snapshot `num_runs` times
    let num_runs = 3;
    let mut expected_balances = HashMap::new();
    let mut cluster = LocalCluster::new(&config);
    for i in 1..num_runs {
        info!("run {}", i);
        // Push transactions to one of the nodes and confirm that they were
        // forwarded to and processed by the cluster.
        trace!("Sending transactions");
        let new_balances = cluster_tests::send_many_transactions(
            &cluster.entry_point_info,
            &cluster.funding_keypair,
            10,
            10,
        );

        expected_balances.extend(new_balances);

        let tar = snapshot_utils::get_snapshot_tar_path(&snapshot_package_output_path);
        wait_for_next_snapshot(&cluster, &tar);

        // Create new account paths since fullnode exit is not guaranteed to cleanup RPC threads,
        // which may delete the old accounts on exit at any point
        let (new_account_storage_dirs, new_account_storage_paths) =
            generate_account_paths(num_account_paths);
        all_account_storage_dirs.push(new_account_storage_dirs);
        snapshot_test_config.validator_config.account_paths = Some(new_account_storage_paths);

        // Restart node
        trace!("Restarting cluster from snapshot");
        let nodes = cluster.get_node_pubkeys();
        cluster.exit_restart_node(&nodes[0], snapshot_test_config.validator_config.clone());

        // Verify account balances on validator
        trace!("Verifying balances");
        cluster_tests::verify_balances(expected_balances.clone(), &cluster.entry_point_info);

        // Check that we can still push transactions
        trace!("Spending and verifying");
        cluster_tests::spend_and_verify_all_nodes(
            &cluster.entry_point_info,
            &cluster.funding_keypair,
            1,
            HashSet::new(),
        );
    }
}

#[test]
#[serial]
fn test_fail_entry_verification_leader() {
    test_faulty_node(BroadcastStageType::FailEntryVerification);
}

#[test]
#[allow(unused_attributes)]
#[ignore]
fn test_fake_blobs_broadcast_leader() {
    test_faulty_node(BroadcastStageType::BroadcastFakeBlobs);
}

fn test_faulty_node(faulty_node_type: BroadcastStageType) {
    solana_logger::setup();
    let num_nodes = 4;
    let validator_config = ValidatorConfig::default();
    let mut error_validator_config = ValidatorConfig::default();
    error_validator_config.broadcast_stage_type = faulty_node_type.clone();
    let mut validator_configs = vec![validator_config; num_nodes - 1];
    validator_configs.push(error_validator_config);
    let mut node_stakes = vec![100; num_nodes - 1];
    node_stakes.push(50);
    let cluster_config = ClusterConfig {
        cluster_lamports: 10_000,
        node_stakes,
        validator_configs,
        slots_per_epoch: MINIMUM_SLOTS_PER_EPOCH * 2 as u64,
        stakers_slot_offset: MINIMUM_SLOTS_PER_EPOCH * 2 as u64,
        ..ClusterConfig::default()
    };

    let cluster = LocalCluster::new(&cluster_config);
    let epoch_schedule = EpochSchedule::new(
        cluster_config.slots_per_epoch,
        cluster_config.stakers_slot_offset,
        true,
    );
    let num_warmup_epochs = epoch_schedule.get_stakers_epoch(0) + 1;

    // Wait for the corrupted leader to be scheduled after the warmup epochs expire
    cluster_tests::sleep_n_epochs(
        (num_warmup_epochs + 1) as f64,
        &cluster.genesis_block.poh_config,
        cluster_config.ticks_per_slot,
        cluster_config.slots_per_epoch,
    );

    let corrupt_node = cluster
        .fullnode_infos
        .iter()
        .find(|(_, v)| v.config.broadcast_stage_type == faulty_node_type)
        .unwrap()
        .0;
    let mut ignore = HashSet::new();
    ignore.insert(*corrupt_node);

    // Verify that we can still spend and verify even in the presence of corrupt nodes
    cluster_tests::spend_and_verify_all_nodes(
        &cluster.entry_point_info,
        &cluster.funding_keypair,
        num_nodes,
        ignore,
    );
}

#[test]
fn test_repairman_catchup() {
    solana_logger::setup();
    error!("test_repairman_catchup");
    run_repairman_catchup(3);
}

fn run_repairman_catchup(num_repairmen: u64) {
    let mut validator_config = ValidatorConfig::default();
    let num_ticks_per_second = 100;
    let num_ticks_per_slot = 40;
    let num_slots_per_epoch = MINIMUM_SLOTS_PER_EPOCH as u64;
    let num_root_buffer_slots = 10;
    // Calculate the leader schedule num_root_buffer_slots ahead. Otherwise, if stakers_slot_offset ==
    // num_slots_per_epoch, and num_slots_per_epoch == MINIMUM_SLOTS_PER_EPOCH, then repairmen
    // will stop sending repairs after the last slot in epoch 1 (0-indexed), because the root
    // is at most in the first epoch.
    //
    // For example:
    // Assume:
    // 1) num_slots_per_epoch = 32
    // 2) stakers_slot_offset = 32
    // 3) MINIMUM_SLOTS_PER_EPOCH = 32
    //
    // Then the last slot in epoch 1 is slot 63. After completing slots 0 to 63, the root on the
    // repairee is at most 31. Because the stakers_slot_offset == 32, the max confirmed epoch
    // on the repairee is epoch 1.
    // Thus the repairmen won't send any slots past epoch 1 (slot 63) to this repairee until the repairee
    // updates their root, and the repairee can't update their root until they get slot 64, so no progress
    // is made. This is also not accounting for the fact that the repairee may not vote on every slot, so
    // their root could actually be much less than 31. This is why we give a num_root_buffer_slots buffer.
    let stakers_slot_offset = num_slots_per_epoch + num_root_buffer_slots;

    validator_config.rpc_config.enable_fullnode_exit = true;

    let lamports_per_repairman = 1000;

    // Make the repairee_stake small relative to the repairmen stake so that the repairee doesn't
    // get included in the leader schedule, causing slots to get skipped while it's still trying
    // to catch up
    let repairee_stake = 3;
    let cluster_lamports = 2 * lamports_per_repairman * num_repairmen + repairee_stake;
    let node_stakes: Vec<_> = (0..num_repairmen).map(|_| lamports_per_repairman).collect();
    let mut cluster = LocalCluster::new(&ClusterConfig {
        node_stakes,
        cluster_lamports,
        validator_configs: vec![validator_config.clone(); num_repairmen as usize],
        ticks_per_slot: num_ticks_per_slot,
        slots_per_epoch: num_slots_per_epoch,
        stakers_slot_offset,
        poh_config: PohConfig::new_sleep(Duration::from_millis(1000 / num_ticks_per_second)),
        ..ClusterConfig::default()
    });

    let repairman_pubkeys: HashSet<_> = cluster.get_node_pubkeys().into_iter().collect();
    let epoch_schedule = EpochSchedule::new(num_slots_per_epoch, stakers_slot_offset, true);
    let num_warmup_epochs = epoch_schedule.get_stakers_epoch(0) + 1;

    // Sleep for longer than the first N warmup epochs, with a one epoch buffer for timing issues
    cluster_tests::sleep_n_epochs(
        num_warmup_epochs as f64 + 1.0,
        &cluster.genesis_block.poh_config,
        num_ticks_per_slot,
        num_slots_per_epoch,
    );

    // Start up a new node, wait for catchup. Backwards repair won't be sufficient because the
    // leader is sending blobs past this validator's first two confirmed epochs. Thus, the repairman
    // protocol will have to kick in for this validator to repair.
    cluster.add_validator(&validator_config, repairee_stake);

    let all_pubkeys = cluster.get_node_pubkeys();
    let repairee_id = all_pubkeys
        .into_iter()
        .find(|x| !repairman_pubkeys.contains(x))
        .unwrap();

    // Wait for repairman protocol to catch this validator up
    let repairee_client = cluster.get_validator_client(&repairee_id).unwrap();
    let mut current_slot = 0;

    // Make sure this validator can get repaired past the first few warmup epochs
    let target_slot = (num_warmup_epochs) * num_slots_per_epoch + 1;
    while current_slot <= target_slot {
        trace!("current_slot: {}", current_slot);
        if let Ok(slot) = repairee_client.get_slot() {
            current_slot = slot;
        } else {
            continue;
        }
        sleep(Duration::from_secs(1));
    }
}

fn wait_for_next_snapshot<P: AsRef<Path>>(cluster: &LocalCluster, tar: P) {
    // Get slot after which this was generated
    let client = cluster
        .get_validator_client(&cluster.entry_point_info.id)
        .unwrap();
    let last_slot = client.get_slot().expect("Couldn't get slot");

    // Wait for a snapshot for a bank >= last_slot to be made so we know that the snapshot
    // must include the transactions just pushed
    trace!(
        "Waiting for snapshot tar to be generated with slot > {}",
        last_slot
    );
    loop {
        if tar.as_ref().exists() {
            trace!("snapshot tar exists");
            let slot = snapshot_utils::bank_slot_from_archive(&tar).unwrap();
            if slot >= last_slot {
                break;
            }
            trace!("snapshot tar slot {} < last_slot {}", slot, last_slot);
        }
        sleep(Duration::from_millis(5000));
    }
}

fn generate_account_paths(num_account_paths: usize) -> (Vec<TempDir>, String) {
    let account_storage_dirs: Vec<TempDir> = (0..num_account_paths)
        .map(|_| TempDir::new().unwrap())
        .collect();
    let account_storage_paths: Vec<_> = account_storage_dirs
        .iter()
        .map(|a| a.path().to_str().unwrap().to_string())
        .collect();
    let account_storage_paths = AccountsDB::format_paths(account_storage_paths);
    (account_storage_dirs, account_storage_paths)
}

struct SnapshotValidatorConfig {
    _snapshot_dir: TempDir,
    snapshot_output_path: TempDir,
    account_storage_dirs: Vec<TempDir>,
    validator_config: ValidatorConfig,
}

fn setup_snapshot_validator_config(
    snapshot_interval_slots: usize,
    num_account_paths: usize,
) -> SnapshotValidatorConfig {
    // Create the snapshot config
    let snapshot_dir = TempDir::new().unwrap();
    let snapshot_output_path = TempDir::new().unwrap();
    let snapshot_config = SnapshotConfig {
        snapshot_interval_slots,
        snapshot_package_output_path: PathBuf::from(snapshot_output_path.path()),
        snapshot_path: PathBuf::from(snapshot_dir.path()),
    };

    // Create the account paths
    let (account_storage_dirs, account_storage_paths) = generate_account_paths(num_account_paths);

    // Create the validator config
    let mut validator_config = ValidatorConfig::default();
    validator_config.rpc_config.enable_fullnode_exit = true;
    validator_config.snapshot_config = Some(snapshot_config);
    validator_config.account_paths = Some(account_storage_paths);

    SnapshotValidatorConfig {
        _snapshot_dir: snapshot_dir,
        snapshot_output_path,
        account_storage_dirs,
        validator_config,
    }
}
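One detail worth working through is `stakers_slot_offset = num_slots_per_epoch + num_root_buffer_slots` in `run_repairman_catchup` above. Plugging in the numbers from the example in its comment (32-slot epochs, 10 buffer slots) shows why the buffer is needed; a small self-checking sketch:

fn main() {
    // Values from the example in the comment above.
    let num_slots_per_epoch: u64 = 32;
    let num_root_buffer_slots: u64 = 10;

    // Epochs are 0-indexed: slots 0..=63 cover epochs 0 and 1.
    let last_slot_epoch_1 = 2 * num_slots_per_epoch - 1;
    assert_eq!(last_slot_epoch_1, 63);

    // With stakers_slot_offset == 32, a repairee whose root is at most slot 31
    // has confirmed the leader schedule only through epoch 1, so repairmen stop
    // sending repairs at slot 63 and the repairee deadlocks waiting for slot 64.
    //
    // The buffer extends how far ahead the schedule is computed, so repairs keep
    // flowing past the epoch boundary while the repairee's root catches up.
    let stakers_slot_offset = num_slots_per_epoch + num_root_buffer_slots;
    assert_eq!(stakers_slot_offset, 42);
}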
local_cluster/src/tests/replicator.rs (new file, 184 lines)
@@ -0,0 +1,184 @@
use crate::local_cluster::{ClusterConfig, LocalCluster};
use serial_test_derive::serial;
use solana_client::thin_client::create_client;
use solana_core::blocktree::{create_new_tmp_ledger, get_tmp_ledger_path, Blocktree};
use solana_core::cluster_info::{ClusterInfo, Node, FULLNODE_PORT_RANGE};
use solana_core::contact_info::ContactInfo;
use solana_core::gossip_service::discover_cluster;
use solana_core::replicator::Replicator;
use solana_core::storage_stage::SLOTS_PER_TURN_TEST;
use solana_core::validator::ValidatorConfig;
use solana_sdk::genesis_block::create_genesis_block;
use solana_sdk::signature::{Keypair, KeypairUtil};
use std::fs::remove_dir_all;
use std::sync::{Arc, RwLock};

/// Start the cluster with the given configuration and wait until the replicators are discovered,
/// then download blobs from one of them.
fn run_replicator_startup_basic(num_nodes: usize, num_replicators: usize) {
    solana_logger::setup();
    info!("starting replicator test");

    let mut validator_config = ValidatorConfig::default();
    let slots_per_segment = 8;
    validator_config.storage_slots_per_turn = SLOTS_PER_TURN_TEST;
    let config = ClusterConfig {
        validator_configs: vec![validator_config; num_nodes],
        num_replicators,
        node_stakes: vec![100; num_nodes],
        cluster_lamports: 10_000,
        // keep a low slot/segment count to speed up the test
        slots_per_segment,
        ..ClusterConfig::default()
    };
    let cluster = LocalCluster::new(&config);

    let (cluster_nodes, cluster_replicators) = discover_cluster(
        &cluster.entry_point_info.gossip,
        num_nodes + num_replicators,
    )
    .unwrap();
    assert_eq!(
        cluster_nodes.len() + cluster_replicators.len(),
        num_nodes + num_replicators
    );
    let mut replicator_count = 0;
    let mut replicator_info = ContactInfo::default();
    for node in &cluster_replicators {
        info!("storage: {:?} rpc: {:?}", node.storage_addr, node.rpc);
        if ContactInfo::is_valid_address(&node.storage_addr) {
            replicator_count += 1;
            replicator_info = node.clone();
        }
    }
    assert_eq!(replicator_count, num_replicators);

    let cluster_info = Arc::new(RwLock::new(ClusterInfo::new_with_invalid_keypair(
        cluster_nodes[0].clone(),
    )));
    let path = get_tmp_ledger_path("test");
    let blocktree = Arc::new(Blocktree::open(&path).unwrap());
    Replicator::download_from_replicator(
        &cluster_info,
        &replicator_info,
        &blocktree,
        slots_per_segment,
    )
    .unwrap();
}

#[test]
#[serial]
fn test_replicator_startup_1_node() {
    run_replicator_startup_basic(1, 1);
}

#[test]
#[serial]
fn test_replicator_startup_2_nodes() {
    run_replicator_startup_basic(2, 1);
}

#[test]
#[serial]
fn test_replicator_startup_leader_hang() {
    use std::net::{IpAddr, Ipv4Addr, SocketAddr};

    solana_logger::setup();
    info!("starting replicator test");

    let leader_ledger_path = std::path::PathBuf::from("replicator_test_leader_ledger");
    let (genesis_block, _mint_keypair) = create_genesis_block(10_000);
    let (replicator_ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_block);

    {
        let replicator_keypair = Arc::new(Keypair::new());
        let storage_keypair = Arc::new(Keypair::new());

        info!("starting replicator node");
        let replicator_node = Node::new_localhost_with_pubkey(&replicator_keypair.pubkey());

        let fake_gossip = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 0);
        let leader_info = ContactInfo::new_gossip_entry_point(&fake_gossip);

        let replicator_res = Replicator::new(
            &replicator_ledger_path,
            replicator_node,
            leader_info,
            replicator_keypair,
            storage_keypair,
        );

        assert!(replicator_res.is_err());
    }

    let _ignored = Blocktree::destroy(&leader_ledger_path);
    let _ignored = Blocktree::destroy(&replicator_ledger_path);
    let _ignored = remove_dir_all(&leader_ledger_path);
    let _ignored = remove_dir_all(&replicator_ledger_path);
}

#[test]
#[serial]
fn test_replicator_startup_ledger_hang() {
    solana_logger::setup();
    info!("starting replicator test");
    let mut validator_config = ValidatorConfig::default();
    validator_config.storage_slots_per_turn = SLOTS_PER_TURN_TEST;
    let cluster = LocalCluster::new_with_equal_stakes(2, 10_000, 100);

    info!("starting replicator node");
    let bad_keys = Arc::new(Keypair::new());
    let storage_keypair = Arc::new(Keypair::new());
    let mut replicator_node = Node::new_localhost_with_pubkey(&bad_keys.pubkey());

    // Pass bad TVU sockets to prevent successful ledger download
    replicator_node.sockets.tvu = vec![std::net::UdpSocket::bind("0.0.0.0:0").unwrap()];
    let (replicator_ledger_path, _blockhash) = create_new_tmp_ledger!(&cluster.genesis_block);

    let replicator_res = Replicator::new(
        &replicator_ledger_path,
        replicator_node,
        cluster.entry_point_info.clone(),
        bad_keys,
        storage_keypair,
    );

    assert!(replicator_res.is_err());
}

#[test]
#[serial]
fn test_account_setup() {
    let num_nodes = 1;
    let num_replicators = 1;
    let mut validator_config = ValidatorConfig::default();
    validator_config.storage_slots_per_turn = SLOTS_PER_TURN_TEST;
    let config = ClusterConfig {
        validator_configs: vec![ValidatorConfig::default(); num_nodes],
        num_replicators,
        node_stakes: vec![100; num_nodes],
        cluster_lamports: 10_000,
        ..ClusterConfig::default()
    };
    let cluster = LocalCluster::new(&config);

    let _ = discover_cluster(
        &cluster.entry_point_info.gossip,
        num_nodes + num_replicators as usize,
    )
    .unwrap();
    // Now check that the cluster actually has accounts for the replicator.
    let client = create_client(
        cluster.entry_point_info.client_facing_addr(),
        FULLNODE_PORT_RANGE,
    );
    cluster.replicator_infos.iter().for_each(|(_, value)| {
        assert_eq!(
            client
                .poll_get_balance(&value.replicator_storage_pubkey)
                .unwrap(),
            1
        );
    });
}