Remove archiver and storage program (#9992)

automerge
This commit is contained in:
Jack May
2020-05-14 18:22:47 -07:00
committed by GitHub
parent 9ef9969d29
commit eb1acaf927
117 changed files with 383 additions and 7735 deletions

View File

@@ -1,195 +0,0 @@
use log::*;
use serial_test_derive::serial;
use solana_archiver_lib::archiver::Archiver;
use solana_client::thin_client::create_client;
use solana_core::{
cluster_info::{ClusterInfo, Node, VALIDATOR_PORT_RANGE},
contact_info::ContactInfo,
gossip_service::discover_cluster,
serve_repair::ServeRepair,
storage_stage::SLOTS_PER_TURN_TEST,
validator::ValidatorConfig,
};
use solana_ledger::{blockstore::Blockstore, create_new_tmp_ledger, get_tmp_ledger_path};
use solana_local_cluster::local_cluster::{ClusterConfig, LocalCluster};
use solana_sdk::{
commitment_config::CommitmentConfig,
genesis_config::create_genesis_config,
signature::{Keypair, Signer},
};
use std::{fs::remove_dir_all, sync::Arc};
/// Start the cluster with the given configuration and wait till the archivers are discovered
/// Then download shreds from one of them.
/// Spin up a cluster with `num_nodes` validators and `num_archivers`
/// archivers, wait until gossip discovery sees every node, then download
/// shreds from one of the discovered archivers.
fn run_archiver_startup_basic(num_nodes: usize, num_archivers: usize) {
    solana_logger::setup();
    info!("starting archiver test");

    // Keep the segment small so the download step finishes quickly.
    let slots_per_segment = 8;
    let mut validator_config = ValidatorConfig::default();
    validator_config.storage_slots_per_turn = SLOTS_PER_TURN_TEST;

    let cluster_config = ClusterConfig {
        validator_configs: vec![validator_config; num_nodes],
        num_archivers,
        node_stakes: vec![100; num_nodes],
        cluster_lamports: 10_000,
        slots_per_segment,
        ..ClusterConfig::default()
    };
    let cluster = LocalCluster::new(&cluster_config);

    // Discovery must account for every validator and every archiver.
    let (discovered_validators, discovered_archivers) =
        discover_cluster(&cluster.entry_point_info.gossip, num_nodes + num_archivers).unwrap();
    assert_eq!(
        discovered_validators.len() + discovered_archivers.len(),
        num_nodes + num_archivers
    );

    // Count archivers advertising a usable storage address; remember the
    // last one seen as the download target.
    let mut valid_archivers = 0;
    let mut download_target = ContactInfo::default();
    for archiver in &discovered_archivers {
        info!("storage: {:?} rpc: {:?}", archiver.storage_addr, archiver.rpc);
        if ContactInfo::is_valid_address(&archiver.storage_addr) {
            valid_archivers += 1;
            download_target = archiver.clone();
        }
    }
    assert_eq!(valid_archivers, num_archivers);

    let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(
        discovered_validators[0].clone(),
    ));
    let serve_repair = ServeRepair::new(cluster_info);
    let ledger_path = get_tmp_ledger_path!();
    let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
    Archiver::download_from_archiver(
        &serve_repair,
        &download_target,
        &blockstore,
        slots_per_segment,
    )
    .unwrap();
}
/// Smoke test: one validator plus one archiver can start up and serve a
/// shred download. Ignored by default because cluster startup is slow.
#[test]
#[ignore]
#[serial]
fn test_archiver_startup_1_node() {
    run_archiver_startup_basic(1, 1);
}
/// Same as the single-node smoke test but with two validators, exercising
/// archiver discovery across more than one gossip peer. Ignored by default
/// because cluster startup is slow.
#[test]
#[ignore]
#[serial]
fn test_archiver_startup_2_nodes() {
    run_archiver_startup_basic(2, 1);
}
/// Point an archiver at a gossip entrypoint nobody is listening on and
/// verify that `Archiver::new` returns an error instead of hanging.
#[test]
#[serial]
fn test_archiver_startup_leader_hang() {
    use std::net::{IpAddr, Ipv4Addr, SocketAddr};

    solana_logger::setup();
    info!("starting archiver test");

    let leader_ledger_path = std::path::PathBuf::from("archiver_test_leader_ledger");
    let (genesis_config, _mint_keypair) = create_genesis_config(10_000);
    let (archiver_ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_config);

    {
        let archiver_keypair = Arc::new(Keypair::new());
        let storage_keypair = Arc::new(Keypair::new());

        info!("starting archiver node");
        let archiver_node = Node::new_localhost_with_pubkey(&archiver_keypair.pubkey());
        // 0.0.0.0:0 is never a live gossip endpoint, so startup must fail.
        let unreachable_gossip = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 0);
        let leader_info = ContactInfo::new_gossip_entry_point(&unreachable_gossip);

        let startup_result = Archiver::new(
            &archiver_ledger_path,
            archiver_node,
            leader_info,
            archiver_keypair,
            storage_keypair,
            CommitmentConfig::recent(),
        );
        assert!(startup_result.is_err());
    }

    // Best-effort cleanup of both ledger directories; errors are ignored.
    let _ignored = Blockstore::destroy(&leader_ledger_path);
    let _ignored = Blockstore::destroy(&archiver_ledger_path);
    let _ignored = remove_dir_all(&leader_ledger_path);
    let _ignored = remove_dir_all(&archiver_ledger_path);
}
/// Give the archiver unusable TVU sockets so it cannot download the ledger,
/// and verify that `Archiver::new` reports an error rather than hanging.
#[test]
#[serial]
fn test_archiver_startup_ledger_hang() {
    solana_logger::setup();
    info!("starting archiver test");

    let mut validator_config = ValidatorConfig::default();
    validator_config.storage_slots_per_turn = SLOTS_PER_TURN_TEST;
    let cluster = LocalCluster::new_with_equal_stakes(2, 10_000, 100);

    info!("starting archiver node");
    let archiver_keypair = Arc::new(Keypair::new());
    let storage_keypair = Arc::new(Keypair::new());
    let mut archiver_node = Node::new_localhost_with_pubkey(&archiver_keypair.pubkey());
    // Replace the TVU sockets with a fresh, unconnected one so the ledger
    // download cannot succeed.
    archiver_node.sockets.tvu = vec![std::net::UdpSocket::bind("0.0.0.0:0").unwrap()];

    let (archiver_ledger_path, _blockhash) = create_new_tmp_ledger!(&cluster.genesis_config);
    let startup_result = Archiver::new(
        &archiver_ledger_path,
        archiver_node,
        cluster.entry_point_info.clone(),
        archiver_keypair,
        storage_keypair,
        CommitmentConfig::recent(),
    );
    assert!(startup_result.is_err());
}
/// Verify that launching a cluster with archivers funds each archiver's
/// storage account: every discovered archiver's storage pubkey must hold a
/// balance of exactly 1 lamport.
#[test]
#[serial]
fn test_account_setup() {
    let num_nodes = 1;
    let num_archivers = 1;
    // Use the fast storage-turn schedule, matching the other archiver tests.
    // Fix: previously this config was built but never passed to the cluster
    // (the cluster was constructed with `ValidatorConfig::default()`).
    let mut validator_config = ValidatorConfig::default();
    validator_config.storage_slots_per_turn = SLOTS_PER_TURN_TEST;
    let config = ClusterConfig {
        validator_configs: vec![validator_config; num_nodes],
        num_archivers,
        node_stakes: vec![100; num_nodes],
        cluster_lamports: 10_000,
        ..ClusterConfig::default()
    };
    let cluster = LocalCluster::new(&config);

    // Wait until gossip has discovered every validator and archiver.
    // (`num_archivers` is already `usize`; no cast needed.)
    let _ =
        discover_cluster(&cluster.entry_point_info.gossip, num_nodes + num_archivers).unwrap();

    // Now check that the cluster actually has accounts for the archiver.
    let client = create_client(
        cluster.entry_point_info.client_facing_addr(),
        VALIDATOR_PORT_RANGE,
    );
    cluster.archiver_infos.iter().for_each(|(_, value)| {
        assert_eq!(
            client
                .poll_get_balance_with_commitment(
                    &value.archiver_storage_pubkey,
                    CommitmentConfig::recent()
                )
                .unwrap(),
            1
        );
    });
}

View File

@@ -266,7 +266,7 @@ fn run_cluster_partition(
);
let mut cluster = LocalCluster::new(&config);
let (cluster_nodes, _) = discover_cluster(&cluster.entry_point_info.gossip, num_nodes).unwrap();
let cluster_nodes = discover_cluster(&cluster.entry_point_info.gossip, num_nodes).unwrap();
info!("PARTITION_TEST sleeping until partition starting condition",);
loop {
@@ -338,7 +338,7 @@ fn run_cluster_partition(
assert!(alive_node_contact_infos.len() > 0);
info!("PARTITION_TEST discovering nodes");
let (cluster_nodes, _) = discover_cluster(
let cluster_nodes = discover_cluster(
&alive_node_contact_infos[0].gossip,
alive_node_contact_infos.len(),
)
@@ -461,7 +461,7 @@ fn test_forwarding() {
};
let cluster = LocalCluster::new(&config);
let (cluster_nodes, _) = discover_cluster(&cluster.entry_point_info.gossip, 2).unwrap();
let cluster_nodes = discover_cluster(&cluster.entry_point_info.gossip, 2).unwrap();
assert!(cluster_nodes.len() >= 2);
let leader_pubkey = cluster.entry_point_info.id;
@@ -525,7 +525,7 @@ fn test_listener_startup() {
..ClusterConfig::default()
};
let cluster = LocalCluster::new(&config);
let (cluster_nodes, _) = discover_cluster(&cluster.entry_point_info.gossip, 4).unwrap();
let cluster_nodes = discover_cluster(&cluster.entry_point_info.gossip, 4).unwrap();
assert_eq!(cluster_nodes.len(), 4);
}
@@ -542,7 +542,7 @@ fn test_stable_operating_mode() {
..ClusterConfig::default()
};
let cluster = LocalCluster::new(&config);
let (cluster_nodes, _) = discover_cluster(&cluster.entry_point_info.gossip, 1).unwrap();
let cluster_nodes = discover_cluster(&cluster.entry_point_info.gossip, 1).unwrap();
assert_eq!(cluster_nodes.len(), 1);
let client = create_client(
@@ -571,13 +571,7 @@ fn test_stable_operating_mode() {
}
// Programs that are not available at epoch 0
for program_id in [
&solana_sdk::bpf_loader::id(),
&solana_storage_program::id(),
&solana_vest_program::id(),
]
.iter()
{
for program_id in [&solana_sdk::bpf_loader::id(), &solana_vest_program::id()].iter() {
assert_eq!(
(
program_id,
@@ -719,7 +713,7 @@ fn test_consistency_halt() {
let mut cluster = LocalCluster::new(&config);
sleep(Duration::from_millis(5000));
let (cluster_nodes, _) = discover_cluster(&cluster.entry_point_info.gossip, 1).unwrap();
let cluster_nodes = discover_cluster(&cluster.entry_point_info.gossip, 1).unwrap();
info!("num_nodes: {}", cluster_nodes.len());
// Add a validator with the leader as trusted, it should halt when it detects
@@ -747,7 +741,6 @@ fn test_consistency_halt() {
assert_eq!(
discover_cluster(&cluster.entry_point_info.gossip, num_nodes)
.unwrap()
.0
.len(),
num_nodes
);
@@ -762,11 +755,11 @@ fn test_consistency_halt() {
break;
}
Ok(nodes) => {
if nodes.0.len() < 2 {
if nodes.len() < 2 {
encountered_error = true;
break;
}
info!("checking cluster for fewer nodes.. {:?}", nodes.0.len());
info!("checking cluster for fewer nodes.. {:?}", nodes.len());
}
}
let client = cluster
@@ -962,7 +955,7 @@ fn test_snapshots_blockstore_floor() {
// Start up a new node from a snapshot
let validator_stake = 5;
let (cluster_nodes, _) = discover_cluster(&cluster.entry_point_info.gossip, 1).unwrap();
let cluster_nodes = discover_cluster(&cluster.entry_point_info.gossip, 1).unwrap();
let mut trusted_validators = HashSet::new();
trusted_validators.insert(cluster_nodes[0].id);
validator_snapshot_test_config