--- a/local-cluster/Cargo.toml
+++ b/local-cluster/Cargo.toml
@@ -12,7 +12,6 @@ homepage = "https://solana.com/"
 itertools = "0.9.0"
 log = "0.4.8"
 rand = "0.7.0"
-solana-archiver-lib = { path = "../archiver-lib", version = "1.2.0" }
 solana-config-program = { path = "../programs/config", version = "1.2.0" }
 solana-core = { path = "../core", version = "1.2.0" }
 solana-client = { path = "../client", version = "1.2.0" }
@@ -25,7 +24,6 @@ solana-logger = { path = "../logger", version = "1.2.0" }
 solana-runtime = { path = "../runtime", version = "1.2.0" }
 solana-sdk = { path = "../sdk", version = "1.2.0" }
 solana-stake-program = { path = "../programs/stake", version = "1.2.0" }
-solana-storage-program = { path = "../programs/storage", version = "1.2.0" }
 solana-vest-program = { path = "../programs/vest", version = "1.2.0" }
 solana-vote-program = { path = "../programs/vote", version = "1.2.0" }
 tempfile = "3.1.0"
--- a/local-cluster/src/cluster.rs
+++ b/local-cluster/src/cluster.rs
@@ -10,7 +10,6 @@ use std::sync::Arc;
 pub struct ValidatorInfo {
     pub keypair: Arc<Keypair>,
     pub voting_keypair: Arc<Keypair>,
-    pub storage_keypair: Arc<Keypair>,
     pub ledger_path: PathBuf,
     pub contact_info: ContactInfo,
 }
--- a/local-cluster/src/cluster_tests.rs
+++ b/local-cluster/src/cluster_tests.rs
@@ -45,7 +45,7 @@ pub fn spend_and_verify_all_nodes<S: ::std::hash::BuildHasher>(
     nodes: usize,
     ignore_nodes: HashSet<Pubkey, S>,
 ) {
-    let (cluster_nodes, _) = discover_cluster(&entry_point_info.gossip, nodes).unwrap();
+    let cluster_nodes = discover_cluster(&entry_point_info.gossip, nodes).unwrap();
     assert!(cluster_nodes.len() >= nodes);
     for ingress_node in &cluster_nodes {
         if ignore_nodes.contains(&ingress_node.id) {
@@ -126,7 +126,7 @@ pub fn send_many_transactions(
 }
 
 pub fn validator_exit(entry_point_info: &ContactInfo, nodes: usize) {
-    let (cluster_nodes, _) = discover_cluster(&entry_point_info.gossip, nodes).unwrap();
+    let cluster_nodes = discover_cluster(&entry_point_info.gossip, nodes).unwrap();
     assert!(cluster_nodes.len() >= nodes);
     for node in &cluster_nodes {
         let client = create_client(node.client_facing_addr(), VALIDATOR_PORT_RANGE);
@@ -197,7 +197,7 @@ pub fn kill_entry_and_spend_and_verify_rest(
     slot_millis: u64,
 ) {
     solana_logger::setup();
-    let (cluster_nodes, _) = discover_cluster(&entry_point_info.gossip, nodes).unwrap();
+    let cluster_nodes = discover_cluster(&entry_point_info.gossip, nodes).unwrap();
     assert!(cluster_nodes.len() >= nodes);
     let client = create_client(entry_point_info.client_facing_addr(), VALIDATOR_PORT_RANGE);
     // sleep long enough to make sure we are in epoch 3
--- a/local-cluster/src/lib.rs
+++ b/local-cluster/src/lib.rs
@@ -1,6 +1,3 @@
 pub mod cluster;
 pub mod cluster_tests;
 pub mod local_cluster;
-
-#[macro_use]
-extern crate solana_storage_program;
--- a/local-cluster/src/local_cluster.rs
+++ b/local-cluster/src/local_cluster.rs
@@ -1,7 +1,6 @@
 use crate::cluster::{Cluster, ClusterValidatorInfo, ValidatorInfo};
 use itertools::izip;
 use log::*;
-use solana_archiver_lib::archiver::Archiver;
 use solana_client::thin_client::{create_client, ThinClient};
 use solana_core::{
     cluster_info::{Node, VALIDATOR_PORT_RANGE},
@@ -15,60 +14,35 @@ use solana_ledger::{
 };
 use solana_sdk::{
     client::SyncClient,
-    clock::{DEFAULT_DEV_SLOTS_PER_EPOCH, DEFAULT_SLOTS_PER_SEGMENT, DEFAULT_TICKS_PER_SLOT},
+    clock::{DEFAULT_DEV_SLOTS_PER_EPOCH, DEFAULT_TICKS_PER_SLOT},
     commitment_config::CommitmentConfig,
     epoch_schedule::EpochSchedule,
     genesis_config::{GenesisConfig, OperatingMode},
     message::Message,
     poh_config::PohConfig,
     pubkey::Pubkey,
     signature::{Keypair, Signer},
     system_transaction,
     transaction::Transaction,
     transport::Result as TransportResult,
 };
 use solana_stake_program::{
     config as stake_config, stake_instruction,
     stake_state::{Authorized, Lockup, StakeState},
 };
-use solana_storage_program::{
-    storage_contract,
-    storage_instruction::{self, StorageAccountType},
-};
 use solana_vote_program::{
     vote_instruction,
     vote_state::{VoteInit, VoteState},
 };
 use std::{
     collections::HashMap,
     fs::remove_dir_all,
     io::{Error, ErrorKind, Result},
     iter,
     path::PathBuf,
     sync::Arc,
 };
 
-pub struct ArchiverInfo {
-    pub archiver_storage_pubkey: Pubkey,
-    pub ledger_path: PathBuf,
-}
-
-impl ArchiverInfo {
-    fn new(storage_pubkey: Pubkey, ledger_path: PathBuf) -> Self {
-        Self {
-            archiver_storage_pubkey: storage_pubkey,
-            ledger_path,
-        }
-    }
-}
-
 #[derive(Clone, Debug)]
 pub struct ClusterConfig {
     /// The validator config that should be applied to every node in the cluster
     pub validator_configs: Vec<ValidatorConfig>,
-    /// Number of archivers in the cluster
-    /// Note- archivers will timeout if ticks_per_slot is much larger than the default 8
-    pub num_archivers: usize,
     /// Number of nodes that are unstaked and not voting (a.k.a listening)
     pub num_listeners: u64,
     /// The specific pubkeys of each node if specified
@@ -79,7 +53,6 @@ pub struct ClusterConfig {
     pub cluster_lamports: u64,
     pub ticks_per_slot: u64,
     pub slots_per_epoch: u64,
-    pub slots_per_segment: u64,
     pub stakers_slot_offset: u64,
     pub native_instruction_processors: Vec<(String, Pubkey)>,
     pub operating_mode: OperatingMode,
@@ -90,14 +63,12 @@ impl Default for ClusterConfig {
     fn default() -> Self {
         ClusterConfig {
             validator_configs: vec![],
-            num_archivers: 0,
             num_listeners: 0,
             validator_keys: None,
             node_stakes: vec![],
             cluster_lamports: 0,
             ticks_per_slot: DEFAULT_TICKS_PER_SLOT,
             slots_per_epoch: DEFAULT_DEV_SLOTS_PER_EPOCH,
-            slots_per_segment: DEFAULT_SLOTS_PER_SEGMENT,
             stakers_slot_offset: DEFAULT_DEV_SLOTS_PER_EPOCH,
             native_instruction_processors: vec![],
             operating_mode: OperatingMode::Development,
@@ -113,8 +84,6 @@ pub struct LocalCluster {
     pub entry_point_info: ContactInfo,
     pub validators: HashMap<Pubkey, ClusterValidatorInfo>,
     pub genesis_config: GenesisConfig,
-    archivers: Vec<Archiver>,
-    pub archiver_infos: HashMap<Pubkey, ArchiverInfo>,
 }
 
 impl LocalCluster {
@@ -159,7 +128,6 @@ impl LocalCluster {
             config.node_stakes[0],
         );
         genesis_config.ticks_per_slot = config.ticks_per_slot;
-        genesis_config.slots_per_segment = config.slots_per_segment;
         genesis_config.epoch_schedule =
             EpochSchedule::custom(config.slots_per_epoch, config.stakers_slot_offset, true);
         genesis_config.operating_mode = config.operating_mode;
@@ -171,11 +139,7 @@ impl LocalCluster {
                 solana_genesis_programs::get_programs(genesis_config.operating_mode, 0)
                     .unwrap_or_else(|| vec![])
             }
-            OperatingMode::Development => {
-                genesis_config
-                    .native_instruction_processors
-                    .push(solana_storage_program!());
-            }
+            _ => (),
         }
 
         genesis_config.inflation =
@@ -185,12 +149,6 @@ impl LocalCluster {
             .native_instruction_processors
             .extend_from_slice(&config.native_instruction_processors);
 
-        let storage_keypair = Keypair::new();
-        genesis_config.add_account(
-            storage_keypair.pubkey(),
-            storage_contract::create_validator_storage_account(leader_pubkey, 1),
-        );
-
         // Replace staking config
         genesis_config.add_account(
             stake_config::id(),
@@ -205,7 +163,6 @@ impl LocalCluster {
 
         let (leader_ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_config);
         let leader_contact_info = leader_node.info.clone();
-        let leader_storage_keypair = Arc::new(storage_keypair);
         let leader_voting_keypair = Arc::new(voting_keypair);
         let mut leader_config = config.validator_configs[0].clone();
         leader_config.rpc_ports = Some((
@@ -218,7 +175,6 @@ impl LocalCluster {
             &leader_ledger_path,
             &leader_voting_keypair.pubkey(),
             vec![leader_voting_keypair.clone()],
-            &leader_storage_keypair,
             None,
             true,
             &leader_config,
@@ -229,7 +185,6 @@ impl LocalCluster {
         let leader_info = ValidatorInfo {
             keypair: leader_keypair.clone(),
             voting_keypair: leader_voting_keypair,
-            storage_keypair: leader_storage_keypair,
             ledger_path: leader_ledger_path,
             contact_info: leader_contact_info.clone(),
         };
@@ -246,9 +201,7 @@ impl LocalCluster {
             funding_keypair: mint_keypair,
             entry_point_info: leader_contact_info,
             validators,
-            archivers: vec![],
             genesis_config,
-            archiver_infos: HashMap::new(),
         };
 
         for (stake, validator_config, key) in izip!(
@@ -273,15 +226,7 @@ impl LocalCluster {
         )
         .unwrap();
 
-        for _ in 0..config.num_archivers {
-            cluster.add_archiver();
-        }
-
-        discover_cluster(
-            &cluster.entry_point_info.gossip,
-            config.node_stakes.len() + config.num_archivers as usize,
-        )
-        .unwrap();
+        discover_cluster(&cluster.entry_point_info.gossip, config.node_stakes.len()).unwrap();
 
         cluster
     }
@@ -301,10 +246,6 @@ impl LocalCluster {
                 v.join().expect("Validator join failed");
             }
         }
-
-        while let Some(archiver) = self.archivers.pop() {
-            archiver.close();
-        }
     }
 
     pub fn add_validator(
@@ -320,7 +261,6 @@ impl LocalCluster {
 
         // Must have enough tokens to fund vote account and set delegate
         let voting_keypair = Keypair::new();
-        let storage_keypair = Arc::new(Keypair::new());
         let validator_pubkey = validator_keypair.pubkey();
         let validator_node = Node::new_localhost_with_pubkey(&validator_keypair.pubkey());
         let contact_info = validator_node.info.clone();
@@ -330,7 +270,7 @@ impl LocalCluster {
             // setup as a listener
             info!("listener {} ", validator_pubkey,);
         } else {
-            // Give the validator some lamports to setup vote and storage accounts
+            // Give the validator some lamports to setup vote accounts
             let validator_balance = Self::transfer_with_client(
                 &client,
                 &self.funding_keypair,
@@ -349,9 +289,6 @@ impl LocalCluster {
                 stake,
             )
             .unwrap();
-
-            Self::setup_storage_account(&client, &storage_keypair, &validator_keypair, false)
-                .unwrap();
         }
 
         let mut config = validator_config.clone();
@@ -366,7 +303,6 @@ impl LocalCluster {
             &ledger_path,
             &voting_keypair.pubkey(),
             vec![voting_keypair.clone()],
-            &storage_keypair,
             Some(&self.entry_point_info),
             true,
             &config,
@@ -377,7 +313,6 @@ impl LocalCluster {
             ValidatorInfo {
                 keypair: validator_keypair,
                 voting_keypair,
-                storage_keypair,
                 ledger_path,
                 contact_info,
             },
@@ -389,56 +324,8 @@ impl LocalCluster {
         validator_pubkey
     }
 
-    fn add_archiver(&mut self) {
-        let archiver_keypair = Arc::new(Keypair::new());
-        let archiver_pubkey = archiver_keypair.pubkey();
-        let storage_keypair = Arc::new(Keypair::new());
-        let storage_pubkey = storage_keypair.pubkey();
-        let client = create_client(
-            self.entry_point_info.client_facing_addr(),
-            VALIDATOR_PORT_RANGE,
-        );
-
-        // Give the archiver some lamports to setup its storage accounts
-        Self::transfer_with_client(
-            &client,
-            &self.funding_keypair,
-            &archiver_keypair.pubkey(),
-            42,
-        );
-        let archiver_node = Node::new_localhost_archiver(&archiver_pubkey);
-
-        Self::setup_storage_account(&client, &storage_keypair, &archiver_keypair, true).unwrap();
-
-        let (archiver_ledger_path, _blockhash) = create_new_tmp_ledger!(&self.genesis_config);
-        let archiver = Archiver::new(
-            &archiver_ledger_path,
-            archiver_node,
-            self.entry_point_info.clone(),
-            archiver_keypair,
-            storage_keypair,
-            CommitmentConfig::recent(),
-        )
-        .unwrap_or_else(|err| panic!("Archiver::new() failed: {:?}", err));
-
-        self.archivers.push(archiver);
-        self.archiver_infos.insert(
-            archiver_pubkey,
-            ArchiverInfo::new(storage_pubkey, archiver_ledger_path),
-        );
-    }
-
     fn close(&mut self) {
         self.close_preserve_ledgers();
-        for ledger_path in self
-            .validators
-            .values()
-            .map(|f| &f.info.ledger_path)
-            .chain(self.archiver_infos.values().map(|info| &info.ledger_path))
-        {
-            remove_dir_all(&ledger_path)
-                .unwrap_or_else(|_| panic!("Unable to remove {:?}", ledger_path));
-        }
     }
 
     pub fn transfer(&self, source_keypair: &Keypair, dest_pubkey: &Pubkey, lamports: u64) -> u64 {
@@ -601,40 +488,6 @@ impl LocalCluster {
             )),
         }
     }
-
-    /// Sets up the storage account for validators/archivers and assumes the funder is the owner
-    fn setup_storage_account(
-        client: &ThinClient,
-        storage_keypair: &Keypair,
-        from_keypair: &Arc<Keypair>,
-        archiver: bool,
-    ) -> TransportResult<()> {
-        let storage_account_type = if archiver {
-            StorageAccountType::Archiver
-        } else {
-            StorageAccountType::Validator
-        };
-        let message = Message::new_with_payer(
-            &storage_instruction::create_storage_account(
-                &from_keypair.pubkey(),
-                &from_keypair.pubkey(),
-                &storage_keypair.pubkey(),
-                1,
-                storage_account_type,
-            ),
-            Some(&from_keypair.pubkey()),
-        );
-
-        let signer_keys = vec![from_keypair.as_ref(), &storage_keypair];
-        let blockhash = client
-            .get_recent_blockhash_with_commitment(CommitmentConfig::recent())
-            .unwrap()
-            .0;
-        let mut transaction = Transaction::new(&signer_keys, message, blockhash);
-        client
-            .retry_transfer(&from_keypair, &mut transaction, 10)
-            .map(|_signature| ())
-    }
 }
 
 impl Cluster for LocalCluster {
@@ -686,7 +539,6 @@ impl Cluster for LocalCluster {
             &validator_info.ledger_path,
             &validator_info.voting_keypair.pubkey(),
             vec![validator_info.voting_keypair.clone()],
-            &validator_info.storage_keypair,
             entry_point_info,
             true,
             &cluster_validator_info.config,
@@ -716,7 +568,6 @@ impl Drop for LocalCluster {
 #[cfg(test)]
 mod test {
     use super::*;
-    use solana_core::storage_stage::SLOTS_PER_TURN_TEST;
     use solana_sdk::epoch_schedule::MINIMUM_SLOTS_PER_EPOCH;
 
     #[test]
@@ -725,7 +576,6 @@ mod test {
        let num_nodes = 1;
        let cluster = LocalCluster::new_with_equal_stakes(num_nodes, 100, 3);
        assert_eq!(cluster.validators.len(), num_nodes);
-        assert_eq!(cluster.archivers.len(), 0);
    }

    #[test]
@@ -733,12 +583,9 @@ mod test {
         solana_logger::setup();
         let mut validator_config = ValidatorConfig::default();
         validator_config.rpc_config.enable_validator_exit = true;
-        validator_config.storage_slots_per_turn = SLOTS_PER_TURN_TEST;
         const NUM_NODES: usize = 1;
-        let num_archivers = 1;
         let config = ClusterConfig {
             validator_configs: vec![ValidatorConfig::default(); NUM_NODES],
-            num_archivers,
             node_stakes: vec![3; NUM_NODES],
             cluster_lamports: 100,
             ticks_per_slot: 8,
@@ -748,6 +595,5 @@ mod test {
         };
         let cluster = LocalCluster::new(&config);
         assert_eq!(cluster.validators.len(), NUM_NODES);
-        assert_eq!(cluster.archivers.len(), num_archivers);
     }
 }
--- a/local-cluster/tests/archiver.rs
+++ /dev/null
@@ -1,195 +0,0 @@
-use log::*;
-use serial_test_derive::serial;
-use solana_archiver_lib::archiver::Archiver;
-use solana_client::thin_client::create_client;
-use solana_core::{
-    cluster_info::{ClusterInfo, Node, VALIDATOR_PORT_RANGE},
-    contact_info::ContactInfo,
-    gossip_service::discover_cluster,
-    serve_repair::ServeRepair,
-    storage_stage::SLOTS_PER_TURN_TEST,
-    validator::ValidatorConfig,
-};
-use solana_ledger::{blockstore::Blockstore, create_new_tmp_ledger, get_tmp_ledger_path};
-use solana_local_cluster::local_cluster::{ClusterConfig, LocalCluster};
-use solana_sdk::{
-    commitment_config::CommitmentConfig,
-    genesis_config::create_genesis_config,
-    signature::{Keypair, Signer},
-};
-use std::{fs::remove_dir_all, sync::Arc};
-
-/// Start the cluster with the given configuration and wait till the archivers are discovered
-/// Then download shreds from one of them.
-fn run_archiver_startup_basic(num_nodes: usize, num_archivers: usize) {
-    solana_logger::setup();
-    info!("starting archiver test");
-
-    let mut validator_config = ValidatorConfig::default();
-    let slots_per_segment = 8;
-    validator_config.storage_slots_per_turn = SLOTS_PER_TURN_TEST;
-    let config = ClusterConfig {
-        validator_configs: vec![validator_config; num_nodes],
-        num_archivers,
-        node_stakes: vec![100; num_nodes],
-        cluster_lamports: 10_000,
-        // keep a low slot/segment count to speed up the test
-        slots_per_segment,
-        ..ClusterConfig::default()
-    };
-    let cluster = LocalCluster::new(&config);
-
-    let (cluster_nodes, cluster_archivers) =
-        discover_cluster(&cluster.entry_point_info.gossip, num_nodes + num_archivers).unwrap();
-    assert_eq!(
-        cluster_nodes.len() + cluster_archivers.len(),
-        num_nodes + num_archivers
-    );
-    let mut archiver_count = 0;
-    let mut archiver_info = ContactInfo::default();
-    for node in &cluster_archivers {
-        info!("storage: {:?} rpc: {:?}", node.storage_addr, node.rpc);
-        if ContactInfo::is_valid_address(&node.storage_addr) {
-            archiver_count += 1;
-            archiver_info = node.clone();
-        }
-    }
-    assert_eq!(archiver_count, num_archivers);
-
-    let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(
-        cluster_nodes[0].clone(),
-    ));
-    let serve_repair = ServeRepair::new(cluster_info);
-    let path = get_tmp_ledger_path!();
-    let blockstore = Arc::new(Blockstore::open(&path).unwrap());
-    Archiver::download_from_archiver(
-        &serve_repair,
-        &archiver_info,
-        &blockstore,
-        slots_per_segment,
-    )
-    .unwrap();
-}
-
-#[test]
-#[ignore]
-#[serial]
-fn test_archiver_startup_1_node() {
-    run_archiver_startup_basic(1, 1);
-}
-
-#[test]
-#[ignore]
-#[serial]
-fn test_archiver_startup_2_nodes() {
-    run_archiver_startup_basic(2, 1);
-}
-
-#[test]
-#[serial]
-fn test_archiver_startup_leader_hang() {
-    use std::net::{IpAddr, Ipv4Addr, SocketAddr};
-
-    solana_logger::setup();
-    info!("starting archiver test");
-
-    let leader_ledger_path = std::path::PathBuf::from("archiver_test_leader_ledger");
-    let (genesis_config, _mint_keypair) = create_genesis_config(10_000);
-    let (archiver_ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_config);
-
-    {
-        let archiver_keypair = Arc::new(Keypair::new());
-        let storage_keypair = Arc::new(Keypair::new());
-
-        info!("starting archiver node");
-        let archiver_node = Node::new_localhost_with_pubkey(&archiver_keypair.pubkey());
-
-        let fake_gossip = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 0);
-        let leader_info = ContactInfo::new_gossip_entry_point(&fake_gossip);
-
-        let archiver_res = Archiver::new(
-            &archiver_ledger_path,
-            archiver_node,
-            leader_info,
-            archiver_keypair,
-            storage_keypair,
-            CommitmentConfig::recent(),
-        );
-
-        assert!(archiver_res.is_err());
-    }
-
-    let _ignored = Blockstore::destroy(&leader_ledger_path);
-    let _ignored = Blockstore::destroy(&archiver_ledger_path);
-    let _ignored = remove_dir_all(&leader_ledger_path);
-    let _ignored = remove_dir_all(&archiver_ledger_path);
-}
-
-#[test]
-#[serial]
-fn test_archiver_startup_ledger_hang() {
-    solana_logger::setup();
-    info!("starting archiver test");
-    let mut validator_config = ValidatorConfig::default();
-    validator_config.storage_slots_per_turn = SLOTS_PER_TURN_TEST;
-    let cluster = LocalCluster::new_with_equal_stakes(2, 10_000, 100);
-
-    info!("starting archiver node");
-    let bad_keys = Arc::new(Keypair::new());
-    let storage_keypair = Arc::new(Keypair::new());
-    let mut archiver_node = Node::new_localhost_with_pubkey(&bad_keys.pubkey());
-
-    // Pass bad TVU sockets to prevent successful ledger download
-    archiver_node.sockets.tvu = vec![std::net::UdpSocket::bind("0.0.0.0:0").unwrap()];
-    let (archiver_ledger_path, _blockhash) = create_new_tmp_ledger!(&cluster.genesis_config);
-
-    let archiver_res = Archiver::new(
-        &archiver_ledger_path,
-        archiver_node,
-        cluster.entry_point_info.clone(),
-        bad_keys,
-        storage_keypair,
-        CommitmentConfig::recent(),
-    );
-
-    assert!(archiver_res.is_err());
-}
-
-#[test]
-#[serial]
-fn test_account_setup() {
-    let num_nodes = 1;
-    let num_archivers = 1;
-    let mut validator_config = ValidatorConfig::default();
-    validator_config.storage_slots_per_turn = SLOTS_PER_TURN_TEST;
-    let config = ClusterConfig {
-        validator_configs: vec![ValidatorConfig::default(); num_nodes],
-        num_archivers,
-        node_stakes: vec![100; num_nodes],
-        cluster_lamports: 10_000,
-        ..ClusterConfig::default()
-    };
-    let cluster = LocalCluster::new(&config);
-
-    let _ = discover_cluster(
-        &cluster.entry_point_info.gossip,
-        num_nodes + num_archivers as usize,
-    )
-    .unwrap();
-    // now check that the cluster actually has accounts for the archiver.
-    let client = create_client(
-        cluster.entry_point_info.client_facing_addr(),
-        VALIDATOR_PORT_RANGE,
-    );
-    cluster.archiver_infos.iter().for_each(|(_, value)| {
-        assert_eq!(
-            client
-                .poll_get_balance_with_commitment(
-                    &value.archiver_storage_pubkey,
-                    CommitmentConfig::recent()
-                )
-                .unwrap(),
-            1
-        );
-    });
-}
--- a/local-cluster/tests/local_cluster.rs
+++ b/local-cluster/tests/local_cluster.rs
@@ -266,7 +266,7 @@ fn run_cluster_partition(
     );
     let mut cluster = LocalCluster::new(&config);
 
-    let (cluster_nodes, _) = discover_cluster(&cluster.entry_point_info.gossip, num_nodes).unwrap();
+    let cluster_nodes = discover_cluster(&cluster.entry_point_info.gossip, num_nodes).unwrap();
 
     info!("PARTITION_TEST sleeping until partition starting condition",);
     loop {
@@ -338,7 +338,7 @@ fn run_cluster_partition(
 
     assert!(alive_node_contact_infos.len() > 0);
     info!("PARTITION_TEST discovering nodes");
-    let (cluster_nodes, _) = discover_cluster(
+    let cluster_nodes = discover_cluster(
         &alive_node_contact_infos[0].gossip,
         alive_node_contact_infos.len(),
     )
@@ -461,7 +461,7 @@ fn test_forwarding() {
     };
     let cluster = LocalCluster::new(&config);
 
-    let (cluster_nodes, _) = discover_cluster(&cluster.entry_point_info.gossip, 2).unwrap();
+    let cluster_nodes = discover_cluster(&cluster.entry_point_info.gossip, 2).unwrap();
     assert!(cluster_nodes.len() >= 2);
 
     let leader_pubkey = cluster.entry_point_info.id;
@@ -525,7 +525,7 @@ fn test_listener_startup() {
         ..ClusterConfig::default()
     };
     let cluster = LocalCluster::new(&config);
-    let (cluster_nodes, _) = discover_cluster(&cluster.entry_point_info.gossip, 4).unwrap();
+    let cluster_nodes = discover_cluster(&cluster.entry_point_info.gossip, 4).unwrap();
     assert_eq!(cluster_nodes.len(), 4);
 }
@@ -542,7 +542,7 @@ fn test_stable_operating_mode() {
         ..ClusterConfig::default()
     };
     let cluster = LocalCluster::new(&config);
-    let (cluster_nodes, _) = discover_cluster(&cluster.entry_point_info.gossip, 1).unwrap();
+    let cluster_nodes = discover_cluster(&cluster.entry_point_info.gossip, 1).unwrap();
     assert_eq!(cluster_nodes.len(), 1);
 
     let client = create_client(
@@ -571,13 +571,7 @@ fn test_stable_operating_mode() {
     }
 
     // Programs that are not available at epoch 0
-    for program_id in [
-        &solana_sdk::bpf_loader::id(),
-        &solana_storage_program::id(),
-        &solana_vest_program::id(),
-    ]
-    .iter()
-    {
+    for program_id in [&solana_sdk::bpf_loader::id(), &solana_vest_program::id()].iter() {
         assert_eq!(
             (
                 program_id,
@@ -719,7 +713,7 @@ fn test_consistency_halt() {
     let mut cluster = LocalCluster::new(&config);
 
     sleep(Duration::from_millis(5000));
-    let (cluster_nodes, _) = discover_cluster(&cluster.entry_point_info.gossip, 1).unwrap();
+    let cluster_nodes = discover_cluster(&cluster.entry_point_info.gossip, 1).unwrap();
     info!("num_nodes: {}", cluster_nodes.len());
 
     // Add a validator with the leader as trusted, it should halt when it detects
@@ -747,7 +741,6 @@ fn test_consistency_halt() {
     assert_eq!(
         discover_cluster(&cluster.entry_point_info.gossip, num_nodes)
             .unwrap()
-            .0
             .len(),
         num_nodes
     );
@@ -762,11 +755,11 @@ fn test_consistency_halt() {
                 break;
             }
             Ok(nodes) => {
-                if nodes.0.len() < 2 {
+                if nodes.len() < 2 {
                     encountered_error = true;
                     break;
                 }
-                info!("checking cluster for fewer nodes.. {:?}", nodes.0.len());
+                info!("checking cluster for fewer nodes.. {:?}", nodes.len());
             }
         }
         let client = cluster
@@ -962,7 +955,7 @@ fn test_snapshots_blockstore_floor() {
     // Start up a new node from a snapshot
     let validator_stake = 5;
 
-    let (cluster_nodes, _) = discover_cluster(&cluster.entry_point_info.gossip, 1).unwrap();
+    let cluster_nodes = discover_cluster(&cluster.entry_point_info.gossip, 1).unwrap();
     let mut trusted_validators = HashSet::new();
     trusted_validators.insert(cluster_nodes[0].id);
     validator_snapshot_test_config
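Note on the recurring call-site edit above: `discover_cluster` previously returned the
validators and the archivers as two separate lists, and with the archiver code removed it
returns a single list, so every caller drops the tuple destructuring and the `.0` field
accesses. A minimal, self-contained Rust sketch of that migration follows; the stub type
and the function bodies here are illustrative assumptions, not the crate's real
implementation.

    // Stand-in for solana_core::contact_info::ContactInfo, for illustration only.
    #[derive(Clone, Debug, Default)]
    struct ContactInfo {
        id: u64,
    }

    // Old shape: (validators, archivers); the second element is what this commit deletes.
    fn discover_cluster_old(
        _gossip: &str,
        nodes: usize,
    ) -> std::io::Result<(Vec<ContactInfo>, Vec<ContactInfo>)> {
        Ok((vec![ContactInfo::default(); nodes], vec![]))
    }

    // New shape: just the validators.
    fn discover_cluster_new(_gossip: &str, nodes: usize) -> std::io::Result<Vec<ContactInfo>> {
        Ok(vec![ContactInfo::default(); nodes])
    }

    fn main() -> std::io::Result<()> {
        // Before: callers destructured a tuple, usually ignoring the archiver list.
        let (cluster_nodes, _archivers) = discover_cluster_old("entry_point.gossip", 2)?;
        assert!(cluster_nodes.len() >= 2);

        // After: callers bind the Vec directly, the one-line edit repeated in the hunks above.
        let cluster_nodes = discover_cluster_new("entry_point.gossip", 2)?;
        assert!(cluster_nodes.len() >= 2);
        Ok(())
    }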