Extract tower storage details from Tower struct

Michael Vines
2021-07-20 22:25:13 -07:00
parent ca37873e16
commit 397801a2d8
12 changed files with 425 additions and 388 deletions
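This commit swaps Tower's hard-coded ledger-path persistence for a storage handle that call sites construct and pass in explicitly. As a rough sketch of the shape implied by the call sites in the diff below — a `FileTowerStorage` built from a directory, exposing the tower file's path — note the trait bounds and the file-naming scheme are assumptions, not taken from this diff:

```rust
use {solana_sdk::pubkey::Pubkey, std::path::PathBuf};

// Sketch only: the real definitions live in solana_core::consensus.
// `Send + Sync` is assumed so the handle can sit in an Arc inside
// ValidatorConfig, as the hunks below do with Arc::new(FileTowerStorage::new(..)).
pub trait TowerStorage: Send + Sync {}

pub struct FileTowerStorage {
    tower_path: PathBuf,
}

impl FileTowerStorage {
    // Matches the constructor used throughout the diff.
    pub fn new(tower_path: PathBuf) -> Self {
        Self { tower_path }
    }

    // Used by `remove_tower` in the test hunks; the "tower-<pubkey>.bin"
    // naming is an assumption carried over from the old Tower::get_filename.
    pub fn filename(&self, node_pubkey: &Pubkey) -> PathBuf {
        self.tower_path
            .join(format!("tower-{}", node_pubkey))
            .with_extension("bin")
    }
}

impl TowerStorage for FileTowerStorage {}
```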


@@ -1,12 +1,11 @@
use solana_client::thin_client::ThinClient;
use solana_core::validator::Validator;
use solana_core::validator::ValidatorConfig;
use solana_gossip::{cluster_info::Node, contact_info::ContactInfo};
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::Keypair;
use solana_streamer::socket::SocketAddrSpace;
use std::path::PathBuf;
use std::sync::Arc;
use {
solana_client::thin_client::ThinClient,
solana_core::validator::{Validator, ValidatorConfig},
solana_gossip::{cluster_info::Node, contact_info::ContactInfo},
solana_sdk::{pubkey::Pubkey, signature::Keypair},
solana_streamer::socket::SocketAddrSpace,
std::{path::PathBuf, sync::Arc},
};
pub struct ValidatorInfo {
pub keypair: Arc<Keypair>,


@@ -1,55 +1,59 @@
use crate::{
cluster::{Cluster, ClusterValidatorInfo, ValidatorInfo},
cluster_tests,
validator_configs::*,
};
use itertools::izip;
use log::*;
use solana_client::thin_client::{create_client, ThinClient};
use solana_core::validator::{Validator, ValidatorConfig, ValidatorStartProgress};
use solana_gossip::{
cluster_info::{Node, VALIDATOR_PORT_RANGE},
contact_info::ContactInfo,
gossip_service::discover_cluster,
};
use solana_ledger::create_new_tmp_ledger;
use solana_runtime::genesis_utils::{
create_genesis_config_with_vote_accounts_and_cluster_type, GenesisConfigInfo,
ValidatorVoteKeypairs,
};
use solana_sdk::{
account::Account,
account::AccountSharedData,
client::SyncClient,
clock::{DEFAULT_DEV_SLOTS_PER_EPOCH, DEFAULT_TICKS_PER_SLOT},
commitment_config::CommitmentConfig,
epoch_schedule::EpochSchedule,
genesis_config::{ClusterType, GenesisConfig},
message::Message,
poh_config::PohConfig,
pubkey::Pubkey,
signature::{Keypair, Signer},
stake::{
config as stake_config, instruction as stake_instruction,
state::{Authorized, Lockup},
use {
crate::{
cluster::{Cluster, ClusterValidatorInfo, ValidatorInfo},
cluster_tests,
validator_configs::*,
},
itertools::izip,
log::*,
solana_client::thin_client::{create_client, ThinClient},
solana_core::{
consensus::FileTowerStorage,
validator::{Validator, ValidatorConfig, ValidatorStartProgress},
},
solana_gossip::{
cluster_info::{Node, VALIDATOR_PORT_RANGE},
contact_info::ContactInfo,
gossip_service::discover_cluster,
},
solana_ledger::create_new_tmp_ledger,
solana_runtime::genesis_utils::{
create_genesis_config_with_vote_accounts_and_cluster_type, GenesisConfigInfo,
ValidatorVoteKeypairs,
},
solana_sdk::{
account::Account,
account::AccountSharedData,
client::SyncClient,
clock::{DEFAULT_DEV_SLOTS_PER_EPOCH, DEFAULT_TICKS_PER_SLOT},
commitment_config::CommitmentConfig,
epoch_schedule::EpochSchedule,
genesis_config::{ClusterType, GenesisConfig},
message::Message,
poh_config::PohConfig,
pubkey::Pubkey,
signature::{Keypair, Signer},
stake::{
config as stake_config, instruction as stake_instruction,
state::{Authorized, Lockup},
},
system_transaction,
transaction::Transaction,
},
solana_stake_program::{config::create_account as create_stake_config_account, stake_state},
solana_streamer::socket::SocketAddrSpace,
solana_vote_program::{
vote_instruction,
vote_state::{VoteInit, VoteState},
},
std::{
collections::HashMap,
io::{Error, ErrorKind, Result},
iter,
sync::{Arc, RwLock},
},
system_transaction,
transaction::Transaction,
};
use solana_stake_program::{config::create_account as create_stake_config_account, stake_state};
use solana_streamer::socket::SocketAddrSpace;
use solana_vote_program::{
vote_instruction,
vote_state::{VoteInit, VoteState},
};
use std::{
collections::HashMap,
io::{Error, ErrorKind, Result},
iter,
sync::{Arc, RwLock},
};
#[derive(Debug)]
pub struct ClusterConfig {
/// The validator config that should be applied to every node in the cluster
pub validator_configs: Vec<ValidatorConfig>,
@@ -207,6 +211,7 @@ impl LocalCluster {
let mut leader_config = safe_clone_config(&config.validator_configs[0]);
leader_config.rpc_addrs = Some((leader_node.info.rpc, leader_node.info.rpc_pubsub));
leader_config.account_paths = vec![leader_ledger_path.join("accounts")];
leader_config.tower_storage = Arc::new(FileTowerStorage::new(leader_ledger_path.clone()));
let leader_keypair = Arc::new(Keypair::from_bytes(&leader_keypair.to_bytes()).unwrap());
let leader_vote_keypair =
Arc::new(Keypair::from_bytes(&leader_vote_keypair.to_bytes()).unwrap());
@@ -367,6 +372,7 @@ impl LocalCluster {
let mut config = safe_clone_config(validator_config);
config.rpc_addrs = Some((validator_node.info.rpc, validator_node.info.rpc_pubsub));
config.account_paths = vec![ledger_path.join("accounts")];
config.tower_storage = Arc::new(FileTowerStorage::new(ledger_path.clone()));
let voting_keypair = voting_keypair.unwrap();
let validator_server = Validator::new(
validator_node,
@@ -704,6 +710,8 @@ impl Cluster for LocalCluster {
let validator_info = &cluster_validator_info.info;
cluster_validator_info.config.account_paths =
vec![validator_info.ledger_path.join("accounts")];
cluster_validator_info.config.tower_storage =
Arc::new(FileTowerStorage::new(validator_info.ledger_path.clone()));
let restarted_node = Validator::new(
node,
validator_info.keypair.clone(),

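All three LocalCluster call sites above follow the same pattern: derive the node's account paths and tower storage from its own ledger directory, so each validator's tower file lives alongside its accounts. A condensed sketch of that wiring (the helper name is illustrative; the field assignments mirror the hunks above):

```rust
use {
    solana_core::{consensus::FileTowerStorage, validator::ValidatorConfig},
    std::{path::Path, sync::Arc},
};

// Illustrative helper mirroring the three call sites above: point a node's
// account storage and tower persistence at its ledger directory.
fn wire_ledger_paths(config: &mut ValidatorConfig, ledger_path: &Path) {
    config.account_paths = vec![ledger_path.join("accounts")];
    config.tower_storage = Arc::new(FileTowerStorage::new(ledger_path.to_path_buf()));
}
```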

@@ -37,7 +37,7 @@ pub fn safe_clone_config(config: &ValidatorConfig) -> ValidatorConfig {
wal_recovery_mode: config.wal_recovery_mode.clone(),
poh_verify: config.poh_verify,
require_tower: config.require_tower,
tower_path: config.tower_path.clone(),
tower_storage: config.tower_storage.clone(),
debug_keys: config.debug_keys.clone(),
contact_debug_interval: config.contact_debug_interval,
contact_save_interval: config.contact_save_interval,

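Here `ValidatorConfig` trades the owned `tower_path` for a shared `tower_storage` handle, so `safe_clone_config` only clones an `Arc`. A self-contained sketch of why that clone is cheap and keeps every cloned config pointed at the same backend (the trait and struct below are stand-ins, not the real definitions):

```rust
use std::sync::Arc;

trait TowerStorage: Send + Sync {} // stand-in for the real trait

struct FileTowerStorage; // stand-in; the real one wraps a tower_path
impl TowerStorage for FileTowerStorage {}

fn main() {
    let storage: Arc<dyn TowerStorage> = Arc::new(FileTowerStorage);
    // Cloning copies the pointer and bumps the refcount; both handles refer
    // to one storage backend, which is what safe_clone_config relies on.
    let cloned = Arc::clone(&storage);
    assert_eq!(Arc::strong_count(&cloned), 2);
}
```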

@@ -1,73 +1,77 @@
#![allow(clippy::integer_arithmetic)]
use assert_matches::assert_matches;
use crossbeam_channel::{unbounded, Receiver};
use gag::BufferRedirect;
use log::*;
use serial_test::serial;
use solana_client::{
pubsub_client::PubsubClient,
rpc_client::RpcClient,
rpc_config::{RpcProgramAccountsConfig, RpcSignatureSubscribeConfig},
rpc_response::RpcSignatureResult,
thin_client::{create_client, ThinClient},
use {
assert_matches::assert_matches,
crossbeam_channel::{unbounded, Receiver},
gag::BufferRedirect,
log::*,
serial_test::serial,
solana_client::{
pubsub_client::PubsubClient,
rpc_client::RpcClient,
rpc_config::{RpcProgramAccountsConfig, RpcSignatureSubscribeConfig},
rpc_response::RpcSignatureResult,
thin_client::{create_client, ThinClient},
},
solana_core::{
broadcast_stage::{
broadcast_duplicates_run::BroadcastDuplicatesConfig, BroadcastStageType,
},
consensus::{FileTowerStorage, Tower, SWITCH_FORK_THRESHOLD, VOTE_THRESHOLD_DEPTH},
optimistic_confirmation_verifier::OptimisticConfirmationVerifier,
replay_stage::DUPLICATE_THRESHOLD,
validator::ValidatorConfig,
},
solana_download_utils::download_snapshot,
solana_gossip::{
cluster_info::VALIDATOR_PORT_RANGE,
crds::Cursor,
gossip_service::{self, discover_cluster},
},
solana_ledger::{
ancestor_iterator::AncestorIterator,
blockstore::{Blockstore, PurgeType},
blockstore_db::AccessType,
leader_schedule::FixedSchedule,
leader_schedule::LeaderSchedule,
},
solana_local_cluster::{
cluster::{Cluster, ClusterValidatorInfo},
cluster_tests,
local_cluster::{ClusterConfig, LocalCluster},
validator_configs::*,
},
solana_runtime::{
snapshot_config::SnapshotConfig,
snapshot_utils::{self, ArchiveFormat},
},
solana_sdk::{
account::AccountSharedData,
client::{AsyncClient, SyncClient},
clock::{self, Slot, DEFAULT_MS_PER_SLOT, DEFAULT_TICKS_PER_SLOT, MAX_RECENT_BLOCKHASHES},
commitment_config::CommitmentConfig,
epoch_schedule::MINIMUM_SLOTS_PER_EPOCH,
genesis_config::ClusterType,
hash::Hash,
poh_config::PohConfig,
pubkey::Pubkey,
signature::{Keypair, Signer},
system_program, system_transaction,
},
solana_streamer::socket::SocketAddrSpace,
solana_vote_program::{vote_state::MAX_LOCKOUT_HISTORY, vote_transaction},
std::{
collections::{BTreeSet, HashMap, HashSet},
fs,
io::Read,
iter,
path::{Path, PathBuf},
sync::atomic::{AtomicBool, Ordering},
sync::Arc,
thread::{sleep, Builder, JoinHandle},
time::{Duration, Instant},
},
tempfile::TempDir,
};
use solana_core::{
broadcast_stage::{broadcast_duplicates_run::BroadcastDuplicatesConfig, BroadcastStageType},
consensus::{Tower, SWITCH_FORK_THRESHOLD, VOTE_THRESHOLD_DEPTH},
optimistic_confirmation_verifier::OptimisticConfirmationVerifier,
replay_stage::DUPLICATE_THRESHOLD,
validator::ValidatorConfig,
};
use solana_download_utils::download_snapshot;
use solana_gossip::{
cluster_info::VALIDATOR_PORT_RANGE,
crds::Cursor,
gossip_service::{self, discover_cluster},
};
use solana_ledger::{
ancestor_iterator::AncestorIterator,
blockstore::{Blockstore, PurgeType},
blockstore_db::AccessType,
leader_schedule::FixedSchedule,
leader_schedule::LeaderSchedule,
};
use solana_local_cluster::{
cluster::{Cluster, ClusterValidatorInfo},
cluster_tests,
local_cluster::{ClusterConfig, LocalCluster},
validator_configs::*,
};
use solana_runtime::{
snapshot_config::SnapshotConfig,
snapshot_utils::{self, ArchiveFormat},
};
use solana_sdk::{
account::AccountSharedData,
client::{AsyncClient, SyncClient},
clock::{self, Slot, DEFAULT_MS_PER_SLOT, DEFAULT_TICKS_PER_SLOT, MAX_RECENT_BLOCKHASHES},
commitment_config::CommitmentConfig,
epoch_schedule::MINIMUM_SLOTS_PER_EPOCH,
genesis_config::ClusterType,
hash::Hash,
poh_config::PohConfig,
pubkey::Pubkey,
signature::{Keypair, Signer},
system_program, system_transaction,
};
use solana_streamer::socket::SocketAddrSpace;
use solana_vote_program::{vote_state::MAX_LOCKOUT_HISTORY, vote_transaction};
use std::{
collections::{BTreeSet, HashMap, HashSet},
fs,
io::Read,
iter,
path::{Path, PathBuf},
sync::atomic::{AtomicBool, Ordering},
sync::Arc,
thread::{sleep, Builder, JoinHandle},
time::{Duration, Instant},
};
use tempfile::TempDir;
const RUST_LOG_FILTER: &str =
"error,solana_core::replay_stage=warn,solana_local_cluster=info,local_cluster=info";
@@ -2484,6 +2488,8 @@ fn test_validator_saves_tower() {
.ledger_path
.clone();
let file_tower_storage = FileTowerStorage::new(ledger_path.clone());
// Wait for some votes to be generated
let mut last_replayed_root;
loop {
@@ -2500,7 +2506,7 @@
// Stop validator and check saved tower
let validator_info = cluster.exit_node(&validator_id);
let tower1 = Tower::restore(&ledger_path, &validator_id).unwrap();
let tower1 = Tower::restore(&file_tower_storage, &validator_id).unwrap();
trace!("tower1: {:?}", tower1);
assert_eq!(tower1.root(), 0);
@@ -2528,14 +2534,16 @@
.get_slot_with_commitment(CommitmentConfig::processed())
.unwrap();
let validator_info = cluster.exit_node(&validator_id);
let tower2 = Tower::restore(&ledger_path, &validator_id).unwrap();
let tower2 = Tower::restore(&file_tower_storage, &validator_id).unwrap();
trace!("tower2: {:?}", tower2);
assert_eq!(tower2.root(), last_replayed_root);
last_replayed_root = recent_slot;
// Rollback saved tower to `tower1` to simulate a validator starting from a newer snapshot
// without having to wait for that snapshot to be generated in this test
tower1.save(&validator_identity_keypair).unwrap();
tower1
.save(&file_tower_storage, &validator_identity_keypair)
.unwrap();
cluster.restart_node(&validator_id, validator_info, SocketAddrSpace::Unspecified);
let validator_client = cluster.get_validator_client(&validator_id).unwrap();
@@ -2560,7 +2568,7 @@
// Check the new root is reflected in the saved tower state
let mut validator_info = cluster.exit_node(&validator_id);
let tower3 = Tower::restore(&ledger_path, &validator_id).unwrap();
let tower3 = Tower::restore(&file_tower_storage, &validator_id).unwrap();
trace!("tower3: {:?}", tower3);
assert!(tower3.root() > last_replayed_root);
@@ -2588,7 +2596,7 @@
cluster.close_preserve_ledgers();
let tower4 = Tower::restore(&ledger_path, &validator_id).unwrap();
let tower4 = Tower::restore(&file_tower_storage, &validator_id).unwrap();
trace!("tower4: {:?}", tower4);
// should tower4 advance 1 slot compared to tower3????
assert_eq!(tower4.root(), tower3.root() + 1);
@@ -2606,8 +2614,10 @@ fn purge_slots(blockstore: &Blockstore, start_slot: Slot, slot_count: Slot) {
blockstore.purge_slots(start_slot, start_slot + slot_count, PurgeType::Exact);
}
fn restore_tower(ledger_path: &Path, node_pubkey: &Pubkey) -> Option<Tower> {
let tower = Tower::restore(ledger_path, node_pubkey);
fn restore_tower(tower_path: &Path, node_pubkey: &Pubkey) -> Option<Tower> {
let file_tower_storage = FileTowerStorage::new(tower_path.to_path_buf());
let tower = Tower::restore(&file_tower_storage, node_pubkey);
if let Err(tower_err) = tower {
if tower_err.is_file_missing() {
return None;
@@ -2616,19 +2626,20 @@ fn restore_tower(ledger_path: &Path, node_pubkey: &Pubkey) -> Option<Tower> {
}
}
// actually saved tower must have at least one vote.
Tower::restore(ledger_path, node_pubkey).ok()
Tower::restore(&file_tower_storage, node_pubkey).ok()
}
fn last_vote_in_tower(ledger_path: &Path, node_pubkey: &Pubkey) -> Option<(Slot, Hash)> {
restore_tower(ledger_path, node_pubkey).map(|tower| tower.last_voted_slot_hash().unwrap())
fn last_vote_in_tower(tower_path: &Path, node_pubkey: &Pubkey) -> Option<(Slot, Hash)> {
restore_tower(tower_path, node_pubkey).map(|tower| tower.last_voted_slot_hash().unwrap())
}
fn root_in_tower(ledger_path: &Path, node_pubkey: &Pubkey) -> Option<Slot> {
restore_tower(ledger_path, node_pubkey).map(|tower| tower.root())
fn root_in_tower(tower_path: &Path, node_pubkey: &Pubkey) -> Option<Slot> {
restore_tower(tower_path, node_pubkey).map(|tower| tower.root())
}
fn remove_tower(ledger_path: &Path, node_pubkey: &Pubkey) {
fs::remove_file(Tower::get_filename(ledger_path, node_pubkey)).unwrap();
fn remove_tower(tower_path: &Path, node_pubkey: &Pubkey) {
let file_tower_storage = FileTowerStorage::new(tower_path.to_path_buf());
fs::remove_file(file_tower_storage.filename(node_pubkey)).unwrap();
}
// A bit convoluted test case; but this roughly follows this test theoretical scenario:
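The rewritten test helpers above all funnel through one pattern: build a `FileTowerStorage` over the node's tower directory, then restore through it. A hedged sketch combining that with the save signature used in the rollback step (`resave_tower` is an illustrative name, not a helper from this commit):

```rust
use {
    solana_core::consensus::{FileTowerStorage, Tower},
    solana_sdk::{pubkey::Pubkey, signature::Keypair},
    std::path::Path,
};

// Illustrative round trip over the API shown in the hunks above: restore a
// persisted tower via FileTowerStorage and re-save it with the node's
// identity keypair, as the rollback step in test_validator_saves_tower does.
fn resave_tower(tower_path: &Path, node_pubkey: &Pubkey, identity: &Keypair) -> bool {
    let storage = FileTowerStorage::new(tower_path.to_path_buf());
    match Tower::restore(&storage, node_pubkey) {
        Ok(tower) => tower.save(&storage, identity).is_ok(),
        Err(_) => false,
    }
}
```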