2019-10-11 13:30:52 -06:00
//! The `validator` module hosts all the validator microservices.
2018-07-02 15:24:40 -07:00
2019-11-04 11:03:39 -07:00
use crate ::{
broadcast_stage ::BroadcastStageType ,
cluster_info ::{ ClusterInfo , Node } ,
2020-03-09 22:03:09 -07:00
cluster_info_vote_listener ::VoteTracker ,
2019-11-04 11:03:39 -07:00
contact_info ::ContactInfo ,
gossip_service ::{ discover_cluster , GossipService } ,
2020-02-26 13:35:50 -08:00
poh_recorder ::{ PohRecorder , GRACE_TICKS_FACTOR , MAX_GRACE_SLOTS } ,
2019-11-04 11:03:39 -07:00
poh_service ::PohService ,
2020-02-04 19:50:24 -07:00
rewards_recorder_service ::RewardsRecorderService ,
2019-11-04 11:03:39 -07:00
rpc ::JsonRpcConfig ,
rpc_pubsub_service ::PubSubService ,
rpc_service ::JsonRpcService ,
rpc_subscriptions ::RpcSubscriptions ,
2020-01-31 14:23:51 -08:00
serve_repair ::ServeRepair ,
serve_repair_service ::ServeRepairService ,
2019-11-04 11:03:39 -07:00
sigverify ,
2020-03-05 23:52:31 -07:00
snapshot_packager_service ::SnapshotPackagerService ,
2019-11-04 11:03:39 -07:00
tpu ::Tpu ,
2019-11-20 16:43:10 -07:00
transaction_status_service ::TransactionStatusService ,
2020-03-16 08:37:31 -07:00
tvu ::{ Sockets , Tvu , TvuConfig } ,
2019-11-04 11:03:39 -07:00
} ;
2019-11-21 14:23:40 -07:00
use crossbeam_channel ::unbounded ;
2020-06-22 20:27:25 -07:00
use rand ::{ thread_rng , Rng } ;
2019-11-04 11:03:39 -07:00
use solana_ledger ::{
2019-11-04 19:10:06 -07:00
bank_forks_utils ,
2020-06-22 20:27:25 -07:00
blockstore ::{ Blockstore , CompletedSlotsReceiver , PurgeType } ,
2020-07-06 12:43:45 -07:00
blockstore_db ::BlockstoreRecoveryMode ,
2020-05-06 08:24:59 -07:00
blockstore_processor , create_new_tmp_ledger ,
2019-12-03 16:31:59 -08:00
leader_schedule ::FixedSchedule ,
2019-11-04 11:03:39 -07:00
leader_schedule_cache ::LeaderScheduleCache ,
} ;
2019-06-13 14:37:12 -07:00
use solana_metrics ::datapoint_info ;
2020-06-17 09:27:03 -06:00
use solana_runtime ::{
bank ::Bank ,
bank_forks ::{ BankForks , SnapshotConfig } ,
2020-06-25 22:06:58 -06:00
commitment ::BlockCommitmentCache ,
2020-06-17 09:27:03 -06:00
hardened_unpack ::{ open_genesis_config , MAX_GENESIS_ARCHIVE_UNPACKED_SIZE } ,
} ;
2019-11-04 11:03:39 -07:00
use solana_sdk ::{
2020-05-14 18:22:47 -07:00
clock ::Slot ,
2020-02-24 19:27:04 -08:00
epoch_schedule ::MAX_LEADER_SCHEDULE_EPOCH_OFFSET ,
2019-11-08 23:56:57 -05:00
genesis_config ::GenesisConfig ,
2020-02-07 09:57:54 -07:00
hash ::Hash ,
2019-11-04 11:03:39 -07:00
pubkey ::Pubkey ,
2020-02-24 10:18:08 -07:00
shred_version ::compute_shred_version ,
2020-02-20 14:28:55 -07:00
signature ::{ Keypair , Signer } ,
2019-11-04 11:03:39 -07:00
timing ::timestamp ,
} ;
2020-06-25 18:24:16 +09:00
use solana_vote_program ::vote_state ::VoteState ;
2019-11-04 11:03:39 -07:00
use std ::{
2020-02-25 10:41:13 -07:00
collections ::HashSet ,
2019-11-04 11:03:39 -07:00
net ::{ IpAddr , Ipv4Addr , SocketAddr } ,
path ::{ Path , PathBuf } ,
process ,
sync ::atomic ::{ AtomicBool , Ordering } ,
2019-11-21 14:23:40 -07:00
sync ::mpsc ::Receiver ,
2020-03-05 23:52:31 -07:00
sync ::{ mpsc ::channel , Arc , Mutex , RwLock } ,
2020-01-07 14:18:34 -07:00
thread ::{ sleep , Result } ,
time ::Duration ,
2019-11-04 11:03:39 -07:00
} ;
2018-07-02 11:20:35 -07:00
2019-04-15 15:27:45 -07:00
#[ derive(Clone, Debug) ]
2019-05-23 22:05:16 -07:00
pub struct ValidatorConfig {
2019-08-08 09:14:30 -07:00
pub dev_halt_at_slot : Option < Slot > ,
2019-11-08 23:56:57 -05:00
pub expected_genesis_hash : Option < Hash > ,
2020-06-30 12:43:48 -07:00
pub expected_bank_hash : Option < Hash > ,
2020-01-28 16:56:55 -07:00
pub expected_shred_version : Option < u16 > ,
2019-01-30 17:16:55 -07:00
pub voting_disabled : bool ,
2019-12-05 21:41:29 -05:00
pub account_paths : Vec < PathBuf > ,
2019-03-03 22:01:09 -08:00
pub rpc_config : JsonRpcConfig ,
2020-01-30 11:58:39 -07:00
pub rpc_ports : Option < ( u16 , u16 ) > , // (API, PubSub)
2019-07-31 17:58:10 -07:00
pub snapshot_config : Option < SnapshotConfig > ,
2020-03-31 17:21:19 -07:00
pub max_ledger_shreds : Option < u64 > ,
2019-06-19 00:13:19 -07:00
pub broadcast_stage_type : BroadcastStageType ,
2020-01-27 16:49:25 -08:00
pub enable_partition : Option < Arc < AtomicBool > > ,
2019-12-03 16:31:59 -08:00
pub fixed_leader_schedule : Option < FixedSchedule > ,
2020-03-02 11:47:58 -07:00
pub wait_for_supermajority : Option < Slot > ,
2020-01-24 18:27:04 -07:00
pub new_hard_forks : Option < Vec < Slot > > ,
2020-02-25 10:41:13 -07:00
pub trusted_validators : Option < HashSet < Pubkey > > , // None = trust all
2020-03-16 08:37:31 -07:00
pub halt_on_trusted_validators_accounts_hash_mismatch : bool ,
pub accounts_hash_fault_injection_slots : u64 , // 0 = no fault injection
2020-03-22 11:10:04 -07:00
pub frozen_accounts : Vec < Pubkey > ,
2020-03-23 08:42:32 -07:00
pub no_rocksdb_compaction : bool ,
2020-04-16 15:12:20 -07:00
pub accounts_hash_interval_slots : u64 ,
2020-04-30 10:53:34 +09:00
pub max_genesis_archive_unpacked_size : u64 ,
2020-07-06 12:43:45 -07:00
pub wal_recovery_mode : Option < BlockstoreRecoveryMode > ,
2019-01-29 08:51:01 -08:00
}
2019-06-19 00:13:19 -07:00
2019-05-23 22:05:16 -07:00
impl Default for ValidatorConfig {
2019-01-29 08:51:01 -08:00
fn default ( ) -> Self {
Self {
2019-08-08 09:14:30 -07:00
dev_halt_at_slot : None ,
2019-11-08 23:56:57 -05:00
expected_genesis_hash : None ,
2020-06-30 12:43:48 -07:00
expected_bank_hash : None ,
2020-01-28 16:56:55 -07:00
expected_shred_version : None ,
2019-01-30 17:16:55 -07:00
voting_disabled : false ,
2020-03-31 17:21:19 -07:00
max_ledger_shreds : None ,
2019-12-05 21:41:29 -05:00
account_paths : Vec ::new ( ) ,
2019-03-03 22:01:09 -08:00
rpc_config : JsonRpcConfig ::default ( ) ,
2020-01-30 11:58:39 -07:00
rpc_ports : None ,
2019-07-31 17:58:10 -07:00
snapshot_config : None ,
2019-06-19 00:13:19 -07:00
broadcast_stage_type : BroadcastStageType ::Standard ,
2020-01-27 16:49:25 -08:00
enable_partition : None ,
2019-12-03 16:31:59 -08:00
fixed_leader_schedule : None ,
2020-03-02 11:47:58 -07:00
wait_for_supermajority : None ,
2020-01-24 18:27:04 -07:00
new_hard_forks : None ,
2020-02-25 10:41:13 -07:00
trusted_validators : None ,
2020-03-16 08:37:31 -07:00
halt_on_trusted_validators_accounts_hash_mismatch : false ,
accounts_hash_fault_injection_slots : 0 ,
2020-03-22 11:10:04 -07:00
frozen_accounts : vec ! [ ] ,
2020-03-23 08:42:32 -07:00
no_rocksdb_compaction : false ,
2020-04-16 15:12:20 -07:00
accounts_hash_interval_slots : std ::u64 ::MAX ,
2020-04-30 10:53:34 +09:00
max_genesis_archive_unpacked_size : MAX_GENESIS_ARCHIVE_UNPACKED_SIZE ,
2020-07-06 12:43:45 -07:00
wal_recovery_mode : None ,
2019-01-29 08:51:01 -08:00
}
}
}
2019-08-20 23:59:31 -07:00
/// A collection of shutdown callbacks, run exactly once when the validator
/// is asked to exit.
#[derive(Default)]
pub struct ValidatorExit {
    // Callbacks are FnOnce: each may consume captured state and fires at most once.
    exits: Vec<Box<dyn FnOnce() + Send + Sync>>,
}

impl ValidatorExit {
    /// Register a callback to be invoked on exit. Callbacks run in
    /// registration order.
    pub fn register_exit(&mut self, exit: Box<dyn FnOnce() + Send + Sync>) {
        self.exits.push(exit);
    }

    /// Consume `self` and invoke every registered callback.
    pub fn exit(self) {
        for exit in self.exits {
            exit();
        }
    }
}
2019-05-23 22:05:16 -07:00
pub struct Validator {
2019-03-03 16:44:06 -08:00
pub id : Pubkey ,
2019-08-20 23:59:31 -07:00
validator_exit : Arc < RwLock < Option < ValidatorExit > > > ,
2020-01-30 10:17:01 -07:00
rpc_service : Option < ( JsonRpcService , PubSubService ) > ,
2019-11-20 16:43:10 -07:00
transaction_status_service : Option < TransactionStatusService > ,
2020-02-04 19:50:24 -07:00
rewards_recorder_service : Option < RewardsRecorderService > ,
2018-12-06 13:52:47 -07:00
gossip_service : GossipService ,
2020-01-31 14:23:51 -08:00
serve_repair_service : ServeRepairService ,
2020-03-05 23:52:31 -07:00
snapshot_packager_service : Option < SnapshotPackagerService > ,
2019-02-26 10:48:18 -08:00
poh_recorder : Arc < Mutex < PohRecorder > > ,
2019-03-04 19:02:03 -08:00
poh_service : PohService ,
tpu : Tpu ,
tvu : Tvu ,
2019-11-13 05:37:13 +09:00
ip_echo_server : solana_net_utils ::IpEchoServer ,
2018-07-02 15:24:40 -07:00
}
2018-07-02 11:20:35 -07:00
2019-05-23 22:05:16 -07:00
impl Validator {
2020-02-14 13:11:55 -06:00
#[ allow(clippy::cognitive_complexity) ]
2019-05-20 13:32:32 -07:00
pub fn new (
2019-01-29 18:12:32 -08:00
mut node : Node ,
2019-01-30 20:51:50 -07:00
keypair : & Arc < Keypair > ,
2019-07-30 15:53:41 -07:00
ledger_path : & Path ,
2019-03-09 19:28:43 -08:00
vote_account : & Pubkey ,
2020-03-31 08:23:42 -07:00
mut authorized_voter_keypairs : Vec < Arc < Keypair > > ,
2019-03-08 17:23:07 -08:00
entrypoint_info_option : Option < & ContactInfo > ,
2019-11-04 22:14:55 -07:00
poh_verify : bool ,
2019-05-23 22:05:16 -07:00
config : & ValidatorConfig ,
2019-05-20 13:32:32 -07:00
) -> Self {
2019-08-08 15:38:23 -07:00
let id = keypair . pubkey ( ) ;
assert_eq! ( id , node . info . id ) ;
2020-03-13 11:41:18 -07:00
warn! ( " identity: {} " , id ) ;
warn! ( " vote account: {} " , vote_account ) ;
2020-03-31 08:23:42 -07:00
if config . voting_disabled {
warn! ( " voting disabled " ) ;
authorized_voter_keypairs . clear ( ) ;
} else {
for authorized_voter_keypair in & authorized_voter_keypairs {
warn! ( " authorized voter: {} " , authorized_voter_keypair . pubkey ( ) ) ;
}
}
2020-01-07 14:33:39 -07:00
report_target_features ( ) ;
2019-11-04 11:03:39 -07:00
2019-08-08 15:38:23 -07:00
info! ( " entrypoint: {:?} " , entrypoint_info_option ) ;
2019-09-14 12:32:57 -07:00
2020-06-16 23:03:26 -07:00
if solana_perf ::perf_libs ::api ( ) . is_some ( ) {
info! ( " Initializing sigverify, this could take a while... " ) ;
} else {
info! ( " Initializing sigverify... " ) ;
}
2019-09-14 12:32:57 -07:00
sigverify ::init ( ) ;
info! ( " Done. " ) ;
2019-02-06 19:21:31 -08:00
2020-06-23 14:29:07 -07:00
if let Some ( shred_version ) = config . expected_shred_version {
if let Some ( wait_for_supermajority_slot ) = config . wait_for_supermajority {
backup_and_clear_blockstore (
ledger_path ,
wait_for_supermajority_slot + 1 ,
shred_version ,
) ;
}
}
2020-07-07 09:41:45 -07:00
for accounts_path in & config . account_paths {
cleanup_accounts_path ( accounts_path ) ;
}
2019-05-09 14:10:04 -07:00
let (
2020-02-20 19:53:26 -07:00
genesis_config ,
2019-05-09 14:10:04 -07:00
bank_forks ,
2020-01-13 14:13:52 -07:00
blockstore ,
2019-05-09 14:10:04 -07:00
ledger_signal_receiver ,
completed_slots_receiver ,
leader_schedule_cache ,
2020-02-20 19:53:26 -07:00
snapshot_hash ,
2020-07-01 15:19:40 +09:00
) = new_banks_from_ledger ( config , ledger_path , poh_verify ) ;
2019-02-20 17:05:57 -08:00
2019-04-19 02:39:44 -07:00
let leader_schedule_cache = Arc ::new ( leader_schedule_cache ) ;
2020-05-06 08:24:59 -07:00
let bank = bank_forks . working_bank ( ) ;
let bank_forks = Arc ::new ( RwLock ::new ( bank_forks ) ) ;
2020-01-27 18:05:31 -07:00
2020-06-25 18:24:16 +09:00
info! ( " Starting validator with working bank slot {} " , bank . slot ( ) ) ;
2020-01-27 18:05:31 -07:00
{
let hard_forks : Vec < _ > = bank . hard_forks ( ) . read ( ) . unwrap ( ) . iter ( ) . copied ( ) . collect ( ) ;
if ! hard_forks . is_empty ( ) {
info! ( " Hard forks: {:?} " , hard_forks ) ;
}
}
2018-11-15 13:23:26 -08:00
node . info . wallclock = timestamp ( ) ;
2020-02-20 19:53:26 -07:00
node . info . shred_version = compute_shred_version (
& genesis_config . hash ( ) ,
Some ( & bank . hard_forks ( ) . read ( ) . unwrap ( ) ) ,
) ;
2020-06-25 18:24:16 +09:00
2020-01-13 15:59:31 -07:00
Self ::print_node_info ( & node ) ;
2020-01-28 16:56:55 -07:00
if let Some ( expected_shred_version ) = config . expected_shred_version {
if expected_shred_version ! = node . info . shred_version {
error! (
2020-06-30 12:43:48 -07:00
" shred version mismatch: expected {} found: {} " ,
expected_shred_version , node . info . shred_version ,
2020-01-28 16:56:55 -07:00
) ;
process ::exit ( 1 ) ;
}
}
2020-06-25 18:24:16 +09:00
let mut validator_exit = ValidatorExit ::default ( ) ;
let exit = Arc ::new ( AtomicBool ::new ( false ) ) ;
let exit_ = exit . clone ( ) ;
validator_exit . register_exit ( Box ::new ( move | | exit_ . store ( true , Ordering ::Relaxed ) ) ) ;
let validator_exit = Arc ::new ( RwLock ::new ( Some ( validator_exit ) ) ) ;
2020-04-21 12:54:45 -07:00
let cluster_info = Arc ::new ( ClusterInfo ::new ( node . info . clone ( ) , keypair . clone ( ) ) ) ;
2020-01-13 14:13:52 -07:00
let blockstore = Arc ::new ( blockstore ) ;
2020-06-25 22:06:58 -06:00
let block_commitment_cache = Arc ::new ( RwLock ::new ( BlockCommitmentCache ::default ( ) ) ) ;
2019-11-11 13:18:34 -05:00
2020-05-07 00:23:06 -06:00
let subscriptions = Arc ::new ( RpcSubscriptions ::new (
& exit ,
bank_forks . clone ( ) ,
block_commitment_cache . clone ( ) ,
) ) ;
2020-01-30 10:17:01 -07:00
2020-05-28 12:22:19 -07:00
let rpc_override_health_check = Arc ::new ( AtomicBool ::new ( false ) ) ;
2020-01-30 11:58:39 -07:00
let rpc_service = config . rpc_ports . map ( | ( rpc_port , rpc_pubsub_port ) | {
if ContactInfo ::is_valid_address ( & node . info . rpc ) {
assert! ( ContactInfo ::is_valid_address ( & node . info . rpc_pubsub ) ) ;
assert_eq! ( rpc_port , node . info . rpc . port ( ) ) ;
assert_eq! ( rpc_pubsub_port , node . info . rpc_pubsub . port ( ) ) ;
} else {
assert! ( ! ContactInfo ::is_valid_address ( & node . info . rpc_pubsub ) ) ;
}
(
JsonRpcService ::new (
SocketAddr ::new ( IpAddr ::V4 ( Ipv4Addr ::new ( 0 , 0 , 0 , 0 ) ) , rpc_port ) ,
config . rpc_config . clone ( ) ,
2020-02-28 17:57:46 -07:00
config . snapshot_config . clone ( ) ,
2020-01-30 11:58:39 -07:00
bank_forks . clone ( ) ,
block_commitment_cache . clone ( ) ,
blockstore . clone ( ) ,
cluster_info . clone ( ) ,
2020-02-20 19:53:26 -07:00
genesis_config . hash ( ) ,
2020-01-30 11:58:39 -07:00
ledger_path ,
validator_exit . clone ( ) ,
2020-04-14 13:22:58 -07:00
config . trusted_validators . clone ( ) ,
2020-05-28 12:22:19 -07:00
rpc_override_health_check . clone ( ) ,
2020-01-30 11:58:39 -07:00
) ,
PubSubService ::new (
& subscriptions ,
SocketAddr ::new ( IpAddr ::V4 ( Ipv4Addr ::new ( 0 , 0 , 0 , 0 ) ) , rpc_pubsub_port ) ,
& exit ,
) ,
)
} ) ;
2018-10-12 11:04:14 -07:00
2019-11-20 16:43:10 -07:00
let ( transaction_status_sender , transaction_status_service ) =
2020-03-23 11:25:39 -06:00
if rpc_service . is_some ( ) & & config . rpc_config . enable_rpc_transaction_history {
2019-11-21 14:23:40 -07:00
let ( transaction_status_sender , transaction_status_receiver ) = unbounded ( ) ;
2019-11-20 16:43:10 -07:00
(
Some ( transaction_status_sender ) ,
Some ( TransactionStatusService ::new (
transaction_status_receiver ,
2020-01-13 14:13:52 -07:00
blockstore . clone ( ) ,
2019-11-20 16:43:10 -07:00
& exit ,
) ) ,
)
} else {
( None , None )
} ;
2020-02-11 18:01:49 -07:00
let ( rewards_recorder_sender , rewards_recorder_service ) =
2020-03-23 11:25:39 -06:00
if rpc_service . is_some ( ) & & config . rpc_config . enable_rpc_transaction_history {
2020-02-11 18:01:49 -07:00
let ( rewards_recorder_sender , rewards_receiver ) = unbounded ( ) ;
2020-02-04 19:50:24 -07:00
(
2020-02-11 18:01:49 -07:00
Some ( rewards_recorder_sender ) ,
2020-02-04 19:50:24 -07:00
Some ( RewardsRecorderService ::new (
rewards_receiver ,
blockstore . clone ( ) ,
& exit ,
) ) ,
)
} else {
( None , None )
} ;
2019-08-16 16:20:20 -07:00
info! (
" Starting PoH: epoch={} slot={} tick_height={} blockhash={} leader={:?} " ,
bank . epoch ( ) ,
bank . slot ( ) ,
bank . tick_height ( ) ,
bank . last_blockhash ( ) ,
leader_schedule_cache . slot_leader_at ( bank . slot ( ) , Some ( & bank ) )
) ;
2019-08-08 09:14:30 -07:00
if config . dev_halt_at_slot . is_some ( ) {
2020-06-17 21:54:52 -06:00
// Simulate a confirmed root to avoid RPC errors with CommitmentConfig::max() and
2020-05-15 09:02:48 -07:00
// to ensure RPC endpoints like getConfirmedBlock, which require a confirmed root, work
block_commitment_cache
. write ( )
. unwrap ( )
2020-07-07 17:59:46 -06:00
. set_highest_confirmed_root ( bank_forks . read ( ) . unwrap ( ) . root ( ) ) ;
2020-05-15 09:02:48 -07:00
2019-08-08 09:14:30 -07:00
// Park with the RPC service running, ready for inspection!
2019-08-16 16:20:20 -07:00
warn! ( " Validator halted " ) ;
2019-08-08 09:14:30 -07:00
std ::thread ::park ( ) ;
}
2020-02-20 19:53:26 -07:00
let poh_config = Arc ::new ( genesis_config . poh_config ) ;
2019-08-08 09:14:30 -07:00
let ( mut poh_recorder , entry_receiver ) = PohRecorder ::new_with_clear_signal (
bank . tick_height ( ) ,
bank . last_blockhash ( ) ,
bank . slot ( ) ,
2020-02-26 13:35:50 -08:00
leader_schedule_cache . next_leader_slot (
& id ,
bank . slot ( ) ,
& bank ,
Some ( & blockstore ) ,
GRACE_TICKS_FACTOR * MAX_GRACE_SLOTS ,
) ,
2019-08-08 09:14:30 -07:00
bank . ticks_per_slot ( ) ,
& id ,
2020-01-13 14:13:52 -07:00
& blockstore ,
blockstore . new_shreds_signals . first ( ) . cloned ( ) ,
2019-08-08 09:14:30 -07:00
& leader_schedule_cache ,
& poh_config ,
) ;
if config . snapshot_config . is_some ( ) {
poh_recorder . set_bank ( & bank ) ;
}
let poh_recorder = Arc ::new ( Mutex ::new ( poh_recorder ) ) ;
2019-11-13 05:37:13 +09:00
let ip_echo_server = solana_net_utils ::ip_echo_server ( node . sockets . ip_echo . unwrap ( ) ) ;
2019-08-08 09:14:30 -07:00
2018-12-06 13:52:47 -07:00
let gossip_service = GossipService ::new (
2018-10-08 20:55:54 -06:00
& cluster_info ,
2019-02-20 21:36:08 -08:00
Some ( bank_forks . clone ( ) ) ,
2018-08-22 19:00:56 -06:00
node . sockets . gossip ,
2019-03-04 16:33:14 -08:00
& exit ,
2018-09-02 23:23:43 -10:00
) ;
2018-08-22 19:00:56 -06:00
2020-01-31 14:23:51 -08:00
let serve_repair = Arc ::new ( RwLock ::new ( ServeRepair ::new ( cluster_info . clone ( ) ) ) ) ;
let serve_repair_service = ServeRepairService ::new (
& serve_repair ,
Some ( blockstore . clone ( ) ) ,
node . sockets . serve_repair ,
& exit ,
) ;
2019-01-21 11:14:27 -08:00
// Insert the entrypoint info, should only be None if this node
2020-01-22 09:22:09 -07:00
// is the bootstrap validator
2019-01-21 11:14:27 -08:00
if let Some ( entrypoint_info ) = entrypoint_info_option {
2020-04-21 12:54:45 -07:00
cluster_info . set_entrypoint ( entrypoint_info . clone ( ) ) ;
2018-08-22 18:51:53 -06:00
}
2018-08-22 18:50:19 -06:00
2020-03-05 23:52:31 -07:00
let ( snapshot_packager_service , snapshot_package_sender ) =
if config . snapshot_config . is_some ( ) {
// Start a snapshot packaging service
let ( sender , receiver ) = channel ( ) ;
2020-04-24 15:04:23 -07:00
let snapshot_packager_service =
SnapshotPackagerService ::new ( receiver , snapshot_hash , & exit , & cluster_info ) ;
2020-03-05 23:52:31 -07:00
( Some ( snapshot_packager_service ) , Some ( sender ) )
} else {
( None , None )
} ;
2020-06-30 12:43:48 -07:00
if wait_for_supermajority ( config , & bank , & cluster_info , rpc_override_health_check ) {
std ::process ::exit ( 1 ) ;
}
2018-12-03 00:10:43 -08:00
2020-01-07 14:18:34 -07:00
let poh_service = PohService ::new ( poh_recorder . clone ( ) , & poh_config , & exit ) ;
assert_eq! (
2020-01-13 14:13:52 -07:00
blockstore . new_shreds_signals . len ( ) ,
2020-01-07 14:18:34 -07:00
1 ,
" New shred signal for the TVU should be the same as the clear bank signal. "
) ;
2020-04-29 18:02:05 -07:00
let vote_tracker = Arc ::new ( VoteTracker ::new ( bank_forks . read ( ) . unwrap ( ) . root_bank ( ) ) ) ;
2020-03-09 22:03:09 -07:00
2020-03-19 23:35:01 -07:00
let ( retransmit_slots_sender , retransmit_slots_receiver ) = unbounded ( ) ;
2020-07-09 23:52:54 -06:00
let ( verified_vote_sender , verified_vote_receiver ) = unbounded ( ) ;
2019-02-11 17:56:52 -08:00
let tvu = Tvu ::new (
2019-03-08 18:29:08 -08:00
vote_account ,
2020-03-31 08:23:42 -07:00
authorized_voter_keypairs ,
2019-02-21 11:19:45 -08:00
& bank_forks ,
2019-01-26 13:58:08 +05:30
& cluster_info ,
2020-01-30 10:26:27 -07:00
Sockets {
repair : node
. sockets
. repair
. try_clone ( )
. expect ( " Failed to clone repair socket " ) ,
retransmit : node
. sockets
. retransmit_sockets
. iter ( )
. map ( | s | s . try_clone ( ) . expect ( " Failed to clone retransmit socket " ) )
. collect ( ) ,
fetch : node
. sockets
. tvu
. iter ( )
. map ( | s | s . try_clone ( ) . expect ( " Failed to clone TVU Sockets " ) )
. collect ( ) ,
forwards : node
. sockets
. tvu_forwards
. iter ( )
. map ( | s | s . try_clone ( ) . expect ( " Failed to clone TVU forwards Sockets " ) )
. collect ( ) ,
} ,
2020-01-13 14:13:52 -07:00
blockstore . clone ( ) ,
2019-02-04 15:33:43 -08:00
ledger_signal_receiver ,
2019-02-18 19:08:54 -07:00
& subscriptions ,
2019-03-03 16:44:06 -08:00
& poh_recorder ,
2019-04-19 02:39:44 -07:00
& leader_schedule_cache ,
2019-03-04 16:33:14 -08:00
& exit ,
2019-05-09 14:10:04 -07:00
completed_slots_receiver ,
2019-11-04 16:44:27 -07:00
block_commitment_cache ,
2020-01-27 16:49:25 -08:00
config . enable_partition . clone ( ) ,
2019-11-20 16:43:10 -07:00
transaction_status_sender . clone ( ) ,
2020-02-11 18:01:49 -07:00
rewards_recorder_sender ,
2020-03-05 23:52:31 -07:00
snapshot_package_sender ,
2020-03-09 22:03:09 -07:00
vote_tracker . clone ( ) ,
2020-03-19 23:35:01 -07:00
retransmit_slots_sender ,
2020-07-09 23:52:54 -06:00
verified_vote_receiver ,
2020-03-16 08:37:31 -07:00
TvuConfig {
2020-03-31 17:21:19 -07:00
max_ledger_shreds : config . max_ledger_shreds ,
2020-03-16 08:37:31 -07:00
halt_on_trusted_validators_accounts_hash_mismatch : config
. halt_on_trusted_validators_accounts_hash_mismatch ,
shred_version : node . info . shred_version ,
trusted_validators : config . trusted_validators . clone ( ) ,
accounts_hash_fault_injection_slots : config . accounts_hash_fault_injection_slots ,
} ,
2019-03-03 16:44:06 -08:00
) ;
2019-04-19 14:18:19 -07:00
2019-03-03 16:44:06 -08:00
let tpu = Tpu ::new (
& cluster_info ,
& poh_recorder ,
entry_receiver ,
2020-03-19 23:35:01 -07:00
retransmit_slots_receiver ,
2019-03-03 16:44:06 -08:00
node . sockets . tpu ,
2019-07-30 14:50:02 -07:00
node . sockets . tpu_forwards ,
2019-03-03 16:44:06 -08:00
node . sockets . broadcast ,
2020-05-17 22:01:08 +01:00
& subscriptions ,
2019-11-20 16:43:10 -07:00
transaction_status_sender ,
2020-01-13 14:13:52 -07:00
& blockstore ,
2019-06-19 00:13:19 -07:00
& config . broadcast_stage_type ,
2019-03-04 16:33:14 -08:00
& exit ,
2020-01-13 15:59:31 -07:00
node . info . shred_version ,
2020-03-09 22:03:09 -07:00
vote_tracker ,
bank_forks ,
2020-07-09 23:52:54 -06:00
verified_vote_sender ,
2019-01-26 13:58:08 +05:30
) ;
2018-12-12 12:38:00 -08:00
2019-07-30 16:18:33 -04:00
datapoint_info! ( " validator-new " , ( " id " , id . to_string ( ) , String ) ) ;
2019-02-21 11:37:48 -08:00
Self {
2019-01-30 20:02:35 -07:00
id ,
2018-12-06 13:52:47 -07:00
gossip_service ,
2020-01-31 14:23:51 -08:00
serve_repair_service ,
2019-04-12 18:17:34 -07:00
rpc_service ,
2019-11-20 16:43:10 -07:00
transaction_status_service ,
2020-02-04 19:50:24 -07:00
rewards_recorder_service ,
2020-03-05 23:52:31 -07:00
snapshot_packager_service ,
2019-03-04 19:02:03 -08:00
tpu ,
tvu ,
2019-02-26 10:48:18 -08:00
poh_service ,
poh_recorder ,
2019-05-03 11:01:35 -07:00
ip_echo_server ,
2019-08-20 23:59:31 -07:00
validator_exit ,
2019-02-01 18:09:38 -08:00
}
}
2019-01-28 20:10:38 -08:00
// Used for notifying many nodes in parallel to exit
2019-08-20 23:59:31 -07:00
pub fn exit ( & mut self ) {
if let Some ( x ) = self . validator_exit . write ( ) . unwrap ( ) . take ( ) {
x . exit ( )
}
2018-07-16 22:22:29 -07:00
}
2018-09-13 14:00:17 -07:00
2019-08-20 23:59:31 -07:00
/// Signal all services to exit, then block until they have shut down.
pub fn close(mut self) -> Result<()> {
    self.exit();
    self.join()
}
2019-09-14 12:32:57 -07:00
/// Log the node's contact info and the local addresses of its key sockets.
/// Panics (`unwrap`) if any socket has no resolvable local address.
fn print_node_info(node: &Node) {
    info!("{:?}", node.info);
    info!(
        "local gossip address: {}",
        node.sockets.gossip.local_addr().unwrap()
    );
    info!(
        "local broadcast address: {}",
        node.sockets
            .broadcast
            .first()
            .unwrap()
            .local_addr()
            .unwrap()
    );
    info!(
        "local repair address: {}",
        node.sockets.repair.local_addr().unwrap()
    );
    info!(
        "local retransmit address: {}",
        node.sockets.retransmit_sockets[0].local_addr().unwrap()
    );
}
2019-11-13 11:12:09 -07:00
pub fn join ( self ) -> Result < ( ) > {
self . poh_service . join ( ) ? ;
drop ( self . poh_recorder ) ;
2020-01-30 10:17:01 -07:00
if let Some ( ( rpc_service , rpc_pubsub_service ) ) = self . rpc_service {
2019-11-13 11:12:09 -07:00
rpc_service . join ( ) ? ;
rpc_pubsub_service . join ( ) ? ;
}
2019-11-20 16:43:10 -07:00
if let Some ( transaction_status_service ) = self . transaction_status_service {
transaction_status_service . join ( ) ? ;
}
2019-11-13 11:12:09 -07:00
2020-02-04 19:50:24 -07:00
if let Some ( rewards_recorder_service ) = self . rewards_recorder_service {
rewards_recorder_service . join ( ) ? ;
}
2020-03-05 23:52:31 -07:00
if let Some ( s ) = self . snapshot_packager_service {
s . join ( ) ? ;
}
2019-11-13 11:12:09 -07:00
self . gossip_service . join ( ) ? ;
2020-01-31 14:23:51 -08:00
self . serve_repair_service . join ( ) ? ;
2019-11-13 11:12:09 -07:00
self . tpu . join ( ) ? ;
self . tvu . join ( ) ? ;
self . ip_echo_server . shutdown_now ( ) ;
Ok ( ( ) )
}
2019-02-06 19:47:55 -08:00
}
2018-09-14 01:53:18 -07:00
2020-02-20 19:53:26 -07:00
#[ allow(clippy::type_complexity) ]
2020-07-01 15:19:40 +09:00
fn new_banks_from_ledger (
2020-02-25 00:27:19 -07:00
config : & ValidatorConfig ,
2020-07-01 15:19:40 +09:00
ledger_path : & Path ,
2019-11-04 22:14:55 -07:00
poh_verify : bool ,
2019-04-19 02:39:44 -07:00
) -> (
2020-02-20 19:53:26 -07:00
GenesisConfig ,
2019-04-19 02:39:44 -07:00
BankForks ,
2020-01-13 14:13:52 -07:00
Blockstore ,
2019-04-19 02:39:44 -07:00
Receiver < bool > ,
2019-05-09 14:10:04 -07:00
CompletedSlotsReceiver ,
2019-04-19 02:39:44 -07:00
LeaderScheduleCache ,
2020-02-20 19:53:26 -07:00
Option < ( Slot , Hash ) > ,
2019-04-19 02:39:44 -07:00
) {
2020-07-01 15:19:40 +09:00
info! ( " loading ledger from {:?}... " , ledger_path ) ;
let genesis_config = open_genesis_config ( ledger_path , config . max_genesis_archive_unpacked_size ) ;
2020-02-24 19:27:04 -08:00
// This needs to be limited otherwise the state in the VoteAccount data
// grows too large
let leader_schedule_slot_offset = genesis_config . epoch_schedule . leader_schedule_slot_offset ;
let slots_per_epoch = genesis_config . epoch_schedule . slots_per_epoch ;
let leader_epoch_offset = ( leader_schedule_slot_offset + slots_per_epoch - 1 ) / slots_per_epoch ;
assert! ( leader_epoch_offset < = MAX_LEADER_SCHEDULE_EPOCH_OFFSET ) ;
2019-11-08 23:56:57 -05:00
let genesis_hash = genesis_config . hash ( ) ;
info! ( " genesis hash: {} " , genesis_hash ) ;
2020-02-25 00:27:19 -07:00
if let Some ( expected_genesis_hash ) = config . expected_genesis_hash {
2019-11-08 23:56:57 -05:00
if genesis_hash ! = expected_genesis_hash {
error! ( " genesis hash mismatch: expected {} " , expected_genesis_hash ) ;
2020-07-01 15:19:40 +09:00
error! ( " Delete the ledger directory to continue: {:?} " , ledger_path ) ;
2019-11-04 11:03:39 -07:00
process ::exit ( 1 ) ;
2019-08-21 18:16:40 -07:00
}
}
2019-02-19 18:31:56 -08:00
2020-03-23 08:42:32 -07:00
let ( mut blockstore , ledger_signal_receiver , completed_slots_receiver ) =
2020-07-06 12:43:45 -07:00
Blockstore ::open_with_signal ( ledger_path , config . wal_recovery_mode . clone ( ) )
. expect ( " Failed to open ledger database " ) ;
2020-03-23 08:42:32 -07:00
blockstore . set_no_compaction ( config . no_rocksdb_compaction ) ;
2019-02-26 21:16:18 -08:00
2020-01-13 14:13:52 -07:00
let process_options = blockstore_processor ::ProcessOptions {
2019-11-04 22:14:55 -07:00
poh_verify ,
2020-01-22 19:31:50 -07:00
dev_halt_at_slot : config . dev_halt_at_slot ,
2020-01-24 18:27:04 -07:00
new_hard_forks : config . new_hard_forks . clone ( ) ,
2020-03-22 11:10:04 -07:00
frozen_accounts : config . frozen_accounts . clone ( ) ,
2020-01-13 14:13:52 -07:00
.. blockstore_processor ::ProcessOptions ::default ( )
2019-11-04 19:10:06 -07:00
} ;
2020-05-06 08:24:59 -07:00
let ( mut bank_forks , mut leader_schedule_cache , snapshot_hash ) = bank_forks_utils ::load (
& genesis_config ,
& blockstore ,
config . account_paths . clone ( ) ,
config . snapshot_config . as_ref ( ) ,
process_options ,
)
. unwrap_or_else ( | err | {
error! ( " Failed to load ledger: {:?} " , err ) ;
2020-06-25 18:24:16 +09:00
process ::exit ( 1 ) ;
2020-05-06 08:24:59 -07:00
} ) ;
2019-02-06 19:47:55 -08:00
2020-01-22 19:31:50 -07:00
leader_schedule_cache . set_fixed_leader_schedule ( config . fixed_leader_schedule . clone ( ) ) ;
2019-12-03 16:31:59 -08:00
2020-01-22 19:31:50 -07:00
bank_forks . set_snapshot_config ( config . snapshot_config . clone ( ) ) ;
2020-04-16 15:12:20 -07:00
bank_forks . set_accounts_hash_interval_slots ( config . accounts_hash_interval_slots ) ;
2019-08-25 21:33:41 -07:00
2019-02-06 19:47:55 -08:00
(
2020-02-20 19:53:26 -07:00
genesis_config ,
2019-02-20 17:05:57 -08:00
bank_forks ,
2020-01-13 14:13:52 -07:00
blockstore ,
2019-02-16 20:58:07 -07:00
ledger_signal_receiver ,
2019-05-09 14:10:04 -07:00
completed_slots_receiver ,
2019-04-19 02:39:44 -07:00
leader_schedule_cache ,
2020-02-20 19:53:26 -07:00
snapshot_hash ,
2019-02-16 20:58:07 -07:00
)
}
2020-06-23 14:29:07 -07:00
fn backup_and_clear_blockstore ( ledger_path : & Path , start_slot : Slot , shred_version : u16 ) {
2020-06-22 20:27:25 -07:00
use std ::time ::Instant ;
2020-06-23 14:29:07 -07:00
let blockstore = Blockstore ::open ( ledger_path ) . unwrap ( ) ;
let mut do_copy_and_clear = false ;
// Search for shreds with incompatible version in blockstore
2020-06-22 20:27:25 -07:00
if let Ok ( slot_meta_iterator ) = blockstore . slot_meta_iterator ( start_slot ) {
2020-06-23 14:29:07 -07:00
for ( slot , _meta ) in slot_meta_iterator {
if let Ok ( shreds ) = blockstore . get_data_shreds_for_slot ( slot , 0 ) {
for shred in & shreds {
if shred . version ( ) ! = shred_version {
do_copy_and_clear = true ;
break ;
}
}
}
}
}
// If found, then copy shreds to another db and clear from start_slot
if do_copy_and_clear {
let folder_name = format! ( " backup_rocksdb_ {} " , thread_rng ( ) . gen_range ( 0 , 99999 ) ) ;
let backup_blockstore = Blockstore ::open ( & ledger_path . join ( folder_name ) ) ;
2020-06-22 20:27:25 -07:00
let mut last_print = Instant ::now ( ) ;
let mut copied = 0 ;
2020-06-23 14:29:07 -07:00
let mut last_slot = None ;
let slot_meta_iterator = blockstore . slot_meta_iterator ( start_slot ) . unwrap ( ) ;
2020-06-22 20:27:25 -07:00
for ( slot , _meta ) in slot_meta_iterator {
if let Ok ( shreds ) = blockstore . get_data_shreds_for_slot ( slot , 0 ) {
if let Ok ( ref backup_blockstore ) = backup_blockstore {
copied + = shreds . len ( ) ;
let _ = backup_blockstore . insert_shreds ( shreds , None , true ) ;
}
}
if last_print . elapsed ( ) . as_millis ( ) > 3000 {
info! (
" Copying shreds from slot {} copied {} so far. " ,
start_slot , copied
) ;
last_print = Instant ::now ( ) ;
}
2020-06-23 14:29:07 -07:00
last_slot = Some ( slot ) ;
2020-06-22 20:27:25 -07:00
}
2020-06-23 14:29:07 -07:00
let end_slot = last_slot . unwrap ( ) ;
2020-06-22 20:27:25 -07:00
info! ( " Purging slots {} to {} " , start_slot , end_slot ) ;
2020-06-29 14:44:35 -07:00
blockstore . purge_slots ( start_slot , end_slot , PurgeType ::Exact ) ;
2020-06-23 14:29:07 -07:00
blockstore . purge_from_next_slots ( start_slot , end_slot ) ;
2020-06-22 20:27:25 -07:00
info! ( " Purging done, compacting db.. " ) ;
if let Err ( e ) = blockstore . compact_storage ( start_slot , end_slot ) {
warn! (
" Error from compacting storage from {} to {}: {:?} " ,
start_slot , end_slot , e
) ;
}
info! ( " done " ) ;
}
2020-06-23 14:29:07 -07:00
drop ( blockstore ) ;
2020-06-22 20:27:25 -07:00
}
2020-06-30 12:43:48 -07:00
// Return true on error, indicating the validator should exit.
2020-05-28 12:22:19 -07:00
fn wait_for_supermajority (
config : & ValidatorConfig ,
bank : & Bank ,
cluster_info : & ClusterInfo ,
rpc_override_health_check : Arc < AtomicBool > ,
2020-06-30 12:43:48 -07:00
) -> bool {
if let Some ( wait_for_supermajority ) = config . wait_for_supermajority {
match wait_for_supermajority . cmp ( & bank . slot ( ) ) {
std ::cmp ::Ordering ::Less = > return false ,
std ::cmp ::Ordering ::Greater = > {
error! ( " Ledger does not have enough data to wait for supermajority, please enable snapshot fetch. Has {} needs {} " , bank . slot ( ) , wait_for_supermajority ) ;
return true ;
}
_ = > { }
}
} else {
return false ;
}
if let Some ( expected_bank_hash ) = config . expected_bank_hash {
if bank . hash ( ) ! = expected_bank_hash {
error! (
" Bank hash({}) does not match expected value: {} " ,
bank . hash ( ) ,
expected_bank_hash
) ;
return true ;
}
2020-01-30 10:26:27 -07:00
}
info! (
2020-03-09 20:31:09 -07:00
" Waiting for 80% of activated stake at slot {} to be in gossip... " ,
2020-01-30 10:26:27 -07:00
bank . slot ( )
) ;
2020-04-17 11:05:59 -07:00
for i in 1 .. {
let gossip_stake_percent = get_stake_percent_in_gossip ( & bank , & cluster_info , i % 10 = = 0 ) ;
2020-01-30 10:26:27 -07:00
2020-03-09 20:31:09 -07:00
if gossip_stake_percent > = 80 {
2020-01-30 10:26:27 -07:00
break ;
}
2020-05-28 12:22:19 -07:00
// The normal RPC health checks don't apply as the node is waiting, so feign health to
// prevent load balancers from removing the node from their list of candidates during a
// manual restart.
rpc_override_health_check . store ( true , Ordering ::Relaxed ) ;
2020-01-30 10:26:27 -07:00
sleep ( Duration ::new ( 1 , 0 ) ) ;
}
2020-05-28 12:22:19 -07:00
rpc_override_health_check . store ( false , Ordering ::Relaxed ) ;
2020-06-30 12:43:48 -07:00
false
2020-01-30 10:26:27 -07:00
}
2020-02-26 12:23:54 +08:00
/// Handles for a fully-booted single-node cluster used by tests.
pub struct TestValidator {
    pub server: Validator,        // the running validator instance
    pub leader_data: ContactInfo, // contact info of the node
    pub alice: Keypair,           // the genesis mint keypair, funded with `mint_lamports`
    pub ledger_path: PathBuf,     // temporary ledger location; caller is responsible for cleanup
    pub genesis_hash: Hash,       // blockhash of the freshly created genesis ledger
    pub vote_pubkey: Pubkey,      // pubkey of the node's voting keypair
}
2020-02-26 12:23:54 +08:00
/// Tunable knobs for `TestValidator::run_with_options`.
pub struct TestValidatorOptions {
    pub fees: u64, // lamports-per-signature fed to the genesis `FeeRateGovernor`
    pub bootstrap_validator_lamports: u64,
    pub mint_lamports: u64, // lamports minted to the `alice` keypair at genesis
}
2020-02-26 12:23:54 +08:00
impl Default for TestValidatorOptions {
fn default ( ) -> Self {
2020-03-21 10:54:40 -07:00
use solana_ledger ::genesis_utils ::BOOTSTRAP_VALIDATOR_LAMPORTS ;
2020-02-26 12:23:54 +08:00
TestValidatorOptions {
fees : 0 ,
bootstrap_validator_lamports : BOOTSTRAP_VALIDATOR_LAMPORTS ,
2020-04-06 17:20:55 -06:00
mint_lamports : 1_000_000 ,
2020-02-26 12:23:54 +08:00
}
}
}
2019-03-12 18:27:52 -06:00
2020-02-26 12:23:54 +08:00
impl TestValidator {
pub fn run ( ) -> Self {
Self ::run_with_options ( TestValidatorOptions ::default ( ) )
}
2019-10-01 01:14:49 +05:30
2020-02-26 12:23:54 +08:00
pub fn run_with_options ( options : TestValidatorOptions ) -> Self {
2020-03-21 10:54:40 -07:00
use solana_ledger ::genesis_utils ::{
create_genesis_config_with_leader_ex , GenesisConfigInfo ,
} ;
2020-02-28 13:27:01 -07:00
use solana_sdk ::fee_calculator ::FeeRateGovernor ;
2020-02-26 12:23:54 +08:00
let TestValidatorOptions {
fees ,
bootstrap_validator_lamports ,
2020-04-06 17:20:55 -06:00
mint_lamports ,
2020-02-26 12:23:54 +08:00
} = options ;
let node_keypair = Arc ::new ( Keypair ::new ( ) ) ;
let node = Node ::new_localhost_with_pubkey ( & node_keypair . pubkey ( ) ) ;
let contact_info = node . info . clone ( ) ;
let GenesisConfigInfo {
mut genesis_config ,
mint_keypair ,
voting_keypair ,
} = create_genesis_config_with_leader_ex (
2020-04-06 17:20:55 -06:00
mint_lamports ,
2020-02-26 12:23:54 +08:00
& contact_info . id ,
42 ,
bootstrap_validator_lamports ,
) ;
genesis_config
. native_instruction_processors
. push ( solana_budget_program! ( ) ) ;
2020-04-15 09:41:29 -07:00
genesis_config
. native_instruction_processors
. push ( solana_bpf_loader_program! ( ) ) ;
2019-03-12 18:27:52 -06:00
2020-02-26 12:23:54 +08:00
genesis_config . rent . lamports_per_byte_year = 1 ;
genesis_config . rent . exemption_threshold = 1.0 ;
2020-02-28 13:27:01 -07:00
genesis_config . fee_rate_governor = FeeRateGovernor ::new ( fees , 0 ) ;
2020-02-26 12:23:54 +08:00
let ( ledger_path , blockhash ) = create_new_tmp_ledger! ( & genesis_config ) ;
let leader_voting_keypair = Arc ::new ( voting_keypair ) ;
let config = ValidatorConfig {
rpc_ports : Some ( ( node . info . rpc . port ( ) , node . info . rpc_pubsub . port ( ) ) ) ,
.. ValidatorConfig ::default ( )
} ;
let node = Validator ::new (
node ,
& node_keypair ,
& ledger_path ,
& leader_voting_keypair . pubkey ( ) ,
2020-03-31 08:23:42 -07:00
vec! [ leader_voting_keypair . clone ( ) ] ,
2020-02-26 12:23:54 +08:00
None ,
true ,
& config ,
) ;
discover_cluster ( & contact_info . gossip , 1 ) . expect ( " Node startup failed " ) ;
TestValidator {
server : node ,
leader_data : contact_info ,
alice : mint_keypair ,
ledger_path ,
genesis_hash : blockhash ,
vote_pubkey : leader_voting_keypair . pubkey ( ) ,
}
}
2019-03-12 18:27:52 -06:00
}
2020-01-07 14:33:39 -07:00
fn report_target_features ( ) {
warn! (
" CUDA is {}abled " ,
if solana_perf ::perf_libs ::api ( ) . is_some ( ) {
" en "
} else {
" dis "
}
) ;
2020-03-01 15:17:23 -07:00
#[ cfg(any(target_arch = " x86 " , target_arch = " x86_64 " )) ]
2020-01-07 14:33:39 -07:00
{
2020-03-01 15:17:23 -07:00
// Validator binaries built on a machine with AVX support will generate invalid opcodes
// when run on machines without AVX causing a non-obvious process abort. Instead detect
// the mismatch and error cleanly.
#[ target_feature(enable = " avx " ) ]
{
if is_x86_feature_detected! ( " avx " ) {
info! ( " AVX detected " ) ;
} else {
error! ( " Your machine does not have AVX support, please rebuild from source on your machine " ) ;
process ::exit ( 1 ) ;
}
2020-01-07 14:33:39 -07:00
}
}
}
2020-01-07 14:18:34 -07:00
// Get the activated stake percentage (based on the provided bank) that is visible in gossip
2020-04-21 12:54:45 -07:00
fn get_stake_percent_in_gossip ( bank : & Bank , cluster_info : & ClusterInfo , log : bool ) -> u64 {
2020-04-17 11:05:59 -07:00
let mut online_stake = 0 ;
let mut wrong_shred_stake = 0 ;
let mut wrong_shred_nodes = vec! [ ] ;
let mut offline_stake = 0 ;
let mut offline_nodes = vec! [ ] ;
2020-01-07 14:18:34 -07:00
let mut total_activated_stake = 0 ;
2020-04-21 12:54:45 -07:00
let all_tvu_peers = cluster_info . all_tvu_peers ( ) ;
let my_shred_version = cluster_info . my_shred_version ( ) ;
let my_id = cluster_info . id ( ) ;
2020-01-07 14:18:34 -07:00
for ( activated_stake , vote_account ) in bank . vote_accounts ( ) . values ( ) {
2020-06-25 18:24:16 +09:00
let vote_state = VoteState ::from ( & vote_account ) . unwrap_or_default ( ) ;
2020-01-07 14:18:34 -07:00
total_activated_stake + = activated_stake ;
2020-04-17 11:05:59 -07:00
if * activated_stake = = 0 {
continue ;
}
if let Some ( peer ) = all_tvu_peers
2020-01-07 14:18:34 -07:00
. iter ( )
2020-04-17 11:05:59 -07:00
. find ( | peer | peer . id = = vote_state . node_pubkey )
2020-01-07 14:18:34 -07:00
{
2020-04-18 10:16:19 -07:00
if peer . shred_version = = my_shred_version {
2020-04-17 11:05:59 -07:00
trace! (
" observed {} in gossip, (activated_stake={}) " ,
vote_state . node_pubkey ,
activated_stake
) ;
online_stake + = activated_stake ;
} else {
wrong_shred_stake + = activated_stake ;
wrong_shred_nodes . push ( ( * activated_stake , vote_state . node_pubkey ) ) ;
}
2020-04-18 10:16:19 -07:00
} else if vote_state . node_pubkey = = my_id {
online_stake + = activated_stake ; // This node is online
2020-04-17 11:05:59 -07:00
} else {
offline_stake + = activated_stake ;
offline_nodes . push ( ( * activated_stake , vote_state . node_pubkey ) ) ;
}
}
if log {
info! (
" {}% of active stake visible in gossip " ,
online_stake * 100 / total_activated_stake
) ;
if ! wrong_shred_nodes . is_empty ( ) {
info! (
" {}% of active stake has the wrong shred version in gossip " ,
wrong_shred_stake * 100 / total_activated_stake ,
2020-01-07 14:18:34 -07:00
) ;
2020-04-17 11:05:59 -07:00
for ( stake , identity ) in wrong_shred_nodes {
info! (
" {}% - {} " ,
stake * 100 / total_activated_stake ,
identity
) ;
}
}
if ! offline_nodes . is_empty ( ) {
info! (
" {}% of active stake is not visible in gossip " ,
offline_stake * 100 / total_activated_stake
) ;
for ( stake , identity ) in offline_nodes {
info! (
" {}% - {} " ,
stake * 100 / total_activated_stake ,
identity
) ;
}
2020-01-07 14:18:34 -07:00
}
}
2020-04-17 11:05:59 -07:00
online_stake * 100 / total_activated_stake
2020-01-07 14:18:34 -07:00
}
2020-07-07 09:41:45 -07:00
fn cleanup_accounts_dir_entry ( path : std ::fs ::DirEntry , accounts_file_regex : & regex ::Regex ) {
if let Ok ( file_type ) = path . file_type ( ) {
if file_type . is_file ( ) {
if let Ok ( file_name ) = path . file_name ( ) . into_string ( ) {
if accounts_file_regex . is_match ( & file_name ) {
if let Err ( e ) = std ::fs ::remove_file ( path . path ( ) ) {
info! ( " Couldn't delete file: {:?} error: {:?} " , path , e ) ;
}
}
}
}
}
}
// Cleanup anything that looks like an accounts append-vec
fn cleanup_accounts_path(account_path: &std::path::Path) {
    use regex::Regex;
    // Append-vec files are named "<slot>.<id>" — two integers separated by a
    // literal dot. The pattern is anchored and the dot escaped; the previous
    // unanchored `(\d+).(\d+)` let `.` match ANY character, so unrelated files
    // merely containing two digit runs (e.g. "snapshot-123-456") were deleted.
    let accounts_file_regex = Regex::new(r"^\d+\.\d+$").unwrap();
    if let Ok(dir_entries) = std::fs::read_dir(&account_path) {
        // Unreadable directory entries are silently skipped, as before.
        for path in dir_entries.flatten() {
            cleanup_accounts_dir_entry(path, &accounts_file_regex);
        }
    }
}
2018-07-02 15:24:40 -07:00
#[cfg(test)]
mod tests {
    use super::*;
    use solana_ledger::genesis_utils::create_genesis_config_with_leader;
    use std::fs::remove_dir_all;

    #[test]
    fn validator_exit() {
        solana_logger::setup();
        let leader_keypair = Keypair::new();
        let leader_node = Node::new_localhost_with_pubkey(&leader_keypair.pubkey());

        let validator_keypair = Keypair::new();
        let validator_node = Node::new_localhost_with_pubkey(&validator_keypair.pubkey());
        let genesis_config =
            create_genesis_config_with_leader(10_000, &leader_keypair.pubkey(), 1000)
                .genesis_config;
        let (validator_ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_config);

        let voting_keypair = Arc::new(Keypair::new());
        let config = ValidatorConfig {
            rpc_ports: Some((
                validator_node.info.rpc.port(),
                validator_node.info.rpc_pubsub.port(),
            )),
            ..ValidatorConfig::default()
        };
        let validator = Validator::new(
            validator_node,
            &Arc::new(validator_keypair),
            &validator_ledger_path,
            &voting_keypair.pubkey(),
            vec![voting_keypair.clone()],
            Some(&leader_node.info),
            true,
            &config,
        );
        // `close` signals exit and joins all the validator's threads.
        validator.close().unwrap();
        remove_dir_all(validator_ledger_path).unwrap();
    }

    #[test]
    fn test_backup_and_clear_blockstore() {
        use solana_ledger::get_tmp_ledger_path;
        use solana_ledger::{blockstore, entry};
        use std::time::Instant;
        solana_logger::setup();
        let blockstore_path = get_tmp_ledger_path!();
        {
            // Populate slots 1..10 with single-tick shreds, each chained to the
            // previous slot, all carrying shred version 1.
            let blockstore = Blockstore::open(&blockstore_path).unwrap();
            let entries = entry::create_ticks(1, 0, Hash::default());
            info!("creating shreds");
            let mut last_print = Instant::now();
            for i in 1..10 {
                let shreds = blockstore::entries_to_test_shreds(entries.clone(), i, i - 1, true, 1);
                blockstore.insert_shreds(shreds, None, true).unwrap();
                if last_print.elapsed().as_millis() > 5000 {
                    info!("inserted {}", i);
                    last_print = Instant::now();
                }
            }
            drop(blockstore);

            // From slot 5 on, the stored shred version (1) differs from the
            // expected version (2), so those slots should be backed up and purged.
            backup_and_clear_blockstore(&blockstore_path, 5, 2);

            let blockstore = Blockstore::open(&blockstore_path).unwrap();
            assert!(blockstore.meta(4).unwrap().unwrap().next_slots.is_empty());
            for slot in 5..10 {
                assert!(blockstore
                    .get_data_shreds_for_slot(slot, 0)
                    .unwrap()
                    .is_empty());
            }
        }
    }

    #[test]
    fn validator_parallel_exit() {
        let leader_keypair = Keypair::new();
        let leader_node = Node::new_localhost_with_pubkey(&leader_keypair.pubkey());

        let mut ledger_paths = vec![];
        let mut validators: Vec<Validator> = (0..2)
            .map(|_| {
                let validator_keypair = Keypair::new();
                let validator_node = Node::new_localhost_with_pubkey(&validator_keypair.pubkey());
                let genesis_config =
                    create_genesis_config_with_leader(10_000, &leader_keypair.pubkey(), 1000)
                        .genesis_config;
                let (validator_ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_config);
                ledger_paths.push(validator_ledger_path.clone());
                let vote_account_keypair = Arc::new(Keypair::new());
                let config = ValidatorConfig {
                    rpc_ports: Some((
                        validator_node.info.rpc.port(),
                        validator_node.info.rpc_pubsub.port(),
                    )),
                    ..ValidatorConfig::default()
                };
                Validator::new(
                    validator_node,
                    &Arc::new(validator_keypair),
                    &validator_ledger_path,
                    &vote_account_keypair.pubkey(),
                    vec![vote_account_keypair.clone()],
                    Some(&leader_node.info),
                    true,
                    &config,
                )
            })
            .collect();

        // Each validator can exit in parallel to speed many sequential calls to `join`
        validators.iter_mut().for_each(|v| v.exit());
        // While join is called sequentially, the above exit call notified all the
        // validators to exit from all their threads
        validators.into_iter().for_each(|validator| {
            validator.join().unwrap();
        });

        for path in ledger_paths {
            remove_dir_all(path).unwrap();
        }
    }

    #[test]
    fn test_wait_for_supermajority() {
        use solana_sdk::genesis_config::create_genesis_config;
        use solana_sdk::hash::hash;
        solana_logger::setup();

        let node_keypair = Arc::new(Keypair::new());
        let cluster_info = ClusterInfo::new(
            ContactInfo::new_localhost(&node_keypair.pubkey(), timestamp()),
            node_keypair,
        );
        let (genesis_config, _mint_keypair) = create_genesis_config(1);
        let bank = Arc::new(Bank::new(&genesis_config));
        let mut config = ValidatorConfig::default();
        let rpc_override_health_check = Arc::new(AtomicBool::new(false));

        // No wait slot configured: nothing to wait for, no error.
        assert!(!wait_for_supermajority(
            &config,
            &bank,
            &cluster_info,
            rpc_override_health_check.clone()
        ));

        // bank=0, wait=1, should fail
        config.wait_for_supermajority = Some(1);
        assert!(wait_for_supermajority(
            &config,
            &bank,
            &cluster_info,
            rpc_override_health_check.clone()
        ));

        // bank=1, wait=0, should pass, bank is past the wait slot
        let bank = Bank::new_from_parent(&bank, &Pubkey::default(), 1);
        config.wait_for_supermajority = Some(0);
        assert!(!wait_for_supermajority(
            &config,
            &bank,
            &cluster_info,
            rpc_override_health_check.clone()
        ));

        // bank=1, wait=1, equal, but bad hash provided
        config.wait_for_supermajority = Some(1);
        config.expected_bank_hash = Some(hash(&[1]));
        assert!(wait_for_supermajority(
            &config,
            &bank,
            &cluster_info,
            rpc_override_health_check
        ));
    }

    #[test]
    fn accounts_clean() {
        use std::fs::File;
        let temp_dir = tempfile::tempdir_in("farf").unwrap();
        let temp_path = temp_dir.path();
        {
            let _file1 = File::create(temp_path.join("foo.txt")).unwrap();
            let _file2 = File::create(temp_path.join("123.2222")).unwrap();
        }
        // A directory whose name matches the pattern must survive the cleanup.
        std::fs::create_dir(temp_path.join("12.088")).unwrap();
        cleanup_accounts_path(temp_dir.path());
        assert!(File::open(temp_path.join("foo.txt")).is_ok());
        assert!(File::open(temp_path.join("123.2222")).is_err());
        assert!(std::fs::read_dir(temp_path.join("12.088")).is_ok());
    }
}