Add ability to hard fork at any slot (#7801)

automerge
This commit is contained in:
Michael Vines
2020-01-24 18:27:04 -07:00
committed by Grimes
parent a2f2c46f87
commit 989355e885
8 changed files with 227 additions and 36 deletions

View File

@ -31,11 +31,11 @@ use solana_ledger::{
leader_schedule_cache::LeaderScheduleCache, leader_schedule_cache::LeaderScheduleCache,
}; };
use solana_metrics::datapoint_info; use solana_metrics::datapoint_info;
use solana_runtime::bank::Bank; use solana_runtime::{bank::Bank, hard_forks::HardForks};
use solana_sdk::{ use solana_sdk::{
clock::{Slot, DEFAULT_SLOTS_PER_TURN}, clock::{Slot, DEFAULT_SLOTS_PER_TURN},
genesis_config::GenesisConfig, genesis_config::GenesisConfig,
hash::Hash, hash::{extend_and_hash, Hash},
poh_config::PohConfig, poh_config::PohConfig,
pubkey::Pubkey, pubkey::Pubkey,
signature::{Keypair, KeypairUtil}, signature::{Keypair, KeypairUtil},
@ -69,6 +69,7 @@ pub struct ValidatorConfig {
pub partition_cfg: Option<PartitionCfg>, pub partition_cfg: Option<PartitionCfg>,
pub fixed_leader_schedule: Option<FixedSchedule>, pub fixed_leader_schedule: Option<FixedSchedule>,
pub wait_for_supermajority: bool, pub wait_for_supermajority: bool,
pub new_hard_forks: Option<Vec<Slot>>,
} }
impl Default for ValidatorConfig { impl Default for ValidatorConfig {
@ -89,6 +90,7 @@ impl Default for ValidatorConfig {
partition_cfg: None, partition_cfg: None,
fixed_leader_schedule: None, fixed_leader_schedule: None,
wait_for_supermajority: false, wait_for_supermajority: false,
new_hard_forks: None,
} }
} }
} }
@ -179,7 +181,8 @@ impl Validator {
let validator_exit = Arc::new(RwLock::new(Some(validator_exit))); let validator_exit = Arc::new(RwLock::new(Some(validator_exit)));
node.info.wallclock = timestamp(); node.info.wallclock = timestamp();
node.info.shred_version = Shred::version_from_hash(&genesis_hash); node.info.shred_version =
compute_shred_version(&genesis_hash, &bank.hard_forks().read().unwrap());
Self::print_node_info(&node); Self::print_node_info(&node);
let cluster_info = Arc::new(RwLock::new(ClusterInfo::new( let cluster_info = Arc::new(RwLock::new(ClusterInfo::new(
@ -466,7 +469,21 @@ impl Validator {
} }
} }
pub fn new_banks_from_blockstore( fn compute_shred_version(genesis_hash: &Hash, hard_forks: &HardForks) -> u16 {
use byteorder::{ByteOrder, LittleEndian};
let mut hash = *genesis_hash;
for (slot, count) in hard_forks.iter() {
let mut buf = [0u8; 16];
LittleEndian::write_u64(&mut buf[..7], *slot);
LittleEndian::write_u64(&mut buf[8..], *count as u64);
hash = extend_and_hash(&hash, &buf);
}
Shred::version_from_hash(&hash)
}
fn new_banks_from_blockstore(
expected_genesis_hash: Option<Hash>, expected_genesis_hash: Option<Hash>,
blockstore_path: &Path, blockstore_path: &Path,
poh_verify: bool, poh_verify: bool,
@ -505,6 +522,7 @@ pub fn new_banks_from_blockstore(
let process_options = blockstore_processor::ProcessOptions { let process_options = blockstore_processor::ProcessOptions {
poh_verify, poh_verify,
dev_halt_at_slot: config.dev_halt_at_slot, dev_halt_at_slot: config.dev_halt_at_slot,
new_hard_forks: config.new_hard_forks.clone(),
..blockstore_processor::ProcessOptions::default() ..blockstore_processor::ProcessOptions::default()
}; };

View File

@ -514,6 +514,15 @@ fn open_database(ledger_path: &Path) -> Database {
} }
} }
// This function is duplicated in validator/src/main.rs...
/// Collect the `--hard-fork` slot values, or `None` when the flag was not given.
fn hardforks_of(matches: &ArgMatches<'_>, name: &str) -> Option<Vec<Slot>> {
    if !matches.is_present(name) {
        return None;
    }
    Some(values_t_or_exit!(matches, name, Slot))
}
fn load_bank_forks( fn load_bank_forks(
arg_matches: &ArgMatches, arg_matches: &ArgMatches,
ledger_path: &PathBuf, ledger_path: &PathBuf,
@ -554,23 +563,26 @@ fn main() {
.takes_value(true) .takes_value(true)
.default_value("0") .default_value("0")
.help("Start at this slot"); .help("Start at this slot");
let no_snapshot_arg = Arg::with_name("no_snapshot") let no_snapshot_arg = Arg::with_name("no_snapshot")
.long("no-snapshot") .long("no-snapshot")
.takes_value(false) .takes_value(false)
.help("Do not start from a local snapshot if present"); .help("Do not start from a local snapshot if present");
let account_paths_arg = Arg::with_name("account_paths") let account_paths_arg = Arg::with_name("account_paths")
.long("accounts") .long("accounts")
.value_name("PATHS") .value_name("PATHS")
.takes_value(true) .takes_value(true)
.help("Comma separated persistent accounts location"); .help("Comma separated persistent accounts location");
let halt_at_slot_arg = Arg::with_name("halt_at_slot") let halt_at_slot_arg = Arg::with_name("halt_at_slot")
.long("halt-at-slot") .long("halt-at-slot")
.value_name("SLOT") .value_name("SLOT")
.takes_value(true) .takes_value(true)
.help("Halt processing at the given slot"); .help("Halt processing at the given slot");
let hard_forks_arg = Arg::with_name("hard_forks")
.long("hard-fork")
.value_name("SLOT")
.multiple(true)
.takes_value(true)
.help("Add a hard fork at this slot");
let matches = App::new(crate_name!()) let matches = App::new(crate_name!())
.about(crate_description!()) .about(crate_description!())
@ -627,6 +639,7 @@ fn main() {
.arg(&no_snapshot_arg) .arg(&no_snapshot_arg)
.arg(&account_paths_arg) .arg(&account_paths_arg)
.arg(&halt_at_slot_arg) .arg(&halt_at_slot_arg)
.arg(&hard_forks_arg)
.arg( .arg(
Arg::with_name("skip_poh_verify") Arg::with_name("skip_poh_verify")
.long("skip-poh-verify") .long("skip-poh-verify")
@ -639,6 +652,7 @@ fn main() {
.arg(&no_snapshot_arg) .arg(&no_snapshot_arg)
.arg(&account_paths_arg) .arg(&account_paths_arg)
.arg(&halt_at_slot_arg) .arg(&halt_at_slot_arg)
.arg(&hard_forks_arg)
.arg( .arg(
Arg::with_name("include_all_votes") Arg::with_name("include_all_votes")
.long("include-all-votes") .long("include-all-votes")
@ -656,6 +670,7 @@ fn main() {
.about("Create a new ledger snapshot") .about("Create a new ledger snapshot")
.arg(&no_snapshot_arg) .arg(&no_snapshot_arg)
.arg(&account_paths_arg) .arg(&account_paths_arg)
.arg(&hard_forks_arg)
.arg( .arg(
Arg::with_name("snapshot_slot") Arg::with_name("snapshot_slot")
.index(1) .index(1)
@ -758,8 +773,9 @@ fn main() {
} }
("verify", Some(arg_matches)) => { ("verify", Some(arg_matches)) => {
let process_options = ProcessOptions { let process_options = ProcessOptions {
poh_verify: !arg_matches.is_present("skip_poh_verify"),
dev_halt_at_slot: value_t!(arg_matches, "halt_at_slot", Slot).ok(), dev_halt_at_slot: value_t!(arg_matches, "halt_at_slot", Slot).ok(),
new_hard_forks: hardforks_of(arg_matches, "hard_forks"),
poh_verify: !arg_matches.is_present("skip_poh_verify"),
..ProcessOptions::default() ..ProcessOptions::default()
}; };
@ -773,8 +789,9 @@ fn main() {
let output_file = value_t_or_exit!(arg_matches, "graph_filename", String); let output_file = value_t_or_exit!(arg_matches, "graph_filename", String);
let process_options = ProcessOptions { let process_options = ProcessOptions {
poh_verify: false,
dev_halt_at_slot: value_t!(arg_matches, "halt_at_slot", Slot).ok(), dev_halt_at_slot: value_t!(arg_matches, "halt_at_slot", Slot).ok(),
new_hard_forks: hardforks_of(arg_matches, "hard_forks"),
poh_verify: false,
..ProcessOptions::default() ..ProcessOptions::default()
}; };
@ -812,8 +829,9 @@ fn main() {
let output_directory = value_t_or_exit!(arg_matches, "output_directory", String); let output_directory = value_t_or_exit!(arg_matches, "output_directory", String);
let process_options = ProcessOptions { let process_options = ProcessOptions {
poh_verify: false,
dev_halt_at_slot: Some(snapshot_slot), dev_halt_at_slot: Some(snapshot_slot),
new_hard_forks: hardforks_of(arg_matches, "hard_forks"),
poh_verify: false,
..ProcessOptions::default() ..ProcessOptions::default()
}; };
match load_bank_forks(arg_matches, &ledger_path, process_options) { match load_bank_forks(arg_matches, &ledger_path, process_options) {

View File

@ -255,6 +255,9 @@ pub enum BlockstoreProcessorError {
#[error("no valid forks found")] #[error("no valid forks found")]
NoValidForksFound, NoValidForksFound,
#[error("invalid hard fork")]
InvalidHardFork(Slot),
} }
/// Callback for accessing bank state while processing the blockstore /// Callback for accessing bank state while processing the blockstore
@ -267,6 +270,7 @@ pub struct ProcessOptions {
pub dev_halt_at_slot: Option<Slot>, pub dev_halt_at_slot: Option<Slot>,
pub entry_callback: Option<ProcessCallback>, pub entry_callback: Option<ProcessCallback>,
pub override_num_threads: Option<usize>, pub override_num_threads: Option<usize>,
pub new_hard_forks: Option<Vec<Slot>>,
} }
pub fn process_blockstore( pub fn process_blockstore(
@ -314,6 +318,23 @@ pub fn process_blockstore_from_root(
genesis_config.operating_mode, genesis_config.operating_mode,
)); ));
// Register any hard forks requested via ProcessOptions before replay begins
if let Some(ref new_hard_forks) = opts.new_hard_forks {
    let hard_forks = bank.hard_forks();

    for hard_fork_slot in new_hard_forks.iter() {
        // A hard fork at or before the current root slot can never take effect
        // (those slots are already final), so reject it with an error instead
        // of silently ignoring it
        if *hard_fork_slot <= start_slot {
            error!(
                "Unable to add new hard fork at {}, it must be greater than slot {}",
                hard_fork_slot, start_slot
            );
            return Err(BlockstoreProcessorError::InvalidHardFork(*hard_fork_slot));
        }
        hard_forks.write().unwrap().register(*hard_fork_slot);
    }
}
blockstore blockstore
.set_roots(&[start_slot]) .set_roots(&[start_slot])
.expect("Couldn't set root slot on startup"); .expect("Couldn't set root slot on startup");

View File

@ -72,6 +72,7 @@ else
query() { query() {
echo "$*" echo "$*"
set -x
curl -XPOST \ curl -XPOST \
"$host/query?u=${username}&p=${password}" \ "$host/query?u=${username}&p=${password}" \
--data-urlencode "q=$*" --data-urlencode "q=$*"

View File

@ -6,6 +6,7 @@ use crate::{
accounts::{Accounts, TransactionAccounts, TransactionLoadResult, TransactionLoaders}, accounts::{Accounts, TransactionAccounts, TransactionLoadResult, TransactionLoaders},
accounts_db::{AccountStorageEntry, AccountsDBSerialize, AppendVecId, ErrorCounters}, accounts_db::{AccountStorageEntry, AccountsDBSerialize, AppendVecId, ErrorCounters},
blockhash_queue::BlockhashQueue, blockhash_queue::BlockhashQueue,
hard_forks::HardForks,
message_processor::{MessageProcessor, ProcessInstruction}, message_processor::{MessageProcessor, ProcessInstruction},
nonce_utils, nonce_utils,
rent_collector::RentCollector, rent_collector::RentCollector,
@ -35,7 +36,7 @@ use solana_sdk::{
epoch_schedule::EpochSchedule, epoch_schedule::EpochSchedule,
fee_calculator::FeeCalculator, fee_calculator::FeeCalculator,
genesis_config::GenesisConfig, genesis_config::GenesisConfig,
hash::{hashv, Hash}, hash::{extend_and_hash, hashv, Hash},
inflation::Inflation, inflation::Inflation,
native_loader, native_loader,
nonce_state::NonceState, nonce_state::NonceState,
@ -226,6 +227,9 @@ pub struct Bank {
/// parent's slot /// parent's slot
parent_slot: Slot, parent_slot: Slot,
/// slots to hard fork at
hard_forks: Arc<RwLock<HardForks>>,
/// The number of transactions processed without error /// The number of transactions processed without error
#[serde(serialize_with = "serialize_atomicu64")] #[serde(serialize_with = "serialize_atomicu64")]
#[serde(deserialize_with = "deserialize_atomicu64")] #[serde(deserialize_with = "deserialize_atomicu64")]
@ -422,6 +426,7 @@ impl Bank {
signature_count: AtomicU64::new(0), signature_count: AtomicU64::new(0),
message_processor: MessageProcessor::default(), message_processor: MessageProcessor::default(),
entered_epoch_callback: parent.entered_epoch_callback.clone(), entered_epoch_callback: parent.entered_epoch_callback.clone(),
hard_forks: parent.hard_forks.clone(),
last_vote_sync: AtomicU64::new(parent.last_vote_sync.load(Ordering::Relaxed)), last_vote_sync: AtomicU64::new(parent.last_vote_sync.load(Ordering::Relaxed)),
}; };
@ -1685,6 +1690,10 @@ impl Bank {
*self.inflation.write().unwrap() = inflation; *self.inflation.write().unwrap() = inflation;
} }
pub fn hard_forks(&self) -> Arc<RwLock<HardForks>> {
self.hard_forks.clone()
}
pub fn set_entered_epoch_callback(&self, entered_epoch_callback: EnteredEpochCallback) { pub fn set_entered_epoch_callback(&self, entered_epoch_callback: EnteredEpochCallback) {
*self.entered_epoch_callback.write().unwrap() = Some(entered_epoch_callback); *self.entered_epoch_callback.write().unwrap() = Some(entered_epoch_callback);
} }
@ -1779,20 +1788,33 @@ impl Bank {
let accounts_delta_hash = self.rc.accounts.bank_hash_info_at(self.slot()); let accounts_delta_hash = self.rc.accounts.bank_hash_info_at(self.slot());
let mut signature_count_buf = [0u8; 8]; let mut signature_count_buf = [0u8; 8];
LittleEndian::write_u64(&mut signature_count_buf[..], self.signature_count() as u64); LittleEndian::write_u64(&mut signature_count_buf[..], self.signature_count() as u64);
let hash = hashv(&[
let mut hash = hashv(&[
self.parent_hash.as_ref(), self.parent_hash.as_ref(),
accounts_delta_hash.hash.as_ref(), accounts_delta_hash.hash.as_ref(),
&signature_count_buf, &signature_count_buf,
self.last_blockhash().as_ref(), self.last_blockhash().as_ref(),
]); ]);
if let Some(buf) = self
.hard_forks
.read()
.unwrap()
.get_hash_data(self.slot(), self.parent_slot())
{
info!("hard fork at bank {}", self.slot());
hash = extend_and_hash(&hash, &buf)
}
info!( info!(
"bank frozen: {} hash: {} accounts_delta: {} signature_count: {} last_blockhash: {}", "bank frozen: {} hash: {} accounts_delta: {} signature_count: {} last_blockhash: {}",
self.slot(), self.slot(),
hash, hash,
accounts_delta_hash.hash, accounts_delta_hash.hash,
self.signature_count(), self.signature_count(),
self.last_blockhash() self.last_blockhash(),
); );
info!( info!(
"accounts hash slot: {} stats: {:?}", "accounts hash slot: {} stats: {:?}",
self.slot(), self.slot(),
@ -2147,6 +2169,7 @@ impl From<LegacyBank0223> for Bank {
storage_accounts: bank.storage_accounts, storage_accounts: bank.storage_accounts,
parent_hash: bank.parent_hash, parent_hash: bank.parent_hash,
parent_slot: bank.parent_slot, parent_slot: bank.parent_slot,
hard_forks: Arc::new(RwLock::new(HardForks::default())),
collector_id: bank.collector_id, collector_id: bank.collector_id,
collector_fees: bank.collector_fees, collector_fees: bank.collector_fees,
ancestors: bank.ancestors, ancestors: bank.ancestors,

91
runtime/src/hard_forks.rs Normal file
View File

@ -0,0 +1,91 @@
//! The `hard_forks` module is used to maintain the list of slot boundaries for when a hard fork
//! should occur.
use byteorder::{ByteOrder, LittleEndian};
use solana_sdk::clock::Slot;
use std::ops::Add;
#[derive(Default, Clone, Deserialize, Serialize)]
pub struct HardForks {
    /// `(slot, count)` pairs, kept sorted by slot by `register()`; `count` is
    /// the number of times that slot has been registered as a hard fork
    hard_forks: Vec<(Slot, usize)>,
}
impl HardForks {
// Register a fork to occur at all slots >= `slot` with a parent slot < `slot`
pub fn register(&mut self, new_slot: Slot) {
if let Some(i) = self
.hard_forks
.iter()
.position(|(slot, _)| *slot == new_slot)
{
self.hard_forks[i] = (new_slot, self.hard_forks[i].1 + 1);
} else {
self.hard_forks.push((new_slot, 1));
}
self.hard_forks.sort();
}
// Returns a sorted-by-slot iterator over the registered hark forks
pub fn iter(&self) -> std::slice::Iter<(Slot, usize)> {
self.hard_forks.iter()
}
// Returns data to include in the bank hash for the given slot if a hard fork is scheduled
pub fn get_hash_data(&self, slot: Slot, parent_slot: Slot) -> Option<[u8; 8]> {
// The expected number of hard forks in a cluster is small.
// If this turns out to be false then a more efficient data
// structure may be needed here to avoid this linear search
let fork_count = self
.hard_forks
.iter()
.fold(0, |acc, (fork_slot, fork_count)| {
acc.add(if parent_slot < *fork_slot && slot >= *fork_slot {
*fork_count
} else {
0
})
});
if fork_count > 0 {
let mut buf = [0u8; 8];
LittleEndian::write_u64(&mut buf[..], fork_count as u64);
Some(buf)
} else {
None
}
}
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn iter_is_sorted() {
        let mut hard_forks = HardForks::default();
        // Register out of order, with one duplicate
        for slot in &[30, 20, 10, 20] {
            hard_forks.register(*slot);
        }

        let collected: Vec<(Slot, usize)> = hard_forks.iter().map(|pair| *pair).collect();
        assert_eq!(collected, vec![(10, 1), (20, 2), (30, 1)]);
    }

    #[test]
    fn multiple_hard_forks_since_parent() {
        let mut hard_forks = HardForks::default();
        hard_forks.register(10);
        hard_forks.register(20);

        // No fork boundary crossed between parent and slot
        assert_eq!(hard_forks.get_hash_data(9, 0), None);
        assert_eq!(hard_forks.get_hash_data(21, 20), None);

        // Exactly one fork boundary crossed
        let one = Some([1, 0, 0, 0, 0, 0, 0, 0]);
        assert_eq!(hard_forks.get_hash_data(10, 0), one);
        assert_eq!(hard_forks.get_hash_data(19, 0), one);
        assert_eq!(hard_forks.get_hash_data(20, 10), one);
        assert_eq!(hard_forks.get_hash_data(20, 11), one);
        assert_eq!(hard_forks.get_hash_data(21, 11), one);

        // Both fork boundaries crossed
        assert_eq!(hard_forks.get_hash_data(20, 0), Some([2, 0, 0, 0, 0, 0, 0, 0]));
    }
}

View File

@ -7,6 +7,7 @@ pub mod bank_client;
mod blockhash_queue; mod blockhash_queue;
pub mod bloom; pub mod bloom;
pub mod genesis_utils; pub mod genesis_utils;
pub mod hard_forks;
pub mod loader_utils; pub mod loader_utils;
pub mod message_processor; pub mod message_processor;
mod native_loader; mod native_loader;

View File

@ -1,5 +1,7 @@
use bzip2::bufread::BzDecoder; use bzip2::bufread::BzDecoder;
use clap::{crate_description, crate_name, value_t, value_t_or_exit, App, Arg}; use clap::{
crate_description, crate_name, value_t, value_t_or_exit, values_t_or_exit, App, Arg, ArgMatches,
};
use console::{style, Emoji}; use console::{style, Emoji};
use indicatif::{ProgressBar, ProgressStyle}; use indicatif::{ProgressBar, ProgressStyle};
use log::*; use log::*;
@ -18,6 +20,7 @@ use solana_core::{
cluster_info::{ClusterInfo, Node, VALIDATOR_PORT_RANGE}, cluster_info::{ClusterInfo, Node, VALIDATOR_PORT_RANGE},
contact_info::ContactInfo, contact_info::ContactInfo,
gossip_service::GossipService, gossip_service::GossipService,
rpc::JsonRpcConfig,
validator::{Validator, ValidatorConfig}, validator::{Validator, ValidatorConfig},
}; };
use solana_ledger::bank_forks::SnapshotConfig; use solana_ledger::bank_forks::SnapshotConfig;
@ -341,6 +344,15 @@ fn download_ledger(
Ok(()) Ok(())
} }
// This function is duplicated in ledger-tool/src/main.rs...
/// Collect the `--hard-fork` slot values, or `None` when the flag was not given.
fn hardforks_of(matches: &ArgMatches<'_>, name: &str) -> Option<Vec<Slot>> {
    match matches.is_present(name) {
        true => Some(values_t_or_exit!(matches, name, Slot)),
        false => None,
    }
}
#[allow(clippy::cognitive_complexity)] #[allow(clippy::cognitive_complexity)]
pub fn main() { pub fn main() {
let default_dynamic_port_range = let default_dynamic_port_range =
@ -577,6 +589,14 @@ pub fn main() {
.takes_value(false) .takes_value(false)
.help("After processing the ledger, wait until a supermajority of stake is visible on gossip before starting PoH"), .help("After processing the ledger, wait until a supermajority of stake is visible on gossip before starting PoH"),
) )
.arg(
Arg::with_name("hard_forks")
.long("hard-fork")
.value_name("SLOT")
.multiple(true)
.takes_value(true)
.help("Add a hard fork at this slot"),
)
.get_matches(); .get_matches();
let identity_keypair = Arc::new( let identity_keypair = Arc::new(
@ -618,16 +638,26 @@ pub fn main() {
exit(1); exit(1);
}); });
let mut validator_config = ValidatorConfig::default(); let mut validator_config = ValidatorConfig {
validator_config.dev_sigverify_disabled = matches.is_present("dev_no_sigverify"); blockstream_unix_socket: matches
validator_config.dev_halt_at_slot = value_t!(matches, "dev_halt_at_slot", Slot).ok(); .value_of("blockstream_unix_socket")
.map(PathBuf::from),
validator_config.rpc_config.enable_validator_exit = matches.is_present("enable_rpc_exit"); dev_sigverify_disabled: matches.is_present("dev_no_sigverify"),
validator_config.wait_for_supermajority = matches.is_present("wait_for_supermajority"); dev_halt_at_slot: value_t!(matches, "dev_halt_at_slot", Slot).ok(),
expected_genesis_hash: matches
validator_config.rpc_config.faucet_addr = matches.value_of("rpc_faucet_addr").map(|address| { .value_of("expected_genesis_hash")
.map(|s| Hash::from_str(&s).unwrap()),
new_hard_forks: hardforks_of(&matches, "hard_forks"),
rpc_config: JsonRpcConfig {
enable_validator_exit: matches.is_present("enable_rpc_exit"),
faucet_addr: matches.value_of("rpc_faucet_addr").map(|address| {
solana_net_utils::parse_host_port(address).expect("failed to parse faucet address") solana_net_utils::parse_host_port(address).expect("failed to parse faucet address")
}); }),
},
voting_disabled: matches.is_present("no_voting"),
wait_for_supermajority: matches.is_present("wait_for_supermajority"),
..ValidatorConfig::default()
};
let dynamic_port_range = let dynamic_port_range =
solana_net_utils::parse_port_range(matches.value_of("dynamic_port_range").unwrap()) solana_net_utils::parse_port_range(matches.value_of("dynamic_port_range").unwrap())
@ -692,14 +722,6 @@ pub fn main() {
warn!("--vote-signer-address ignored"); warn!("--vote-signer-address ignored");
} }
validator_config.blockstream_unix_socket = matches
.value_of("blockstream_unix_socket")
.map(PathBuf::from);
validator_config.expected_genesis_hash = matches
.value_of("expected_genesis_hash")
.map(|s| Hash::from_str(&s).unwrap());
println!( println!(
"{} {}", "{} {}",
style(crate_name!()).bold(), style(crate_name!()).bold(),
@ -743,10 +765,6 @@ pub fn main() {
.join(","), .join(","),
); );
if matches.is_present("no_voting") {
validator_config.voting_disabled = true;
}
let vote_account = pubkey_of(&matches, "vote_account").unwrap_or_else(|| { let vote_account = pubkey_of(&matches, "vote_account").unwrap_or_else(|| {
// Disable voting because normal (=not bootstrapping) validator rejects // Disable voting because normal (=not bootstrapping) validator rejects
// non-voting accounts (= ephemeral keypairs). // non-voting accounts (= ephemeral keypairs).