parent: a2f2c46f87
commit: 989355e885
@@ -31,11 +31,11 @@ use solana_ledger::{
     leader_schedule_cache::LeaderScheduleCache,
 };
 use solana_metrics::datapoint_info;
-use solana_runtime::bank::Bank;
+use solana_runtime::{bank::Bank, hard_forks::HardForks};
 use solana_sdk::{
     clock::{Slot, DEFAULT_SLOTS_PER_TURN},
     genesis_config::GenesisConfig,
-    hash::Hash,
+    hash::{extend_and_hash, Hash},
     poh_config::PohConfig,
     pubkey::Pubkey,
     signature::{Keypair, KeypairUtil},
@@ -69,6 +69,7 @@ pub struct ValidatorConfig {
     pub partition_cfg: Option<PartitionCfg>,
     pub fixed_leader_schedule: Option<FixedSchedule>,
     pub wait_for_supermajority: bool,
+    pub new_hard_forks: Option<Vec<Slot>>,
 }
 
 impl Default for ValidatorConfig {
@@ -89,6 +90,7 @@ impl Default for ValidatorConfig {
             partition_cfg: None,
             fixed_leader_schedule: None,
             wait_for_supermajority: false,
+            new_hard_forks: None,
         }
     }
 }
@@ -179,7 +181,8 @@ impl Validator {
         let validator_exit = Arc::new(RwLock::new(Some(validator_exit)));
 
         node.info.wallclock = timestamp();
-        node.info.shred_version = Shred::version_from_hash(&genesis_hash);
+        node.info.shred_version =
+            compute_shred_version(&genesis_hash, &bank.hard_forks().read().unwrap());
         Self::print_node_info(&node);
 
         let cluster_info = Arc::new(RwLock::new(ClusterInfo::new(
@@ -466,7 +469,21 @@ impl Validator {
     }
 }
 
-pub fn new_banks_from_blockstore(
+fn compute_shred_version(genesis_hash: &Hash, hard_forks: &HardForks) -> u16 {
+    use byteorder::{ByteOrder, LittleEndian};
+
+    let mut hash = *genesis_hash;
+    for (slot, count) in hard_forks.iter() {
+        let mut buf = [0u8; 16];
+        LittleEndian::write_u64(&mut buf[..8], *slot);
+        LittleEndian::write_u64(&mut buf[8..], *count as u64);
+        hash = extend_and_hash(&hash, &buf);
+    }
+
+    Shred::version_from_hash(&hash)
+}
+
+fn new_banks_from_blockstore(
     expected_genesis_hash: Option<Hash>,
     blockstore_path: &Path,
     poh_verify: bool,
@@ -505,6 +522,7 @@ pub fn new_banks_from_blockstore(
     let process_options = blockstore_processor::ProcessOptions {
         poh_verify,
         dev_halt_at_slot: config.dev_halt_at_slot,
+        new_hard_forks: config.new_hard_forks.clone(),
         ..blockstore_processor::ProcessOptions::default()
     };
 
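The new `compute_shred_version` ties a node's shred version to the genesis hash plus every registered hard fork, so a validator restarted with a `--hard-fork` slot gossips a different version than one that was not. A minimal sketch of that effect (not part of the commit: `shred_version` below just repeats the fold shown above, and the crate paths are assumed from the surrounding imports):

```rust
use byteorder::{ByteOrder, LittleEndian};
use solana_ledger::shred::Shred;
use solana_runtime::hard_forks::HardForks;
use solana_sdk::hash::{extend_and_hash, Hash};

// Same folding as `compute_shred_version` above: each (slot, count) pair is
// serialized as two little-endian u64s and mixed into the running hash.
fn shred_version(genesis_hash: &Hash, hard_forks: &HardForks) -> u16 {
    let mut hash = *genesis_hash;
    for (slot, count) in hard_forks.iter() {
        let mut buf = [0u8; 16];
        LittleEndian::write_u64(&mut buf[..8], *slot);
        LittleEndian::write_u64(&mut buf[8..], *count as u64);
        hash = extend_and_hash(&hash, &buf);
    }
    Shred::version_from_hash(&hash)
}

fn main() {
    let genesis_hash = Hash::default();

    // No hard forks registered: identical to the old Shred::version_from_hash(&genesis_hash).
    let before = shred_version(&genesis_hash, &HardForks::default());

    // One hard fork registered: the extra extend_and_hash round perturbs the result.
    let mut hard_forks = HardForks::default();
    hard_forks.register(16_000_000);
    let after = shred_version(&genesis_hash, &hard_forks);

    // The two versions almost certainly differ, which is what lets the restarted
    // cluster distinguish itself from un-forked nodes.
    println!("shred version before: {} after: {}", before, after);
}
```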
@@ -514,6 +514,15 @@ fn open_database(ledger_path: &Path) -> Database {
     }
 }
 
+// This function is duplicated in validator/src/main.rs...
+fn hardforks_of(matches: &ArgMatches<'_>, name: &str) -> Option<Vec<Slot>> {
+    if matches.is_present(name) {
+        Some(values_t_or_exit!(matches, name, Slot))
+    } else {
+        None
+    }
+}
+
 fn load_bank_forks(
     arg_matches: &ArgMatches,
     ledger_path: &PathBuf,
@@ -554,23 +563,26 @@ fn main() {
         .takes_value(true)
         .default_value("0")
         .help("Start at this slot");
 
     let no_snapshot_arg = Arg::with_name("no_snapshot")
         .long("no-snapshot")
         .takes_value(false)
         .help("Do not start from a local snapshot if present");
 
     let account_paths_arg = Arg::with_name("account_paths")
         .long("accounts")
         .value_name("PATHS")
         .takes_value(true)
         .help("Comma separated persistent accounts location");
 
     let halt_at_slot_arg = Arg::with_name("halt_at_slot")
         .long("halt-at-slot")
         .value_name("SLOT")
         .takes_value(true)
         .help("Halt processing at the given slot");
+    let hard_forks_arg = Arg::with_name("hard_forks")
+        .long("hard-fork")
+        .value_name("SLOT")
+        .multiple(true)
+        .takes_value(true)
+        .help("Add a hard fork at this slot");
 
     let matches = App::new(crate_name!())
         .about(crate_description!())
@@ -627,6 +639,7 @@ fn main() {
                 .arg(&no_snapshot_arg)
                 .arg(&account_paths_arg)
                 .arg(&halt_at_slot_arg)
+                .arg(&hard_forks_arg)
                 .arg(
                     Arg::with_name("skip_poh_verify")
                         .long("skip-poh-verify")
@@ -639,6 +652,7 @@ fn main() {
                 .arg(&no_snapshot_arg)
                 .arg(&account_paths_arg)
                 .arg(&halt_at_slot_arg)
+                .arg(&hard_forks_arg)
                 .arg(
                     Arg::with_name("include_all_votes")
                         .long("include-all-votes")
@@ -656,6 +670,7 @@ fn main() {
                 .about("Create a new ledger snapshot")
                 .arg(&no_snapshot_arg)
                 .arg(&account_paths_arg)
+                .arg(&hard_forks_arg)
                 .arg(
                     Arg::with_name("snapshot_slot")
                         .index(1)
@@ -758,8 +773,9 @@ fn main() {
         }
         ("verify", Some(arg_matches)) => {
             let process_options = ProcessOptions {
-                poh_verify: !arg_matches.is_present("skip_poh_verify"),
                 dev_halt_at_slot: value_t!(arg_matches, "halt_at_slot", Slot).ok(),
+                new_hard_forks: hardforks_of(arg_matches, "hard_forks"),
+                poh_verify: !arg_matches.is_present("skip_poh_verify"),
                 ..ProcessOptions::default()
             };
 
@@ -773,8 +789,9 @@ fn main() {
             let output_file = value_t_or_exit!(arg_matches, "graph_filename", String);
 
             let process_options = ProcessOptions {
-                poh_verify: false,
                 dev_halt_at_slot: value_t!(arg_matches, "halt_at_slot", Slot).ok(),
+                new_hard_forks: hardforks_of(arg_matches, "hard_forks"),
+                poh_verify: false,
                 ..ProcessOptions::default()
             };
 
@@ -812,8 +829,9 @@ fn main() {
             let output_directory = value_t_or_exit!(arg_matches, "output_directory", String);
 
             let process_options = ProcessOptions {
-                poh_verify: false,
                 dev_halt_at_slot: Some(snapshot_slot),
+                new_hard_forks: hardforks_of(arg_matches, "hard_forks"),
+                poh_verify: false,
                 ..ProcessOptions::default()
             };
             match load_bank_forks(arg_matches, &ledger_path, process_options) {
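The repeatable `--hard-fork <SLOT>` argument added above reaches the processing pipeline through the small `hardforks_of` helper and `ProcessOptions::new_hard_forks`. A rough, self-contained sketch of that plumbing (the stripped-down clap `App` is a stand-in for ledger-tool's real CLI, not the commit's code):

```rust
use clap::{values_t_or_exit, App, Arg, ArgMatches};
use solana_ledger::blockstore_processor::ProcessOptions;
use solana_sdk::clock::Slot;

// Mirrors the helper added in this commit.
fn hardforks_of(matches: &ArgMatches<'_>, name: &str) -> Option<Vec<Slot>> {
    if matches.is_present(name) {
        Some(values_t_or_exit!(matches, name, Slot))
    } else {
        None
    }
}

fn main() {
    // Run with e.g. `--hard-fork 100 --hard-fork 200`.
    let matches = App::new("hard-fork-example")
        .arg(
            Arg::with_name("hard_forks")
                .long("hard-fork")
                .value_name("SLOT")
                .multiple(true)
                .takes_value(true),
        )
        .get_matches();

    // None when the flag is absent, Some(vec![100, 200]) for the invocation above.
    let process_options = ProcessOptions {
        new_hard_forks: hardforks_of(&matches, "hard_forks"),
        ..ProcessOptions::default()
    };
    println!("{:?}", process_options.new_hard_forks);
}
```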
@@ -255,6 +255,9 @@ pub enum BlockstoreProcessorError {
 
     #[error("no valid forks found")]
     NoValidForksFound,
+
+    #[error("invalid hard fork")]
+    InvalidHardFork(Slot),
 }
 
 /// Callback for accessing bank state while processing the blockstore
@@ -267,6 +270,7 @@ pub struct ProcessOptions {
     pub dev_halt_at_slot: Option<Slot>,
     pub entry_callback: Option<ProcessCallback>,
     pub override_num_threads: Option<usize>,
+    pub new_hard_forks: Option<Vec<Slot>>,
 }
 
 pub fn process_blockstore(
@@ -314,6 +318,23 @@ pub fn process_blockstore_from_root(
         genesis_config.operating_mode,
     ));
 
+    if let Some(ref new_hard_forks) = opts.new_hard_forks {
+        let hard_forks = bank.hard_forks();
+
+        for hard_fork_slot in new_hard_forks.iter() {
+            // Ensure the user isn't trying to add new hard forks for a slot that's earlier than the current
+            // root slot.  Doing so won't cause any effect so emit an error
+            if *hard_fork_slot <= start_slot {
+                error!(
+                    "Unable to add new hard fork at {}, it must be greater than slot {}",
+                    hard_fork_slot, start_slot
+                );
+                return Err(BlockstoreProcessorError::InvalidHardFork(*hard_fork_slot));
+            }
+            hard_forks.write().unwrap().register(*hard_fork_slot);
+        }
+    }
+
     blockstore
         .set_roots(&[start_slot])
         .expect("Couldn't set root slot on startup");
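The loop above only accepts hard forks strictly beyond the current root (`start_slot`); anything at or below it can never take effect, so it is rejected with `InvalidHardFork`. A small sketch of that rule in isolation (`register_new_hard_forks` is a hypothetical helper; in the commit the logic lives inline in `process_blockstore_from_root`):

```rust
use solana_runtime::hard_forks::HardForks;
use solana_sdk::clock::Slot;

// Hypothetical stand-alone version of the check performed above: reject any
// requested fork at or before the root, register the rest.
fn register_new_hard_forks(
    hard_forks: &mut HardForks,
    start_slot: Slot,
    new_hard_forks: &[Slot],
) -> Result<(), Slot> {
    for &hard_fork_slot in new_hard_forks {
        if hard_fork_slot <= start_slot {
            return Err(hard_fork_slot);
        }
        hard_forks.register(hard_fork_slot);
    }
    Ok(())
}

fn main() {
    let mut hard_forks = HardForks::default();

    // With the root at slot 100, slot 100 itself is refused...
    assert_eq!(register_new_hard_forks(&mut hard_forks, 100, &[100]), Err(100));

    // ...while a later slot is accepted and registered.
    assert_eq!(register_new_hard_forks(&mut hard_forks, 100, &[150]), Ok(()));
}
```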
@@ -72,6 +72,7 @@ else
 
   query() {
+    echo "$*"
     set -x
     curl -XPOST \
       "$host/query?u=${username}&p=${password}" \
       --data-urlencode "q=$*"
@@ -6,6 +6,7 @@ use crate::{
     accounts::{Accounts, TransactionAccounts, TransactionLoadResult, TransactionLoaders},
     accounts_db::{AccountStorageEntry, AccountsDBSerialize, AppendVecId, ErrorCounters},
     blockhash_queue::BlockhashQueue,
+    hard_forks::HardForks,
     message_processor::{MessageProcessor, ProcessInstruction},
     nonce_utils,
     rent_collector::RentCollector,
@@ -35,7 +36,7 @@ use solana_sdk::{
     epoch_schedule::EpochSchedule,
     fee_calculator::FeeCalculator,
     genesis_config::GenesisConfig,
-    hash::{hashv, Hash},
+    hash::{extend_and_hash, hashv, Hash},
     inflation::Inflation,
     native_loader,
     nonce_state::NonceState,
@@ -226,6 +227,9 @@ pub struct Bank {
     /// parent's slot
     parent_slot: Slot,
 
+    /// slots to hard fork at
+    hard_forks: Arc<RwLock<HardForks>>,
+
     /// The number of transactions processed without error
     #[serde(serialize_with = "serialize_atomicu64")]
     #[serde(deserialize_with = "deserialize_atomicu64")]
@@ -422,6 +426,7 @@ impl Bank {
             signature_count: AtomicU64::new(0),
             message_processor: MessageProcessor::default(),
             entered_epoch_callback: parent.entered_epoch_callback.clone(),
+            hard_forks: parent.hard_forks.clone(),
             last_vote_sync: AtomicU64::new(parent.last_vote_sync.load(Ordering::Relaxed)),
         };
 
@@ -1685,6 +1690,10 @@ impl Bank {
         *self.inflation.write().unwrap() = inflation;
     }
 
+    pub fn hard_forks(&self) -> Arc<RwLock<HardForks>> {
+        self.hard_forks.clone()
+    }
+
     pub fn set_entered_epoch_callback(&self, entered_epoch_callback: EnteredEpochCallback) {
         *self.entered_epoch_callback.write().unwrap() = Some(entered_epoch_callback);
     }
@@ -1779,20 +1788,33 @@ impl Bank {
         let accounts_delta_hash = self.rc.accounts.bank_hash_info_at(self.slot());
         let mut signature_count_buf = [0u8; 8];
         LittleEndian::write_u64(&mut signature_count_buf[..], self.signature_count() as u64);
-        let hash = hashv(&[
+
+        let mut hash = hashv(&[
             self.parent_hash.as_ref(),
             accounts_delta_hash.hash.as_ref(),
             &signature_count_buf,
             self.last_blockhash().as_ref(),
         ]);
+
+        if let Some(buf) = self
+            .hard_forks
+            .read()
+            .unwrap()
+            .get_hash_data(self.slot(), self.parent_slot())
+        {
+            info!("hard fork at bank {}", self.slot());
+            hash = extend_and_hash(&hash, &buf)
+        }
+
         info!(
             "bank frozen: {} hash: {} accounts_delta: {} signature_count: {} last_blockhash: {}",
             self.slot(),
             hash,
             accounts_delta_hash.hash,
             self.signature_count(),
-            self.last_blockhash()
+            self.last_blockhash(),
         );
+
         info!(
             "accounts hash slot: {} stats: {:?}",
             self.slot(),
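When `get_hash_data` reports a fork scheduled between the parent slot and the slot being frozen, the bank hash gets one extra `extend_and_hash` round over the little-endian fork count, so banks on either side of the fork cannot hash identically. A toy illustration, not Bank code (`base_hash` stands in for the `hashv` result above):

```rust
use solana_runtime::hard_forks::HardForks;
use solana_sdk::hash::{extend_and_hash, hashv};

fn main() {
    // Stand-in for parent hash + accounts delta + signature count + blockhash.
    let base_hash = hashv(&[b"parent".as_ref(), b"accounts_delta".as_ref()]);

    let mut hard_forks = HardForks::default();
    hard_forks.register(20);

    // Freezing slot 20 with parent 19: a fork falls in (19, 20], so the
    // frozen hash picks up the extra extend_and_hash round.
    let frozen_hash = match hard_forks.get_hash_data(20, 19) {
        Some(buf) => extend_and_hash(&base_hash, &buf),
        None => base_hash,
    };
    assert_ne!(frozen_hash, base_hash);

    // Freezing slot 19 with parent 18 sees no fork and keeps the plain hash.
    assert_eq!(hard_forks.get_hash_data(19, 18), None);
}
```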
@@ -2147,6 +2169,7 @@ impl From<LegacyBank0223> for Bank {
             storage_accounts: bank.storage_accounts,
             parent_hash: bank.parent_hash,
             parent_slot: bank.parent_slot,
+            hard_forks: Arc::new(RwLock::new(HardForks::default())),
             collector_id: bank.collector_id,
             collector_fees: bank.collector_fees,
             ancestors: bank.ancestors,
91  runtime/src/hard_forks.rs  Normal file
@@ -0,0 +1,91 @@
+//! The `hard_forks` module is used to maintain the list of slot boundaries for when a hard fork
+//! should occur.
+
+use byteorder::{ByteOrder, LittleEndian};
+use solana_sdk::clock::Slot;
+use std::ops::Add;
+
+#[derive(Default, Clone, Deserialize, Serialize)]
+pub struct HardForks {
+    hard_forks: Vec<(Slot, usize)>,
+}
+impl HardForks {
+    // Register a fork to occur at all slots >= `slot` with a parent slot < `slot`
+    pub fn register(&mut self, new_slot: Slot) {
+        if let Some(i) = self
+            .hard_forks
+            .iter()
+            .position(|(slot, _)| *slot == new_slot)
+        {
+            self.hard_forks[i] = (new_slot, self.hard_forks[i].1 + 1);
+        } else {
+            self.hard_forks.push((new_slot, 1));
+        }
+        self.hard_forks.sort();
+    }
+
+    // Returns a sorted-by-slot iterator over the registered hard forks
+    pub fn iter(&self) -> std::slice::Iter<(Slot, usize)> {
+        self.hard_forks.iter()
+    }
+
+    // Returns data to include in the bank hash for the given slot if a hard fork is scheduled
+    pub fn get_hash_data(&self, slot: Slot, parent_slot: Slot) -> Option<[u8; 8]> {
+        // The expected number of hard forks in a cluster is small.
+        // If this turns out to be false then a more efficient data
+        // structure may be needed here to avoid this linear search
+        let fork_count = self
+            .hard_forks
+            .iter()
+            .fold(0, |acc, (fork_slot, fork_count)| {
+                acc.add(if parent_slot < *fork_slot && slot >= *fork_slot {
+                    *fork_count
+                } else {
+                    0
+                })
+            });
+
+        if fork_count > 0 {
+            let mut buf = [0u8; 8];
+            LittleEndian::write_u64(&mut buf[..], fork_count as u64);
+            Some(buf)
+        } else {
+            None
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn iter_is_sorted() {
+        let mut hf = HardForks::default();
+        hf.register(30);
+        hf.register(20);
+        hf.register(10);
+        hf.register(20);
+
+        assert_eq!(
+            hf.iter().map(|i| *i).collect::<Vec<_>>(),
+            vec![(10, 1), (20, 2), (30, 1)]
+        );
+    }
+
+    #[test]
+    fn multiple_hard_forks_since_parent() {
+        let mut hf = HardForks::default();
+        hf.register(10);
+        hf.register(20);
+
+        assert_eq!(hf.get_hash_data(9, 0), None);
+        assert_eq!(hf.get_hash_data(10, 0), Some([1, 0, 0, 0, 0, 0, 0, 0,]));
+        assert_eq!(hf.get_hash_data(19, 0), Some([1, 0, 0, 0, 0, 0, 0, 0,]));
+        assert_eq!(hf.get_hash_data(20, 0), Some([2, 0, 0, 0, 0, 0, 0, 0,]));
+        assert_eq!(hf.get_hash_data(20, 10), Some([1, 0, 0, 0, 0, 0, 0, 0,]));
+        assert_eq!(hf.get_hash_data(20, 11), Some([1, 0, 0, 0, 0, 0, 0, 0,]));
+        assert_eq!(hf.get_hash_data(21, 11), Some([1, 0, 0, 0, 0, 0, 0, 0,]));
+        assert_eq!(hf.get_hash_data(21, 20), None);
+    }
+}
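`get_hash_data` counts every registered fork that falls in the half-open window `(parent_slot, slot]`. For readers who find the `fold`/`Add` form above opaque, here is the same computation as a plain filter-and-sum (`forks_since_parent` is a hypothetical helper, not part of the commit):

```rust
use solana_sdk::clock::Slot;

// Equivalent to the fold inside `HardForks::get_hash_data`: sum the counts of
// all forks scheduled after `parent_slot` and at or before `slot`.
fn forks_since_parent(hard_forks: &[(Slot, usize)], slot: Slot, parent_slot: Slot) -> usize {
    hard_forks
        .iter()
        .filter(|(fork_slot, _)| parent_slot < *fork_slot && slot >= *fork_slot)
        .map(|(_, count)| count)
        .sum()
}

fn main() {
    // Forks registered at slots 10 (once) and 20 (twice).
    let hard_forks = vec![(10, 1), (20, 2)];

    assert_eq!(forks_since_parent(&hard_forks, 9, 0), 0); // window (0, 9] holds no fork
    assert_eq!(forks_since_parent(&hard_forks, 20, 0), 3); // both fork slots are covered
    assert_eq!(forks_since_parent(&hard_forks, 20, 10), 2); // only the slot-20 fork counts
}
```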
@@ -7,6 +7,7 @@ pub mod bank_client;
 mod blockhash_queue;
 pub mod bloom;
 pub mod genesis_utils;
+pub mod hard_forks;
 pub mod loader_utils;
 pub mod message_processor;
 mod native_loader;
@@ -1,5 +1,7 @@
 use bzip2::bufread::BzDecoder;
-use clap::{crate_description, crate_name, value_t, value_t_or_exit, App, Arg};
+use clap::{
+    crate_description, crate_name, value_t, value_t_or_exit, values_t_or_exit, App, Arg, ArgMatches,
+};
 use console::{style, Emoji};
 use indicatif::{ProgressBar, ProgressStyle};
 use log::*;
@@ -18,6 +20,7 @@ use solana_core::{
     cluster_info::{ClusterInfo, Node, VALIDATOR_PORT_RANGE},
     contact_info::ContactInfo,
     gossip_service::GossipService,
+    rpc::JsonRpcConfig,
     validator::{Validator, ValidatorConfig},
 };
 use solana_ledger::bank_forks::SnapshotConfig;
@@ -341,6 +344,15 @@ fn download_ledger(
     Ok(())
 }
 
+// This function is duplicated in ledger-tool/src/main.rs...
+fn hardforks_of(matches: &ArgMatches<'_>, name: &str) -> Option<Vec<Slot>> {
+    if matches.is_present(name) {
+        Some(values_t_or_exit!(matches, name, Slot))
+    } else {
+        None
+    }
+}
+
 #[allow(clippy::cognitive_complexity)]
 pub fn main() {
     let default_dynamic_port_range =
@@ -577,6 +589,14 @@ pub fn main() {
                 .takes_value(false)
                 .help("After processing the ledger, wait until a supermajority of stake is visible on gossip before starting PoH"),
         )
+        .arg(
+            Arg::with_name("hard_forks")
+                .long("hard-fork")
+                .value_name("SLOT")
+                .multiple(true)
+                .takes_value(true)
+                .help("Add a hard fork at this slot"),
+        )
         .get_matches();
 
     let identity_keypair = Arc::new(
@@ -618,16 +638,26 @@ pub fn main() {
             exit(1);
         });
 
-    let mut validator_config = ValidatorConfig::default();
-    validator_config.dev_sigverify_disabled = matches.is_present("dev_no_sigverify");
-    validator_config.dev_halt_at_slot = value_t!(matches, "dev_halt_at_slot", Slot).ok();
-
-    validator_config.rpc_config.enable_validator_exit = matches.is_present("enable_rpc_exit");
-    validator_config.wait_for_supermajority = matches.is_present("wait_for_supermajority");
-
-    validator_config.rpc_config.faucet_addr = matches.value_of("rpc_faucet_addr").map(|address| {
-        solana_net_utils::parse_host_port(address).expect("failed to parse faucet address")
-    });
+    let mut validator_config = ValidatorConfig {
+        blockstream_unix_socket: matches
+            .value_of("blockstream_unix_socket")
+            .map(PathBuf::from),
+        dev_sigverify_disabled: matches.is_present("dev_no_sigverify"),
+        dev_halt_at_slot: value_t!(matches, "dev_halt_at_slot", Slot).ok(),
+        expected_genesis_hash: matches
+            .value_of("expected_genesis_hash")
+            .map(|s| Hash::from_str(&s).unwrap()),
+        new_hard_forks: hardforks_of(&matches, "hard_forks"),
+        rpc_config: JsonRpcConfig {
+            enable_validator_exit: matches.is_present("enable_rpc_exit"),
+            faucet_addr: matches.value_of("rpc_faucet_addr").map(|address| {
+                solana_net_utils::parse_host_port(address).expect("failed to parse faucet address")
+            }),
+        },
+        voting_disabled: matches.is_present("no_voting"),
+        wait_for_supermajority: matches.is_present("wait_for_supermajority"),
+        ..ValidatorConfig::default()
+    };
 
     let dynamic_port_range =
         solana_net_utils::parse_port_range(matches.value_of("dynamic_port_range").unwrap())
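With `ValidatorConfig` now built as a struct literal, scheduling a hard fork programmatically (for example from a test harness) is just a matter of populating `new_hard_forks` and letting everything else fall back to the defaults. A brief sketch under that assumption:

```rust
use solana_core::validator::ValidatorConfig;

fn main() {
    // Schedule a hard fork at slot 16_000_000; all other fields keep their
    // defaults via struct-update syntax, mirroring the style used above.
    let validator_config = ValidatorConfig {
        new_hard_forks: Some(vec![16_000_000]),
        ..ValidatorConfig::default()
    };
    assert_eq!(validator_config.new_hard_forks, Some(vec![16_000_000]));
}
```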
@@ -692,14 +722,6 @@ pub fn main() {
         warn!("--vote-signer-address ignored");
     }
 
-    validator_config.blockstream_unix_socket = matches
-        .value_of("blockstream_unix_socket")
-        .map(PathBuf::from);
-
-    validator_config.expected_genesis_hash = matches
-        .value_of("expected_genesis_hash")
-        .map(|s| Hash::from_str(&s).unwrap());
-
     println!(
         "{} {}",
         style(crate_name!()).bold(),
@@ -743,10 +765,6 @@ pub fn main() {
             .join(","),
     );
 
-    if matches.is_present("no_voting") {
-        validator_config.voting_disabled = true;
-    }
-
     let vote_account = pubkey_of(&matches, "vote_account").unwrap_or_else(|| {
         // Disable voting because normal (=not bootstrapping) validator rejects
         // non-voting accounts (= ephemeral keypairs).