Compare commits

...

10 Commits

19 changed files with 299 additions and 344 deletions

View File

@ -212,7 +212,7 @@ SUBCOMMANDS:
cluster-version Get the version of the cluster entrypoint
config Solana command-line tool configuration settings
confirm Confirm transaction by signature
create-address-with-seed Generate a dervied account address with a seed
create-address-with-seed Generate a derived account address with a seed
create-archiver-storage-account Create an archiver storage account
create-nonce-account Create a nonce account
create-stake-account Create a stake account
@ -618,7 +618,7 @@ ARGS:
#### solana-create-address-with-seed
```text
solana-create-address-with-seed
Generate a dervied account address with a seed
Generate a derived account address with a seed
USAGE:
solana create-address-with-seed [FLAGS] [OPTIONS] <SEED_STRING> <PROGRAM_ID>

View File

@ -2,6 +2,7 @@ use crate::{
cluster_query::*,
display::{println_name_value, println_signers},
nonce::{self, *},
offline::*,
stake::*,
storage::*,
validator_info::*,
@ -31,7 +32,7 @@ use solana_sdk::{
message::Message,
native_token::lamports_to_sol,
pubkey::Pubkey,
signature::{Keypair, KeypairUtil, Signature},
signature::{keypair_from_seed, Keypair, KeypairUtil, Signature},
system_instruction::{create_address_with_seed, SystemError, MAX_ADDRESS_SEED_LEN},
system_transaction,
transaction::{Transaction, TransactionError},
@ -125,7 +126,7 @@ impl From<Keypair> for SigningAuthority {
impl From<Pubkey> for SigningAuthority {
fn from(pubkey: Pubkey) -> Self {
SigningAuthority::Offline(pubkey, Keypair::new())
SigningAuthority::Offline(pubkey, keypair_from_seed(pubkey.as_ref()).unwrap())
}
}
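
A minimal standalone sketch (using only `solana_sdk`; not part of this diff) of the property the change above relies on: seeding the placeholder keypair from the pubkey bytes is deterministic, while `Keypair::new()` is random, so `Offline` authorities built from the same pubkey now agree with each other, which the new `test_signing_authority_dummy_keypairs` test later in this file depends on.

```rust
use solana_sdk::{
    pubkey::Pubkey,
    signature::{keypair_from_seed, Keypair, KeypairUtil},
};

fn main() {
    let pubkey = Pubkey::new(&[1u8; 32]);

    // Same seed -> same derived keypair (compared via their pubkeys).
    let a = keypair_from_seed(pubkey.as_ref()).unwrap();
    let b = keypair_from_seed(pubkey.as_ref()).unwrap();
    assert_eq!(a.pubkey(), b.pubkey());

    // Random keypairs almost surely differ, which is why the old
    // `Keypair::new()` placeholder broke equality comparisons.
    assert_ne!(Keypair::new().pubkey(), Keypair::new().pubkey());
}
```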
@ -488,8 +489,8 @@ pub fn parse_command(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, Box<dyn
("stake-authorize-withdrawer", Some(matches)) => {
parse_stake_authorize(matches, StakeAuthorize::Withdrawer)
}
("show-stake-account", Some(matches)) => parse_show_stake_account(matches),
("show-stake-history", Some(matches)) => parse_show_stake_history(matches),
("stake-account", Some(matches)) => parse_show_stake_account(matches),
("stake-history", Some(matches)) => parse_show_stake_history(matches),
// Storage Commands
("create-archiver-storage-account", Some(matches)) => {
parse_storage_create_archiver_account(matches)
@ -599,9 +600,9 @@ pub fn parse_command(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, Box<dyn
let timestamp_pubkey = value_of(&matches, "timestamp_pubkey");
let witnesses = values_of(&matches, "witness");
let cancelable = matches.is_present("cancelable");
let sign_only = matches.is_present("sign_only");
let signers = pubkeys_sigs_of(&matches, "signer");
let blockhash = value_of(&matches, "blockhash");
let sign_only = matches.is_present(SIGN_ONLY_ARG.name);
let signers = pubkeys_sigs_of(&matches, SIGNER_ARG.name);
let blockhash = value_of(&matches, BLOCKHASH_ARG.name);
let nonce_account = pubkey_of(&matches, NONCE_ARG.name);
let nonce_authority = if matches.is_present(NONCE_AUTHORITY_ARG.name) {
Some(SigningAuthority::new_from_matches(
@ -1871,7 +1872,7 @@ pub fn app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> App<'ab, '
)
.subcommand(
SubCommand::with_name("create-address-with-seed")
.about("Generate a dervied account address with a seed")
.about("Generate a derived account address with a seed")
.arg(
Arg::with_name("seed")
.index(1)
@ -1973,31 +1974,9 @@ pub fn app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> App<'ab, '
.long("cancelable")
.takes_value(false),
)
.arg(
Arg::with_name("sign_only")
.long("sign-only")
.takes_value(false)
.help("Sign the transaction offline"),
)
.offline_args()
.arg(nonce_arg())
.arg(nonce_authority_arg())
.arg(
Arg::with_name("signer")
.long("signer")
.value_name("PUBKEY=BASE58_SIG")
.takes_value(true)
.validator(is_pubkey_sig)
.multiple(true)
.help("Provide a public-key/signature pair for the transaction"),
)
.arg(
Arg::with_name("blockhash")
.long("blockhash")
.value_name("BLOCKHASH")
.takes_value(true)
.validator(is_hash)
.help("Use the supplied blockhash"),
),
.arg(nonce_authority_arg()),
)
.subcommand(
SubCommand::with_name("send-signature")
@ -2092,6 +2071,7 @@ mod tests {
use solana_sdk::{
account::Account,
nonce_state::{Meta as NonceMeta, NonceState},
pubkey::Pubkey,
signature::{read_keypair_file, write_keypair_file},
system_program,
transaction::TransactionError,
@ -2112,6 +2092,13 @@ mod tests {
path
}
#[test]
fn test_signing_authority_dummy_keypairs() {
let signing_authority: SigningAuthority = Pubkey::new(&[1u8; 32]).into();
assert_eq!(signing_authority, Pubkey::new(&[1u8; 32]).into());
assert_ne!(signing_authority, Pubkey::new(&[2u8; 32]).into());
}
#[test]
fn test_cli_parse_command() {
let test_commands = app("test", "desc", "version");
@ -2375,12 +2362,16 @@ mod tests {
);
// Test Pay Subcommand w/ sign-only
let blockhash = Hash::default();
let blockhash_string = format!("{}", blockhash);
let test_pay = test_commands.clone().get_matches_from(vec![
"test",
"pay",
&pubkey_string,
"50",
"lamports",
"--blockhash",
&blockhash_string,
"--sign-only",
]);
assert_eq!(
@ -2389,6 +2380,7 @@ mod tests {
command: CliCommand::Pay(PayCommand {
lamports: 50,
to: pubkey,
blockhash: Some(blockhash),
sign_only: true,
..PayCommand::default()
}),
@ -2406,6 +2398,8 @@ mod tests {
&pubkey_string,
"50",
"lamports",
"--blockhash",
&blockhash_string,
"--signer",
&signer1,
]);
@ -2415,6 +2409,7 @@ mod tests {
command: CliCommand::Pay(PayCommand {
lamports: 50,
to: pubkey,
blockhash: Some(blockhash),
signers: Some(vec![(key1, sig1)]),
..PayCommand::default()
}),
@ -2432,6 +2427,8 @@ mod tests {
&pubkey_string,
"50",
"lamports",
"--blockhash",
&blockhash_string,
"--signer",
&signer1,
"--signer",
@ -2443,6 +2440,7 @@ mod tests {
command: CliCommand::Pay(PayCommand {
lamports: 50,
to: pubkey,
blockhash: Some(blockhash),
signers: Some(vec![(key1, sig1), (key2, sig2)]),
..PayCommand::default()
}),
@ -2451,8 +2449,6 @@ mod tests {
);
// Test Pay Subcommand w/ Blockhash
let blockhash = Hash::default();
let blockhash_string = format!("{}", blockhash);
let test_pay = test_commands.clone().get_matches_from(vec![
"test",
"pay",

View File

@ -6,6 +6,7 @@ pub mod cluster_query;
pub mod config;
pub mod display;
pub mod nonce;
pub mod offline;
pub mod stake;
pub mod storage;
pub mod validator_info;

View File

@ -3,6 +3,7 @@ use crate::cli::{
log_instruction_custom_error, required_lamports_from, CliCommand, CliCommandInfo, CliConfig,
CliError, ProcessResult, SigningAuthority,
};
use crate::offline::BLOCKHASH_ARG;
use clap::{App, Arg, ArgMatches, SubCommand};
use solana_clap_utils::{input_parsers::*, input_validators::*, ArgConstant};
use solana_client::rpc_client::RpcClient;
@ -55,7 +56,7 @@ pub fn nonce_arg<'a, 'b>() -> Arg<'a, 'b> {
.long(NONCE_ARG.long)
.takes_value(true)
.value_name("PUBKEY")
.requires("blockhash")
.requires(BLOCKHASH_ARG.name)
.validator(is_pubkey)
.help(NONCE_ARG.help)
}
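
A small illustrative sketch (hypothetical arg names, plain clap 2, not the crate's own test) of what `.requires(BLOCKHASH_ARG.name)` buys here: clap rejects `--nonce` unless `--blockhash` is also supplied.

```rust
use clap::{App, Arg};

fn main() {
    // Two minimal args wired the same way as nonce_arg() above.
    let app = App::new("demo")
        .arg(
            Arg::with_name("nonce")
                .long("nonce")
                .takes_value(true)
                .requires("blockhash"), // like NONCE_ARG requiring BLOCKHASH_ARG.name
        )
        .arg(Arg::with_name("blockhash").long("blockhash").takes_value(true));

    // `--nonce` alone is rejected because it requires `--blockhash`.
    assert!(app
        .clone()
        .get_matches_from_safe(vec!["demo", "--nonce", "nonce_pubkey"])
        .is_err());

    // Supplying both satisfies the requirement.
    assert!(app
        .get_matches_from_safe(vec!["demo", "--nonce", "nonce_pubkey", "--blockhash", "hash"])
        .is_ok());
}
```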

cli/src/offline.rs (new file, 63 lines)
View File

@ -0,0 +1,63 @@
use clap::{App, Arg};
use solana_clap_utils::{
input_validators::{is_hash, is_pubkey_sig},
ArgConstant,
};
pub const BLOCKHASH_ARG: ArgConstant<'static> = ArgConstant {
name: "blockhash",
long: "blockhash",
help: "Use the supplied blockhash",
};
pub const SIGN_ONLY_ARG: ArgConstant<'static> = ArgConstant {
name: "sign_only",
long: "sign-only",
help: "Sign the transaction offline",
};
pub const SIGNER_ARG: ArgConstant<'static> = ArgConstant {
name: "signer",
long: "signer",
help: "Provid a public-key/signature pair for the transaction",
};
fn blockhash_arg<'a, 'b>() -> Arg<'a, 'b> {
Arg::with_name(BLOCKHASH_ARG.name)
.long(BLOCKHASH_ARG.long)
.takes_value(true)
.value_name("BLOCKHASH")
.validator(is_hash)
.help(BLOCKHASH_ARG.help)
}
fn sign_only_arg<'a, 'b>() -> Arg<'a, 'b> {
Arg::with_name(SIGN_ONLY_ARG.name)
.long(SIGN_ONLY_ARG.long)
.takes_value(false)
.requires(BLOCKHASH_ARG.name)
.help(SIGN_ONLY_ARG.help)
}
fn signer_arg<'a, 'b>() -> Arg<'a, 'b> {
Arg::with_name(SIGNER_ARG.name)
.long(SIGNER_ARG.long)
.takes_value(true)
.value_name("BASE58_PUBKEY=BASE58_SIG")
.validator(is_pubkey_sig)
.requires(BLOCKHASH_ARG.name)
.multiple(true)
.help(SIGNER_ARG.help)
}
pub trait OfflineArgs {
fn offline_args(self) -> Self;
}
impl OfflineArgs for App<'_, '_> {
fn offline_args(self) -> Self {
self.arg(blockhash_arg())
.arg(sign_only_arg())
.arg(signer_arg())
}
}
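
For context, a minimal usage sketch (assumed to sit inside this `cli` crate where `offline::*` and `solana_clap_utils::input_parsers::*` are in scope): a subcommand attaches the shared flags with `.offline_args()` and reads them back by their `ArgConstant` names, mirroring how `parse_command` and the stake parsers elsewhere in this diff do it.

```rust
use clap::{App, SubCommand};
use solana_clap_utils::input_parsers::{pubkeys_sigs_of, value_of};
use solana_sdk::hash::Hash;

use crate::offline::{OfflineArgs, BLOCKHASH_ARG, SIGNER_ARG, SIGN_ONLY_ARG};

fn demo() {
    // Any subcommand that supports offline signing just chains `.offline_args()`.
    let app = App::new("demo").subcommand(SubCommand::with_name("pay").offline_args());

    let matches = app.get_matches_from(vec![
        "demo",
        "pay",
        "--blockhash",
        "11111111111111111111111111111111", // base58 for an all-zero hash
        "--sign-only",
    ]);
    let (_, matches) = matches.subcommand();
    let matches = matches.unwrap();

    // Flags are read back via the ArgConstant names, not string literals.
    let sign_only = matches.is_present(SIGN_ONLY_ARG.name);
    let blockhash: Option<Hash> = value_of(matches, BLOCKHASH_ARG.name);
    let signers = pubkeys_sigs_of(matches, SIGNER_ARG.name);

    assert!(sign_only);
    assert!(blockhash.is_some());
    assert!(signers.is_none());
}
```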

View File

@ -6,6 +6,7 @@ use crate::{
CliConfig, CliError, ProcessResult, SigningAuthority,
},
nonce::{check_nonce_account, nonce_arg, NONCE_ARG, NONCE_AUTHORITY_ARG},
offline::*,
};
use clap::{App, Arg, ArgMatches, SubCommand};
use console::style;
@ -172,29 +173,7 @@ impl StakeSubCommands for App<'_, '_> {
.help("The vote account to which the stake will be delegated")
)
.arg(stake_authority_arg())
.arg(
Arg::with_name("sign_only")
.long("sign-only")
.takes_value(false)
.help("Sign the transaction offline"),
)
.arg(
Arg::with_name("signer")
.long("signer")
.value_name("PUBKEY=BASE58_SIG")
.takes_value(true)
.validator(is_pubkey_sig)
.multiple(true)
.help("Provide a public-key/signature pair for the transaction"),
)
.arg(
Arg::with_name("blockhash")
.long("blockhash")
.value_name("BLOCKHASH")
.takes_value(true)
.validator(is_hash)
.help("Use the supplied blockhash"),
)
.offline_args()
.arg(nonce_arg())
.arg(nonce_authority_arg())
)
@ -220,29 +199,7 @@ impl StakeSubCommands for App<'_, '_> {
.help("New authorized staker")
)
.arg(stake_authority_arg())
.arg(
Arg::with_name("sign_only")
.long("sign-only")
.takes_value(false)
.help("Sign the transaction offline"),
)
.arg(
Arg::with_name("signer")
.long("signer")
.value_name("PUBKEY=BASE58_SIG")
.takes_value(true)
.validator(is_pubkey_sig)
.multiple(true)
.help("Provide a public-key/signature pair for the transaction"),
)
.arg(
Arg::with_name("blockhash")
.long("blockhash")
.value_name("BLOCKHASH")
.takes_value(true)
.validator(is_hash)
.help("Use the supplied blockhash"),
)
.offline_args()
.arg(nonce_arg())
.arg(nonce_authority_arg())
)
@ -268,29 +225,7 @@ impl StakeSubCommands for App<'_, '_> {
.help("New authorized withdrawer")
)
.arg(withdraw_authority_arg())
.arg(
Arg::with_name("sign_only")
.long("sign-only")
.takes_value(false)
.help("Sign the transaction offline"),
)
.arg(
Arg::with_name("signer")
.long("signer")
.value_name("PUBKEY=BASE58_SIG")
.takes_value(true)
.validator(is_pubkey_sig)
.multiple(true)
.help("Provide a public-key/signature pair for the transaction"),
)
.arg(
Arg::with_name("blockhash")
.long("blockhash")
.value_name("BLOCKHASH")
.takes_value(true)
.validator(is_hash)
.help("Use the supplied blockhash"),
)
.offline_args()
.arg(nonce_arg())
.arg(nonce_authority_arg())
)
@ -306,29 +241,7 @@ impl StakeSubCommands for App<'_, '_> {
.help("Stake account to be deactivated.")
)
.arg(stake_authority_arg())
.arg(
Arg::with_name("sign_only")
.long("sign-only")
.takes_value(false)
.help("Sign the transaction offline"),
)
.arg(
Arg::with_name("signer")
.long("signer")
.value_name("PUBKEY=BASE58_SIG")
.takes_value(true)
.validator(is_pubkey_sig)
.multiple(true)
.help("Provide a public-key/signature pair for the transaction"),
)
.arg(
Arg::with_name("blockhash")
.long("blockhash")
.value_name("BLOCKHASH")
.takes_value(true)
.validator(is_hash)
.help("Use the supplied blockhash"),
)
.offline_args()
.arg(nonce_arg())
.arg(nonce_authority_arg())
)
@ -437,9 +350,9 @@ pub fn parse_stake_delegate_stake(matches: &ArgMatches<'_>) -> Result<CliCommand
let stake_account_pubkey = pubkey_of(matches, "stake_account_pubkey").unwrap();
let vote_account_pubkey = pubkey_of(matches, "vote_account_pubkey").unwrap();
let force = matches.is_present("force");
let sign_only = matches.is_present("sign_only");
let signers = pubkeys_sigs_of(&matches, "signer");
let blockhash = value_of(matches, "blockhash");
let sign_only = matches.is_present(SIGN_ONLY_ARG.name);
let signers = pubkeys_sigs_of(&matches, SIGNER_ARG.name);
let blockhash = value_of(matches, BLOCKHASH_ARG.name);
let require_keypair = signers.is_none();
let nonce_account = pubkey_of(&matches, NONCE_ARG.name);
let stake_authority = if matches.is_present(STAKE_AUTHORITY_ARG.name) {
@ -487,8 +400,8 @@ pub fn parse_stake_authorize(
StakeAuthorize::Staker => STAKE_AUTHORITY_ARG.name,
StakeAuthorize::Withdrawer => WITHDRAW_AUTHORITY_ARG.name,
};
let sign_only = matches.is_present("sign_only");
let signers = pubkeys_sigs_of(&matches, "signer");
let sign_only = matches.is_present(SIGN_ONLY_ARG.name);
let signers = pubkeys_sigs_of(&matches, SIGNER_ARG.name);
let authority = if matches.is_present(authority_flag) {
Some(SigningAuthority::new_from_matches(
&matches,
@ -498,7 +411,7 @@ pub fn parse_stake_authorize(
} else {
None
};
let blockhash = value_of(matches, "blockhash");
let blockhash = value_of(matches, BLOCKHASH_ARG.name);
let nonce_account = pubkey_of(&matches, NONCE_ARG.name);
let nonce_authority = if matches.is_present(NONCE_AUTHORITY_ARG.name) {
Some(SigningAuthority::new_from_matches(
@ -528,9 +441,9 @@ pub fn parse_stake_authorize(
pub fn parse_stake_deactivate_stake(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
let stake_account_pubkey = pubkey_of(matches, "stake_account_pubkey").unwrap();
let sign_only = matches.is_present("sign_only");
let signers = pubkeys_sigs_of(&matches, "signer");
let blockhash = value_of(matches, "blockhash");
let sign_only = matches.is_present(SIGN_ONLY_ARG.name);
let signers = pubkeys_sigs_of(&matches, SIGNER_ARG.name);
let blockhash = value_of(matches, BLOCKHASH_ARG.name);
let require_keypair = signers.is_none();
let nonce_account = pubkey_of(&matches, NONCE_ARG.name);
let stake_authority = if matches.is_present(STAKE_AUTHORITY_ARG.name) {
@ -1177,11 +1090,15 @@ mod tests {
}
);
// Test Authorize Subcommand w/ sign-only
let blockhash = Hash::default();
let blockhash_string = format!("{}", blockhash);
let test_authorize = test_commands.clone().get_matches_from(vec![
"test",
&subcommand,
&stake_account_string,
&stake_account_string,
"--blockhash",
&blockhash_string,
"--sign-only",
]);
assert_eq!(
@ -1194,7 +1111,7 @@ mod tests {
authority: None,
sign_only: true,
signers: None,
blockhash: None,
blockhash: Some(blockhash),
nonce_account: None,
nonce_authority: None,
},
@ -1210,6 +1127,8 @@ mod tests {
&subcommand,
&stake_account_string,
&stake_account_string,
"--blockhash",
&blockhash_string,
"--signer",
&signer,
]);
@ -1223,7 +1142,7 @@ mod tests {
authority: None,
sign_only: false,
signers: Some(vec![(keypair.pubkey(), sig)]),
blockhash: None,
blockhash: Some(blockhash),
nonce_account: None,
nonce_authority: None,
},
@ -1239,6 +1158,8 @@ mod tests {
&subcommand,
&stake_account_string,
&stake_account_string,
"--blockhash",
&blockhash_string,
"--signer",
&signer,
"--signer",
@ -1254,7 +1175,7 @@ mod tests {
authority: None,
sign_only: false,
signers: Some(vec![(keypair.pubkey(), sig), (keypair2.pubkey(), sig2),]),
blockhash: None,
blockhash: Some(blockhash),
nonce_account: None,
nonce_authority: None,
},
@ -1262,8 +1183,6 @@ mod tests {
}
);
// Test Authorize Subcommand w/ blockhash
let blockhash = Hash::default();
let blockhash_string = format!("{}", blockhash);
let test_authorize = test_commands.clone().get_matches_from(vec![
"test",
&subcommand,
@ -1538,6 +1457,8 @@ mod tests {
"delegate-stake",
&stake_account_string,
&vote_account_string,
"--blockhash",
&blockhash_string,
"--sign-only",
]);
assert_eq!(
@ -1550,7 +1471,7 @@ mod tests {
force: false,
sign_only: true,
signers: None,
blockhash: None,
blockhash: Some(blockhash),
nonce_account: None,
nonce_authority: None,
},
@ -1567,6 +1488,8 @@ mod tests {
"delegate-stake",
&stake_account_string,
&vote_account_string,
"--blockhash",
&blockhash_string,
"--signer",
&signer1,
]);
@ -1580,7 +1503,7 @@ mod tests {
force: false,
sign_only: false,
signers: Some(vec![(key1, sig1)]),
blockhash: None,
blockhash: Some(blockhash),
nonce_account: None,
nonce_authority: None,
},
@ -1597,6 +1520,8 @@ mod tests {
"delegate-stake",
&stake_account_string,
&vote_account_string,
"--blockhash",
&blockhash_string,
"--signer",
&signer1,
"--signer",
@ -1612,7 +1537,7 @@ mod tests {
force: false,
sign_only: false,
signers: Some(vec![(key1, sig1), (key2, sig2)]),
blockhash: None,
blockhash: Some(blockhash),
nonce_account: None,
nonce_authority: None,
},
@ -1752,6 +1677,8 @@ mod tests {
"test",
"deactivate-stake",
&stake_account_string,
"--blockhash",
&blockhash_string,
"--sign-only",
]);
assert_eq!(
@ -1762,7 +1689,7 @@ mod tests {
stake_authority: None,
sign_only: true,
signers: None,
blockhash: None,
blockhash: Some(blockhash),
nonce_account: None,
nonce_authority: None,
},
@ -1778,6 +1705,8 @@ mod tests {
"test",
"deactivate-stake",
&stake_account_string,
"--blockhash",
&blockhash_string,
"--signer",
&signer1,
]);
@ -1789,7 +1718,7 @@ mod tests {
stake_authority: None,
sign_only: false,
signers: Some(vec![(key1, sig1)]),
blockhash: None,
blockhash: Some(blockhash),
nonce_account: None,
nonce_authority: None,
},
@ -1805,6 +1734,8 @@ mod tests {
"test",
"deactivate-stake",
&stake_account_string,
"--blockhash",
&blockhash_string,
"--signer",
&signer1,
"--signer",
@ -1818,7 +1749,7 @@ mod tests {
stake_authority: None,
sign_only: false,
signers: Some(vec![(key1, sig1), (key2, sig2)]),
blockhash: None,
blockhash: Some(blockhash),
nonce_account: None,
nonce_authority: None,
},

View File

@ -321,13 +321,27 @@ impl Tower {
if let Some(fork_stake) = stake_lockouts.get(&vote.slot) {
let lockout = fork_stake.stake as f64 / total_staked as f64;
trace!(
"fork_stake {} {} {} {}",
"fork_stake slot: {} lockout: {} fork_stake: {} total_stake: {}",
slot,
lockout,
fork_stake.stake,
total_staked
);
lockout > self.threshold_size
for (new_lockout, original_lockout) in
lockouts.votes.iter().zip(self.lockouts.votes.iter())
{
if new_lockout.slot == original_lockout.slot {
if new_lockout.confirmation_count <= self.threshold_depth as u32 {
break;
}
if new_lockout.confirmation_count != original_lockout.confirmation_count {
return lockout > self.threshold_size;
}
} else {
break;
}
}
true
} else {
false
}
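
A simplified standalone sketch (hypothetical `Lockout` struct and function name, not the real vote-program types) of the check introduced above: the stake threshold is only enforced when the simulated vote actually deepens the confirmation count of a lockout beyond `threshold_depth`; otherwise the check passes.

```rust
struct Lockout {
    slot: u64,
    confirmation_count: u32,
}

fn threshold_satisfied(
    new_lockouts: &[Lockout],      // tower after the simulated vote
    original_lockouts: &[Lockout], // tower before the simulated vote
    threshold_depth: usize,
    threshold_size: f64,
    fork_stake_ratio: f64, // fork_stake.stake / total_staked from above
) -> bool {
    for (new, original) in new_lockouts.iter().zip(original_lockouts.iter()) {
        if new.slot != original.slot {
            break;
        }
        if new.confirmation_count <= threshold_depth as u32 {
            break;
        }
        if new.confirmation_count != original.confirmation_count {
            // The vote deepened this lockout, so the fork must carry enough stake.
            return fork_stake_ratio > threshold_size;
        }
    }
    true
}

fn main() {
    let before = [Lockout { slot: 0, confirmation_count: 2 }];
    let after = [Lockout { slot: 0, confirmation_count: 3 }];
    // Deepened past depth 2, but only 50% of stake on the fork: fails.
    assert!(!threshold_satisfied(&after, &before, 2, 0.67, 0.5));
    // Confirmation count unchanged: the check passes regardless of stake.
    assert!(threshold_satisfied(&before, &before, 2, 0.67, 0.5));
}
```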
@ -742,6 +756,34 @@ mod test {
assert!(!tower.check_vote_stake_threshold(1, &stakes, 2));
}
#[test]
fn test_check_vote_threshold_lockouts_not_updated() {
solana_logger::setup();
let mut tower = Tower::new_for_tests(1, 0.67);
let stakes = vec![
(
0,
StakeLockout {
stake: 1,
lockout: 8,
},
),
(
1,
StakeLockout {
stake: 2,
lockout: 8,
},
),
]
.into_iter()
.collect();
tower.record_vote(0, Hash::default());
tower.record_vote(1, Hash::default());
tower.record_vote(2, Hash::default());
assert!(tower.check_vote_stake_threshold(6, &stakes, 2));
}
#[test]
fn test_lockout_is_updated_for_entire_branch() {
let mut stake_lockouts = HashMap::new();

View File

@ -30,7 +30,6 @@ pub mod gossip_service;
pub mod ledger_cleanup_service;
pub mod local_vote_signer_service;
pub mod packet;
pub mod partition_cfg;
pub mod poh_recorder;
pub mod poh_service;
pub mod recvmmsg;

View File

@ -1,92 +0,0 @@
use solana_ledger::leader_schedule_cache::LeaderScheduleCache;
use solana_ledger::shred::Shred;
use solana_runtime::bank::Bank;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::timing::timestamp;
use std::collections::HashSet;
use std::sync::Arc;
use std::sync::RwLock;
///Configure a partition in the retransmit stage
#[derive(Debug, Clone)]
pub struct Partition {
pub num_partitions: usize,
pub my_partition: usize,
pub start_ts: u64,
pub end_ts: u64,
leaders: Arc<RwLock<Vec<Pubkey>>>,
}
impl Default for Partition {
fn default() -> Self {
Self {
num_partitions: 0,
my_partition: 0,
start_ts: 0,
end_ts: 0,
leaders: Arc::new(RwLock::new(vec![])),
}
}
}
#[derive(Default, Debug, Clone)]
pub struct PartitionCfg {
partitions: Vec<Partition>,
}
impl PartitionCfg {
pub fn new(partitions: Vec<Partition>) -> Self {
Self { partitions }
}
pub fn is_connected(
&self,
bank: &Option<Arc<Bank>>,
leader_schedule_cache: &Arc<LeaderScheduleCache>,
shred: &Shred,
) -> bool {
if bank.is_none() {
return true;
}
let bank = bank.as_ref().unwrap().clone();
let slot_leader_pubkey = leader_schedule_cache.slot_leader_at(shred.slot(), Some(&bank));
let slot_leader_pubkey = slot_leader_pubkey.unwrap_or_default();
let time = timestamp();
for p in &self.partitions {
let is_time = (p.start_ts <= time) && (time < p.end_ts);
if !is_time {
continue;
}
trace!("PARTITION_TEST partition time! {}", p.my_partition);
if p.num_partitions == 0 {
continue;
}
if p.leaders.read().unwrap().is_empty() {
let mut leader_vec = p.leaders.write().unwrap();
let mut leaders: Vec<Pubkey> = bank.vote_accounts().keys().cloned().collect();
leaders.sort();
*leader_vec = leaders;
warn!("PARTITION_TEST partition enabled {}", p.my_partition);
}
let is_connected: bool = {
let leaders = p.leaders.read().unwrap();
let start = p.my_partition * leaders.len() / p.num_partitions;
let partition_size = leaders.len() / p.num_partitions;
let end = start + partition_size;
let end = if leaders.len() - end < partition_size {
leaders.len()
} else {
end
};
let my_leaders: HashSet<_> = leaders[start..end].iter().collect();
my_leaders.contains(&slot_leader_pubkey)
};
if is_connected {
trace!("PARTITION_TEST connected {}", p.my_partition);
continue;
}
trace!("PARTITION_TEST not connected {}", p.my_partition);
return false;
}
trace!("PARTITION_TEST connected");
true
}
}

View File

@ -3,7 +3,6 @@
use crate::{
cluster_info::{compute_retransmit_peers, ClusterInfo, DATA_PLANE_FANOUT},
packet::Packets,
partition_cfg::PartitionCfg,
repair_service::RepairStrategy,
result::{Error, Result},
streamer::PacketReceiver,
@ -22,7 +21,7 @@ use solana_sdk::epoch_schedule::EpochSchedule;
use std::{
cmp,
net::UdpSocket,
sync::atomic::AtomicBool,
sync::atomic::{AtomicBool, Ordering},
sync::mpsc::channel,
sync::mpsc::RecvTimeoutError,
sync::Mutex,
@ -213,7 +212,7 @@ impl RetransmitStage {
exit: &Arc<AtomicBool>,
completed_slots_receiver: CompletedSlotsReceiver,
epoch_schedule: EpochSchedule,
cfg: Option<PartitionCfg>,
cfg: Option<Arc<AtomicBool>>,
shred_version: u16,
) -> Self {
let (retransmit_sender, retransmit_receiver) = channel();
@ -245,7 +244,7 @@ impl RetransmitStage {
move |id, shred, working_bank, last_root| {
let is_connected = cfg
.as_ref()
.map(|x| x.is_connected(&working_bank, &leader_schedule_cache, shred))
.map(|x| x.load(Ordering::Relaxed))
.unwrap_or(true);
let rv = should_retransmit_and_persist(
shred,

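A minimal standalone sketch (std only, hypothetical variable names) of the flag pattern that replaces `PartitionCfg` here: the test harness shares an `Arc<AtomicBool>` with the validator config, and the retransmit closure treats an absent flag as "connected".

```rust
use std::sync::{
    atomic::{AtomicBool, Ordering},
    Arc,
};

fn main() {
    // The local-cluster test creates the flag and hands a clone to each validator
    // config (`enable_partition` below); `None` means no partition is ever simulated.
    let enable_partition: Option<Arc<AtomicBool>> = Some(Arc::new(AtomicBool::new(true)));
    let cfg = enable_partition.clone();

    // Inside the retransmit closure: absent flag, or flag set to true, means connected.
    let is_connected = cfg.as_ref().map(|x| x.load(Ordering::Relaxed)).unwrap_or(true);
    assert!(is_connected);

    // The test flips the flag to start the partition, and back again to resolve it.
    enable_partition.as_ref().unwrap().store(false, Ordering::Relaxed);
    let is_connected = cfg.as_ref().map(|x| x.load(Ordering::Relaxed)).unwrap_or(true);
    assert!(!is_connected);
}
```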
View File

@ -6,7 +6,6 @@ use crate::{
cluster_info::ClusterInfo,
commitment::BlockCommitmentCache,
ledger_cleanup_service::LedgerCleanupService,
partition_cfg::PartitionCfg,
poh_recorder::PohRecorder,
replay_stage::{ReplayStage, ReplayStageConfig},
retransmit_stage::RetransmitStage,
@ -84,7 +83,7 @@ impl Tvu {
completed_slots_receiver: CompletedSlotsReceiver,
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
sigverify_disabled: bool,
cfg: Option<PartitionCfg>,
cfg: Option<Arc<AtomicBool>>,
shred_version: u16,
transaction_status_sender: Option<TransactionStatusSender>,
) -> Self {

View File

@ -6,7 +6,6 @@ use crate::{
commitment::BlockCommitmentCache,
contact_info::ContactInfo,
gossip_service::{discover_cluster, GossipService},
partition_cfg::PartitionCfg,
poh_recorder::PohRecorder,
poh_service::PohService,
rpc::JsonRpcConfig,
@ -66,7 +65,7 @@ pub struct ValidatorConfig {
pub snapshot_config: Option<SnapshotConfig>,
pub max_ledger_slots: Option<u64>,
pub broadcast_stage_type: BroadcastStageType,
pub partition_cfg: Option<PartitionCfg>,
pub enable_partition: Option<Arc<AtomicBool>>,
pub fixed_leader_schedule: Option<FixedSchedule>,
pub wait_for_supermajority: bool,
pub new_hard_forks: Option<Vec<Slot>>,
@ -87,7 +86,7 @@ impl Default for ValidatorConfig {
rpc_config: JsonRpcConfig::default(),
snapshot_config: None,
broadcast_stage_type: BroadcastStageType::Standard,
partition_cfg: None,
enable_partition: None,
fixed_leader_schedule: None,
wait_for_supermajority: false,
new_hard_forks: None,
@ -127,6 +126,7 @@ pub struct Validator {
}
impl Validator {
#[allow(clippy::cognitive_complexity)]
pub fn new(
mut node: Node,
keypair: &Arc<Keypair>,
@ -172,6 +172,15 @@ impl Validator {
let exit = Arc::new(AtomicBool::new(false));
let bank_info = &bank_forks_info[0];
let bank = bank_forks[bank_info.bank_slot].clone();
info!("Starting validator from slot {}", bank.slot());
{
let hard_forks: Vec<_> = bank.hard_forks().read().unwrap().iter().copied().collect();
if !hard_forks.is_empty() {
info!("Hard forks: {:?}", hard_forks);
}
}
let bank_forks = Arc::new(RwLock::new(bank_forks));
let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default()));
@ -370,7 +379,7 @@ impl Validator {
completed_slots_receiver,
block_commitment_cache,
config.dev_sigverify_disabled,
config.partition_cfg.clone(),
config.enable_partition.clone(),
node.info.shred_version,
transaction_status_sender.clone(),
);
@ -475,7 +484,7 @@ fn compute_shred_version(genesis_hash: &Hash, hard_forks: &HardForks) -> u16 {
let mut hash = *genesis_hash;
for (slot, count) in hard_forks.iter() {
let mut buf = [0u8; 16];
LittleEndian::write_u64(&mut buf[..7], *slot);
LittleEndian::write_u64(&mut buf[..8], *slot);
LittleEndian::write_u64(&mut buf[8..], *count as u64);
hash = extend_and_hash(&hash, &buf);
}
@ -654,6 +663,16 @@ mod tests {
use crate::genesis_utils::create_genesis_config_with_leader;
use std::fs::remove_dir_all;
#[test]
fn test_compute_shred_version() {
let mut hard_forks = HardForks::default();
assert_eq!(compute_shred_version(&Hash::default(), &hard_forks), 1);
hard_forks.register(1);
assert_eq!(compute_shred_version(&Hash::default(), &hard_forks), 55551);
hard_forks.register(1);
assert_eq!(compute_shred_version(&Hash::default(), &hard_forks), 46353);
}
#[test]
fn validator_exit() {
solana_logger::setup();

View File

@ -220,7 +220,6 @@ pub const VALIDATOR_PUBKEYS: &[&str] = &[
"7v5DXDvYzkgTdFYXYB12ZLKD6z8QfzR53N9hg6XgEQJE", // Cryptium Labs GmbH
"8LSwP5qYbmuUfKLGwi8XaKJnai9HyZAJTnBovyWebRfd", //
"8UPb8LMWyoJJC9Aeq9QmTzKZKV2ssov739bTJ14M4ws1", //
"8oRw7qpj6XgLGXYCDuNoTMCqoJnDd6A8LTpNyqApSfkA", //
"8wFK4fCAuDoAH1fsgou9yKZPqDMFtJUVoDdkZAAMuhyA", // LunaNova Technologies Ltd
"94eWgQm2k8BXKEWbJP2eScHZeKopXpqkuoVrCofQWBhW", // Node A-Team
"9J8WcnXxo3ArgEwktfk9tsrf4Rp8h5uPUgnQbQHLvtkd", // moonli.me

View File

@ -396,11 +396,11 @@ mod tests {
);
assert_eq!(
cache.next_leader_slot(&pubkey, 0, &bank, None),
Some((1, 6047999))
Some((1, 863999))
);
assert_eq!(
cache.next_leader_slot(&pubkey, 1, &bank, None),
Some((2, 6047999))
Some((2, 863999))
);
assert_eq!(
cache.next_leader_slot(

View File

@ -448,7 +448,7 @@ pub fn bank_from_archive<P: AsRef<Path>>(
} else {
// Once v0.23.x is deployed, this default can be removed and snapshots without a version
// file can be rejected
String::from("v0.22.3")
String::from("0.22.3")
};
let bank = rebuild_bank_from_snapshots(
@ -525,7 +525,7 @@ where
|stream| {
let mut bank: Bank = match snapshot_version {
env!("CARGO_PKG_VERSION") => deserialize_from_snapshot(stream.by_ref())?,
"v0.22.3" => {
"0.22.3" => {
let bank0223: solana_runtime::bank::LegacyBank0223 =
deserialize_from_snapshot(stream.by_ref())?;
bank0223.into()

View File

@ -1,13 +1,11 @@
use assert_matches::assert_matches;
use log::*;
use serial_test_derive::serial;
use solana_client::rpc_client::RpcClient;
use solana_client::thin_client::create_client;
use solana_core::{
broadcast_stage::BroadcastStageType,
consensus::VOTE_THRESHOLD_DEPTH,
gossip_service::discover_cluster,
partition_cfg::{Partition, PartitionCfg},
validator::ValidatorConfig,
broadcast_stage::BroadcastStageType, consensus::VOTE_THRESHOLD_DEPTH,
gossip_service::discover_cluster, validator::ValidatorConfig,
};
use solana_ledger::{
bank_forks::SnapshotConfig, blockstore::Blockstore, leader_schedule::FixedSchedule,
@ -18,7 +16,6 @@ use solana_local_cluster::{
cluster_tests,
local_cluster::{ClusterConfig, LocalCluster},
};
use solana_sdk::timing::timestamp;
use solana_sdk::{
client::SyncClient,
clock,
@ -28,6 +25,7 @@ use solana_sdk::{
poh_config::PohConfig,
signature::{Keypair, KeypairUtil},
};
use std::sync::atomic::{AtomicBool, Ordering};
use std::{
collections::{HashMap, HashSet},
fs, iter,
@ -248,7 +246,7 @@ fn run_cluster_partition(
};
let validator_pubkeys: Vec<_> = validator_keys.iter().map(|v| v.pubkey()).collect();
let mut config = ClusterConfig {
let config = ClusterConfig {
cluster_lamports,
node_stakes,
validator_configs: vec![validator_config.clone(); num_nodes],
@ -256,71 +254,65 @@ fn run_cluster_partition(
..ClusterConfig::default()
};
let now = timestamp();
// Partition needs to start after the first few shorter warmup epochs, otherwise
// no root will be set before the partition is resolved, the leader schedule will
// not be computable, and the cluster wll halt.
let partition_epoch_start_offset = cluster_tests::time_until_nth_epoch(
partition_start_epoch,
config.slots_per_epoch,
config.stakers_slot_offset,
let enable_partition = Some(Arc::new(AtomicBool::new(true)));
info!(
"PARTITION_TEST starting cluster with {:?} partitions slots_per_epoch: {}",
partitions, config.slots_per_epoch,
);
// Assume it takes <= 10 seconds for `LocalCluster::new` to boot up.
let local_cluster_boot_time = 10_000;
let partition_start = now + partition_epoch_start_offset + local_cluster_boot_time;
let partition_end = partition_start + leader_schedule_time as u64;
let mut validator_index = 0;
for (i, partition) in partitions.iter().enumerate() {
for _ in partition.iter() {
let mut p1 = Partition::default();
p1.num_partitions = partitions.len();
p1.my_partition = i;
p1.start_ts = partition_start;
p1.end_ts = partition_end;
config.validator_configs[validator_index].partition_cfg =
Some(PartitionCfg::new(vec![p1]));
validator_index += 1;
let mut cluster = LocalCluster::new(&config);
let (cluster_nodes, _) = discover_cluster(&cluster.entry_point_info.gossip, num_nodes).unwrap();
info!("PARTITION_TEST sleeping until partition starting condition",);
loop {
let mut reached_epoch = true;
for node in &cluster_nodes {
let node_client = RpcClient::new_socket(node.rpc);
if let Ok(epoch_info) = node_client.get_epoch_info() {
info!("slots_per_epoch: {:?}", epoch_info);
if epoch_info.slots_in_epoch <= (1 << VOTE_THRESHOLD_DEPTH) {
reached_epoch = false;
break;
}
} else {
reached_epoch = false;
}
}
if reached_epoch {
info!("PARTITION_TEST start partition");
enable_partition
.clone()
.unwrap()
.store(false, Ordering::Relaxed);
break;
} else {
sleep(Duration::from_millis(100));
}
}
info!(
"PARTITION_TEST starting cluster with {:?} partitions",
partitions
);
let now = Instant::now();
let mut cluster = LocalCluster::new(&config);
let elapsed = now.elapsed();
assert!(elapsed.as_millis() < local_cluster_boot_time as u128);
sleep(Duration::from_millis(leader_schedule_time));
info!("PARTITION_TEST remove partition");
enable_partition.unwrap().store(true, Ordering::Relaxed);
let now = timestamp();
let timeout = partition_start as u64 - now as u64;
info!(
"PARTITION_TEST sleeping until partition start timeout {}",
timeout
);
let mut dead_nodes = HashSet::new();
if timeout > 0 {
sleep(Duration::from_millis(timeout as u64));
}
info!("PARTITION_TEST done sleeping until partition start timeout");
let now = timestamp();
let timeout = partition_end as u64 - now as u64;
info!(
"PARTITION_TEST sleeping until partition end timeout {}",
timeout
);
let mut alive_node_contact_infos = vec![];
let should_exits: Vec<_> = partitions
.iter()
.flat_map(|p| p.iter().map(|(_, should_exit)| should_exit))
.collect();
assert_eq!(should_exits.len(), validator_pubkeys.len());
let timeout = 10;
if timeout > 0 {
// Give partitions time to propagate their blocks from durinig the partition
// Give partitions time to propagate their blocks from during the partition
// after the partition resolves
let propagation_time = leader_schedule_time;
info!("PARTITION_TEST resolving partition");
sleep(Duration::from_millis(timeout));
info!("PARTITION_TEST waiting for blocks to propagate after partition");
info!("PARTITION_TEST resolving partition. sleeping {}ms", timeout);
sleep(Duration::from_millis(10_000));
info!(
"PARTITION_TEST waiting for blocks to propagate after partition {}ms",
propagation_time
);
sleep(Duration::from_millis(propagation_time));
info!("PARTITION_TEST resuming normal operation");
for (pubkey, should_exit) in validator_pubkeys.iter().zip(should_exits) {
@ -353,6 +345,7 @@ fn run_cluster_partition(
info!("PARTITION_TEST looking for new roots on all nodes");
let mut roots = vec![HashSet::new(); alive_node_contact_infos.len()];
let mut done = false;
let mut last_print = Instant::now();
while !done {
for (i, ingress_node) in alive_node_contact_infos.iter().enumerate() {
let client = create_client(
@ -362,12 +355,15 @@ fn run_cluster_partition(
let slot = client.get_slot().unwrap_or(0);
roots[i].insert(slot);
let min_node = roots.iter().map(|r| r.len()).min().unwrap_or(0);
info!("PARTITION_TEST min observed roots {}/16", min_node);
if last_print.elapsed().as_secs() > 3 {
info!("PARTITION_TEST min observed roots {}/16", min_node);
last_print = Instant::now();
}
done = min_node >= 16;
}
sleep(Duration::from_millis(clock::DEFAULT_MS_PER_SLOT / 2));
}
info!("PARTITION_TEST done spending on all node");
info!("PARTITION_TEST done waiting for roots");
}
#[allow(unused_attributes)]
@ -424,6 +420,7 @@ fn test_kill_partition() {
leader_schedule.push(k.pubkey())
}
}
info!("leader_schedule: {}", leader_schedule.len());
run_cluster_partition(
&partitions,

View File

@ -174,7 +174,7 @@ cloud_CreateInstances() {
# the stock Ubuntu 18.04 image and programmatically install CUDA after the
# instance boots
#
imageName="ubuntu-1804-bionic-v20181029-with-cuda-10-and-cuda-9-2"
imageName="ubuntu-1804-bionic-v20181029-with-cuda-10-and-cuda-9-2 --image-project principal-lane-200702"
else
# Upstream Ubuntu 18.04 LTS image
imageName="ubuntu-1804-bionic-v20190813a --image-project ubuntu-os-cloud"

View File

@ -2901,7 +2901,7 @@ mod tests {
#[test]
#[allow(clippy::cognitive_complexity)]
fn test_rent() {
fn test_rent_complex() {
let mock_program_id = Pubkey::new(&[2u8; 32]);
let (mut genesis_config, _mint_keypair) = create_genesis_config(10);
@ -2941,6 +2941,8 @@ mod tests {
generic_rent_due_for_system_account,
);
let magic_rent_number = 131; // yuck, derive this value programmatically one day
let t1 = system_transaction::transfer(
&keypairs[0],
&keypairs[1].pubkey(),
@ -2962,7 +2964,7 @@ mod tests {
let t4 = system_transaction::transfer(
&keypairs[6],
&keypairs[7].pubkey(),
48991,
generic_rent_due_for_system_account + 1,
genesis_config.hash(),
);
let t5 = system_transaction::transfer(
@ -3002,19 +3004,19 @@ mod tests {
let mut rent_collected = 0;
// 48992 - 48990(Rent) - 1(transfer)
// 48992 - generic_rent_due_for_system_account(Rent) - 1(transfer)
assert_eq!(bank.get_balance(&keypairs[0].pubkey()), 1);
rent_collected += generic_rent_due_for_system_account;
// 48992 - 48990(Rent) + 1(transfer)
// 48992 - generic_rent_due_for_system_account(Rent) + 1(transfer)
assert_eq!(bank.get_balance(&keypairs[1].pubkey()), 3);
rent_collected += generic_rent_due_for_system_account;
// 48992 - 48990(Rent) - 1(transfer)
// 48992 - generic_rent_due_for_system_account(Rent) - 1(transfer)
assert_eq!(bank.get_balance(&keypairs[2].pubkey()), 1);
rent_collected += generic_rent_due_for_system_account;
// 48992 - 48990(Rent) + 1(transfer)
// 48992 - generic_rent_due_for_system_account(Rent) + 1(transfer)
assert_eq!(bank.get_balance(&keypairs[3].pubkey()), 3);
rent_collected += generic_rent_due_for_system_account;
@ -3022,45 +3024,46 @@ mod tests {
assert_eq!(bank.get_balance(&keypairs[4].pubkey()), 10);
assert_eq!(bank.get_balance(&keypairs[5].pubkey()), 10);
// 98004 - 48990(Rent) - 48991(transfer)
// 98004 - generic_rent_due_for_system_account(Rent) - 48991(transfer)
assert_eq!(bank.get_balance(&keypairs[6].pubkey()), 23);
rent_collected += generic_rent_due_for_system_account;
// 0 + 48990(transfer) - 917(Rent)
// 0 + 48990(transfer) - magic_rent_number(Rent)
assert_eq!(
bank.get_balance(&keypairs[7].pubkey()),
generic_rent_due_for_system_account + 1 - 917
generic_rent_due_for_system_account + 1 - magic_rent_number
);
// Epoch should be updated
// Rent deducted on store side
let account8 = bank.get_account(&keypairs[7].pubkey()).unwrap();
// Epoch should be set correctly.
assert_eq!(account8.rent_epoch, bank.epoch + 1);
rent_collected += 917;
rent_collected += magic_rent_number;
// 49921 - 48900(Rent) - 929(Transfer)
// 49921 - generic_rent_due_for_system_account(Rent) - 929(Transfer)
assert_eq!(bank.get_balance(&keypairs[8].pubkey()), 2);
rent_collected += generic_rent_due_for_system_account;
let account10 = bank.get_account(&keypairs[9].pubkey()).unwrap();
// Account was overwritten at load time, since it didn't have sufficient balance to pay rent
// Then, at store time we deducted 917 rent for the current epoch, once it has balance
// Then, at store time we deducted `magic_rent_number` rent for the current epoch, once it has balance
assert_eq!(account10.rent_epoch, bank.epoch + 1);
// account data is blank now
assert_eq!(account10.data.len(), 0);
// 10 - 10(Rent) + 929(Transfer) - 917(Rent)
assert_eq!(account10.lamports, 12);
rent_collected += 927;
// 10 - 10(Rent) + 929(Transfer) - magic_rent_number(Rent)
assert_eq!(account10.lamports, 929 - magic_rent_number);
rent_collected += magic_rent_number + 10;
// 48993 - 48990(Rent)
// 48993 - generic_rent_due_for_system_account(Rent)
assert_eq!(bank.get_balance(&keypairs[10].pubkey()), 3);
rent_collected += generic_rent_due_for_system_account;
// 48993 - 48990(Rent) + 1(Addition by program)
// 48993 - generic_rent_due_for_system_account(Rent) + 1(Addition by program)
assert_eq!(bank.get_balance(&keypairs[11].pubkey()), 4);
rent_collected += generic_rent_due_for_system_account;
// 48993 - 48990(Rent) - 1(Deduction by program)
// 48993 - generic_rent_due_for_system_account(Rent) - 1(Deduction by program)
assert_eq!(bank.get_balance(&keypairs[12].pubkey()), 2);
rent_collected += generic_rent_due_for_system_account;

View File

@ -17,12 +17,10 @@ pub const DEFAULT_HASHES_PER_SECOND: u64 = 2_000_000;
pub const DEFAULT_DEV_SLOTS_PER_EPOCH: u64 = 8192;
pub const SECONDS_PER_DAY: u64 = 24 * 60 * 60;
pub const SECONDS_PER_WEEK: u64 = 7 * SECONDS_PER_DAY;
pub const SECONDS_PER_FORTNIGHT: u64 = 2 * SECONDS_PER_WEEK;
pub const TICKS_PER_FORTNIGHT: u64 = DEFAULT_TICKS_PER_SECOND * SECONDS_PER_FORTNIGHT;
pub const TICKS_PER_DAY: u64 = DEFAULT_TICKS_PER_SECOND * SECONDS_PER_DAY;
// 1 Epoch ~= 2 weeks
pub const DEFAULT_SLOTS_PER_EPOCH: u64 = TICKS_PER_FORTNIGHT / DEFAULT_TICKS_PER_SLOT;
// 1 Epoch ~= 2 days
pub const DEFAULT_SLOTS_PER_EPOCH: u64 = 2 * TICKS_PER_DAY / DEFAULT_TICKS_PER_SLOT;
// Storage segment configuration
pub const DEFAULT_SLOTS_PER_SEGMENT: u64 = 1024;
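
A worked sanity check of the new epoch length (a sketch only; `DEFAULT_TICKS_PER_SECOND` and `DEFAULT_TICKS_PER_SLOT` below are assumed values, not shown in this hunk). It also accounts for the `6047999 -> 863999` change in the `next_leader_slot` test earlier in this diff, which is the last slot of a two-epoch span.

```rust
// Assumed constants (not part of this hunk).
const DEFAULT_TICKS_PER_SECOND: u64 = 160;
const DEFAULT_TICKS_PER_SLOT: u64 = 64;
const SECONDS_PER_DAY: u64 = 24 * 60 * 60;
const TICKS_PER_DAY: u64 = DEFAULT_TICKS_PER_SECOND * SECONDS_PER_DAY;
const DEFAULT_SLOTS_PER_EPOCH: u64 = 2 * TICKS_PER_DAY / DEFAULT_TICKS_PER_SLOT;

fn main() {
    // 2 * 160 * 86_400 / 64 = 432_000 slots, i.e. roughly two days of 400ms slots.
    assert_eq!(DEFAULT_SLOTS_PER_EPOCH, 432_000);
    // Matches the updated leader_schedule_cache test: the last slot of the second
    // epoch is 2 * 432_000 - 1 = 863_999 (previously 2 * 3_024_000 - 1 = 6_047_999).
    assert_eq!(2 * DEFAULT_SLOTS_PER_EPOCH - 1, 863_999);
}
```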