Compare commits

178 Commits

f4e43731ef
884ef211f7
896ef5a15f
4ff482cd47
6e27360fbc
a10a0824eb
2fdda2ec1b
57f76a2111
287daa9248
7e40a051a4
bd0a7730b6
0d1c87e650
56cd963fd7
d1b26cb267
6a0a03415c
41d50adbf3
328b52c4d6
b7d04cf7b9
7ed2cf30a5
78fe5576a9
41179b1282
f82d99b4c2
967f0d07f2
940dbe99e9
98e1c68a70
d8e250e9b0
c8be8510ba
c99460ba15
769fcb7f50
60812790e1
a01e44f3b9
0917370bd5
09b612b130
26fdf3fb07
f41f3f6b51
677664e71d
a8071f1039
bc08351a0a
088b3893c3
d9d6dd9ba6
aca66674d3
597429ab3e
715360c1e7
fcabaa7eff
b14af989b8
1a919e0c3e
9c1a6bed7b
c48ec02f42
6f376489a5
0cbf7bef1e
0f87e598f6
363b75619f
090c801cc6
b711778d4a
d52569d66f
c676b7a473
71c49bc8cd
97a7f747fb
cef9e0de0c
5637acb799
3d3bdcb966
c9f02ae020
0e7512a225
c330016109
cb13cdec85
c65c580b20
d159ae9342
a540af1ca7
e9c234d89f
b472dac6b3
523dac1be3
962a2126b5
5c495ad1b0
f633f34e43
bacf1b9acc
4df9da5c48
30bbc1350d
2f0f1fd5f5
c28e6ebc4c
6479c11e9a
4ed0fcdde6
296a8ade63
a84953ccfd
8492031fd0
bff7259111
67e1814581
12e92dd59d
4ee366edfa
61573756f8
fe5fed1182
0c90307677
cdd2a51f1f
0dbe3434f0
ef205593c5
8b5ba771ad
991f99b239
d6f17517cb
15b2f280e3
60b43a1ddf
d6b83e3b0a
0446f89d22
54bc3e606e
fed90cfbe8
e2e41a29eb
3b813db42f
16b1a4d003
b51ea3ca0c
dc76675644
274a238a00
10507f0ade
af2a6106da
120a7e433f
98e34f07df
738df79394
98e9b6e70b
c9318f8dc2
78f3606e30
97f4d098e1
144a13b096
af0869c66c
48e565038a
cd6e1d921c
fb767f4612
9f35db28e5
ab19543dff
3d5f333a3b
d06ca605cf
334e11e4b9
fd68b4e7a8
e5ea16fad8
6d5a4b5cce
ffb6b5a23b
e247625025
67c07aa5d3
0de1ce0c2c
59f4fba05c
c0c764377c
c9bc059637
78147d48e4
5e7db52087
ae42413d57
d433bd3d84
58dd6dc227
893df9b277
17dc13760b
498bf911eb
96de58b3a4
9b61fa24c7
e9be3cf6bc
ea44a71914
a00fbbf5ca
eb1a04af65
d1fbf77f3f
04fbf73a29
db70eb3160
cd974c26b6
53e0f5d61c
e864bf4898
81d12b7644
309fcd6270
938112e449
3e012ea69e
975c942ea7
2798271da0
dc258cebab
4b8c5194c7
3c7c6dacfb
e36337a764
a49856b898
8ca2f52041
2f7f243022
7e443770d7
8ec09884b8
88c7e636d6
add3fd479d
70410536b9
6f3f9b485c
bd9ce3590d
Cargo.lock (generated): 677 lines changed; file diff suppressed because it is too large.
Cargo.toml

@@ -39,11 +39,11 @@ members = [
     "metrics",
     "net-shaper",
     "notifier",
+    "poh",
     "poh-bench",
     "program-test",
-    "programs/secp256k1",
     "programs/bpf_loader",
     "programs/budget",
     "programs/config",
     "programs/exchange",
     "programs/failure",
@@ -52,6 +52,7 @@ members = [
     "programs/stake",
     "programs/vote",
     "remote-wallet",
+    "rpc",
     "runtime",
     "runtime/store-tool",
     "sdk",
README.md

@@ -19,7 +19,7 @@ $ source $HOME/.cargo/env
 $ rustup component add rustfmt
 ```

-Please sure you are always using the latest stable rust version by running:
+Please make sure you are always using the latest stable rust version by running:

 ```bash
 $ rustup update
account-decoder/Cargo.toml

@@ -1,6 +1,6 @@
 [package]
 name = "solana-account-decoder"
-version = "1.7.0"
+version = "1.7.4"
 description = "Solana account decoder"
 authors = ["Solana Maintainers <maintainers@solana.foundation>"]
 repository = "https://github.com/solana-labs/solana"
@@ -19,11 +19,10 @@ lazy_static = "1.4.0"
 serde = "1.0.122"
 serde_derive = "1.0.103"
 serde_json = "1.0.56"
-solana-config-program = { path = "../programs/config", version = "=1.7.0" }
-solana-sdk = { path = "../sdk", version = "=1.7.0" }
-solana-stake-program = { path = "../programs/stake", version = "=1.7.0" }
-solana-vote-program = { path = "../programs/vote", version = "=1.7.0" }
-spl-token-v2-0 = { package = "spl-token", version = "=3.1.0", features = ["no-entrypoint"] }
+solana-config-program = { path = "../programs/config", version = "=1.7.4" }
+solana-sdk = { path = "../sdk", version = "=1.7.4" }
+solana-vote-program = { path = "../programs/vote", version = "=1.7.4" }
+spl-token-v2-0 = { package = "spl-token", version = "=3.1.1", features = ["no-entrypoint"] }
 thiserror = "1.0"
 zstd = "0.5.1"
account-decoder/src/lib.rs

@@ -69,32 +69,32 @@ impl UiAccount {
     ) -> Self {
         let data = match encoding {
             UiAccountEncoding::Binary => UiAccountData::LegacyBinary(
-                bs58::encode(slice_data(&account.data(), data_slice_config)).into_string(),
+                bs58::encode(slice_data(account.data(), data_slice_config)).into_string(),
             ),
             UiAccountEncoding::Base58 => UiAccountData::Binary(
-                bs58::encode(slice_data(&account.data(), data_slice_config)).into_string(),
+                bs58::encode(slice_data(account.data(), data_slice_config)).into_string(),
                 encoding,
             ),
             UiAccountEncoding::Base64 => UiAccountData::Binary(
-                base64::encode(slice_data(&account.data(), data_slice_config)),
+                base64::encode(slice_data(account.data(), data_slice_config)),
                 encoding,
             ),
             UiAccountEncoding::Base64Zstd => {
                 let mut encoder = zstd::stream::write::Encoder::new(Vec::new(), 0).unwrap();
                 match encoder
-                    .write_all(slice_data(&account.data(), data_slice_config))
+                    .write_all(slice_data(account.data(), data_slice_config))
                     .and_then(|()| encoder.finish())
                 {
                     Ok(zstd_data) => UiAccountData::Binary(base64::encode(zstd_data), encoding),
                     Err(_) => UiAccountData::Binary(
-                        base64::encode(slice_data(&account.data(), data_slice_config)),
+                        base64::encode(slice_data(account.data(), data_slice_config)),
                         UiAccountEncoding::Base64,
                     ),
                 }
             }
             UiAccountEncoding::JsonParsed => {
                 if let Ok(parsed_data) =
-                    parse_account_data(pubkey, &account.owner(), &account.data(), additional_data)
+                    parse_account_data(pubkey, account.owner(), account.data(), additional_data)
                 {
                     UiAccountData::Json(parsed_data)
                 } else {
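The `Base64Zstd` arm above compresses the account bytes and silently falls back to uncompressed base64 when compression fails. A minimal standalone sketch of that encode-with-fallback shape, using the same `zstd` 0.5 and `base64` APIs that appear in this crate's manifest:

```rust
use std::io::Write;

/// Compress `data` with zstd and base64-encode the result; on any
/// compression error, fall back to base64 of the raw bytes.
fn encode_base64_zstd(data: &[u8]) -> String {
    let mut encoder = zstd::stream::write::Encoder::new(Vec::new(), 0).unwrap();
    match encoder
        .write_all(data)
        .and_then(|()| encoder.finish())
    {
        Ok(zstd_data) => base64::encode(zstd_data),
        Err(_) => base64::encode(data),
    }
}
```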
account-decoder/src/parse_account_data.rs

@@ -9,14 +9,14 @@ use crate::{
 };
 use inflector::Inflector;
 use serde_json::Value;
-use solana_sdk::{instruction::InstructionError, pubkey::Pubkey, system_program, sysvar};
+use solana_sdk::{instruction::InstructionError, pubkey::Pubkey, stake, system_program, sysvar};
 use std::collections::HashMap;
 use thiserror::Error;

 lazy_static! {
     static ref BPF_UPGRADEABLE_LOADER_PROGRAM_ID: Pubkey = solana_sdk::bpf_loader_upgradeable::id();
     static ref CONFIG_PROGRAM_ID: Pubkey = solana_config_program::id();
-    static ref STAKE_PROGRAM_ID: Pubkey = solana_stake_program::id();
+    static ref STAKE_PROGRAM_ID: Pubkey = stake::program::id();
     static ref SYSTEM_PROGRAM_ID: Pubkey = system_program::id();
     static ref SYSVAR_PROGRAM_ID: Pubkey = sysvar::id();
     static ref TOKEN_PROGRAM_ID: Pubkey = spl_token_id_v2_0();
account-decoder/src/parse_config.rs

@@ -6,10 +6,10 @@ use bincode::deserialize;
 use serde_json::Value;
 use solana_config_program::{get_config_data, ConfigKeys};
 use solana_sdk::pubkey::Pubkey;
-use solana_stake_program::config::Config as StakeConfig;
+use solana_sdk::stake::config::{self as stake_config, Config as StakeConfig};

 pub fn parse_config(data: &[u8], pubkey: &Pubkey) -> Result<ConfigAccountType, ParseAccountError> {
-    let parsed_account = if pubkey == &solana_stake_program::config::id() {
+    let parsed_account = if pubkey == &stake_config::id() {
         get_config_data(data)
             .ok()
             .and_then(|data| deserialize::<StakeConfig>(data).ok())
@@ -37,7 +37,7 @@ fn parse_config_data<T>(data: &[u8], keys: Vec<(Pubkey, bool)>) -> Option<UiConf
 where
     T: serde::de::DeserializeOwned,
 {
-    let config_data: T = deserialize(&get_config_data(data).ok()?).ok()?;
+    let config_data: T = deserialize(get_config_data(data).ok()?).ok()?;
     let keys = keys
         .iter()
         .map(|key| UiConfigKey {
@@ -101,11 +101,7 @@ mod test {
         };
         let stake_config_account = create_config_account(vec![], &stake_config, 10);
         assert_eq!(
-            parse_config(
-                &stake_config_account.data(),
-                &solana_stake_program::config::id()
-            )
-            .unwrap(),
+            parse_config(stake_config_account.data(), &stake_config::id()).unwrap(),
             ConfigAccountType::StakeConfig(UiStakeConfig {
                 warmup_cooldown_rate: 0.25,
                 slash_penalty: 50,
@@ -125,7 +121,7 @@ mod test {
             10,
         );
         assert_eq!(
-            parse_config(&validator_info_config_account.data(), &info_pubkey).unwrap(),
+            parse_config(validator_info_config_account.data(), &info_pubkey).unwrap(),
             ConfigAccountType::ValidatorInfo(UiConfig {
                 keys: vec![
                     UiConfigKey {
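`parse_config_data` above shows the general recipe for typed config accounts: strip the `ConfigKeys` header with `get_config_data`, then bincode-deserialize the remaining payload. A hedged sketch of that recipe; `MyConfig` and its field are hypothetical stand-ins, not types from the diff:

```rust
use bincode::deserialize;
use serde::Deserialize;
use solana_config_program::get_config_data;

#[derive(Deserialize)]
struct MyConfig {
    // Hypothetical payload field for illustration only.
    rate: f64,
}

/// Strip the ConfigKeys header from a config account's data, then
/// deserialize the remaining bytes as the typed payload.
fn parse_my_config(account_data: &[u8]) -> Option<MyConfig> {
    let payload = get_config_data(account_data).ok()?;
    deserialize::<MyConfig>(payload).ok()
}
```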
account-decoder/src/parse_stake.rs

@@ -4,7 +4,7 @@ use crate::{
 };
 use bincode::deserialize;
 use solana_sdk::clock::{Epoch, UnixTimestamp};
-use solana_stake_program::stake_state::{Authorized, Delegation, Lockup, Meta, Stake, StakeState};
+use solana_sdk::stake::state::{Authorized, Delegation, Lockup, Meta, Stake, StakeState};

 pub fn parse_stake(data: &[u8]) -> Result<StakeAccountType, ParseAccountError> {
     let stake_state: StakeState = deserialize(data)
accounts-bench/Cargo.toml

@@ -2,7 +2,7 @@
 authors = ["Solana Maintainers <maintainers@solana.foundation>"]
 edition = "2018"
 name = "solana-accounts-bench"
-version = "1.7.0"
+version = "1.7.4"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -11,11 +11,11 @@ publish = false
 [dependencies]
 log = "0.4.11"
 rayon = "1.5.0"
-solana-logger = { path = "../logger", version = "=1.7.0" }
-solana-runtime = { path = "../runtime", version = "=1.7.0" }
-solana-measure = { path = "../measure", version = "=1.7.0" }
-solana-sdk = { path = "../sdk", version = "=1.7.0" }
-solana-version = { path = "../version", version = "=1.7.0" }
+solana-logger = { path = "../logger", version = "=1.7.4" }
+solana-runtime = { path = "../runtime", version = "=1.7.4" }
+solana-measure = { path = "../measure", version = "=1.7.4" }
+solana-sdk = { path = "../sdk", version = "=1.7.4" }
+solana-version = { path = "../version", version = "=1.7.4" }
 rand = "0.7.0"
 clap = "2.33.1"
 crossbeam-channel = "0.4"
accounts-bench/src/main.rs

@@ -6,6 +6,7 @@ use rayon::prelude::*;
 use solana_measure::measure::Measure;
 use solana_runtime::{
     accounts::{create_test_accounts, update_accounts_bench, Accounts},
+    accounts_db::AccountShrinkThreshold,
     accounts_index::AccountSecondaryIndexes,
     ancestors::Ancestors,
 };
@@ -64,6 +65,7 @@ fn main() {
         &ClusterType::Testnet,
         AccountSecondaryIndexes::default(),
         false,
+        AccountShrinkThreshold::default(),
     );
     println!("Creating {} accounts", num_accounts);
     let mut create_time = Measure::start("create accounts");
@@ -119,6 +121,7 @@ fn main() {
             solana_sdk::clock::Slot::default(),
             &ancestors,
             None,
+            false,
         );
         time_store.stop();
         if results != results_store {
accounts-cluster-bench/Cargo.toml

@@ -2,7 +2,7 @@
 authors = ["Solana Maintainers <maintainers@solana.foundation>"]
 edition = "2018"
 name = "solana-accounts-cluster-bench"
-version = "1.7.0"
+version = "1.7.4"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -13,23 +13,23 @@ clap = "2.33.1"
 log = "0.4.11"
 rand = "0.7.0"
 rayon = "1.4.1"
-solana-account-decoder = { path = "../account-decoder", version = "=1.7.0" }
-solana-clap-utils = { path = "../clap-utils", version = "=1.7.0" }
-solana-client = { path = "../client", version = "=1.7.0" }
-solana-core = { path = "../core", version = "=1.7.0" }
-solana-faucet = { path = "../faucet", version = "=1.7.0" }
-solana-gossip = { path = "../gossip", version = "=1.7.0" }
-solana-logger = { path = "../logger", version = "=1.7.0" }
-solana-measure = { path = "../measure", version = "=1.7.0" }
-solana-net-utils = { path = "../net-utils", version = "=1.7.0" }
-solana-runtime = { path = "../runtime", version = "=1.7.0" }
-solana-sdk = { path = "../sdk", version = "=1.7.0" }
-solana-transaction-status = { path = "../transaction-status", version = "=1.7.0" }
-solana-version = { path = "../version", version = "=1.7.0" }
-spl-token-v2-0 = { package = "spl-token", version = "=3.1.0", features = ["no-entrypoint"] }
+solana-account-decoder = { path = "../account-decoder", version = "=1.7.4" }
+solana-clap-utils = { path = "../clap-utils", version = "=1.7.4" }
+solana-client = { path = "../client", version = "=1.7.4" }
+solana-core = { path = "../core", version = "=1.7.4" }
+solana-faucet = { path = "../faucet", version = "=1.7.4" }
+solana-gossip = { path = "../gossip", version = "=1.7.4" }
+solana-logger = { path = "../logger", version = "=1.7.4" }
+solana-measure = { path = "../measure", version = "=1.7.4" }
+solana-net-utils = { path = "../net-utils", version = "=1.7.4" }
+solana-runtime = { path = "../runtime", version = "=1.7.4" }
+solana-sdk = { path = "../sdk", version = "=1.7.4" }
+solana-transaction-status = { path = "../transaction-status", version = "=1.7.4" }
+solana-version = { path = "../version", version = "=1.7.4" }
+spl-token-v2-0 = { package = "spl-token", version = "=3.1.1", features = ["no-entrypoint"] }

 [dev-dependencies]
-solana-local-cluster = { path = "../local-cluster", version = "=1.7.0" }
+solana-local-cluster = { path = "../local-cluster", version = "=1.7.4" }

 [package.metadata.docs.rs]
 targets = ["x86_64-unknown-linux-gnu"]
accounts-cluster-bench/src/main.rs

@@ -55,7 +55,7 @@ pub fn airdrop_lamports(
     );

     let (blockhash, _fee_calculator) = client.get_recent_blockhash().unwrap();
-    match request_airdrop_transaction(&faucet_addr, &id.pubkey(), airdrop_amount, blockhash) {
+    match request_airdrop_transaction(faucet_addr, &id.pubkey(), airdrop_amount, blockhash) {
         Ok(transaction) => {
             let mut tries = 0;
             loop {
@@ -363,7 +363,7 @@ fn run_accounts_bench(
     iterations: usize,
     maybe_space: Option<u64>,
     batch_size: usize,
-    close_nth: u64,
+    close_nth_batch: u64,
     maybe_lamports: Option<u64>,
     num_instructions: usize,
     mint: Option<Pubkey>,
@@ -431,7 +431,7 @@ fn run_accounts_bench(
                 if !airdrop_lamports(
                     &client,
                     &faucet_addr,
-                    &payer_keypairs[i],
+                    payer_keypairs[i],
                     lamports * 100_000,
                 ) {
                     warn!("failed airdrop, exiting");
@@ -441,6 +441,7 @@ fn run_accounts_bench(
             }
         }

+        // Create accounts
         let sigs_len = executor.num_outstanding();
        if sigs_len < batch_size {
            let num_to_create = batch_size - sigs_len;
@@ -475,21 +476,25 @@ fn run_accounts_bench(
            }
        }

-        if close_nth > 0 {
-            let expected_closed = total_accounts_created as u64 / close_nth;
-            if expected_closed > total_accounts_closed {
-                let txs: Vec<_> = (0..expected_closed - total_accounts_closed)
+        if close_nth_batch > 0 {
+            let num_batches_to_close =
+                total_accounts_created as u64 / (close_nth_batch * batch_size as u64);
+            let expected_closed = num_batches_to_close * batch_size as u64;
+            let max_closed_seed = seed_tracker.max_closed.load(Ordering::Relaxed);
+            // Close every account we've created with seed between max_closed_seed..expected_closed
+            if max_closed_seed < expected_closed {
+                let txs: Vec<_> = (0..expected_closed - max_closed_seed)
                     .into_par_iter()
                     .map(|_| {
                         let message = make_close_message(
-                            &payer_keypairs[0],
+                            payer_keypairs[0],
                             &base_keypair,
                             seed_tracker.max_closed.clone(),
                             1,
                             min_balance,
                             mint.is_some(),
                         );
-                        let signers: Vec<&Keypair> = vec![&payer_keypairs[0], &base_keypair];
+                        let signers: Vec<&Keypair> = vec![payer_keypairs[0], &base_keypair];
                         Transaction::new(&signers, message, recent_blockhash.0)
                     })
                     .collect();
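The reworked close path advances in whole batches: after every `close_nth_batch` create batches, one batch's worth of accounts becomes eligible for closing, and the seed tracker records how far closing has progressed. A worked sketch of the arithmetic with illustrative numbers (the values are assumptions, not from the diff):

```rust
fn main() {
    let batch_size: u64 = 100;
    let close_nth_batch: u64 = 2; // close one batch for every 2 batches created
    let total_accounts_created: u64 = 550;
    let max_closed_seed: u64 = 100; // accounts with seed < 100 already closed

    // 550 / (2 * 100) = 2 full "close units" completed so far.
    let num_batches_to_close = total_accounts_created / (close_nth_batch * batch_size);
    // 2 * 100 = 200 accounts should be closed by now.
    let expected_closed = num_batches_to_close * batch_size;

    if max_closed_seed < expected_closed {
        // Issue close transactions for seeds in max_closed_seed..expected_closed.
        println!("close {} accounts", expected_closed - max_closed_seed); // prints 100
    }
}
```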
@@ -572,14 +577,14 @@ fn main() {
                 .help("Number of transactions to send per batch"),
         )
         .arg(
-            Arg::with_name("close_nth")
+            Arg::with_name("close_nth_batch")
                 .long("close-frequency")
                 .takes_value(true)
                 .value_name("BYTES")
                 .help(
-                    "Send close transactions after this many accounts created. \
-                    Note: a `close-frequency` value near or below `batch-size` \
-                    may result in transaction-simulation errors, as the close \
+                    "Every `n` batches, create a batch of close transactions for
+                    the earliest remaining batch of accounts created.
+                    Note: Should be > 1 to avoid situations where the close \
                     transactions will be submitted before the corresponding \
                     create transactions have been confirmed",
                 ),
@@ -632,7 +637,7 @@ fn main() {
     let space = value_t!(matches, "space", u64).ok();
     let lamports = value_t!(matches, "lamports", u64).ok();
     let batch_size = value_t!(matches, "batch_size", usize).unwrap_or(4);
-    let close_nth = value_t!(matches, "close_nth", u64).unwrap_or(0);
+    let close_nth_batch = value_t!(matches, "close_nth_batch", u64).unwrap_or(0);
     let iterations = value_t!(matches, "iterations", usize).unwrap_or(10);
     let num_instructions = value_t!(matches, "num_instructions", usize).unwrap_or(1);
     if num_instructions == 0 || num_instructions > 500 {
@@ -685,7 +690,7 @@ fn main() {
         iterations,
         space,
         batch_size,
-        close_nth,
+        close_nth_batch,
         lamports,
         num_instructions,
         mint,
@@ -720,7 +725,7 @@ pub mod test {
         let iterations = 10;
         let maybe_space = None;
         let batch_size = 100;
-        let close_nth = 100;
+        let close_nth_batch = 100;
         let maybe_lamports = None;
         let num_instructions = 2;
         let mut start = Measure::start("total accounts run");
@@ -731,7 +736,7 @@ pub mod test {
             iterations,
             maybe_space,
             batch_size,
-            close_nth,
+            close_nth_batch,
             maybe_lamports,
             num_instructions,
             None,
banking-bench/Cargo.toml

@@ -2,7 +2,7 @@
 authors = ["Solana Maintainers <maintainers@solana.foundation>"]
 edition = "2018"
 name = "solana-banking-bench"
-version = "1.7.0"
+version = "1.7.4"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -14,17 +14,18 @@ crossbeam-channel = "0.4"
 log = "0.4.11"
 rand = "0.7.0"
 rayon = "1.5.0"
-solana-core = { path = "../core", version = "=1.7.0" }
-solana-clap-utils = { path = "../clap-utils", version = "=1.7.0" }
-solana-gossip = { path = "../gossip", version = "=1.7.0" }
-solana-ledger = { path = "../ledger", version = "=1.7.0" }
-solana-logger = { path = "../logger", version = "=1.7.0" }
-solana-measure = { path = "../measure", version = "=1.7.0" }
-solana-perf = { path = "../perf", version = "=1.7.0" }
-solana-runtime = { path = "../runtime", version = "=1.7.0" }
-solana-streamer = { path = "../streamer", version = "=1.7.0" }
-solana-sdk = { path = "../sdk", version = "=1.7.0" }
-solana-version = { path = "../version", version = "=1.7.0" }
+solana-core = { path = "../core", version = "=1.7.4" }
+solana-clap-utils = { path = "../clap-utils", version = "=1.7.4" }
+solana-gossip = { path = "../gossip", version = "=1.7.4" }
+solana-ledger = { path = "../ledger", version = "=1.7.4" }
+solana-logger = { path = "../logger", version = "=1.7.4" }
+solana-measure = { path = "../measure", version = "=1.7.4" }
+solana-perf = { path = "../perf", version = "=1.7.4" }
+solana-poh = { path = "../poh", version = "=1.7.4" }
+solana-runtime = { path = "../runtime", version = "=1.7.4" }
+solana-streamer = { path = "../streamer", version = "=1.7.4" }
+solana-sdk = { path = "../sdk", version = "=1.7.4" }
+solana-version = { path = "../version", version = "=1.7.4" }

 [package.metadata.docs.rs]
 targets = ["x86_64-unknown-linux-gnu"]
banking-bench/src/main.rs

@@ -4,11 +4,7 @@ use crossbeam_channel::unbounded;
 use log::*;
 use rand::{thread_rng, Rng};
 use rayon::prelude::*;
-use solana_core::{
-    banking_stage::{create_test_recorder, BankingStage},
-    poh_recorder::PohRecorder,
-    poh_recorder::WorkingBankEntry,
-};
+use solana_core::banking_stage::BankingStage;
 use solana_gossip::{cluster_info::ClusterInfo, cluster_info::Node};
 use solana_ledger::{
     blockstore::Blockstore,
@@ -17,6 +13,7 @@ use solana_ledger::{
 };
 use solana_measure::measure::Measure;
 use solana_perf::packet::to_packets_chunked;
+use solana_poh::poh_recorder::{create_test_recorder, PohRecorder, WorkingBankEntry};
 use solana_runtime::{
     accounts_background_service::AbsRequestSender, bank::Bank, bank_forks::BankForks,
 };
@@ -77,7 +74,7 @@ fn make_accounts_txs(
         .into_par_iter()
         .map(|_| {
             let mut new = dummy.clone();
-            let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
+            let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen::<u8>()).collect();
             if !same_payer {
                 new.message.account_keys[0] = solana_sdk::pubkey::new_rand();
             }
@@ -188,7 +185,7 @@ fn main() {
             genesis_config.hash(),
         );
         // Ignore any pesky duplicate signature errors in the case we are using single-payer
-        let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
+        let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen::<u8>()).collect();
         fund.signatures = vec![Signature::new(&sig[0..64])];
         let x = bank.process_transaction(&fund);
         x.unwrap();
@@ -198,7 +195,7 @@ fn main() {
     if !skip_sanity {
         //sanity check, make sure all the transactions can execute sequentially
         transactions.iter().for_each(|tx| {
-            let res = bank.process_transaction(&tx);
+            let res = bank.process_transaction(tx);
             assert!(res.is_ok(), "sanity test transactions error: {:?}", res);
         });
         bank.clear_signatures();
@@ -354,7 +351,7 @@ fn main() {
         if bank.slot() > 0 && bank.slot() % 16 == 0 {
             for tx in transactions.iter_mut() {
                 tx.message.recent_blockhash = bank.last_blockhash();
-                let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen()).collect();
+                let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen::<u8>()).collect();
                 tx.signatures[0] = Signature::new(&sig[0..64]);
             }
             verified = to_packets_chunked(&transactions.clone(), packets_per_chunk);
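The `gen()` calls above gain an explicit `::<u8>` turbofish, pinning the sampled type at the call site instead of relying on inference through `collect()`. A small sketch of the same pattern, assuming the rand 0.7 crate pinned in these manifests:

```rust
use rand::{thread_rng, Rng};

fn main() {
    // The element type is fixed at the call site, so the expression stays
    // unambiguous even if the collection's type annotation moves or changes.
    let sig: Vec<u8> = (0..64).map(|_| thread_rng().gen::<u8>()).collect();
    assert_eq!(sig.len(), 64);
}
```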
banks-client/Cargo.toml

@@ -1,6 +1,6 @@
 [package]
 name = "solana-banks-client"
-version = "1.7.0"
+version = "1.7.4"
 description = "Solana banks client"
 authors = ["Solana Maintainers <maintainers@solana.foundation>"]
 repository = "https://github.com/solana-labs/solana"
@@ -11,20 +11,20 @@ edition = "2018"

 [dependencies]
 bincode = "1.3.1"
-borsh = "0.8.1"
-borsh-derive = "0.8.1"
+borsh = "0.9.0"
+borsh-derive = "0.9.0"
 futures = "0.3"
 mio = "0.7.6"
-solana-banks-interface = { path = "../banks-interface", version = "=1.7.0" }
-solana-program = { path = "../sdk/program", version = "=1.7.0" }
-solana-sdk = { path = "../sdk", version = "=1.7.0" }
+solana-banks-interface = { path = "../banks-interface", version = "=1.7.4" }
+solana-program = { path = "../sdk/program", version = "=1.7.4" }
+solana-sdk = { path = "../sdk", version = "=1.7.4" }
 tarpc = { version = "0.24.1", features = ["full"] }
 tokio = { version = "1", features = ["full"] }
 tokio-serde = { version = "0.8", features = ["bincode"] }

 [dev-dependencies]
-solana-runtime = { path = "../runtime", version = "=1.7.0" }
-solana-banks-server = { path = "../banks-server", version = "=1.7.0" }
+solana-runtime = { path = "../runtime", version = "=1.7.4" }
+solana-banks-server = { path = "../banks-server", version = "=1.7.4" }

 [lib]
 crate-type = ["lib"]

banks-client/src/lib.rs

@@ -377,8 +377,8 @@ mod tests {

         let mint_pubkey = &genesis.mint_keypair.pubkey();
         let bob_pubkey = solana_sdk::pubkey::new_rand();
-        let instruction = system_instruction::transfer(&mint_pubkey, &bob_pubkey, 1);
-        let message = Message::new(&[instruction], Some(&mint_pubkey));
+        let instruction = system_instruction::transfer(mint_pubkey, &bob_pubkey, 1);
+        let message = Message::new(&[instruction], Some(mint_pubkey));

         Runtime::new()?.block_on(async {
             let client_transport = start_local_server(bank_forks, block_commitment_cache).await;
banks-interface/Cargo.toml

@@ -1,6 +1,6 @@
 [package]
 name = "solana-banks-interface"
-version = "1.7.0"
+version = "1.7.4"
 description = "Solana banks RPC interface"
 authors = ["Solana Maintainers <maintainers@solana.foundation>"]
 repository = "https://github.com/solana-labs/solana"
@@ -12,7 +12,7 @@ edition = "2018"
 [dependencies]
 mio = "0.7.6"
 serde = { version = "1.0.122", features = ["derive"] }
-solana-sdk = { path = "../sdk", version = "=1.7.0" }
+solana-sdk = { path = "../sdk", version = "=1.7.4" }
 tarpc = { version = "0.24.1", features = ["full"] }

 [dev-dependencies]
banks-server/Cargo.toml

@@ -1,6 +1,6 @@
 [package]
 name = "solana-banks-server"
-version = "1.7.0"
+version = "1.7.4"
 description = "Solana banks server"
 authors = ["Solana Maintainers <maintainers@solana.foundation>"]
 repository = "https://github.com/solana-labs/solana"
@@ -14,10 +14,10 @@ bincode = "1.3.1"
 futures = "0.3"
 log = "0.4.11"
 mio = "0.7.6"
-solana-banks-interface = { path = "../banks-interface", version = "=1.7.0" }
-solana-runtime = { path = "../runtime", version = "=1.7.0" }
-solana-sdk = { path = "../sdk", version = "=1.7.0" }
-solana-metrics = { path = "../metrics", version = "=1.7.0" }
+solana-banks-interface = { path = "../banks-interface", version = "=1.7.4" }
+solana-runtime = { path = "../runtime", version = "=1.7.4" }
+solana-sdk = { path = "../sdk", version = "=1.7.4" }
+solana-metrics = { path = "../metrics", version = "=1.7.4" }
 tarpc = { version = "0.24.1", features = ["full"] }
 tokio = { version = "1", features = ["full"] }
 tokio-serde = { version = "0.8", features = ["bincode"] }
banks-server/src/banks_server.rs

@@ -150,7 +150,7 @@ impl Banks for BanksServer {
             .read()
             .unwrap()
             .root_bank()
-            .get_blockhash_last_valid_slot(&blockhash)
+            .get_blockhash_last_valid_slot(blockhash)
             .unwrap();
         let signature = transaction.signatures.get(0).cloned().unwrap_or_default();
         let info =

banks-server/src/send_transaction_service.rs

@@ -138,8 +138,8 @@ impl SendTransactionService {
                     result.retried += 1;
                     inc_new_counter_info!("send_transaction_service-retry", 1);
                     Self::send_transaction(
-                        &send_socket,
-                        &tpu_address,
+                        send_socket,
+                        tpu_address,
                         &transaction_info.wire_transaction,
                     );
                     true
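Most of the one-character source changes in this compare remove a redundant `&` in front of a value that is already a reference, the pattern clippy reports as `needless_borrow`. A minimal before/after sketch using only the standard library:

```rust
fn takes_str(s: &str) -> usize {
    s.len()
}

fn main() {
    let name: &str = "solana";
    // Before: an extra borrow of a reference (&&str). The compiler
    // auto-derefs it, but clippy::needless_borrow flags it.
    let a = takes_str(&name);
    // After: pass the reference through directly.
    let b = takes_str(name);
    assert_eq!(a, b);
}
```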
bench-exchange/Cargo.toml

@@ -2,7 +2,7 @@
 authors = ["Solana Maintainers <maintainers@solana.foundation>"]
 edition = "2018"
 name = "solana-bench-exchange"
-version = "1.7.0"
+version = "1.7.4"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -18,22 +18,22 @@ rand = "0.7.0"
 rayon = "1.5.0"
 serde_json = "1.0.56"
 serde_yaml = "0.8.13"
-solana-clap-utils = { path = "../clap-utils", version = "=1.7.0" }
-solana-core = { path = "../core", version = "=1.7.0" }
-solana-genesis = { path = "../genesis", version = "=1.7.0" }
-solana-client = { path = "../client", version = "=1.7.0" }
-solana-exchange-program = { path = "../programs/exchange", version = "=1.7.0" }
-solana-faucet = { path = "../faucet", version = "=1.7.0" }
-solana-gossip = { path = "../gossip", version = "=1.7.0" }
-solana-logger = { path = "../logger", version = "=1.7.0" }
-solana-metrics = { path = "../metrics", version = "=1.7.0" }
-solana-net-utils = { path = "../net-utils", version = "=1.7.0" }
-solana-runtime = { path = "../runtime", version = "=1.7.0" }
-solana-sdk = { path = "../sdk", version = "=1.7.0" }
-solana-version = { path = "../version", version = "=1.7.0" }
+solana-clap-utils = { path = "../clap-utils", version = "=1.7.4" }
+solana-core = { path = "../core", version = "=1.7.4" }
+solana-genesis = { path = "../genesis", version = "=1.7.4" }
+solana-client = { path = "../client", version = "=1.7.4" }
+solana-exchange-program = { path = "../programs/exchange", version = "=1.7.4" }
+solana-faucet = { path = "../faucet", version = "=1.7.4" }
+solana-gossip = { path = "../gossip", version = "=1.7.4" }
+solana-logger = { path = "../logger", version = "=1.7.4" }
+solana-metrics = { path = "../metrics", version = "=1.7.4" }
+solana-net-utils = { path = "../net-utils", version = "=1.7.4" }
+solana-runtime = { path = "../runtime", version = "=1.7.4" }
+solana-sdk = { path = "../sdk", version = "=1.7.4" }
+solana-version = { path = "../version", version = "=1.7.4" }

 [dev-dependencies]
-solana-local-cluster = { path = "../local-cluster", version = "=1.7.0" }
+solana-local-cluster = { path = "../local-cluster", version = "=1.7.4" }

 [package.metadata.docs.rs]
 targets = ["x86_64-unknown-linux-gnu"]
bench-exchange/src/bench.rs

@@ -451,13 +451,13 @@ fn swapper<T>(
         let to_swap_txs: Vec<_> = to_swap
             .par_iter()
             .map(|(signer, swap, profit)| {
-                let s: &Keypair = &signer;
+                let s: &Keypair = signer;
                 let owner = &signer.pubkey();
                 let instruction = exchange_instruction::swap_request(
                     owner,
                     &swap.0.pubkey,
                     &swap.1.pubkey,
-                    &profit,
+                    profit,
                 );
                 let message = Message::new(&[instruction], Some(&s.pubkey()));
                 Transaction::new(&[s], message, blockhash)
@@ -600,7 +600,7 @@ fn trader<T>(
                     src,
                 ),
             ];
-            let message = Message::new(&instructions, Some(&owner_pubkey));
+            let message = Message::new(&instructions, Some(owner_pubkey));
             Transaction::new(&[owner.as_ref(), trade], message, blockhash)
         })
         .collect();
@@ -739,7 +739,7 @@ pub fn fund_keys<T: Client>(client: &T, source: &Keypair, dests: &[Arc<Keypair>]
         let mut to_fund_txs: Vec<_> = chunk
             .par_iter()
             .map(|(k, m)| {
-                let instructions = system_instruction::transfer_many(&k.pubkey(), &m);
+                let instructions = system_instruction::transfer_many(&k.pubkey(), m);
                 let message = Message::new(&instructions, Some(&k.pubkey()));
                 (k.clone(), Transaction::new_unsigned(message))
             })
@@ -777,7 +777,7 @@ pub fn fund_keys<T: Client>(client: &T, source: &Keypair, dests: &[Arc<Keypair>]
         let mut waits = 0;
         loop {
             sleep(Duration::from_millis(200));
-            to_fund_txs.retain(|(_, tx)| !verify_funding_transfer(client, &tx, amount));
+            to_fund_txs.retain(|(_, tx)| !verify_funding_transfer(client, tx, amount));
             if to_fund_txs.is_empty() {
                 break;
             }
@@ -836,7 +836,7 @@ pub fn create_token_accounts<T: Client>(
             );
             let request_ix =
                 exchange_instruction::account_request(owner_pubkey, &new_keypair.pubkey());
-            let message = Message::new(&[create_ix, request_ix], Some(&owner_pubkey));
+            let message = Message::new(&[create_ix, request_ix], Some(owner_pubkey));
             (
                 (from_keypair, new_keypair),
                 Transaction::new_unsigned(message),
@@ -872,7 +872,7 @@ pub fn create_token_accounts<T: Client>(
         let mut waits = 0;
         while !to_create_txs.is_empty() {
             sleep(Duration::from_millis(200));
-            to_create_txs.retain(|(_, tx)| !verify_transaction(client, &tx));
+            to_create_txs.retain(|(_, tx)| !verify_transaction(client, tx));
             if to_create_txs.is_empty() {
                 break;
             }
@@ -958,7 +958,7 @@ fn compute_and_report_stats(maxes: &Arc<RwLock<Vec<(String, SampleStats)>>>, tot

 fn generate_keypairs(num: u64) -> Vec<Keypair> {
     let mut seed = [0_u8; 32];
-    seed.copy_from_slice(&Keypair::new().pubkey().as_ref());
+    seed.copy_from_slice(Keypair::new().pubkey().as_ref());
     let mut rnd = GenKeys::new(seed);
     rnd.gen_n_keypairs(num)
 }
@@ -989,7 +989,7 @@ pub fn airdrop_lamports<T: Client>(
     let (blockhash, _fee_calculator, _last_valid_slot) = client
         .get_recent_blockhash_with_commitment(CommitmentConfig::processed())
         .expect("Failed to get blockhash");
-    match request_airdrop_transaction(&faucet_addr, &id.pubkey(), amount_to_drop, blockhash) {
+    match request_airdrop_transaction(faucet_addr, &id.pubkey(), amount_to_drop, blockhash) {
         Ok(transaction) => {
             let signature = client.async_send_transaction(transaction).unwrap();
bench-streamer/Cargo.toml

@@ -2,7 +2,7 @@
 authors = ["Solana Maintainers <maintainers@solana.foundation>"]
 edition = "2018"
 name = "solana-bench-streamer"
-version = "1.7.0"
+version = "1.7.4"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -10,11 +10,11 @@ publish = false

 [dependencies]
 clap = "2.33.1"
-solana-clap-utils = { path = "../clap-utils", version = "=1.7.0" }
-solana-streamer = { path = "../streamer", version = "=1.7.0" }
-solana-logger = { path = "../logger", version = "=1.7.0" }
-solana-net-utils = { path = "../net-utils", version = "=1.7.0" }
-solana-version = { path = "../version", version = "=1.7.0" }
+solana-clap-utils = { path = "../clap-utils", version = "=1.7.4" }
+solana-streamer = { path = "../streamer", version = "=1.7.4" }
+solana-logger = { path = "../logger", version = "=1.7.4" }
+solana-net-utils = { path = "../net-utils", version = "=1.7.4" }
+solana-version = { path = "../version", version = "=1.7.4" }

 [package.metadata.docs.rs]
 targets = ["x86_64-unknown-linux-gnu"]
bench-streamer/src/main.rs

@@ -18,7 +18,7 @@ fn producer(addr: &SocketAddr, exit: Arc<AtomicBool>) -> JoinHandle<()> {
     msgs.packets.resize(10, Packet::default());
     for w in msgs.packets.iter_mut() {
         w.meta.size = PACKET_DATA_SIZE;
-        w.meta.set_addr(&addr);
+        w.meta.set_addr(addr);
     }
     let msgs = Arc::new(msgs);
     spawn(move || loop {
@@ -92,6 +92,7 @@ fn main() -> Result<()> {
             recycler.clone(),
             "bench-streamer-test",
             1,
+            true,
         ));
     }
bench-tps/Cargo.toml

@@ -2,7 +2,7 @@
 authors = ["Solana Maintainers <maintainers@solana.foundation>"]
 edition = "2018"
 name = "solana-bench-tps"
-version = "1.7.0"
+version = "1.7.4"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -15,23 +15,23 @@ log = "0.4.11"
 rayon = "1.5.0"
 serde_json = "1.0.56"
 serde_yaml = "0.8.13"
-solana-clap-utils = { path = "../clap-utils", version = "=1.7.0" }
-solana-core = { path = "../core", version = "=1.7.0" }
-solana-genesis = { path = "../genesis", version = "=1.7.0" }
-solana-client = { path = "../client", version = "=1.7.0" }
-solana-faucet = { path = "../faucet", version = "=1.7.0" }
-solana-gossip = { path = "../gossip", version = "=1.7.0" }
-solana-logger = { path = "../logger", version = "=1.7.0" }
-solana-metrics = { path = "../metrics", version = "=1.7.0" }
-solana-measure = { path = "../measure", version = "=1.7.0" }
-solana-net-utils = { path = "../net-utils", version = "=1.7.0" }
-solana-runtime = { path = "../runtime", version = "=1.7.0" }
-solana-sdk = { path = "../sdk", version = "=1.7.0" }
-solana-version = { path = "../version", version = "=1.7.0" }
+solana-clap-utils = { path = "../clap-utils", version = "=1.7.4" }
+solana-core = { path = "../core", version = "=1.7.4" }
+solana-genesis = { path = "../genesis", version = "=1.7.4" }
+solana-client = { path = "../client", version = "=1.7.4" }
+solana-faucet = { path = "../faucet", version = "=1.7.4" }
+solana-gossip = { path = "../gossip", version = "=1.7.4" }
+solana-logger = { path = "../logger", version = "=1.7.4" }
+solana-metrics = { path = "../metrics", version = "=1.7.4" }
+solana-measure = { path = "../measure", version = "=1.7.4" }
+solana-net-utils = { path = "../net-utils", version = "=1.7.4" }
+solana-runtime = { path = "../runtime", version = "=1.7.4" }
+solana-sdk = { path = "../sdk", version = "=1.7.4" }
+solana-version = { path = "../version", version = "=1.7.4" }

 [dev-dependencies]
 serial_test = "0.4.0"
-solana-local-cluster = { path = "../local-cluster", version = "=1.7.0" }
+solana-local-cluster = { path = "../local-cluster", version = "=1.7.4" }

 [package.metadata.docs.rs]
 targets = ["x86_64-unknown-linux-gnu"]
|
@@ -544,12 +544,12 @@ impl<'a> FundingTransactions<'a> for Vec<(&'a Keypair, Transaction)> {
|
||||
|
||||
// re-sign retained to_fund_txes with updated blockhash
|
||||
self.sign(blockhash);
|
||||
self.send(&client);
|
||||
self.send(client);
|
||||
|
||||
// Sleep a few slots to allow transactions to process
|
||||
sleep(Duration::from_secs(1));
|
||||
|
||||
self.verify(&client, to_lamports);
|
||||
self.verify(client, to_lamports);
|
||||
|
||||
// retry anything that seems to have dropped through cracks
|
||||
// again since these txs are all or nothing, they're fine to
|
||||
@@ -564,7 +564,7 @@ impl<'a> FundingTransactions<'a> for Vec<(&'a Keypair, Transaction)> {
|
||||
let to_fund_txs: Vec<(&Keypair, Transaction)> = to_fund
|
||||
.par_iter()
|
||||
.map(|(k, t)| {
|
||||
let instructions = system_instruction::transfer_many(&k.pubkey(), &t);
|
||||
let instructions = system_instruction::transfer_many(&k.pubkey(), t);
|
||||
let message = Message::new(&instructions, Some(&k.pubkey()));
|
||||
(*k, Transaction::new_unsigned(message))
|
||||
})
|
||||
@@ -617,7 +617,7 @@ impl<'a> FundingTransactions<'a> for Vec<(&'a Keypair, Transaction)> {
|
||||
return None;
|
||||
}
|
||||
|
||||
let verified = if verify_funding_transfer(&client, &tx, to_lamports) {
|
||||
let verified = if verify_funding_transfer(&client, tx, to_lamports) {
|
||||
verified_txs.fetch_add(1, Ordering::Relaxed);
|
||||
Some(k.pubkey())
|
||||
} else {
|
||||
@@ -733,7 +733,7 @@ pub fn airdrop_lamports<T: Client>(
|
||||
);
|
||||
|
||||
let (blockhash, _fee_calculator) = get_recent_blockhash(client);
|
||||
match request_airdrop_transaction(&faucet_addr, &id.pubkey(), airdrop_amount, blockhash) {
|
||||
match request_airdrop_transaction(faucet_addr, &id.pubkey(), airdrop_amount, blockhash) {
|
||||
Ok(transaction) => {
|
||||
let mut tries = 0;
|
||||
loop {
|
||||
|
bench-tps/src/main.rs

@@ -39,7 +39,7 @@ fn main() {
     let keypair_count = *tx_count * keypair_multiplier;
     if *write_to_client_file {
         info!("Generating {} keypairs", keypair_count);
-        let (keypairs, _) = generate_keypairs(&id, keypair_count as u64);
+        let (keypairs, _) = generate_keypairs(id, keypair_count as u64);
         let num_accounts = keypairs.len() as u64;
         let max_fee =
             FeeRateGovernor::new(*target_lamports_per_signature, 0).max_lamports_per_signature;
@@ -68,7 +68,7 @@ fn main() {
     }

     info!("Connecting to the cluster");
-    let nodes = discover_cluster(&entrypoint_addr, *num_nodes).unwrap_or_else(|err| {
+    let nodes = discover_cluster(entrypoint_addr, *num_nodes).unwrap_or_else(|err| {
         eprintln!("Failed to discover {} nodes: {:?}", num_nodes, err);
         exit(1);
     });
@@ -135,7 +135,7 @@ fn main() {
             generate_and_fund_keypairs(
                 client.clone(),
                 Some(*faucet_addr),
-                &id,
+                id,
                 keypair_count,
                 *num_lamports_per_account,
             )
ci/buildkite-secondary.yml

@@ -3,13 +3,19 @@
 # Pull requests to not run these steps.
 steps:
   - command: "ci/publish-tarball.sh"
+    agents:
+      - "queue=release-build"
     timeout_in_minutes: 60
     name: "publish tarball"
   - wait
   - command: "sdk/docker-solana/build.sh"
+    agents:
+      - "queue=release-build"
     timeout_in_minutes: 60
     name: "publish docker"
   - command: "ci/publish-crate.sh"
+    agents:
+      - "queue=release-build"
     timeout_in_minutes: 240
     name: "publish crate"
     branches: "!master"
ci/env.sh

@@ -74,10 +74,13 @@ else
   export CI_BUILD_ID=
   export CI_COMMIT=
   export CI_JOB_ID=
-  export CI_OS_NAME=
   export CI_PULL_REQUEST=
   export CI_REPO_SLUG=
   export CI_TAG=
+  # Don't override ci/run-local.sh
+  if [[ -z $CI_LOCAL_RUN ]]; then
+    export CI_OS_NAME=
+  fi
 fi

 cat <<EOF
ci/localnet-sanity.sh

@@ -127,7 +127,7 @@ startNode() {
 waitForNodeToInit() {
   declare initCompleteFile=$1
   while [[ ! -r $initCompleteFile ]]; do
-    if [[ $SECONDS -ge 240 ]]; then
+    if [[ $SECONDS -ge 300 ]]; then
       echo "^^^ +++"
       echo "Error: $initCompleteFile not found in $SECONDS seconds"
       exit 1
ci/order-crates-for-publishing.py

@@ -12,10 +12,14 @@ import json
 import subprocess
 import sys;

+real_file = os.path.realpath(__file__)
+ci_path = os.path.dirname(real_file)
+src_root = os.path.dirname(ci_path)
+
 def load_metadata():
+    cmd = f'{src_root}/cargo metadata --no-deps --format-version=1'
     return json.loads(subprocess.Popen(
-        'cargo metadata --no-deps --format-version=1',
-        shell=True, stdout=subprocess.PIPE).communicate()[0])
+        cmd, shell=True, stdout=subprocess.PIPE).communicate()[0])

 def get_packages():
     metadata = load_metadata()
ci/run-local.sh (new executable file, 54 lines)

@@ -0,0 +1,54 @@
+#!/usr/bin/env bash
+
+cd "$(dirname "$0")/.."
+
+export CI_LOCAL_RUN=true
+
+set -e
+
+case $(uname -o) in
+*/Linux)
+  export CI_OS_NAME=linux
+  ;;
+*)
+  echo "local CI runs are only supported on Linux" 1>&2
+  exit 1
+  ;;
+esac
+
+steps=()
+steps+=(test-sanity)
+steps+=(shellcheck)
+steps+=(test-checks)
+steps+=(test-coverage)
+steps+=(test-stable)
+steps+=(test-stable-perf)
+steps+=(test-downstream-builds)
+steps+=(test-bench)
+steps+=(test-local-cluster)
+
+step_index=0
+if [[ -n "$1" ]]; then
+  start_step="$1"
+  while [[ $step_index -lt ${#steps[@]} ]]; do
+    step="${steps[$step_index]}"
+    if [[ "$step" = "$start_step" ]]; then
+      break
+    fi
+    step_index=$((step_index + 1))
+  done
+  if [[ $step_index -eq ${#steps[@]} ]]; then
+    echo "unexpected start step: \"$start_step\"" 1>&2
+    exit 1
+  else
+    echo "** starting at step: \"$start_step\" **"
+    echo
+  fi
+fi
+
+while [[ $step_index -lt ${#steps[@]} ]]; do
+  step="${steps[$step_index]}"
+  cmd="ci/${step}.sh"
+  $cmd
+  step_index=$((step_index + 1))
+done
ci/test-bench.sh

@@ -49,6 +49,10 @@ _ "$cargo" nightly bench --manifest-path runtime/Cargo.toml ${V:+--verbose} \
 _ "$cargo" nightly bench --manifest-path gossip/Cargo.toml ${V:+--verbose} \
   -- -Z unstable-options --format=json | tee -a "$BENCH_FILE"

+# Run poh benches
+_ "$cargo" nightly bench --manifest-path poh/Cargo.toml ${V:+--verbose} \
+  -- -Z unstable-options --format=json | tee -a "$BENCH_FILE"
+
 # Run core benches
 _ "$cargo" nightly bench --manifest-path core/Cargo.toml ${V:+--verbose} \
   -- -Z unstable-options --format=json | tee -a "$BENCH_FILE"
ci/test-downstream-builds.sh (new executable file, 9 lines)

@@ -0,0 +1,9 @@
+#!/usr/bin/env bash
+
+cd "$(dirname "$0")/.."
+
+export CI_LOCAL_RUN=true
+
+set -ex
+
+scripts/build-downstream-projects.sh
ci/test-stable.sh

@@ -57,7 +57,7 @@ test-stable-perf)
     # Enable persistence mode to keep the CUDA kernel driver loaded, avoiding a
     # lengthy and unexpected delay the first time CUDA is involved when the driver
     # is not yet loaded.
-    sudo --non-interactive ./net/scripts/enable-nvidia-persistence-mode.sh
+    sudo --non-interactive ./net/scripts/enable-nvidia-persistence-mode.sh || true

     rm -rf target/perf-libs
     ./fetch-perf-libs.sh
clap-utils/Cargo.toml

@@ -1,6 +1,6 @@
 [package]
 name = "solana-clap-utils"
-version = "1.7.0"
+version = "1.7.4"
 description = "Solana utilities for the clap"
 authors = ["Solana Maintainers <maintainers@solana.foundation>"]
 repository = "https://github.com/solana-labs/solana"
@@ -12,8 +12,8 @@ edition = "2018"
 [dependencies]
 clap = "2.33.0"
 rpassword = "4.0"
-solana-remote-wallet = { path = "../remote-wallet", version = "=1.7.0" }
-solana-sdk = { path = "../sdk", version = "=1.7.0" }
+solana-remote-wallet = { path = "../remote-wallet", version = "=1.7.4" }
+solana-sdk = { path = "../sdk", version = "=1.7.4" }
 thiserror = "1.0.21"
 tiny-bip39 = "0.8.0"
 uriparse = "0.6.3"
clap-utils/src/keypair.rs

@@ -24,9 +24,11 @@ use {
         },
     },
     std::{
+        cell::RefCell,
         convert::TryFrom,
         error,
         io::{stdin, stdout, Write},
+        ops::Deref,
         process::exit,
         str::FromStr,
         sync::Arc,
@@ -89,33 +91,49 @@ impl CliSignerInfo {
             .collect()
     }
 }
-#[derive(Debug)]
+
+#[derive(Debug, Default)]
 pub struct DefaultSigner {
     pub arg_name: String,
     pub path: String,
+    is_path_checked: RefCell<bool>,
 }

 impl DefaultSigner {
-    pub fn new(path: String) -> Self {
+    pub fn new<AN: AsRef<str>, P: AsRef<str>>(arg_name: AN, path: P) -> Self {
+        let arg_name = arg_name.as_ref().to_string();
+        let path = path.as_ref().to_string();
         Self {
-            arg_name: "keypair".to_string(),
+            arg_name,
             path,
+            ..Self::default()
         }
     }
-    pub fn from_path(path: String) -> Result<Self, Box<dyn error::Error>> {
-        std::fs::metadata(&path)
-            .map_err(|_| {
-                std::io::Error::new(
-                    std::io::ErrorKind::Other,
-                    format!(
-                        "No default signer found, run \"solana-keygen new -o {}\" to create a new one",
-                        path
-                    ),
-                )
-                .into()
-            })
-            .map(|_| Self::new(path))
+
+    fn path(&self) -> Result<&str, Box<dyn std::error::Error>> {
+        if !self.is_path_checked.borrow().deref() {
+            parse_signer_source(&self.path)
+                .and_then(|s| {
+                    if let SignerSourceKind::Filepath(path) = &s.kind {
+                        std::fs::metadata(path).map(|_| ()).map_err(|e| e.into())
+                    } else {
+                        Ok(())
+                    }
+                })
+                .map_err(|_| {
+                    std::io::Error::new(
+                        std::io::ErrorKind::Other,
+                        format!(
+                            "No default signer found, run \"solana-keygen new -o {}\" to create a new one",
+                            self.path
+                        ),
+                    )
+                })?;
+            *self.is_path_checked.borrow_mut() = true;
+        }
+        Ok(&self.path)
     }

     pub fn generate_unique_signers(
         &self,
         bulk_signers: Vec<Option<Box<dyn Signer>>>,
@@ -145,7 +163,7 @@ impl DefaultSigner {
         matches: &ArgMatches,
         wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
     ) -> Result<Box<dyn Signer>, Box<dyn std::error::Error>> {
-        signer_from_path(matches, &self.path, &self.arg_name, wallet_manager)
+        signer_from_path(matches, self.path()?, &self.arg_name, wallet_manager)
     }

     pub fn signer_from_path_with_config(
@@ -154,7 +172,13 @@ impl DefaultSigner {
         wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
         config: &SignerFromPathConfig,
     ) -> Result<Box<dyn Signer>, Box<dyn std::error::Error>> {
-        signer_from_path_with_config(matches, &self.path, &self.arg_name, wallet_manager, config)
+        signer_from_path_with_config(
+            matches,
+            self.path()?,
+            &self.arg_name,
+            wallet_manager,
+            config,
+        )
     }
 }

@@ -277,7 +301,9 @@ pub(crate) fn parse_signer_source<S: AsRef<str>>(
         ASK_KEYWORD => Ok(SignerSource::new_legacy(SignerSourceKind::Prompt)),
         _ => match Pubkey::from_str(source.as_str()) {
             Ok(pubkey) => Ok(SignerSource::new(SignerSourceKind::Pubkey(pubkey))),
-            Err(_) => Ok(SignerSource::new(SignerSourceKind::Filepath(source))),
+            Err(_) => std::fs::metadata(source.as_str())
+                .map(|_| SignerSource::new(SignerSourceKind::Filepath(source)))
+                .map_err(|err| err.into()),
         },
     }
 }
@@ -480,7 +506,7 @@ pub const SKIP_SEED_PHRASE_VALIDATION_ARG: ArgConstant<'static> = ArgConstant {

 /// Prompts user for a passphrase and then asks for confirmirmation to check for mistakes
 pub fn prompt_passphrase(prompt: &str) -> Result<String, Box<dyn error::Error>> {
-    let passphrase = prompt_password_stderr(&prompt)?;
+    let passphrase = prompt_password_stderr(prompt)?;
     if !passphrase.is_empty() {
         let confirmed = rpassword::prompt_password_stderr("Enter same passphrase again: ")?;
         if confirmed != passphrase {
@@ -560,9 +586,9 @@ pub fn keypair_from_seed_phrase(
     let keypair = if skip_validation {
         let passphrase = prompt_passphrase(&passphrase_prompt)?;
         if legacy {
-            keypair_from_seed_phrase_and_passphrase(&seed_phrase, &passphrase)?
+            keypair_from_seed_phrase_and_passphrase(seed_phrase, &passphrase)?
         } else {
-            let seed = generate_seed_from_seed_phrase_and_passphrase(&seed_phrase, &passphrase);
+            let seed = generate_seed_from_seed_phrase_and_passphrase(seed_phrase, &passphrase);
             keypair_from_seed_and_derivation_path(&seed, derivation_path)?
         }
     } else {
@@ -590,7 +616,7 @@ pub fn keypair_from_seed_phrase(
         if legacy {
             keypair_from_seed(seed.as_bytes())?
         } else {
-            keypair_from_seed_and_derivation_path(&seed.as_bytes(), derivation_path)?
+            keypair_from_seed_and_derivation_path(seed.as_bytes(), derivation_path)?
        }
    };
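The branches above chain the SDK's seed-phrase helpers. A rough sketch of deriving a keypair from a phrase plus passphrase, assuming these helpers are exposed under `solana_sdk::signer::keypair` in the 1.7 line; the mnemonic and passphrase are placeholder test values, not anything from the diff:

```rust
use solana_sdk::signer::{
    keypair::{
        generate_seed_from_seed_phrase_and_passphrase, keypair_from_seed_and_derivation_path,
    },
    Signer,
};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Hypothetical inputs; a real CLI would prompt for these.
    let seed_phrase = "legal winner thank year wave sausage worth useful legal winner thank yellow";
    let passphrase = "hunter2";

    let seed = generate_seed_from_seed_phrase_and_passphrase(seed_phrase, passphrase);
    // Passing `None` leaves derivation to the helper's default behavior
    // (an assumption here, mirroring the non-legacy branch above).
    let keypair = keypair_from_seed_and_derivation_path(&seed, None)?;
    println!("{}", keypair.pubkey());
    Ok(())
}
```

The remaining hunk in this file updates the tests for the stricter filepath parsing: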
@@ -751,6 +777,10 @@ mod tests {
         // Catchall into SignerSource::Filepath fails
         let junk = "sometextthatisnotapubkeyorfile".to_string();
         assert!(Pubkey::from_str(&junk).is_err());
+        assert!(matches!(
+            parse_signer_source(&junk),
+            Err(SignerSourceError::IoError(_))
+        ));

         let prompt = "prompt:".to_string();
         assert!(matches!(
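`DefaultSigner::new` now takes the clap argument name explicitly, and path validation is deferred into the private `path()` accessor, so non-filepath signer sources (USB wallets, prompts) no longer need a file on disk up front. A rough usage sketch under those assumptions; the keypair path is a hypothetical example value:

```rust
use solana_clap_utils::keypair::DefaultSigner;
use solana_sdk::signature::Signer;

fn demo(matches: &clap::ArgMatches) -> Result<(), Box<dyn std::error::Error>> {
    // The arg name is now a parameter instead of the hard-coded "keypair".
    let default_signer = DefaultSigner::new("keypair", "/home/user/.config/solana/id.json");

    // Existence of the path is only checked here, when the signer is
    // actually resolved, and only for filepath-kind sources.
    let mut wallet_manager = None;
    let signer = default_signer.signer_from_path(matches, &mut wallet_manager)?;
    println!("signing as {}", signer.pubkey());
    Ok(())
}
```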
cli-config/Cargo.toml

@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
 edition = "2018"
 name = "solana-cli-config"
 description = "Blockchain, Rebuilt for Scale"
-version = "1.7.0"
+version = "1.7.4"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
cli-config/src/config.rs

@@ -107,24 +107,24 @@ mod test {
     #[test]
     fn compute_websocket_url() {
         assert_eq!(
-            Config::compute_websocket_url(&"http://api.devnet.solana.com"),
+            Config::compute_websocket_url("http://api.devnet.solana.com"),
             "ws://api.devnet.solana.com/".to_string()
         );

         assert_eq!(
-            Config::compute_websocket_url(&"https://api.devnet.solana.com"),
+            Config::compute_websocket_url("https://api.devnet.solana.com"),
             "wss://api.devnet.solana.com/".to_string()
         );

         assert_eq!(
-            Config::compute_websocket_url(&"http://example.com:8899"),
+            Config::compute_websocket_url("http://example.com:8899"),
             "ws://example.com:8900/".to_string()
         );
         assert_eq!(
-            Config::compute_websocket_url(&"https://example.com:1234"),
+            Config::compute_websocket_url("https://example.com:1234"),
             "wss://example.com:1235/".to_string()
         );

-        assert_eq!(Config::compute_websocket_url(&"garbage"), String::new());
+        assert_eq!(Config::compute_websocket_url("garbage"), String::new());
     }
 }
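These tests pin down `compute_websocket_url`: `http` maps to `ws`, `https` to `wss`, an explicit port is bumped by one for the pubsub endpoint, and unparseable input yields an empty string. A standalone sketch of that rule, assuming the `url` crate rather than whatever parsing the real implementation uses:

```rust
use url::Url;

/// http -> ws, https -> wss; an explicit port is bumped by one
/// (the RPC pubsub convention the tests above encode); anything else -> "".
fn compute_websocket_url(rpc_url: &str) -> String {
    let url = match Url::parse(rpc_url) {
        Ok(url) => url,
        Err(_) => return String::new(), // "garbage" -> ""
    };
    let scheme = match url.scheme() {
        "http" => "ws",
        "https" => "wss",
        _ => return String::new(),
    };
    let host = url.host_str().unwrap_or("");
    match url.port() {
        Some(port) => format!("{}://{}:{}/", scheme, host, port + 1), // 8899 -> 8900
        None => format!("{}://{}/", scheme, host),
    }
}
```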
cli-output/Cargo.toml

@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
 edition = "2018"
 name = "solana-cli-output"
 description = "Blockchain, Rebuilt for Scale"
-version = "1.7.0"
+version = "1.7.4"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -12,20 +12,19 @@ documentation = "https://docs.rs/solana-cli-output"
 [dependencies]
 base64 = "0.13.0"
 chrono = { version = "0.4.11", features = ["serde"] }
-console = "0.11.3"
+console = "0.14.1"
 humantime = "2.0.1"
 Inflector = "0.11.4"
 indicatif = "0.15.0"
 serde = "1.0.122"
 serde_derive = "1.0.103"
 serde_json = "1.0.56"
-solana-account-decoder = { path = "../account-decoder", version = "=1.7.0" }
-solana-clap-utils = { path = "../clap-utils", version = "=1.7.0" }
-solana-client = { path = "../client", version = "=1.7.0" }
-solana-sdk = { path = "../sdk", version = "=1.7.0" }
-solana-stake-program = { path = "../programs/stake", version = "=1.7.0" }
-solana-transaction-status = { path = "../transaction-status", version = "=1.7.0" }
-solana-vote-program = { path = "../programs/vote", version = "=1.7.0" }
+solana-account-decoder = { path = "../account-decoder", version = "=1.7.4" }
+solana-clap-utils = { path = "../clap-utils", version = "=1.7.4" }
+solana-client = { path = "../client", version = "=1.7.4" }
+solana-sdk = { path = "../sdk", version = "=1.7.4" }
+solana-transaction-status = { path = "../transaction-status", version = "=1.7.4" }
+solana-vote-program = { path = "../programs/vote", version = "=1.7.4" }
 spl-memo = { version = "=3.0.1", features = ["no-entrypoint"] }

 [package.metadata.docs.rs]
@@ -25,10 +25,10 @@ use {
         native_token::lamports_to_sol,
         pubkey::Pubkey,
         signature::Signature,
+        stake::state::{Authorized, Lockup},
         stake_history::StakeHistoryEntry,
         transaction::{Transaction, TransactionError},
     },
-    solana_stake_program::stake_state::{Authorized, Lockup},
     solana_transaction_status::{
         EncodedConfirmedBlock, EncodedTransaction, TransactionConfirmationStatus,
         UiTransactionStatusMeta,
@@ -233,6 +233,10 @@ pub struct CliEpochInfo {
     pub epoch_info: EpochInfo,
     #[serde(skip)]
     pub average_slot_time_ms: u64,
+    #[serde(skip)]
+    pub start_block_time: Option<UnixTimestamp>,
+    #[serde(skip)]
+    pub current_block_time: Option<UnixTimestamp>,
 }

 impl QuietDisplay for CliEpochInfo {}
@@ -277,21 +281,41 @@ impl fmt::Display for CliEpochInfo {
                 remaining_slots_in_epoch
             ),
         )?;
+        let (time_elapsed, annotation) = if let (Some(start_block_time), Some(current_block_time)) =
+            (self.start_block_time, self.current_block_time)
+        {
+            (
+                Duration::from_secs((current_block_time - start_block_time) as u64),
+                None,
+            )
+        } else {
+            (
+                slot_to_duration(self.epoch_info.slot_index, self.average_slot_time_ms),
+                Some("* estimated based on current slot durations"),
+            )
+        };
+        let time_remaining = slot_to_duration(remaining_slots_in_epoch, self.average_slot_time_ms);
         writeln_name_value(
             f,
             "Epoch Completed Time:",
             &format!(
-                "{}/{} ({} remaining)",
-                slot_to_human_time(self.epoch_info.slot_index, self.average_slot_time_ms),
-                slot_to_human_time(self.epoch_info.slots_in_epoch, self.average_slot_time_ms),
-                slot_to_human_time(remaining_slots_in_epoch, self.average_slot_time_ms)
+                "{}{}/{} ({} remaining)",
+                humantime::format_duration(time_elapsed).to_string(),
+                if annotation.is_some() { "*" } else { "" },
+                humantime::format_duration(time_elapsed + time_remaining).to_string(),
+                humantime::format_duration(time_remaining).to_string(),
             ),
-        )
+        )?;
+        if let Some(annotation) = annotation {
+            writeln!(f)?;
+            writeln!(f, "{}", annotation)?;
+        }
+        Ok(())
     }
 }

-fn slot_to_human_time(slot: Slot, slot_time_ms: u64) -> String {
-    humantime::format_duration(Duration::from_secs((slot * slot_time_ms) / 1000)).to_string()
+fn slot_to_duration(slot: Slot, slot_time_ms: u64) -> Duration {
+    Duration::from_secs((slot * slot_time_ms) / 1000)
 }

 #[derive(Serialize, Deserialize, Default)]
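The display hunk above prefers real wall-clock elapsed time when both epoch-boundary block times are available, and falls back to the slot-based estimate, marking it with an asterisk. A condensed sketch of that decision, assuming Unix-second timestamps; epoch_elapsed is illustrative, not the CLI's API:

use std::time::Duration;

// Illustrative condensation of the logic above: exact wall-clock elapsed
// time when both boundary block times are known, slot-count estimate otherwise.
fn epoch_elapsed(
    start_block_time: Option<i64>,   // Unix seconds at the epoch's first slot
    current_block_time: Option<i64>, // Unix seconds at the current slot
    slot_index: u64,
    average_slot_time_ms: u64,
) -> (Duration, bool) {
    match (start_block_time, current_block_time) {
        (Some(start), Some(now)) => (Duration::from_secs((now - start) as u64), false),
        _ => (
            Duration::from_secs(slot_index * average_slot_time_ms / 1000),
            true, // estimated; rendered with a trailing "*"
        ),
    }
}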
@@ -323,6 +347,8 @@ pub struct CliValidators {
     pub total_current_stake: u64,
     pub total_delinquent_stake: u64,
     pub validators: Vec<CliValidator>,
+    pub average_skip_rate: f64,
+    pub average_stake_weighted_skip_rate: f64,
     #[serde(skip_serializing)]
     pub validators_sort_order: CliValidatorsSortOrder,
     #[serde(skip_serializing)]
@@ -486,6 +512,18 @@ impl fmt::Display for CliValidators {
             writeln!(f, "{}", header)?;
         }

+        writeln!(f)?;
+        writeln_name_value(
+            f,
+            "Average Stake-Weighted Skip Rate:",
+            &format!("{:.2}%", self.average_stake_weighted_skip_rate,),
+        )?;
+        writeln_name_value(
+            f,
+            "Average Unweighted Skip Rate: ",
+            &format!("{:.2}%", self.average_skip_rate),
+        )?;
+
         writeln!(f)?;
         writeln_name_value(
             f,
@@ -1287,7 +1325,7 @@ impl fmt::Display for CliValidatorInfo {
             writeln_name_value(
                 f,
                 &format!(" {}:", to_title_case(key)),
-                &value.as_str().unwrap_or("?"),
+                value.as_str().unwrap_or("?"),
             )?;
         }
         Ok(())
@@ -1513,11 +1551,15 @@ impl fmt::Display for CliInflation {
             "Staking rate: {:>5.2}%",
             self.current_rate.validator * 100.
         )?;
-        writeln!(
-            f,
-            "Foundation rate: {:>5.2}%",
-            self.current_rate.foundation * 100.
-        )
+
+        if self.current_rate.foundation > 0. {
+            writeln!(
+                f,
+                "Foundation rate: {:>5.2}%",
+                self.current_rate.foundation * 100.
+            )?;
+        }
+        Ok(())
     }
 }
@@ -1768,7 +1810,7 @@ impl fmt::Display for CliTokenAccount {
             writeln_name_value(
                 f,
                 "Close authority:",
-                &account.close_authority.as_ref().unwrap_or(&String::new()),
+                account.close_authority.as_ref().unwrap_or(&String::new()),
             )?;
         }
         Ok(())
@@ -2006,7 +2048,7 @@ pub fn return_signers_with_config(
 }

 pub fn parse_sign_only_reply_string(reply: &str) -> SignOnly {
-    let object: Value = serde_json::from_str(&reply).unwrap();
+    let object: Value = serde_json::from_str(reply).unwrap();
     let blockhash_str = object.get("blockhash").unwrap().as_str().unwrap();
     let blockhash = blockhash_str.parse::<Hash>().unwrap();
     let mut present_signers: Vec<(Pubkey, Signature)> = Vec::new();

@@ -5,7 +5,7 @@ use {
     indicatif::{ProgressBar, ProgressStyle},
     solana_sdk::{
         clock::UnixTimestamp, hash::Hash, message::Message, native_token::lamports_to_sol,
-        program_utils::limited_deserialize, pubkey::Pubkey, transaction::Transaction,
+        program_utils::limited_deserialize, pubkey::Pubkey, stake, transaction::Transaction,
     },
     solana_transaction_status::UiTransactionStatusMeta,
     spl_memo::id as spl_memo_id,
@@ -244,10 +244,9 @@ pub fn write_transaction<W: io::Write>(
                     writeln!(w, "{} {:?}", prefix, vote_instruction)?;
                     raw = false;
                 }
-            } else if program_pubkey == solana_stake_program::id() {
-                if let Ok(stake_instruction) = limited_deserialize::<
-                    solana_stake_program::stake_instruction::StakeInstruction,
-                >(&instruction.data)
+            } else if program_pubkey == stake::program::id() {
+                if let Ok(stake_instruction) =
+                    limited_deserialize::<stake::instruction::StakeInstruction>(&instruction.data)
                 {
                     writeln!(w, "{} {:?}", prefix, stake_instruction)?;
                     raw = false;
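This hunk is one instance of a renaming repeated throughout these diffs: the stake program's types moved from the standalone solana-stake-program crate into solana-sdk's stake module, which is also why the Cargo.toml hunks drop the solana-stake-program dependency. A short sketch of the new paths as these diffs use them:

use solana_sdk::{pubkey::Pubkey, stake};

// New paths, all under solana_sdk::stake:
//   stake::program::id()                  (was solana_stake_program::id())
//   stake::instruction::StakeInstruction  (was solana_stake_program::stake_instruction::StakeInstruction)
//   stake::state::StakeState              (was solana_stake_program::stake_state::StakeState)
fn is_stake_program(program_pubkey: &Pubkey) -> bool {
    *program_pubkey == stake::program::id()
}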
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
 edition = "2018"
 name = "solana-cli"
 description = "Blockchain, Rebuilt for Scale"
-version = "1.7.0"
+version = "1.7.4"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -16,7 +16,7 @@ chrono = { version = "0.4.11", features = ["serde"] }
 clap = "2.33.1"
 criterion-stats = "0.3.0"
 ctrlc = { version = "3.1.5", features = ["termination"] }
-console = "0.11.3"
+console = "0.14.1"
 dirs-next = "2.0.0"
 log = "0.4.11"
 Inflector = "0.11.4"
@@ -28,30 +28,29 @@ reqwest = { version = "0.11.2", default-features = false, features = ["blocking"
 serde = "1.0.122"
 serde_derive = "1.0.103"
 serde_json = "1.0.56"
-solana-account-decoder = { path = "../account-decoder", version = "=1.7.0" }
-solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.7.0" }
-solana-clap-utils = { path = "../clap-utils", version = "=1.7.0" }
-solana-cli-config = { path = "../cli-config", version = "=1.7.0" }
-solana-cli-output = { path = "../cli-output", version = "=1.7.0" }
-solana-client = { path = "../client", version = "=1.7.0" }
-solana-config-program = { path = "../programs/config", version = "=1.7.0" }
-solana-faucet = { path = "../faucet", version = "=1.7.0" }
-solana-logger = { path = "../logger", version = "=1.7.0" }
-solana-net-utils = { path = "../net-utils", version = "=1.7.0" }
+solana-account-decoder = { path = "../account-decoder", version = "=1.7.4" }
+solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.7.4" }
+solana-clap-utils = { path = "../clap-utils", version = "=1.7.4" }
+solana-cli-config = { path = "../cli-config", version = "=1.7.4" }
+solana-cli-output = { path = "../cli-output", version = "=1.7.4" }
+solana-client = { path = "../client", version = "=1.7.4" }
+solana-config-program = { path = "../programs/config", version = "=1.7.4" }
+solana-faucet = { path = "../faucet", version = "=1.7.4" }
+solana-logger = { path = "../logger", version = "=1.7.4" }
+solana-net-utils = { path = "../net-utils", version = "=1.7.4" }
 solana_rbpf = "=0.2.11"
-solana-remote-wallet = { path = "../remote-wallet", version = "=1.7.0" }
-solana-sdk = { path = "../sdk", version = "=1.7.0" }
-solana-stake-program = { path = "../programs/stake", version = "=1.7.0" }
-solana-transaction-status = { path = "../transaction-status", version = "=1.7.0" }
-solana-version = { path = "../version", version = "=1.7.0" }
-solana-vote-program = { path = "../programs/vote", version = "=1.7.0" }
+solana-remote-wallet = { path = "../remote-wallet", version = "=1.7.4" }
+solana-sdk = { path = "../sdk", version = "=1.7.4" }
+solana-transaction-status = { path = "../transaction-status", version = "=1.7.4" }
+solana-version = { path = "../version", version = "=1.7.4" }
+solana-vote-program = { path = "../programs/vote", version = "=1.7.4" }
 spl-memo = { version = "=3.0.1", features = ["no-entrypoint"] }
 thiserror = "1.0.21"
 tiny-bip39 = "0.7.0"
 url = "2.1.1"

 [dev-dependencies]
-solana-core = { path = "../core", version = "=1.7.0" }
+solana-core = { path = "../core", version = "=1.7.4" }
 tempfile = "3.1.0"

 [[bin]]
126  cli/src/cli.rs
@@ -32,7 +32,8 @@ use solana_client::{
         RpcLargestAccountsFilter, RpcSendTransactionConfig, RpcTransactionConfig,
         RpcTransactionLogsFilter,
     },
-    rpc_response::RpcKeyedAccount,
+    rpc_request::{RpcError, RpcResponseErrorData},
+    rpc_response::{RpcKeyedAccount, RpcSimulateTransactionResult},
 };
 use solana_remote_wallet::remote_wallet::RemoteWalletManager;
 use solana_sdk::{
@@ -44,14 +45,15 @@ use solana_sdk::{
     message::Message,
     pubkey::Pubkey,
     signature::{Signature, Signer, SignerError},
+    stake::{
+        self,
+        instruction::LockupArgs,
+        state::{Lockup, StakeAuthorize},
+    },
     system_instruction::{self, SystemError},
     system_program,
     transaction::{Transaction, TransactionError},
 };
-use solana_stake_program::{
-    stake_instruction::LockupArgs,
-    stake_state::{Lockup, StakeAuthorize},
-};
 use solana_transaction_status::{EncodedTransaction, UiTransactionEncoding};
 use solana_vote_program::vote_state::VoteAuthorize;
 use std::{
@@ -61,6 +63,7 @@ use std::{
 use thiserror::Error;

 pub const DEFAULT_RPC_TIMEOUT_SECONDS: &str = "30";
+pub const DEFAULT_CONFIRM_TX_TIMEOUT_SECONDS: &str = "5";

 #[derive(Debug, PartialEq)]
 #[allow(clippy::large_enum_variant)]
@@ -451,6 +454,7 @@ pub struct CliConfig<'a> {
     pub output_format: OutputFormat,
     pub commitment: CommitmentConfig,
     pub send_transaction_config: RpcSendTransactionConfig,
+    pub confirm_transaction_initial_timeout: Duration,
     pub address_labels: HashMap<String, String>,
 }

@@ -595,6 +599,9 @@ impl Default for CliConfig<'_> {
             output_format: OutputFormat::Display,
             commitment: CommitmentConfig::confirmed(),
             send_transaction_config: RpcSendTransactionConfig::default(),
+            confirm_transaction_initial_timeout: Duration::from_secs(
+                u64::from_str(DEFAULT_CONFIRM_TX_TIMEOUT_SECONDS).unwrap(),
+            ),
             address_labels: HashMap::new(),
         }
     }
@@ -931,7 +938,7 @@ pub type ProcessResult = Result<String, Box<dyn std::error::Error>>;
 fn resolve_derived_address_program_id(matches: &ArgMatches<'_>, arg_name: &str) -> Option<Pubkey> {
     matches.value_of(arg_name).and_then(|v| match v {
         "NONCE" => Some(system_program::id()),
-        "STAKE" => Some(solana_stake_program::id()),
+        "STAKE" => Some(stake::program::id()),
         "VOTE" => Some(solana_vote_program::id()),
         _ => pubkey_of(matches, arg_name),
     })
@@ -998,7 +1005,7 @@ fn process_airdrop(

     let result = request_and_confirm_airdrop(rpc_client, config, &pubkey, lamports);
     if let Ok(signature) = result {
-        let signature_cli_message = log_instruction_custom_error::<SystemError>(result, &config)?;
+        let signature_cli_message = log_instruction_custom_error::<SystemError>(result, config)?;
         println!("{}", signature_cli_message);

         let current_balance = rpc_client.get_balance(&pubkey)?;
@@ -1011,7 +1018,7 @@ fn process_airdrop(
            Ok(build_balance_message(current_balance, false, true))
        }
     } else {
-        log_instruction_custom_error::<SystemError>(result, &config)
+        log_instruction_custom_error::<SystemError>(result, config)
     }
 }

@@ -1096,7 +1103,7 @@ fn process_confirm(

 #[allow(clippy::unnecessary_wraps)]
 fn process_decode_transaction(config: &CliConfig, transaction: &Transaction) -> ProcessResult {
-    let sigverify_status = CliSignatureVerificationStatus::verify_transaction(&transaction);
+    let sigverify_status = CliSignatureVerificationStatus::verify_transaction(transaction);
     let decode_transaction = CliTransaction {
         decoded_transaction: transaction.clone(),
         transaction: EncodedTransaction::encode(transaction.clone(), UiTransactionEncoding::Json),
@@ -1267,7 +1274,7 @@ fn process_transfer(
         } else {
             rpc_client.send_and_confirm_transaction_with_spinner(&tx)
         };
-        log_instruction_custom_error::<SystemError>(result, &config)
+        log_instruction_custom_error::<SystemError>(result, config)
     }
 }

@@ -1286,10 +1293,11 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
     }

     let rpc_client = if config.rpc_client.is_none() {
-        Arc::new(RpcClient::new_with_timeout_and_commitment(
+        Arc::new(RpcClient::new_with_timeouts_and_commitment(
             config.json_rpc_url.to_string(),
             config.rpc_timeout,
             config.commitment,
+            config.confirm_transaction_initial_timeout,
         ))
     } else {
         // Primarily for testing
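The switch to new_with_timeouts_and_commitment threads the new initial confirmation timeout into the RPC client alongside the existing request timeout. A usage sketch mirroring the call above; the URL and durations are illustrative values:

use std::{sync::Arc, time::Duration};
use solana_client::rpc_client::RpcClient;
use solana_sdk::commitment_config::CommitmentConfig;

// Argument order follows the call in the diff above.
fn make_cli_client() -> Arc<RpcClient> {
    Arc::new(RpcClient::new_with_timeouts_and_commitment(
        "https://api.devnet.solana.com".to_string(), // json_rpc_url
        Duration::from_secs(30),                     // rpc_timeout
        CommitmentConfig::confirmed(),               // commitment
        Duration::from_secs(5), // confirm_transaction_initial_timeout
    ))
}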
@@ -1322,7 +1330,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
             from_pubkey,
             seed,
             program_id,
-        } => process_create_address_with_seed(config, from_pubkey.as_ref(), &seed, &program_id),
+        } => process_create_address_with_seed(config, from_pubkey.as_ref(), seed, program_id),
         CliCommand::Fees { ref blockhash } => process_fees(&rpc_client, config, blockhash.as_ref()),
         CliCommand::Feature(feature_subcommand) => {
             process_feature_subcommand(&rpc_client, config, feature_subcommand)
@@ -1345,8 +1353,8 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
         CliCommand::LeaderSchedule { epoch } => {
             process_leader_schedule(&rpc_client, config, *epoch)
         }
-        CliCommand::LiveSlots => process_live_slots(&config),
-        CliCommand::Logs { filter } => process_logs(&config, filter),
+        CliCommand::LiveSlots => process_live_slots(config),
+        CliCommand::Logs { filter } => process_logs(config, filter),
         CliCommand::Ping {
             lamports,
             interval,
@@ -1451,7 +1459,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
         ),
         // Get the current nonce
         CliCommand::GetNonce(nonce_account_pubkey) => {
-            process_get_nonce(&rpc_client, config, &nonce_account_pubkey)
+            process_get_nonce(&rpc_client, config, nonce_account_pubkey)
         }
         // Get a new nonce
         CliCommand::NewNonce {
@@ -1472,7 +1480,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
         } => process_show_nonce_account(
             &rpc_client,
             config,
-            &nonce_account_pubkey,
+            nonce_account_pubkey,
             *use_lamports_unit,
         ),
         // Withdraw lamports from a nonce account
@@ -1485,10 +1493,10 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
         } => process_withdraw_from_nonce_account(
             &rpc_client,
             config,
-            &nonce_account,
+            nonce_account,
             *nonce_authority,
             memo.as_ref(),
-            &destination_account_pubkey,
+            destination_account_pubkey,
             *lamports,
         ),
@@ -1562,7 +1570,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
         } => process_deactivate_stake_account(
             &rpc_client,
             config,
-            &stake_account_pubkey,
+            stake_account_pubkey,
             *stake_authority,
             *sign_only,
             *dump_transaction_message,
@@ -1588,8 +1596,8 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
         } => process_delegate_stake(
             &rpc_client,
             config,
-            &stake_account_pubkey,
-            &vote_account_pubkey,
+            stake_account_pubkey,
+            vote_account_pubkey,
             *stake_authority,
             *force,
             *sign_only,
@@ -1616,7 +1624,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
         } => process_split_stake(
             &rpc_client,
             config,
-            &stake_account_pubkey,
+            stake_account_pubkey,
             *stake_authority,
             *sign_only,
             *dump_transaction_message,
@@ -1643,8 +1651,8 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
         } => process_merge_stake(
             &rpc_client,
             config,
-            &stake_account_pubkey,
-            &source_stake_account_pubkey,
+            stake_account_pubkey,
+            source_stake_account_pubkey,
             *stake_authority,
             *sign_only,
             *dump_transaction_message,
@@ -1661,7 +1669,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
         } => process_show_stake_account(
             &rpc_client,
             config,
-            &stake_account_pubkey,
+            stake_account_pubkey,
             *use_lamports_unit,
             *with_rewards,
         ),
@@ -1684,7 +1692,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
         } => process_stake_authorize(
             &rpc_client,
             config,
-            &stake_account_pubkey,
+            stake_account_pubkey,
             new_authorizations,
             *custodian,
             *sign_only,
@@ -1710,7 +1718,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
         } => process_stake_set_lockup(
             &rpc_client,
             config,
-            &stake_account_pubkey,
+            stake_account_pubkey,
             &mut lockup,
             *custodian,
             *sign_only,
@@ -1738,8 +1746,8 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
         } => process_withdraw_stake(
             &rpc_client,
             config,
-            &stake_account_pubkey,
-            &destination_account_pubkey,
+            stake_account_pubkey,
+            destination_account_pubkey,
             *amount,
             *withdraw_authority,
             *custodian,
@@ -1767,7 +1775,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
         } => process_set_validator_info(
             &rpc_client,
             config,
-            &validator_info,
+            validator_info,
             *force_keybase,
             *info_pubkey,
         ),
@@ -1801,7 +1809,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
         } => process_show_vote_account(
             &rpc_client,
             config,
-            &vote_account_pubkey,
+            vote_account_pubkey,
             *use_lamports_unit,
             *with_rewards,
         ),
@@ -1828,8 +1836,8 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
         } => process_vote_authorize(
             &rpc_client,
             config,
-            &vote_account_pubkey,
-            &new_authorized_pubkey,
+            vote_account_pubkey,
+            new_authorized_pubkey,
             *vote_authorize,
             memo.as_ref(),
         ),
@@ -1841,7 +1849,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
         } => process_vote_update_validator(
             &rpc_client,
             config,
-            &vote_account_pubkey,
+            vote_account_pubkey,
             *new_identity_account,
             *withdraw_authority,
             memo.as_ref(),
@@ -1854,7 +1862,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
         } => process_vote_update_commission(
             &rpc_client,
             config,
-            &vote_account_pubkey,
+            vote_account_pubkey,
             *commission,
             *withdraw_authority,
             memo.as_ref(),
@@ -1870,7 +1878,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
         CliCommand::Balance {
             pubkey,
             use_lamports_unit,
-        } => process_balance(&rpc_client, config, &pubkey, *use_lamports_unit),
+        } => process_balance(&rpc_client, config, pubkey, *use_lamports_unit),
         // Confirm the last client transaction by signature
         CliCommand::Confirm(signature) => process_confirm(&rpc_client, config, signature),
         CliCommand::DecodeTransaction(transaction) => {
@@ -1887,13 +1895,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
             pubkey,
             output_file,
             use_lamports_unit,
-        } => process_show_account(
-            &rpc_client,
-            config,
-            &pubkey,
-            &output_file,
-            *use_lamports_unit,
-        ),
+        } => process_show_account(&rpc_client, config, pubkey, output_file, *use_lamports_unit),
         CliCommand::Transfer {
             amount,
             to,
@@ -1956,6 +1958,28 @@ where
 {
     match result {
         Err(err) => {
+            // If transaction simulation returns a known Custom InstructionError, decode it
+            if let ClientErrorKind::RpcError(RpcError::RpcResponseError {
+                data:
+                    RpcResponseErrorData::SendTransactionPreflightFailure(
+                        RpcSimulateTransactionResult {
+                            err:
+                                Some(TransactionError::InstructionError(
+                                    _,
+                                    InstructionError::Custom(code),
+                                )),
+                            ..
+                        },
+                    ),
+                ..
+            }) = err.kind()
+            {
+                if let Some(specific_error) = E::decode_custom_error_to_enum(*code) {
+                    return Err(specific_error.into());
+                }
+            }
+            // If the transaction was instead submitted and returned a known Custom
+            // InstructionError, decode it
             if let ClientErrorKind::TransactionError(TransactionError::InstructionError(
                 _,
                 InstructionError::Custom(code),
@@ -2299,7 +2323,7 @@ mod tests {
         let default_keypair_file = make_tmp_path("keypair_file");
         write_keypair_file(&default_keypair, &default_keypair_file).unwrap();

-        let default_signer = DefaultSigner::new(default_keypair_file);
+        let default_signer = DefaultSigner::new("keypair", &default_keypair_file);

         let signer_info = default_signer
             .generate_unique_signers(vec![], &matches, &mut None)
@@ -2377,7 +2401,7 @@ mod tests {
         let keypair_file = make_tmp_path("keypair_file");
         write_keypair_file(&default_keypair, &keypair_file).unwrap();
         let keypair = read_keypair_file(&keypair_file).unwrap();
-        let default_signer = DefaultSigner::new(keypair_file.clone());
+        let default_signer = DefaultSigner::new("", &keypair_file);
         // Test Airdrop Subcommand
         let test_airdrop =
             test_commands
@@ -2464,7 +2488,7 @@ mod tests {
         let from_pubkey = Some(solana_sdk::pubkey::new_rand());
         let from_str = from_pubkey.unwrap().to_string();
         for (name, program_id) in &[
-            ("STAKE", solana_stake_program::id()),
+            ("STAKE", stake::program::id()),
             ("VOTE", solana_vote_program::id()),
             ("NONCE", system_program::id()),
         ] {
@@ -2500,7 +2524,7 @@ mod tests {
                 command: CliCommand::CreateAddressWithSeed {
                     from_pubkey: None,
                     seed: "seed".to_string(),
-                    program_id: solana_stake_program::id(),
+                    program_id: stake::program::id(),
                 },
                 signers: vec![read_keypair_file(&keypair_file).unwrap().into()],
             }
@@ -2763,11 +2787,11 @@ mod tests {
         config.command = CliCommand::CreateAddressWithSeed {
             from_pubkey: Some(from_pubkey),
             seed: "seed".to_string(),
-            program_id: solana_stake_program::id(),
+            program_id: stake::program::id(),
         };
         let address = process_command(&config);
         let expected_address =
-            Pubkey::create_with_seed(&from_pubkey, "seed", &solana_stake_program::id()).unwrap();
+            Pubkey::create_with_seed(&from_pubkey, "seed", &stake::program::id()).unwrap();
         assert_eq!(address.unwrap(), expected_address.to_string());

         // Need airdrop cases
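The expected address is unchanged by the migration because a seed-derived address commits to the base pubkey, the seed string, and the owning program id, and only the id's module path moved. A small sketch; stake_seeded_address is a hypothetical helper:

use solana_sdk::{pubkey::Pubkey, stake};

// Hypothetical helper: derive the address the test above expects.
// The derivation hashes (base, seed, owner program id) together.
fn stake_seeded_address(base: &Pubkey, seed: &str) -> Pubkey {
    Pubkey::create_with_seed(base, seed, &stake::program::id()).unwrap()
}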
@@ -2905,7 +2929,7 @@ mod tests {
         let default_keypair = Keypair::new();
         let default_keypair_file = make_tmp_path("keypair_file");
         write_keypair_file(&default_keypair, &default_keypair_file).unwrap();
-        let default_signer = DefaultSigner::new(default_keypair_file.clone());
+        let default_signer = DefaultSigner::new("", &default_keypair_file);

         //Test Transfer Subcommand, SOL
         let from_keypair = keypair_from_seed(&[0u8; 32]).unwrap();
@@ -3154,7 +3178,7 @@ mod tests {
                 memo: None,
                 fee_payer: 0,
                 derived_address_seed: Some(derived_address_seed),
-                derived_address_program_id: Some(solana_stake_program::id()),
+                derived_address_program_id: Some(stake::program::id()),
             },
             signers: vec![read_keypair_file(&default_keypair_file).unwrap().into(),],
         }
@@ -46,7 +46,9 @@ use solana_sdk::{
     rent::Rent,
     rpc_port::DEFAULT_RPC_PORT_STR,
     signature::Signature,
-    slot_history, system_instruction, system_program,
+    slot_history,
+    stake::{self, state::StakeState},
+    system_instruction, system_program,
     sysvar::{
         self,
         slot_history::SlotHistory,
@@ -55,7 +57,6 @@ use solana_sdk::{
     timing,
     transaction::Transaction,
 };
-use solana_stake_program::stake_state::StakeState;
 use solana_transaction_status::UiTransactionEncoding;
 use solana_vote_program::vote_state::VoteState;
 use std::{
@@ -121,7 +122,7 @@ impl ClusterQuerySubCommands for App<'_, '_> {
                 .long("our-localhost")
                 .takes_value(false)
                 .value_name("PORT")
-                .default_value(&DEFAULT_RPC_PORT_STR)
+                .default_value(DEFAULT_RPC_PORT_STR)
                 .validator(is_port)
                 .help("Guess Identity pubkey and validator rpc node assuming local (possibly private) validator"),
         )
@@ -1075,9 +1076,15 @@ pub fn process_get_epoch_info(rpc_client: &RpcClient, config: &CliConfig) -> Pro
             (secs as u64).saturating_mul(1000).checked_div(slots)
         })
         .unwrap_or(clock::DEFAULT_MS_PER_SLOT);
+    let start_block_time = rpc_client
+        .get_block_time(epoch_info.absolute_slot - epoch_info.slot_index)
+        .ok();
+    let current_block_time = rpc_client.get_block_time(epoch_info.absolute_slot).ok();
     let epoch_info = CliEpochInfo {
         epoch_info,
         average_slot_time_ms,
+        start_block_time,
+        current_block_time,
     };
     Ok(config.output_format.formatted_string(&epoch_info))
 }
@@ -1707,7 +1714,7 @@ pub fn process_show_stakes(
         }
     }
     let all_stake_accounts = rpc_client
-        .get_program_accounts_with_config(&solana_stake_program::id(), program_accounts_config)?;
+        .get_program_accounts_with_config(&stake::program::id(), program_accounts_config)?;
     let stake_history_account = rpc_client.get_account(&stake_history::id())?;
     let clock_account = rpc_client.get_account(&sysvar::clock::id())?;
     let clock: Clock = from_account(&clock_account).ok_or_else(|| {
@@ -1888,14 +1895,40 @@ pub fn process_show_validators(
         entry.delinquent_active_stake += validator.activated_stake;
     }

+    let validators: Vec<_> = current_validators
+        .into_iter()
+        .chain(delinquent_validators.into_iter())
+        .collect();
+
+    let (average_skip_rate, average_stake_weighted_skip_rate) = {
+        let mut skip_rate_len = 0;
+        let mut skip_rate_sum = 0.;
+        let mut skip_rate_weighted_sum = 0.;
+        for validator in validators.iter() {
+            if let Some(skip_rate) = validator.skip_rate {
+                skip_rate_sum += skip_rate;
+                skip_rate_len += 1;
+                skip_rate_weighted_sum += skip_rate * validator.activated_stake as f64;
+            }
+        }
+
+        if skip_rate_len > 0 && total_active_stake > 0 {
+            (
+                skip_rate_sum / skip_rate_len as f64,
+                skip_rate_weighted_sum / total_active_stake as f64,
+            )
+        } else {
+            (100., 100.) // Impossible?
+        }
+    };
+
     let cli_validators = CliValidators {
         total_active_stake,
         total_current_stake,
         total_delinquent_stake,
-        validators: current_validators
-            .into_iter()
-            .chain(delinquent_validators.into_iter())
-            .collect(),
+        validators,
         average_skip_rate,
         average_stake_weighted_skip_rate,
         validators_sort_order,
         validators_reverse_sort,
         number_validators,
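The block above computes two summary statistics in one pass: an unweighted mean over validators that reported a skip rate, and a stake-weighted mean where each rate is scaled by the validator's activated stake and normalized by total active stake. A standalone sketch of the same arithmetic; Validator here is a stand-in for the CLI's entry type:

struct Validator {
    activated_stake: u64,
    skip_rate: Option<f64>, // percent; None when no leader slots were observed
}

fn skip_rate_averages(validators: &[Validator], total_active_stake: u64) -> (f64, f64) {
    let (mut count, mut sum, mut weighted_sum) = (0u64, 0.0, 0.0);
    for v in validators {
        if let Some(rate) = v.skip_rate {
            sum += rate;
            count += 1;
            weighted_sum += rate * v.activated_stake as f64;
        }
    }
    if count > 0 && total_active_stake > 0 {
        // Unweighted: each reporting validator counts equally.
        // Weighted: high-stake validators dominate the average.
        (sum / count as f64, weighted_sum / total_active_stake as f64)
    } else {
        (100., 100.) // degenerate fallback, mirroring the diff above
    }
}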
@@ -2098,7 +2131,7 @@ mod tests {
         let default_keypair = Keypair::new();
         let (default_keypair_file, mut tmp_file) = make_tmp_file();
         write_keypair(&default_keypair, tmp_file.as_file_mut()).unwrap();
-        let default_signer = DefaultSigner::new(default_keypair_file);
+        let default_signer = DefaultSigner::new("", &default_keypair_file);

         let test_cluster_version = test_commands
             .clone()
@@ -102,7 +102,7 @@ fn process_rewards(
     rewards_epoch: Option<Epoch>,
 ) -> ProcessResult {
     let rewards = rpc_client
-        .get_inflation_reward(&addresses, rewards_epoch)
+        .get_inflation_reward(addresses, rewards_epoch)
         .map_err(|err| {
             if let Some(epoch) = rewards_epoch {
                 format!("Rewards not available for epoch {}", epoch)
@@ -10,7 +10,7 @@ use solana_clap_utils::{
 };
 use solana_cli::cli::{
     app, parse_command, process_command, CliCommandInfo, CliConfig, SettingType,
-    DEFAULT_RPC_TIMEOUT_SECONDS,
+    DEFAULT_CONFIRM_TX_TIMEOUT_SECONDS, DEFAULT_RPC_TIMEOUT_SECONDS,
 };
 use solana_cli_config::{Config, CONFIG_FILE};
 use solana_cli_output::{display::println_name_value, OutputFormat};
@@ -167,23 +167,29 @@ pub fn parse_args<'a>(
     let rpc_timeout = value_t_or_exit!(matches, "rpc_timeout", u64);
     let rpc_timeout = Duration::from_secs(rpc_timeout);

+    let confirm_transaction_initial_timeout =
+        value_t_or_exit!(matches, "confirm_transaction_initial_timeout", u64);
+    let confirm_transaction_initial_timeout =
+        Duration::from_secs(confirm_transaction_initial_timeout);
+
     let (_, websocket_url) = CliConfig::compute_websocket_url_setting(
         matches.value_of("websocket_url").unwrap_or(""),
         &config.websocket_url,
         matches.value_of("json_rpc_url").unwrap_or(""),
         &config.json_rpc_url,
     );
+    let default_signer_arg_name = "keypair".to_string();
     let (_, default_signer_path) = CliConfig::compute_keypair_path_setting(
-        matches.value_of("keypair").unwrap_or(""),
+        matches.value_of(&default_signer_arg_name).unwrap_or(""),
         &config.keypair_path,
     );

-    let default_signer = DefaultSigner::from_path(default_signer_path.clone())?;
+    let default_signer = DefaultSigner::new(default_signer_arg_name, &default_signer_path);

     let CliCommandInfo {
         command,
         mut signers,
-    } = parse_command(&matches, &default_signer, &mut wallet_manager)?;
+    } = parse_command(matches, &default_signer, &mut wallet_manager)?;

     if signers.is_empty() {
         if let Ok(signer_info) =
@@ -234,6 +240,7 @@ pub fn parse_args<'a>(
                 preflight_commitment: Some(commitment.commitment),
                 ..RpcSendTransactionConfig::default()
             },
+            confirm_transaction_initial_timeout,
             address_labels,
         },
         signers,
@@ -256,7 +263,7 @@ fn main() -> Result<(), Box<dyn error::Error>> {
         .global(true)
         .help("Configuration file to use");
     if let Some(ref config_file) = *CONFIG_FILE {
-        arg.default_value(&config_file)
+        arg.default_value(config_file)
     } else {
         arg
     }
@@ -349,6 +356,16 @@ fn main() -> Result<(), Box<dyn error::Error>> {
                 .hidden(true)
                 .help("Timeout value for RPC requests"),
         )
+        .arg(
+            Arg::with_name("confirm_transaction_initial_timeout")
+                .long("confirm-timeout")
+                .value_name("SECONDS")
+                .takes_value(true)
+                .default_value(DEFAULT_CONFIRM_TX_TIMEOUT_SECONDS)
+                .global(true)
+                .hidden(true)
+                .help("Timeout value for initial transaction status"),
+        )
         .subcommand(
             SubCommand::with_name("config")
                 .about("Solana command-line tool configuration settings")
@@ -410,10 +427,10 @@ fn main() -> Result<(), Box<dyn error::Error>> {
 }

 fn do_main(matches: &ArgMatches<'_>) -> Result<(), Box<dyn error::Error>> {
-    if parse_settings(&matches)? {
+    if parse_settings(matches)? {
         let mut wallet_manager = None;

-        let (mut config, signers) = parse_args(&matches, &mut wallet_manager)?;
+        let (mut config, signers) = parse_args(matches, &mut wallet_manager)?;
         config.signers = signers.iter().map(|s| s.as_ref()).collect();
         let result = process_command(&config)?;
         println!("{}", result);
@@ -364,7 +364,7 @@ pub fn process_authorize_nonce_account(
         config.commitment,
     )?;
     let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
-    log_instruction_custom_error::<NonceError>(result, &config)
+    log_instruction_custom_error::<NonceError>(result, config)
 }

 pub fn process_create_nonce_account(
@@ -449,7 +449,7 @@ pub fn process_create_nonce_account(
     let mut tx = Transaction::new_unsigned(message);
     tx.try_sign(&config.signers, recent_blockhash)?;
     let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
-    log_instruction_custom_error::<SystemError>(result, &config)
+    log_instruction_custom_error::<SystemError>(result, config)
 }

 pub fn process_get_nonce(
@@ -474,10 +474,10 @@ pub fn process_new_nonce(
 ) -> ProcessResult {
     check_unique_pubkeys(
         (&config.signers[0].pubkey(), "cli keypair".to_string()),
-        (&nonce_account, "nonce_account_pubkey".to_string()),
+        (nonce_account, "nonce_account_pubkey".to_string()),
     )?;

-    if let Err(err) = rpc_client.get_account(&nonce_account) {
+    if let Err(err) = rpc_client.get_account(nonce_account) {
         return Err(CliError::BadParameter(format!(
             "Unable to advance nonce account {}. error: {}",
             nonce_account, err
@@ -487,7 +487,7 @@ pub fn process_new_nonce(

     let nonce_authority = config.signers[nonce_authority];
     let ixs = vec![advance_nonce_account(
-        &nonce_account,
+        nonce_account,
         &nonce_authority.pubkey(),
     )]
     .with_memo(memo);
@@ -503,7 +503,7 @@ pub fn process_new_nonce(
         config.commitment,
     )?;
     let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
-    log_instruction_custom_error::<SystemError>(result, &config)
+    log_instruction_custom_error::<SystemError>(result, config)
 }

 pub fn process_show_nonce_account(
@@ -522,7 +522,7 @@ pub fn process_show_nonce_account(
         use_lamports_unit,
         ..CliNonceAccount::default()
     };
-    if let Some(ref data) = data {
+    if let Some(data) = data {
         nonce_account.nonce = Some(data.blockhash.to_string());
         nonce_account.lamports_per_signature = Some(data.fee_calculator.lamports_per_signature);
         nonce_account.authority = Some(data.authority.to_string());
@@ -566,7 +566,7 @@ pub fn process_withdraw_from_nonce_account(
         config.commitment,
     )?;
     let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
-    log_instruction_custom_error::<NonceError>(result, &config)
+    log_instruction_custom_error::<NonceError>(result, config)
 }

 #[cfg(test)]
@@ -596,7 +596,7 @@ mod tests {
         let default_keypair = Keypair::new();
         let (default_keypair_file, mut tmp_file) = make_tmp_file();
         write_keypair(&default_keypair, tmp_file.as_file_mut()).unwrap();
-        let default_signer = DefaultSigner::new(default_keypair_file.clone());
+        let default_signer = DefaultSigner::new("", &default_keypair_file);
         let (keypair_file, mut tmp_file) = make_tmp_file();
         let nonce_account_keypair = Keypair::new();
         write_keypair(&nonce_account_keypair, tmp_file.as_file_mut()).unwrap();
@@ -150,7 +150,7 @@ impl ProgramSubCommands for App<'_, '_> {
                     pubkey!(Arg::with_name("program_id")
                         .long("program-id")
                         .value_name("PROGRAM_ID"),
-                        "Executable program's address, must be a signer for initial deploys, can be a pubkey for upgrades \
+                        "Executable program's address, must be a keypair for initial deploys, can be a pubkey for upgrades \
                         [default: address of keypair at /path/to/program-keypair.json if present, otherwise a random address]"),
                 )
                 .arg(
@@ -767,7 +767,7 @@ fn process_program_deploy(
     };
     let upgrade_authority_signer = config.signers[upgrade_authority_signer_index];

-    let default_program_keypair = get_default_program_keypair(&program_location);
+    let default_program_keypair = get_default_program_keypair(program_location);
     let (program_signer, program_pubkey) = if let Some(i) = program_signer_index {
         (Some(config.signers[i]), config.signers[i].pubkey())
     } else if let Some(program_pubkey) = program_pubkey {
@@ -843,7 +843,7 @@ fn process_program_deploy(
     };

     let (program_data, program_len) = if let Some(program_location) = program_location {
-        let program_data = read_and_verify_elf(&program_location)?;
+        let program_data = read_and_verify_elf(program_location)?;
         let program_len = program_data.len();
         (program_data, program_len)
     } else if buffer_provided {
@@ -886,6 +886,11 @@ fn process_program_deploy(
     )?;

     let result = if do_deploy {
+        if program_signer.is_none() {
+            return Err(
+                "Initial deployments require a keypair be provided for the program id".into(),
+            );
+        }
         do_process_program_write_and_deploy(
             rpc_client.clone(),
             config,
@@ -1254,7 +1259,7 @@ fn process_dump(
                     UpgradeableLoaderState::programdata_data_offset().unwrap_or(0);
                 let program_data = &programdata_account.data[offset..];
                 let mut f = File::create(output_location)?;
-                f.write_all(&program_data)?;
+                f.write_all(program_data)?;
                 Ok(format!("Wrote program to {}", output_location))
             } else {
                 Err(
@@ -1274,7 +1279,7 @@ fn process_dump(
             let offset = UpgradeableLoaderState::buffer_data_offset().unwrap_or(0);
             let program_data = &account.data[offset..];
             let mut f = File::create(output_location)?;
-            f.write_all(&program_data)?;
+            f.write_all(program_data)?;
             Ok(format!("Wrote program to {}", output_location))
         } else {
             Err(format!(
@@ -1305,8 +1310,8 @@ fn close(

     let mut tx = Transaction::new_unsigned(Message::new(
         &[bpf_loader_upgradeable::close(
-            &account_pubkey,
-            &recipient_pubkey,
+            account_pubkey,
+            recipient_pubkey,
             &authority_signer.pubkey(),
         )],
         Some(&config.signers[0].pubkey()),
@@ -1415,7 +1420,7 @@ fn process_close(
             if close(
                 rpc_client,
                 config,
-                &address,
+                address,
                 &recipient_pubkey,
                 authority_signer,
             )
@@ -1516,7 +1521,7 @@ fn do_process_program_write_and_deploy(
         .value
     {
         complete_partial_program_init(
-            &loader_id,
+            loader_id,
             &config.signers[0].pubkey(),
             buffer_pubkey,
             &account,
@@ -1546,7 +1551,7 @@ fn do_process_program_write_and_deploy(
                 buffer_pubkey,
                 minimum_balance,
                 buffer_data_len as u64,
-                &loader_id,
+                loader_id,
             )],
             minimum_balance,
         )
@@ -1574,7 +1579,7 @@ fn do_process_program_write_and_deploy(
         } else {
             loader_instruction::write(
                 buffer_pubkey,
-                &loader_id,
+                loader_id,
                 (i * DATA_CHUNK_SIZE) as u32,
                 chunk.to_vec(),
             )
@@ -1618,7 +1623,7 @@ fn do_process_program_write_and_deploy(
             )
         } else {
             Message::new(
-                &[loader_instruction::finalize(buffer_pubkey, &loader_id)],
+                &[loader_instruction::finalize(buffer_pubkey, loader_id)],
                 Some(&config.signers[0].pubkey()),
             )
         };
@@ -1744,8 +1749,8 @@ fn do_process_program_upgrade(
     // Create and add final message
     let final_message = Message::new(
         &[bpf_loader_upgradeable::upgrade(
-            &program_id,
-            &buffer_pubkey,
+            program_id,
+            buffer_pubkey,
             &upgrade_authority.pubkey(),
             &config.signers[0].pubkey(),
         )],
@@ -1813,7 +1818,7 @@ fn complete_partial_program_init(
             account_data_len as u64,
         ));
         if account.owner != *loader_id {
-            instructions.push(system_instruction::assign(elf_pubkey, &loader_id));
+            instructions.push(system_instruction::assign(elf_pubkey, loader_id));
         }
     }
     if account.lamports < minimum_balance {
@@ -1885,7 +1890,7 @@ fn send_deploy_messages(
             initial_transaction.try_sign(&[payer_signer], blockhash)?;
         }
         let result = rpc_client.send_and_confirm_transaction_with_spinner(&initial_transaction);
-        log_instruction_custom_error::<SystemError>(result, &config)
+        log_instruction_custom_error::<SystemError>(result, config)
             .map_err(|err| format!("Account allocation failed: {}", err))?;
     } else {
         return Err("Buffer account not created yet, must provide a key pair".into());
@@ -2131,7 +2136,7 @@ mod tests {
         let default_keypair = Keypair::new();
         let keypair_file = make_tmp_path("keypair_file");
         write_keypair_file(&default_keypair, &keypair_file).unwrap();
-        let default_signer = DefaultSigner::new(keypair_file.clone());
+        let default_signer = DefaultSigner::new("", &keypair_file);

         let test_command = test_commands.clone().get_matches_from(vec![
             "test",
@@ -2339,7 +2344,7 @@ mod tests {
         let default_keypair = Keypair::new();
         let keypair_file = make_tmp_path("keypair_file");
         write_keypair_file(&default_keypair, &keypair_file).unwrap();
-        let default_signer = DefaultSigner::new(keypair_file.clone());
+        let default_signer = DefaultSigner::new("", &keypair_file);

         // defaults
         let test_command = test_commands.clone().get_matches_from(vec![
@@ -2487,7 +2492,7 @@ mod tests {
         let default_keypair = Keypair::new();
         let keypair_file = make_tmp_path("keypair_file");
         write_keypair_file(&default_keypair, &keypair_file).unwrap();
-        let default_signer = DefaultSigner::new(keypair_file.clone());
+        let default_signer = DefaultSigner::new("", &keypair_file);

         let program_pubkey = Pubkey::new_unique();
         let new_authority_pubkey = Pubkey::new_unique();
@@ -2595,7 +2600,7 @@ mod tests {
         let default_keypair = Keypair::new();
         let keypair_file = make_tmp_path("keypair_file");
         write_keypair_file(&default_keypair, &keypair_file).unwrap();
-        let default_signer = DefaultSigner::new(keypair_file.clone());
+        let default_signer = DefaultSigner::new("", &keypair_file);

         let buffer_pubkey = Pubkey::new_unique();
         let new_authority_pubkey = Pubkey::new_unique();
@@ -2652,7 +2657,7 @@ mod tests {
         let default_keypair = Keypair::new();
         let keypair_file = make_tmp_path("keypair_file");
         write_keypair_file(&default_keypair, &keypair_file).unwrap();
-        let default_signer = DefaultSigner::new(keypair_file);
+        let default_signer = DefaultSigner::new("", &keypair_file);

         // defaults
         let buffer_pubkey = Pubkey::new_unique();
@@ -2751,7 +2756,7 @@ mod tests {
         let default_keypair = Keypair::new();
         let keypair_file = make_tmp_path("keypair_file");
         write_keypair_file(&default_keypair, &keypair_file).unwrap();
-        let default_signer = DefaultSigner::new(keypair_file.clone());
+        let default_signer = DefaultSigner::new("", &keypair_file);

         // defaults
         let buffer_pubkey = Pubkey::new_unique();
@@ -92,7 +92,7 @@ where
         Ok((message, spend))
     } else {
         let from_balance = rpc_client
-            .get_balance_with_commitment(&from_pubkey, commitment)?
+            .get_balance_with_commitment(from_pubkey, commitment)?
             .value;
         let (message, SpendAndFee { spend, fee }) = resolve_spend_message(
             amount,
@@ -36,6 +36,11 @@ use solana_sdk::{
     feature, feature_set,
     message::Message,
     pubkey::Pubkey,
+    stake::{
+        self,
+        instruction::{self as stake_instruction, LockupArgs, StakeError},
+        state::{Authorized, Lockup, Meta, StakeAuthorize, StakeState},
+    },
     system_instruction::SystemError,
     sysvar::{
         clock,
@@ -43,10 +48,6 @@ use solana_sdk::{
     },
     transaction::Transaction,
 };
-use solana_stake_program::{
-    stake_instruction::{self, LockupArgs, StakeError},
-    stake_state::{Authorized, Lockup, Meta, StakeAuthorize, StakeState},
-};
 use solana_vote_program::vote_state::VoteState;
 use std::{ops::Deref, sync::Arc};

@@ -971,7 +972,7 @@ pub fn process_create_stake_account(
 ) -> ProcessResult {
     let stake_account = config.signers[stake_account];
     let stake_account_address = if let Some(seed) = seed {
-        Pubkey::create_with_seed(&stake_account.pubkey(), &seed, &solana_stake_program::id())?
+        Pubkey::create_with_seed(&stake_account.pubkey(), seed, &stake::program::id())?
     } else {
         stake_account.pubkey()
     };
@@ -1039,7 +1040,7 @@ pub fn process_create_stake_account(

     if !sign_only {
         if let Ok(stake_account) = rpc_client.get_account(&stake_account_address) {
-            let err_msg = if stake_account.owner == solana_stake_program::id() {
+            let err_msg = if stake_account.owner == stake::program::id() {
                 format!("Stake account {} already exists", stake_account_address)
             } else {
                 format!(
@@ -1084,7 +1085,7 @@ pub fn process_create_stake_account(
     } else {
         tx.try_sign(&config.signers, recent_blockhash)?;
         let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
-        log_instruction_custom_error::<SystemError>(result, &config)
+        log_instruction_custom_error::<SystemError>(result, config)
     }
 }

@@ -1171,7 +1172,7 @@ pub fn process_stake_authorize(
         } else {
             rpc_client.send_and_confirm_transaction_with_spinner(&tx)
         };
-        log_instruction_custom_error::<StakeError>(result, &config)
+        log_instruction_custom_error::<StakeError>(result, config)
     }
 }

@@ -1195,7 +1196,7 @@ pub fn process_deactivate_stake_account(
     let stake_authority = config.signers[stake_authority];

     let stake_account_address = if let Some(seed) = seed {
-        Pubkey::create_with_seed(&stake_account_pubkey, seed, &solana_stake_program::id())?
+        Pubkey::create_with_seed(stake_account_pubkey, seed, &stake::program::id())?
     } else {
         *stake_account_pubkey
     };
@@ -1247,7 +1248,7 @@ pub fn process_deactivate_stake_account(
             config.commitment,
         )?;
         let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
-        log_instruction_custom_error::<StakeError>(result, &config)
+        log_instruction_custom_error::<StakeError>(result, config)
     }
 }

@@ -1273,7 +1274,7 @@ pub fn process_withdraw_stake(
     let custodian = custodian.map(|index| config.signers[index]);

     let stake_account_address = if let Some(seed) = seed {
-        Pubkey::create_with_seed(&stake_account_pubkey, seed, &solana_stake_program::id())?
+        Pubkey::create_with_seed(stake_account_pubkey, seed, &stake::program::id())?
     } else {
         *stake_account_pubkey
     };
@@ -1346,7 +1347,7 @@ pub fn process_withdraw_stake(
             config.commitment,
         )?;
         let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
-        log_instruction_custom_error::<SystemError>(result, &config)
+        log_instruction_custom_error::<SystemError>(result, config)
     }
 }

@@ -1381,10 +1382,10 @@ pub fn process_split_stake(
     }
     check_unique_pubkeys(
         (&fee_payer.pubkey(), "fee-payer keypair".to_string()),
-        (&stake_account_pubkey, "stake_account".to_string()),
+        (stake_account_pubkey, "stake_account".to_string()),
     )?;
     check_unique_pubkeys(
-        (&stake_account_pubkey, "stake_account".to_string()),
+        (stake_account_pubkey, "stake_account".to_string()),
         (
             &split_stake_account.pubkey(),
             "split_stake_account".to_string(),
@@ -1394,18 +1395,14 @@ pub fn process_split_stake(
     let stake_authority = config.signers[stake_authority];

     let split_stake_account_address = if let Some(seed) = split_stake_account_seed {
-        Pubkey::create_with_seed(
-            &split_stake_account.pubkey(),
-            &seed,
-            &solana_stake_program::id(),
-        )?
+        Pubkey::create_with_seed(&split_stake_account.pubkey(), seed, &stake::program::id())?
     } else {
         split_stake_account.pubkey()
     };

     if !sign_only {
         if let Ok(stake_account) = rpc_client.get_account(&split_stake_account_address) {
-            let err_msg = if stake_account.owner == solana_stake_program::id() {
+            let err_msg = if stake_account.owner == stake::program::id() {
                 format!(
                     "Stake account {} already exists",
                     split_stake_account_address
@@ -1436,7 +1433,7 @@ pub fn process_split_stake(

     let ixs = if let Some(seed) = split_stake_account_seed {
         stake_instruction::split_with_seed(
-            &stake_account_pubkey,
+            stake_account_pubkey,
             &stake_authority.pubkey(),
             lamports,
             &split_stake_account_address,
@@ -1446,7 +1443,7 @@ pub fn process_split_stake(
         .with_memo(memo)
     } else {
         stake_instruction::split(
-            &stake_account_pubkey,
+            stake_account_pubkey,
             &stake_authority.pubkey(),
             lamports,
             &split_stake_account_address,
@@ -1495,7 +1492,7 @@ pub fn process_split_stake(
             config.commitment,
         )?;
         let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
-        log_instruction_custom_error::<StakeError>(result, &config)
+        log_instruction_custom_error::<StakeError>(result, config)
     }
 }

@@ -1518,19 +1515,19 @@ pub fn process_merge_stake(

     check_unique_pubkeys(
         (&fee_payer.pubkey(), "fee-payer keypair".to_string()),
-        (&stake_account_pubkey, "stake_account".to_string()),
+        (stake_account_pubkey, "stake_account".to_string()),
     )?;
     check_unique_pubkeys(
         (&fee_payer.pubkey(), "fee-payer keypair".to_string()),
         (
-            &source_stake_account_pubkey,
+            source_stake_account_pubkey,
             "source_stake_account".to_string(),
         ),
     )?;
     check_unique_pubkeys(
-        (&stake_account_pubkey, "stake_account".to_string()),
+        (stake_account_pubkey, "stake_account".to_string()),
         (
-            &source_stake_account_pubkey,
+            source_stake_account_pubkey,
             "source_stake_account".to_string(),
         ),
     )?;
@@ -1540,7 +1537,7 @@ pub fn process_merge_stake(
     if !sign_only {
         for stake_account_address in &[stake_account_pubkey, source_stake_account_pubkey] {
             if let Ok(stake_account) = rpc_client.get_account(stake_account_address) {
-                if stake_account.owner != solana_stake_program::id() {
+                if stake_account.owner != stake::program::id() {
                     return Err(CliError::BadParameter(format!(
                         "Account {} is not a stake account",
                         stake_account_address
@@ -1555,8 +1552,8 @@ pub fn process_merge_stake(
         blockhash_query.get_blockhash_and_fee_calculator(rpc_client, config.commitment)?;

     let ixs = stake_instruction::merge(
-        &stake_account_pubkey,
-        &source_stake_account_pubkey,
+        stake_account_pubkey,
+        source_stake_account_pubkey,
         &stake_authority.pubkey(),
     )
     .with_memo(memo);
@@ -1606,7 +1603,7 @@ pub fn process_merge_stake(
             config.commitment,
             config.send_transaction_config,
         );
-        log_instruction_custom_error::<StakeError>(result, &config)
+        log_instruction_custom_error::<StakeError>(result, config)
     }
 }

@@ -1677,7 +1674,7 @@ pub fn process_stake_set_lockup(
             config.commitment,
         )?;
         let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
-        log_instruction_custom_error::<StakeError>(result, &config)
+        log_instruction_custom_error::<StakeError>(result, config)
     }
 }

@@ -1876,7 +1873,7 @@ pub fn process_show_stake_account(
     with_rewards: Option<usize>,
 ) -> ProcessResult {
     let stake_account = rpc_client.get_account(stake_account_address)?;
-    if stake_account.owner != solana_stake_program::id() {
+    if stake_account.owner != stake::program::id() {
         return Err(CliError::RpcRequestError(format!(
             "{:?} is not a stake account",
             stake_account_address,
@@ -2079,7 +2076,7 @@ pub fn process_delegate_stake(
             config.commitment,
         )?;
         let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
-        log_instruction_custom_error::<StakeError>(result, &config)
+        log_instruction_custom_error::<StakeError>(result, config)
     }
 }

@@ -2117,7 +2114,7 @@ mod tests {
         let default_keypair = Keypair::new();
         let (default_keypair_file, mut tmp_file) = make_tmp_file();
         write_keypair(&default_keypair, tmp_file.as_file_mut()).unwrap();
-        let default_signer = DefaultSigner::new(default_keypair_file.clone());
+        let default_signer = DefaultSigner::new("", &default_keypair_file);
         let (keypair_file, mut tmp_file) = make_tmp_file();
         let stake_account_keypair = Keypair::new();
         write_keypair(&stake_account_keypair, tmp_file.as_file_mut()).unwrap();
@@ -119,7 +119,7 @@ fn parse_validator_info(
     let key_list: ConfigKeys = deserialize(&account.data)?;
     if !key_list.keys.is_empty() {
         let (validator_pubkey, _) = key_list.keys[1];
-        let validator_info_string: String = deserialize(&get_config_data(&account.data)?)?;
+        let validator_info_string: String = deserialize(get_config_data(&account.data)?)?;
         let validator_info: Map<_, _> = serde_json::from_str(&validator_info_string)?;
         Ok((validator_pubkey, validator_info))
     } else {
@@ -246,7 +246,7 @@ pub fn process_set_validator_info(
 ) -> ProcessResult {
     // Validate keybase username
     if let Some(string) = validator_info.get("keybaseUsername") {
-        let result = verify_keybase(&config.signers[0].pubkey(), &string);
+        let result = verify_keybase(&config.signers[0].pubkey(), string);
         if result.is_err() {
             if force_keybase {
                 println!("--force supplied, ignoring: {:?}", result);
@@ -272,7 +272,7 @@ pub fn process_set_validator_info(
             },
         )
         .find(|(pubkey, account)| {
-            let (validator_pubkey, _) = parse_validator_info(&pubkey, &account).unwrap();
+            let (validator_pubkey, _) = parse_validator_info(pubkey, account).unwrap();
             validator_pubkey == config.signers[0].pubkey()
         });

@@ -393,7 +393,7 @@ pub fn process_get_validator_info(
     }
     for (validator_info_pubkey, validator_info_account) in validator_info.iter() {
         let (validator_pubkey, validator_info) =
-            parse_validator_info(&validator_info_pubkey, &validator_info_account)?;
+            parse_validator_info(validator_info_pubkey, validator_info_account)?;
         validator_info_list.push(CliValidatorInfo {
             identity_pubkey: validator_pubkey.to_string(),
             info_pubkey: validator_info_pubkey.to_string(),
@@ -451,7 +451,7 @@ mod tests {
             "name": "Alice",
             "keybaseUsername": "alice_keybase",
         });
-        assert_eq!(parse_args(&matches), expected);
+        assert_eq!(parse_args(matches), expected);
     }

     #[test]
@@ -468,7 +468,7 @@ pub fn process_create_vote_account(
     let vote_account = config.signers[vote_account];
     let vote_account_pubkey = vote_account.pubkey();
     let vote_account_address = if let Some(seed) = seed {
-        Pubkey::create_with_seed(&vote_account_pubkey, &seed, &solana_vote_program::id())?
+        Pubkey::create_with_seed(&vote_account_pubkey, seed, &solana_vote_program::id())?
     } else {
         vote_account_pubkey
     };
@@ -549,7 +549,7 @@ pub fn process_create_vote_account(
     let mut tx = Transaction::new_unsigned(message);
     tx.try_sign(&config.signers, recent_blockhash)?;
     let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
-    log_instruction_custom_error::<SystemError>(result, &config)
+    log_instruction_custom_error::<SystemError>(result, config)
 }

 pub fn process_vote_authorize(
@@ -592,7 +592,7 @@ pub fn process_vote_authorize(
         config.commitment,
     )?;
     let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
-    log_instruction_custom_error::<VoteError>(result, &config)
+    log_instruction_custom_error::<VoteError>(result, config)
 }

 pub fn process_vote_update_validator(
@@ -629,7 +629,7 @@ pub fn process_vote_update_validator(
         config.commitment,
     )?;
     let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
-    log_instruction_custom_error::<VoteError>(result, &config)
+    log_instruction_custom_error::<VoteError>(result, config)
 }

 pub fn process_vote_update_commission(
@@ -660,7 +660,7 @@ pub fn process_vote_update_commission(
         config.commitment,
     )?;
     let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
-    log_instruction_custom_error::<VoteError>(result, &config)
+    log_instruction_custom_error::<VoteError>(result, config)
 }

 fn get_vote_account(
@@ -763,7 +763,7 @@ pub fn process_withdraw_from_vote_account(
     let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;
     let withdraw_authority = config.signers[withdraw_authority];

-    let current_balance = rpc_client.get_balance(&vote_account_pubkey)?;
+    let current_balance = rpc_client.get_balance(vote_account_pubkey)?;
     let minimum_balance = rpc_client.get_minimum_balance_for_rent_exemption(VoteState::size_of())?;

     let lamports = match withdraw_amount {
@@ -798,7 +798,7 @@ pub fn process_withdraw_from_vote_account(
         config.commitment,
     )?;
     let result = rpc_client.send_and_confirm_transaction_with_spinner(&transaction);
-    log_instruction_custom_error::<VoteError>(result, &config)
+    log_instruction_custom_error::<VoteError>(result, config)
 }

 #[cfg(test)]
@@ -826,7 +826,7 @@ mod tests {
         let default_keypair = Keypair::new();
         let (default_keypair_file, mut tmp_file) = make_tmp_file();
         write_keypair(&default_keypair, tmp_file.as_file_mut()).unwrap();
-        let default_signer = DefaultSigner::new(default_keypair_file.clone());
+        let default_signer = DefaultSigner::new("", &default_keypair_file);

         let test_authorize_voter = test_commands.clone().get_matches_from(vec![
             "test",
@@ -68,7 +68,7 @@ fn test_cli_program_deploy_non_upgradeable() {
         .unwrap()
         .as_str()
         .unwrap();
-    let program_id = Pubkey::from_str(&program_id_str).unwrap();
+    let program_id = Pubkey::from_str(program_id_str).unwrap();
     let account0 = rpc_client.get_account(&program_id).unwrap();
     assert_eq!(account0.lamports, minimum_balance_for_rent_exemption);
     assert_eq!(account0.owner, bpf_loader::id());
@@ -198,7 +198,7 @@ fn test_cli_program_deploy_no_authority() {
         .unwrap()
         .as_str()
         .unwrap();
-    let program_id = Pubkey::from_str(&program_id_str).unwrap();
+    let program_id = Pubkey::from_str(program_id_str).unwrap();

     // Attempt to upgrade the program
     config.signers = vec![&keypair, &upgrade_authority];
@@ -284,7 +284,7 @@ fn test_cli_program_deploy_with_authority() {
         .unwrap();
     assert_eq!(
         program_keypair.pubkey(),
-        Pubkey::from_str(&program_pubkey_str).unwrap()
+        Pubkey::from_str(program_pubkey_str).unwrap()
     );
     let program_account = rpc_client.get_account(&program_keypair.pubkey()).unwrap();
     assert_eq!(program_account.lamports, minimum_balance_for_program);
@@ -328,7 +328,7 @@ fn test_cli_program_deploy_with_authority() {
         .unwrap()
         .as_str()
         .unwrap();
-    let program_pubkey = Pubkey::from_str(&program_pubkey_str).unwrap();
+    let program_pubkey = Pubkey::from_str(program_pubkey_str).unwrap();
     let program_account = rpc_client.get_account(&program_pubkey).unwrap();
     assert_eq!(program_account.lamports, minimum_balance_for_program);
     assert_eq!(program_account.owner, bpf_loader_upgradeable::id());
@@ -397,7 +397,7 @@ fn test_cli_program_deploy_with_authority() {
         .as_str()
         .unwrap();
     assert_eq!(
-        Pubkey::from_str(&new_upgrade_authority_str).unwrap(),
+        Pubkey::from_str(new_upgrade_authority_str).unwrap(),
         new_upgrade_authority.pubkey()
     );

@@ -452,7 +452,7 @@ fn test_cli_program_deploy_with_authority() {
         .unwrap();
     assert_eq!(
         new_upgrade_authority.pubkey(),
-        Pubkey::from_str(&authority_pubkey_str).unwrap()
+        Pubkey::from_str(authority_pubkey_str).unwrap()
     );

     // Set no authority
@@ -510,7 +510,7 @@ fn test_cli_program_deploy_with_authority() {
         .unwrap()
         .as_str()
         .unwrap();
-    let program_pubkey = Pubkey::from_str(&program_pubkey_str).unwrap();
+    let program_pubkey = Pubkey::from_str(program_pubkey_str).unwrap();
     let (programdata_pubkey, _) =
         Pubkey::find_program_address(&[program_pubkey.as_ref()], &bpf_loader_upgradeable::id());
     let programdata_account = rpc_client.get_account(&programdata_pubkey).unwrap();
@@ -606,7 +606,7 @@ fn test_cli_program_write_buffer() {
         .unwrap()
         .as_str()
         .unwrap();
-    let new_buffer_pubkey = Pubkey::from_str(&buffer_pubkey_str).unwrap();
+    let new_buffer_pubkey = Pubkey::from_str(buffer_pubkey_str).unwrap();
     let buffer_account = rpc_client.get_account(&new_buffer_pubkey).unwrap();
     assert_eq!(buffer_account.lamports, minimum_balance_for_buffer_default);
     assert_eq!(buffer_account.owner, bpf_loader_upgradeable::id());
@@ -641,7 +641,7 @@ fn test_cli_program_write_buffer() {
         .unwrap();
     assert_eq!(
         buffer_keypair.pubkey(),
-        Pubkey::from_str(&buffer_pubkey_str).unwrap()
+        Pubkey::from_str(buffer_pubkey_str).unwrap()
     );
     let buffer_account = rpc_client.get_account(&buffer_keypair.pubkey()).unwrap();
     assert_eq!(buffer_account.lamports, minimum_balance_for_buffer);
@@ -675,7 +675,7 @@ fn test_cli_program_write_buffer() {
         .unwrap();
     assert_eq!(
         keypair.pubkey(),
-        Pubkey::from_str(&authority_pubkey_str).unwrap()
+        Pubkey::from_str(authority_pubkey_str).unwrap()
     );

     // Specify buffer authority
@@ -700,7 +700,7 @@ fn test_cli_program_write_buffer() {
         .unwrap();
     assert_eq!(
         buffer_keypair.pubkey(),
-        Pubkey::from_str(&buffer_pubkey_str).unwrap()
+        Pubkey::from_str(buffer_pubkey_str).unwrap()
     );
     let buffer_account = rpc_client.get_account(&buffer_keypair.pubkey()).unwrap();
     assert_eq!(buffer_account.lamports, minimum_balance_for_buffer_default);
@@ -735,7 +735,7 @@ fn test_cli_program_write_buffer() {
         .unwrap()
         .as_str()
         .unwrap();
-    let buffer_pubkey = Pubkey::from_str(&buffer_pubkey_str).unwrap();
+    let buffer_pubkey = Pubkey::from_str(buffer_pubkey_str).unwrap();
     let buffer_account = rpc_client.get_account(&buffer_pubkey).unwrap();
     assert_eq!(buffer_account.lamports, minimum_balance_for_buffer_default);
     assert_eq!(buffer_account.owner, bpf_loader_upgradeable::id());
@@ -768,7 +768,7 @@ fn test_cli_program_write_buffer() {
         .unwrap();
     assert_eq!(
         authority_keypair.pubkey(),
-        Pubkey::from_str(&authority_pubkey_str).unwrap()
+        Pubkey::from_str(authority_pubkey_str).unwrap()
     );

     // Close buffer
@@ -806,7 +806,7 @@ fn test_cli_program_write_buffer() {
         .unwrap()
         .as_str()
         .unwrap();
-    let new_buffer_pubkey = Pubkey::from_str(&buffer_pubkey_str).unwrap();
+    let new_buffer_pubkey = Pubkey::from_str(buffer_pubkey_str).unwrap();

     // Close buffers and deposit default keypair
     let pre_lamports = rpc_client.get_account(&keypair.pubkey()).unwrap().lamports;
@@ -901,7 +901,7 @@ fn test_cli_program_set_buffer_authority() {
         .as_str()
         .unwrap();
     assert_eq!(
-        Pubkey::from_str(&new_buffer_authority_str).unwrap(),
+        Pubkey::from_str(new_buffer_authority_str).unwrap(),
         new_buffer_authority.pubkey()
     );
     let buffer_account = rpc_client.get_account(&buffer_keypair.pubkey()).unwrap();
@@ -928,7 +928,7 @@ fn test_cli_program_set_buffer_authority() {
         .as_str()
         .unwrap();
     assert_eq!(
-        Pubkey::from_str(&buffer_authority_str).unwrap(),
+        Pubkey::from_str(buffer_authority_str).unwrap(),
         buffer_keypair.pubkey()
     );
     let buffer_account = rpc_client.get_account(&buffer_keypair.pubkey()).unwrap();
@@ -1101,7 +1101,7 @@ fn test_cli_program_show() {
         .unwrap();
     assert_eq!(
         buffer_keypair.pubkey(),
-        Pubkey::from_str(&address_str).unwrap()
+        Pubkey::from_str(address_str).unwrap()
     );
     let authority_str = json
         .as_object()
@@ -1112,7 +1112,7 @@ fn test_cli_program_show() {
         .unwrap();
     assert_eq!(
         authority_keypair.pubkey(),
-        Pubkey::from_str(&authority_str).unwrap()
+        Pubkey::from_str(authority_str).unwrap()
     );
     let data_len = json
         .as_object()
@@ -1161,7 +1161,7 @@ fn test_cli_program_show() {
         .unwrap();
     assert_eq!(
         program_keypair.pubkey(),
-        Pubkey::from_str(&address_str).unwrap()
+        Pubkey::from_str(address_str).unwrap()
     );
     let programdata_address_str = json
         .as_object()
@@ -1176,7 +1176,7 @@ fn test_cli_program_show() {
     );
     assert_eq!(
         programdata_pubkey,
-        Pubkey::from_str(&programdata_address_str).unwrap()
+        Pubkey::from_str(programdata_address_str).unwrap()
     );
     let authority_str = json
         .as_object()
@@ -1187,7 +1187,7 @@ fn test_cli_program_show() {
         .unwrap();
     assert_eq!(
         authority_keypair.pubkey(),
-        Pubkey::from_str(&authority_str).unwrap()
+        Pubkey::from_str(authority_str).unwrap()
     );
     let deployed_slot = json
         .as_object()
@@ -17,10 +17,11 @@ use solana_sdk::{
     nonce::State as NonceState,
     pubkey::Pubkey,
     signature::{keypair_from_seed, Keypair, Signer},
-};
-use solana_stake_program::{
-    stake_instruction::LockupArgs,
-    stake_state::{Lockup, StakeAuthorize, StakeState},
+    stake::{
+        self,
+        instruction::LockupArgs,
+        state::{Lockup, StakeAuthorize, StakeState},
+    },
 };

 #[test]
@@ -139,7 +140,7 @@ fn test_seed_stake_delegation_and_deactivation() {
     let stake_address = Pubkey::create_with_seed(
         &config_validator.signers[0].pubkey(),
         "hi there",
-        &solana_stake_program::id(),
+        &stake::program::id(),
     )
     .expect("bad seed");

@@ -1557,6 +1558,6 @@ fn test_offline_nonced_create_stake_account_and_withdraw() {
     };
     process_command(&config).unwrap();
     let seed_address =
-        Pubkey::create_with_seed(&stake_pubkey, seed, &solana_stake_program::id()).unwrap();
+        Pubkey::create_with_seed(&stake_pubkey, seed, &stake::program::id()).unwrap();
     check_recent_balance(50_000, &rpc_client, &seed_address);
 }
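Note: these test hunks track the relocation of the stake program out of the standalone solana_stake_program crate and into solana_sdk::stake. A hedged sketch of the path migration, assuming a solana-sdk dependency at ~1.7:

    // Old paths (pre-1.7.4 layout):
    //   solana_stake_program::id()
    //   solana_stake_program::stake_instruction::LockupArgs
    //   solana_stake_program::stake_state::StakeState
    use solana_sdk::{pubkey::Pubkey, stake};

    fn stake_program_id() -> Pubkey {
        // New home of the old solana_stake_program::id()
        stake::program::id()
    }

    fn main() {
        println!("stake program: {}", stake_program_id());
    }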
@@ -16,6 +16,7 @@ use solana_sdk::{
     nonce::State as NonceState,
     pubkey::Pubkey,
     signature::{keypair_from_seed, Keypair, NullSigner, Signer},
+    stake,
 };

 #[test]
@@ -513,7 +514,7 @@ fn test_transfer_with_seed() {
     let sender_pubkey = config.signers[0].pubkey();
     let recipient_pubkey = Pubkey::new(&[1u8; 32]);
     let derived_address_seed = "seed".to_string();
-    let derived_address_program_id = solana_stake_program::id();
+    let derived_address_program_id = stake::program::id();
     let derived_address = Pubkey::create_with_seed(
         &sender_pubkey,
         &derived_address_seed,
@@ -1,6 +1,6 @@
 [package]
 name = "solana-client"
-version = "1.7.0"
+version = "1.7.4"
 description = "Solana Client"
 authors = ["Solana Maintainers <maintainers@solana.foundation>"]
 repository = "https://github.com/solana-labs/solana"
@@ -24,14 +24,14 @@ semver = "0.11.0"
 serde = "1.0.122"
 serde_derive = "1.0.103"
 serde_json = "1.0.56"
-solana-account-decoder = { path = "../account-decoder", version = "=1.7.0" }
-solana-clap-utils = { path = "../clap-utils", version = "=1.7.0" }
-solana-faucet = { path = "../faucet", version = "=1.7.0" }
-solana-net-utils = { path = "../net-utils", version = "=1.7.0" }
-solana-sdk = { path = "../sdk", version = "=1.7.0" }
-solana-transaction-status = { path = "../transaction-status", version = "=1.7.0" }
-solana-version = { path = "../version", version = "=1.7.0" }
-solana-vote-program = { path = "../programs/vote", version = "=1.7.0" }
+solana-account-decoder = { path = "../account-decoder", version = "=1.7.4" }
+solana-clap-utils = { path = "../clap-utils", version = "=1.7.4" }
+solana-faucet = { path = "../faucet", version = "=1.7.4" }
+solana-net-utils = { path = "../net-utils", version = "=1.7.4" }
+solana-sdk = { path = "../sdk", version = "=1.7.4" }
+solana-transaction-status = { path = "../transaction-status", version = "=1.7.4" }
+solana-version = { path = "../version", version = "=1.7.4" }
+solana-vote-program = { path = "../programs/vote", version = "=1.7.4" }
 thiserror = "1.0"
 tokio = { version = "1", features = ["full"] }
 tungstenite = "0.10.1"
@@ -40,7 +40,7 @@ url = "2.1.1"
 [dev-dependencies]
 assert_matches = "1.3.0"
 jsonrpc-http-server = "17.0.0"
-solana-logger = { path = "../logger", version = "=1.7.0" }
+solana-logger = { path = "../logger", version = "=1.7.4" }

 [package.metadata.docs.rs]
 targets = ["x86_64-unknown-linux-gnu"]
@@ -31,7 +31,7 @@ impl LargestAccountsCache {
         &self,
         filter: &Option<RpcLargestAccountsFilter>,
     ) -> Option<(u64, Vec<RpcAccountBalance>)> {
-        self.cache.get(&filter).and_then(|value| {
+        self.cache.get(filter).and_then(|value| {
             if let Ok(elapsed) = value.cached_time.elapsed() {
                 if elapsed < Duration::from_secs(self.duration) {
                     return Some((value.slot, value.accounts.clone()));
@@ -49,41 +49,36 @@ use {
     },
 };

-pub struct RpcClient {
-    sender: Box<dyn RpcSender + Send + Sync + 'static>,
+#[derive(Default)]
+pub struct RpcClientConfig {
     commitment_config: CommitmentConfig,
-    node_version: RwLock<Option<semver::Version>>,
+    confirm_transaction_initial_timeout: Option<Duration>,
 }

-fn serialize_encode_transaction(
-    transaction: &Transaction,
-    encoding: UiTransactionEncoding,
-) -> ClientResult<String> {
-    let serialized = serialize(transaction)
-        .map_err(|e| ClientErrorKind::Custom(format!("transaction serialization failed: {}", e)))?;
-    let encoded = match encoding {
-        UiTransactionEncoding::Base58 => bs58::encode(serialized).into_string(),
-        UiTransactionEncoding::Base64 => base64::encode(serialized),
-        _ => {
-            return Err(ClientErrorKind::Custom(format!(
-                "unsupported transaction encoding: {}. Supported encodings: base58, base64",
-                encoding
-            ))
-            .into())
-        }
-    };
-    Ok(encoded)
-}
+impl RpcClientConfig {
+    fn with_commitment(commitment_config: CommitmentConfig) -> Self {
+        RpcClientConfig {
+            commitment_config,
+            ..Self::default()
+        }
+    }
+}
+
+pub struct RpcClient {
+    sender: Box<dyn RpcSender + Send + Sync + 'static>,
+    config: RpcClientConfig,
+    node_version: RwLock<Option<semver::Version>>,
+}

 impl RpcClient {
     fn new_sender<T: RpcSender + Send + Sync + 'static>(
         sender: T,
-        commitment_config: CommitmentConfig,
+        config: RpcClientConfig,
     ) -> Self {
         Self {
             sender: Box::new(sender),
             node_version: RwLock::new(None),
-            commitment_config,
+            config,
         }
     }
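Note: the hunk above replaces the loose commitment_config field with an internal RpcClientConfig funneled through new_sender. The shape is a Default-able config struct plus a with_* helper built on struct-update syntax; a minimal standalone sketch (plain types stand in for CommitmentConfig):

    use std::time::Duration;

    #[derive(Default)]
    struct ClientConfig {
        commitment: String,                // stands in for CommitmentConfig
        initial_timeout: Option<Duration>, // confirm_transaction_initial_timeout
    }

    impl ClientConfig {
        fn with_commitment(commitment: String) -> Self {
            ClientConfig {
                commitment,
                // Everything not named falls back to its Default value
                ..Self::default()
            }
        }
    }

    fn main() {
        let cfg = ClientConfig::with_commitment("confirmed".to_string());
        assert!(cfg.initial_timeout.is_none());
    }

This keeps the existing constructors source-compatible while leaving room to add config fields without touching their signatures.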
@@ -92,13 +87,16 @@ impl RpcClient {
     }

     pub fn new_with_commitment(url: String, commitment_config: CommitmentConfig) -> Self {
-        Self::new_sender(HttpSender::new(url), commitment_config)
+        Self::new_sender(
+            HttpSender::new(url),
+            RpcClientConfig::with_commitment(commitment_config),
+        )
     }

     pub fn new_with_timeout(url: String, timeout: Duration) -> Self {
         Self::new_sender(
             HttpSender::new_with_timeout(url, timeout),
-            CommitmentConfig::default(),
+            RpcClientConfig::with_commitment(CommitmentConfig::default()),
         )
     }
@@ -109,18 +107,36 @@ impl RpcClient {
     ) -> Self {
         Self::new_sender(
             HttpSender::new_with_timeout(url, timeout),
-            commitment_config,
+            RpcClientConfig::with_commitment(commitment_config),
         )
     }

+    pub fn new_with_timeouts_and_commitment(
+        url: String,
+        timeout: Duration,
+        commitment_config: CommitmentConfig,
+        confirm_transaction_initial_timeout: Duration,
+    ) -> Self {
+        Self::new_sender(
+            HttpSender::new_with_timeout(url, timeout),
+            RpcClientConfig {
+                commitment_config,
+                confirm_transaction_initial_timeout: Some(confirm_transaction_initial_timeout),
+            },
+        )
+    }
+
     pub fn new_mock(url: String) -> Self {
-        Self::new_sender(MockSender::new(url), CommitmentConfig::default())
+        Self::new_sender(
+            MockSender::new(url),
+            RpcClientConfig::with_commitment(CommitmentConfig::default()),
+        )
     }

     pub fn new_mock_with_mocks(url: String, mocks: Mocks) -> Self {
         Self::new_sender(
             MockSender::new_with_mocks(url, mocks),
-            CommitmentConfig::default(),
+            RpcClientConfig::with_commitment(CommitmentConfig::default()),
         )
     }
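Note: new_with_timeouts_and_commitment is the only constructor that sets confirm_transaction_initial_timeout. A hedged usage sketch, assuming solana-client/solana-sdk 1.7.4 and a node listening at the given URL:

    use solana_client::rpc_client::RpcClient;
    use solana_sdk::commitment_config::CommitmentConfig;
    use std::time::Duration;

    fn main() {
        let client = RpcClient::new_with_timeouts_and_commitment(
            "http://localhost:8899".to_string(),
            Duration::from_secs(30), // per-request HTTP timeout
            CommitmentConfig::confirmed(),
            Duration::from_secs(5),  // grace period before an unseen blockhash is fatal
        );
        println!("commitment: {:?}", client.commitment());
    }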
@@ -159,7 +175,7 @@ impl RpcClient {
     }

     pub fn commitment(&self) -> CommitmentConfig {
-        self.commitment_config
+        self.config.commitment_config
     }

     fn use_deprecated_commitment(&self) -> Result<bool, RpcError> {
@@ -201,7 +217,7 @@ impl RpcClient {

     pub fn confirm_transaction(&self, signature: &Signature) -> ClientResult<bool> {
         Ok(self
-            .confirm_transaction_with_commitment(signature, self.commitment_config)?
+            .confirm_transaction_with_commitment(signature, self.commitment())?
             .value)
     }

@@ -227,8 +243,7 @@ impl RpcClient {
             transaction,
             RpcSendTransactionConfig {
                 preflight_commitment: Some(
-                    self.maybe_map_commitment(self.commitment_config)?
-                        .commitment,
+                    self.maybe_map_commitment(self.commitment())?.commitment,
                 ),
                 ..RpcSendTransactionConfig::default()
             },
@@ -317,7 +332,7 @@ impl RpcClient {
         self.simulate_transaction_with_config(
             transaction,
             RpcSimulateTransactionConfig {
-                commitment: Some(self.commitment_config),
+                commitment: Some(self.commitment()),
                 ..RpcSimulateTransactionConfig::default()
             },
         )
@@ -355,7 +370,7 @@ impl RpcClient {
         &self,
         signature: &Signature,
     ) -> ClientResult<Option<transaction::Result<()>>> {
-        self.get_signature_status_with_commitment(signature, self.commitment_config)
+        self.get_signature_status_with_commitment(signature, self.commitment())
     }

     pub fn get_signature_statuses(
@@ -413,7 +428,7 @@ impl RpcClient {
     }

     pub fn get_slot(&self) -> ClientResult<Slot> {
-        self.get_slot_with_commitment(self.commitment_config)
+        self.get_slot_with_commitment(self.commitment())
     }

     pub fn get_slot_with_commitment(
@@ -427,7 +442,7 @@ impl RpcClient {
     }

     pub fn get_block_height(&self) -> ClientResult<u64> {
-        self.get_block_height_with_commitment(self.commitment_config)
+        self.get_block_height_with_commitment(self.commitment())
     }

     pub fn get_block_height_with_commitment(
@@ -481,14 +496,14 @@ impl RpcClient {
                 stake_account.to_string(),
                 RpcEpochConfig {
                     epoch,
-                    commitment: Some(self.commitment_config),
+                    commitment: Some(self.commitment()),
                 }
             ]),
         )
     }

     pub fn supply(&self) -> RpcResult<RpcSupply> {
-        self.supply_with_commitment(self.commitment_config)
+        self.supply_with_commitment(self.commitment())
     }

     pub fn supply_with_commitment(
@@ -515,7 +530,7 @@ impl RpcClient {
     }

     pub fn get_vote_accounts(&self) -> ClientResult<RpcVoteAccountStatus> {
-        self.get_vote_accounts_with_commitment(self.commitment_config)
+        self.get_vote_accounts_with_commitment(self.commitment())
     }

     pub fn get_vote_accounts_with_commitment(
@@ -892,7 +907,7 @@ impl RpcClient {
     }

     pub fn get_epoch_info(&self) -> ClientResult<EpochInfo> {
-        self.get_epoch_info_with_commitment(self.commitment_config)
+        self.get_epoch_info_with_commitment(self.commitment())
     }

     pub fn get_epoch_info_with_commitment(
@@ -909,7 +924,7 @@ impl RpcClient {
         &self,
         slot: Option<Slot>,
     ) -> ClientResult<Option<RpcLeaderSchedule>> {
-        self.get_leader_schedule_with_commitment(slot, self.commitment_config)
+        self.get_leader_schedule_with_commitment(slot, self.commitment())
     }

     pub fn get_leader_schedule_with_commitment(
@@ -979,7 +994,7 @@ impl RpcClient {
                 addresses,
                 RpcEpochConfig {
                     epoch,
-                    commitment: Some(self.commitment_config),
+                    commitment: Some(self.commitment()),
                 }
             ]),
         )
@@ -1045,7 +1060,7 @@ impl RpcClient {
     /// Note that `get_account` returns `Err(..)` if the account does not exist whereas
     /// `get_account_with_commitment` returns `Ok(None)` if the account does not exist.
     pub fn get_account(&self, pubkey: &Pubkey) -> ClientResult<Account> {
-        self.get_account_with_commitment(pubkey, self.commitment_config)?
+        self.get_account_with_commitment(pubkey, self.commitment())?
             .value
             .ok_or_else(|| RpcError::ForUser(format!("AccountNotFound: pubkey={}", pubkey)).into())
     }
@@ -1101,7 +1116,7 @@ impl RpcClient {

     pub fn get_multiple_accounts(&self, pubkeys: &[Pubkey]) -> ClientResult<Vec<Option<Account>>> {
         Ok(self
-            .get_multiple_accounts_with_commitment(pubkeys, self.commitment_config)?
+            .get_multiple_accounts_with_commitment(pubkeys, self.commitment())?
             .value)
     }

@@ -1155,7 +1170,7 @@ impl RpcClient {
     /// Request the balance of the account `pubkey`.
     pub fn get_balance(&self, pubkey: &Pubkey) -> ClientResult<u64> {
         Ok(self
-            .get_balance_with_commitment(pubkey, self.commitment_config)?
+            .get_balance_with_commitment(pubkey, self.commitment())?
             .value)
     }

@@ -1213,7 +1228,7 @@ impl RpcClient {

     /// Request the transaction count.
     pub fn get_transaction_count(&self) -> ClientResult<u64> {
-        self.get_transaction_count_with_commitment(self.commitment_config)
+        self.get_transaction_count_with_commitment(self.commitment())
     }

     pub fn get_transaction_count_with_commitment(
@@ -1228,7 +1243,7 @@ impl RpcClient {

     pub fn get_recent_blockhash(&self) -> ClientResult<(Hash, FeeCalculator)> {
         let (blockhash, fee_calculator, _last_valid_slot) = self
-            .get_recent_blockhash_with_commitment(self.commitment_config)?
+            .get_recent_blockhash_with_commitment(self.commitment())?
             .value;
         Ok((blockhash, fee_calculator))
     }
@@ -1301,7 +1316,7 @@ impl RpcClient {
         blockhash: &Hash,
     ) -> ClientResult<Option<FeeCalculator>> {
         Ok(self
-            .get_fee_calculator_for_blockhash_with_commitment(blockhash, self.commitment_config)?
+            .get_fee_calculator_for_blockhash_with_commitment(blockhash, self.commitment())?
             .value)
     }

@@ -1383,7 +1398,7 @@ impl RpcClient {

     pub fn get_token_account(&self, pubkey: &Pubkey) -> ClientResult<Option<UiTokenAccount>> {
         Ok(self
-            .get_token_account_with_commitment(pubkey, self.commitment_config)?
+            .get_token_account_with_commitment(pubkey, self.commitment())?
             .value)
     }

@@ -1444,7 +1459,7 @@ impl RpcClient {

     pub fn get_token_account_balance(&self, pubkey: &Pubkey) -> ClientResult<UiTokenAmount> {
         Ok(self
-            .get_token_account_balance_with_commitment(pubkey, self.commitment_config)?
+            .get_token_account_balance_with_commitment(pubkey, self.commitment())?
             .value)
     }

@@ -1471,7 +1486,7 @@ impl RpcClient {
             .get_token_accounts_by_delegate_with_commitment(
                 delegate,
                 token_account_filter,
-                self.commitment_config,
+                self.commitment(),
             )?
             .value)
     }
@@ -1510,7 +1525,7 @@ impl RpcClient {
             .get_token_accounts_by_owner_with_commitment(
                 owner,
                 token_account_filter,
-                self.commitment_config,
+                self.commitment(),
             )?
             .value)
     }
@@ -1542,7 +1557,7 @@ impl RpcClient {

     pub fn get_token_supply(&self, mint: &Pubkey) -> ClientResult<UiTokenAmount> {
         Ok(self
-            .get_token_supply_with_commitment(mint, self.commitment_config)?
+            .get_token_supply_with_commitment(mint, self.commitment())?
             .value)
     }

@@ -1565,7 +1580,7 @@ impl RpcClient {
             pubkey,
             lamports,
             RpcRequestAirdropConfig {
-                commitment: Some(self.commitment_config),
+                commitment: Some(self.commitment()),
                 ..RpcRequestAirdropConfig::default()
             },
         )
@@ -1581,7 +1596,7 @@ impl RpcClient {
             pubkey,
             lamports,
             RpcRequestAirdropConfig {
-                commitment: Some(self.commitment_config),
+                commitment: Some(self.commitment()),
                 recent_blockhash: Some(recent_blockhash.to_string()),
             },
         )
@@ -1627,7 +1642,7 @@ impl RpcClient {
     ) -> ClientResult<u64> {
         let now = Instant::now();
         loop {
-            match self.get_balance_with_commitment(&pubkey, commitment_config) {
+            match self.get_balance_with_commitment(pubkey, commitment_config) {
                 Ok(bal) => {
                     return Ok(bal.value);
                 }
@@ -1684,7 +1699,7 @@ impl RpcClient {

     /// Poll the server to confirm a transaction.
     pub fn poll_for_signature(&self, signature: &Signature) -> ClientResult<()> {
-        self.poll_for_signature_with_commitment(signature, self.commitment_config)
+        self.poll_for_signature_with_commitment(signature, self.commitment())
     }

     /// Poll the server to confirm a transaction.
@@ -1696,7 +1711,7 @@ impl RpcClient {
         let now = Instant::now();
         loop {
             if let Ok(Some(_)) =
-                self.get_signature_status_with_commitment(&signature, commitment_config)
+                self.get_signature_status_with_commitment(signature, commitment_config)
             {
                 break;
             }
@@ -1794,7 +1809,7 @@ impl RpcClient {
     ) -> ClientResult<Signature> {
         self.send_and_confirm_transaction_with_spinner_and_commitment(
             transaction,
-            self.commitment_config,
+            self.commitment(),
         )
     }
@@ -1850,19 +1865,25 @@ impl RpcClient {
             "[{}/{}] Finalizing transaction {}",
             confirmations, desired_confirmations, signature,
         ));

+        let now = Instant::now();
+        let confirm_transaction_initial_timeout = self
+            .config
+            .confirm_transaction_initial_timeout
+            .unwrap_or_default();
         let (signature, status) = loop {
             // Get recent commitment in order to count confirmations for successful transactions
             let status = self
-                .get_signature_status_with_commitment(&signature, CommitmentConfig::processed())?;
+                .get_signature_status_with_commitment(signature, CommitmentConfig::processed())?;
             if status.is_none() {
-                if self
+                let blockhash_not_found = self
                     .get_fee_calculator_for_blockhash_with_commitment(
-                        &recent_blockhash,
+                        recent_blockhash,
                         CommitmentConfig::processed(),
                     )?
                     .value
-                    .is_none()
-                {
+                    .is_none();
+                if blockhash_not_found && now.elapsed() >= confirm_transaction_initial_timeout {
                     break (signature, status);
                 }
             } else {
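Note: the hunk above changes the confirmation loop so that a recent blockhash the node has not yet seen (at processed commitment) aborts the wait only after confirm_transaction_initial_timeout has elapsed, instead of immediately. A standalone sketch of the grace-period pattern:

    use std::time::{Duration, Instant};

    // Keep probing; report failure only once the grace period has expired.
    fn wait_with_grace(mut probe: impl FnMut() -> bool, grace: Duration) -> bool {
        let start = Instant::now();
        loop {
            if probe() {
                return true; // the signature/blockhash showed up
            }
            if start.elapsed() >= grace {
                return false; // give up only after the grace period
            }
            std::thread::sleep(Duration::from_millis(100));
        }
    }

    fn main() {
        let mut calls = 0;
        let found = wait_with_grace(
            || {
                calls += 1;
                calls > 3 // becomes visible on the fourth poll
            },
            Duration::from_secs(1),
        );
        assert!(found);
    }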
@@ -1891,7 +1912,7 @@ impl RpcClient {
             // Return when specified commitment is reached
             // Failed transactions have already been eliminated, `is_some` check is sufficient
             if self
-                .get_signature_status_with_commitment(&signature, commitment)?
+                .get_signature_status_with_commitment(signature, commitment)?
                 .is_some()
             {
                 progress_bar.set_message("Transaction confirmed");
@@ -1907,7 +1928,7 @@ impl RpcClient {
             ));
             sleep(Duration::from_millis(500));
             confirmations = self
-                .get_num_blocks_since_signature_confirmation(&signature)
+                .get_num_blocks_since_signature_confirmation(signature)
                 .unwrap_or(confirmations);
             if now.elapsed().as_secs() >= MAX_HASH_AGE_IN_SECONDS as u64 {
                 return Err(
@@ -1933,6 +1954,26 @@ impl RpcClient {
     }
 }

+fn serialize_encode_transaction(
+    transaction: &Transaction,
+    encoding: UiTransactionEncoding,
+) -> ClientResult<String> {
+    let serialized = serialize(transaction)
+        .map_err(|e| ClientErrorKind::Custom(format!("transaction serialization failed: {}", e)))?;
+    let encoded = match encoding {
+        UiTransactionEncoding::Base58 => bs58::encode(serialized).into_string(),
+        UiTransactionEncoding::Base64 => base64::encode(serialized),
+        _ => {
+            return Err(ClientErrorKind::Custom(format!(
+                "unsupported transaction encoding: {}. Supported encodings: base58, base64",
+                encoding
+            ))
+            .into())
+        }
+    };
+    Ok(encoded)
+}
+
 #[derive(Debug, Default)]
 pub struct GetConfirmedSignaturesForAddress2Config {
     pub before: Option<Signature>,
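Note: serialize_encode_transaction is unchanged apart from moving below the impl block; it bincode-serializes a transaction and renders it as base58 or base64 text. A standalone sketch of the two encodings it emits, assuming the same bs58 and base64 crates solana-client already depends on:

    fn encode(serialized: &[u8], use_base64: bool) -> String {
        if use_base64 {
            base64::encode(serialized)
        } else {
            bs58::encode(serialized).into_string()
        }
    }

    fn main() {
        let bytes = [1u8, 2, 3];
        assert_eq!(encode(&bytes, false), "Ldp");  // base58
        assert_eq!(encode(&bytes, true), "AQID");  // base64
    }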
@@ -1,5 +1,5 @@
 //! Implementation defined RPC server errors

-use thiserror::Error;
+use {
+    crate::rpc_response::RpcSimulateTransactionResult,
+    jsonrpc_core::{Error, ErrorCode},
@@ -17,35 +17,40 @@ pub const JSON_RPC_SERVER_ERROR_NO_SNAPSHOT: i64 = -32008;
 pub const JSON_RPC_SERVER_ERROR_LONG_TERM_STORAGE_SLOT_SKIPPED: i64 = -32009;
 pub const JSON_RPC_SERVER_ERROR_KEY_EXCLUDED_FROM_SECONDARY_INDEX: i64 = -32010;
 pub const JSON_RPC_SERVER_ERROR_TRANSACTION_HISTORY_NOT_AVAILABLE: i64 = -32011;
+pub const JSON_RPC_SCAN_ERROR: i64 = -32012;

+#[derive(Error, Debug)]
 pub enum RpcCustomError {
+    #[error("BlockCleanedUp")]
     BlockCleanedUp {
         slot: Slot,
         first_available_block: Slot,
     },
+    #[error("SendTransactionPreflightFailure")]
     SendTransactionPreflightFailure {
         message: String,
         result: RpcSimulateTransactionResult,
     },
+    #[error("TransactionSignatureVerificationFailure")]
     TransactionSignatureVerificationFailure,
-    BlockNotAvailable {
-        slot: Slot,
-    },
-    NodeUnhealthy {
-        num_slots_behind: Option<Slot>,
-    },
+    #[error("BlockNotAvailable")]
+    BlockNotAvailable { slot: Slot },
+    #[error("NodeUnhealthy")]
+    NodeUnhealthy { num_slots_behind: Option<Slot> },
+    #[error("TransactionPrecompileVerificationFailure")]
     TransactionPrecompileVerificationFailure(solana_sdk::transaction::TransactionError),
-    SlotSkipped {
-        slot: Slot,
-    },
+    #[error("SlotSkipped")]
+    SlotSkipped { slot: Slot },
+    #[error("NoSnapshot")]
     NoSnapshot,
-    LongTermStorageSlotSkipped {
-        slot: Slot,
-    },
-    KeyExcludedFromSecondaryIndex {
-        index_key: String,
-    },
+    #[error("LongTermStorageSlotSkipped")]
+    LongTermStorageSlotSkipped { slot: Slot },
+    #[error("KeyExcludedFromSecondaryIndex")]
+    KeyExcludedFromSecondaryIndex { index_key: String },
+    #[error("TransactionHistoryNotAvailable")]
     TransactionHistoryNotAvailable,
+    #[error("ScanError")]
+    ScanError { message: String },
 }

 #[derive(Debug, Serialize, Deserialize)]
@@ -141,6 +146,11 @@ impl From<RpcCustomError> for Error {
                 message: "Transaction history is not available from this node".to_string(),
                 data: None,
             },
+            RpcCustomError::ScanError { message } => Self {
+                code: ErrorCode::ServerError(JSON_RPC_SCAN_ERROR),
+                message,
+                data: None,
+            },
         }
     }
 }
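Note: the enum now derives thiserror's Error, so every variant carries a Display string via #[error(...)] instead of relying on Debug alone. A minimal sketch of the pattern, assuming the thiserror crate:

    use thiserror::Error;

    #[derive(Error, Debug)]
    pub enum CustomRpcError {
        #[error("BlockNotAvailable")]
        BlockNotAvailable { slot: u64 },
        #[error("ScanError")]
        ScanError { message: String },
    }

    fn main() {
        let e = CustomRpcError::BlockNotAvailable { slot: 42 };
        // Display comes from the #[error(...)] attribute, not from Debug.
        assert_eq!(e.to_string(), "BlockNotAvailable");
    }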
@@ -451,7 +451,7 @@ impl SyncClient for ThinClient {
     ) -> TransportResult<Option<transaction::Result<()>>> {
         let status = self
             .rpc_client()
-            .get_signature_status(&signature)
+            .get_signature_status(signature)
             .map_err(|err| {
                 io::Error::new(
                     io::ErrorKind::Other,
@@ -468,7 +468,7 @@ impl SyncClient for ThinClient {
     ) -> TransportResult<Option<transaction::Result<()>>> {
         let status = self
             .rpc_client()
-            .get_signature_status_with_commitment(&signature, commitment_config)
+            .get_signature_status_with_commitment(signature, commitment_config)
             .map_err(|err| {
                 io::Error::new(
                     io::ErrorKind::Other,
@@ -121,7 +121,7 @@ struct LeaderTpuCache {
 impl LeaderTpuCache {
     fn new(rpc_client: &RpcClient, first_slot: Slot) -> Self {
         let leaders = Self::fetch_slot_leaders(rpc_client, first_slot).unwrap_or_default();
-        let leader_tpu_map = Self::fetch_cluster_tpu_sockets(&rpc_client).unwrap_or_default();
+        let leader_tpu_map = Self::fetch_cluster_tpu_sockets(rpc_client).unwrap_or_default();
         Self {
             first_slot,
             leaders,
@@ -1,7 +1,7 @@
 [package]
 name = "solana-core"
 description = "Blockchain, Rebuilt for Scale"
-version = "1.7.0"
+version = "1.7.4"
 homepage = "https://solana.com/"
 documentation = "https://docs.rs/solana-core"
 readme = "../README.md"
@@ -22,17 +22,12 @@ bv = { version = "0.11.1", features = ["serde"] }
 bs58 = "0.3.1"
 byteorder = "1.3.4"
 chrono = { version = "0.4.11", features = ["serde"] }
-core_affinity = "0.5.10"
 crossbeam-channel = "0.4"
 ed25519-dalek = "=1.0.1"
 fs_extra = "1.2.0"
 flate2 = "1.0"
 indexmap = { version = "1.5", features = ["rayon"] }
 itertools = "0.9.0"
-jsonrpc-core = "17.0.0"
-jsonrpc-core-client = { version = "17.0.0", features = ["ipc", "ws"] }
-jsonrpc-derive = "17.0.0"
-jsonrpc-http-server = "17.0.0"
 libc = "0.2.81"
 log = "0.4.11"
 lru = "0.6.1"
@@ -44,54 +39,52 @@ rand_chacha = "0.2.2"
 rand_core = "0.6.2"
 raptorq = "1.4.2"
 rayon = "1.5.0"
 regex = "1.3.9"
 retain_mut = "0.1.2"
 serde = "1.0.122"
 serde_bytes = "0.11"
 serde_derive = "1.0.103"
 serde_json = "1.0.56"
-solana-account-decoder = { path = "../account-decoder", version = "=1.7.0" }
-solana-banks-server = { path = "../banks-server", version = "=1.7.0" }
-solana-clap-utils = { path = "../clap-utils", version = "=1.7.0" }
-solana-client = { path = "../client", version = "=1.7.0" }
-solana-faucet = { path = "../faucet", version = "=1.7.0" }
-solana-gossip = { path = "../gossip", version = "=1.7.0" }
-solana-ledger = { path = "../ledger", version = "=1.7.0" }
-solana-logger = { path = "../logger", version = "=1.7.0" }
-solana-merkle-tree = { path = "../merkle-tree", version = "=1.7.0" }
-solana-metrics = { path = "../metrics", version = "=1.7.0" }
-solana-measure = { path = "../measure", version = "=1.7.0" }
-solana-net-utils = { path = "../net-utils", version = "=1.7.0" }
-solana-perf = { path = "../perf", version = "=1.7.0" }
-solana-program-test = { path = "../program-test", version = "=1.7.0" }
-solana-rpc = { path = "../rpc", version = "=1.7.0" }
-solana-runtime = { path = "../runtime", version = "=1.7.0" }
-solana-sdk = { path = "../sdk", version = "=1.7.0" }
-solana-frozen-abi = { path = "../frozen-abi", version = "=1.7.0" }
-solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.7.0" }
-solana-stake-program = { path = "../programs/stake", version = "=1.7.0" }
-solana-storage-bigtable = { path = "../storage-bigtable", version = "=1.7.0" }
-solana-streamer = { path = "../streamer", version = "=1.7.0" }
-solana-sys-tuner = { path = "../sys-tuner", version = "=1.7.0" }
-solana-transaction-status = { path = "../transaction-status", version = "=1.7.0" }
-solana-version = { path = "../version", version = "=1.7.0" }
-solana-vote-program = { path = "../programs/vote", version = "=1.7.0" }
-spl-token-v2-0 = { package = "spl-token", version = "=3.1.0", features = ["no-entrypoint"] }
+solana-account-decoder = { path = "../account-decoder", version = "=1.7.4" }
+solana-banks-server = { path = "../banks-server", version = "=1.7.4" }
+solana-clap-utils = { path = "../clap-utils", version = "=1.7.4" }
+solana-client = { path = "../client", version = "=1.7.4" }
+solana-gossip = { path = "../gossip", version = "=1.7.4" }
+solana-ledger = { path = "../ledger", version = "=1.7.4" }
+solana-logger = { path = "../logger", version = "=1.7.4" }
+solana-merkle-tree = { path = "../merkle-tree", version = "=1.7.4" }
+solana-metrics = { path = "../metrics", version = "=1.7.4" }
+solana-measure = { path = "../measure", version = "=1.7.4" }
+solana-net-utils = { path = "../net-utils", version = "=1.7.4" }
+solana-perf = { path = "../perf", version = "=1.7.4" }
+solana-poh = { path = "../poh", version = "=1.7.4" }
+solana-program-test = { path = "../program-test", version = "=1.7.4" }
+solana-rpc = { path = "../rpc", version = "=1.7.4" }
+solana-runtime = { path = "../runtime", version = "=1.7.4" }
+solana-sdk = { path = "../sdk", version = "=1.7.4" }
+solana-frozen-abi = { path = "../frozen-abi", version = "=1.7.4" }
+solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.7.4" }
+solana-streamer = { path = "../streamer", version = "=1.7.4" }
+solana-transaction-status = { path = "../transaction-status", version = "=1.7.4" }
+solana-version = { path = "../version", version = "=1.7.4" }
+solana-vote-program = { path = "../programs/vote", version = "=1.7.4" }
+spl-token-v2-0 = { package = "spl-token", version = "=3.1.1", features = ["no-entrypoint"] }
 tempfile = "3.1.0"
 thiserror = "1.0"
 tokio = { version = "1", features = ["full"] }
 tokio_02 = { version = "0.2", package = "tokio", features = ["full"] }
 tokio-util = { version = "0.3", features = ["codec"] } # This crate needs to stay in sync with tokio_02, until that dependency can be removed
-solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.7.0" }
+solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.7.4" }
 trees = "0.2.1"

 [dev-dependencies]
+jsonrpc-core = "17.1.0"
+jsonrpc-core-client = { version = "17.1.0", features = ["ipc", "ws"] }
 matches = "0.1.6"
 num_cpus = "1.13.0"
 reqwest = { version = "0.11.2", default-features = false, features = ["blocking", "rustls-tls", "json"] }
 serde_json = "1.0.56"
 serial_test = "0.4.0"
+solana-stake-program = { path = "../programs/stake", version = "=1.7.4" }
+solana-version = { path = "../version", version = "=1.7.4" }
 symlink = "0.1.0"
 systemstat = "0.1.5"
 tokio_02 = { version = "0.2", package = "tokio", features = ["full"] }

 [build-dependencies]
 rustc_version = "0.2"
@@ -111,9 +104,6 @@ name = "gen_keys"
 [[bench]]
 name = "sigverify_stage"

-[[bench]]
-name = "poh"
-
 [[bench]]
 name = "retransmit_stage"
@@ -7,8 +7,7 @@ use crossbeam_channel::unbounded;
 use log::*;
 use rand::{thread_rng, Rng};
 use rayon::prelude::*;
-use solana_core::banking_stage::{create_test_recorder, BankingStage, BankingStageStats};
-use solana_core::poh_recorder::WorkingBankEntry;
+use solana_core::banking_stage::{BankingStage, BankingStageStats};
 use solana_gossip::cluster_info::ClusterInfo;
 use solana_gossip::cluster_info::Node;
 use solana_ledger::blockstore_processor::process_entries;
@@ -17,6 +16,7 @@ use solana_ledger::genesis_utils::{create_genesis_config, GenesisConfigInfo};
 use solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path};
 use solana_perf::packet::to_packets_chunked;
 use solana_perf::test_tx::test_tx;
+use solana_poh::poh_recorder::{create_test_recorder, WorkingBankEntry};
 use solana_runtime::bank::Bank;
 use solana_sdk::genesis_config::GenesisConfig;
 use solana_sdk::hash::Hash;
@@ -183,7 +183,7 @@ fn bench_banking(bencher: &mut Bencher, tx_type: TransactionType) {
     });
     //sanity check, make sure all the transactions can execute sequentially
     transactions.iter().for_each(|tx| {
-        let res = bank.process_transaction(&tx);
+        let res = bank.process_transaction(tx);
         assert!(res.is_ok(), "sanity test transactions");
     });
     bank.clear_signatures();
@@ -24,10 +24,10 @@ fn bench_save_tower(bench: &mut Bencher) {
     let heaviest_bank = BankForks::new(Bank::default()).working_bank();
     let tower = Tower::new(
         &node_keypair.pubkey(),
-        &vote_account_pubkey,
+        vote_account_pubkey,
         0,
         &heaviest_bank,
-        &path,
+        path,
     );

     bench.iter(move || {
@@ -39,7 +39,12 @@ fn bench_retransmitter(bencher: &mut Bencher) {
     const NUM_PEERS: usize = 4;
     let mut peer_sockets = Vec::new();
     for _ in 0..NUM_PEERS {
-        let id = pubkey::new_rand();
+        // This ensures that cluster_info.id() is the root of the turbine
+        // retransmit tree, so the shreds are retransmitted to all other
+        // nodes in the cluster.
+        let id = std::iter::repeat_with(pubkey::new_rand)
+            .find(|pk| cluster_info.id() < *pk)
+            .unwrap();
         let socket = UdpSocket::bind("0.0.0.0:0").unwrap();
         let mut contact_info = ContactInfo::new_localhost(&id, timestamp());
         contact_info.tvu = socket.local_addr().unwrap();
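Note: the bench fix keeps drawing random pubkeys until one sorts after cluster_info.id(), guaranteeing the local node is the retransmit-tree root. A standalone sketch of the repeat_with + find idiom (plain integers stand in for pubkeys, a toy LCG for the RNG):

    fn main() {
        let my_id: u32 = 7; // stands in for cluster_info.id()
        let mut seed: u32 = 1;
        let pseudo_rand = move || {
            // toy linear congruential generator, for the sketch only
            seed = seed.wrapping_mul(1_103_515_245).wrapping_add(12_345);
            seed % 100
        };
        let id = std::iter::repeat_with(pseudo_rand)
            .find(|candidate| my_id < *candidate)
            .unwrap(); // fine here: some draw eventually exceeds my_id
        assert!(id > my_id);
    }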
@@ -148,7 +148,7 @@ impl AccountsHashVerifier {
         for (slot, hash) in hashes.iter() {
             slot_to_hash.insert(*slot, *hash);
         }
-        if Self::should_halt(&cluster_info, trusted_validators, &mut slot_to_hash) {
+        if Self::should_halt(cluster_info, trusted_validators, &mut slot_to_hash) {
             exit.store(true, Ordering::Relaxed);
         }
     }
@@ -1,20 +1,13 @@
 //! The `banking_stage` processes Transaction messages. It is intended to be used
 //! to construct a software pipeline. The stage uses all available CPU cores and
 //! can do its processing in parallel with signature verification on the GPU.
-use crate::{
-    packet_hasher::PacketHasher,
-    poh_recorder::{PohRecorder, PohRecorderError, TransactionRecorder, WorkingBankEntry},
-    poh_service::{self, PohService},
-};
+use crate::packet_hasher::PacketHasher;
 use crossbeam_channel::{Receiver as CrossbeamReceiver, RecvTimeoutError};
 use itertools::Itertools;
 use lru::LruCache;
 use retain_mut::RetainMut;
 use solana_gossip::cluster_info::ClusterInfo;
-use solana_ledger::{
-    blockstore::Blockstore, blockstore_processor::TransactionStatusSender,
-    entry::hash_transactions, leader_schedule_cache::LeaderScheduleCache,
-};
+use solana_ledger::{blockstore_processor::TransactionStatusSender, entry::hash_transactions};
 use solana_measure::measure::Measure;
 use solana_metrics::{inc_new_counter_debug, inc_new_counter_info};
 use solana_perf::{
@@ -22,6 +15,7 @@ use solana_perf::{
     packet::{limited_deserialize, Packet, Packets, PACKETS_PER_BATCH},
     perf_libs,
 };
+use solana_poh::poh_recorder::{PohRecorder, PohRecorderError, TransactionRecorder};
 use solana_runtime::{
     accounts_db::ErrorCounters,
     bank::{
@@ -39,7 +33,6 @@ use solana_sdk::{
         MAX_TRANSACTION_FORWARDING_DELAY_GPU,
     },
     message::Message,
-    poh_config::PohConfig,
     pubkey::Pubkey,
     short_vec::decode_shortu16_len,
     signature::Signature,
@@ -57,8 +50,7 @@ use std::{
     mem::size_of,
     net::UdpSocket,
     ops::DerefMut,
-    sync::atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering},
-    sync::mpsc::Receiver,
+    sync::atomic::{AtomicU64, AtomicUsize, Ordering},
     sync::{Arc, Mutex},
     thread::{self, Builder, JoinHandle},
     time::Duration,
@@ -360,9 +352,9 @@ impl BankingStage {
             // We've hit the end of this slot, no need to perform more processing,
             // just filter the remaining packets for the invalid (e.g. too old) ones
             let new_unprocessed_indexes = Self::filter_unprocessed_packets(
-                &bank,
-                &msgs,
-                &original_unprocessed_indexes,
+                bank,
+                msgs,
+                original_unprocessed_indexes,
                 my_pubkey,
                 *next_leader,
             );
@@ -377,8 +369,8 @@ impl BankingStage {
             Self::process_packets_transactions(
                 &bank,
                 &bank_creation_time,
-                &recorder,
-                &msgs,
+                recorder,
+                msgs,
                 original_unprocessed_indexes.to_owned(),
                 transaction_status_sender.clone(),
                 gossip_vote_sender,
@@ -411,7 +403,7 @@ impl BankingStage {
             // `original_unprocessed_indexes` must have remaining packets to process
             // if not yet processed.
             assert!(Self::packet_has_more_unprocessed_transactions(
-                &original_unprocessed_indexes
+                original_unprocessed_indexes
             ));
             true
         }
@@ -605,7 +597,7 @@ impl BankingStage {
             let decision = Self::process_buffered_packets(
                 &my_pubkey,
                 &socket,
-                &poh_recorder,
+                poh_recorder,
                 cluster_info,
                 &mut buffered_packets,
                 enable_forwarding,
@@ -635,8 +627,8 @@ impl BankingStage {

             match Self::process_packets(
                 &my_pubkey,
-                &verified_receiver,
-                &poh_recorder,
+                verified_receiver,
+                poh_recorder,
                 recv_start,
                 recv_timeout,
                 id,
@@ -746,7 +738,7 @@ impl BankingStage {
         let mut mint_decimals: HashMap<Pubkey, u8> = HashMap::new();

         let pre_token_balances = if transaction_status_sender.is_some() {
-            collect_token_balances(&bank, &batch, &mut mint_decimals)
+            collect_token_balances(bank, batch, &mut mint_decimals)
         } else {
             vec![]
         };
@@ -806,7 +798,7 @@ impl BankingStage {
         if let Some(transaction_status_sender) = transaction_status_sender {
             let txs = batch.transactions_iter().cloned().collect();
             let post_balances = bank.collect_balances(batch);
-            let post_token_balances = collect_token_balances(&bank, &batch, &mut mint_decimals);
+            let post_token_balances = collect_token_balances(bank, batch, &mut mint_decimals);
             transaction_status_sender.send_transaction_status_batch(
                 bank.clone(),
                 txs,
@@ -1257,7 +1249,7 @@ impl BankingStage {
                 &bank,
                 &msgs,
                 &packet_indexes,
-                &my_pubkey,
+                my_pubkey,
                 next_leader,
             );
             Self::push_unprocessed(
@@ -1392,66 +1384,29 @@ fn next_leader_tpu_forwards(
     }
 }

-pub fn create_test_recorder(
-    bank: &Arc<Bank>,
-    blockstore: &Arc<Blockstore>,
-    poh_config: Option<PohConfig>,
-) -> (
-    Arc<AtomicBool>,
-    Arc<Mutex<PohRecorder>>,
-    PohService,
-    Receiver<WorkingBankEntry>,
-) {
-    let exit = Arc::new(AtomicBool::new(false));
-    let poh_config = Arc::new(poh_config.unwrap_or_default());
-    let (mut poh_recorder, entry_receiver, record_receiver) = PohRecorder::new(
-        bank.tick_height(),
-        bank.last_blockhash(),
-        bank.slot(),
-        Some((4, 4)),
-        bank.ticks_per_slot(),
-        &Pubkey::default(),
-        blockstore,
-        &Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
-        &poh_config,
-        exit.clone(),
-    );
-    poh_recorder.set_bank(&bank);
-
-    let poh_recorder = Arc::new(Mutex::new(poh_recorder));
-    let poh_service = PohService::new(
-        poh_recorder.clone(),
-        &poh_config,
-        &exit,
-        bank.ticks_per_slot(),
-        poh_service::DEFAULT_PINNED_CPU_CORE,
-        poh_service::DEFAULT_HASHES_PER_BATCH,
-        record_receiver,
-    );
-
-    (exit, poh_recorder, poh_service, entry_receiver)
-}
-
 #[cfg(test)]
 mod tests {
     use super::*;
-    use crate::{
-        poh_recorder::Record, poh_recorder::WorkingBank,
-        transaction_status_service::TransactionStatusService,
-    };
     use crossbeam_channel::unbounded;
     use itertools::Itertools;
     use solana_gossip::cluster_info::Node;
     use solana_ledger::{
-        blockstore::entries_to_test_shreds,
+        blockstore::{entries_to_test_shreds, Blockstore},
         entry::{next_entry, Entry, EntrySlice},
         genesis_utils::{create_genesis_config, GenesisConfigInfo},
         get_tmp_ledger_path,
         leader_schedule_cache::LeaderScheduleCache,
     };
     use solana_perf::packet::to_packets_chunked;
+    use solana_poh::{
+        poh_recorder::{create_test_recorder, Record, WorkingBank, WorkingBankEntry},
+        poh_service::PohService,
+    };
+    use solana_rpc::transaction_status_service::TransactionStatusService;
     use solana_sdk::{
         hash::Hash,
         instruction::InstructionError,
+        poh_config::PohConfig,
         signature::{Keypair, Signer},
         system_instruction::SystemError,
         system_transaction,
@@ -1461,7 +1416,10 @@ mod tests {
     use std::{
         net::SocketAddr,
         path::Path,
-        sync::atomic::{AtomicBool, Ordering},
+        sync::{
+            atomic::{AtomicBool, Ordering},
+            mpsc::Receiver,
+        },
         thread::sleep,
     };
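Note: create_test_recorder is removed from banking_stage rather than lost; together with PohRecorder, PohService, Record, WorkingBank, and TransactionStatusService it now lives in the new solana-poh and solana-rpc crates. A sketch of the replacement import paths, assuming the 1.7.4 workspace crates are available as dependencies:

    use solana_poh::{
        poh_recorder::{create_test_recorder, PohRecorder, WorkingBankEntry},
        poh_service::PohService,
    };
    use solana_rpc::transaction_status_service::TransactionStatusService;

    fn main() {
        // Imports only; call sites are unchanged apart from the crate prefix.
    }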
@@ -2491,7 +2449,7 @@ mod tests {
         Receiver<WorkingBankEntry>,
         JoinHandle<()>,
     ) {
-        Blockstore::destroy(&ledger_path).unwrap();
+        Blockstore::destroy(ledger_path).unwrap();
         let genesis_config_info = create_slow_genesis_config(10_000);
         let GenesisConfigInfo {
             genesis_config,
@@ -2499,8 +2457,8 @@ mod tests {
             ..
         } = &genesis_config_info;
         let blockstore =
-            Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger");
-        let bank = Arc::new(Bank::new_no_wallclock_throttle(&genesis_config));
+            Blockstore::open(ledger_path).expect("Expected to be able to open database ledger");
+        let bank = Arc::new(Bank::new_no_wallclock_throttle(genesis_config));
         let exit = Arc::new(AtomicBool::default());
         let (poh_recorder, entry_receiver, record_receiver) = PohRecorder::new(
             bank.tick_height(),
@@ -2521,9 +2479,9 @@ mod tests {
         let pubkey1 = solana_sdk::pubkey::new_rand();
         let pubkey2 = solana_sdk::pubkey::new_rand();
         let transactions = vec![
-            system_transaction::transfer(&mint_keypair, &pubkey0, 1, genesis_config.hash()),
-            system_transaction::transfer(&mint_keypair, &pubkey1, 1, genesis_config.hash()),
-            system_transaction::transfer(&mint_keypair, &pubkey2, 1, genesis_config.hash()),
+            system_transaction::transfer(mint_keypair, &pubkey0, 1, genesis_config.hash()),
+            system_transaction::transfer(mint_keypair, &pubkey1, 1, genesis_config.hash()),
+            system_transaction::transfer(mint_keypair, &pubkey2, 1, genesis_config.hash()),
         ];
         let poh_simulator = simulate_poh(record_receiver, &poh_recorder);
@@ -1,14 +1,12 @@
 //! A stage to broadcast data from a leader node to validators
 #![allow(clippy::rc_buffer)]
 use self::{
+    broadcast_duplicates_run::BroadcastDuplicatesRun,
     broadcast_fake_shreds_run::BroadcastFakeShredsRun, broadcast_metrics::*,
     fail_entry_verification_broadcast_run::FailEntryVerificationBroadcastRun,
     standard_broadcast_run::StandardBroadcastRun,
 };
-use crate::{
-    poh_recorder::WorkingBankEntry,
-    result::{Error, Result},
-};
+use crate::result::{Error, Result};
 use crossbeam_channel::{
     Receiver as CrossbeamReceiver, RecvTimeoutError as CrossbeamRecvTimeoutError,
     Sender as CrossbeamSender,
@@ -22,6 +20,7 @@ use solana_gossip::{
 use solana_ledger::{blockstore::Blockstore, shred::Shred};
 use solana_measure::measure::Measure;
 use solana_metrics::{inc_new_counter_error, inc_new_counter_info};
+use solana_poh::poh_recorder::WorkingBankEntry;
 use solana_runtime::bank::Bank;
 use solana_sdk::timing::timestamp;
 use solana_sdk::{clock::Slot, pubkey::Pubkey};
@@ -37,6 +36,7 @@ use std::{
     time::{Duration, Instant},
 };

+mod broadcast_duplicates_run;
 mod broadcast_fake_shreds_run;
 pub mod broadcast_metrics;
 pub(crate) mod broadcast_utils;
@@ -54,11 +54,20 @@ pub enum BroadcastStageReturnType {
     ChannelDisconnected,
 }

+#[derive(PartialEq, Clone, Debug)]
+pub struct BroadcastDuplicatesConfig {
+    /// Percentage of stake to send different version of slots to
+    pub stake_partition: u8,
+    /// Number of slots to wait before sending duplicate shreds
+    pub duplicate_send_delay: usize,
+}
+
 #[derive(PartialEq, Clone, Debug)]
 pub enum BroadcastStageType {
     Standard,
     FailEntryVerification,
     BroadcastFakeShreds,
+    BroadcastDuplicates(BroadcastDuplicatesConfig),
 }

 impl BroadcastStageType {
@@ -103,6 +112,16 @@ impl BroadcastStageType {
|
||||
blockstore,
|
||||
BroadcastFakeShredsRun::new(keypair, 0, shred_version),
|
||||
),
|
||||
|
||||
BroadcastStageType::BroadcastDuplicates(config) => BroadcastStage::new(
|
||||
sock,
|
||||
cluster_info,
|
||||
receiver,
|
||||
retransmit_slots_receiver,
|
||||
exit_sender,
|
||||
blockstore,
|
||||
BroadcastDuplicatesRun::new(keypair, shred_version, config.clone()),
|
||||
),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -172,15 +191,15 @@ impl BroadcastStage {
|
||||
fn handle_error(r: Result<()>, name: &str) -> Option<BroadcastStageReturnType> {
|
||||
if let Err(e) = r {
|
||||
match e {
|
||||
Error::RecvTimeoutError(RecvTimeoutError::Disconnected)
|
||||
| Error::SendError
|
||||
| Error::RecvError(RecvError)
|
||||
| Error::CrossbeamRecvTimeoutError(CrossbeamRecvTimeoutError::Disconnected) => {
|
||||
Error::RecvTimeout(RecvTimeoutError::Disconnected)
|
||||
| Error::Send
|
||||
| Error::Recv(RecvError)
|
||||
| Error::CrossbeamRecvTimeout(CrossbeamRecvTimeoutError::Disconnected) => {
|
||||
return Some(BroadcastStageReturnType::ChannelDisconnected);
|
||||
}
|
||||
Error::RecvTimeoutError(RecvTimeoutError::Timeout)
|
||||
| Error::CrossbeamRecvTimeoutError(CrossbeamRecvTimeoutError::Timeout) => (),
|
||||
Error::ClusterInfoError(ClusterInfoError::NoPeers) => (), // TODO: Why are the unit-tests throwing hundreds of these?
|
||||
Error::RecvTimeout(RecvTimeoutError::Timeout)
|
||||
| Error::CrossbeamRecvTimeout(CrossbeamRecvTimeoutError::Timeout) => (),
|
||||
Error::ClusterInfo(ClusterInfoError::NoPeers) => (), // TODO: Why are the unit-tests throwing hundreds of these?
|
||||
_ => {
|
||||
inc_new_counter_error!("streamer-broadcaster-error", 1, 1);
|
||||
error!("{} broadcaster error: {:?}", name, e);
|
||||
@@ -389,7 +408,7 @@ pub fn broadcast_shreds(
|
||||
let packets: Vec<_> = shreds
|
||||
.iter()
|
||||
.map(|shred| {
|
||||
let broadcast_index = weighted_best(&peers_and_stakes, shred.seed());
|
||||
let broadcast_index = weighted_best(peers_and_stakes, shred.seed());
|
||||
|
||||
(&shred.payload, &peers[broadcast_index].tvu)
|
||||
})
|
||||
@@ -410,7 +429,7 @@ pub fn broadcast_shreds(
|
||||
send_mmsg_time.stop();
|
||||
transmit_stats.send_mmsg_elapsed += send_mmsg_time.as_us();
|
||||
|
||||
let num_live_peers = num_live_peers(&peers);
|
||||
let num_live_peers = num_live_peers(peers);
|
||||
update_peer_stats(
|
||||
num_live_peers,
|
||||
broadcast_len as i64 + 1,
|
||||
|
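
Note on the `broadcast_shreds` hunks above: dropping the extra `&` on `peers_and_stakes` and `peers` is a needless-borrow cleanup with no behavior change. `weighted_best` itself still picks one peer index per shred from the shred's seed, so the choice is stake-biased but deterministic per shred. A minimal sketch of that idea, using a hypothetical `weighted_pick` stand-in rather than the crate's actual implementation:

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

/// Hypothetical stand-in for `weighted_best`: map a 32-byte shred seed onto
/// one peer index, biased by stake, so the same shred always picks the same peer.
/// `peers_and_stakes` here is assumed to be (peer index, stake) pairs.
fn weighted_pick(peers_and_stakes: &[(usize, u64)], seed: [u8; 32]) -> usize {
    let total: u64 = peers_and_stakes.iter().map(|(_, stake)| *stake).sum();
    let mut hasher = DefaultHasher::new();
    seed.hash(&mut hasher);
    // Same seed -> same point on the stake line -> same peer.
    let mut point = hasher.finish() % total.max(1);
    for (index, stake) in peers_and_stakes {
        if point < *stake {
            return *index;
        }
        point -= *stake;
    }
    peers_and_stakes.last().map(|(index, _)| *index).unwrap_or(0)
}
```

Because the point is derived only from the shred's seed, retransmitting the same shred always targets the same peer.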
333 core/src/broadcast_stage/broadcast_duplicates_run.rs Normal file
@@ -0,0 +1,333 @@
use super::broadcast_utils::ReceiveResults;
use super::*;
use log::*;
use solana_ledger::entry::{create_ticks, Entry, EntrySlice};
use solana_ledger::shred::Shredder;
use solana_runtime::blockhash_queue::BlockhashQueue;
use solana_sdk::clock::Slot;
use solana_sdk::fee_calculator::FeeCalculator;
use solana_sdk::hash::Hash;
use solana_sdk::signature::{Keypair, Signer};
use solana_sdk::transaction::Transaction;
use std::collections::VecDeque;
use std::sync::Mutex;

// Queue which facilitates delivering shreds with a delay
type DelayedQueue = VecDeque<(Option<Pubkey>, Option<Vec<Shred>>)>;

#[derive(Clone)]
pub(super) struct BroadcastDuplicatesRun {
config: BroadcastDuplicatesConfig,
// Local queue for broadcast to track which duplicate blockhashes we've sent
duplicate_queue: BlockhashQueue,
// Shared queue between broadcast and transmit threads
delayed_queue: Arc<Mutex<DelayedQueue>>,
// Buffer for duplicate entries
duplicate_entries_buffer: Vec<Entry>,
last_duplicate_entry_hash: Hash,
last_broadcast_slot: Slot,
next_shred_index: u32,
shred_version: u16,
keypair: Arc<Keypair>,
}

impl BroadcastDuplicatesRun {
pub(super) fn new(
keypair: Arc<Keypair>,
shred_version: u16,
config: BroadcastDuplicatesConfig,
) -> Self {
let mut delayed_queue = DelayedQueue::new();
delayed_queue.resize(config.duplicate_send_delay, (None, None));
Self {
config,
delayed_queue: Arc::new(Mutex::new(delayed_queue)),
duplicate_queue: BlockhashQueue::default(),
duplicate_entries_buffer: vec![],
next_shred_index: u32::MAX,
last_broadcast_slot: 0,
last_duplicate_entry_hash: Hash::default(),
shred_version,
keypair,
}
}

fn queue_or_create_duplicate_entries(
&mut self,
bank: &Arc<Bank>,
receive_results: &ReceiveResults,
) -> (Vec<Entry>, u32) {
// If the last entry hash is default, grab the last blockhash from the parent bank
if self.last_duplicate_entry_hash == Hash::default() {
self.last_duplicate_entry_hash = bank.last_blockhash();
}

// Create duplicate entries by..
// 1) rearranging real entries so that all transaction entries are moved to
// the front and tick entries are moved to the back.
// 2) setting all transaction entries to zero hashes and all tick entries to `hashes_per_tick`.
// 3) removing any transactions which reference blockhashes which aren't in the
// duplicate blockhash queue.
let (duplicate_entries, next_shred_index) = if bank.slot() > MINIMUM_DUPLICATE_SLOT {
let mut tx_entries: Vec<Entry> = receive_results
.entries
.iter()
.filter_map(|entry| {
if entry.is_tick() {
return None;
}

let transactions: Vec<Transaction> = entry
.transactions
.iter()
.filter(|tx| {
self.duplicate_queue
.get_hash_age(&tx.message.recent_blockhash)
.is_some()
})
.cloned()
.collect();
if !transactions.is_empty() {
Some(Entry::new_mut(
&mut self.last_duplicate_entry_hash,
&mut 0,
transactions,
))
} else {
None
}
})
.collect();
let mut tick_entries = create_ticks(
receive_results.entries.tick_count(),
bank.hashes_per_tick().unwrap_or_default(),
self.last_duplicate_entry_hash,
);
self.duplicate_entries_buffer.append(&mut tx_entries);
self.duplicate_entries_buffer.append(&mut tick_entries);

// Only send out duplicate entries when the block is finished otherwise the
// recipient will start repairing for shreds they haven't received yet and
// hit duplicate slot issues before we want them to.
let entries = if receive_results.last_tick_height == bank.max_tick_height() {
self.duplicate_entries_buffer.drain(..).collect()
} else {
vec![]
};

// Set next shred index to 0 since we are sending the full slot
(entries, 0)
} else {
// Send real entries until we hit min duplicate slot
(receive_results.entries.clone(), self.next_shred_index)
};

// Save last duplicate entry hash to avoid invalid entry hash errors
if let Some(last_duplicate_entry) = duplicate_entries.last() {
self.last_duplicate_entry_hash = last_duplicate_entry.hash;
}

(duplicate_entries, next_shred_index)
}
}

/// Duplicate slots should only be sent once all validators have started.
/// This constant is intended to be used as a buffer so that all validators
/// are live before sending duplicate slots.
pub const MINIMUM_DUPLICATE_SLOT: Slot = 20;

impl BroadcastRun for BroadcastDuplicatesRun {
fn run(
&mut self,
blockstore: &Arc<Blockstore>,
receiver: &Receiver<WorkingBankEntry>,
socket_sender: &Sender<(TransmitShreds, Option<BroadcastShredBatchInfo>)>,
blockstore_sender: &Sender<(Arc<Vec<Shred>>, Option<BroadcastShredBatchInfo>)>,
) -> Result<()> {
// 1) Pull entries from banking stage
let receive_results = broadcast_utils::recv_slot_entries(receiver)?;
let bank = receive_results.bank.clone();
let last_tick_height = receive_results.last_tick_height;

if self.next_shred_index == u32::MAX {
self.next_shred_index = blockstore
.meta(bank.slot())
.expect("Database error")
.map(|meta| meta.consumed)
.unwrap_or(0) as u32
}

// We were not the leader, but just became leader again
if bank.slot() > self.last_broadcast_slot + 1 {
self.last_duplicate_entry_hash = Hash::default();
}
self.last_broadcast_slot = bank.slot();

let shredder = Shredder::new(
bank.slot(),
bank.parent().unwrap().slot(),
self.keypair.clone(),
(bank.tick_height() % bank.ticks_per_slot()) as u8,
self.shred_version,
)
.expect("Expected to create a new shredder");

let (data_shreds, coding_shreds, last_shred_index) = shredder.entries_to_shreds(
&receive_results.entries,
last_tick_height == bank.max_tick_height(),
self.next_shred_index,
);

let (duplicate_entries, next_duplicate_shred_index) =
self.queue_or_create_duplicate_entries(&bank, &receive_results);
let (duplicate_data_shreds, duplicate_coding_shreds, _) = if !duplicate_entries.is_empty() {
shredder.entries_to_shreds(
&duplicate_entries,
last_tick_height == bank.max_tick_height(),
next_duplicate_shred_index,
)
} else {
(vec![], vec![], 0)
};

// Manually track the shred index because relying on slot meta consumed is racy
if last_tick_height == bank.max_tick_height() {
self.next_shred_index = 0;
self.duplicate_queue
.register_hash(&self.last_duplicate_entry_hash, &FeeCalculator::default());
} else {
self.next_shred_index = last_shred_index;
}

// Partition network with duplicate and real shreds based on stake
let bank_epoch = bank.get_leader_schedule_epoch(bank.slot());
let mut duplicate_recipients = HashMap::new();
let mut real_recipients = HashMap::new();

let mut stakes: Vec<(Pubkey, u64)> = bank
.epoch_staked_nodes(bank_epoch)
.unwrap()
.into_iter()
.filter(|(pubkey, _)| *pubkey != self.keypair.pubkey())
.collect();
stakes.sort_by(|(l_key, l_stake), (r_key, r_stake)| {
if r_stake == l_stake {
l_key.cmp(r_key)
} else {
r_stake.cmp(l_stake)
}
});

let highest_staked_node = stakes.first().cloned().map(|x| x.0);
let stake_total: u64 = stakes.iter().map(|(_, stake)| *stake).sum();
let mut cumulative_stake: u64 = 0;
for (pubkey, stake) in stakes.into_iter().rev() {
cumulative_stake += stake;
if (100 * cumulative_stake / stake_total) as u8 <= self.config.stake_partition {
duplicate_recipients.insert(pubkey, stake);
} else {
real_recipients.insert(pubkey, stake);
}
}

if let Some(highest_staked_node) = highest_staked_node {
if bank.slot() > MINIMUM_DUPLICATE_SLOT && last_tick_height == bank.max_tick_height() {
warn!(
"{} sent duplicate slot {} to nodes: {:?}",
self.keypair.pubkey(),
bank.slot(),
&duplicate_recipients,
);
warn!(
"Duplicate shreds for slot {} will be broadcast in {} slot(s)",
bank.slot(),
self.config.duplicate_send_delay
);

let delayed_shreds: Option<Vec<Shred>> = vec![
duplicate_data_shreds.last().cloned(),
data_shreds.last().cloned(),
]
.into_iter()
.collect();
self.delayed_queue
.lock()
.unwrap()
.push_back((Some(highest_staked_node), delayed_shreds));
}
}

let duplicate_recipients = Arc::new(duplicate_recipients);
let real_recipients = Arc::new(real_recipients);

let data_shreds = Arc::new(data_shreds);
blockstore_sender.send((data_shreds.clone(), None))?;

// 3) Start broadcast step
socket_sender.send((
(
Some(duplicate_recipients.clone()),
Arc::new(duplicate_data_shreds),
),
None,
))?;
socket_sender.send((
(
Some(duplicate_recipients),
Arc::new(duplicate_coding_shreds),
),
None,
))?;
socket_sender.send(((Some(real_recipients.clone()), data_shreds), None))?;
socket_sender.send(((Some(real_recipients), Arc::new(coding_shreds)), None))?;

Ok(())
}
fn transmit(
&mut self,
receiver: &Arc<Mutex<TransmitReceiver>>,
cluster_info: &ClusterInfo,
sock: &UdpSocket,
) -> Result<()> {
// Check the delay queue for shreds that are ready to be sent
let (delayed_recipient, delayed_shreds) = {
let mut delayed_deque = self.delayed_queue.lock().unwrap();
if delayed_deque.len() > self.config.duplicate_send_delay {
delayed_deque.pop_front().unwrap()
} else {
(None, None)
}
};

let ((stakes, shreds), _) = receiver.lock().unwrap().recv()?;
let stakes = stakes.unwrap();
for peer in cluster_info.tvu_peers() {
// Forward shreds to circumvent gossip
if stakes.get(&peer.id).is_some() {
shreds.iter().for_each(|shred| {
sock.send_to(&shred.payload, &peer.tvu_forwards).unwrap();
});
}

// After a delay, broadcast duplicate shreds to a single node
if let Some(shreds) = delayed_shreds.as_ref() {
if Some(peer.id) == delayed_recipient {
shreds.iter().for_each(|shred| {
sock.send_to(&shred.payload, &peer.tvu).unwrap();
});
}
}
}

Ok(())
}
fn record(
&mut self,
receiver: &Arc<Mutex<RecordReceiver>>,
blockstore: &Arc<Blockstore>,
) -> Result<()> {
let (data_shreds, _) = receiver.lock().unwrap().recv()?;
blockstore.insert_shreds(data_shreds.to_vec(), None, true)?;
Ok(())
}
}
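
The core of the new `BroadcastDuplicatesRun` is the stake-partition loop in `run`: stakes are sorted descending (ties broken by pubkey), then walked from the smallest stake upward, and every node whose cumulative share stays at or below `stake_partition` percent of total stake is routed the duplicate version of the block. A self-contained restatement of that selection, with `u64` node ids standing in for `Pubkey` and a guard added for an empty stake set:

```rust
use std::collections::HashMap;

/// Restatement of the partition loop in `BroadcastDuplicatesRun::run`:
/// sort by descending stake (ties by id), then walk from the smallest
/// stake upward, diverting nodes to the duplicate side until their
/// cumulative share exceeds `stake_partition` percent of total stake.
fn partition_by_stake(
    mut stakes: Vec<(u64, u64)>, // (node id, stake); u64 ids stand in for Pubkey
    stake_partition: u8,
) -> (HashMap<u64, u64>, HashMap<u64, u64>) {
    stakes.sort_by(|(l_key, l_stake), (r_key, r_stake)| {
        if r_stake == l_stake {
            l_key.cmp(r_key)
        } else {
            r_stake.cmp(l_stake)
        }
    });
    let stake_total = stakes.iter().map(|(_, stake)| *stake).sum::<u64>().max(1);
    let (mut duplicate_recipients, mut real_recipients) = (HashMap::new(), HashMap::new());
    let mut cumulative_stake = 0u64;
    for (id, stake) in stakes.into_iter().rev() {
        cumulative_stake += stake;
        if (100 * cumulative_stake / stake_total) as u8 <= stake_partition {
            duplicate_recipients.insert(id, stake);
        } else {
            real_recipients.insert(id, stake);
        }
    }
    (duplicate_recipients, real_recipients)
}
```

Walking the sorted list in reverse means the low-stake tail ends up on the duplicate side, while the highest-staked node (which also gets the delayed duplicate shreds) always lands among the real recipients.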
@@ -1,6 +1,6 @@
use crate::poh_recorder::WorkingBankEntry;
use crate::result::Result;
use solana_ledger::{entry::Entry, shred::Shred};
use solana_poh::poh_recorder::WorkingBankEntry;
use solana_runtime::bank::Bank;
use solana_sdk::clock::Slot;
use std::{
@@ -161,7 +161,7 @@ impl StandardBroadcastRun {
) -> Result<()> {
let (bsend, brecv) = channel();
let (ssend, srecv) = channel();
self.process_receive_results(&blockstore, &ssend, &bsend, receive_results)?;
self.process_receive_results(blockstore, &ssend, &bsend, receive_results)?;
let srecv = Arc::new(Mutex::new(srecv));
let brecv = Arc::new(Mutex::new(brecv));
//data
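
One pattern worth noting in the `StandardBroadcastRun` hunk above: after `process_receive_results` populates the channels, the receivers are wrapped in `Arc<Mutex<...>>` before the transmit and record steps pull from them, because the `BroadcastRun` trait methods (visible in the new file earlier) take `&Arc<Mutex<Receiver<..>>>`. A minimal sketch of that wrapping, using std `mpsc` for self-containment even though the crate mixes std and crossbeam channels:

```rust
use std::sync::{
    mpsc::{channel, Receiver, Sender},
    Arc, Mutex,
};

/// Build a channel whose receiver can be shared across the transmit and
/// record stages, matching the shared-receiver signatures of `BroadcastRun`.
fn shared_channel<T>() -> (Sender<T>, Arc<Mutex<Receiver<T>>>) {
    let (sender, receiver) = channel();
    (sender, Arc::new(Mutex::new(receiver)))
}
```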
@@ -1,6 +1,5 @@
use crate::{
optimistic_confirmation_verifier::OptimisticConfirmationVerifier,
poh_recorder::PohRecorder,
replay_stage::DUPLICATE_THRESHOLD,
result::{Error, Result},
sigverify,

@@ -20,6 +19,7 @@ use solana_gossip::{
use solana_ledger::blockstore::Blockstore;
use solana_metrics::inc_new_counter_debug;
use solana_perf::packet::{self, Packets};
use solana_poh::poh_recorder::PohRecorder;
use solana_rpc::{
optimistically_confirmed_bank_tracker::{BankNotification, BankNotificationSender},
rpc_subscriptions::RpcSubscriptions,

@@ -33,7 +33,7 @@ use solana_runtime::{
vote_sender_types::{ReplayVoteReceiver, ReplayedVote},
};
use solana_sdk::{
clock::{Epoch, Slot, DEFAULT_MS_PER_SLOT},
clock::{Epoch, Slot, DEFAULT_MS_PER_SLOT, DEFAULT_TICKS_PER_SLOT},
epoch_schedule::EpochSchedule,
hash::Hash,
pubkey::Pubkey,

@@ -110,7 +110,7 @@ impl VoteTracker {
epoch_schedule: *root_bank.epoch_schedule(),
..VoteTracker::default()
};
vote_tracker.progress_with_new_root_bank(&root_bank);
vote_tracker.progress_with_new_root_bank(root_bank);
assert_eq!(
*vote_tracker.leader_schedule_epoch.read().unwrap(),
root_bank.get_leader_schedule_epoch(root_bank.slot())

@@ -384,15 +384,20 @@ impl ClusterInfoVoteListener {
return Ok(());
}

let would_be_leader = poh_recorder
.lock()
.unwrap()
.would_be_leader(20 * DEFAULT_TICKS_PER_SLOT);
if let Err(e) = verified_vote_packets.receive_and_process_vote_packets(
&verified_vote_label_packets_receiver,
&mut update_version,
would_be_leader,
) {
match e {
Error::CrossbeamRecvTimeoutError(RecvTimeoutError::Disconnected) => {
Error::CrossbeamRecvTimeout(RecvTimeoutError::Disconnected) => {
return Ok(());
}
Error::CrossbeamRecvTimeoutError(RecvTimeoutError::Timeout) => (),
Error::CrossbeamRecvTimeout(RecvTimeoutError::Timeout) => (),
_ => {
error!("thread {:?} error {:?}", thread::current().name(), e);
}

@@ -474,8 +479,8 @@ impl ClusterInfoVoteListener {
.add_new_optimistic_confirmed_slots(confirmed_slots.clone());
}
Err(e) => match e {
Error::CrossbeamRecvTimeoutError(RecvTimeoutError::Timeout)
| Error::ReadyTimeoutError => (),
Error::CrossbeamRecvTimeout(RecvTimeoutError::Timeout)
| Error::ReadyTimeout => (),
_ => {
error!("thread {:?} error {:?}", thread::current().name(), e);
}

@@ -598,7 +603,7 @@ impl ClusterInfoVoteListener {
if slot == last_vote_slot {
let vote_accounts = Stakes::vote_accounts(epoch_stakes.stakes());
let stake = vote_accounts
.get(&vote_pubkey)
.get(vote_pubkey)
.map(|(stake, _)| *stake)
.unwrap_or_default();
let total_stake = epoch_stakes.total_stake();

@@ -687,7 +692,7 @@ impl ClusterInfoVoteListener {
// voters trying to make votes for slots earlier than the epoch for
// which they are authorized
let actual_authorized_voter =
vote_tracker.get_authorized_voter(&vote_pubkey, *last_vote_slot);
vote_tracker.get_authorized_voter(vote_pubkey, *last_vote_slot);

if actual_authorized_voter.is_none() {
return false;

@@ -695,7 +700,7 @@ impl ClusterInfoVoteListener {

// Voting without the correct authorized pubkey, dump the vote
if !VoteTracker::vote_contains_authorized_voter(
&gossip_tx,
gossip_tx,
&actual_authorized_voter.unwrap(),
) {
return false;

@@ -733,7 +738,7 @@ impl ClusterInfoVoteListener {
Self::track_new_votes_and_notify_confirmations(
vote,
&vote_pubkey,
&vote_tracker,
vote_tracker,
root_bank,
subscriptions,
verified_vote_sender,
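
Beyond the error-variant renames (`CrossbeamRecvTimeoutError` becomes `CrossbeamRecvTimeout`, `ReadyTimeoutError` becomes `ReadyTimeout`), the functional change in this file is that the vote listener now asks the poh recorder whether this node is scheduled as leader within the next 20 slots, and threads that flag into `receive_and_process_vote_packets`. A sketch of the gate under a hypothetical trait; `DEFAULT_TICKS_PER_SLOT` is 64 in `solana_sdk`, assumed here:

```rust
/// Hypothetical trait capturing the single PohRecorder query used above.
trait LeaderSchedule {
    /// True if this node is scheduled as leader within the next `ticks` ticks.
    fn would_be_leader(&self, ticks: u64) -> bool;
}

// 64 in solana_sdk at the time of this change (assumed here).
const DEFAULT_TICKS_PER_SLOT: u64 = 64;

/// Mirror of the new gate: only hold vote packets for banking stage when
/// leadership is at most 20 slots away; otherwise they are merely tracked.
fn should_hold_vote_packets(recorder: &impl LeaderSchedule) -> bool {
    recorder.would_be_leader(20 * DEFAULT_TICKS_PER_SLOT)
}
```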
@@ -3,8 +3,9 @@ use crate::{
progress_map::ProgressMap,
};
use solana_sdk::{clock::Slot, hash::Hash};
use std::collections::{BTreeMap, HashMap, HashSet};
use std::collections::{BTreeMap, BTreeSet};

pub(crate) type DuplicateSlotsTracker = BTreeSet<Slot>;
pub(crate) type GossipDuplicateConfirmedSlots = BTreeMap<Slot, Hash>;
type SlotStateHandler = fn(Slot, &Hash, Option<&Hash>, bool, bool) -> Vec<ResultingStateChange>;

@@ -191,7 +192,7 @@ fn get_cluster_duplicate_confirmed_hash<'a>(
slot, gossip_duplicate_confirmed_hash, local_duplicate_confirmed_hash
);
}
Some(&local_frozen_hash)
Some(local_frozen_hash)
}
(Some(local_frozen_hash), None) => Some(local_frozen_hash),
_ => gossip_duplicate_confirmed_hash,

@@ -200,19 +201,12 @@ fn get_cluster_duplicate_confirmed_hash<'a>(

fn apply_state_changes(
slot: Slot,
progress: &mut ProgressMap,
fork_choice: &mut HeaviestSubtreeForkChoice,
ancestors: &HashMap<Slot, HashSet<Slot>>,
descendants: &HashMap<Slot, HashSet<Slot>>,
state_changes: Vec<ResultingStateChange>,
) {
for state_change in state_changes {
match state_change {
ResultingStateChange::MarkSlotDuplicate(bank_frozen_hash) => {
progress.set_unconfirmed_duplicate_slot(
slot,
descendants.get(&slot).unwrap_or(&HashSet::default()),
);
fork_choice.mark_fork_invalid_candidate(&(slot, bank_frozen_hash));
}
ResultingStateChange::RepairDuplicateConfirmedVersion(

@@ -223,25 +217,20 @@ fn apply_state_changes(
repair_correct_version(slot, &cluster_duplicate_confirmed_hash);
}
ResultingStateChange::DuplicateConfirmedSlotMatchesCluster(bank_frozen_hash) => {
progress.set_confirmed_duplicate_slot(
slot,
ancestors.get(&slot).unwrap_or(&HashSet::default()),
descendants.get(&slot).unwrap_or(&HashSet::default()),
);
fork_choice.mark_fork_valid_candidate(&(slot, bank_frozen_hash));
}
}
}
}

#[allow(clippy::too_many_arguments)]
pub(crate) fn check_slot_agrees_with_cluster(
slot: Slot,
root: Slot,
frozen_hash: Option<Hash>,
duplicate_slots_tracker: &mut DuplicateSlotsTracker,
gossip_duplicate_confirmed_slots: &GossipDuplicateConfirmedSlots,
ancestors: &HashMap<Slot, HashSet<Slot>>,
descendants: &HashMap<Slot, HashSet<Slot>>,
progress: &mut ProgressMap,
progress: &ProgressMap,
fork_choice: &mut HeaviestSubtreeForkChoice,
slot_state_update: SlotStateUpdate,
) {

@@ -258,6 +247,15 @@ pub(crate) fn check_slot_agrees_with_cluster(
return;
}

// Needs to happen before the frozen_hash.is_none() check below to account for duplicate
// signals arriving before the bank is constructed in replay.
if matches!(slot_state_update, SlotStateUpdate::Duplicate) {
// If this slot has already been processed before, return
if !duplicate_slots_tracker.insert(slot) {
return;
}
}

if frozen_hash.is_none() {
// If the bank doesn't even exist in BankForks yet,
// then there's nothing to do as replay of the slot

@@ -268,25 +266,18 @@ pub(crate) fn check_slot_agrees_with_cluster(
let frozen_hash = frozen_hash.unwrap();
let gossip_duplicate_confirmed_hash = gossip_duplicate_confirmed_slots.get(&slot);

let is_local_replay_duplicate_confirmed = progress.is_duplicate_confirmed(slot).expect("If the frozen hash exists, then the slot must exist in bank forks and thus in progress map");
// If the bank hasn't been frozen yet, then we haven't duplicate confirmed a local version
// this slot through replay yet.
let is_local_replay_duplicate_confirmed = fork_choice
.is_duplicate_confirmed(&(slot, frozen_hash))
.unwrap_or(false);
let cluster_duplicate_confirmed_hash = get_cluster_duplicate_confirmed_hash(
slot,
gossip_duplicate_confirmed_hash,
&frozen_hash,
is_local_replay_duplicate_confirmed,
);
let mut is_slot_duplicate =
progress.is_unconfirmed_duplicate(slot).expect("If the frozen hash exists, then the slot must exist in bank forks and thus in progress map");
if matches!(slot_state_update, SlotStateUpdate::Duplicate) {
if is_slot_duplicate {
// Already processed duplicate signal for this slot, no need to continue
return;
} else {
// Otherwise, mark the slot as duplicate so the appropriate state changes
// will trigger
is_slot_duplicate = true;
}
}
let is_slot_duplicate = duplicate_slots_tracker.contains(&slot);
let is_dead = progress.is_dead(slot).expect("If the frozen hash exists, then the slot must exist in bank forks and thus in progress map");

info!(

@@ -309,14 +300,7 @@ pub(crate) fn check_slot_agrees_with_cluster(
is_slot_duplicate,
is_dead,
);
apply_state_changes(
slot,
progress,
fork_choice,
ancestors,
descendants,
state_changes,
);
apply_state_changes(slot, fork_choice, state_changes);
}

#[cfg(test)]

@@ -324,15 +308,16 @@ mod test {
use super::*;
use crate::consensus::test::VoteSimulator;
use solana_runtime::bank_forks::BankForks;
use std::sync::RwLock;
use std::{
collections::{HashMap, HashSet},
sync::RwLock,
};
use trees::tr;

struct InitialState {
heaviest_subtree_fork_choice: HeaviestSubtreeForkChoice,
progress: ProgressMap,
ancestors: HashMap<Slot, HashSet<Slot>>,
descendants: HashMap<Slot, HashSet<Slot>>,
slot: Slot,
bank_forks: RwLock<BankForks>,
}

@@ -341,7 +326,6 @@ mod test {
let forks = tr(0) / (tr(1) / (tr(2) / tr(3)));
let mut vote_simulator = VoteSimulator::new(1);
vote_simulator.fill_bank_forks(forks, &HashMap::new());
let ancestors = vote_simulator.bank_forks.read().unwrap().ancestors();

let descendants = vote_simulator
.bank_forks

@@ -353,9 +337,7 @@ mod test {
InitialState {
heaviest_subtree_fork_choice: vote_simulator.heaviest_subtree_fork_choice,
progress: vote_simulator.progress,
ancestors,
descendants,
slot: 0,
bank_forks: vote_simulator.bank_forks,
}
}

@@ -626,75 +608,159 @@ mod test {
// Common state
let InitialState {
mut heaviest_subtree_fork_choice,
mut progress,
ancestors,
descendants,
slot,
bank_forks,
..
} = setup();

// MarkSlotDuplicate should mark progress map and remove
// the slot from fork choice
let slot_hash = bank_forks.read().unwrap().get(slot).unwrap().hash();
let duplicate_slot = bank_forks.read().unwrap().root() + 1;
let duplicate_slot_hash = bank_forks
.read()
.unwrap()
.get(duplicate_slot)
.unwrap()
.hash();
apply_state_changes(
slot,
&mut progress,
duplicate_slot,
&mut heaviest_subtree_fork_choice,
&ancestors,
&descendants,
vec![ResultingStateChange::MarkSlotDuplicate(slot_hash)],
vec![ResultingStateChange::MarkSlotDuplicate(duplicate_slot_hash)],
);
assert!(!heaviest_subtree_fork_choice
.is_candidate_slot(&(slot, slot_hash))
.is_candidate(&(duplicate_slot, duplicate_slot_hash))
.unwrap());
for child_slot in descendants
.get(&slot)
.get(&duplicate_slot)
.unwrap()
.iter()
.chain(std::iter::once(&slot))
.chain(std::iter::once(&duplicate_slot))
{
assert_eq!(
progress
.latest_unconfirmed_duplicate_ancestor(*child_slot)
heaviest_subtree_fork_choice
.latest_invalid_ancestor(&(
*child_slot,
bank_forks.read().unwrap().get(*child_slot).unwrap().hash()
))
.unwrap(),
slot
duplicate_slot
);
}

// DuplicateConfirmedSlotMatchesCluster should re-enable fork choice
apply_state_changes(
slot,
&mut progress,
duplicate_slot,
&mut heaviest_subtree_fork_choice,
&ancestors,
&descendants,
vec![ResultingStateChange::DuplicateConfirmedSlotMatchesCluster(
slot_hash,
duplicate_slot_hash,
)],
);
for child_slot in descendants
.get(&slot)
.get(&duplicate_slot)
.unwrap()
.iter()
.chain(std::iter::once(&slot))
.chain(std::iter::once(&duplicate_slot))
{
assert!(progress
.latest_unconfirmed_duplicate_ancestor(*child_slot)
assert!(heaviest_subtree_fork_choice
.latest_invalid_ancestor(&(
*child_slot,
bank_forks.read().unwrap().get(*child_slot).unwrap().hash()
))
.is_none());
}
assert!(heaviest_subtree_fork_choice
.is_candidate_slot(&(slot, slot_hash))
.is_candidate(&(duplicate_slot, duplicate_slot_hash))
.unwrap());
}

fn run_test_state_duplicate_then_bank_frozen(initial_bank_hash: Option<Hash>) {
// Common state
let InitialState {
mut heaviest_subtree_fork_choice,
progress,
bank_forks,
..
} = setup();

// Setup a duplicate slot state transition with the initial bank state of the duplicate slot
// determined by `initial_bank_hash`, which can be:
// 1) A default hash (unfrozen bank),
// 2) None (a slot that hasn't even started replay yet).
let root = 0;
let mut duplicate_slots_tracker = DuplicateSlotsTracker::default();
let gossip_duplicate_confirmed_slots = GossipDuplicateConfirmedSlots::default();
let duplicate_slot = 2;
check_slot_agrees_with_cluster(
duplicate_slot,
root,
initial_bank_hash,
&mut duplicate_slots_tracker,
&gossip_duplicate_confirmed_slots,
&progress,
&mut heaviest_subtree_fork_choice,
SlotStateUpdate::Duplicate,
);
assert!(duplicate_slots_tracker.contains(&duplicate_slot));
// Nothing should be applied yet to fork choice, since bank was not yet frozen
for slot in 2..=3 {
let slot_hash = bank_forks.read().unwrap().get(slot).unwrap().hash();
assert!(heaviest_subtree_fork_choice
.latest_invalid_ancestor(&(slot, slot_hash))
.is_none());
}

// Now freeze the bank
let frozen_duplicate_slot_hash = bank_forks
.read()
.unwrap()
.get(duplicate_slot)
.unwrap()
.hash();
check_slot_agrees_with_cluster(
duplicate_slot,
root,
Some(frozen_duplicate_slot_hash),
&mut duplicate_slots_tracker,
&gossip_duplicate_confirmed_slots,
&progress,
&mut heaviest_subtree_fork_choice,
SlotStateUpdate::Frozen,
);

// Progress map should have the correct updates, fork choice should mark duplicate
// as unvotable
assert!(heaviest_subtree_fork_choice
.is_unconfirmed_duplicate(&(duplicate_slot, frozen_duplicate_slot_hash))
.unwrap());

// The ancestor of the duplicate slot should be the best slot now
let (duplicate_ancestor, duplicate_parent_hash) = {
let r_bank_forks = bank_forks.read().unwrap();
let parent_bank = r_bank_forks.get(duplicate_slot).unwrap().parent().unwrap();
(parent_bank.slot(), parent_bank.hash())
};
assert_eq!(
heaviest_subtree_fork_choice.best_overall_slot(),
(duplicate_ancestor, duplicate_parent_hash)
);
}

#[test]
fn test_state_unfrozen_bank_duplicate_then_bank_frozen() {
run_test_state_duplicate_then_bank_frozen(Some(Hash::default()));
}

#[test]
fn test_state_unreplayed_bank_duplicate_then_bank_frozen() {
run_test_state_duplicate_then_bank_frozen(None);
}

#[test]
fn test_state_ancestor_confirmed_descendant_duplicate() {
// Common state
let InitialState {
mut heaviest_subtree_fork_choice,
mut progress,
ancestors,
descendants,
progress,
bank_forks,
..
} = setup();

@@ -705,6 +771,7 @@ mod test {
(3, slot3_hash)
);
let root = 0;
let mut duplicate_slots_tracker = DuplicateSlotsTracker::default();
let mut gossip_duplicate_confirmed_slots = GossipDuplicateConfirmedSlots::default();

// Mark slot 2 as duplicate confirmed

@@ -714,36 +781,67 @@ mod test {
2,
root,
Some(slot2_hash),
&mut duplicate_slots_tracker,
&gossip_duplicate_confirmed_slots,
&ancestors,
&descendants,
&mut progress,
&progress,
&mut heaviest_subtree_fork_choice,
SlotStateUpdate::DuplicateConfirmed,
);

assert!(heaviest_subtree_fork_choice
.is_duplicate_confirmed(&(2, slot2_hash))
.unwrap());
assert_eq!(
heaviest_subtree_fork_choice.best_overall_slot(),
(3, slot3_hash)
);
for slot in 0..=2 {
let slot_hash = bank_forks.read().unwrap().get(slot).unwrap().hash();
assert!(heaviest_subtree_fork_choice
.is_duplicate_confirmed(&(slot, slot_hash))
.unwrap());
assert!(heaviest_subtree_fork_choice
.latest_invalid_ancestor(&(slot, slot_hash))
.is_none());
}

// Mark 3 as duplicate, should not remove slot 2 from fork choice
// Mark 3 as duplicate, should not remove the duplicate confirmed slot 2 from
// fork choice
check_slot_agrees_with_cluster(
3,
root,
Some(slot3_hash),
&mut duplicate_slots_tracker,
&gossip_duplicate_confirmed_slots,
&ancestors,
&descendants,
&mut progress,
&progress,
&mut heaviest_subtree_fork_choice,
SlotStateUpdate::Duplicate,
);

assert!(duplicate_slots_tracker.contains(&3));
assert_eq!(
heaviest_subtree_fork_choice.best_overall_slot(),
(2, slot2_hash)
);
for slot in 0..=3 {
let slot_hash = bank_forks.read().unwrap().get(slot).unwrap().hash();
if slot <= 2 {
assert!(heaviest_subtree_fork_choice
.is_duplicate_confirmed(&(slot, slot_hash))
.unwrap());
assert!(heaviest_subtree_fork_choice
.latest_invalid_ancestor(&(slot, slot_hash))
.is_none());
} else {
assert!(!heaviest_subtree_fork_choice
.is_duplicate_confirmed(&(slot, slot_hash))
.unwrap());
assert_eq!(
heaviest_subtree_fork_choice
.latest_invalid_ancestor(&(slot, slot_hash))
.unwrap(),
3
);
}
}
}

#[test]

@@ -751,9 +849,7 @@ mod test {
// Common state
let InitialState {
mut heaviest_subtree_fork_choice,
mut progress,
ancestors,
descendants,
progress,
bank_forks,
..
} = setup();

@@ -764,19 +860,30 @@ mod test {
(3, slot3_hash)
);
let root = 0;
let mut duplicate_slots_tracker = DuplicateSlotsTracker::default();
let mut gossip_duplicate_confirmed_slots = GossipDuplicateConfirmedSlots::default();
// Mark 2 as duplicate confirmed

// Mark 2 as duplicate
check_slot_agrees_with_cluster(
2,
root,
Some(bank_forks.read().unwrap().get(2).unwrap().hash()),
&mut duplicate_slots_tracker,
&gossip_duplicate_confirmed_slots,
&ancestors,
&descendants,
&mut progress,
&progress,
&mut heaviest_subtree_fork_choice,
SlotStateUpdate::Duplicate,
);
assert!(duplicate_slots_tracker.contains(&2));
for slot in 2..=3 {
let slot_hash = bank_forks.read().unwrap().get(slot).unwrap().hash();
assert_eq!(
heaviest_subtree_fork_choice
.latest_invalid_ancestor(&(slot, slot_hash))
.unwrap(),
2
);
}

let slot1_hash = bank_forks.read().unwrap().get(1).unwrap().hash();
assert_eq!(

@@ -790,14 +897,93 @@ mod test {
3,
root,
Some(slot3_hash),
&mut duplicate_slots_tracker,
&gossip_duplicate_confirmed_slots,
&ancestors,
&descendants,
&mut progress,
&progress,
&mut heaviest_subtree_fork_choice,
SlotStateUpdate::DuplicateConfirmed,
);
for slot in 0..=3 {
let slot_hash = bank_forks.read().unwrap().get(slot).unwrap().hash();
assert!(heaviest_subtree_fork_choice
.is_duplicate_confirmed(&(slot, slot_hash))
.unwrap());
assert!(heaviest_subtree_fork_choice
.latest_invalid_ancestor(&(slot, slot_hash))
.is_none());
}
assert_eq!(
heaviest_subtree_fork_choice.best_overall_slot(),
(3, slot3_hash)
);
}

#[test]
fn test_state_descendant_confirmed_ancestor_duplicate() {
// Common state
let InitialState {
mut heaviest_subtree_fork_choice,
progress,
bank_forks,
..
} = setup();

let slot3_hash = bank_forks.read().unwrap().get(3).unwrap().hash();
assert_eq!(
heaviest_subtree_fork_choice.best_overall_slot(),
(3, slot3_hash)
);
let root = 0;
let mut duplicate_slots_tracker = DuplicateSlotsTracker::default();
let mut gossip_duplicate_confirmed_slots = GossipDuplicateConfirmedSlots::default();

// Mark 3 as duplicate confirmed
gossip_duplicate_confirmed_slots.insert(3, slot3_hash);
check_slot_agrees_with_cluster(
3,
root,
Some(slot3_hash),
&mut duplicate_slots_tracker,
&gossip_duplicate_confirmed_slots,
&progress,
&mut heaviest_subtree_fork_choice,
SlotStateUpdate::DuplicateConfirmed,
);
let verify_all_slots_duplicate_confirmed =
|bank_forks: &RwLock<BankForks>,
heaviest_subtree_fork_choice: &HeaviestSubtreeForkChoice| {
for slot in 0..=3 {
let slot_hash = bank_forks.read().unwrap().get(slot).unwrap().hash();
assert!(heaviest_subtree_fork_choice
.is_duplicate_confirmed(&(slot, slot_hash))
.unwrap());
assert!(heaviest_subtree_fork_choice
.latest_invalid_ancestor(&(slot, slot_hash))
.is_none());
}
};
verify_all_slots_duplicate_confirmed(&bank_forks, &heaviest_subtree_fork_choice);
assert_eq!(
heaviest_subtree_fork_choice.best_overall_slot(),
(3, slot3_hash)
);

// Mark ancestor 1 as duplicate, fork choice should be unaffected since
// slot 1 was duplicate confirmed by the confirmation on its
// descendant, 3.
let slot1_hash = bank_forks.read().unwrap().get(1).unwrap().hash();
check_slot_agrees_with_cluster(
1,
root,
Some(slot1_hash),
&mut duplicate_slots_tracker,
&gossip_duplicate_confirmed_slots,
&progress,
&mut heaviest_subtree_fork_choice,
SlotStateUpdate::Duplicate,
);
assert!(duplicate_slots_tracker.contains(&1));
verify_all_slots_duplicate_confirmed(&bank_forks, &heaviest_subtree_fork_choice);
assert_eq!(
heaviest_subtree_fork_choice.best_overall_slot(),
(3, slot3_hash)
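
The refactor above replaces the per-slot duplicate flags in `ProgressMap` with a single `DuplicateSlotsTracker = BTreeSet<Slot>`, and dedups duplicate signals with `BTreeSet::insert`, which returns `false` when the slot was already present. A minimal sketch of the idiom; the `purge_below_root` helper is a plausible companion and is not part of this diff:

```rust
use std::collections::BTreeSet;

type Slot = u64;
type DuplicateSlotsTracker = BTreeSet<Slot>;

/// The dedup idiom from `check_slot_agrees_with_cluster`: `insert` returns
/// `false` if the slot was already present, so a repeated duplicate signal
/// for the same slot is dropped before any state changes are applied.
fn first_duplicate_signal(tracker: &mut DuplicateSlotsTracker, slot: Slot) -> bool {
    tracker.insert(slot)
}

/// Plausible companion (not shown in this diff): once the root advances,
/// everything at or below it can be pruned, keeping the set bounded.
fn purge_below_root(tracker: &mut DuplicateSlotsTracker, root: Slot) {
    *tracker = tracker.split_off(&(root + 1));
}
```

A `BTreeSet` rather than a `HashSet` keeps the tracked slots ordered, which makes range-based pruning like the sketch above cheap.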
@@ -1,29 +1,32 @@
use crate::cluster_slots::ClusterSlots;
use crossbeam_channel::{Receiver, RecvTimeoutError, Sender};
use solana_gossip::cluster_info::ClusterInfo;
use solana_ledger::blockstore::{Blockstore, CompletedSlotsReceiver};
use solana_ledger::blockstore::Blockstore;
use solana_measure::measure::Measure;
use solana_runtime::bank_forks::BankForks;
use solana_sdk::{clock::Slot, pubkey::Pubkey};
use std::{
sync::{
atomic::{AtomicBool, Ordering},
mpsc::RecvTimeoutError,
{Arc, RwLock},
},
thread::{self, Builder, JoinHandle},
time::{Duration, Instant},
};

pub type ClusterSlotsUpdateReceiver = Receiver<Vec<Slot>>;
pub type ClusterSlotsUpdateSender = Sender<Vec<Slot>>;

#[derive(Default, Debug)]
struct ClusterSlotsServiceTiming {
pub lowest_slot_elapsed: u64,
pub update_completed_slots_elapsed: u64,
pub process_cluster_slots_updates_elapsed: u64,
}

impl ClusterSlotsServiceTiming {
fn update(&mut self, lowest_slot_elapsed: u64, update_completed_slots_elapsed: u64) {
fn update(&mut self, lowest_slot_elapsed: u64, process_cluster_slots_updates_elapsed: u64) {
self.lowest_slot_elapsed += lowest_slot_elapsed;
self.update_completed_slots_elapsed += update_completed_slots_elapsed;
self.process_cluster_slots_updates_elapsed += process_cluster_slots_updates_elapsed;
}
}

@@ -37,12 +40,12 @@ impl ClusterSlotsService {
cluster_slots: Arc<ClusterSlots>,
bank_forks: Arc<RwLock<BankForks>>,
cluster_info: Arc<ClusterInfo>,
completed_slots_receiver: CompletedSlotsReceiver,
cluster_slots_update_receiver: ClusterSlotsUpdateReceiver,
exit: Arc<AtomicBool>,
) -> Self {
let id = cluster_info.id();
Self::initialize_lowest_slot(id, &blockstore, &cluster_info);
Self::initialize_epoch_slots(&blockstore, &cluster_info, &completed_slots_receiver);
Self::initialize_epoch_slots(&bank_forks, &cluster_info);
let t_cluster_slots_service = Builder::new()
.name("solana-cluster-slots-service".to_string())
.spawn(move || {

@@ -51,7 +54,7 @@ impl ClusterSlotsService {
cluster_slots,
bank_forks,
cluster_info,
completed_slots_receiver,
cluster_slots_update_receiver,
exit,
)
})

@@ -71,7 +74,7 @@ impl ClusterSlotsService {
cluster_slots: Arc<ClusterSlots>,
bank_forks: Arc<RwLock<BankForks>>,
cluster_info: Arc<ClusterInfo>,
completed_slots_receiver: CompletedSlotsReceiver,
cluster_slots_update_receiver: ClusterSlotsUpdateReceiver,
exit: Arc<AtomicBool>,
) {
let mut cluster_slots_service_timing = ClusterSlotsServiceTiming::default();

@@ -80,7 +83,8 @@ impl ClusterSlotsService {
if exit.load(Ordering::Relaxed) {
break;
}
let slots = match completed_slots_receiver.recv_timeout(Duration::from_millis(200)) {
let slots = match cluster_slots_update_receiver.recv_timeout(Duration::from_millis(200))
{
Ok(slots) => Some(slots),
Err(RecvTimeoutError::Timeout) => None,
Err(RecvTimeoutError::Disconnected) => {

@@ -94,17 +98,21 @@ impl ClusterSlotsService {
let lowest_slot = blockstore.lowest_slot();
Self::update_lowest_slot(&id, lowest_slot, &cluster_info);
lowest_slot_elapsed.stop();
let mut update_completed_slots_elapsed =
Measure::start("update_completed_slots_elapsed");
let mut process_cluster_slots_updates_elapsed =
Measure::start("process_cluster_slots_updates_elapsed");
if let Some(slots) = slots {
Self::update_completed_slots(slots, &completed_slots_receiver, &cluster_info);
Self::process_cluster_slots_updates(
slots,
&cluster_slots_update_receiver,
&cluster_info,
);
}
cluster_slots.update(new_root, &cluster_info, &bank_forks);
update_completed_slots_elapsed.stop();
process_cluster_slots_updates_elapsed.stop();

cluster_slots_service_timing.update(
lowest_slot_elapsed.as_us(),
update_completed_slots_elapsed.as_us(),
process_cluster_slots_updates_elapsed.as_us(),
);

if last_stats.elapsed().as_secs() > 2 {

@@ -116,8 +124,8 @@ impl ClusterSlotsService {
i64
),
(
"update_completed_slots_elapsed",
cluster_slots_service_timing.update_completed_slots_elapsed,
"process_cluster_slots_updates_elapsed",
cluster_slots_service_timing.process_cluster_slots_updates_elapsed,
i64
),
);

@@ -127,12 +135,12 @@ impl ClusterSlotsService {
}
}

fn update_completed_slots(
fn process_cluster_slots_updates(
mut slots: Vec<Slot>,
completed_slots_receiver: &CompletedSlotsReceiver,
cluster_slots_update_receiver: &ClusterSlotsUpdateReceiver,
cluster_info: &ClusterInfo,
) {
while let Ok(mut more) = completed_slots_receiver.try_recv() {
while let Ok(mut more) = cluster_slots_update_receiver.try_recv() {
slots.append(&mut more);
}
#[allow(clippy::stable_sort_primitive)]

@@ -155,30 +163,16 @@ impl ClusterSlotsService {
cluster_info.push_lowest_slot(*id, lowest_slot);
}

fn initialize_epoch_slots(
blockstore: &Blockstore,
cluster_info: &ClusterInfo,
completed_slots_receiver: &CompletedSlotsReceiver,
) {
let root = blockstore.last_root();
let mut slots: Vec<_> = blockstore
.live_slots_iterator(root)
.filter_map(|(slot, slot_meta)| {
if slot_meta.is_full() {
Some(slot)
} else {
None
}
})
.collect();
fn initialize_epoch_slots(bank_forks: &RwLock<BankForks>, cluster_info: &ClusterInfo) {
// TODO: Should probably incorporate slots that were replayed on startup,
// and maybe some that were frozen < snapshot root in case validators restart
// from newer snapshots and lose history.
let frozen_banks = bank_forks.read().unwrap().frozen_banks();
let mut frozen_bank_slots: Vec<Slot> = frozen_banks.keys().cloned().collect();
frozen_bank_slots.sort_unstable();

while let Ok(mut more) = completed_slots_receiver.try_recv() {
slots.append(&mut more);
}
slots.sort_unstable();
slots.dedup();
if !slots.is_empty() {
cluster_info.push_epoch_slots(&slots);
if !frozen_bank_slots.is_empty() {
cluster_info.push_epoch_slots(&frozen_bank_slots);
}
}
}
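
`process_cluster_slots_updates` above keeps the same coalescing shape as the old `update_completed_slots`: one blocking `recv_timeout` yields a batch, then `try_recv` drains whatever else is already queued so a single gossip push covers every pending update. A runnable sketch of that drain, using std `mpsc` for self-containment even though the service itself uses crossbeam channels:

```rust
use std::sync::mpsc::{channel, Receiver};

type Slot = u64;

/// After one blocking receive yields `slots`, drain everything already
/// queued with `try_recv`, then sort and dedup so one push covers it all.
fn drain_pending_updates(mut slots: Vec<Slot>, receiver: &Receiver<Vec<Slot>>) -> Vec<Slot> {
    while let Ok(mut more) = receiver.try_recv() {
        slots.append(&mut more);
    }
    slots.sort_unstable();
    slots.dedup();
    slots
}

fn main() {
    let (sender, receiver) = channel();
    sender.send(vec![3, 2]).unwrap();
    sender.send(vec![2, 5]).unwrap();
    assert_eq!(drain_pending_updates(vec![1], &receiver), vec![1, 2, 3, 5]);
}
```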
@@ -352,15 +352,15 @@ mod tests {
if *a <= root {
let mut expected = BlockCommitment::default();
expected.increase_rooted_stake(lamports);
assert_eq!(*commitment.get(&a).unwrap(), expected);
assert_eq!(*commitment.get(a).unwrap(), expected);
} else if i <= 4 {
let mut expected = BlockCommitment::default();
expected.increase_confirmation_stake(2, lamports);
assert_eq!(*commitment.get(&a).unwrap(), expected);
assert_eq!(*commitment.get(a).unwrap(), expected);
} else if i <= 6 {
let mut expected = BlockCommitment::default();
expected.increase_confirmation_stake(1, lamports);
assert_eq!(*commitment.get(&a).unwrap(), expected);
assert_eq!(*commitment.get(a).unwrap(), expected);
}
}
assert_eq!(rooted_stake[0], (root, lamports));
@@ -1,4 +1,5 @@
use crate::{
heaviest_subtree_fork_choice::HeaviestSubtreeForkChoice,
latest_validator_votes_for_frozen_banks::LatestValidatorVotesForFrozenBanks,
progress_map::{LockoutIntervals, ProgressMap},
};

@@ -163,7 +164,7 @@ impl Tower {
bank: &Bank,
path: &Path,
) -> Self {
let path = Self::get_filename(&path, node_pubkey);
let path = Self::get_filename(path, node_pubkey);
let tmp_path = Self::get_tmp_filename(&path);
let mut tower = Self {
node_pubkey: *node_pubkey,

@@ -204,8 +205,8 @@ impl Tower {
crate::replay_stage::ReplayStage::initialize_progress_and_fork_choice(
root_bank.deref(),
bank_forks.frozen_banks().values().cloned().collect(),
&my_pubkey,
&vote_account,
my_pubkey,
vote_account,
);
let root = root_bank.slot();

@@ -217,13 +218,7 @@ impl Tower {
)
.clone();

Self::new(
&my_pubkey,
&vote_account,
root,
&heaviest_bank,
&ledger_path,
)
Self::new(my_pubkey, vote_account, root, &heaviest_bank, ledger_path)
}

pub(crate) fn collect_vote_lockouts<F>(

@@ -573,6 +568,7 @@ impl Tower {
.map(|candidate_slot_ancestors| candidate_slot_ancestors.contains(&last_voted_slot))
}

#[allow(clippy::too_many_arguments)]
fn make_check_switch_threshold_decision(
&self,
switch_slot: u64,

@@ -582,9 +578,10 @@ impl Tower {
total_stake: u64,
epoch_vote_accounts: &HashMap<Pubkey, (u64, ArcVoteAccount)>,
latest_validator_votes_for_frozen_banks: &LatestValidatorVotesForFrozenBanks,
heaviest_subtree_fork_choice: &HeaviestSubtreeForkChoice,
) -> SwitchForkDecision {
self.last_voted_slot()
.map(|last_voted_slot| {
self.last_voted_slot_hash()
.map(|(last_voted_slot, last_voted_hash)| {
let root = self.root();
let empty_ancestors = HashSet::default();
let empty_ancestors_due_to_minor_unsynced_ledger = || {

@@ -673,7 +670,7 @@ impl Tower {
if last_vote_ancestors.contains(&switch_slot) {
if self.is_stray_last_vote() {
return suspended_decision_due_to_major_unsynced_ledger();
} else if let Some(latest_duplicate_ancestor) = progress.latest_unconfirmed_duplicate_ancestor(last_voted_slot) {
} else if let Some(latest_duplicate_ancestor) = heaviest_subtree_fork_choice.latest_invalid_ancestor(&(last_voted_slot, last_voted_hash)) {
// We're rolling back because one of the ancestors of the last vote was a duplicate. In this
// case, it's acceptable if the switch candidate is one of ancestors of the previous vote,
// just fail the switch check because there's no point in voting on an ancestor. ReplayStage

@@ -733,7 +730,7 @@ impl Tower {
// finding any lockout intervals in the `lockout_intervals` tree
// for this bank that contain `last_vote`.
let lockout_intervals = &progress
.get(&candidate_slot)
.get(candidate_slot)
.unwrap()
.fork_stats
.lockout_intervals;

@@ -821,6 +818,7 @@ impl Tower {
.unwrap_or(SwitchForkDecision::SameFork)
}

#[allow(clippy::too_many_arguments)]
pub(crate) fn check_switch_threshold(
&mut self,
switch_slot: u64,

@@ -830,6 +828,7 @@ impl Tower {
total_stake: u64,
epoch_vote_accounts: &HashMap<Pubkey, (u64, ArcVoteAccount)>,
latest_validator_votes_for_frozen_banks: &LatestValidatorVotesForFrozenBanks,
heaviest_subtree_fork_choice: &HeaviestSubtreeForkChoice,
) -> SwitchForkDecision {
let decision = self.make_check_switch_threshold_decision(
switch_slot,

@@ -839,6 +838,7 @@ impl Tower {
total_stake,
epoch_vote_accounts,
latest_validator_votes_for_frozen_banks,
heaviest_subtree_fork_choice,
);
let new_check = Some((switch_slot, decision.clone()));
if new_check != self.last_switch_threshold_check {

@@ -1322,7 +1322,7 @@ pub fn reconcile_blockstore_roots_with_tower(
if last_blockstore_root < tower_root {
// Ensure tower_root itself to exist and be marked as rooted in the blockstore
// in addition to its ancestors.
let new_roots: Vec<_> = AncestorIterator::new_inclusive(tower_root, &blockstore)
let new_roots: Vec<_> = AncestorIterator::new_inclusive(tower_root, blockstore)
.take_while(|current| match current.cmp(&last_blockstore_root) {
Ordering::Greater => true,
Ordering::Equal => false,

@@ -1358,11 +1358,11 @@ pub mod test {
use super::*;
use crate::{
cluster_info_vote_listener::VoteTracker,
cluster_slot_state_verifier::GossipDuplicateConfirmedSlots,
cluster_slot_state_verifier::{DuplicateSlotsTracker, GossipDuplicateConfirmedSlots},
cluster_slots::ClusterSlots,
fork_choice::SelectVoteAndResetForkResult,
heaviest_subtree_fork_choice::{HeaviestSubtreeForkChoice, SlotHashKey},
progress_map::{DuplicateStats, ForkProgress},
fork_choice::{ForkChoice, SelectVoteAndResetForkResult},
heaviest_subtree_fork_choice::SlotHashKey,
progress_map::ForkProgress,
replay_stage::{HeaviestForkFailures, ReplayStage},
unfrozen_gossip_verified_vote_hashes::UnfrozenGossipVerifiedVoteHashes,
};

@@ -1439,9 +1439,9 @@ pub mod test {

while let Some(visit) = walk.get() {
let slot = visit.node().data;
self.progress.entry(slot).or_insert_with(|| {
ForkProgress::new(Hash::default(), None, DuplicateStats::default(), None, 0, 0)
});
self.progress
.entry(slot)
.or_insert_with(|| ForkProgress::new(Hash::default(), None, None, 0, 0));
if self.bank_forks.read().unwrap().get(slot).is_some() {
walk.forward();
continue;

@@ -1484,7 +1484,7 @@ pub mod test {
tower: &mut Tower,
) -> Vec<HeaviestForkFailures> {
// Try to simulate the vote
let my_keypairs = self.validator_keypairs.get(&my_pubkey).unwrap();
let my_keypairs = self.validator_keypairs.get(my_pubkey).unwrap();
let my_vote_pubkey = my_keypairs.vote_keypair.pubkey();
let ancestors = self.bank_forks.read().unwrap().ancestors();
let mut frozen_banks: Vec<_> = self

@@ -1497,7 +1497,7 @@ pub mod test {
.collect();

let _ = ReplayStage::compute_bank_stats(
&my_pubkey,
my_pubkey,
&ancestors,
&mut frozen_banks,
tower,

@@ -1530,6 +1530,7 @@ pub mod test {
&self.progress,
tower,
&self.latest_validator_votes_for_frozen_banks,
&self.heaviest_subtree_fork_choice,
);

// Make sure this slot isn't locked out or failing threshold

@@ -1554,6 +1555,7 @@ pub mod test {
&AbsRequestSender::default(),
None,
&mut self.heaviest_subtree_fork_choice,
&mut DuplicateSlotsTracker::default(),
&mut GossipDuplicateConfirmedSlots::default(),
&mut UnfrozenGossipVerifiedVoteHashes::default(),
&mut true,

@@ -1574,9 +1576,9 @@ pub mod test {
.filter_map(|slot| {
let mut fork_tip_parent = tr(slot - 1);
fork_tip_parent.push_front(tr(slot));
self.fill_bank_forks(fork_tip_parent, &cluster_votes);
self.fill_bank_forks(fork_tip_parent, cluster_votes);
if votes_to_simulate.contains(&slot) {
Some((slot, self.simulate_vote(slot, &my_pubkey, tower)))
Some((slot, self.simulate_vote(slot, my_pubkey, tower)))
} else {
None
}

@@ -1592,9 +1594,7 @@ pub mod test {
) {
self.progress
.entry(slot)
.or_insert_with(|| {
ForkProgress::new(Hash::default(), None, DuplicateStats::default(), None, 0, 0)
})
.or_insert_with(|| ForkProgress::new(Hash::default(), None, None, 0, 0))
.fork_stats
.lockout_intervals
.entry(lockout_interval.1)

@@ -1621,7 +1621,7 @@ pub mod test {
fork_tip_parent.push_front(tr(start_slot + i));
self.fill_bank_forks(fork_tip_parent, cluster_votes);
if self
.simulate_vote(i + start_slot, &my_pubkey, tower)
.simulate_vote(i + start_slot, my_pubkey, tower)
.is_empty()
{
cluster_votes

@@ -1701,14 +1701,7 @@ pub mod test {
let mut progress = ProgressMap::default();
progress.insert(
0,
ForkProgress::new(
bank0.last_blockhash(),
None,
DuplicateStats::default(),
None,
0,
0,
),
ForkProgress::new(bank0.last_blockhash(), None, None, 0, 0),
);
let bank_forks = BankForks::new(bank0);
let heaviest_subtree_fork_choice =

@@ -1867,21 +1860,46 @@ pub mod test {
let mut tower = Tower::new_with_key(&vote_simulator.node_pubkeys[0]);

// Last vote is 47
tower.record_vote(47, Hash::default());
tower.record_vote(
47,
vote_simulator
.bank_forks
.read()
.unwrap()
.get(47)
.unwrap()
.hash(),
);

// Trying to switch to an ancestor of last vote should only not panic
// if the current vote has a duplicate ancestor
let ancestor_of_voted_slot = 43;
let duplicate_ancestor1 = 44;
let duplicate_ancestor2 = 45;
vote_simulator.progress.set_unconfirmed_duplicate_slot(
duplicate_ancestor1,
&descendants.get(&duplicate_ancestor1).unwrap(),
);
vote_simulator.progress.set_unconfirmed_duplicate_slot(
duplicate_ancestor2,
&descendants.get(&duplicate_ancestor2).unwrap(),
);
vote_simulator
.heaviest_subtree_fork_choice
.mark_fork_invalid_candidate(&(
duplicate_ancestor1,
vote_simulator
.bank_forks
.read()
.unwrap()
.get(duplicate_ancestor1)
.unwrap()
.hash(),
));
vote_simulator
.heaviest_subtree_fork_choice
.mark_fork_invalid_candidate(&(
duplicate_ancestor2,
vote_simulator
.bank_forks
.read()
.unwrap()
.get(duplicate_ancestor2)
.unwrap()
.hash(),
));
assert_eq!(
tower.check_switch_threshold(
ancestor_of_voted_slot,

@@ -1890,7 +1908,8 @@ pub mod test {
&vote_simulator.progress,
total_stake,
bank0.epoch_vote_accounts(0).unwrap(),
&vote_simulator.latest_validator_votes_for_frozen_banks
&vote_simulator.latest_validator_votes_for_frozen_banks,
&vote_simulator.heaviest_subtree_fork_choice,
),
SwitchForkDecision::FailedSwitchDuplicateRollback(duplicate_ancestor2)
);

@@ -1903,11 +1922,18 @@ pub mod test {
confirm_ancestors.push(duplicate_ancestor2);
}
for (i, duplicate_ancestor) in confirm_ancestors.into_iter().enumerate() {
vote_simulator.progress.set_confirmed_duplicate_slot(
duplicate_ancestor,
ancestors.get(&duplicate_ancestor).unwrap(),
&descendants.get(&duplicate_ancestor).unwrap(),
);
vote_simulator
.heaviest_subtree_fork_choice
.mark_fork_valid_candidate(&(
duplicate_ancestor,
vote_simulator
.bank_forks
.read()
.unwrap()
.get(duplicate_ancestor)
.unwrap()
.hash(),
));
let res = tower.check_switch_threshold(
ancestor_of_voted_slot,
&ancestors,

@@ -1916,6 +1942,7 @@ pub mod test {
total_stake,
bank0.epoch_vote_accounts(0).unwrap(),
&vote_simulator.latest_validator_votes_for_frozen_banks,
&vote_simulator.heaviest_subtree_fork_choice,
);
if i == 0 {
assert_eq!(

@@ -1951,7 +1978,8 @@ pub mod test {
&vote_simulator.progress,
total_stake,
bank0.epoch_vote_accounts(0).unwrap(),
&vote_simulator.latest_validator_votes_for_frozen_banks
&vote_simulator.latest_validator_votes_for_frozen_banks,
&vote_simulator.heaviest_subtree_fork_choice,
),
SwitchForkDecision::SameFork
);

@@ -1965,7 +1993,8 @@ pub mod test {
&vote_simulator.progress,
total_stake,
bank0.epoch_vote_accounts(0).unwrap(),
&vote_simulator.latest_validator_votes_for_frozen_banks
&vote_simulator.latest_validator_votes_for_frozen_banks,
&vote_simulator.heaviest_subtree_fork_choice,
),
SwitchForkDecision::FailedSwitchThreshold(0, 20000)
);

@@ -1981,7 +2010,8 @@ pub mod test {
&vote_simulator.progress,
total_stake,
bank0.epoch_vote_accounts(0).unwrap(),
&vote_simulator.latest_validator_votes_for_frozen_banks
&vote_simulator.latest_validator_votes_for_frozen_banks,
&vote_simulator.heaviest_subtree_fork_choice,
),
SwitchForkDecision::FailedSwitchThreshold(0, 20000)
);

@@ -1997,7 +2027,8 @@ pub mod test {
|
||||
&vote_simulator.progress,
|
||||
total_stake,
|
||||
bank0.epoch_vote_accounts(0).unwrap(),
|
||||
&vote_simulator.latest_validator_votes_for_frozen_banks
|
||||
&vote_simulator.latest_validator_votes_for_frozen_banks,
|
||||
&vote_simulator.heaviest_subtree_fork_choice,
|
||||
),
|
||||
SwitchForkDecision::FailedSwitchThreshold(0, 20000)
|
||||
);
|
||||
@@ -2013,7 +2044,8 @@ pub mod test {
|
||||
&vote_simulator.progress,
|
||||
total_stake,
|
||||
bank0.epoch_vote_accounts(0).unwrap(),
|
||||
&vote_simulator.latest_validator_votes_for_frozen_banks
|
||||
&vote_simulator.latest_validator_votes_for_frozen_banks,
|
||||
&vote_simulator.heaviest_subtree_fork_choice,
|
||||
),
|
||||
SwitchForkDecision::FailedSwitchThreshold(0, 20000)
|
||||
);
|
||||
@@ -2031,7 +2063,8 @@ pub mod test {
|
||||
&vote_simulator.progress,
|
||||
total_stake,
|
||||
bank0.epoch_vote_accounts(0).unwrap(),
|
||||
&vote_simulator.latest_validator_votes_for_frozen_banks
|
||||
&vote_simulator.latest_validator_votes_for_frozen_banks,
|
||||
&vote_simulator.heaviest_subtree_fork_choice,
|
||||
),
|
||||
SwitchForkDecision::FailedSwitchThreshold(0, 20000)
|
||||
);
|
||||
@@ -2047,7 +2080,8 @@ pub mod test {
|
||||
&vote_simulator.progress,
|
||||
total_stake,
|
||||
bank0.epoch_vote_accounts(0).unwrap(),
|
||||
&vote_simulator.latest_validator_votes_for_frozen_banks
|
||||
&vote_simulator.latest_validator_votes_for_frozen_banks,
|
||||
&vote_simulator.heaviest_subtree_fork_choice,
|
||||
),
|
||||
SwitchForkDecision::SwitchProof(Hash::default())
|
||||
);
|
||||
@@ -2064,7 +2098,8 @@ pub mod test {
|
||||
&vote_simulator.progress,
|
||||
total_stake,
|
||||
bank0.epoch_vote_accounts(0).unwrap(),
|
||||
&vote_simulator.latest_validator_votes_for_frozen_banks
|
||||
&vote_simulator.latest_validator_votes_for_frozen_banks,
|
||||
&vote_simulator.heaviest_subtree_fork_choice,
|
||||
),
|
||||
SwitchForkDecision::SwitchProof(Hash::default())
|
||||
);
|
||||
@@ -2090,7 +2125,8 @@ pub mod test {
|
||||
&vote_simulator.progress,
|
||||
total_stake,
|
||||
bank0.epoch_vote_accounts(0).unwrap(),
|
||||
&vote_simulator.latest_validator_votes_for_frozen_banks
|
||||
&vote_simulator.latest_validator_votes_for_frozen_banks,
|
||||
&vote_simulator.heaviest_subtree_fork_choice,
|
||||
),
|
||||
SwitchForkDecision::FailedSwitchThreshold(0, 20000)
|
||||
);
|
||||
@@ -2122,7 +2158,8 @@ pub mod test {
|
||||
&vote_simulator.progress,
|
||||
total_stake,
|
||||
bank0.epoch_vote_accounts(0).unwrap(),
|
||||
&vote_simulator.latest_validator_votes_for_frozen_banks
|
||||
&vote_simulator.latest_validator_votes_for_frozen_banks,
|
||||
&vote_simulator.heaviest_subtree_fork_choice,
|
||||
),
|
||||
SwitchForkDecision::FailedSwitchThreshold(0, num_validators * 10000)
|
||||
);
|
||||
@@ -2137,7 +2174,8 @@ pub mod test {
|
||||
&vote_simulator.progress,
|
||||
total_stake,
|
||||
bank0.epoch_vote_accounts(0).unwrap(),
|
||||
&vote_simulator.latest_validator_votes_for_frozen_banks
|
||||
&vote_simulator.latest_validator_votes_for_frozen_banks,
|
||||
&vote_simulator.heaviest_subtree_fork_choice,
|
||||
),
|
||||
SwitchForkDecision::FailedSwitchThreshold(0, 20000)
|
||||
);
|
||||
@@ -2169,7 +2207,8 @@ pub mod test {
|
||||
&vote_simulator.progress,
|
||||
total_stake,
|
||||
bank0.epoch_vote_accounts(0).unwrap(),
|
||||
&vote_simulator.latest_validator_votes_for_frozen_banks
|
||||
&vote_simulator.latest_validator_votes_for_frozen_banks,
|
||||
&vote_simulator.heaviest_subtree_fork_choice,
|
||||
),
|
||||
SwitchForkDecision::SwitchProof(Hash::default())
|
||||
);
|
||||
@@ -2193,7 +2232,8 @@ pub mod test {
|
||||
&vote_simulator.progress,
|
||||
total_stake,
|
||||
bank0.epoch_vote_accounts(0).unwrap(),
|
||||
&vote_simulator.latest_validator_votes_for_frozen_banks
|
||||
&vote_simulator.latest_validator_votes_for_frozen_banks,
|
||||
&vote_simulator.heaviest_subtree_fork_choice,
|
||||
),
|
||||
SwitchForkDecision::FailedSwitchThreshold(0, 20000)
|
||||
);
|
||||
@@ -2804,7 +2844,7 @@ pub mod test {
|
||||
|
||||
tower.save(&identity_keypair).unwrap();
|
||||
modify_serialized(&tower.path);
|
||||
let loaded = Tower::restore(&dir.path(), &identity_keypair.pubkey());
|
||||
let loaded = Tower::restore(dir.path(), &identity_keypair.pubkey());
|
||||
|
||||
(tower, loaded)
|
||||
}
|
||||
@@ -2872,7 +2912,8 @@ pub mod test {
|
||||
&vote_simulator.progress,
|
||||
total_stake,
|
||||
bank0.epoch_vote_accounts(0).unwrap(),
|
||||
&vote_simulator.latest_validator_votes_for_frozen_banks
|
||||
&vote_simulator.latest_validator_votes_for_frozen_banks,
|
||||
&vote_simulator.heaviest_subtree_fork_choice,
|
||||
),
|
||||
SwitchForkDecision::SameFork
|
||||
);
|
||||
@@ -2886,7 +2927,8 @@ pub mod test {
|
||||
&vote_simulator.progress,
|
||||
total_stake,
|
||||
bank0.epoch_vote_accounts(0).unwrap(),
|
||||
&vote_simulator.latest_validator_votes_for_frozen_banks
|
||||
&vote_simulator.latest_validator_votes_for_frozen_banks,
|
||||
&vote_simulator.heaviest_subtree_fork_choice,
|
||||
),
|
||||
SwitchForkDecision::FailedSwitchThreshold(0, 20000)
|
||||
);
|
||||
@@ -2901,7 +2943,8 @@ pub mod test {
|
||||
&vote_simulator.progress,
|
||||
total_stake,
|
||||
bank0.epoch_vote_accounts(0).unwrap(),
|
||||
&vote_simulator.latest_validator_votes_for_frozen_banks
|
||||
&vote_simulator.latest_validator_votes_for_frozen_banks,
|
||||
&vote_simulator.heaviest_subtree_fork_choice,
|
||||
),
|
||||
SwitchForkDecision::SwitchProof(Hash::default())
|
||||
);
|
||||
@@ -2971,7 +3014,8 @@ pub mod test {
|
||||
&vote_simulator.progress,
|
||||
total_stake,
|
||||
bank0.epoch_vote_accounts(0).unwrap(),
|
||||
&vote_simulator.latest_validator_votes_for_frozen_banks
|
||||
&vote_simulator.latest_validator_votes_for_frozen_banks,
|
||||
&vote_simulator.heaviest_subtree_fork_choice,
|
||||
),
|
||||
SwitchForkDecision::FailedSwitchThreshold(0, 20000)
|
||||
);
|
||||
@@ -2986,7 +3030,8 @@ pub mod test {
|
||||
&vote_simulator.progress,
|
||||
total_stake,
|
||||
bank0.epoch_vote_accounts(0).unwrap(),
|
||||
&vote_simulator.latest_validator_votes_for_frozen_banks
|
||||
&vote_simulator.latest_validator_votes_for_frozen_banks,
|
||||
&vote_simulator.heaviest_subtree_fork_choice,
|
||||
),
|
||||
SwitchForkDecision::FailedSwitchThreshold(0, 20000)
|
||||
);
|
||||
@@ -3001,7 +3046,8 @@ pub mod test {
|
||||
&vote_simulator.progress,
|
||||
total_stake,
|
||||
bank0.epoch_vote_accounts(0).unwrap(),
|
||||
&vote_simulator.latest_validator_votes_for_frozen_banks
|
||||
&vote_simulator.latest_validator_votes_for_frozen_banks,
|
||||
&vote_simulator.heaviest_subtree_fork_choice,
|
||||
),
|
||||
SwitchForkDecision::SwitchProof(Hash::default())
|
||||
);
|
||||
|
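The recurring change in the hunks above threads a new `&heaviest_subtree_fork_choice` argument into `check_switch_threshold`, and the duplicate-rollback test expects a new decision variant. A minimal, self-contained sketch of the decision surface (variant names are taken from the tests above; the `describe` mapping is illustrative only, not the validator's actual handling):

// Sketch only: mirrors the SwitchForkDecision variants exercised above.
type Slot = u64;

#[derive(Debug, PartialEq)]
enum SwitchForkDecision {
    SameFork,
    SwitchProof([u8; 32]),
    FailedSwitchThreshold(u64, u64), // (stake seen on other forks, total stake)
    FailedSwitchDuplicateRollback(Slot),
}

fn describe(decision: &SwitchForkDecision) -> &'static str {
    match decision {
        SwitchForkDecision::SameFork => "voting on the same fork; no proof needed",
        SwitchForkDecision::SwitchProof(_) => "enough stake elsewhere; switch with a proof",
        SwitchForkDecision::FailedSwitchThreshold(_, _) => {
            "not enough stake voted on other forks; keep waiting"
        }
        SwitchForkDecision::FailedSwitchDuplicateRollback(_) => {
            "last vote landed on an unconfirmed duplicate fork; await rollback or confirmation"
        }
    }
}

fn main() {
    let d = SwitchForkDecision::FailedSwitchDuplicateRollback(45);
    println!("{}", describe(&d));
}
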
@@ -1,11 +1,11 @@
//! The `fetch_stage` batches input from a UDP socket and sends it to a channel.

use crate::banking_stage::HOLD_TRANSACTIONS_SLOT_OFFSET;
- use crate::poh_recorder::PohRecorder;
use crate::result::{Error, Result};
use solana_metrics::{inc_new_counter_debug, inc_new_counter_info};
use solana_perf::packet::PacketsRecycler;
use solana_perf::recycler::Recycler;
+ use solana_poh::poh_recorder::PohRecorder;
use solana_sdk::clock::DEFAULT_TICKS_PER_SLOT;
use solana_streamer::streamer::{self, PacketReceiver, PacketSender};
use std::net::UdpSocket;

@@ -34,7 +34,7 @@ impl FetchStage {
tpu_forwards_sockets,
exit,
&sender,
- &poh_recorder,
+ poh_recorder,
coalesce_ms,
),
receiver,

@@ -54,8 +54,8 @@ impl FetchStage {
tx_sockets,
tpu_forwards_sockets,
exit,
- &sender,
- &poh_recorder,
+ sender,
+ poh_recorder,
coalesce_ms,
)
}

@@ -85,7 +85,7 @@ impl FetchStage {
inc_new_counter_debug!("fetch_stage-honor_forwards", len);
for packets in batch {
if sendr.send(packets).is_err() {
- return Err(Error::SendError);
+ return Err(Error::Send);
}
}
} else {

@@ -108,11 +108,12 @@ impl FetchStage {
let tpu_threads = sockets.into_iter().map(|socket| {
streamer::receiver(
socket,
- &exit,
+ exit,
sender.clone(),
recycler.clone(),
"fetch_stage",
coalesce_ms,
+ true,
)
});

@@ -120,11 +121,12 @@ impl FetchStage {
let tpu_forwards_threads = tpu_forwards_sockets.into_iter().map(|socket| {
streamer::receiver(
socket,
- &exit,
+ exit,
forward_sender.clone(),
recycler.clone(),
"fetch_forward_stage",
coalesce_ms,
+ true,
)
});

@@ -138,10 +140,10 @@ impl FetchStage {
Self::handle_forwarded_packets(&forward_receiver, &sender, &poh_recorder)
{
match e {
- Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => break,
- Error::RecvTimeoutError(RecvTimeoutError::Timeout) => (),
- Error::RecvError(_) => break,
- Error::SendError => break,
+ Error::RecvTimeout(RecvTimeoutError::Disconnected) => break,
+ Error::RecvTimeout(RecvTimeoutError::Timeout) => (),
+ Error::Recv(_) => break,
+ Error::Send => break,
_ => error!("{:?}", e),
}
}

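The error hunk above is a mechanical variant rename (`SendError` -> `Send`, `RecvError` -> `Recv`, `RecvTimeoutError` -> `RecvTimeout`), dropping the redundant suffix that clippy's `enum_variant_names` lint flags on an enum already named `Error`. A self-contained sketch of the renamed shape, assuming the std `mpsc` error types; this is not the crate's full error enum:

// Sketch only: shows the rename pattern, not solana-core's actual Error type.
use std::sync::mpsc::{RecvError, RecvTimeoutError};

#[derive(Debug)]
enum Error {
    Send,
    Recv(RecvError),
    RecvTimeout(RecvTimeoutError),
}

fn should_break(e: &Error) -> bool {
    match e {
        // Only a timeout is recoverable; everything else ends the loop.
        Error::RecvTimeout(RecvTimeoutError::Timeout) => false,
        Error::RecvTimeout(RecvTimeoutError::Disconnected) => true,
        Error::Recv(_) => true,
        Error::Send => true,
    }
}

fn main() {
    assert!(should_break(&Error::Send));
    assert!(!should_break(&Error::RecvTimeout(RecvTimeoutError::Timeout)));
    println!("ok");
}
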
@@ -30,7 +30,17 @@ const MAX_ROOT_PRINT_SECONDS: u64 = 30;
enum UpdateLabel {
Aggregate,
Add,
- MarkValid,
+ // Notify a fork in the tree that a particular slot in that fork is now
+ // marked as valid. If there are multiple MarkValid operations for
+ // a single node, should apply the one with the smaller slot first (hence
+ // why the actual slot is included here).
+ MarkValid(Slot),
+ // Notify a fork in the tree that a particular slot in that fork is now
+ // marked as invalid. If there are multiple MarkInvalid operations for
+ // a single node, should apply the one with the smaller slot first (hence
+ // why the actual slot is included here).
+ MarkInvalid(Slot),
Subtract,
}

@@ -53,7 +63,10 @@ impl GetSlotHash for Slot {
#[derive(PartialEq, Eq, Clone, Debug)]
enum UpdateOperation {
Add(u64),
- MarkValid,
+ MarkValid(Slot),
+ // Notify a fork in the tree that a particular slot in that fork is now
+ // marked as invalid.
+ MarkInvalid(Slot),
Subtract(u64),
Aggregate,
}

@@ -63,7 +76,8 @@ impl UpdateOperation {
match self {
Self::Aggregate => panic!("Should not get here"),
Self::Add(stake) => *stake += new_stake,
- Self::MarkValid => panic!("Should not get here"),
+ Self::MarkValid(_slot) => panic!("Should not get here"),
+ Self::MarkInvalid(_slot) => panic!("Should not get here"),
Self::Subtract(stake) => *stake += new_stake,
}
}
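The slot is now carried inside the label so that several `MarkValid`/`MarkInvalid` entries for the same node can coexist as distinct `BTreeMap` keys with a deterministic order, which is what the new comments require. A small sketch, with simplified key types, of how the derived `Ord` keeps the smaller slot first:

// Sketch only: simplified types, not the real SlotHashKey/UpdateLabel.
use std::collections::BTreeMap;

type Slot = u64;

#[derive(PartialEq, Eq, PartialOrd, Ord, Debug)]
enum UpdateLabel {
    Aggregate,
    MarkValid(Slot),
}

fn main() {
    let mut ops: BTreeMap<(Slot, UpdateLabel), &str> = BTreeMap::new();
    ops.insert((10, UpdateLabel::MarkValid(4)), "clear invalid ancestors <= 4");
    ops.insert((10, UpdateLabel::MarkValid(2)), "clear invalid ancestors <= 2");
    ops.insert((10, UpdateLabel::Aggregate), "recompute subtree weight");
    // Derived Ord sorts MarkValid(2) before MarkValid(4), so when scanning
    // forward the operation for the smaller slot is encountered first.
    for (key, op) in &ops {
        println!("{:?} -> {}", key, op);
    }
}
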
@@ -80,9 +94,68 @@ struct ForkInfo {
best_slot: SlotHashKey,
parent: Option<SlotHashKey>,
children: Vec<SlotHashKey>,
- // Whether the fork rooted at this slot is a valid contender
- // for the best fork
- is_candidate: bool,
+ // The latest ancestor of this node that has been marked invalid. If the slot
+ // itself is a duplicate, this is set to the slot itself.
+ latest_invalid_ancestor: Option<Slot>,
+ // Set to true if this slot or a child node was duplicate confirmed.
+ is_duplicate_confirmed: bool,
}

+ impl ForkInfo {
+ /// Returns if this node has been explicitly marked as a duplicate
+ /// slot
+ fn is_unconfirmed_duplicate(&self, my_slot: Slot) -> bool {
+ self.latest_invalid_ancestor
+ .map(|ancestor| ancestor == my_slot)
+ .unwrap_or(false)
+ }
+
+ /// Returns if the fork rooted at this node is included in fork choice
+ fn is_candidate(&self) -> bool {
+ self.latest_invalid_ancestor.is_none()
+ }
+
+ fn is_duplicate_confirmed(&self) -> bool {
+ self.is_duplicate_confirmed
+ }
+
+ fn set_duplicate_confirmed(&mut self) {
+ self.is_duplicate_confirmed = true;
+ self.latest_invalid_ancestor = None;
+ }
+
+ fn update_with_newly_valid_ancestor(
+ &mut self,
+ my_key: &SlotHashKey,
+ newly_valid_ancestor: Slot,
+ ) {
+ if let Some(latest_invalid_ancestor) = self.latest_invalid_ancestor {
+ if latest_invalid_ancestor <= newly_valid_ancestor {
+ info!("Fork choice for {:?} clearing latest invalid ancestor {:?} because {:?} was duplicate confirmed", my_key, latest_invalid_ancestor, newly_valid_ancestor);
+ self.latest_invalid_ancestor = None;
+ }
+ }
+ }
+
+ fn update_with_newly_invalid_ancestor(
+ &mut self,
+ my_key: &SlotHashKey,
+ newly_invalid_ancestor: Slot,
+ ) {
+ // Should not be marking a duplicate confirmed slot as invalid
+ assert!(!self.is_duplicate_confirmed);
+ if self
+ .latest_invalid_ancestor
+ .map(|latest_invalid_ancestor| newly_invalid_ancestor > latest_invalid_ancestor)
+ .unwrap_or(true)
+ {
+ info!(
+ "Fork choice for {:?} setting latest invalid ancestor from {:?} to {}",
+ my_key, self.latest_invalid_ancestor, newly_invalid_ancestor
+ );
+ self.latest_invalid_ancestor = Some(newly_invalid_ancestor);
+ }
+ }
+ }
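The new `ForkInfo` methods above enforce one invariant: remember only the largest invalid ancestor, and clear it once a slot at least that deep is duplicate confirmed. A standalone sketch of just that bookkeeping (field and method names mirror the diff; everything else is simplified):

// Sketch only: trimmed-down ForkInfo, no keys or logging.
type Slot = u64;

#[derive(Default)]
struct ForkInfo {
    latest_invalid_ancestor: Option<Slot>,
    is_duplicate_confirmed: bool,
}

impl ForkInfo {
    fn update_with_newly_invalid_ancestor(&mut self, newly_invalid: Slot) {
        assert!(!self.is_duplicate_confirmed);
        // Keep only the largest (most recent) invalid ancestor.
        if self.latest_invalid_ancestor.map_or(true, |latest| newly_invalid > latest) {
            self.latest_invalid_ancestor = Some(newly_invalid);
        }
    }

    fn update_with_newly_valid_ancestor(&mut self, newly_valid: Slot) {
        // Confirming a slot clears the flag only if it covers the recorded ancestor.
        if let Some(latest) = self.latest_invalid_ancestor {
            if latest <= newly_valid {
                self.latest_invalid_ancestor = None;
            }
        }
    }
}

fn main() {
    let mut info = ForkInfo::default();
    info.update_with_newly_invalid_ancestor(44);
    info.update_with_newly_invalid_ancestor(45); // larger duplicate wins
    assert_eq!(info.latest_invalid_ancestor, Some(45));
    info.update_with_newly_valid_ancestor(44); // not enough: 45 is still invalid
    assert_eq!(info.latest_invalid_ancestor, Some(45));
    info.update_with_newly_valid_ancestor(45); // clears the flag
    assert_eq!(info.latest_invalid_ancestor, None);
    println!("ok");
}
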
pub struct HeaviestSubtreeForkChoice {

@@ -182,12 +255,6 @@ impl HeaviestSubtreeForkChoice {
.map(|fork_info| fork_info.stake_voted_subtree)
}

- pub fn is_candidate_slot(&self, key: &SlotHashKey) -> Option<bool> {
- self.fork_infos
- .get(key)
- .map(|fork_info| fork_info.is_candidate)
- }
-
pub fn root(&self) -> SlotHashKey {
self.root
}

@@ -252,35 +319,41 @@ impl HeaviestSubtreeForkChoice {
best_slot: root_info.best_slot,
children: vec![self.root],
parent: None,
- is_candidate: true,
+ latest_invalid_ancestor: None,
+ is_duplicate_confirmed: root_info.is_duplicate_confirmed,
};
self.fork_infos.insert(root_parent, root_parent_info);
self.root = root_parent;
}

- pub fn add_new_leaf_slot(&mut self, slot: SlotHashKey, parent: Option<SlotHashKey>) {
+ pub fn add_new_leaf_slot(&mut self, slot_hash_key: SlotHashKey, parent: Option<SlotHashKey>) {
if self.last_root_time.elapsed().as_secs() > MAX_ROOT_PRINT_SECONDS {
self.print_state();
self.last_root_time = Instant::now();
}

- if self.fork_infos.contains_key(&slot) {
+ if self.fork_infos.contains_key(&slot_hash_key) {
+ // Can potentially happen if we repair the same version of the duplicate slot, after
+ // dumping the original version
return;
}

+ let parent_latest_invalid_ancestor =
+ parent.and_then(|parent| self.latest_invalid_ancestor(&parent));
self.fork_infos
- .entry(slot)
- .and_modify(|slot_info| slot_info.parent = parent)
+ .entry(slot_hash_key)
+ .and_modify(|fork_info| fork_info.parent = parent)
.or_insert(ForkInfo {
stake_voted_at: 0,
stake_voted_subtree: 0,
// The `best_slot` of a leaf is itself
- best_slot: slot,
+ best_slot: slot_hash_key,
children: vec![],
parent,
- is_candidate: true,
+ latest_invalid_ancestor: parent_latest_invalid_ancestor,
+ // If the parent is none, then this is the root, which implies this must
+ // have reached the duplicate confirmed threshold
+ is_duplicate_confirmed: parent.is_none(),
});

if parent.is_none() {

@@ -294,11 +367,11 @@ impl HeaviestSubtreeForkChoice {
.get_mut(&parent)
.unwrap()
.children
- .push(slot);
+ .push(slot_hash_key);

// Propagate leaf up the tree to any ancestors who considered the previous leaf
// the `best_slot`
- self.propagate_new_leaf(&slot, &parent)
+ self.propagate_new_leaf(&slot_hash_key, &parent)
}

// Returns if the given `maybe_best_child` is the heaviest among the children

@@ -316,10 +389,7 @@ impl HeaviestSubtreeForkChoice {
.expect("child must exist in `self.fork_infos`");

// Don't count children currently marked as invalid
- if !self
- .is_candidate_slot(child)
- .expect("child must exist in tree")
- {
+ if !self.is_candidate(child).expect("child must exist in tree") {
continue;
}

@@ -378,6 +448,34 @@ impl HeaviestSubtreeForkChoice {
.map(|fork_info| fork_info.stake_voted_at)
}

+ pub fn latest_invalid_ancestor(&self, slot_hash_key: &SlotHashKey) -> Option<Slot> {
+ self.fork_infos
+ .get(slot_hash_key)
+ .map(|fork_info| fork_info.latest_invalid_ancestor)
+ .unwrap_or(None)
+ }
+
+ pub fn is_duplicate_confirmed(&self, slot_hash_key: &SlotHashKey) -> Option<bool> {
+ self.fork_infos
+ .get(slot_hash_key)
+ .map(|fork_info| fork_info.is_duplicate_confirmed())
+ }
+
+ /// Returns if the exact node with the specified key has been explicitly marked as a duplicate
+ /// slot (doesn't count ancestors being marked as duplicate).
+ pub fn is_unconfirmed_duplicate(&self, slot_hash_key: &SlotHashKey) -> Option<bool> {
+ self.fork_infos
+ .get(slot_hash_key)
+ .map(|fork_info| fork_info.is_unconfirmed_duplicate(slot_hash_key.0))
+ }
+
+ /// Returns false if the node or any of its ancestors have been marked as duplicate
+ pub fn is_candidate(&self, slot_hash_key: &SlotHashKey) -> Option<bool> {
+ self.fork_infos
+ .get(slot_hash_key)
+ .map(|fork_info| fork_info.is_candidate())
+ }
+
fn propagate_new_leaf(
&mut self,
slot_hash_key: &SlotHashKey,

@@ -406,45 +504,72 @@ impl HeaviestSubtreeForkChoice {
}
}

- fn insert_mark_valid_aggregate_operations(
- &self,
- update_operations: &mut BTreeMap<(SlotHashKey, UpdateLabel), UpdateOperation>,
- slot_hash_key: SlotHashKey,
- ) {
- self.do_insert_aggregate_operations(update_operations, true, slot_hash_key);
- }
-
fn insert_aggregate_operations(
&self,
update_operations: &mut BTreeMap<(SlotHashKey, UpdateLabel), UpdateOperation>,
slot_hash_key: SlotHashKey,
) {
- self.do_insert_aggregate_operations(update_operations, false, slot_hash_key);
+ self.do_insert_aggregate_operations_across_ancestors(
+ update_operations,
+ None,
+ slot_hash_key,
+ );
}

#[allow(clippy::map_entry)]
- fn do_insert_aggregate_operations(
+ fn do_insert_aggregate_operations_across_ancestors(
&self,
update_operations: &mut BTreeMap<(SlotHashKey, UpdateLabel), UpdateOperation>,
- should_mark_valid: bool,
+ modify_fork_validity: Option<UpdateOperation>,
slot_hash_key: SlotHashKey,
) {
for parent_slot_hash_key in self.ancestor_iterator(slot_hash_key) {
- let aggregate_label = (parent_slot_hash_key, UpdateLabel::Aggregate);
- if update_operations.contains_key(&aggregate_label) {
+ if !self.do_insert_aggregate_operation(
+ update_operations,
+ &modify_fork_validity,
+ parent_slot_hash_key,
+ ) {
// If this parent was already inserted, we assume all the other parents have also
// already been inserted. This is to prevent iterating over the parents multiple times
// when we are aggregating leaves that have a lot of shared ancestors
break;
- } else {
- if should_mark_valid {
- update_operations.insert(
- (parent_slot_hash_key, UpdateLabel::MarkValid),
- UpdateOperation::MarkValid,
- );
- }
- update_operations.insert(aggregate_label, UpdateOperation::Aggregate);
}
}
}

+ #[allow(clippy::map_entry)]
+ fn do_insert_aggregate_operation(
+ &self,
+ update_operations: &mut BTreeMap<(SlotHashKey, UpdateLabel), UpdateOperation>,
+ modify_fork_validity: &Option<UpdateOperation>,
+ slot_hash_key: SlotHashKey,
+ ) -> bool {
+ let aggregate_label = (slot_hash_key, UpdateLabel::Aggregate);
+ if update_operations.contains_key(&aggregate_label) {
+ false
+ } else {
+ if let Some(mark_fork_validity) = modify_fork_validity {
+ match mark_fork_validity {
+ UpdateOperation::MarkValid(slot) => {
+ update_operations.insert(
+ (slot_hash_key, UpdateLabel::MarkValid(*slot)),
+ UpdateOperation::MarkValid(*slot),
+ );
+ }
+ UpdateOperation::MarkInvalid(slot) => {
+ update_operations.insert(
+ (slot_hash_key, UpdateLabel::MarkInvalid(*slot)),
+ UpdateOperation::MarkInvalid(*slot),
+ );
+ }
+ _ => (),
+ }
+ }
+ update_operations.insert(aggregate_label, UpdateOperation::Aggregate);
+ true
+ }
+ }
+
fn ancestor_iterator(&self, start_slot_hash_key: SlotHashKey) -> AncestorIterator {
AncestorIterator::new(start_slot_hash_key, &self.fork_infos)
}

@@ -452,12 +577,18 @@ impl HeaviestSubtreeForkChoice {
fn aggregate_slot(&mut self, slot_hash_key: SlotHashKey) {
let mut stake_voted_subtree;
let mut best_slot_hash_key = slot_hash_key;
+ let mut is_duplicate_confirmed = false;
if let Some(fork_info) = self.fork_infos.get(&slot_hash_key) {
stake_voted_subtree = fork_info.stake_voted_at;
let mut best_child_stake_voted_subtree = 0;
- let mut best_child_slot = slot_hash_key;
- for child in &fork_info.children {
- let child_stake_voted_subtree = self.stake_voted_subtree(child).unwrap();
+ let mut best_child_slot_key = slot_hash_key;
+ for child_key in &fork_info.children {
+ let child_fork_info = self
+ .fork_infos
+ .get(child_key)
+ .expect("Child must exist in fork_info map");
+ let child_stake_voted_subtree = child_fork_info.stake_voted_subtree;
+ is_duplicate_confirmed |= child_fork_info.is_duplicate_confirmed;

// Child forks that are not candidates still contribute to the weight
// of the subtree rooted at `slot_hash_key`. For instance:

@@ -482,19 +613,15 @@ impl HeaviestSubtreeForkChoice {

// Note: If there's no valid children, then the best slot should default to the
// input `slot` itself.
- if self
- .is_candidate_slot(child)
- .expect("Child must exist in fork_info map")
- && (best_child_slot == slot_hash_key ||
+ if child_fork_info.is_candidate()
+ && (best_child_slot_key == slot_hash_key ||
child_stake_voted_subtree > best_child_stake_voted_subtree ||
// tiebreaker by slot height, prioritize earlier slot
- (child_stake_voted_subtree == best_child_stake_voted_subtree && child < &best_child_slot))
+ (child_stake_voted_subtree == best_child_stake_voted_subtree && child_key < &best_child_slot_key))
{
best_child_stake_voted_subtree = child_stake_voted_subtree;
- best_child_slot = *child;
- best_slot_hash_key = self
- .best_slot(child)
- .expect("`child` must exist in `self.fork_infos`");
+ best_child_slot_key = *child_key;
+ best_slot_hash_key = child_fork_info.best_slot;
}
}
} else {

@@ -502,19 +629,38 @@ impl HeaviestSubtreeForkChoice {
}

let fork_info = self.fork_infos.get_mut(&slot_hash_key).unwrap();
+ if is_duplicate_confirmed {
+ if !fork_info.is_duplicate_confirmed {
+ info!(
+ "Fork choice setting {:?} to duplicate confirmed",
+ slot_hash_key
+ );
+ }
+ fork_info.set_duplicate_confirmed();
+ }
fork_info.stake_voted_subtree = stake_voted_subtree;
fork_info.best_slot = best_slot_hash_key;
}
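`aggregate_slot` picks the best child by subtree stake and, on a tie, by the smaller `(slot, hash)` key, as the tiebreaker comment notes. The same rule can be stated as a single tuple comparison; illustrative only, with toy data:

// Sketch only: the selection rule, divorced from the tree bookkeeping.
type SlotHashKey = (u64, [u8; 32]);

fn main() {
    // (child key, stake_voted_subtree) pairs; hashes shortened to byte fills.
    let mut children: Vec<(SlotHashKey, u64)> = vec![
        ((5, [2; 32]), 300),
        ((4, [9; 32]), 300), // same stake as slot 5 -> smaller slot wins
        ((6, [1; 32]), 100),
    ];
    // Sort by stake descending, then by key ascending (the tiebreaker).
    children.sort_by(|a, b| b.1.cmp(&a.1).then(a.0.cmp(&b.0)));
    let best = children[0].0;
    assert_eq!(best.0, 4);
    println!("best child slot: {}", best.0);
}
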

- fn mark_slot_valid(&mut self, valid_slot_hash_key: (Slot, Hash)) {
- if let Some(fork_info) = self.fork_infos.get_mut(&valid_slot_hash_key) {
- if !fork_info.is_candidate {
- info!(
- "marked previously invalid fork starting at slot: {:?} as valid",
- valid_slot_hash_key
- );
- }
- fork_info.is_candidate = true;
- }
- }
+ /// Mark that `valid_slot` on the fork starting at `fork_to_modify` has been marked
+ /// valid. Note we don't need the hash for `valid_slot` because slot number uniquely
+ /// identifies a node on a single fork.
+ fn mark_fork_valid(&mut self, fork_to_modify_key: SlotHashKey, valid_slot: Slot) {
+ if let Some(fork_info_to_modify) = self.fork_infos.get_mut(&fork_to_modify_key) {
+ fork_info_to_modify.update_with_newly_valid_ancestor(&fork_to_modify_key, valid_slot);
+ if fork_to_modify_key.0 == valid_slot {
+ fork_info_to_modify.is_duplicate_confirmed = true;
+ }
+ }
+ }
+
+ /// Mark that `invalid_slot` on the fork starting at `fork_to_modify` has been marked
+ /// invalid. Note we don't need the hash for `invalid_slot` because slot number uniquely
+ /// identifies a node on a single fork.
+ fn mark_fork_invalid(&mut self, fork_to_modify_key: SlotHashKey, invalid_slot: Slot) {
+ if let Some(fork_info_to_modify) = self.fork_infos.get_mut(&fork_to_modify_key) {
+ fork_info_to_modify
+ .update_with_newly_invalid_ancestor(&fork_to_modify_key, invalid_slot);
+ }
+ }

@@ -624,7 +770,7 @@ impl HeaviestSubtreeForkChoice {
let epoch = epoch_schedule.get_epoch(new_vote_slot_hash.0);
let stake_update = epoch_stakes
.get(&epoch)
- .map(|epoch_stakes| epoch_stakes.vote_account_stake(&pubkey))
+ .map(|epoch_stakes| epoch_stakes.vote_account_stake(pubkey))
.unwrap_or(0);

update_operations

@@ -641,7 +787,12 @@ impl HeaviestSubtreeForkChoice {
// Iterate through the update operations from greatest to smallest slot
for ((slot_hash_key, _), operation) in update_operations.into_iter().rev() {
match operation {
- UpdateOperation::MarkValid => self.mark_slot_valid(slot_hash_key),
+ UpdateOperation::MarkValid(valid_slot) => {
+ self.mark_fork_valid(slot_hash_key, valid_slot)
+ }
+ UpdateOperation::MarkInvalid(invalid_slot) => {
+ self.mark_fork_invalid(slot_hash_key, invalid_slot)
+ }
UpdateOperation::Aggregate => self.aggregate_slot(slot_hash_key),
UpdateOperation::Add(stake) => self.add_slot_stake(&slot_hash_key, stake),
UpdateOperation::Subtract(stake) => self.subtract_slot_stake(&slot_hash_key, stake),

@@ -745,7 +896,7 @@ impl TreeDiff for HeaviestSubtreeForkChoice {

fn children(&self, slot_hash_key: &SlotHashKey) -> Option<&[SlotHashKey]> {
self.fork_infos
- .get(&slot_hash_key)
+ .get(slot_hash_key)
.map(|fork_info| &fork_info.children[..])
}
}

@@ -810,35 +961,48 @@ impl ForkChoice for HeaviestSubtreeForkChoice {

fn mark_fork_invalid_candidate(&mut self, invalid_slot_hash_key: &SlotHashKey) {
info!(
- "marking fork starting at slot: {:?} invalid candidate",
+ "marking fork starting at: {:?} invalid candidate",
invalid_slot_hash_key
);
let fork_info = self.fork_infos.get_mut(invalid_slot_hash_key);
if let Some(fork_info) = fork_info {
- if fork_info.is_candidate {
- fork_info.is_candidate = false;
- // Aggregate to find the new best slots excluding this fork
- let mut update_operations = UpdateOperations::default();
- self.insert_aggregate_operations(&mut update_operations, *invalid_slot_hash_key);
- self.process_update_operations(update_operations);
- }
+ // Should not be marking duplicate confirmed blocks as invalid candidates
+ assert!(!fork_info.is_duplicate_confirmed);
+ let mut update_operations = UpdateOperations::default();
+
+ // Notify all the children of this node that a parent was marked as invalid
+ for child_hash_key in self.subtree_diff(*invalid_slot_hash_key, SlotHashKey::default())
+ {
+ self.do_insert_aggregate_operation(
+ &mut update_operations,
+ &Some(UpdateOperation::MarkInvalid(invalid_slot_hash_key.0)),
+ child_hash_key,
+ );
+ }
+
+ // Aggregate across all ancestors to find the new best slots excluding this fork
+ self.insert_aggregate_operations(&mut update_operations, *invalid_slot_hash_key);
+ self.process_update_operations(update_operations);
}
}

fn mark_fork_valid_candidate(&mut self, valid_slot_hash_key: &SlotHashKey) {
+ info!(
+ "marking fork starting at: {:?} valid candidate",
+ valid_slot_hash_key
+ );
let mut update_operations = UpdateOperations::default();
let fork_info = self.fork_infos.get_mut(valid_slot_hash_key);
if let Some(fork_info) = fork_info {
- // If a bunch of slots on the same fork are confirmed at once, then only the latest
- // slot will incur this aggregation operation
- fork_info.is_candidate = true;
- self.insert_mark_valid_aggregate_operations(
- &mut update_operations,
- *valid_slot_hash_key,
- );
+ // Notify all the children of this node that a parent was marked as valid
+ for child_hash_key in self.subtree_diff(*valid_slot_hash_key, SlotHashKey::default()) {
+ self.do_insert_aggregate_operation(
+ &mut update_operations,
+ &Some(UpdateOperation::MarkValid(valid_slot_hash_key.0)),
+ child_hash_key,
+ );
+ }

- // Aggregate to find the new best slots including this fork
+ // Aggregate across all ancestors to find the new best slots including this fork
self.insert_aggregate_operations(&mut update_operations, *valid_slot_hash_key);
self.process_update_operations(update_operations);
}
}
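Both `mark_fork_invalid_candidate` and `mark_fork_valid_candidate` now perform the same two-phase walk: push a `MarkInvalid`/`MarkValid` notification to every node in the subtree (via `subtree_diff`), then queue `Aggregate` for every ancestor. A toy model of the two traversals on a small tree; the real code batches these as `BTreeMap` update operations, and the `Tree` struct here is hypothetical:

// Sketch only: which nodes each phase touches, nothing more.
use std::collections::HashMap;

type Slot = u64;

struct Tree {
    children: HashMap<Slot, Vec<Slot>>,
    parent: HashMap<Slot, Slot>,
}

impl Tree {
    // Phase 1: every node in the subtree gets the Mark(In)valid notification.
    fn subtree(&self, root: Slot) -> Vec<Slot> {
        let mut out = vec![root];
        let mut stack = vec![root];
        while let Some(s) = stack.pop() {
            for &c in self.children.get(&s).into_iter().flatten() {
                out.push(c);
                stack.push(c);
            }
        }
        out
    }

    // Phase 2: every ancestor gets an Aggregate to recompute its best slot.
    fn ancestors(&self, mut slot: Slot) -> Vec<Slot> {
        let mut out = vec![];
        while let Some(&p) = self.parent.get(&slot) {
            out.push(p);
            slot = p;
        }
        out
    }
}

fn main() {
    // 0 -> 1 -> 2 -> {3, 4}
    let tree = Tree {
        children: HashMap::from([(0, vec![1]), (1, vec![2]), (2, vec![3, 4])]),
        parent: HashMap::from([(1, 0), (2, 1), (3, 2), (4, 2)]),
    };
    let invalid = 2;
    println!("notify subtree of {}: {:?}", invalid, tree.subtree(invalid));
    println!("re-aggregate ancestors: {:?}", tree.ancestors(invalid));
}
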

@@ -1333,7 +1497,7 @@ mod test {
.chain(std::iter::once(&duplicate_leaves_descended_from_4[1]))
{
assert!(heaviest_subtree_fork_choice
- .children(&duplicate_leaf)
+ .children(duplicate_leaf)
.unwrap()
.is_empty(),);
}

@@ -2740,34 +2904,50 @@ mod test {
(expected_best_slot, Hash::default()),
);

- // Mark slot 5 as invalid, the best fork should be its ancestor 3,
- // not the other fork at 4.
- let invalid_candidate = (5, Hash::default());
+ // Simulate a vote on slot 5
+ let last_voted_slot_hash = (5, Hash::default());
+ let mut tower = Tower::new_for_tests(10, 0.9);
+ tower.record_vote(last_voted_slot_hash.0, last_voted_slot_hash.1);
+
+ // The heaviest_slot_on_same_voted_fork() should be 6, descended from 5.
+ assert_eq!(
+ heaviest_subtree_fork_choice
+ .heaviest_slot_on_same_voted_fork(&tower)
+ .unwrap(),
+ (6, Hash::default())
+ );
+
+ // Mark slot 5 as invalid
+ let invalid_candidate = last_voted_slot_hash;
heaviest_subtree_fork_choice.mark_fork_invalid_candidate(&invalid_candidate);
- assert_eq!(heaviest_subtree_fork_choice.best_overall_slot().0, 3);
assert!(!heaviest_subtree_fork_choice
- .is_candidate_slot(&invalid_candidate)
+ .is_candidate(&invalid_candidate)
.unwrap());

- // The ancestor is still a candidate
+ // The ancestor 3 is still a candidate
assert!(heaviest_subtree_fork_choice
- .is_candidate_slot(&(3, Hash::default()))
+ .is_candidate(&(3, Hash::default()))
.unwrap());

+ // The best fork should be its ancestor 3, not the other fork at 4.
+ assert_eq!(heaviest_subtree_fork_choice.best_overall_slot().0, 3);
+
+ // After marking the last vote in the tower as invalid, `heaviest_slot_on_same_voted_fork()`
+ // should disregard all descendants of that invalid vote
+ assert!(heaviest_subtree_fork_choice
+ .heaviest_slot_on_same_voted_fork(&tower)
+ .is_none());
+
// Adding another descendant to the invalid candidate won't
// update the best slot, even if it contains votes
- let new_leaf_slot7 = 7;
- heaviest_subtree_fork_choice.add_new_leaf_slot(
- (new_leaf_slot7, Hash::default()),
- Some((6, Hash::default())),
- );
+ let new_leaf7 = (7, Hash::default());
+ heaviest_subtree_fork_choice.add_new_leaf_slot(new_leaf7, Some((6, Hash::default())));
let invalid_slot_ancestor = 3;
assert_eq!(
heaviest_subtree_fork_choice.best_overall_slot().0,
invalid_slot_ancestor
);
- let pubkey_votes: Vec<(Pubkey, SlotHashKey)> =
- vec![(vote_pubkeys[0], (new_leaf_slot7, Hash::default()))];
+ let pubkey_votes: Vec<(Pubkey, SlotHashKey)> = vec![(vote_pubkeys[0], new_leaf7)];
assert_eq!(
heaviest_subtree_fork_choice.add_votes(
pubkey_votes.iter(),

@@ -2777,34 +2957,51 @@ mod test {
(invalid_slot_ancestor, Hash::default()),
);

+ // This shouldn't update the `heaviest_slot_on_same_voted_fork` either
+ assert!(heaviest_subtree_fork_choice
+ .heaviest_slot_on_same_voted_fork(&tower)
+ .is_none());
+
// Adding a descendant to the ancestor of the invalid candidate *should* update
// the best slot though, since the ancestor is on the heaviest fork
- let new_leaf_slot8 = 8;
- heaviest_subtree_fork_choice.add_new_leaf_slot(
- (new_leaf_slot8, Hash::default()),
- Some((invalid_slot_ancestor, Hash::default())),
- );
- assert_eq!(
- heaviest_subtree_fork_choice.best_overall_slot().0,
- new_leaf_slot8,
- );
+ let new_leaf8 = (8, Hash::default());
+ heaviest_subtree_fork_choice
+ .add_new_leaf_slot(new_leaf8, Some((invalid_slot_ancestor, Hash::default())));
+ assert_eq!(heaviest_subtree_fork_choice.best_overall_slot(), new_leaf8,);
+ // Should not update the `heaviest_slot_on_same_voted_fork` because the new leaf
+ // is not descended from the last vote
+ assert!(heaviest_subtree_fork_choice
+ .heaviest_slot_on_same_voted_fork(&tower)
+ .is_none());

// If we mark a descendant of `invalid_candidate` as valid, then that
// should also mark `invalid_candidate` as valid, and the best slot should
// be the leaf of the heaviest fork, `new_leaf_slot`.
heaviest_subtree_fork_choice.mark_fork_valid_candidate(&invalid_candidate);
assert!(heaviest_subtree_fork_choice
- .is_candidate_slot(&invalid_candidate)
+ .is_candidate(&invalid_candidate)
.unwrap());
assert_eq!(
- heaviest_subtree_fork_choice.best_overall_slot().0,
+ heaviest_subtree_fork_choice.best_overall_slot(),
// Should pick the smaller slot of the two new equally weighted leaves
- new_leaf_slot7
+ new_leaf7
);
+ // Should update the `heaviest_slot_on_same_voted_fork` as well
+ assert_eq!(
+ heaviest_subtree_fork_choice
+ .heaviest_slot_on_same_voted_fork(&tower)
+ .unwrap(),
+ new_leaf7
+ );
}

- #[test]
- fn test_mark_valid_invalid_forks_duplicate() {
+ fn setup_mark_invalid_forks_duplicate_tests() -> (
+ HeaviestSubtreeForkChoice,
+ Vec<SlotHashKey>,
+ SlotHashKey,
+ Bank,
+ Vec<Pubkey>,
+ ) {
let (
mut heaviest_subtree_fork_choice,
duplicate_leaves_descended_from_4,

@@ -2832,11 +3029,27 @@ mod test {
// the other branch at slot 5
let invalid_candidate = (4, Hash::default());
heaviest_subtree_fork_choice.mark_fork_invalid_candidate(&invalid_candidate);

assert_eq!(
heaviest_subtree_fork_choice.best_overall_slot(),
(2, Hash::default())
);
+ (
+ heaviest_subtree_fork_choice,
+ duplicate_leaves_descended_from_4,
+ invalid_candidate,
+ bank,
+ vote_pubkeys,
+ )
+ }
+
+ #[test]
+ fn test_mark_invalid_then_valid_duplicate() {
+ let (
+ mut heaviest_subtree_fork_choice,
+ duplicate_leaves_descended_from_4,
+ invalid_candidate,
+ ..,
+ ) = setup_mark_invalid_forks_duplicate_tests();
+
// Marking candidate as valid again will choose the heaviest leaf of
// the newly valid branch

@@ -2851,16 +3064,26 @@ mod test {
heaviest_subtree_fork_choice.best_overall_slot(),
duplicate_descendant
);
}

- // Mark the current heaviest branch as invalid again
- heaviest_subtree_fork_choice.mark_fork_invalid_candidate(&invalid_candidate);
+ #[test]
+ fn test_mark_invalid_then_add_new_heavier_duplicate_slot() {
+ let (
+ mut heaviest_subtree_fork_choice,
+ duplicate_leaves_descended_from_4,
+ _invalid_candidate,
+ bank,
+ vote_pubkeys,
+ ) = setup_mark_invalid_forks_duplicate_tests();
+
// If we add a new version of the duplicate slot that is not descended from the invalid
// candidate and votes for that duplicate slot, the new duplicate slot should be picked
// once it has more weight
let new_duplicate_hash = Hash::default();

// The hash has to be smaller in order for the votes to be counted
assert!(new_duplicate_hash < duplicate_leaves_descended_from_4[0].1);
let duplicate_slot = duplicate_leaves_descended_from_4[0].0;
let new_duplicate = (duplicate_slot, new_duplicate_hash);
heaviest_subtree_fork_choice.add_new_leaf_slot(new_duplicate, Some((3, Hash::default())));

@@ -2881,6 +3104,285 @@ mod test {
);
}

+ #[test]
+ fn test_mark_valid_then_descendant_invalid() {
+ let forks = tr(0) / (tr(1) / (tr(2) / (tr(3) / (tr(4) / (tr(5) / tr(6))))));
+ let mut heaviest_subtree_fork_choice = HeaviestSubtreeForkChoice::new_from_tree(forks);
+ let duplicate_confirmed_slot: Slot = 1;
+ let duplicate_confirmed_key = duplicate_confirmed_slot.slot_hash();
+ heaviest_subtree_fork_choice.mark_fork_valid_candidate(&duplicate_confirmed_key);
+
+ for slot_hash_key in heaviest_subtree_fork_choice.fork_infos.keys() {
+ let slot = slot_hash_key.0;
+ if slot <= duplicate_confirmed_slot {
+ assert!(heaviest_subtree_fork_choice
+ .is_duplicate_confirmed(slot_hash_key)
+ .unwrap());
+ } else {
+ assert!(!heaviest_subtree_fork_choice
+ .is_duplicate_confirmed(slot_hash_key)
+ .unwrap());
+ }
+ assert!(heaviest_subtree_fork_choice
+ .latest_invalid_ancestor(slot_hash_key)
+ .is_none());
+ }
+
+ // Mark a later descendant invalid
+ let invalid_descendant_slot = 5;
+ let invalid_descendant_key = invalid_descendant_slot.slot_hash();
+ heaviest_subtree_fork_choice.mark_fork_invalid_candidate(&invalid_descendant_key);
+ for slot_hash_key in heaviest_subtree_fork_choice.fork_infos.keys() {
+ let slot = slot_hash_key.0;
+ if slot <= duplicate_confirmed_slot {
+ // All ancestors of the duplicate confirmed slot should:
+ // 1) Be duplicate confirmed
+ // 2) Have no invalid ancestors
+ assert!(heaviest_subtree_fork_choice
+ .is_duplicate_confirmed(slot_hash_key)
+ .unwrap());
+ assert!(heaviest_subtree_fork_choice
+ .latest_invalid_ancestor(slot_hash_key)
+ .is_none());
+ } else if slot >= invalid_descendant_slot {
+ // Anything descended from the invalid slot should:
+ // 1) Not be duplicate confirmed
+ // 2) Have an invalid ancestor == `invalid_descendant_slot`
+ assert!(!heaviest_subtree_fork_choice
+ .is_duplicate_confirmed(slot_hash_key)
+ .unwrap());
+ assert_eq!(
+ heaviest_subtree_fork_choice
+ .latest_invalid_ancestor(slot_hash_key)
+ .unwrap(),
+ invalid_descendant_slot
+ );
+ } else {
+ // Anything in between the duplicate confirmed slot and the invalid slot should:
+ // 1) Not be duplicate confirmed
+ // 2) Not have an invalid ancestor
+ assert!(!heaviest_subtree_fork_choice
+ .is_duplicate_confirmed(slot_hash_key)
+ .unwrap());
+ assert!(heaviest_subtree_fork_choice
+ .latest_invalid_ancestor(slot_hash_key)
+ .is_none());
+ }
+ }
+
+ // Mark later descendant `d` duplicate confirmed where `duplicate_confirmed_slot < d < invalid_descendant_slot`.
+ let later_duplicate_confirmed_slot = 4;
+ assert!(
+ later_duplicate_confirmed_slot > duplicate_confirmed_slot
+ && later_duplicate_confirmed_slot < invalid_descendant_slot
+ );
+ let later_duplicate_confirmed_key = later_duplicate_confirmed_slot.slot_hash();
+ heaviest_subtree_fork_choice.mark_fork_valid_candidate(&later_duplicate_confirmed_key);
+
+ for slot_hash_key in heaviest_subtree_fork_choice.fork_infos.keys() {
+ let slot = slot_hash_key.0;
+ if slot <= later_duplicate_confirmed_slot {
+ // All ancestors of the later_duplicate_confirmed_slot should:
+ // 1) Be duplicate confirmed
+ // 2) Have no invalid ancestors
+ assert!(heaviest_subtree_fork_choice
+ .is_duplicate_confirmed(slot_hash_key)
+ .unwrap());
+ assert!(heaviest_subtree_fork_choice
+ .latest_invalid_ancestor(slot_hash_key)
+ .is_none());
+ } else if slot >= invalid_descendant_slot {
+ // Anything descended from the invalid slot should:
+ // 1) Not be duplicate confirmed
+ // 2) Have an invalid ancestor == `invalid_descendant_slot`
+ assert!(!heaviest_subtree_fork_choice
+ .is_duplicate_confirmed(slot_hash_key)
+ .unwrap());
+ assert_eq!(
+ heaviest_subtree_fork_choice
+ .latest_invalid_ancestor(slot_hash_key)
+ .unwrap(),
+ invalid_descendant_slot
+ );
+ } else {
+ // Anything in between the duplicate confirmed slot and the invalid slot should:
+ // 1) Not be duplicate confirmed
+ // 2) Not have an invalid ancestor
+ assert!(!heaviest_subtree_fork_choice
+ .is_duplicate_confirmed(slot_hash_key)
+ .unwrap());
+ assert!(heaviest_subtree_fork_choice
+ .latest_invalid_ancestor(slot_hash_key)
+ .is_none());
+ }
+ }
+
+ // Mark all slots duplicate confirmed
+ let last_duplicate_confirmed_slot = 6;
+ let last_duplicate_confirmed_key = last_duplicate_confirmed_slot.slot_hash();
+ heaviest_subtree_fork_choice.mark_fork_valid_candidate(&last_duplicate_confirmed_key);
+ for slot_hash_key in heaviest_subtree_fork_choice.fork_infos.keys() {
+ assert!(heaviest_subtree_fork_choice
+ .is_duplicate_confirmed(slot_hash_key)
+ .unwrap());
+ assert!(heaviest_subtree_fork_choice
+ .latest_invalid_ancestor(slot_hash_key)
+ .is_none());
+ }
+ }
+
+ #[test]
+ #[should_panic]
+ fn test_mark_valid_then_ancestor_invalid() {
+ let forks = tr(0) / (tr(1) / (tr(2) / (tr(3) / (tr(4) / (tr(5) / tr(6))))));
+ let mut heaviest_subtree_fork_choice = HeaviestSubtreeForkChoice::new_from_tree(forks);
+ let duplicate_confirmed_slot: Slot = 4;
+ let duplicate_confirmed_key = duplicate_confirmed_slot.slot_hash();
+ heaviest_subtree_fork_choice.mark_fork_valid_candidate(&duplicate_confirmed_key);
+
+ // Now mark an ancestor of this fork invalid, should panic since this ancestor
+ // was duplicate confirmed by its descendant 4 already
+ heaviest_subtree_fork_choice.mark_fork_invalid_candidate(&3.slot_hash());
+ }
+
+ fn setup_set_unconfirmed_and_confirmed_duplicate_slot_tests(
+ smaller_duplicate_slot: Slot,
+ larger_duplicate_slot: Slot,
+ ) -> HeaviestSubtreeForkChoice {
+ // Create simple fork 0 -> 1 -> 2 -> 3 -> 4 -> 5
+ let forks = tr(0) / (tr(1) / (tr(2) / (tr(3) / (tr(4) / tr(5)))));
+ let mut heaviest_subtree_fork_choice = HeaviestSubtreeForkChoice::new_from_tree(forks);
+
+ // Mark the slots as unconfirmed duplicates
+ heaviest_subtree_fork_choice
+ .mark_fork_invalid_candidate(&smaller_duplicate_slot.slot_hash());
+ heaviest_subtree_fork_choice
+ .mark_fork_invalid_candidate(&larger_duplicate_slot.slot_hash());
+
+ // Correctness checks
+ for slot_hash_key in heaviest_subtree_fork_choice.fork_infos.keys() {
+ let slot = slot_hash_key.0;
+ if slot < smaller_duplicate_slot {
+ assert!(heaviest_subtree_fork_choice
+ .latest_invalid_ancestor(slot_hash_key)
+ .is_none());
+ } else if slot < larger_duplicate_slot {
+ assert_eq!(
+ heaviest_subtree_fork_choice
+ .latest_invalid_ancestor(slot_hash_key)
+ .unwrap(),
+ smaller_duplicate_slot
+ );
+ } else {
+ assert_eq!(
+ heaviest_subtree_fork_choice
+ .latest_invalid_ancestor(slot_hash_key)
+ .unwrap(),
+ larger_duplicate_slot
+ );
+ }
+ }
+
+ heaviest_subtree_fork_choice
+ }
+
+ #[test]
+ fn test_set_unconfirmed_duplicate_confirm_smaller_slot_first() {
+ let smaller_duplicate_slot = 1;
+ let larger_duplicate_slot = 4;
+ let mut heaviest_subtree_fork_choice =
+ setup_set_unconfirmed_and_confirmed_duplicate_slot_tests(
+ smaller_duplicate_slot,
+ larger_duplicate_slot,
+ );
+
+ // Mark the smaller duplicate slot as confirmed
+ heaviest_subtree_fork_choice.mark_fork_valid_candidate(&smaller_duplicate_slot.slot_hash());
+ for slot_hash_key in heaviest_subtree_fork_choice.fork_infos.keys() {
+ let slot = slot_hash_key.0;
+ if slot < larger_duplicate_slot {
+ // Only slots <= smaller_duplicate_slot have been duplicate confirmed
+ if slot <= smaller_duplicate_slot {
+ assert!(heaviest_subtree_fork_choice
+ .is_duplicate_confirmed(slot_hash_key)
+ .unwrap());
+ } else {
+ assert!(!heaviest_subtree_fork_choice
+ .is_duplicate_confirmed(slot_hash_key)
+ .unwrap());
+ }
+ // The unconfirmed duplicate flag has been cleared on the smaller
+ // descendants because their most recent duplicate ancestor has
+ // been confirmed
+ assert!(heaviest_subtree_fork_choice
+ .latest_invalid_ancestor(slot_hash_key)
+ .is_none());
+ } else {
+ assert!(!heaviest_subtree_fork_choice
+ .is_duplicate_confirmed(slot_hash_key)
+ .unwrap(),);
+ // The unconfirmed duplicate flag has not been cleared on the larger
+ // descendants because their most recent duplicate ancestor,
+ // `larger_duplicate_slot`, has not yet been confirmed
+ assert_eq!(
+ heaviest_subtree_fork_choice
+ .latest_invalid_ancestor(slot_hash_key)
+ .unwrap(),
+ larger_duplicate_slot
+ );
+ }
+ }
+
+ // Mark the larger duplicate slot as confirmed; all slots should no longer
+ // have any unconfirmed duplicate ancestors, and should be marked as duplicate confirmed
+ heaviest_subtree_fork_choice.mark_fork_valid_candidate(&larger_duplicate_slot.slot_hash());
+ for slot_hash_key in heaviest_subtree_fork_choice.fork_infos.keys() {
+ let slot = slot_hash_key.0;
+ // All slots <= the latest duplicate confirmed slot are ancestors of
+ // that slot, so they should all be marked duplicate confirmed
+ assert_eq!(
+ heaviest_subtree_fork_choice
+ .is_duplicate_confirmed(slot_hash_key)
+ .unwrap(),
+ slot <= larger_duplicate_slot
+ );
+ assert!(heaviest_subtree_fork_choice
+ .latest_invalid_ancestor(slot_hash_key)
+ .is_none());
+ }
+ }
+
+ #[test]
+ fn test_set_unconfirmed_duplicate_confirm_larger_slot_first() {
+ let smaller_duplicate_slot = 1;
+ let larger_duplicate_slot = 4;
+ let mut heaviest_subtree_fork_choice =
+ setup_set_unconfirmed_and_confirmed_duplicate_slot_tests(
+ smaller_duplicate_slot,
+ larger_duplicate_slot,
+ );
+
+ // Mark the larger duplicate slot as confirmed
+ heaviest_subtree_fork_choice.mark_fork_valid_candidate(&larger_duplicate_slot.slot_hash());
+
+ // All slots should no longer have any unconfirmed duplicate ancestors
+ heaviest_subtree_fork_choice.mark_fork_valid_candidate(&smaller_duplicate_slot.slot_hash());
+ for slot_hash_key in heaviest_subtree_fork_choice.fork_infos.keys() {
+ let slot = slot_hash_key.0;
+ // All slots <= the latest duplicate confirmed slot are ancestors of
+ // that slot, so they should all be marked duplicate confirmed
+ assert_eq!(
+ heaviest_subtree_fork_choice
+ .is_duplicate_confirmed(slot_hash_key)
+ .unwrap(),
+ slot <= larger_duplicate_slot
+ );
+ assert!(heaviest_subtree_fork_choice
+ .latest_invalid_ancestor(slot_hash_key)
+ .is_none());
+ }
+ }
+
fn setup_forks() -> HeaviestSubtreeForkChoice {
/*
Build fork structure:

@@ -187,7 +187,7 @@ impl LedgerCleanupService {
*last_purge_slot = root;

let (slots_to_clean, purge_first_slot, lowest_cleanup_slot, total_shreds) =
- Self::find_slots_to_clean(&blockstore, root, max_ledger_shreds);
+ Self::find_slots_to_clean(blockstore, root, max_ledger_shreds);

if slots_to_clean {
let purge_complete = Arc::new(AtomicBool::new(false));

@@ -207,11 +207,25 @@ impl LedgerCleanupService {
);

let mut purge_time = Measure::start("purge_slots");

blockstore.purge_slots(
purge_first_slot,
lowest_cleanup_slot,
- PurgeType::PrimaryIndex,
+ PurgeType::CompactionFilter,
);
+ // Update only after purge operation.
+ // Safety: This value can be used by compaction_filters shared via Arc<AtomicU64>.
+ // Compactions are async and run as a multi-threaded background job. However, this
+ // shouldn't cause consistency issues for iterators and getters because we have
+ // already expired all affected keys (older than or equal to lowest_cleanup_slot)
+ // by the above `purge_slots`. According to the general RocksDB design where SST
+ // files are immutable, even running iterators aren't affected; the database grabs
+ // a snapshot of the live set of sst files at iterator's creation.
+ // Also, we passed the PurgeType::CompactionFilter, meaning no delete_range for
+ // transaction_status and address_signatures CFs. These are fine because they
+ // don't require strong consistent view for their operation.
+ blockstore.set_max_expired_slot(lowest_cleanup_slot);

purge_time.stop();
info!("{}", purge_time);
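The new comment explains why publishing `lowest_cleanup_slot` only after the purge is safe for the asynchronous compaction filters. A sketch of the sharing pattern it describes, an `Arc<AtomicU64>` handed to a background thread; the names here are illustrative, not the blockstore API:

// Sketch only: how a threshold can be shared with a background filter thread.
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Arc;
use std::thread;

fn main() {
    // The cleanup thread owns one handle ...
    let max_expired_slot = Arc::new(AtomicU64::new(0));
    // ... and a background "compaction filter" owns another, polling it per key.
    let filter_view = Arc::clone(&max_expired_slot);
    let filter = thread::spawn(move || {
        let threshold = filter_view.load(Ordering::Relaxed);
        println!("filter would drop keys with slot <= {}", threshold);
    });

    // Publish the new threshold only *after* purge_slots has completed, as the
    // comment above requires, so the filter never expires keys that are still live.
    let lowest_cleanup_slot = 1_234;
    max_expired_slot.store(lowest_cleanup_slot, Ordering::Relaxed);

    filter.join().unwrap();
}
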
@@ -9,7 +9,6 @@

pub mod accounts_hash_verifier;
pub mod banking_stage;
- pub mod bigtable_upload_service;
pub mod broadcast_stage;
pub mod cache_block_meta_service;
pub mod cluster_info_vote_listener;

@@ -28,8 +27,6 @@ pub mod ledger_cleanup_service;
pub mod optimistic_confirmation_verifier;
pub mod outstanding_requests;
pub mod packet_hasher;
- pub mod poh_recorder;
- pub mod poh_service;
pub mod progress_map;
pub mod repair_response;
pub mod repair_service;

@@ -40,11 +37,7 @@ pub mod request_response;
mod result;
pub mod retransmit_stage;
pub mod rewards_recorder_service;
- pub mod rpc;
- pub mod rpc_health;
- pub mod rpc_service;
pub mod sample_performance_service;
- pub mod send_transaction_service;
pub mod serve_repair;
pub mod serve_repair_service;
pub mod shred_fetch_stage;

@@ -54,7 +47,6 @@ pub mod sigverify_stage;
pub mod snapshot_packager_service;
pub mod test_validator;
pub mod tpu;
- pub mod transaction_status_service;
pub mod tree_diff;
pub mod tvu;
pub mod unfrozen_gossip_verified_vote_hashes;

@@ -69,10 +61,6 @@ extern crate log;
#[macro_use]
extern crate serde_derive;

- #[cfg(test)]
- #[macro_use]
- extern crate serde_json;
-
#[macro_use]
extern crate solana_metrics;

@@ -36,7 +36,7 @@ impl OptimisticConfirmationVerifier {
.into_iter()
.filter(|(optimistic_slot, optimistic_hash)| {
(*optimistic_slot == root && *optimistic_hash != root_bank.hash())
- || (!root_ancestors.contains_key(&optimistic_slot) &&
+ || (!root_ancestors.contains_key(optimistic_slot) &&
// In this second part of the `and`, we account for the possibility that
// there was some other root `rootX` set in BankForks where:
//

@@ -63,6 +63,16 @@ impl ReplaySlotStats
("load_us", self.execute_timings.load_us, i64),
("execute_us", self.execute_timings.execute_us, i64),
("store_us", self.execute_timings.store_us, i64),
+ (
+ "total_batches_len",
+ self.execute_timings.total_batches_len,
+ i64
+ ),
+ (
+ "num_execute_batches",
+ self.execute_timings.num_execute_batches,
+ i64
+ ),
(
"serialize_us",
self.execute_timings.details.serialize_us,
@@ -140,7 +150,6 @@ pub(crate) struct ForkProgress
pub(crate) propagated_stats: PropagatedStats,
pub(crate) replay_stats: ReplaySlotStats,
pub(crate) replay_progress: ConfirmationProgress,
- pub(crate) duplicate_stats: DuplicateStats,
// Note `num_blocks_on_fork` and `num_dropped_blocks_on_fork` only
// count new blocks replayed since last restart, which won't include
// blocks already existing in the ledger/before snapshot at start,
@@ -153,7 +162,6 @@ impl ForkProgress
pub fn new(
last_entry: Hash,
prev_leader_slot: Option<Slot>,
- duplicate_stats: DuplicateStats,
validator_stake_info: Option<ValidatorStakeInfo>,
num_blocks_on_fork: u64,
num_dropped_blocks_on_fork: u64,
@@ -187,7 +195,6 @@ impl ForkProgress
fork_stats: ForkStats::default(),
replay_stats: ReplaySlotStats::default(),
replay_progress: ConfirmationProgress::new(last_entry),
- duplicate_stats,
num_blocks_on_fork,
num_dropped_blocks_on_fork,
propagated_stats: PropagatedStats {
@@ -207,16 +214,14 @@ impl ForkProgress
my_pubkey: &Pubkey,
voting_pubkey: &Pubkey,
prev_leader_slot: Option<Slot>,
- duplicate_stats: DuplicateStats,
num_blocks_on_fork: u64,
num_dropped_blocks_on_fork: u64,
) -> Self {
- let validator_fork_info = {
+ let validator_stake_info = {
if bank.collector_id() == my_pubkey {
- let stake = bank.epoch_vote_account_stake(voting_pubkey);
Some(ValidatorStakeInfo::new(
*voting_pubkey,
- stake,
+ bank.epoch_vote_account_stake(voting_pubkey),
bank.total_epoch_stake(),
))
} else {
@@ -227,20 +232,11 @@ impl ForkProgress
Self::new(
bank.last_blockhash(),
prev_leader_slot,
- duplicate_stats,
- validator_fork_info,
+ validator_stake_info,
num_blocks_on_fork,
num_dropped_blocks_on_fork,
)
}

- pub fn is_duplicate_confirmed(&self) -> bool {
- self.duplicate_stats.is_duplicate_confirmed
- }

- pub fn set_duplicate_confirmed(&mut self) {
- self.duplicate_stats.set_duplicate_confirmed();
- }
}

#[derive(Debug, Clone, Default)]
@@ -275,38 +271,6 @@ pub(crate) struct PropagatedStats
pub(crate) total_epoch_stake: u64,
}

- #[derive(Clone, Default)]
- pub(crate) struct DuplicateStats {
- latest_unconfirmed_duplicate_ancestor: Option<Slot>,
- is_duplicate_confirmed: bool,
- }

- impl DuplicateStats {
- pub fn new_with_unconfirmed_duplicate_ancestor(
- latest_unconfirmed_duplicate_ancestor: Option<Slot>,
- ) -> Self {
- Self {
- latest_unconfirmed_duplicate_ancestor,
- is_duplicate_confirmed: false,
- }
- }

- fn set_duplicate_confirmed(&mut self) {
- self.is_duplicate_confirmed = true;
- self.latest_unconfirmed_duplicate_ancestor = None;
- }

- fn update_with_newly_confirmed_duplicate_ancestor(&mut self, newly_confirmed_ancestor: Slot) {
- if let Some(latest_unconfirmed_duplicate_ancestor) =
- self.latest_unconfirmed_duplicate_ancestor
- {
- if latest_unconfirmed_duplicate_ancestor <= newly_confirmed_ancestor {
- self.latest_unconfirmed_duplicate_ancestor = None;
- }
- }
- }
- }

impl PropagatedStats {
pub fn add_vote_pubkey(&mut self, vote_pubkey: Pubkey, stake: u64) {
if self.propagated_validators.insert(vote_pubkey) {
@@ -317,7 +281,7 @@ impl PropagatedStats
pub fn add_node_pubkey(&mut self, node_pubkey: &Pubkey, bank: &Bank) {
if !self.propagated_node_ids.contains(node_pubkey) {
let node_vote_accounts = bank
- .epoch_vote_accounts_for_node_id(&node_pubkey)
+ .epoch_vote_accounts_for_node_id(node_pubkey)
.map(|v| &v.vote_accounts);

if let Some(node_vote_accounts) = node_vote_accounts {
@@ -438,101 +402,6 @@ impl ProgressMap
}
}

- pub fn is_unconfirmed_duplicate(&self, slot: Slot) -> Option<bool> {
- self.get(&slot).map(|p| {
- p.duplicate_stats
- .latest_unconfirmed_duplicate_ancestor
- .map(|ancestor| ancestor == slot)
- .unwrap_or(false)
- })
- }

- pub fn latest_unconfirmed_duplicate_ancestor(&self, slot: Slot) -> Option<Slot> {
- self.get(&slot)
- .map(|p| p.duplicate_stats.latest_unconfirmed_duplicate_ancestor)
- .unwrap_or(None)
- }

- pub fn set_unconfirmed_duplicate_slot(&mut self, slot: Slot, descendants: &HashSet<u64>) {
- if let Some(fork_progress) = self.get_mut(&slot) {
- if fork_progress.is_duplicate_confirmed() {
- assert!(fork_progress
- .duplicate_stats
- .latest_unconfirmed_duplicate_ancestor
- .is_none());
- return;
- }

- if fork_progress
- .duplicate_stats
- .latest_unconfirmed_duplicate_ancestor
- == Some(slot)
- {
- // Already been marked
- return;
- }
- fork_progress
- .duplicate_stats
- .latest_unconfirmed_duplicate_ancestor = Some(slot);

- for d in descendants {
- if let Some(fork_progress) = self.get_mut(&d) {
- fork_progress
- .duplicate_stats
- .latest_unconfirmed_duplicate_ancestor = Some(std::cmp::max(
- fork_progress
- .duplicate_stats
- .latest_unconfirmed_duplicate_ancestor
- .unwrap_or(0),
- slot,
- ));
- }
- }
- }
- }

- pub fn set_confirmed_duplicate_slot(
- &mut self,
- slot: Slot,
- ancestors: &HashSet<u64>,
- descendants: &HashSet<u64>,
- ) {
- for a in ancestors {
- if let Some(fork_progress) = self.get_mut(&a) {
- fork_progress.set_duplicate_confirmed();
- }
- }

- if let Some(slot_fork_progress) = self.get_mut(&slot) {
- // Setting the fields here is only correct and necessary if the loop above didn't
- // already do this, so check with an assert.
- assert!(!ancestors.contains(&slot));
- let slot_had_unconfirmed_duplicate_ancestor = slot_fork_progress
- .duplicate_stats
- .latest_unconfirmed_duplicate_ancestor
- .is_some();
- slot_fork_progress.set_duplicate_confirmed();

- if slot_had_unconfirmed_duplicate_ancestor {
- for d in descendants {
- if let Some(descendant_fork_progress) = self.get_mut(&d) {
- descendant_fork_progress
- .duplicate_stats
- .update_with_newly_confirmed_duplicate_ancestor(slot);
- }
- }
- } else {
- // Neither this slot `S`, nor earlier ancestors were marked as duplicate,
- // so this means all descendants either:
- // 1) Have no duplicate ancestors
- // 2) Have a duplicate ancestor > `S`

- // In both cases, there's no need to iterate through descendants because
- // this confirmation on `S` is irrelevant to them.
- }
- }
- }

pub fn my_latest_landed_vote(&self, slot: Slot) -> Option<Slot> {
self.progress_map
.get(&slot)
@@ -550,12 +419,6 @@ impl ProgressMap
.map(|s| s.fork_stats.is_supermajority_confirmed)
}

- pub fn is_duplicate_confirmed(&self, slot: Slot) -> Option<bool> {
- self.progress_map
- .get(&slot)
- .map(|s| s.is_duplicate_confirmed())
- }

pub fn get_bank_prev_leader_slot(&self, bank: &Bank) -> Option<Slot> {
let parent_slot = bank.parent_slot();
self.get_propagated_stats(parent_slot)
@@ -598,8 +461,6 @@ impl ProgressMap
#[cfg(test)]
mod test {
use super::*;
- use crate::consensus::test::VoteSimulator;
- use trees::tr;

#[test]
fn test_add_vote_pubkey() {
@@ -690,21 +551,13 @@ mod test
fn test_is_propagated_status_on_construction() {
// If the given ValidatorStakeInfo == None, then this is not
// a leader slot and is_propagated == false
- let progress = ForkProgress::new(
- Hash::default(),
- Some(9),
- DuplicateStats::default(),
- None,
- 0,
- 0,
- );
+ let progress = ForkProgress::new(Hash::default(), Some(9), None, 0, 0);
assert!(!progress.propagated_stats.is_propagated);

// If the stake is zero, then threshold is always achieved
let progress = ForkProgress::new(
Hash::default(),
Some(9),
- DuplicateStats::default(),
Some(ValidatorStakeInfo {
total_epoch_stake: 0,
..ValidatorStakeInfo::default()
@@ -719,7 +572,6 @@ mod test
let progress = ForkProgress::new(
Hash::default(),
Some(9),
- DuplicateStats::default(),
Some(ValidatorStakeInfo {
total_epoch_stake: 2,
..ValidatorStakeInfo::default()
@@ -733,7 +585,6 @@ mod test
let progress = ForkProgress::new(
Hash::default(),
Some(9),
- DuplicateStats::default(),
Some(ValidatorStakeInfo {
stake: 1,
total_epoch_stake: 2,
@@ -750,7 +601,6 @@ mod test
let progress = ForkProgress::new(
Hash::default(),
Some(9),
- DuplicateStats::default(),
Some(ValidatorStakeInfo::default()),
0,
0,
@@ -764,23 +614,12 @@ mod test

// Insert new ForkProgress for slot 10 (not a leader slot) and its
// previous leader slot 9 (leader slot)
- progress_map.insert(
- 10,
- ForkProgress::new(
- Hash::default(),
- Some(9),
- DuplicateStats::default(),
- None,
- 0,
- 0,
- ),
- );
+ progress_map.insert(10, ForkProgress::new(Hash::default(), Some(9), None, 0, 0));
progress_map.insert(
9,
ForkProgress::new(
Hash::default(),
None,
- DuplicateStats::default(),
Some(ValidatorStakeInfo::default()),
0,
0,
@@ -795,17 +634,7 @@ mod test
// The previous leader before 8, slot 7, does not exist in
// progress map, so is_propagated(8) should return true as
// this implies the parent is rooted
- progress_map.insert(
- 8,
- ForkProgress::new(
- Hash::default(),
- Some(7),
- DuplicateStats::default(),
- None,
- 0,
- 0,
- ),
- );
+ progress_map.insert(8, ForkProgress::new(Hash::default(), Some(7), None, 0, 0));
assert!(progress_map.is_propagated(8));

// If we set the is_propagated = true, is_propagated should return true
@@ -828,157 +657,4 @@ mod test
.is_leader_slot = true;
assert!(!progress_map.is_propagated(10));
}

- fn setup_set_unconfirmed_and_confirmed_duplicate_slot_tests(
- smaller_duplicate_slot: Slot,
- larger_duplicate_slot: Slot,
- ) -> (ProgressMap, RwLock<BankForks>) {
- // Create simple fork 0 -> 1 -> 2 -> 3 -> 4 -> 5
- let forks = tr(0) / (tr(1) / (tr(2) / (tr(3) / (tr(4) / tr(5)))));
- let mut vote_simulator = VoteSimulator::new(1);
- vote_simulator.fill_bank_forks(forks, &HashMap::new());
- let VoteSimulator {
- mut progress,
- bank_forks,
- ..
- } = vote_simulator;
- let descendants = bank_forks.read().unwrap().descendants().clone();

- // Mark the slots as unconfirmed duplicates
- progress.set_unconfirmed_duplicate_slot(
- smaller_duplicate_slot,
- &descendants.get(&smaller_duplicate_slot).unwrap(),
- );
- progress.set_unconfirmed_duplicate_slot(
- larger_duplicate_slot,
- &descendants.get(&larger_duplicate_slot).unwrap(),
- );

- // Correctness checks
- for slot in bank_forks.read().unwrap().banks().keys() {
- if *slot < smaller_duplicate_slot {
- assert!(progress
- .latest_unconfirmed_duplicate_ancestor(*slot)
- .is_none());
- } else if *slot < larger_duplicate_slot {
- assert_eq!(
- progress
- .latest_unconfirmed_duplicate_ancestor(*slot)
- .unwrap(),
- smaller_duplicate_slot
- );
- } else {
- assert_eq!(
- progress
- .latest_unconfirmed_duplicate_ancestor(*slot)
- .unwrap(),
- larger_duplicate_slot
- );
- }
- }

- (progress, bank_forks)
- }

- #[test]
- fn test_set_unconfirmed_duplicate_confirm_smaller_slot_first() {
- let smaller_duplicate_slot = 1;
- let larger_duplicate_slot = 4;
- let (mut progress, bank_forks) = setup_set_unconfirmed_and_confirmed_duplicate_slot_tests(
- smaller_duplicate_slot,
- larger_duplicate_slot,
- );
- let descendants = bank_forks.read().unwrap().descendants().clone();
- let ancestors = bank_forks.read().unwrap().ancestors();

- // Mark the smaller duplicate slot as confirmed
- progress.set_confirmed_duplicate_slot(
- smaller_duplicate_slot,
- &ancestors.get(&smaller_duplicate_slot).unwrap(),
- &descendants.get(&smaller_duplicate_slot).unwrap(),
- );
- for slot in bank_forks.read().unwrap().banks().keys() {
- if *slot < larger_duplicate_slot {
- // Only slots <= smaller_duplicate_slot have been duplicate confirmed
- if *slot <= smaller_duplicate_slot {
- assert!(progress.is_duplicate_confirmed(*slot).unwrap());
- } else {
- assert!(!progress.is_duplicate_confirmed(*slot).unwrap());
- }
- // The unconfirmed duplicate flag has been cleared on the smaller
- // descendants because their most recent duplicate ancestor has
- // been confirmed
- assert!(progress
- .latest_unconfirmed_duplicate_ancestor(*slot)
- .is_none());
- } else {
- assert!(!progress.is_duplicate_confirmed(*slot).unwrap());
- // The unconfirmed duplicate flag has not been cleared on the larger
- // descendants because their most recent duplicate ancestor,
- // `larger_duplicate_slot`, has not yet been confirmed
- assert_eq!(
- progress
- .latest_unconfirmed_duplicate_ancestor(*slot)
- .unwrap(),
- larger_duplicate_slot
- );
- }
- }

- // Mark the larger duplicate slot as confirmed; all slots should no longer
- // have any unconfirmed duplicate ancestors, and should be marked as duplicate confirmed
- progress.set_confirmed_duplicate_slot(
- larger_duplicate_slot,
- &ancestors.get(&larger_duplicate_slot).unwrap(),
- &descendants.get(&larger_duplicate_slot).unwrap(),
- );
- for slot in bank_forks.read().unwrap().banks().keys() {
- // All slots <= the latest duplicate confirmed slot are ancestors of
- // that slot, so they should all be marked duplicate confirmed
- assert_eq!(
- progress.is_duplicate_confirmed(*slot).unwrap(),
- *slot <= larger_duplicate_slot
- );
- assert!(progress
- .latest_unconfirmed_duplicate_ancestor(*slot)
- .is_none());
- }
- }

- #[test]
- fn test_set_unconfirmed_duplicate_confirm_larger_slot_first() {
- let smaller_duplicate_slot = 1;
- let larger_duplicate_slot = 4;
- let (mut progress, bank_forks) = setup_set_unconfirmed_and_confirmed_duplicate_slot_tests(
- smaller_duplicate_slot,
- larger_duplicate_slot,
- );
- let descendants = bank_forks.read().unwrap().descendants().clone();
- let ancestors = bank_forks.read().unwrap().ancestors();

- // Mark the larger duplicate slot as confirmed
- progress.set_confirmed_duplicate_slot(
- larger_duplicate_slot,
- &ancestors.get(&larger_duplicate_slot).unwrap(),
- &descendants.get(&larger_duplicate_slot).unwrap(),
- );

- // All slots should no longer have any unconfirmed duplicate ancestors
- progress.set_confirmed_duplicate_slot(
- larger_duplicate_slot,
- &ancestors.get(&larger_duplicate_slot).unwrap(),
- &descendants.get(&larger_duplicate_slot).unwrap(),
- );
- for slot in bank_forks.read().unwrap().banks().keys() {
- // All slots <= the latest duplicate confirmed slot are ancestors of
- // that slot, so they should all be marked duplicate confirmed
- assert_eq!(
- progress.is_duplicate_confirmed(*slot).unwrap(),
- *slot <= larger_duplicate_slot
- );
- assert!(progress
- .latest_unconfirmed_duplicate_ancestor(*slot)
- .is_none());
- }
- }
}
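The `DuplicateStats` machinery deleted above maintained one invariant per slot: the latest (largest) unconfirmed duplicate ancestor, pushed down to descendants on marking and cleared once a covering slot is duplicate-confirmed. A toy model of just that marker bookkeeping, with slots as bare `u64`s rather than `ForkProgress` entries (the confirmation flag itself is omitted):

```rust
use std::collections::HashMap;

/// Toy stand-in for the per-slot duplicate bookkeeping removed above.
#[derive(Default)]
struct Marker {
    latest_unconfirmed_duplicate_ancestor: Option<u64>,
}

fn mark_duplicate(map: &mut HashMap<u64, Marker>, slot: u64, descendants: &[u64]) {
    map.entry(slot).or_default().latest_unconfirmed_duplicate_ancestor = Some(slot);
    for d in descendants {
        let m = map.entry(*d).or_default();
        // Keep the largest (closest) unconfirmed duplicate ancestor.
        m.latest_unconfirmed_duplicate_ancestor =
            Some(m.latest_unconfirmed_duplicate_ancestor.unwrap_or(0).max(slot));
    }
}

fn confirm_duplicate(map: &mut HashMap<u64, Marker>, slot: u64, descendants: &[u64]) {
    for s in std::iter::once(&slot).chain(descendants) {
        if let Some(m) = map.get_mut(s) {
            // Clear only if the confirmation covers the recorded ancestor.
            if m.latest_unconfirmed_duplicate_ancestor <= Some(slot) {
                m.latest_unconfirmed_duplicate_ancestor = None;
            }
        }
    }
}

fn main() {
    let mut map = HashMap::new();
    mark_duplicate(&mut map, 1, &[2, 3, 4, 5]);
    mark_duplicate(&mut map, 4, &[5]);
    confirm_duplicate(&mut map, 1, &[2, 3, 4, 5]);
    // Slot 3's marker (ancestor 1) is cleared; slot 5 still points at 4.
    assert_eq!(map[&3].latest_unconfirmed_duplicate_ancestor, None);
    assert_eq!(map[&5].latest_unconfirmed_duplicate_ancestor, Some(4));
}
```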
@@ -5,6 +5,7 @@ use crate::{
cluster_slots::ClusterSlots,
outstanding_requests::OutstandingRequests,
repair_weight::RepairWeight,
+ replay_stage::DUPLICATE_THRESHOLD,
result::Result,
serve_repair::{RepairType, ServeRepair},
};
@@ -15,10 +16,13 @@ use solana_ledger::{
shred::Nonce,
};
use solana_measure::measure::Measure;
- use solana_runtime::{
- bank::Bank, bank_forks::BankForks, commitment::VOTE_THRESHOLD_SIZE, contains::Contains,
- };
+ use solana_runtime::{bank::Bank, bank_forks::BankForks, contains::Contains};
- use solana_sdk::{clock::Slot, epoch_schedule::EpochSchedule, pubkey::Pubkey, timing::timestamp};
+ use solana_sdk::{
+ clock::{BankId, Slot},
+ epoch_schedule::EpochSchedule,
+ pubkey::Pubkey,
+ timing::timestamp,
+ };
use std::{
collections::{HashMap, HashSet},
iter::Iterator,
@@ -33,6 +37,8 @@ use std::{

pub type DuplicateSlotsResetSender = CrossbeamSender<Slot>;
pub type DuplicateSlotsResetReceiver = CrossbeamReceiver<Slot>;
+ pub type ConfirmedSlotsSender = CrossbeamSender<Vec<Slot>>;
+ pub type ConfirmedSlotsReceiver = CrossbeamReceiver<Vec<Slot>>;

pub type OutstandingRepairs = OutstandingRequests<RepairType>;

@@ -223,7 +229,7 @@ impl RepairService

add_votes_elapsed = Measure::start("add_votes");
repair_weight.add_votes(
- &blockstore,
+ blockstore,
slot_to_vote_pubkeys.into_iter(),
root_bank.epoch_stakes_map(),
root_bank.epoch_schedule(),
@@ -271,7 +277,7 @@ impl RepairService
let mut outstanding_requests = outstanding_requests.write().unwrap();
repairs.into_iter().for_each(|repair_request| {
if let Ok((to, req)) = serve_repair.repair_request(
- &cluster_slots,
+ cluster_slots,
repair_request,
&mut cache,
&mut repair_stats,
@@ -487,7 +493,7 @@ impl RepairService
repair_validators,
);
if let Some((repair_pubkey, repair_addr)) = status.repair_pubkey_and_addr {
- let repairs = Self::generate_duplicate_repairs_for_slot(&blockstore, *slot);
+ let repairs = Self::generate_duplicate_repairs_for_slot(blockstore, *slot);

if let Some(repairs) = repairs {
let mut outstanding_requests = outstanding_requests.write().unwrap();
@@ -529,7 +535,7 @@ impl RepairService
nonce: Nonce,
) -> Result<()> {
let req =
- serve_repair.map_repair_request(&repair_type, repair_pubkey, repair_stats, nonce)?;
+ serve_repair.map_repair_request(repair_type, repair_pubkey, repair_stats, nonce)?;
repair_socket.send_to(&req, to)?;
Ok(())
}
@@ -558,7 +564,7 @@ impl RepairService

#[allow(dead_code)]
fn process_new_duplicate_slots(
- new_duplicate_slots: &[Slot],
+ new_duplicate_slots: &[(Slot, BankId)],
duplicate_slot_repair_statuses: &mut HashMap<Slot, DuplicateSlotRepairStatus>,
cluster_slots: &ClusterSlots,
root_bank: &Bank,
@@ -567,16 +573,16 @@ impl RepairService
duplicate_slots_reset_sender: &DuplicateSlotsResetSender,
repair_validators: &Option<HashSet<Pubkey>>,
) {
- for slot in new_duplicate_slots {
+ for (slot, bank_id) in new_duplicate_slots {
warn!(
- "Cluster completed slot: {}, dumping our current version and repairing",
+ "Cluster confirmed slot: {}, dumping our current version and repairing",
slot
);
// Clear the slot signatures from status cache for this slot
root_bank.clear_slot_signatures(*slot);

// Clear the accounts for this slot
- root_bank.remove_unrooted_slot(*slot);
+ root_bank.remove_unrooted_slots(&[(*slot, *bank_id)]);

// Clear the slot-related data in blockstore. This will:
// 1) Clear old shreds allowing new ones to be inserted
@@ -641,7 +647,7 @@ impl RepairService
})
.sum();
if total_completed_slot_stake as f64 / total_stake as f64
- > VOTE_THRESHOLD_SIZE
+ > DUPLICATE_THRESHOLD
{
Some(dead_slot)
} else {
@@ -1059,7 +1065,7 @@ mod test
let serve_repair = ServeRepair::new(cluster_info.clone());
let valid_repair_peer = Node::new_localhost().info;

- // Signal that this peer has completed the dead slot, and is thus
+ // Signal that this peer has confirmed the dead slot, and is thus
// a valid target for repair
let dead_slot = 9;
let cluster_slots = ClusterSlots::default();
@@ -1138,6 +1144,7 @@ mod test
);
let bank0 = Arc::new(Bank::new(&genesis_config));
let bank9 = Bank::new_from_parent(&bank0, &Pubkey::default(), duplicate_slot);
+ let duplicate_bank_id = bank9.bank_id();
let old_balance = bank9.get_balance(&keypairs.node_keypair.pubkey());
bank9
.transfer(10_000, &mint_keypair, &keypairs.node_keypair.pubkey())
@@ -1155,7 +1162,7 @@ mod test
assert!(bank9.get_signature_status(&vote_tx.signatures[0]).is_some());

RepairService::process_new_duplicate_slots(
- &[duplicate_slot],
+ &[(duplicate_slot, duplicate_bank_id)],
&mut duplicate_slot_repair_statuses,
&cluster_slots,
&bank9,
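The `VOTE_THRESHOLD_SIZE` → `DUPLICATE_THRESHOLD` swap keeps the same stake-ratio comparison and only changes which cutoff triggers dumping and repairing a dead slot. A sketch of the check in isolation (the 0.52 constant is a placeholder; the real `DUPLICATE_THRESHOLD` is defined in `replay_stage` and derived from other consensus thresholds):

```rust
/// Placeholder value; the real DUPLICATE_THRESHOLD lives in replay_stage and
/// is not hard-coded like this.
const DUPLICATE_THRESHOLD: f64 = 0.52;

/// True when peers holding more than DUPLICATE_THRESHOLD of the epoch stake
/// report the slot complete, mirroring the comparison in the hunk above.
fn cluster_confirmed(completed_slot_stake: u64, total_stake: u64) -> bool {
    completed_slot_stake as f64 / total_stake as f64 > DUPLICATE_THRESHOLD
}

fn main() {
    assert!(cluster_confirmed(60, 100));
    assert!(!cluster_confirmed(40, 100));
}
```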
@@ -495,7 +495,7 @@ impl RepairWeight
for ((slot, _), _) in all_slots {
*self
.slot_to_tree
- .get_mut(&slot)
+ .get_mut(slot)
.expect("Nodes in tree must exist in `self.slot_to_tree`") = root2;
}
}
@@ -521,9 +521,9 @@ impl RepairWeight
fn sort_by_stake_weight_slot(slot_stake_voted: &mut Vec<(Slot, u64)>) {
slot_stake_voted.sort_by(|(slot, stake_voted), (slot_, stake_voted_)| {
if stake_voted == stake_voted_ {
- slot.cmp(&slot_)
+ slot.cmp(slot_)
} else {
- stake_voted.cmp(&stake_voted_).reverse()
+ stake_voted.cmp(stake_voted_).reverse()
}
});
}
@@ -757,7 +757,7 @@ mod test
);

for slot in &[8, 10, 11] {
- assert_eq!(*repair_weight.slot_to_tree.get(&slot).unwrap(), 8);
+ assert_eq!(*repair_weight.slot_to_tree.get(slot).unwrap(), 8);
}
for slot in 0..=1 {
assert_eq!(*repair_weight.slot_to_tree.get(&slot).unwrap(), 0);
@@ -772,7 +772,7 @@ mod test
);

for slot in &[8, 10, 11] {
- assert_eq!(*repair_weight.slot_to_tree.get(&slot).unwrap(), 0);
+ assert_eq!(*repair_weight.slot_to_tree.get(slot).unwrap(), 0);
}
assert_eq!(repair_weight.trees.len(), 1);
assert!(repair_weight.trees.contains_key(&0));
@@ -1088,10 +1088,10 @@ mod test
let purged_slots = vec![0, 1, 2, 4, 8, 10];
let mut expected_unrooted_len = 0;
for purged_slot in &purged_slots {
- assert!(!repair_weight.slot_to_tree.contains_key(&purged_slot));
- assert!(!repair_weight.trees.contains_key(&purged_slot));
+ assert!(!repair_weight.slot_to_tree.contains_key(purged_slot));
+ assert!(!repair_weight.trees.contains_key(purged_slot));
if *purged_slot > 3 {
- assert!(repair_weight.unrooted_slots.contains(&purged_slot));
+ assert!(repair_weight.unrooted_slots.contains(purged_slot));
expected_unrooted_len += 1;
}
}
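`sort_by_stake_weight_slot` above orders repair candidates by stake descending, breaking ties by the lower slot. The comparator lifted out with `Slot` spelled as `u64`, plus a quick check of the resulting order:

```rust
fn sort_by_stake_weight_slot(slot_stake_voted: &mut Vec<(u64, u64)>) {
    slot_stake_voted.sort_by(|(slot, stake_voted), (slot_, stake_voted_)| {
        if stake_voted == stake_voted_ {
            slot.cmp(slot_) // equal stake: smaller slot first
        } else {
            stake_voted.cmp(stake_voted_).reverse() // otherwise: larger stake first
        }
    });
}

fn main() {
    let mut v = vec![(9, 10), (2, 30), (7, 30), (1, 10)];
    sort_by_stake_weight_slot(&mut v);
    assert_eq!(v, vec![(2, 30), (7, 30), (1, 10), (9, 10)]);
}
```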
@@ -101,7 +101,7 @@ pub fn get_best_repair_shreds<'a>(
let new_repairs = RepairService::generate_repairs_for_slot(
blockstore,
slot,
- &slot_meta,
+ slot_meta,
max_repairs - repairs.len(),
);
repairs.extend(new_repairs);

File diff suppressed because it is too large
@@ -6,18 +6,18 @@ use solana_ledger::blockstore;
#[derive(Debug)]
pub enum Error {
Io(std::io::Error),
- RecvError(std::sync::mpsc::RecvError),
- CrossbeamRecvTimeoutError(crossbeam_channel::RecvTimeoutError),
- ReadyTimeoutError,
- RecvTimeoutError(std::sync::mpsc::RecvTimeoutError),
- CrossbeamSendError,
- TryCrossbeamSendError,
+ Recv(std::sync::mpsc::RecvError),
+ CrossbeamRecvTimeout(crossbeam_channel::RecvTimeoutError),
+ ReadyTimeout,
+ RecvTimeout(std::sync::mpsc::RecvTimeoutError),
+ CrossbeamSend,
+ TryCrossbeamSend,
Serialize(std::boxed::Box<bincode::ErrorKind>),
- ClusterInfoError(cluster_info::ClusterInfoError),
- SendError,
- BlockstoreError(blockstore::BlockstoreError),
- WeightedIndexError(rand::distributions::weighted::WeightedError),
- GossipError(GossipError),
+ ClusterInfo(cluster_info::ClusterInfoError),
+ Send,
+ Blockstore(blockstore::BlockstoreError),
+ WeightedIndex(rand::distributions::weighted::WeightedError),
+ Gossip(GossipError),
}

pub type Result<T> = std::result::Result<T, Error>;
@@ -32,42 +32,42 @@ impl std::error::Error for Error {}

impl std::convert::From<std::sync::mpsc::RecvError> for Error {
fn from(e: std::sync::mpsc::RecvError) -> Error {
- Error::RecvError(e)
+ Error::Recv(e)
}
}
impl std::convert::From<crossbeam_channel::RecvTimeoutError> for Error {
fn from(e: crossbeam_channel::RecvTimeoutError) -> Error {
- Error::CrossbeamRecvTimeoutError(e)
+ Error::CrossbeamRecvTimeout(e)
}
}
impl std::convert::From<crossbeam_channel::ReadyTimeoutError> for Error {
fn from(_e: crossbeam_channel::ReadyTimeoutError) -> Error {
- Error::ReadyTimeoutError
+ Error::ReadyTimeout
}
}
impl std::convert::From<std::sync::mpsc::RecvTimeoutError> for Error {
fn from(e: std::sync::mpsc::RecvTimeoutError) -> Error {
- Error::RecvTimeoutError(e)
+ Error::RecvTimeout(e)
}
}
impl std::convert::From<cluster_info::ClusterInfoError> for Error {
fn from(e: cluster_info::ClusterInfoError) -> Error {
- Error::ClusterInfoError(e)
+ Error::ClusterInfo(e)
}
}
impl<T> std::convert::From<crossbeam_channel::SendError<T>> for Error {
fn from(_e: crossbeam_channel::SendError<T>) -> Error {
- Error::CrossbeamSendError
+ Error::CrossbeamSend
}
}
impl<T> std::convert::From<crossbeam_channel::TrySendError<T>> for Error {
fn from(_e: crossbeam_channel::TrySendError<T>) -> Error {
- Error::TryCrossbeamSendError
+ Error::TryCrossbeamSend
}
}
impl<T> std::convert::From<std::sync::mpsc::SendError<T>> for Error {
fn from(_e: std::sync::mpsc::SendError<T>) -> Error {
- Error::SendError
+ Error::Send
}
}
impl std::convert::From<std::io::Error> for Error {
@@ -82,17 +82,17 @@ impl std::convert::From<std::boxed::Box<bincode::ErrorKind>> for Error {
}
impl std::convert::From<blockstore::BlockstoreError> for Error {
fn from(e: blockstore::BlockstoreError) -> Error {
- Error::BlockstoreError(e)
+ Error::Blockstore(e)
}
}
impl std::convert::From<rand::distributions::weighted::WeightedError> for Error {
fn from(e: rand::distributions::weighted::WeightedError) -> Error {
- Error::WeightedIndexError(e)
+ Error::WeightedIndex(e)
}
}
impl std::convert::From<GossipError> for Error {
fn from(e: GossipError) -> Error {
- Error::GossipError(e)
+ Error::Gossip(e)
}
}

@@ -116,12 +116,12 @@ mod tests

#[test]
fn from_test() {
- assert_matches!(Error::from(RecvError {}), Error::RecvError(_));
+ assert_matches!(Error::from(RecvError {}), Error::Recv(_));
assert_matches!(
Error::from(RecvTimeoutError::Timeout),
- Error::RecvTimeoutError(_)
+ Error::RecvTimeout(_)
);
- assert_matches!(send_error(), Err(Error::SendError));
+ assert_matches!(send_error(), Err(Error::Send));
let ioe = io::Error::new(io::ErrorKind::NotFound, "hi");
assert_matches!(Error::from(ioe), Error::Io(_));
}
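The renames drop the redundant `Error` suffix (clippy's `enum-variant-names` lint); the `From` impls are what let callers bubble foreign errors up with `?`. A trimmed-down illustration of the same pattern using one variant:

```rust
use std::sync::mpsc::{channel, Receiver, RecvTimeoutError};
use std::time::Duration;

#[derive(Debug)]
enum Error {
    RecvTimeout(RecvTimeoutError), // not RecvTimeoutError(..): the enum name already says "Error"
}

impl From<RecvTimeoutError> for Error {
    fn from(e: RecvTimeoutError) -> Self {
        Error::RecvTimeout(e)
    }
}

/// `?` uses the From impl above to convert RecvTimeoutError into Error.
fn recv_one(rx: &Receiver<u64>) -> Result<u64, Error> {
    Ok(rx.recv_timeout(Duration::from_millis(10))?)
}

fn main() {
    let (_tx, rx) = channel::<u64>();
    assert!(matches!(recv_one(&rx), Err(Error::RecvTimeout(_))));
}
```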
@@ -4,10 +4,9 @@
use crate::{
cluster_info_vote_listener::VerifiedVoteReceiver,
cluster_slots::ClusterSlots,
- cluster_slots_service::ClusterSlotsService,
+ cluster_slots_service::{ClusterSlotsService, ClusterSlotsUpdateReceiver},
completed_data_sets_service::CompletedDataSetsSender,
- repair_service::DuplicateSlotsResetSender,
- repair_service::RepairInfo,
+ repair_service::{DuplicateSlotsResetSender, RepairInfo},
result::{Error, Result},
window_service::{should_retransmit_and_persist, WindowService},
};
@@ -429,7 +428,9 @@ fn retransmit(
// neighborhood), then we expect that the packet arrives at tvu socket
// as opposed to tvu-forwards. If this is not the case, then the
// turbine broadcast/retransmit tree is mismatched across nodes.
- if packet.meta.forward == (my_index % DATA_PLANE_FANOUT == 0) {
+ let anchor_node = my_index % DATA_PLANE_FANOUT == 0;
+ if packet.meta.forward == anchor_node {
// TODO: Consider forwarding the packet to the root node here.
retransmit_tree_mismatch += 1;
}
peers_len = cmp::max(peers_len, shuffled_stakes_and_index.len());
@@ -465,10 +466,19 @@ fn retransmit(
.or_default() += 1;

let mut retransmit_time = Measure::start("retransmit_to");
- if !packet.meta.forward {
- ClusterInfo::retransmit_to(&neighbors, packet, sock, true)?;
+ // If the node is on the critical path (i.e. the first node in each
+ // neighborhood), it should send the packet to tvu socket of its
+ // children and also tvu_forward socket of its neighbors. Otherwise it
+ // should only forward to tvu_forward socket of its children.
+ if anchor_node {
+ ClusterInfo::retransmit_to(&neighbors, packet, sock, /*forward socket=*/ true);
}
- ClusterInfo::retransmit_to(&children, packet, sock, packet.meta.forward)?;
+ ClusterInfo::retransmit_to(
+ &children,
+ packet,
+ sock,
+ !anchor_node, // send to forward socket!
+ );
retransmit_time.stop();
retransmit_total += retransmit_time.as_us();
}
@@ -559,8 +569,8 @@ pub fn retransmitter(
&rpc_subscriptions,
) {
match e {
- Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => break,
- Error::RecvTimeoutError(RecvTimeoutError::Timeout) => (),
+ Error::RecvTimeout(RecvTimeoutError::Disconnected) => break,
+ Error::RecvTimeout(RecvTimeoutError::Timeout) => (),
_ => {
inc_new_counter_error!("streamer-retransmit-error", 1, 1);
}
@@ -592,7 +602,8 @@ impl RetransmitStage
repair_socket: Arc<UdpSocket>,
verified_receiver: Receiver<Vec<Packets>>,
exit: &Arc<AtomicBool>,
- completed_slots_receivers: [CompletedSlotsReceiver; 2],
+ rpc_completed_slots_receiver: CompletedSlotsReceiver,
+ cluster_slots_update_receiver: ClusterSlotsUpdateReceiver,
epoch_schedule: EpochSchedule,
cfg: Option<Arc<AtomicBool>>,
shred_version: u16,
@@ -618,8 +629,6 @@ impl RetransmitStage
rpc_subscriptions.clone(),
);

- let [rpc_completed_slots_receiver, cluster_completed_slots_receiver] =
- completed_slots_receivers;
let rpc_completed_slots_hdl =
RpcCompletedSlotsService::spawn(rpc_completed_slots_receiver, rpc_subscriptions);
let cluster_slots_service = ClusterSlotsService::new(
@@ -627,7 +636,7 @@ impl RetransmitStage
cluster_slots.clone(),
bank_forks.clone(),
cluster_info.clone(),
- cluster_completed_slots_receiver,
+ cluster_slots_update_receiver,
exit.clone(),
);

@@ -709,6 +718,7 @@ mod tests
let (ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_config);
let blockstore = Blockstore::open(&ledger_path).unwrap();
let opts = ProcessOptions {
+ accounts_db_test_hash_calculation: true,
full_leader_cache: true,
..ProcessOptions::default()
};
@@ -728,8 +738,13 @@ mod tests
.unwrap()
.local_addr()
.unwrap();

- let other = ContactInfo::new_localhost(&solana_sdk::pubkey::new_rand(), 0);
+ // This fixes the order of nodes returned by shuffle_peers_and_index,
+ // and makes the turbine retransmit tree deterministic for the purpose
+ // of the test.
+ let other = std::iter::repeat_with(solana_sdk::pubkey::new_rand)
+ .find(|pk| me.id < *pk)
+ .unwrap();
+ let other = ContactInfo::new_localhost(&other, 0);
let cluster_info = ClusterInfo::new_with_invalid_keypair(other);
cluster_info.insert_info(me);
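Everything in the retransmit hunk pivots on one predicate: whether this node is the first member of its turbine neighborhood. A compact sketch of that routing decision (the fan-out constant and `Socket` enum are illustrative stand-ins; the real code expresses the choice as a boolean passed to `ClusterInfo::retransmit_to`):

```rust
const DATA_PLANE_FANOUT: usize = 200; // assumed fan-out; configurable in the real code

#[derive(Debug, PartialEq)]
enum Socket {
    Tvu,
    TvuForwards,
}

/// The first node of each neighborhood is the "anchor" on the critical path.
fn is_anchor(my_index: usize) -> bool {
    my_index % DATA_PLANE_FANOUT == 0
}

/// Which socket children should receive the shred on, per the hunk above:
/// anchors feed children's tvu sockets (and additionally their neighbors'
/// tvu_forwards sockets); everyone else only forwards.
fn child_socket(my_index: usize) -> Socket {
    if is_anchor(my_index) {
        Socket::Tvu
    } else {
        Socket::TvuForwards
    }
}

fn main() {
    assert!(is_anchor(0) && is_anchor(200));
    assert_eq!(child_socket(0), Socket::Tvu);
    assert_eq!(child_socket(7), Socket::TvuForwards);
}
```

This is also why the mismatch counter above fires when `packet.meta.forward == anchor_node`: an anchor expects packets on its tvu socket, a non-anchor on tvu_forwards.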
@@ -171,7 +171,7 @@ impl ServeRepair
Self::run_window_request(
recycler,
from,
- &from_addr,
+ from_addr,
blockstore,
&me.read().unwrap().my_info,
*slot,
@@ -186,7 +186,7 @@ impl ServeRepair
(
Self::run_highest_window_request(
recycler,
- &from_addr,
+ from_addr,
blockstore,
*slot,
*highest_index,
@@ -200,7 +200,7 @@ impl ServeRepair
(
Self::run_orphan(
recycler,
- &from_addr,
+ from_addr,
blockstore,
*slot,
MAX_ORPHAN_REPAIR_RESPONSES,
@@ -256,7 +256,7 @@ impl ServeRepair

let mut time = Measure::start("repair::handle_packets");
for reqs in reqs_v {
- Self::handle_packets(obj, &recycler, blockstore, reqs, response_sender, stats);
+ Self::handle_packets(obj, recycler, blockstore, reqs, response_sender, stats);
}
time.stop();
if total_packets >= *max_packets {
@@ -323,7 +323,7 @@ impl ServeRepair
&mut max_packets,
);
match result {
- Err(Error::RecvTimeoutError(_)) | Ok(_) => {}
+ Err(Error::RecvTimeout(_)) | Ok(_) => {}
Err(err) => info!("repair listener error: {:?}", err),
};
if exit.load(Ordering::Relaxed) {
@@ -411,7 +411,7 @@ impl ServeRepair
let (repair_peers, weighted_index) = match cache.entry(slot) {
Entry::Occupied(entry) => entry.into_mut(),
Entry::Vacant(entry) => {
- let repair_peers = self.repair_peers(&repair_validators, slot);
+ let repair_peers = self.repair_peers(repair_validators, slot);
if repair_peers.is_empty() {
return Err(Error::from(ClusterInfoError::NoPeers));
}
@@ -515,7 +515,7 @@ impl ServeRepair

if let Some(packet) = packet {
inc_new_counter_debug!("serve_repair-window-request-ledger", 1);
- return Some(Packets::new_with_recycler_data(
+ return Some(Packets::new_unpinned_with_recycler_data(
recycler,
"run_window_request",
vec![packet],
@@ -555,7 +555,7 @@ impl ServeRepair
from_addr,
nonce,
)?;
- return Some(Packets::new_with_recycler_data(
+ return Some(Packets::new_unpinned_with_recycler_data(
recycler,
"run_highest_window_request",
vec![packet],
@@ -572,7 +572,7 @@ impl ServeRepair
max_responses: usize,
nonce: Nonce,
) -> Option<Packets> {
- let mut res = Packets::new_with_recycler(recycler.clone(), 64, "run_orphan");
+ let mut res = Packets::new_unpinned_with_recycler(recycler.clone(), 64, "run_orphan");
if let Some(blockstore) = blockstore {
// Try to find the next "n" parent slots of the input slot
while let Ok(Some(meta)) = blockstore.meta(slot) {
@@ -777,7 +777,7 @@ mod tests
&None,
&mut outstanding_requests,
);
- assert_matches!(rv, Err(Error::ClusterInfoError(ClusterInfoError::NoPeers)));
+ assert_matches!(rv, Err(Error::ClusterInfo(ClusterInfoError::NoPeers)));

let serve_repair_addr = socketaddr!([127, 0, 0, 1], 1243);
let nxt = ContactInfo {
@@ -28,11 +28,12 @@ impl ServeRepairService
);
let t_receiver = streamer::receiver(
serve_repair_socket.clone(),
- &exit,
+ exit,
request_sender,
Recycler::default(),
"serve_repair_receiver",
1,
+ false,
);
let (response_sender, response_receiver) = channel();
let t_responder =
@@ -145,11 +145,12 @@ impl ShredFetchStage
.map(|s| {
streamer::receiver(
s,
- &exit,
+ exit,
packet_sender.clone(),
recycler.clone(),
"packet_modifier",
1,
+ true,
)
})
.collect();
@@ -173,7 +174,7 @@ impl ShredFetchStage

let (mut tvu_threads, tvu_filter) = Self::packet_modifier(
sockets,
- &exit,
+ exit,
sender.clone(),
recycler.clone(),
bank_forks.clone(),
@@ -183,7 +184,7 @@ impl ShredFetchStage

let (tvu_forwards_threads, fwd_thread_hdl) = Self::packet_modifier(
forward_sockets,
- &exit,
+ exit,
sender.clone(),
recycler.clone(),
bank_forks.clone(),
@@ -193,7 +194,7 @@ impl ShredFetchStage

let (repair_receiver, repair_handler) = Self::packet_modifier(
vec![repair_socket],
- &exit,
+ exit,
sender.clone(),
recycler,
bank_forks,
@@ -24,10 +24,10 @@ const RECV_BATCH_MAX_GPU: usize = 5_000;
#[derive(Error, Debug)]
pub enum SigVerifyServiceError {
#[error("send packets batch error")]
- SendError(#[from] SendError<Vec<Packets>>),
+ Send(#[from] SendError<Vec<Packets>>),

#[error("streamer error")]
- StreamerError(#[from] StreamerError),
+ Streamer(#[from] StreamerError),
}

type Result<T> = std::result::Result<T, SigVerifyServiceError>;
@@ -126,13 +126,13 @@ impl SigVerifyStage
.spawn(move || loop {
if let Err(e) = Self::verifier(&packet_receiver, &verified_sender, id, &verifier) {
match e {
- SigVerifyServiceError::StreamerError(StreamerError::RecvTimeoutError(
+ SigVerifyServiceError::Streamer(StreamerError::RecvTimeout(
RecvTimeoutError::Disconnected,
)) => break,
- SigVerifyServiceError::StreamerError(StreamerError::RecvTimeoutError(
+ SigVerifyServiceError::Streamer(StreamerError::RecvTimeout(
RecvTimeoutError::Timeout,
)) => (),
- SigVerifyServiceError::SendError(_) => {
+ SigVerifyServiceError::Send(_) => {
break;
}
_ => error!("{:?}", e),
@@ -1,12 +1,10 @@
use {
- crate::{
- rpc::JsonRpcConfig,
- validator::{Validator, ValidatorConfig, ValidatorExit, ValidatorStartProgress},
- },
+ crate::validator::{Validator, ValidatorConfig, ValidatorStartProgress},
solana_client::rpc_client::RpcClient,
solana_gossip::{cluster_info::Node, gossip_service::discover_cluster, socketaddr},
solana_ledger::{blockstore::create_new_ledger, create_new_tmp_ledger},
solana_net_utils::PortRange,
+ solana_rpc::rpc::JsonRpcConfig,
solana_runtime::{
bank_forks::{ArchiveFormat, SnapshotConfig, SnapshotVersion},
genesis_utils::create_genesis_config_with_leader_ex,
@@ -18,6 +16,7 @@ use {
clock::{Slot, DEFAULT_MS_PER_SLOT},
commitment_config::CommitmentConfig,
epoch_schedule::EpochSchedule,
+ exit::Exit,
fee_calculator::{FeeCalculator, FeeRateGovernor},
hash::Hash,
native_token::sol_to_lamports,
@@ -70,7 +69,7 @@ impl Default for TestValidatorNodeConfig
pub struct TestValidatorGenesis {
fee_rate_governor: FeeRateGovernor,
ledger_path: Option<PathBuf>,
- rent: Rent,
+ pub rent: Rent,
rpc_config: JsonRpcConfig,
rpc_ports: Option<(u16, u16)>, // (JsonRpc, JsonRpcPubSub), None == random ports
warp_slot: Option<Slot>,
@@ -79,7 +78,7 @@ pub struct TestValidatorGenesis
programs: Vec<ProgramInfo>,
epoch_schedule: Option<EpochSchedule>,
node_config: TestValidatorNodeConfig,
- pub validator_exit: Arc<RwLock<ValidatorExit>>,
+ pub validator_exit: Arc<RwLock<Exit>>,
pub start_progress: Arc<RwLock<ValidatorStartProgress>>,
pub authorized_voter_keypairs: Arc<RwLock<Vec<Arc<Keypair>>>>,
pub max_ledger_shreds: Option<u64>,
@@ -9,13 +9,13 @@ use crate::{
VerifiedVoteSender, VoteTracker,
},
fetch_stage::FetchStage,
- poh_recorder::{PohRecorder, WorkingBankEntry},
sigverify::TransactionSigVerifier,
sigverify_stage::SigVerifyStage,
};
use crossbeam_channel::unbounded;
use solana_gossip::cluster_info::ClusterInfo;
use solana_ledger::{blockstore::Blockstore, blockstore_processor::TransactionStatusSender};
+ use solana_poh::poh_recorder::{PohRecorder, WorkingBankEntry};
use solana_rpc::{
optimistically_confirmed_bank_tracker::BankNotificationSender,
rpc_subscriptions::RpcSubscriptions,
@@ -74,9 +74,9 @@ impl Tpu
let fetch_stage = FetchStage::new_with_sender(
transactions_sockets,
tpu_forwards_sockets,
- &exit,
+ exit,
&packet_sender,
- &poh_recorder,
+ poh_recorder,
tpu_coalesce_ms,
);
let (verified_sender, verified_receiver) = unbounded();
@@ -88,10 +88,10 @@ impl Tpu

let (verified_vote_packets_sender, verified_vote_packets_receiver) = unbounded();
let cluster_info_vote_listener = ClusterInfoVoteListener::new(
- &exit,
+ exit,
cluster_info.clone(),
verified_vote_packets_sender,
- &poh_recorder,
+ poh_recorder,
vote_tracker,
bank_forks,
subscriptions.clone(),
@@ -104,7 +104,7 @@ impl Tpu
);

let banking_stage = BankingStage::new(
- &cluster_info,
+ cluster_info,
poh_recorder,
verified_receiver,
verified_vote_packets_receiver,
@@ -117,7 +117,7 @@ impl Tpu
cluster_info.clone(),
entry_receiver,
retransmit_slots_receiver,
- &exit,
+ exit,
blockstore,
shred_version,
);
@@ -13,7 +13,6 @@ use crate::{
completed_data_sets_service::CompletedDataSetsSender,
consensus::Tower,
ledger_cleanup_service::LedgerCleanupService,
- poh_recorder::PohRecorder,
replay_stage::{ReplayStage, ReplayStageConfig},
retransmit_stage::RetransmitStage,
rewards_recorder_service::RewardsRecorderSender,
@@ -29,6 +28,7 @@ use solana_ledger::{
blockstore_processor::TransactionStatusSender,
leader_schedule_cache::LeaderScheduleCache,
};
+ use solana_poh::poh_recorder::PohRecorder;
use solana_rpc::{
max_slots::MaxSlots, optimistically_confirmed_bank_tracker::BankNotificationSender,
rpc_subscriptions::RpcSubscriptions,
@@ -37,6 +37,7 @@ use solana_runtime::{
accounts_background_service::{
AbsRequestHandler, AbsRequestSender, AccountsBackgroundService, SnapshotRequestHandler,
},
+ accounts_db::AccountShrinkThreshold,
bank_forks::{BankForks, SnapshotConfig},
commitment::BlockCommitmentCache,
vote_sender_types::ReplayVoteSender,
@@ -88,6 +89,7 @@ pub struct TvuConfig
pub rocksdb_compaction_interval: Option<u64>,
pub rocksdb_max_compaction_jitter: Option<u64>,
pub wait_for_vote_to_start_leader: bool,
+ pub accounts_shrink_ratio: AccountShrinkThreshold,
}

impl Tvu {
@@ -111,7 +113,7 @@ impl Tvu
tower: Tower,
leader_schedule_cache: &Arc<LeaderScheduleCache>,
exit: &Arc<AtomicBool>,
- completed_slots_receivers: [CompletedSlotsReceiver; 2],
+ completed_slots_receiver: CompletedSlotsReceiver,
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
cfg: Option<Arc<AtomicBool>>,
transaction_status_sender: Option<TransactionStatusSender>,
@@ -150,7 +152,7 @@ impl Tvu
repair_socket.clone(),
&fetch_sender,
Some(bank_forks.clone()),
- &exit,
+ exit,
);

let (verified_sender, verified_receiver) = unbounded();
@@ -165,16 +167,18 @@ impl Tvu
let compaction_interval = tvu_config.rocksdb_compaction_interval;
let max_compaction_jitter = tvu_config.rocksdb_max_compaction_jitter;
let (duplicate_slots_sender, duplicate_slots_receiver) = unbounded();
+ let (cluster_slots_update_sender, cluster_slots_update_receiver) = unbounded();
let retransmit_stage = RetransmitStage::new(
bank_forks.clone(),
leader_schedule_cache,
blockstore.clone(),
- &cluster_info,
+ cluster_info,
Arc::new(retransmit_sockets),
repair_socket,
verified_receiver,
&exit,
- completed_slots_receivers,
+ completed_slots_receiver,
+ cluster_slots_update_receiver,
*bank_forks.read().unwrap().working_bank().epoch_schedule(),
cfg,
tvu_config.shred_version,
@@ -208,7 +212,7 @@ impl Tvu
accounts_hash_receiver,
pending_snapshot_package,
exit,
- &cluster_info,
+ cluster_info,
tvu_config.trusted_validators.clone(),
tvu_config.halt_on_trusted_validators_accounts_hash_mismatch,
tvu_config.accounts_hash_fault_injection_slots,
@@ -288,6 +292,7 @@ impl Tvu
replay_vote_sender,
gossip_confirmed_slots_receiver,
gossip_verified_vote_hash_receiver,
+ cluster_slots_update_sender,
);

let ledger_cleanup_service = tvu_config.max_ledger_shreds.map(|max_ledger_shreds| {
@@ -295,7 +300,7 @@ impl Tvu
ledger_cleanup_slot_receiver,
blockstore.clone(),
max_ledger_shreds,
- &exit,
+ exit,
compaction_interval,
max_compaction_jitter,
)
@@ -303,7 +308,7 @@ impl Tvu

let accounts_background_service = AccountsBackgroundService::new(
bank_forks.clone(),
- &exit,
+ exit,
accounts_background_request_handler,
tvu_config.accounts_db_caching_enabled,
tvu_config.test_hash_calculation,
@@ -338,7 +343,6 @@ impl Tvu
#[cfg(test)]
pub mod tests {
use super::*;
- use crate::banking_stage::create_test_recorder;
use serial_test::serial;
use solana_gossip::cluster_info::{ClusterInfo, Node};
use solana_ledger::{
@@ -346,6 +350,7 @@ pub mod tests
create_new_tmp_ledger,
genesis_utils::{create_genesis_config, GenesisConfigInfo},
};
+ use solana_poh::poh_recorder::create_test_recorder;
use solana_rpc::optimistically_confirmed_bank_tracker::OptimisticallyConfirmedBank;
use solana_runtime::bank::Bank;
use std::sync::atomic::Ordering;
@@ -373,7 +378,7 @@ pub mod tests
let BlockstoreSignals {
blockstore,
ledger_signal_receiver,
- completed_slots_receivers,
+ completed_slots_receiver,
..
} = Blockstore::open_with_signal(&blockstore_path, None, true)
.expect("Expected to successfully open ledger");
@@ -417,7 +422,7 @@ pub mod tests
tower,
&leader_schedule_cache,
&exit,
- completed_slots_receivers,
+ completed_slots_receiver,
block_commitment_cache,
None,
None,
@@ -116,7 +116,7 @@ mod tests
if *unfrozen_vote_slot >= frozen_vote_slot {
let vote_hashes_map = unfrozen_gossip_verified_vote_hashes
.votes_per_slot
- .get(&unfrozen_vote_slot)
+ .get(unfrozen_vote_slot)
.unwrap();
assert_eq!(vote_hashes_map.len(), num_duplicate_hashes);
for pubkey_votes in vote_hashes_map.values() {
@@ -6,18 +6,13 @@ use crate::{
cluster_info_vote_listener::VoteTracker,
completed_data_sets_service::CompletedDataSetsService,
consensus::{reconcile_blockstore_roots_with_tower, Tower},
- poh_recorder::{PohRecorder, GRACE_TICKS_FACTOR, MAX_GRACE_SLOTS},
- poh_service::{self, PohService},
rewards_recorder_service::{RewardsRecorderSender, RewardsRecorderService},
- rpc::JsonRpcConfig,
- rpc_service::JsonRpcService,
sample_performance_service::SamplePerformanceService,
serve_repair::ServeRepair,
serve_repair_service::ServeRepairService,
sigverify,
snapshot_packager_service::{PendingSnapshotPackage, SnapshotPackagerService},
tpu::{Tpu, DEFAULT_TPU_COALESCE_MS},
- transaction_status_service::TransactionStatusService,
tvu::{Sockets, Tvu, TvuConfig},
};
use crossbeam_channel::{bounded, unbounded};
@@ -41,15 +36,23 @@ use solana_ledger::{
};
use solana_measure::measure::Measure;
use solana_metrics::datapoint_info;
+ use solana_poh::{
+ poh_recorder::{PohRecorder, GRACE_TICKS_FACTOR, MAX_GRACE_SLOTS},
+ poh_service::{self, PohService},
+ };
use solana_rpc::{
max_slots::MaxSlots,
optimistically_confirmed_bank_tracker::{
OptimisticallyConfirmedBank, OptimisticallyConfirmedBankTracker,
},
+ rpc::JsonRpcConfig,
rpc_pubsub_service::{PubSubConfig, PubSubService},
+ rpc_service::JsonRpcService,
rpc_subscriptions::RpcSubscriptions,
+ transaction_status_service::TransactionStatusService,
};
use solana_runtime::{
+ accounts_db::AccountShrinkThreshold,
accounts_index::AccountSecondaryIndexes,
bank::Bank,
bank_forks::{BankForks, SnapshotConfig},
@@ -59,6 +62,7 @@ use solana_runtime::{
use solana_sdk::{
clock::Slot,
epoch_schedule::MAX_LEADER_SCHEDULE_EPOCH_OFFSET,
+ exit::Exit,
genesis_config::GenesisConfig,
hash::Hash,
pubkey::Pubkey,
@@ -67,10 +71,8 @@ use solana_sdk::{
timing::timestamp,
};
use solana_vote_program::vote_state::VoteState;
- use std::time::Instant;
use std::{
collections::HashSet,
- fmt,
net::SocketAddr,
ops::Deref,
path::{Path, PathBuf},
@@ -78,7 +80,7 @@ use std::{
sync::mpsc::Receiver,
sync::{Arc, Mutex, RwLock},
thread::{sleep, Builder},
- time::Duration,
+ time::{Duration, Instant},
};

const MAX_COMPLETED_DATA_SETS_IN_CHANNEL: usize = 100_000;
@@ -135,8 +137,9 @@ pub struct ValidatorConfig
pub accounts_db_test_hash_calculation: bool,
pub accounts_db_use_index_hash_calculation: bool,
pub tpu_coalesce_ms: u64,
- pub validator_exit: Arc<RwLock<ValidatorExit>>,
+ pub validator_exit: Arc<RwLock<Exit>>,
pub no_wait_for_vote_to_start_leader: bool,
+ pub accounts_shrink_ratio: AccountShrinkThreshold,
}

impl Default for ValidatorConfig {
@@ -191,8 +194,9 @@ impl Default for ValidatorConfig
accounts_db_test_hash_calculation: false,
accounts_db_use_index_hash_calculation: true,
tpu_coalesce_ms: DEFAULT_TPU_COALESCE_MS,
- validator_exit: Arc::new(RwLock::new(ValidatorExit::default())),
+ validator_exit: Arc::new(RwLock::new(Exit::default())),
no_wait_for_vote_to_start_leader: true,
+ accounts_shrink_ratio: AccountShrinkThreshold::default(),
}
}
}
@@ -223,35 +227,6 @@ impl Default for ValidatorStartProgress
}
}

- #[derive(Default)]
- pub struct ValidatorExit {
- exited: bool,
- exits: Vec<Box<dyn FnOnce() + Send + Sync>>,
- }

- impl ValidatorExit {
- pub fn register_exit(&mut self, exit: Box<dyn FnOnce() + Send + Sync>) {
- if self.exited {
- exit();
- } else {
- self.exits.push(exit);
- }
- }

- pub fn exit(&mut self) {
- self.exited = true;
- for exit in self.exits.drain(..) {
- exit();
- }
- }
- }

- impl fmt::Debug for ValidatorExit {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- write!(f, "{} exits", self.exits.len())
- }
- }
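`ValidatorExit` is deleted here in favor of the equivalent `solana_sdk::exit::Exit` (see the new `exit::Exit` import above). The pattern is a run-once callback registry where registrations made after the exit signal fire immediately; a standalone sketch of that behavior:

```rust
#[derive(Default)]
struct Exit {
    exited: bool,
    exits: Vec<Box<dyn FnOnce() + Send + Sync>>,
}

impl Exit {
    /// Late registrations after exit() still run, just immediately.
    fn register_exit(&mut self, exit: Box<dyn FnOnce() + Send + Sync>) {
        if self.exited {
            exit();
        } else {
            self.exits.push(exit);
        }
    }

    fn exit(&mut self) {
        self.exited = true;
        for exit in self.exits.drain(..) {
            exit();
        }
    }
}

fn main() {
    let mut e = Exit::default();
    e.register_exit(Box::new(|| println!("shutting down service A")));
    e.exit(); // runs the callback above
    e.register_exit(Box::new(|| println!("late registrant runs immediately")));
}
```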
#[derive(Default)]
|
||||
struct TransactionHistoryServices {
|
||||
transaction_status_sender: Option<TransactionStatusSender>,
|
||||
@@ -264,7 +239,7 @@ struct TransactionHistoryServices {
|
||||
}
|
||||
|
||||
pub struct Validator {
|
||||
validator_exit: Arc<RwLock<ValidatorExit>>,
|
||||
validator_exit: Arc<RwLock<Exit>>,
|
||||
json_rpc_service: Option<JsonRpcService>,
|
||||
pubsub_service: Option<PubSubService>,
|
||||
optimistically_confirmed_bank_tracker: Option<OptimisticallyConfirmedBankTracker>,
|
||||
@@ -387,7 +362,7 @@ impl Validator {
|
||||
bank_forks,
|
||||
blockstore,
|
||||
ledger_signal_receiver,
|
||||
completed_slots_receivers,
|
||||
completed_slots_receiver,
|
||||
leader_schedule_cache,
|
||||
snapshot_hash,
|
||||
TransactionHistoryServices {
|
||||
@@ -409,6 +384,7 @@ impl Validator {
|
||||
&exit,
|
||||
config.enforce_ulimit_nofile,
|
||||
&start_progress,
|
||||
config.no_poh_speed_test,
|
||||
);
|
||||
|
||||
*start_progress.write().unwrap() = ValidatorStartProgress::StartingServices;
|
||||
@@ -594,9 +570,13 @@ impl Validator {
|
||||
*start_progress.write().unwrap() = ValidatorStartProgress::Halted;
|
||||
std::thread::park();
|
||||
}
|
||||
|
||||
let ip_echo_server = node.sockets.ip_echo.map(solana_net_utils::ip_echo_server);
|
||||
|
||||
let ip_echo_server = match node.sockets.ip_echo {
|
||||
None => None,
|
||||
Some(tcp_listener) => Some(solana_net_utils::ip_echo_server(
|
||||
tcp_listener,
|
||||
Some(node.info.shred_version),
|
||||
)),
|
||||
};
|
||||
let gossip_service = GossipService::new(
|
||||
&cluster_info,
|
||||
Some(bank_forks.clone()),
@@ -640,10 +620,6 @@ impl Validator {
             (None, None)
         };

-        if !config.no_poh_speed_test {
-            check_poh_speed(&genesis_config, None);
-        }
-
         let waited_for_supermajority = if let Ok(waited) = wait_for_supermajority(
             config,
             &bank,

@@ -719,7 +695,7 @@ impl Validator {
             tower,
             &leader_schedule_cache,
             &exit,
-            completed_slots_receivers,
+            completed_slots_receiver,
             block_commitment_cache,
             config.enable_partition.clone(),
             transaction_status_sender.clone(),

@@ -748,6 +724,7 @@ impl Validator {
                 rocksdb_compaction_interval: config.rocksdb_compaction_interval,
                 rocksdb_max_compaction_jitter: config.rocksdb_compaction_interval,
                 wait_for_vote_to_start_leader,
+                accounts_shrink_ratio: config.accounts_shrink_ratio,
             },
             &max_slots,
         );

@@ -986,7 +963,7 @@ fn post_process_restored_tower(
         })
         .unwrap_or_else(|err| {
             let voting_has_been_active =
-                active_vote_account_exists_in_bank(&bank_forks.working_bank(), &vote_account);
+                active_vote_account_exists_in_bank(&bank_forks.working_bank(), vote_account);
             if !err.is_file_missing() {
                 datapoint_error!(
                     "tower_error",

@@ -1019,10 +996,10 @@ fn post_process_restored_tower(
             }

             Tower::new_from_bankforks(
-                &bank_forks,
+                bank_forks,
                 tower_path,
-                &validator_identity,
-                &vote_account,
+                validator_identity,
+                vote_account,
             )
         })
 }
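Most of the single-character hunks in this file are the same mechanical cleanup: dropping `&` where the binding is already a reference, so the call site no longer builds a needless `&&T` (clippy's needless_borrow lint). A freestanding example of the pattern, with invented names:

    fn lookup(vote_account: &str) -> bool {
        !vote_account.is_empty()
    }

    fn process(vote_account: &String) {
        // `vote_account` is already a reference; `&vote_account` would
        // create a needless `&&String` that the compiler silently
        // re-derefs (clippy::needless_borrow). Pass it directly.
        assert!(lookup(vote_account));
    }

    fn main() {
        process(&String::from("vote111"));
    }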
@@ -1037,12 +1014,13 @@ fn new_banks_from_ledger(
     exit: &Arc<AtomicBool>,
     enforce_ulimit_nofile: bool,
     start_progress: &Arc<RwLock<ValidatorStartProgress>>,
+    no_poh_speed_test: bool,
 ) -> (
     GenesisConfig,
     BankForks,
     Arc<Blockstore>,
     Receiver<bool>,
-    [CompletedSlotsReceiver; 2],
+    CompletedSlotsReceiver,
     LeaderScheduleCache,
     Option<(Slot, Hash)>,
     TransactionHistoryServices,

@@ -1070,10 +1048,14 @@ fn new_banks_from_ledger(
         }
     }

+    if !no_poh_speed_test {
+        check_poh_speed(&genesis_config, None);
+    }
+
     let BlockstoreSignals {
         mut blockstore,
         ledger_signal_receiver,
-        completed_slots_receivers,
+        completed_slots_receiver,
         ..
     } = Blockstore::open_with_signal(
         ledger_path,
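Taken together with the deletion in `Validator::new` above, these hunks move the PoH speed check into `new_banks_from_ledger`, still gated by the same flag, so an underpowered node aborts before the expensive ledger processing rather than after it. A rough sketch of that kind of gated startup check (the hash loop and thresholds here are invented for illustration, not the real `check_poh_speed`):

    use std::time::Instant;

    // Invented stand-in for check_poh_speed: time a hash-like loop and
    // panic if the node cannot finish one tick's worth of work in budget.
    fn check_speed(hashes_per_tick: u64, tick_budget_ms: u128) {
        let start = Instant::now();
        let mut acc: u64 = 0;
        for i in 0..hashes_per_tick {
            acc = acc.wrapping_mul(6_364_136_223_846_793_005).wrapping_add(i);
        }
        let elapsed_ms = start.elapsed().as_millis();
        assert!(
            elapsed_ms <= tick_budget_ms,
            "too slow: {}ms for one tick (budget {}ms, acc {})",
            elapsed_ms,
            tick_budget_ms,
            acc
        );
    }

    fn main() {
        let no_poh_speed_test = false; // would come from validator config
        if !no_poh_speed_test {
            // Run the check before any heavy startup work begins.
            check_speed(1_000_000, 400);
        }
        println!("startup speed check passed");
    }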
@@ -1085,9 +1067,9 @@ fn new_banks_from_ledger(

     let tower_path = config.tower_path.as_deref().unwrap_or(ledger_path);

-    let restored_tower = Tower::restore(tower_path, &validator_identity);
+    let restored_tower = Tower::restore(tower_path, validator_identity);
     if let Ok(tower) = &restored_tower {
-        reconcile_blockstore_roots_with_tower(&tower, &blockstore).unwrap_or_else(|err| {
+        reconcile_blockstore_roots_with_tower(tower, &blockstore).unwrap_or_else(|err| {
             error!("Failed to reconcile blockstore with tower: {:?}", err);
             abort()
         });

@@ -1119,6 +1101,7 @@ fn new_banks_from_ledger(
         debug_keys: config.debug_keys.clone(),
         account_indexes: config.account_indexes.clone(),
         accounts_db_caching_enabled: config.accounts_db_caching_enabled,
+        shrink_ratio: config.accounts_shrink_ratio,
         ..blockstore_processor::ProcessOptions::default()
     };

@@ -1188,7 +1171,7 @@ fn new_banks_from_ledger(
         None,
         &snapshot_config.snapshot_package_output_path,
         snapshot_config.archive_format,
-        Some(&bank_forks.root_bank().get_thread_pool()),
+        Some(bank_forks.root_bank().get_thread_pool()),
         snapshot_config.maximum_snapshots_to_retain,
     )
     .unwrap_or_else(|err| {

@@ -1200,9 +1183,9 @@ fn new_banks_from_ledger(

     let tower = post_process_restored_tower(
         restored_tower,
-        &validator_identity,
-        &vote_account,
-        &config,
+        validator_identity,
+        vote_account,
+        config,
         tower_path,
         &bank_forks,
     );

@@ -1225,7 +1208,7 @@ fn new_banks_from_ledger(
         bank_forks,
         blockstore,
         ledger_signal_receiver,
-        completed_slots_receivers,
+        completed_slots_receiver,
         leader_schedule_cache,
         snapshot_hash,
         transaction_history_services,

@@ -1407,7 +1390,7 @@ fn wait_for_supermajority(
         );
     }

-    let gossip_stake_percent = get_stake_percent_in_gossip(&bank, &cluster_info, i % 10 == 0);
+    let gossip_stake_percent = get_stake_percent_in_gossip(bank, cluster_info, i % 10 == 0);

     if gossip_stake_percent >= WAIT_FOR_SUPERMAJORITY_THRESHOLD_PERCENT {
         break;

@@ -1636,9 +1619,11 @@ mod tests {
         }
         drop(blockstore);

         // this purges and compacts all slots greater than or equal to 5
         backup_and_clear_blockstore(&blockstore_path, 5, 2);

         let blockstore = Blockstore::open(&blockstore_path).unwrap();
+        // assert that slots less than 5 aren't affected
+        assert!(blockstore.meta(4).unwrap().unwrap().next_slots.is_empty());
         for i in 5..10 {
             assert!(blockstore

@@ -15,17 +15,25 @@ impl VerifiedVotePackets {
         &mut self,
         vote_packets_receiver: &VerifiedLabelVotePacketsReceiver,
         last_update_version: &mut u64,
+        would_be_leader: bool,
     ) -> Result<()> {
         let timer = Duration::from_millis(200);
         let vote_packets = vote_packets_receiver.recv_timeout(timer)?;
         *last_update_version += 1;
-        for (label, slot, packet) in vote_packets {
-            self.0.insert(label, (*last_update_version, slot, packet));
-        }
-        while let Ok(vote_packets) = vote_packets_receiver.try_recv() {
+        if would_be_leader {
             for (label, slot, packet) in vote_packets {
                 self.0.insert(label, (*last_update_version, slot, packet));
             }
+        } else {
+            self.0.clear();
+            self.0.shrink_to_fit();
+        }
+        while let Ok(vote_packets) = vote_packets_receiver.try_recv() {
+            if would_be_leader {
+                for (label, slot, packet) in vote_packets {
+                    self.0.insert(label, (*last_update_version, slot, packet));
+                }
+            }
         }
         Ok(())
     }
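The rewritten method above only buffers incoming votes while this node is about to lead; otherwise it clears the map and returns its capacity, since buffered votes would never be forwarded into a block anyway. A reduced sketch of the same gating, with `std::sync::mpsc` standing in for the crossbeam channel and simplified vote tuples:

    use std::collections::HashMap;
    use std::sync::mpsc::channel;

    fn main() {
        let (sender, receiver) = channel::<Vec<(String, u64)>>();
        sender.send(vec![("vote-a".to_string(), 7)]).unwrap();
        sender.send(vec![("vote-b".to_string(), 8)]).unwrap();

        let mut buffered: HashMap<String, u64> = HashMap::new();
        let would_be_leader = false; // not leading soon

        while let Ok(batch) = receiver.try_recv() {
            if would_be_leader {
                for (label, slot) in batch {
                    buffered.insert(label, slot);
                }
            } else {
                // Drop the batch and give the allocation back instead of
                // growing a buffer nobody will read.
                buffered.clear();
                buffered.shrink_to_fit();
            }
        }
        assert!(buffered.is_empty());
    }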
@@ -137,7 +145,7 @@ mod tests {
         s.send(vec![(label1.clone(), 42, later_packets)]).unwrap();
         let mut verified_vote_packets = VerifiedVotePackets(HashMap::new());
         verified_vote_packets
-            .receive_and_process_vote_packets(&r, &mut update_version)
+            .receive_and_process_vote_packets(&r, &mut update_version, true)
             .unwrap();

         // Test timestamps for same batch are the same

@@ -171,7 +179,7 @@ mod tests {
         s.send(vec![(label2.clone(), 51, Packets::default())])
             .unwrap();
         verified_vote_packets
-            .receive_and_process_vote_packets(&r, &mut update_version)
+            .receive_and_process_vote_packets(&r, &mut update_version, true)
             .unwrap();
         let update_version2 = verified_vote_packets.get_vote_packets(&label2).unwrap().0;
         assert!(update_version2 > update_version1);

@@ -179,8 +187,8 @@ mod tests {
         // Test empty doesn't bump the version
         let before = update_version;
         assert_matches!(
-            verified_vote_packets.receive_and_process_vote_packets(&r, &mut update_version),
-            Err(Error::CrossbeamRecvTimeoutError(RecvTimeoutError::Timeout))
+            verified_vote_packets.receive_and_process_vote_packets(&r, &mut update_version, true),
+            Err(Error::CrossbeamRecvTimeout(RecvTimeoutError::Timeout))
         );
         assert_eq!(before, update_version);
     }

@@ -134,7 +134,7 @@ fn verify_repair(
         .map(|repair_meta| {
             outstanding_requests.register_response(
                 repair_meta.nonce,
-                &shred,
+                shred,
                 solana_sdk::timing::timestamp(),
             )
         })

@@ -153,7 +153,7 @@ fn prune_shreds_invalid_repair(
     let mut outstanding_requests = outstanding_requests.write().unwrap();
     shreds.retain(|shred| {
         let should_keep = (
-            verify_repair(&mut outstanding_requests, &shred, &repair_infos[i]),
+            verify_repair(&mut outstanding_requests, shred, &repair_infos[i]),
             i += 1,
         )
         .0;
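`Vec::retain` hands the closure each element but no index, so the hunk above threads a counter through a tuple: Rust evaluates tuple fields left to right, so the predicate is computed with the current `i`, then `i += 1` runs as a side effect, and `.0` extracts the verdict. The idiom in isolation, with a precomputed verdict array standing in for `verify_repair`:

    fn main() {
        let mut shreds = vec![10, 15, 20, 25];
        // One verdict per element, parallel to `shreds` (here precomputed;
        // in the real code it comes from verify_repair).
        let keep = [true, false, true, false];

        let mut i = 0;
        shreds.retain(|_shred| {
            // Tuple fields evaluate left to right: read keep[i] first,
            // then bump i; `.0` pulls out the boolean verdict.
            (keep[i], i += 1).0
        });

        assert_eq!(shreds, vec![10, 20]);
    }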
@@ -188,9 +188,14 @@ where
     }

     prune_shreds_invalid_repair(&mut shreds, &mut repair_infos, outstanding_requests);
+    let repairs: Vec<_> = repair_infos
+        .iter()
+        .map(|repair_info| repair_info.is_some())
+        .collect();

     let (completed_data_sets, inserted_indices) = blockstore.insert_shreds_handle_duplicate(
         shreds,
+        repairs,
         Some(leader_schedule_cache),
         false,
         &handle_duplicate,
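The added lines compute one `bool` per shred, parallel to the shred vector, marking which shreds arrived via repair; the blockstore insert then consumes those flags alongside the shreds. The mapping step on its own, with hypothetical repair metadata:

    fn main() {
        // Hypothetical repair metadata: Some(..) marks a repaired shred,
        // None a normally received one.
        let repair_infos: Vec<Option<u32>> = vec![None, Some(9), None];

        let repairs: Vec<bool> = repair_infos
            .iter()
            .map(|repair_info| repair_info.is_some())
            .collect();

        assert_eq!(repairs, vec![false, true, false]);
    }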
@@ -582,12 +587,12 @@ impl WindowService {
         H: Fn(),
     {
         match e {
-            Error::CrossbeamRecvTimeoutError(RecvTimeoutError::Disconnected) => true,
-            Error::CrossbeamRecvTimeoutError(RecvTimeoutError::Timeout) => {
+            Error::CrossbeamRecvTimeout(RecvTimeoutError::Disconnected) => true,
+            Error::CrossbeamRecvTimeout(RecvTimeoutError::Timeout) => {
                 handle_timeout();
                 false
             }
-            Error::CrossbeamSendError => true,
+            Error::CrossbeamSend => true,
             _ => {
                 handle_error();
                 error!("thread {:?} error {:?}", thread::current().name(), e);
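This hunk only renames the error variants, but the surrounding match is the worker's retry policy: a disconnected channel or failed send ends the thread, a timeout fires the timeout hook and retries, and anything else logs and retries. A freestanding sketch with a hypothetical `Error` enum in place of the crate's:

    use std::sync::mpsc::RecvTimeoutError;

    // Hypothetical error enum standing in for the crate's Error type.
    #[derive(Debug)]
    enum Error {
        CrossbeamRecvTimeout(RecvTimeoutError),
        CrossbeamSend,
        Other(String),
    }

    // Returns true when the worker thread should shut down.
    fn should_stop(e: &Error, handle_timeout: impl Fn()) -> bool {
        match e {
            Error::CrossbeamRecvTimeout(RecvTimeoutError::Disconnected) => true,
            Error::CrossbeamRecvTimeout(RecvTimeoutError::Timeout) => {
                handle_timeout();
                false // benign: poll again
            }
            Error::CrossbeamSend => true,
            other => {
                eprintln!("worker error: {:?}", other);
                false
            }
        }
    }

    fn main() {
        let e = Error::CrossbeamRecvTimeout(RecvTimeoutError::Timeout);
        assert!(!should_stop(&e, || println!("tick with no work")));
        assert!(should_stop(&Error::CrossbeamSend, || ()));
        let _ = Error::Other("unused variant in this demo".to_string());
    }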
@@ -630,7 +635,7 @@ mod test {
         keypair: &Arc<Keypair>,
     ) -> Vec<Shred> {
         let shredder = Shredder::new(slot, parent, keypair.clone(), 0, 0).unwrap();
-        shredder.entries_to_shreds(&entries, true, 0).0
+        shredder.entries_to_shreds(entries, true, 0).0
     }

     #[test]
Some files were not shown because too many files have changed in this diff