Compare commits
76 Commits
Author | SHA1 | Date | |
---|---|---|---|
85bbcdad9a | |||
336c1c1d37 | |||
1570afe493 | |||
c7c650fccc | |||
9b7fba69f4 | |||
0bd355c166 | |||
0d8c8d013d | |||
7a57f7b8cc | |||
940c4731c4 | |||
4332f0ca05 | |||
e0e6e20e02 | |||
0fdaa1438e | |||
8a111229f7 | |||
e3eb9c195a | |||
8dd5ec6fbd | |||
e5d60bc56d | |||
cba97d576a | |||
0670213365 | |||
ed5c11b3aa | |||
4b8c4704b0 | |||
c5374095e6 | |||
6abd5fbc3e | |||
a6a302f41f | |||
4c764829da | |||
9900c1ad8a | |||
3fabbab417 | |||
6c99c1ae13 | |||
c2320fceab | |||
8b87d86358 | |||
5d0e1d62d5 | |||
7e613c7a78 | |||
2a30436e45 | |||
5315a89e7d | |||
8c328316ae | |||
030a97d098 | |||
c40e71dc03 | |||
2f633cdfb7 | |||
edd6ae588a | |||
2a73ba2fbb | |||
5eadb86500 | |||
c98ab6c6dc | |||
5321463892 | |||
d668a7694f | |||
9bb482e46f | |||
c534c928a7 | |||
0d0478c4a4 | |||
cda681a2f0 | |||
72ed4f28b1 | |||
6afeaac7a5 | |||
75a2b66206 | |||
5966053a6d | |||
e500b79858 | |||
428c20c79f | |||
bf84bc17ea | |||
30fa9cbee7 | |||
eefca613ad | |||
f6d943aec7 | |||
88462e67b5 | |||
eb683dd402 | |||
c7d0aea5f4 | |||
8f08953100 | |||
09b009abd9 | |||
c37e481a43 | |||
72cf55b8c3 | |||
6cb24ae7b6 | |||
eeaf0234f0 | |||
0200740d70 | |||
1268eef3b2 | |||
b433048003 | |||
daf2c3c155 | |||
03d213d764 | |||
99f9481b5d | |||
10bd14bca6 | |||
d26533e370 | |||
820abacf49 | |||
4466aa39c4 |
749
Cargo.lock
generated
749
Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
@ -42,13 +42,13 @@ members = [
|
||||
"poh",
|
||||
"poh-bench",
|
||||
"program-test",
|
||||
"programs/secp256k1",
|
||||
"programs/bpf_loader",
|
||||
"programs/config",
|
||||
"programs/exchange",
|
||||
"programs/failure",
|
||||
"programs/noop",
|
||||
"programs/ownable",
|
||||
"programs/secp256k1",
|
||||
"programs/stake",
|
||||
"programs/vote",
|
||||
"remote-wallet",
|
||||
@ -60,7 +60,6 @@ members = [
|
||||
"sdk/cargo-test-bpf",
|
||||
"scripts",
|
||||
"stake-accounts",
|
||||
"stake-monitor",
|
||||
"sys-tuner",
|
||||
"tokens",
|
||||
"transaction-status",
|
||||
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-account-decoder"
|
||||
version = "1.7.4"
|
||||
version = "1.7.5"
|
||||
description = "Solana account decoder"
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -19,9 +19,9 @@ lazy_static = "1.4.0"
|
||||
serde = "1.0.122"
|
||||
serde_derive = "1.0.103"
|
||||
serde_json = "1.0.56"
|
||||
solana-config-program = { path = "../programs/config", version = "=1.7.4" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.7.4" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "=1.7.4" }
|
||||
solana-config-program = { path = "../programs/config", version = "=1.7.5" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.7.5" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "=1.7.5" }
|
||||
spl-token-v2-0 = { package = "spl-token", version = "=3.1.1", features = ["no-entrypoint"] }
|
||||
thiserror = "1.0"
|
||||
zstd = "0.5.1"
|
||||
|
@ -2,7 +2,7 @@
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
edition = "2018"
|
||||
name = "solana-accounts-bench"
|
||||
version = "1.7.4"
|
||||
version = "1.7.5"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@ -11,11 +11,11 @@ publish = false
|
||||
[dependencies]
|
||||
log = "0.4.11"
|
||||
rayon = "1.5.0"
|
||||
solana-logger = { path = "../logger", version = "=1.7.4" }
|
||||
solana-runtime = { path = "../runtime", version = "=1.7.4" }
|
||||
solana-measure = { path = "../measure", version = "=1.7.4" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.7.4" }
|
||||
solana-version = { path = "../version", version = "=1.7.4" }
|
||||
solana-logger = { path = "../logger", version = "=1.7.5" }
|
||||
solana-runtime = { path = "../runtime", version = "=1.7.5" }
|
||||
solana-measure = { path = "../measure", version = "=1.7.5" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.7.5" }
|
||||
solana-version = { path = "../version", version = "=1.7.5" }
|
||||
rand = "0.7.0"
|
||||
clap = "2.33.1"
|
||||
crossbeam-channel = "0.4"
|
||||
|
@ -2,7 +2,7 @@
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
edition = "2018"
|
||||
name = "solana-accounts-cluster-bench"
|
||||
version = "1.7.4"
|
||||
version = "1.7.5"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@ -13,23 +13,23 @@ clap = "2.33.1"
|
||||
log = "0.4.11"
|
||||
rand = "0.7.0"
|
||||
rayon = "1.4.1"
|
||||
solana-account-decoder = { path = "../account-decoder", version = "=1.7.4" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "=1.7.4" }
|
||||
solana-client = { path = "../client", version = "=1.7.4" }
|
||||
solana-core = { path = "../core", version = "=1.7.4" }
|
||||
solana-faucet = { path = "../faucet", version = "=1.7.4" }
|
||||
solana-gossip = { path = "../gossip", version = "=1.7.4" }
|
||||
solana-logger = { path = "../logger", version = "=1.7.4" }
|
||||
solana-measure = { path = "../measure", version = "=1.7.4" }
|
||||
solana-net-utils = { path = "../net-utils", version = "=1.7.4" }
|
||||
solana-runtime = { path = "../runtime", version = "=1.7.4" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.7.4" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "=1.7.4" }
|
||||
solana-version = { path = "../version", version = "=1.7.4" }
|
||||
solana-account-decoder = { path = "../account-decoder", version = "=1.7.5" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "=1.7.5" }
|
||||
solana-client = { path = "../client", version = "=1.7.5" }
|
||||
solana-core = { path = "../core", version = "=1.7.5" }
|
||||
solana-faucet = { path = "../faucet", version = "=1.7.5" }
|
||||
solana-gossip = { path = "../gossip", version = "=1.7.5" }
|
||||
solana-logger = { path = "../logger", version = "=1.7.5" }
|
||||
solana-measure = { path = "../measure", version = "=1.7.5" }
|
||||
solana-net-utils = { path = "../net-utils", version = "=1.7.5" }
|
||||
solana-runtime = { path = "../runtime", version = "=1.7.5" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.7.5" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "=1.7.5" }
|
||||
solana-version = { path = "../version", version = "=1.7.5" }
|
||||
spl-token-v2-0 = { package = "spl-token", version = "=3.1.1", features = ["no-entrypoint"] }
|
||||
|
||||
[dev-dependencies]
|
||||
solana-local-cluster = { path = "../local-cluster", version = "=1.7.4" }
|
||||
solana-local-cluster = { path = "../local-cluster", version = "=1.7.5" }
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
||||
|
@ -2,7 +2,7 @@
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
edition = "2018"
|
||||
name = "solana-banking-bench"
|
||||
version = "1.7.4"
|
||||
version = "1.7.5"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@ -14,18 +14,18 @@ crossbeam-channel = "0.4"
|
||||
log = "0.4.11"
|
||||
rand = "0.7.0"
|
||||
rayon = "1.5.0"
|
||||
solana-core = { path = "../core", version = "=1.7.4" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "=1.7.4" }
|
||||
solana-gossip = { path = "../gossip", version = "=1.7.4" }
|
||||
solana-ledger = { path = "../ledger", version = "=1.7.4" }
|
||||
solana-logger = { path = "../logger", version = "=1.7.4" }
|
||||
solana-measure = { path = "../measure", version = "=1.7.4" }
|
||||
solana-perf = { path = "../perf", version = "=1.7.4" }
|
||||
solana-poh = { path = "../poh", version = "=1.7.4" }
|
||||
solana-runtime = { path = "../runtime", version = "=1.7.4" }
|
||||
solana-streamer = { path = "../streamer", version = "=1.7.4" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.7.4" }
|
||||
solana-version = { path = "../version", version = "=1.7.4" }
|
||||
solana-core = { path = "../core", version = "=1.7.5" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "=1.7.5" }
|
||||
solana-gossip = { path = "../gossip", version = "=1.7.5" }
|
||||
solana-ledger = { path = "../ledger", version = "=1.7.5" }
|
||||
solana-logger = { path = "../logger", version = "=1.7.5" }
|
||||
solana-measure = { path = "../measure", version = "=1.7.5" }
|
||||
solana-perf = { path = "../perf", version = "=1.7.5" }
|
||||
solana-poh = { path = "../poh", version = "=1.7.5" }
|
||||
solana-runtime = { path = "../runtime", version = "=1.7.5" }
|
||||
solana-streamer = { path = "../streamer", version = "=1.7.5" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.7.5" }
|
||||
solana-version = { path = "../version", version = "=1.7.5" }
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
||||
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-banks-client"
|
||||
version = "1.7.4"
|
||||
version = "1.7.5"
|
||||
description = "Solana banks client"
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -15,16 +15,16 @@ borsh = "0.9.0"
|
||||
borsh-derive = "0.9.0"
|
||||
futures = "0.3"
|
||||
mio = "0.7.6"
|
||||
solana-banks-interface = { path = "../banks-interface", version = "=1.7.4" }
|
||||
solana-program = { path = "../sdk/program", version = "=1.7.4" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.7.4" }
|
||||
solana-banks-interface = { path = "../banks-interface", version = "=1.7.5" }
|
||||
solana-program = { path = "../sdk/program", version = "=1.7.5" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.7.5" }
|
||||
tarpc = { version = "0.24.1", features = ["full"] }
|
||||
tokio = { version = "1", features = ["full"] }
|
||||
tokio-serde = { version = "0.8", features = ["bincode"] }
|
||||
|
||||
[dev-dependencies]
|
||||
solana-runtime = { path = "../runtime", version = "=1.7.4" }
|
||||
solana-banks-server = { path = "../banks-server", version = "=1.7.4" }
|
||||
solana-runtime = { path = "../runtime", version = "=1.7.5" }
|
||||
solana-banks-server = { path = "../banks-server", version = "=1.7.5" }
|
||||
|
||||
[lib]
|
||||
crate-type = ["lib"]
|
||||
|
@ -11,7 +11,7 @@ pub use solana_banks_interface::{BanksClient as TarpcClient, TransactionStatus};
|
||||
use solana_banks_interface::{BanksRequest, BanksResponse};
|
||||
use solana_program::{
|
||||
clock::Slot, fee_calculator::FeeCalculator, hash::Hash, program_pack::Pack, pubkey::Pubkey,
|
||||
rent::Rent, sysvar,
|
||||
rent::Rent, sysvar::Sysvar,
|
||||
};
|
||||
use solana_sdk::{
|
||||
account::{from_account, Account},
|
||||
@ -124,15 +124,19 @@ impl BanksClient {
|
||||
self.get_fees_with_commitment_and_context(context::current(), CommitmentLevel::default())
|
||||
}
|
||||
|
||||
/// Return the cluster Sysvar
|
||||
pub fn get_sysvar<T: Sysvar>(&mut self) -> impl Future<Output = io::Result<T>> + '_ {
|
||||
self.get_account(T::id()).map(|result| {
|
||||
let sysvar = result?
|
||||
.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "Sysvar not present"))?;
|
||||
from_account::<T, _>(&sysvar)
|
||||
.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "Failed to deserialize sysvar"))
|
||||
})
|
||||
}
|
||||
|
||||
/// Return the cluster rent
|
||||
pub fn get_rent(&mut self) -> impl Future<Output = io::Result<Rent>> + '_ {
|
||||
self.get_account(sysvar::rent::id()).map(|result| {
|
||||
let rent_sysvar = result?
|
||||
.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "Rent sysvar not present"))?;
|
||||
from_account::<Rent, _>(&rent_sysvar).ok_or_else(|| {
|
||||
io::Error::new(io::ErrorKind::Other, "Failed to deserialize Rent sysvar")
|
||||
})
|
||||
})
|
||||
self.get_sysvar::<Rent>()
|
||||
}
|
||||
|
||||
/// Return a recent, rooted blockhash from the server. The cluster will only accept
|
||||
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-banks-interface"
|
||||
version = "1.7.4"
|
||||
version = "1.7.5"
|
||||
description = "Solana banks RPC interface"
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -12,7 +12,7 @@ edition = "2018"
|
||||
[dependencies]
|
||||
mio = "0.7.6"
|
||||
serde = { version = "1.0.122", features = ["derive"] }
|
||||
solana-sdk = { path = "../sdk", version = "=1.7.4" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.7.5" }
|
||||
tarpc = { version = "0.24.1", features = ["full"] }
|
||||
|
||||
[dev-dependencies]
|
||||
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-banks-server"
|
||||
version = "1.7.4"
|
||||
version = "1.7.5"
|
||||
description = "Solana banks server"
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -14,10 +14,10 @@ bincode = "1.3.1"
|
||||
futures = "0.3"
|
||||
log = "0.4.11"
|
||||
mio = "0.7.6"
|
||||
solana-banks-interface = { path = "../banks-interface", version = "=1.7.4" }
|
||||
solana-runtime = { path = "../runtime", version = "=1.7.4" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.7.4" }
|
||||
solana-metrics = { path = "../metrics", version = "=1.7.4" }
|
||||
solana-banks-interface = { path = "../banks-interface", version = "=1.7.5" }
|
||||
solana-runtime = { path = "../runtime", version = "=1.7.5" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.7.5" }
|
||||
solana-metrics = { path = "../metrics", version = "=1.7.5" }
|
||||
tarpc = { version = "0.24.1", features = ["full"] }
|
||||
tokio = { version = "1", features = ["full"] }
|
||||
tokio-serde = { version = "0.8", features = ["bincode"] }
|
||||
|
@ -131,10 +131,13 @@ impl BanksServer {
|
||||
}
|
||||
}
|
||||
|
||||
fn verify_transaction(transaction: &Transaction) -> transaction::Result<()> {
|
||||
fn verify_transaction(
|
||||
transaction: &Transaction,
|
||||
libsecp256k1_0_5_upgrade_enabled: bool,
|
||||
) -> transaction::Result<()> {
|
||||
if let Err(err) = transaction.verify() {
|
||||
Err(err)
|
||||
} else if let Err(err) = transaction.verify_precompiles() {
|
||||
} else if let Err(err) = transaction.verify_precompiles(libsecp256k1_0_5_upgrade_enabled) {
|
||||
Err(err)
|
||||
} else {
|
||||
Ok(())
|
||||
@ -215,7 +218,10 @@ impl Banks for BanksServer {
|
||||
transaction: Transaction,
|
||||
commitment: CommitmentLevel,
|
||||
) -> Option<transaction::Result<()>> {
|
||||
if let Err(err) = verify_transaction(&transaction) {
|
||||
if let Err(err) = verify_transaction(
|
||||
&transaction,
|
||||
self.bank(commitment).libsecp256k1_0_5_upgrade_enabled(),
|
||||
) {
|
||||
return Some(Err(err));
|
||||
}
|
||||
|
||||
|
@ -2,7 +2,7 @@
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
edition = "2018"
|
||||
name = "solana-bench-exchange"
|
||||
version = "1.7.4"
|
||||
version = "1.7.5"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@ -18,22 +18,22 @@ rand = "0.7.0"
|
||||
rayon = "1.5.0"
|
||||
serde_json = "1.0.56"
|
||||
serde_yaml = "0.8.13"
|
||||
solana-clap-utils = { path = "../clap-utils", version = "=1.7.4" }
|
||||
solana-core = { path = "../core", version = "=1.7.4" }
|
||||
solana-genesis = { path = "../genesis", version = "=1.7.4" }
|
||||
solana-client = { path = "../client", version = "=1.7.4" }
|
||||
solana-exchange-program = { path = "../programs/exchange", version = "=1.7.4" }
|
||||
solana-faucet = { path = "../faucet", version = "=1.7.4" }
|
||||
solana-gossip = { path = "../gossip", version = "=1.7.4" }
|
||||
solana-logger = { path = "../logger", version = "=1.7.4" }
|
||||
solana-metrics = { path = "../metrics", version = "=1.7.4" }
|
||||
solana-net-utils = { path = "../net-utils", version = "=1.7.4" }
|
||||
solana-runtime = { path = "../runtime", version = "=1.7.4" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.7.4" }
|
||||
solana-version = { path = "../version", version = "=1.7.4" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "=1.7.5" }
|
||||
solana-core = { path = "../core", version = "=1.7.5" }
|
||||
solana-genesis = { path = "../genesis", version = "=1.7.5" }
|
||||
solana-client = { path = "../client", version = "=1.7.5" }
|
||||
solana-exchange-program = { path = "../programs/exchange", version = "=1.7.5" }
|
||||
solana-faucet = { path = "../faucet", version = "=1.7.5" }
|
||||
solana-gossip = { path = "../gossip", version = "=1.7.5" }
|
||||
solana-logger = { path = "../logger", version = "=1.7.5" }
|
||||
solana-metrics = { path = "../metrics", version = "=1.7.5" }
|
||||
solana-net-utils = { path = "../net-utils", version = "=1.7.5" }
|
||||
solana-runtime = { path = "../runtime", version = "=1.7.5" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.7.5" }
|
||||
solana-version = { path = "../version", version = "=1.7.5" }
|
||||
|
||||
[dev-dependencies]
|
||||
solana-local-cluster = { path = "../local-cluster", version = "=1.7.4" }
|
||||
solana-local-cluster = { path = "../local-cluster", version = "=1.7.5" }
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
||||
|
@ -2,7 +2,7 @@
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
edition = "2018"
|
||||
name = "solana-bench-streamer"
|
||||
version = "1.7.4"
|
||||
version = "1.7.5"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@ -10,11 +10,11 @@ publish = false
|
||||
|
||||
[dependencies]
|
||||
clap = "2.33.1"
|
||||
solana-clap-utils = { path = "../clap-utils", version = "=1.7.4" }
|
||||
solana-streamer = { path = "../streamer", version = "=1.7.4" }
|
||||
solana-logger = { path = "../logger", version = "=1.7.4" }
|
||||
solana-net-utils = { path = "../net-utils", version = "=1.7.4" }
|
||||
solana-version = { path = "../version", version = "=1.7.4" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "=1.7.5" }
|
||||
solana-streamer = { path = "../streamer", version = "=1.7.5" }
|
||||
solana-logger = { path = "../logger", version = "=1.7.5" }
|
||||
solana-net-utils = { path = "../net-utils", version = "=1.7.5" }
|
||||
solana-version = { path = "../version", version = "=1.7.5" }
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
||||
|
@ -2,7 +2,7 @@
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
edition = "2018"
|
||||
name = "solana-bench-tps"
|
||||
version = "1.7.4"
|
||||
version = "1.7.5"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@ -15,23 +15,23 @@ log = "0.4.11"
|
||||
rayon = "1.5.0"
|
||||
serde_json = "1.0.56"
|
||||
serde_yaml = "0.8.13"
|
||||
solana-clap-utils = { path = "../clap-utils", version = "=1.7.4" }
|
||||
solana-core = { path = "../core", version = "=1.7.4" }
|
||||
solana-genesis = { path = "../genesis", version = "=1.7.4" }
|
||||
solana-client = { path = "../client", version = "=1.7.4" }
|
||||
solana-faucet = { path = "../faucet", version = "=1.7.4" }
|
||||
solana-gossip = { path = "../gossip", version = "=1.7.4" }
|
||||
solana-logger = { path = "../logger", version = "=1.7.4" }
|
||||
solana-metrics = { path = "../metrics", version = "=1.7.4" }
|
||||
solana-measure = { path = "../measure", version = "=1.7.4" }
|
||||
solana-net-utils = { path = "../net-utils", version = "=1.7.4" }
|
||||
solana-runtime = { path = "../runtime", version = "=1.7.4" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.7.4" }
|
||||
solana-version = { path = "../version", version = "=1.7.4" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "=1.7.5" }
|
||||
solana-core = { path = "../core", version = "=1.7.5" }
|
||||
solana-genesis = { path = "../genesis", version = "=1.7.5" }
|
||||
solana-client = { path = "../client", version = "=1.7.5" }
|
||||
solana-faucet = { path = "../faucet", version = "=1.7.5" }
|
||||
solana-gossip = { path = "../gossip", version = "=1.7.5" }
|
||||
solana-logger = { path = "../logger", version = "=1.7.5" }
|
||||
solana-metrics = { path = "../metrics", version = "=1.7.5" }
|
||||
solana-measure = { path = "../measure", version = "=1.7.5" }
|
||||
solana-net-utils = { path = "../net-utils", version = "=1.7.5" }
|
||||
solana-runtime = { path = "../runtime", version = "=1.7.5" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.7.5" }
|
||||
solana-version = { path = "../version", version = "=1.7.5" }
|
||||
|
||||
[dev-dependencies]
|
||||
serial_test = "0.4.0"
|
||||
solana-local-cluster = { path = "../local-cluster", version = "=1.7.4" }
|
||||
solana-local-cluster = { path = "../local-cluster", version = "=1.7.5" }
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
||||
|
@ -148,6 +148,33 @@ all_test_steps() {
|
||||
command_step stable ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-stable.sh" 60
|
||||
wait_step
|
||||
|
||||
# BPF test suite
|
||||
if affects \
|
||||
.rs$ \
|
||||
Cargo.lock$ \
|
||||
Cargo.toml$ \
|
||||
^ci/rust-version.sh \
|
||||
^ci/test-stable-bpf.sh \
|
||||
^ci/test-stable.sh \
|
||||
^ci/test-local-cluster.sh \
|
||||
^core/build.rs \
|
||||
^fetch-perf-libs.sh \
|
||||
^programs/ \
|
||||
^sdk/ \
|
||||
; then
|
||||
cat >> "$output_file" <<"EOF"
|
||||
- command: "ci/test-stable-bpf.sh"
|
||||
name: "stable-bpf"
|
||||
timeout_in_minutes: 20
|
||||
artifact_paths: "bpf-dumps.tar.bz2"
|
||||
agents:
|
||||
- "queue=default"
|
||||
EOF
|
||||
else
|
||||
annotate --style info \
|
||||
"Stable-BPF skipped as no relevant files were modified"
|
||||
fi
|
||||
|
||||
# Perf test suite
|
||||
if affects \
|
||||
.rs$ \
|
||||
@ -165,7 +192,7 @@ all_test_steps() {
|
||||
cat >> "$output_file" <<"EOF"
|
||||
- command: "ci/test-stable-perf.sh"
|
||||
name: "stable-perf"
|
||||
timeout_in_minutes: 40
|
||||
timeout_in_minutes: 20
|
||||
artifact_paths: "log-*.txt"
|
||||
agents:
|
||||
- "queue=cuda"
|
||||
|
@ -22,6 +22,7 @@ steps+=(shellcheck)
|
||||
steps+=(test-checks)
|
||||
steps+=(test-coverage)
|
||||
steps+=(test-stable)
|
||||
steps+=(test-stable-bpf)
|
||||
steps+=(test-stable-perf)
|
||||
steps+=(test-downstream-builds)
|
||||
steps+=(test-bench)
|
||||
|
@ -35,8 +35,10 @@ echo --- build environment
|
||||
"$cargo" stable clippy --version --verbose
|
||||
"$cargo" nightly clippy --version --verbose
|
||||
|
||||
# audit is done only with stable
|
||||
# audit is done only with "$cargo stable"
|
||||
"$cargo" stable audit --version
|
||||
|
||||
grcov --version
|
||||
)
|
||||
|
||||
export RUST_BACKTRACE=1
|
||||
@ -65,7 +67,8 @@ _ ci/order-crates-for-publishing.py
|
||||
|
||||
# -Z... is needed because of clippy bug: https://github.com/rust-lang/rust-clippy/issues/4612
|
||||
# run nightly clippy for `sdk/` as there's a moderate amount of nightly-only code there
|
||||
_ "$cargo" nightly clippy -Zunstable-options --workspace --all-targets -- --deny=warnings --deny=clippy::integer_arithmetic
|
||||
_ "$cargo" nightly clippy -Zunstable-options --workspace --all-targets -- \
|
||||
--deny=warnings --deny=clippy::integer_arithmetic --allow=clippy::inconsistent_struct_constructor
|
||||
|
||||
_ "$cargo" stable fmt --all -- --check
|
||||
|
||||
|
1
ci/test-stable-bpf.sh
Symbolic link
1
ci/test-stable-bpf.sh
Symbolic link
@ -0,0 +1 @@
|
||||
test-stable.sh
|
@ -21,10 +21,6 @@ export RUST_BACKTRACE=1
|
||||
export RUSTFLAGS="-D warnings"
|
||||
source scripts/ulimit-n.sh
|
||||
|
||||
# Clear the C dependency files, if dependency moves these files are not regenerated
|
||||
test -d target/debug/bpf && find target/debug/bpf -name '*.d' -delete
|
||||
test -d target/release/bpf && find target/release/bpf -name '*.d' -delete
|
||||
|
||||
# Limit compiler jobs to reduce memory usage
|
||||
# on machines with 2gb/thread of memory
|
||||
NPROC=$(nproc)
|
||||
@ -35,17 +31,25 @@ case $testName in
|
||||
test-stable)
|
||||
_ "$cargo" stable test --jobs "$NPROC" --all --exclude solana-local-cluster ${V:+--verbose} -- --nocapture
|
||||
;;
|
||||
test-stable-perf)
|
||||
test-stable-bpf)
|
||||
# Clear the C dependency files, if dependency moves these files are not regenerated
|
||||
test -d target/debug/bpf && find target/debug/bpf -name '*.d' -delete
|
||||
test -d target/release/bpf && find target/release/bpf -name '*.d' -delete
|
||||
|
||||
# rustfilt required for dumping BPF assembly listings
|
||||
"$cargo" install rustfilt
|
||||
|
||||
# solana-keygen required when building C programs
|
||||
_ "$cargo" build --manifest-path=keygen/Cargo.toml
|
||||
export PATH="$PWD/target/debug":$PATH
|
||||
cargo_build_bpf="$(realpath ./cargo-build-bpf)"
|
||||
|
||||
# BPF solana-sdk legacy compile test
|
||||
./cargo-build-bpf --manifest-path sdk/Cargo.toml
|
||||
"$cargo_build_bpf" --manifest-path sdk/Cargo.toml
|
||||
|
||||
# BPF Program unit tests
|
||||
"$cargo" test --manifest-path programs/bpf/Cargo.toml
|
||||
cargo-build-bpf --manifest-path programs/bpf/Cargo.toml --bpf-sdk sdk/bpf
|
||||
"$cargo_build_bpf" --manifest-path programs/bpf/Cargo.toml --bpf-sdk sdk/bpf
|
||||
|
||||
# BPF program system tests
|
||||
_ make -C programs/bpf/c tests
|
||||
@ -53,6 +57,26 @@ test-stable-perf)
|
||||
--manifest-path programs/bpf/Cargo.toml \
|
||||
--no-default-features --features=bpf_c,bpf_rust -- --nocapture
|
||||
|
||||
# Dump BPF program assembly listings
|
||||
for bpf_test in programs/bpf/rust/*; do
|
||||
if pushd "$bpf_test"; then
|
||||
"$cargo_build_bpf" --dump
|
||||
popd
|
||||
fi
|
||||
done
|
||||
|
||||
# BPF program instruction count assertion
|
||||
bpf_target_path=programs/bpf/target
|
||||
_ "$cargo" stable test \
|
||||
--manifest-path programs/bpf/Cargo.toml \
|
||||
--no-default-features --features=bpf_c,bpf_rust assert_instruction_count \
|
||||
-- --nocapture &> "${bpf_target_path}"/deploy/instuction_counts.txt
|
||||
|
||||
bpf_dump_archive="bpf-dumps.tar.bz2"
|
||||
rm -f "$bpf_dump_archive"
|
||||
tar cjvf "$bpf_dump_archive" "${bpf_target_path}"/{deploy/*.txt,bpfel-unknown-unknown/release/*.so}
|
||||
;;
|
||||
test-stable-perf)
|
||||
if [[ $(uname) = Linux ]]; then
|
||||
# Enable persistence mode to keep the CUDA kernel driver loaded, avoiding a
|
||||
# lengthy and unexpected delay the first time CUDA is involved when the driver
|
||||
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-clap-utils"
|
||||
version = "1.7.4"
|
||||
version = "1.7.5"
|
||||
description = "Solana utilities for the clap"
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -12,8 +12,8 @@ edition = "2018"
|
||||
[dependencies]
|
||||
clap = "2.33.0"
|
||||
rpassword = "4.0"
|
||||
solana-remote-wallet = { path = "../remote-wallet", version = "=1.7.4" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.7.4" }
|
||||
solana-remote-wallet = { path = "../remote-wallet", version = "=1.7.5" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.7.5" }
|
||||
thiserror = "1.0.21"
|
||||
tiny-bip39 = "0.8.0"
|
||||
uriparse = "0.6.3"
|
||||
|
@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
edition = "2018"
|
||||
name = "solana-cli-config"
|
||||
description = "Blockchain, Rebuilt for Scale"
|
||||
version = "1.7.4"
|
||||
version = "1.7.5"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
|
@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
edition = "2018"
|
||||
name = "solana-cli-output"
|
||||
description = "Blockchain, Rebuilt for Scale"
|
||||
version = "1.7.4"
|
||||
version = "1.7.5"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@ -19,12 +19,12 @@ indicatif = "0.15.0"
|
||||
serde = "1.0.122"
|
||||
serde_derive = "1.0.103"
|
||||
serde_json = "1.0.56"
|
||||
solana-account-decoder = { path = "../account-decoder", version = "=1.7.4" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "=1.7.4" }
|
||||
solana-client = { path = "../client", version = "=1.7.4" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.7.4" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "=1.7.4" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "=1.7.4" }
|
||||
solana-account-decoder = { path = "../account-decoder", version = "=1.7.5" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "=1.7.5" }
|
||||
solana-client = { path = "../client", version = "=1.7.5" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.7.5" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "=1.7.5" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "=1.7.5" }
|
||||
spl-memo = { version = "=3.0.1", features = ["no-entrypoint"] }
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
|
@ -771,6 +771,7 @@ pub struct CliEpochReward {
|
||||
pub post_balance: u64, // lamports
|
||||
pub percent_change: f64,
|
||||
pub apr: Option<f64>,
|
||||
pub commission: Option<u8>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
@ -815,23 +816,27 @@ impl fmt::Display for CliKeyedEpochRewards {
|
||||
writeln!(f, "Epoch Rewards:")?;
|
||||
writeln!(
|
||||
f,
|
||||
" {:<44} {:<18} {:<18} {:>14} {:>14}",
|
||||
"Address", "Amount", "New Balance", "Percent Change", "APR"
|
||||
" {:<44} {:<18} {:<18} {:>14} {:>14} {:>10}",
|
||||
"Address", "Amount", "New Balance", "Percent Change", "APR", "Commission"
|
||||
)?;
|
||||
for keyed_reward in &self.rewards {
|
||||
match &keyed_reward.reward {
|
||||
Some(reward) => {
|
||||
writeln!(
|
||||
f,
|
||||
" {:<44} ◎{:<17.9} ◎{:<17.9} {:>13.2}% {}",
|
||||
" {:<44} ◎{:<17.9} ◎{:<17.9} {:>13.9}% {:>14} {:>10}",
|
||||
keyed_reward.address,
|
||||
lamports_to_sol(reward.amount),
|
||||
lamports_to_sol(reward.post_balance),
|
||||
reward.percent_change,
|
||||
reward
|
||||
.apr
|
||||
.map(|apr| format!("{:>13.2}%", apr))
|
||||
.map(|apr| format!("{:.2}%", apr))
|
||||
.unwrap_or_default(),
|
||||
reward
|
||||
.commission
|
||||
.map(|commission| format!("{}%", commission))
|
||||
.unwrap_or_else(|| "-".to_string())
|
||||
)?;
|
||||
}
|
||||
None => {
|
||||
@ -948,13 +953,13 @@ fn show_epoch_rewards(
|
||||
writeln!(f, "Epoch Rewards:")?;
|
||||
writeln!(
|
||||
f,
|
||||
" {:<6} {:<11} {:<18} {:<18} {:>14} {:>14}",
|
||||
"Epoch", "Reward Slot", "Amount", "New Balance", "Percent Change", "APR"
|
||||
" {:<6} {:<11} {:<18} {:<18} {:>14} {:>14} {:>10}",
|
||||
"Epoch", "Reward Slot", "Amount", "New Balance", "Percent Change", "APR", "Commission"
|
||||
)?;
|
||||
for reward in epoch_rewards {
|
||||
writeln!(
|
||||
f,
|
||||
" {:<6} {:<11} ◎{:<17.9} ◎{:<17.9} {:>13.2}% {}",
|
||||
" {:<6} {:<11} ◎{:<17.9} ◎{:<17.9} {:>13.9}% {:>14} {:>10}",
|
||||
reward.epoch,
|
||||
reward.effective_slot,
|
||||
lamports_to_sol(reward.amount),
|
||||
@ -962,8 +967,12 @@ fn show_epoch_rewards(
|
||||
reward.percent_change,
|
||||
reward
|
||||
.apr
|
||||
.map(|apr| format!("{:>13.2}%", apr))
|
||||
.map(|apr| format!("{:.2}%", apr))
|
||||
.unwrap_or_default(),
|
||||
reward
|
||||
.commission
|
||||
.map(|commission| format!("{}%", commission))
|
||||
.unwrap_or_else(|| "-".to_string())
|
||||
)?;
|
||||
}
|
||||
}
|
||||
@ -1363,8 +1372,8 @@ impl fmt::Display for CliVoteAccount {
|
||||
build_balance_message(self.account_balance, self.use_lamports_unit, true)
|
||||
)?;
|
||||
writeln!(f, "Validator Identity: {}", self.validator_identity)?;
|
||||
writeln!(f, "Authorized Voters: {}", self.authorized_voters)?;
|
||||
writeln!(f, "Authorized Withdrawer: {}", self.authorized_withdrawer)?;
|
||||
writeln!(f, "Vote Authority: {}", self.authorized_voters)?;
|
||||
writeln!(f, "Withdraw Authority: {}", self.authorized_withdrawer)?;
|
||||
writeln!(f, "Credits: {}", self.credits)?;
|
||||
writeln!(f, "Commission: {}%", self.commission)?;
|
||||
writeln!(
|
||||
@ -1711,6 +1720,7 @@ pub struct CliFeesInner {
|
||||
pub blockhash: String,
|
||||
pub lamports_per_signature: u64,
|
||||
pub last_valid_slot: Option<Slot>,
|
||||
pub last_valid_block_height: Option<Slot>,
|
||||
}
|
||||
|
||||
impl QuietDisplay for CliFeesInner {}
|
||||
@ -1724,11 +1734,11 @@ impl fmt::Display for CliFeesInner {
|
||||
"Lamports per signature:",
|
||||
&self.lamports_per_signature.to_string(),
|
||||
)?;
|
||||
let last_valid_slot = self
|
||||
.last_valid_slot
|
||||
let last_valid_block_height = self
|
||||
.last_valid_block_height
|
||||
.map(|s| s.to_string())
|
||||
.unwrap_or_default();
|
||||
writeln_name_value(f, "Last valid slot:", &last_valid_slot)
|
||||
writeln_name_value(f, "Last valid block height:", &last_valid_block_height)
|
||||
}
|
||||
}
|
||||
|
||||
@ -1757,6 +1767,7 @@ impl CliFees {
|
||||
blockhash: Hash,
|
||||
lamports_per_signature: u64,
|
||||
last_valid_slot: Option<Slot>,
|
||||
last_valid_block_height: Option<Slot>,
|
||||
) -> Self {
|
||||
Self {
|
||||
inner: Some(CliFeesInner {
|
||||
@ -1764,6 +1775,7 @@ impl CliFees {
|
||||
blockhash: blockhash.to_string(),
|
||||
lamports_per_signature,
|
||||
last_valid_slot,
|
||||
last_valid_block_height,
|
||||
}),
|
||||
}
|
||||
}
|
||||
@ -2178,8 +2190,8 @@ impl fmt::Display for CliBlock {
|
||||
writeln!(f, "Rewards:")?;
|
||||
writeln!(
|
||||
f,
|
||||
" {:<44} {:^15} {:<15} {:<20} {:>14}",
|
||||
"Address", "Type", "Amount", "New Balance", "Percent Change"
|
||||
" {:<44} {:^15} {:<15} {:<20} {:>14} {:>10}",
|
||||
"Address", "Type", "Amount", "New Balance", "Percent Change", "Commission"
|
||||
)?;
|
||||
for reward in rewards {
|
||||
let sign = if reward.lamports < 0 { "-" } else { "" };
|
||||
@ -2187,7 +2199,7 @@ impl fmt::Display for CliBlock {
|
||||
total_rewards += reward.lamports;
|
||||
writeln!(
|
||||
f,
|
||||
" {:<44} {:^15} {:>15} {}",
|
||||
" {:<44} {:^15} {:>15} {} {}",
|
||||
reward.pubkey,
|
||||
if let Some(reward_type) = reward.reward_type {
|
||||
format!("{}", reward_type)
|
||||
@ -2209,7 +2221,11 @@ impl fmt::Display for CliBlock {
|
||||
/ (reward.post_balance as f64 - reward.lamports as f64))
|
||||
* 100.0
|
||||
)
|
||||
}
|
||||
},
|
||||
reward
|
||||
.commission
|
||||
.map(|commission| format!("{:>9}%", commission))
|
||||
.unwrap_or_else(|| " -".to_string())
|
||||
)?;
|
||||
}
|
||||
|
||||
@ -2450,6 +2466,10 @@ mod tests {
|
||||
fn try_sign_message(&self, _message: &[u8]) -> Result<Signature, SignerError> {
|
||||
Ok(Signature::new(&[1u8; 64]))
|
||||
}
|
||||
|
||||
fn is_interactive(&self) -> bool {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
let present: Box<dyn Signer> = Box::new(keypair_from_seed(&[2u8; 32]).unwrap());
|
||||
|
@ -140,7 +140,7 @@ fn format_account_mode(message: &Message, index: usize) -> String {
|
||||
} else {
|
||||
"-"
|
||||
},
|
||||
if message.is_writable(index, /*demote_sysvar_write_locks=*/ true) {
|
||||
if message.is_writable(index) {
|
||||
"w" // comment for consistent rust fmt (no joking; lol)
|
||||
} else {
|
||||
"-"
|
||||
|
@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
edition = "2018"
|
||||
name = "solana-cli"
|
||||
description = "Blockchain, Rebuilt for Scale"
|
||||
version = "1.7.4"
|
||||
version = "1.7.5"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@ -17,6 +17,7 @@ clap = "2.33.1"
|
||||
criterion-stats = "0.3.0"
|
||||
ctrlc = { version = "3.1.5", features = ["termination"] }
|
||||
console = "0.14.1"
|
||||
const_format = "0.2.14"
|
||||
dirs-next = "2.0.0"
|
||||
log = "0.4.11"
|
||||
Inflector = "0.11.4"
|
||||
@ -28,29 +29,29 @@ reqwest = { version = "0.11.2", default-features = false, features = ["blocking"
|
||||
serde = "1.0.122"
|
||||
serde_derive = "1.0.103"
|
||||
serde_json = "1.0.56"
|
||||
solana-account-decoder = { path = "../account-decoder", version = "=1.7.4" }
|
||||
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.7.4" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "=1.7.4" }
|
||||
solana-cli-config = { path = "../cli-config", version = "=1.7.4" }
|
||||
solana-cli-output = { path = "../cli-output", version = "=1.7.4" }
|
||||
solana-client = { path = "../client", version = "=1.7.4" }
|
||||
solana-config-program = { path = "../programs/config", version = "=1.7.4" }
|
||||
solana-faucet = { path = "../faucet", version = "=1.7.4" }
|
||||
solana-logger = { path = "../logger", version = "=1.7.4" }
|
||||
solana-net-utils = { path = "../net-utils", version = "=1.7.4" }
|
||||
solana-account-decoder = { path = "../account-decoder", version = "=1.7.5" }
|
||||
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.7.5" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "=1.7.5" }
|
||||
solana-cli-config = { path = "../cli-config", version = "=1.7.5" }
|
||||
solana-cli-output = { path = "../cli-output", version = "=1.7.5" }
|
||||
solana-client = { path = "../client", version = "=1.7.5" }
|
||||
solana-config-program = { path = "../programs/config", version = "=1.7.5" }
|
||||
solana-faucet = { path = "../faucet", version = "=1.7.5" }
|
||||
solana-logger = { path = "../logger", version = "=1.7.5" }
|
||||
solana-net-utils = { path = "../net-utils", version = "=1.7.5" }
|
||||
solana_rbpf = "=0.2.11"
|
||||
solana-remote-wallet = { path = "../remote-wallet", version = "=1.7.4" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.7.4" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "=1.7.4" }
|
||||
solana-version = { path = "../version", version = "=1.7.4" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "=1.7.4" }
|
||||
solana-remote-wallet = { path = "../remote-wallet", version = "=1.7.5" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.7.5" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "=1.7.5" }
|
||||
solana-version = { path = "../version", version = "=1.7.5" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "=1.7.5" }
|
||||
spl-memo = { version = "=3.0.1", features = ["no-entrypoint"] }
|
||||
thiserror = "1.0.21"
|
||||
tiny-bip39 = "0.7.0"
|
||||
url = "2.1.1"
|
||||
|
||||
[dev-dependencies]
|
||||
solana-core = { path = "../core", version = "=1.7.4" }
|
||||
solana-core = { path = "../core", version = "=1.7.5" }
|
||||
tempfile = "3.1.0"
|
||||
|
||||
[[bin]]
|
||||
|
145
cli/src/cli.rs
145
cli/src/cli.rs
@ -25,15 +25,14 @@ use solana_cli_output::{
|
||||
};
|
||||
use solana_client::{
|
||||
blockhash_query::BlockhashQuery,
|
||||
client_error::{ClientError, ClientErrorKind, Result as ClientResult},
|
||||
client_error::{ClientError, Result as ClientResult},
|
||||
nonce_utils,
|
||||
rpc_client::RpcClient,
|
||||
rpc_config::{
|
||||
RpcLargestAccountsFilter, RpcSendTransactionConfig, RpcTransactionConfig,
|
||||
RpcTransactionLogsFilter,
|
||||
},
|
||||
rpc_request::{RpcError, RpcResponseErrorData},
|
||||
rpc_response::{RpcKeyedAccount, RpcSimulateTransactionResult},
|
||||
rpc_response::RpcKeyedAccount,
|
||||
};
|
||||
use solana_remote_wallet::remote_wallet::RemoteWalletManager;
|
||||
use solana_sdk::{
|
||||
@ -45,11 +44,7 @@ use solana_sdk::{
|
||||
message::Message,
|
||||
pubkey::Pubkey,
|
||||
signature::{Signature, Signer, SignerError},
|
||||
stake::{
|
||||
self,
|
||||
instruction::LockupArgs,
|
||||
state::{Lockup, StakeAuthorize},
|
||||
},
|
||||
stake::{self, instruction::LockupArgs, state::Lockup},
|
||||
system_instruction::{self, SystemError},
|
||||
system_program,
|
||||
transaction::{Transaction, TransactionError},
|
||||
@ -64,6 +59,7 @@ use thiserror::Error;
|
||||
|
||||
pub const DEFAULT_RPC_TIMEOUT_SECONDS: &str = "30";
|
||||
pub const DEFAULT_CONFIRM_TX_TIMEOUT_SECONDS: &str = "5";
|
||||
const CHECKED: bool = true;
|
||||
|
||||
#[derive(Debug, PartialEq)]
|
||||
#[allow(clippy::large_enum_variant)]
|
||||
@ -137,6 +133,8 @@ pub enum CliCommand {
|
||||
sort_order: CliValidatorsSortOrder,
|
||||
reverse_sort: bool,
|
||||
number_validators: bool,
|
||||
keep_unstaked_delinquents: bool,
|
||||
delinquent_slot_distance: Option<Slot>,
|
||||
},
|
||||
Supply {
|
||||
print_accounts: bool,
|
||||
@ -197,6 +195,7 @@ pub enum CliCommand {
|
||||
seed: Option<String>,
|
||||
staker: Option<Pubkey>,
|
||||
withdrawer: Option<Pubkey>,
|
||||
withdrawer_signer: Option<SignerIndex>,
|
||||
lockup: Lockup,
|
||||
amount: SpendAmount,
|
||||
sign_only: bool,
|
||||
@ -270,7 +269,7 @@ pub enum CliCommand {
|
||||
},
|
||||
StakeAuthorize {
|
||||
stake_account_pubkey: Pubkey,
|
||||
new_authorizations: Vec<(StakeAuthorize, Pubkey, SignerIndex)>,
|
||||
new_authorizations: Vec<StakeAuthorizationIndexed>,
|
||||
sign_only: bool,
|
||||
dump_transaction_message: bool,
|
||||
blockhash_query: BlockhashQuery,
|
||||
@ -285,6 +284,7 @@ pub enum CliCommand {
|
||||
stake_account_pubkey: Pubkey,
|
||||
lockup: LockupArgs,
|
||||
custodian: SignerIndex,
|
||||
new_custodian_signer: Option<SignerIndex>,
|
||||
sign_only: bool,
|
||||
dump_transaction_message: bool,
|
||||
blockhash_query: BlockhashQuery,
|
||||
@ -342,6 +342,8 @@ pub enum CliCommand {
|
||||
new_authorized_pubkey: Pubkey,
|
||||
vote_authorize: VoteAuthorize,
|
||||
memo: Option<String>,
|
||||
authorized: SignerIndex,
|
||||
new_authorized: Option<SignerIndex>,
|
||||
},
|
||||
VoteUpdateValidator {
|
||||
vote_account_pubkey: Pubkey,
|
||||
@ -718,7 +720,10 @@ pub fn parse_command(
|
||||
}
|
||||
// Stake Commands
|
||||
("create-stake-account", Some(matches)) => {
|
||||
parse_create_stake_account(matches, default_signer, wallet_manager)
|
||||
parse_create_stake_account(matches, default_signer, wallet_manager, !CHECKED)
|
||||
}
|
||||
("create-stake-account-checked", Some(matches)) => {
|
||||
parse_create_stake_account(matches, default_signer, wallet_manager, CHECKED)
|
||||
}
|
||||
("delegate-stake", Some(matches)) => {
|
||||
parse_stake_delegate_stake(matches, default_signer, wallet_manager)
|
||||
@ -736,10 +741,16 @@ pub fn parse_command(
|
||||
parse_merge_stake(matches, default_signer, wallet_manager)
|
||||
}
|
||||
("stake-authorize", Some(matches)) => {
|
||||
parse_stake_authorize(matches, default_signer, wallet_manager)
|
||||
parse_stake_authorize(matches, default_signer, wallet_manager, !CHECKED)
|
||||
}
|
||||
("stake-authorize-checked", Some(matches)) => {
|
||||
parse_stake_authorize(matches, default_signer, wallet_manager, CHECKED)
|
||||
}
|
||||
("stake-set-lockup", Some(matches)) => {
|
||||
parse_stake_set_lockup(matches, default_signer, wallet_manager)
|
||||
parse_stake_set_lockup(matches, default_signer, wallet_manager, !CHECKED)
|
||||
}
|
||||
("stake-set-lockup-checked", Some(matches)) => {
|
||||
parse_stake_set_lockup(matches, default_signer, wallet_manager, CHECKED)
|
||||
}
|
||||
("stake-account", Some(matches)) => parse_show_stake_account(matches, wallet_manager),
|
||||
("stake-history", Some(matches)) => parse_show_stake_history(matches),
|
||||
@ -766,12 +777,28 @@ pub fn parse_command(
|
||||
default_signer,
|
||||
wallet_manager,
|
||||
VoteAuthorize::Voter,
|
||||
!CHECKED,
|
||||
),
|
||||
("vote-authorize-withdrawer", Some(matches)) => parse_vote_authorize(
|
||||
matches,
|
||||
default_signer,
|
||||
wallet_manager,
|
||||
VoteAuthorize::Withdrawer,
|
||||
!CHECKED,
|
||||
),
|
||||
("vote-authorize-voter-checked", Some(matches)) => parse_vote_authorize(
|
||||
matches,
|
||||
default_signer,
|
||||
wallet_manager,
|
||||
VoteAuthorize::Voter,
|
||||
CHECKED,
|
||||
),
|
||||
("vote-authorize-withdrawer-checked", Some(matches)) => parse_vote_authorize(
|
||||
matches,
|
||||
default_signer,
|
||||
wallet_manager,
|
||||
VoteAuthorize::Withdrawer,
|
||||
CHECKED,
|
||||
),
|
||||
("vote-account", Some(matches)) => parse_vote_get_account_command(matches, wallet_manager),
|
||||
("withdraw-from-vote-account", Some(matches)) => {
|
||||
@ -1397,6 +1424,8 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
|
||||
sort_order,
|
||||
reverse_sort,
|
||||
number_validators,
|
||||
keep_unstaked_delinquents,
|
||||
delinquent_slot_distance,
|
||||
} => process_show_validators(
|
||||
&rpc_client,
|
||||
config,
|
||||
@ -1404,6 +1433,8 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
|
||||
*sort_order,
|
||||
*reverse_sort,
|
||||
*number_validators,
|
||||
*keep_unstaked_delinquents,
|
||||
*delinquent_slot_distance,
|
||||
),
|
||||
CliCommand::Supply { print_accounts } => {
|
||||
process_supply(&rpc_client, config, *print_accounts)
|
||||
@ -1528,6 +1559,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
|
||||
seed,
|
||||
staker,
|
||||
withdrawer,
|
||||
withdrawer_signer,
|
||||
lockup,
|
||||
amount,
|
||||
sign_only,
|
||||
@ -1545,6 +1577,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
|
||||
seed,
|
||||
staker,
|
||||
withdrawer,
|
||||
*withdrawer_signer,
|
||||
lockup,
|
||||
*amount,
|
||||
*sign_only,
|
||||
@ -1706,8 +1739,9 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
|
||||
),
|
||||
CliCommand::StakeSetLockup {
|
||||
stake_account_pubkey,
|
||||
mut lockup,
|
||||
lockup,
|
||||
custodian,
|
||||
new_custodian_signer,
|
||||
sign_only,
|
||||
dump_transaction_message,
|
||||
blockhash_query,
|
||||
@ -1719,7 +1753,8 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
|
||||
&rpc_client,
|
||||
config,
|
||||
stake_account_pubkey,
|
||||
&mut lockup,
|
||||
lockup,
|
||||
*new_custodian_signer,
|
||||
*custodian,
|
||||
*sign_only,
|
||||
*dump_transaction_message,
|
||||
@ -1833,12 +1868,16 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
|
||||
new_authorized_pubkey,
|
||||
vote_authorize,
|
||||
memo,
|
||||
authorized,
|
||||
new_authorized,
|
||||
} => process_vote_authorize(
|
||||
&rpc_client,
|
||||
config,
|
||||
vote_account_pubkey,
|
||||
new_authorized_pubkey,
|
||||
*vote_authorize,
|
||||
*authorized,
|
||||
*new_authorized,
|
||||
memo.as_ref(),
|
||||
),
|
||||
CliCommand::VoteUpdateValidator {
|
||||
@ -1949,43 +1988,41 @@ pub fn request_and_confirm_airdrop(
|
||||
Ok(signature)
|
||||
}
|
||||
|
||||
fn common_error_adapter<E>(ix_error: &InstructionError) -> Option<E>
|
||||
where
|
||||
E: 'static + std::error::Error + DecodeError<E> + FromPrimitive,
|
||||
{
|
||||
if let InstructionError::Custom(code) = ix_error {
|
||||
E::decode_custom_error_to_enum(*code)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
pub fn log_instruction_custom_error<E>(
|
||||
result: ClientResult<Signature>,
|
||||
config: &CliConfig,
|
||||
) -> ProcessResult
|
||||
where
|
||||
E: 'static + std::error::Error + DecodeError<E> + FromPrimitive,
|
||||
{
|
||||
log_instruction_custom_error_ex::<E, _>(result, config, common_error_adapter)
|
||||
}
|
||||
|
||||
pub fn log_instruction_custom_error_ex<E, F>(
|
||||
result: ClientResult<Signature>,
|
||||
config: &CliConfig,
|
||||
error_adapter: F,
|
||||
) -> ProcessResult
|
||||
where
|
||||
E: 'static + std::error::Error + DecodeError<E> + FromPrimitive,
|
||||
F: Fn(&InstructionError) -> Option<E>,
|
||||
{
|
||||
match result {
|
||||
Err(err) => {
|
||||
// If transaction simulation returns a known Custom InstructionError, decode it
|
||||
if let ClientErrorKind::RpcError(RpcError::RpcResponseError {
|
||||
data:
|
||||
RpcResponseErrorData::SendTransactionPreflightFailure(
|
||||
RpcSimulateTransactionResult {
|
||||
err:
|
||||
Some(TransactionError::InstructionError(
|
||||
_,
|
||||
InstructionError::Custom(code),
|
||||
)),
|
||||
..
|
||||
},
|
||||
),
|
||||
..
|
||||
}) = err.kind()
|
||||
{
|
||||
if let Some(specific_error) = E::decode_custom_error_to_enum(*code) {
|
||||
return Err(specific_error.into());
|
||||
}
|
||||
}
|
||||
// If the transaction was instead submitted and returned a known Custom
|
||||
// InstructionError, decode it
|
||||
if let ClientErrorKind::TransactionError(TransactionError::InstructionError(
|
||||
_,
|
||||
InstructionError::Custom(code),
|
||||
)) = err.kind()
|
||||
{
|
||||
if let Some(specific_error) = E::decode_custom_error_to_enum(*code) {
|
||||
let maybe_tx_err = err.get_transaction_error();
|
||||
if let Some(TransactionError::InstructionError(_, ix_error)) = maybe_tx_err {
|
||||
if let Some(specific_error) = error_adapter(&ix_error) {
|
||||
return Err(specific_error.into());
|
||||
}
|
||||
}
|
||||
@ -2023,22 +2060,7 @@ pub fn app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> App<'ab, '
|
||||
.stake_subcommands()
|
||||
.subcommand(
|
||||
SubCommand::with_name("airdrop")
|
||||
.about("Request lamports")
|
||||
.arg(
|
||||
Arg::with_name("faucet_host")
|
||||
.long("faucet-host")
|
||||
.value_name("URL")
|
||||
.takes_value(true)
|
||||
.help("Faucet host to use [default: the --url host]"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("faucet_port")
|
||||
.long("faucet-port")
|
||||
.value_name("PORT_NUMBER")
|
||||
.takes_value(true)
|
||||
.default_value(solana_faucet::faucet::FAUCET_PORT_STR)
|
||||
.help("Faucet port to use"),
|
||||
)
|
||||
.about("Request SOL from a faucet")
|
||||
.arg(
|
||||
Arg::with_name("amount")
|
||||
.index(1)
|
||||
@ -2250,7 +2272,7 @@ pub fn app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> App<'ab, '
|
||||
)
|
||||
.offline_args()
|
||||
.nonce_args(false)
|
||||
.arg(memo_arg())
|
||||
.arg(memo_arg())
|
||||
.arg(fee_payer_arg()),
|
||||
)
|
||||
.subcommand(
|
||||
@ -2657,6 +2679,8 @@ mod tests {
|
||||
new_authorized_pubkey,
|
||||
vote_authorize: VoteAuthorize::Voter,
|
||||
memo: None,
|
||||
authorized: 0,
|
||||
new_authorized: None,
|
||||
};
|
||||
let result = process_command(&config);
|
||||
assert!(result.is_ok());
|
||||
@ -2680,6 +2704,7 @@ mod tests {
|
||||
seed: None,
|
||||
staker: None,
|
||||
withdrawer: None,
|
||||
withdrawer_signer: None,
|
||||
lockup: Lockup {
|
||||
epoch: 0,
|
||||
unix_timestamp: 0,
|
||||
@ -2852,6 +2877,8 @@ mod tests {
|
||||
new_authorized_pubkey: bob_pubkey,
|
||||
vote_authorize: VoteAuthorize::Voter,
|
||||
memo: None,
|
||||
authorized: 0,
|
||||
new_authorized: None,
|
||||
};
|
||||
assert!(process_command(&config).is_err());
|
||||
|
||||
|
@ -24,11 +24,12 @@ use solana_client::{
|
||||
pubsub_client::PubsubClient,
|
||||
rpc_client::{GetConfirmedSignaturesForAddress2Config, RpcClient},
|
||||
rpc_config::{
|
||||
RpcAccountInfoConfig, RpcBlockConfig, RpcLargestAccountsConfig, RpcLargestAccountsFilter,
|
||||
RpcProgramAccountsConfig, RpcTransactionConfig, RpcTransactionLogsConfig,
|
||||
RpcTransactionLogsFilter,
|
||||
RpcAccountInfoConfig, RpcBlockConfig, RpcGetVoteAccountsConfig, RpcLargestAccountsConfig,
|
||||
RpcLargestAccountsFilter, RpcProgramAccountsConfig, RpcTransactionConfig,
|
||||
RpcTransactionLogsConfig, RpcTransactionLogsFilter,
|
||||
},
|
||||
rpc_filter,
|
||||
rpc_request::DELINQUENT_VALIDATOR_SLOT_DISTANCE,
|
||||
rpc_response::SlotInfo,
|
||||
};
|
||||
use solana_remote_wallet::remote_wallet::RemoteWalletManager;
|
||||
@ -176,7 +177,7 @@ impl ClusterQuerySubCommands for App<'_, '_> {
|
||||
.takes_value(true)
|
||||
.value_name("EPOCH")
|
||||
.validator(is_epoch)
|
||||
.help("Epoch to show leader schedule for. (default: current)")
|
||||
.help("Epoch to show leader schedule for. [default: current]")
|
||||
)
|
||||
)
|
||||
.subcommand(
|
||||
@ -382,6 +383,25 @@ impl ClusterQuerySubCommands for App<'_, '_> {
|
||||
])
|
||||
.default_value("stake")
|
||||
.help("Sort order (does not affect JSON output)"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("keep_unstaked_delinquents")
|
||||
.long("keep-unstaked-delinquents")
|
||||
.takes_value(false)
|
||||
.help("Don't discard unstaked, delinquent validators")
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("delinquent_slot_distance")
|
||||
.long("delinquent-slot-distance")
|
||||
.takes_value(true)
|
||||
.value_name("SLOT_DISTANCE")
|
||||
.validator(is_slot)
|
||||
.help(
|
||||
concatcp!(
|
||||
"Minimum slot distance from the tip to consider a validator delinquent. [default: ",
|
||||
DELINQUENT_VALIDATOR_SLOT_DISTANCE,
|
||||
"]",
|
||||
))
|
||||
),
|
||||
)
|
||||
.subcommand(
|
||||
@ -617,6 +637,8 @@ pub fn parse_show_validators(matches: &ArgMatches<'_>) -> Result<CliCommandInfo,
|
||||
let use_lamports_unit = matches.is_present("lamports");
|
||||
let number_validators = matches.is_present("number");
|
||||
let reverse_sort = matches.is_present("reverse");
|
||||
let keep_unstaked_delinquents = matches.is_present("keep_unstaked_delinquents");
|
||||
let delinquent_slot_distance = value_of(matches, "delinquent_slot_distance");
|
||||
|
||||
let sort_order = match value_t_or_exit!(matches, "sort", String).as_str() {
|
||||
"delinquent" => CliValidatorsSortOrder::Delinquent,
|
||||
@ -637,6 +659,8 @@ pub fn parse_show_validators(matches: &ArgMatches<'_>) -> Result<CliCommandInfo,
|
||||
sort_order,
|
||||
reverse_sort,
|
||||
number_validators,
|
||||
keep_unstaked_delinquents,
|
||||
delinquent_slot_distance,
|
||||
},
|
||||
signers: vec![],
|
||||
})
|
||||
@ -938,18 +962,19 @@ pub fn process_fees(
|
||||
*recent_blockhash,
|
||||
fee_calculator.lamports_per_signature,
|
||||
None,
|
||||
None,
|
||||
)
|
||||
} else {
|
||||
CliFees::none()
|
||||
}
|
||||
} else {
|
||||
let result = rpc_client.get_recent_blockhash_with_commitment(config.commitment)?;
|
||||
let (recent_blockhash, fee_calculator, last_valid_slot) = result.value;
|
||||
let result = rpc_client.get_fees_with_commitment(config.commitment)?;
|
||||
CliFees::some(
|
||||
result.context.slot,
|
||||
recent_blockhash,
|
||||
fee_calculator.lamports_per_signature,
|
||||
Some(last_valid_slot),
|
||||
result.value.blockhash,
|
||||
result.value.fee_calculator.lamports_per_signature,
|
||||
None,
|
||||
Some(result.value.last_valid_block_height),
|
||||
)
|
||||
};
|
||||
Ok(config.output_format.formatted_string(&fees))
|
||||
@ -1792,11 +1817,17 @@ pub fn process_show_validators(
|
||||
validators_sort_order: CliValidatorsSortOrder,
|
||||
validators_reverse_sort: bool,
|
||||
number_validators: bool,
|
||||
keep_unstaked_delinquents: bool,
|
||||
delinquent_slot_distance: Option<Slot>,
|
||||
) -> ProcessResult {
|
||||
let progress_bar = new_spinner_progress_bar();
|
||||
progress_bar.set_message("Fetching vote accounts...");
|
||||
let epoch_info = rpc_client.get_epoch_info()?;
|
||||
let vote_accounts = rpc_client.get_vote_accounts()?;
|
||||
let vote_accounts = rpc_client.get_vote_accounts_with_config(RpcGetVoteAccountsConfig {
|
||||
keep_unstaked_delinquents: Some(keep_unstaked_delinquents),
|
||||
delinquent_slot_distance,
|
||||
..RpcGetVoteAccountsConfig::default()
|
||||
})?;
|
||||
|
||||
progress_bar.set_message("Fetching block production...");
|
||||
let skip_rate: HashMap<_, _> = rpc_client
|
||||
|
@ -10,6 +10,7 @@ use solana_cli_output::{QuietDisplay, VerboseDisplay};
|
||||
use solana_client::{client_error::ClientError, rpc_client::RpcClient};
|
||||
use solana_remote_wallet::remote_wallet::RemoteWalletManager;
|
||||
use solana_sdk::{
|
||||
account::Account,
|
||||
clock::Slot,
|
||||
feature::{self, Feature},
|
||||
feature_set::FEATURE_NAMES,
|
||||
@ -312,6 +313,31 @@ fn feature_activation_allowed(rpc_client: &RpcClient, quiet: bool) -> Result<boo
|
||||
Ok(feature_activation_allowed)
|
||||
}
|
||||
|
||||
fn status_from_account(account: Account) -> Option<CliFeatureStatus> {
|
||||
feature::from_account(&account).map(|feature| match feature.activated_at {
|
||||
None => CliFeatureStatus::Pending,
|
||||
Some(activation_slot) => CliFeatureStatus::Active(activation_slot),
|
||||
})
|
||||
}
|
||||
|
||||
fn get_feature_status(
|
||||
rpc_client: &RpcClient,
|
||||
feature_id: &Pubkey,
|
||||
) -> Result<Option<CliFeatureStatus>, Box<dyn std::error::Error>> {
|
||||
rpc_client
|
||||
.get_account(feature_id)
|
||||
.map(status_from_account)
|
||||
.map_err(|e| e.into())
|
||||
}
|
||||
|
||||
pub fn get_feature_is_active(
|
||||
rpc_client: &RpcClient,
|
||||
feature_id: &Pubkey,
|
||||
) -> Result<bool, Box<dyn std::error::Error>> {
|
||||
get_feature_status(rpc_client, feature_id)
|
||||
.map(|status| matches!(status, Some(CliFeatureStatus::Active(_))))
|
||||
}
|
||||
|
||||
fn process_status(
|
||||
rpc_client: &RpcClient,
|
||||
config: &CliConfig,
|
||||
@ -327,11 +353,7 @@ fn process_status(
|
||||
let feature_id = &feature_ids[i];
|
||||
let feature_name = FEATURE_NAMES.get(feature_id).unwrap();
|
||||
if let Some(account) = account {
|
||||
if let Some(feature) = feature::from_account(&account) {
|
||||
let feature_status = match feature.activated_at {
|
||||
None => CliFeatureStatus::Pending,
|
||||
Some(activation_slot) => CliFeatureStatus::Active(activation_slot),
|
||||
};
|
||||
if let Some(feature_status) = status_from_account(account) {
|
||||
features.push(CliFeature {
|
||||
id: feature_id.to_string(),
|
||||
description: feature_name.to_string(),
|
||||
|
@ -18,6 +18,9 @@ macro_rules! pubkey {
|
||||
};
|
||||
}
|
||||
|
||||
#[macro_use]
|
||||
extern crate const_format;
|
||||
|
||||
extern crate serde_derive;
|
||||
|
||||
pub mod checks;
|
||||
|
105
cli/src/nonce.rs
105
cli/src/nonce.rs
@ -1,9 +1,10 @@
|
||||
use crate::{
|
||||
checks::{check_account_for_fee_with_commitment, check_unique_pubkeys},
|
||||
cli::{
|
||||
log_instruction_custom_error, CliCommand, CliCommandInfo, CliConfig, CliError,
|
||||
ProcessResult,
|
||||
log_instruction_custom_error, log_instruction_custom_error_ex, CliCommand, CliCommandInfo,
|
||||
CliConfig, CliError, ProcessResult,
|
||||
},
|
||||
feature::get_feature_is_active,
|
||||
memo::WithMemo,
|
||||
spend_utils::{resolve_spend_tx_and_check_account_balance, SpendAmount},
|
||||
};
|
||||
@ -12,7 +13,7 @@ use solana_clap_utils::{
|
||||
input_parsers::*,
|
||||
input_validators::*,
|
||||
keypair::{DefaultSigner, SignerIndex},
|
||||
memo::MEMO_ARG,
|
||||
memo::{memo_arg, MEMO_ARG},
|
||||
nonce::*,
|
||||
};
|
||||
use solana_cli_output::CliNonceAccount;
|
||||
@ -20,16 +21,19 @@ use solana_client::{nonce_utils::*, rpc_client::RpcClient};
|
||||
use solana_remote_wallet::remote_wallet::RemoteWalletManager;
|
||||
use solana_sdk::{
|
||||
account::Account,
|
||||
feature_set::merge_nonce_error_into_system_error,
|
||||
hash::Hash,
|
||||
instruction::InstructionError,
|
||||
message::Message,
|
||||
nonce::{self, State},
|
||||
pubkey::Pubkey,
|
||||
system_instruction::{
|
||||
advance_nonce_account, authorize_nonce_account, create_nonce_account,
|
||||
create_nonce_account_with_seed, withdraw_nonce_account, NonceError, SystemError,
|
||||
create_nonce_account_with_seed, instruction_to_nonce_error, withdraw_nonce_account,
|
||||
NonceError, SystemError,
|
||||
},
|
||||
system_program,
|
||||
transaction::Transaction,
|
||||
transaction::{Transaction, TransactionError},
|
||||
};
|
||||
use std::sync::Arc;
|
||||
|
||||
@ -56,7 +60,8 @@ impl NonceSubCommands for App<'_, '_> {
|
||||
.required(true),
|
||||
"Account to be granted authority of the nonce account. "),
|
||||
)
|
||||
.arg(nonce_authority_arg()),
|
||||
.arg(nonce_authority_arg())
|
||||
.arg(memo_arg()),
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("create-nonce-account")
|
||||
@ -91,7 +96,8 @@ impl NonceSubCommands for App<'_, '_> {
|
||||
.value_name("STRING")
|
||||
.takes_value(true)
|
||||
.help("Seed for address generation; if specified, the resulting account will be at a derived address of the NONCE_ACCOUNT pubkey")
|
||||
),
|
||||
)
|
||||
.arg(memo_arg()),
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("nonce")
|
||||
@ -115,7 +121,8 @@ impl NonceSubCommands for App<'_, '_> {
|
||||
.required(true),
|
||||
"Address of the nonce account. "),
|
||||
)
|
||||
.arg(nonce_authority_arg()),
|
||||
.arg(nonce_authority_arg())
|
||||
.arg(memo_arg()),
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("nonce-account")
|
||||
@ -161,7 +168,8 @@ impl NonceSubCommands for App<'_, '_> {
|
||||
.validator(is_amount)
|
||||
.help("The amount to withdraw from the nonce account, in SOL"),
|
||||
)
|
||||
.arg(nonce_authority_arg()),
|
||||
.arg(nonce_authority_arg())
|
||||
.arg(memo_arg()),
|
||||
)
|
||||
}
|
||||
}
|
||||
@ -363,8 +371,21 @@ pub fn process_authorize_nonce_account(
|
||||
&tx.message,
|
||||
config.commitment,
|
||||
)?;
|
||||
let merge_errors =
|
||||
get_feature_is_active(rpc_client, &merge_nonce_error_into_system_error::id())?;
|
||||
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
|
||||
log_instruction_custom_error::<NonceError>(result, config)
|
||||
|
||||
if merge_errors {
|
||||
log_instruction_custom_error::<SystemError>(result, config)
|
||||
} else {
|
||||
log_instruction_custom_error_ex::<NonceError, _>(result, config, |ix_error| {
|
||||
if let InstructionError::Custom(_) = ix_error {
|
||||
instruction_to_nonce_error(ix_error, merge_errors)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
pub fn process_create_nonce_account(
|
||||
@ -448,8 +469,40 @@ pub fn process_create_nonce_account(
|
||||
|
||||
let mut tx = Transaction::new_unsigned(message);
|
||||
tx.try_sign(&config.signers, recent_blockhash)?;
|
||||
let merge_errors =
|
||||
get_feature_is_active(rpc_client, &merge_nonce_error_into_system_error::id())?;
|
||||
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
|
||||
log_instruction_custom_error::<SystemError>(result, config)
|
||||
|
||||
let err_ix_index = if let Err(err) = &result {
|
||||
err.get_transaction_error().and_then(|tx_err| {
|
||||
if let TransactionError::InstructionError(ix_index, _) = tx_err {
|
||||
Some(ix_index)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
match err_ix_index {
|
||||
// SystemInstruction::InitializeNonceAccount failed
|
||||
Some(1) => {
|
||||
if merge_errors {
|
||||
log_instruction_custom_error::<SystemError>(result, config)
|
||||
} else {
|
||||
log_instruction_custom_error_ex::<NonceError, _>(result, config, |ix_error| {
|
||||
if let InstructionError::Custom(_) = ix_error {
|
||||
instruction_to_nonce_error(ix_error, merge_errors)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
// SystemInstruction::CreateAccount{,WithSeed} failed
|
||||
_ => log_instruction_custom_error::<SystemError>(result, config),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn process_get_nonce(
|
||||
@ -502,8 +555,21 @@ pub fn process_new_nonce(
|
||||
&tx.message,
|
||||
config.commitment,
|
||||
)?;
|
||||
let merge_errors =
|
||||
get_feature_is_active(rpc_client, &merge_nonce_error_into_system_error::id())?;
|
||||
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
|
||||
log_instruction_custom_error::<SystemError>(result, config)
|
||||
|
||||
if merge_errors {
|
||||
log_instruction_custom_error::<SystemError>(result, config)
|
||||
} else {
|
||||
log_instruction_custom_error_ex::<NonceError, _>(result, config, |ix_error| {
|
||||
if let InstructionError::Custom(_) = ix_error {
|
||||
instruction_to_nonce_error(ix_error, merge_errors)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
pub fn process_show_nonce_account(
|
||||
@ -565,8 +631,21 @@ pub fn process_withdraw_from_nonce_account(
|
||||
&tx.message,
|
||||
config.commitment,
|
||||
)?;
|
||||
let merge_errors =
|
||||
get_feature_is_active(rpc_client, &merge_nonce_error_into_system_error::id())?;
|
||||
let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx);
|
||||
log_instruction_custom_error::<NonceError>(result, config)
|
||||
|
||||
if merge_errors {
|
||||
log_instruction_custom_error::<SystemError>(result, config)
|
||||
} else {
|
||||
log_instruction_custom_error_ex::<NonceError, _>(result, config, |ix_error| {
|
||||
if let InstructionError::Custom(_) = ix_error {
|
||||
instruction_to_nonce_error(ix_error, merge_errors)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
|
931
cli/src/stake.rs
931
cli/src/stake.rs
File diff suppressed because it is too large
Load Diff
200
cli/src/vote.rs
200
cli/src/vote.rs
@ -82,7 +82,7 @@ impl VoteSubCommands for App<'_, '_> {
|
||||
.takes_value(true)
|
||||
.help("Seed for address generation; if specified, the resulting account will be at a derived address of the VOTE ACCOUNT pubkey")
|
||||
)
|
||||
.arg(memo_arg())
|
||||
.arg(memo_arg())
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("vote-authorize-voter")
|
||||
@ -109,7 +109,7 @@ impl VoteSubCommands for App<'_, '_> {
|
||||
.required(true),
|
||||
"New authorized vote signer. "),
|
||||
)
|
||||
.arg(memo_arg())
|
||||
.arg(memo_arg())
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("vote-authorize-withdrawer")
|
||||
@ -136,7 +136,65 @@ impl VoteSubCommands for App<'_, '_> {
|
||||
.required(true),
|
||||
"New authorized withdrawer. "),
|
||||
)
|
||||
.arg(memo_arg())
|
||||
.arg(memo_arg())
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("vote-authorize-voter-checked")
|
||||
.about("Authorize a new vote signing keypair for the given vote account, \
|
||||
checking the new authority as a signer")
|
||||
.arg(
|
||||
pubkey!(Arg::with_name("vote_account_pubkey")
|
||||
.index(1)
|
||||
.value_name("VOTE_ACCOUNT_ADDRESS")
|
||||
.required(true),
|
||||
"Vote account in which to set the authorized voter. "),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("authorized")
|
||||
.index(2)
|
||||
.value_name("AUTHORIZED_KEYPAIR")
|
||||
.required(true)
|
||||
.validator(is_valid_signer)
|
||||
.help("Current authorized vote signer."),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("new_authorized")
|
||||
.index(3)
|
||||
.value_name("NEW_AUTHORIZED_KEYPAIR")
|
||||
.required(true)
|
||||
.validator(is_valid_signer)
|
||||
.help("New authorized vote signer."),
|
||||
)
|
||||
.arg(memo_arg())
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("vote-authorize-withdrawer-checked")
|
||||
.about("Authorize a new withdraw signing keypair for the given vote account, \
|
||||
checking the new authority as a signer")
|
||||
.arg(
|
||||
pubkey!(Arg::with_name("vote_account_pubkey")
|
||||
.index(1)
|
||||
.value_name("VOTE_ACCOUNT_ADDRESS")
|
||||
.required(true),
|
||||
"Vote account in which to set the authorized withdrawer. "),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("authorized")
|
||||
.index(2)
|
||||
.value_name("AUTHORIZED_KEYPAIR")
|
||||
.required(true)
|
||||
.validator(is_valid_signer)
|
||||
.help("Current authorized withdrawer."),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("new_authorized")
|
||||
.index(3)
|
||||
.value_name("NEW_AUTHORIZED_KEYPAIR")
|
||||
.required(true)
|
||||
.validator(is_valid_signer)
|
||||
.help("New authorized withdrawer."),
|
||||
)
|
||||
.arg(memo_arg())
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("vote-update-validator")
|
||||
@ -166,7 +224,7 @@ impl VoteSubCommands for App<'_, '_> {
|
||||
.validator(is_valid_signer)
|
||||
.help("Authorized withdrawer keypair"),
|
||||
)
|
||||
.arg(memo_arg())
|
||||
.arg(memo_arg())
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("vote-update-commission")
|
||||
@ -196,7 +254,7 @@ impl VoteSubCommands for App<'_, '_> {
|
||||
.validator(is_valid_signer)
|
||||
.help("Authorized withdrawer keypair"),
|
||||
)
|
||||
.arg(memo_arg())
|
||||
.arg(memo_arg())
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("vote-account")
|
||||
@ -266,7 +324,7 @@ impl VoteSubCommands for App<'_, '_> {
|
||||
.validator(is_valid_signer)
|
||||
.help("Authorized withdrawer [default: cli config keypair]"),
|
||||
)
|
||||
.arg(memo_arg())
|
||||
.arg(memo_arg())
|
||||
)
|
||||
}
|
||||
}
|
||||
@ -311,19 +369,25 @@ pub fn parse_vote_authorize(
|
||||
default_signer: &DefaultSigner,
|
||||
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
|
||||
vote_authorize: VoteAuthorize,
|
||||
checked: bool,
|
||||
) -> Result<CliCommandInfo, CliError> {
|
||||
let vote_account_pubkey =
|
||||
pubkey_of_signer(matches, "vote_account_pubkey", wallet_manager)?.unwrap();
|
||||
let new_authorized_pubkey =
|
||||
pubkey_of_signer(matches, "new_authorized_pubkey", wallet_manager)?.unwrap();
|
||||
let (authorized, _) = signer_of(matches, "authorized", wallet_manager)?;
|
||||
let (authorized, authorized_pubkey) = signer_of(matches, "authorized", wallet_manager)?;
|
||||
|
||||
let payer_provided = None;
|
||||
let signer_info = default_signer.generate_unique_signers(
|
||||
vec![payer_provided, authorized],
|
||||
matches,
|
||||
wallet_manager,
|
||||
)?;
|
||||
let mut signers = vec![payer_provided, authorized];
|
||||
|
||||
let new_authorized_pubkey = if checked {
|
||||
let (new_authorized_signer, new_authorized_pubkey) =
|
||||
signer_of(matches, "new_authorized", wallet_manager)?;
|
||||
signers.push(new_authorized_signer);
|
||||
new_authorized_pubkey.unwrap()
|
||||
} else {
|
||||
pubkey_of_signer(matches, "new_authorized_pubkey", wallet_manager)?.unwrap()
|
||||
};
|
||||
|
||||
let signer_info = default_signer.generate_unique_signers(signers, matches, wallet_manager)?;
|
||||
let memo = matches.value_of(MEMO_ARG.name).map(String::from);
|
||||
|
||||
Ok(CliCommandInfo {
|
||||
@ -332,6 +396,12 @@ pub fn parse_vote_authorize(
|
||||
new_authorized_pubkey,
|
||||
vote_authorize,
|
||||
memo,
|
||||
authorized: signer_info.index_of(authorized_pubkey).unwrap(),
|
||||
new_authorized: if checked {
|
||||
signer_info.index_of(Some(new_authorized_pubkey))
|
||||
} else {
|
||||
None
|
||||
},
|
||||
},
|
||||
signers: signer_info.signers,
|
||||
})
|
||||
@ -558,28 +628,34 @@ pub fn process_vote_authorize(
|
||||
vote_account_pubkey: &Pubkey,
|
||||
new_authorized_pubkey: &Pubkey,
|
||||
vote_authorize: VoteAuthorize,
|
||||
authorized: SignerIndex,
|
||||
new_authorized: Option<SignerIndex>,
|
||||
memo: Option<&String>,
|
||||
) -> ProcessResult {
|
||||
// If the `authorized_account` is also the fee payer, `config.signers` will only have one
|
||||
// keypair in it
|
||||
let authorized = if config.signers.len() == 2 {
|
||||
config.signers[1]
|
||||
} else {
|
||||
config.signers[0]
|
||||
};
|
||||
let authorized = config.signers[authorized];
|
||||
let new_authorized_signer = new_authorized.map(|index| config.signers[index]);
|
||||
|
||||
check_unique_pubkeys(
|
||||
(&authorized.pubkey(), "authorized_account".to_string()),
|
||||
(new_authorized_pubkey, "new_authorized_pubkey".to_string()),
|
||||
)?;
|
||||
let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;
|
||||
let ixs = vec![vote_instruction::authorize(
|
||||
vote_account_pubkey, // vote account to update
|
||||
&authorized.pubkey(), // current authorized
|
||||
new_authorized_pubkey, // new vote signer/withdrawer
|
||||
vote_authorize, // vote or withdraw
|
||||
)]
|
||||
.with_memo(memo);
|
||||
let vote_ix = if new_authorized_signer.is_some() {
|
||||
vote_instruction::authorize_checked(
|
||||
vote_account_pubkey, // vote account to update
|
||||
&authorized.pubkey(), // current authorized
|
||||
new_authorized_pubkey, // new vote signer/withdrawer
|
||||
vote_authorize, // vote or withdraw
|
||||
)
|
||||
} else {
|
||||
vote_instruction::authorize(
|
||||
vote_account_pubkey, // vote account to update
|
||||
&authorized.pubkey(), // current authorized
|
||||
new_authorized_pubkey, // new vote signer/withdrawer
|
||||
vote_authorize, // vote or withdraw
|
||||
)
|
||||
};
|
||||
let ixs = vec![vote_ix].with_memo(memo);
|
||||
|
||||
let message = Message::new(&ixs, Some(&config.signers[0].pubkey()));
|
||||
let mut tx = Transaction::new_unsigned(message);
|
||||
@ -843,6 +919,8 @@ mod tests {
|
||||
new_authorized_pubkey: pubkey2,
|
||||
vote_authorize: VoteAuthorize::Voter,
|
||||
memo: None,
|
||||
authorized: 0,
|
||||
new_authorized: None,
|
||||
},
|
||||
signers: vec![read_keypair_file(&default_keypair_file).unwrap().into()],
|
||||
}
|
||||
@ -867,6 +945,8 @@ mod tests {
|
||||
new_authorized_pubkey: pubkey2,
|
||||
vote_authorize: VoteAuthorize::Voter,
|
||||
memo: None,
|
||||
authorized: 1,
|
||||
new_authorized: None,
|
||||
},
|
||||
signers: vec![
|
||||
read_keypair_file(&default_keypair_file).unwrap().into(),
|
||||
@ -875,6 +955,70 @@ mod tests {
|
||||
}
|
||||
);
|
||||
|
||||
let (voter_keypair_file, mut tmp_file) = make_tmp_file();
|
||||
let voter_keypair = Keypair::new();
|
||||
write_keypair(&voter_keypair, tmp_file.as_file_mut()).unwrap();
|
||||
|
||||
let test_authorize_voter = test_commands.clone().get_matches_from(vec![
|
||||
"test",
|
||||
"vote-authorize-voter-checked",
|
||||
&pubkey_string,
|
||||
&default_keypair_file,
|
||||
&voter_keypair_file,
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_authorize_voter, &default_signer, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::VoteAuthorize {
|
||||
vote_account_pubkey: pubkey,
|
||||
new_authorized_pubkey: voter_keypair.pubkey(),
|
||||
vote_authorize: VoteAuthorize::Voter,
|
||||
memo: None,
|
||||
authorized: 0,
|
||||
new_authorized: Some(1),
|
||||
},
|
||||
signers: vec![
|
||||
read_keypair_file(&default_keypair_file).unwrap().into(),
|
||||
read_keypair_file(&voter_keypair_file).unwrap().into()
|
||||
],
|
||||
}
|
||||
);
|
||||
|
||||
let test_authorize_voter = test_commands.clone().get_matches_from(vec![
|
||||
"test",
|
||||
"vote-authorize-voter-checked",
|
||||
&pubkey_string,
|
||||
&authorized_keypair_file,
|
||||
&voter_keypair_file,
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_authorize_voter, &default_signer, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::VoteAuthorize {
|
||||
vote_account_pubkey: pubkey,
|
||||
new_authorized_pubkey: voter_keypair.pubkey(),
|
||||
vote_authorize: VoteAuthorize::Voter,
|
||||
memo: None,
|
||||
authorized: 1,
|
||||
new_authorized: Some(2),
|
||||
},
|
||||
signers: vec![
|
||||
read_keypair_file(&default_keypair_file).unwrap().into(),
|
||||
read_keypair_file(&authorized_keypair_file).unwrap().into(),
|
||||
read_keypair_file(&voter_keypair_file).unwrap().into(),
|
||||
],
|
||||
}
|
||||
);
|
||||
|
||||
let test_authorize_voter = test_commands.clone().get_matches_from(vec![
|
||||
"test",
|
||||
"vote-authorize-voter-checked",
|
||||
&pubkey_string,
|
||||
&authorized_keypair_file,
|
||||
&pubkey2_string,
|
||||
]);
|
||||
assert!(parse_command(&test_authorize_voter, &default_signer, &mut None).is_err());
|
||||
|
||||
let (keypair_file, mut tmp_file) = make_tmp_file();
|
||||
let keypair = Keypair::new();
|
||||
write_keypair(&keypair, tmp_file.as_file_mut()).unwrap();
|
||||
|
@ -1,6 +1,7 @@
|
||||
use solana_cli::{
|
||||
cli::{process_command, request_and_confirm_airdrop, CliCommand, CliConfig},
|
||||
spend_utils::SpendAmount,
|
||||
stake::StakeAuthorizationIndexed,
|
||||
test_utils::{check_ready, check_recent_balance},
|
||||
};
|
||||
use solana_cli_output::{parse_sign_only_reply_string, OutputFormat};
|
||||
@ -64,6 +65,7 @@ fn test_stake_delegation_force() {
|
||||
seed: None,
|
||||
staker: None,
|
||||
withdrawer: None,
|
||||
withdrawer_signer: None,
|
||||
lockup: Lockup::default(),
|
||||
amount: SpendAmount::Some(50_000),
|
||||
sign_only: false,
|
||||
@ -151,6 +153,7 @@ fn test_seed_stake_delegation_and_deactivation() {
|
||||
seed: Some("hi there".to_string()),
|
||||
staker: None,
|
||||
withdrawer: None,
|
||||
withdrawer_signer: None,
|
||||
lockup: Lockup::default(),
|
||||
amount: SpendAmount::Some(50_000),
|
||||
sign_only: false,
|
||||
@ -231,6 +234,7 @@ fn test_stake_delegation_and_deactivation() {
|
||||
seed: None,
|
||||
staker: None,
|
||||
withdrawer: None,
|
||||
withdrawer_signer: None,
|
||||
lockup: Lockup::default(),
|
||||
amount: SpendAmount::Some(50_000),
|
||||
sign_only: false,
|
||||
@ -332,6 +336,7 @@ fn test_offline_stake_delegation_and_deactivation() {
|
||||
seed: None,
|
||||
staker: Some(config_offline.signers[0].pubkey()),
|
||||
withdrawer: None,
|
||||
withdrawer_signer: None,
|
||||
lockup: Lockup::default(),
|
||||
amount: SpendAmount::Some(50_000),
|
||||
sign_only: false,
|
||||
@ -451,6 +456,7 @@ fn test_nonced_stake_delegation_and_deactivation() {
|
||||
seed: None,
|
||||
staker: None,
|
||||
withdrawer: None,
|
||||
withdrawer_signer: None,
|
||||
lockup: Lockup::default(),
|
||||
amount: SpendAmount::Some(50_000),
|
||||
sign_only: false,
|
||||
@ -581,6 +587,7 @@ fn test_stake_authorize() {
|
||||
seed: None,
|
||||
staker: None,
|
||||
withdrawer: None,
|
||||
withdrawer_signer: None,
|
||||
lockup: Lockup::default(),
|
||||
amount: SpendAmount::Some(50_000),
|
||||
sign_only: false,
|
||||
@ -600,7 +607,12 @@ fn test_stake_authorize() {
|
||||
config.signers.pop();
|
||||
config.command = CliCommand::StakeAuthorize {
|
||||
stake_account_pubkey,
|
||||
new_authorizations: vec![(StakeAuthorize::Staker, online_authority_pubkey, 0)],
|
||||
new_authorizations: vec![StakeAuthorizationIndexed {
|
||||
authorization_type: StakeAuthorize::Staker,
|
||||
new_authority_pubkey: online_authority_pubkey,
|
||||
authority: 0,
|
||||
new_authority_signer: None,
|
||||
}],
|
||||
sign_only: false,
|
||||
dump_transaction_message: false,
|
||||
blockhash_query: BlockhashQuery::default(),
|
||||
@ -629,8 +641,18 @@ fn test_stake_authorize() {
|
||||
config.command = CliCommand::StakeAuthorize {
|
||||
stake_account_pubkey,
|
||||
new_authorizations: vec![
|
||||
(StakeAuthorize::Staker, online_authority2_pubkey, 1),
|
||||
(StakeAuthorize::Withdrawer, withdraw_authority_pubkey, 0),
|
||||
StakeAuthorizationIndexed {
|
||||
authorization_type: StakeAuthorize::Staker,
|
||||
new_authority_pubkey: online_authority2_pubkey,
|
||||
authority: 1,
|
||||
new_authority_signer: None,
|
||||
},
|
||||
StakeAuthorizationIndexed {
|
||||
authorization_type: StakeAuthorize::Withdrawer,
|
||||
new_authority_pubkey: withdraw_authority_pubkey,
|
||||
authority: 0,
|
||||
new_authority_signer: None,
|
||||
},
|
||||
],
|
||||
sign_only: false,
|
||||
dump_transaction_message: false,
|
||||
@ -657,7 +679,12 @@ fn test_stake_authorize() {
|
||||
config.signers.push(&online_authority2);
|
||||
config.command = CliCommand::StakeAuthorize {
|
||||
stake_account_pubkey,
|
||||
new_authorizations: vec![(StakeAuthorize::Staker, offline_authority_pubkey, 1)],
|
||||
new_authorizations: vec![StakeAuthorizationIndexed {
|
||||
authorization_type: StakeAuthorize::Staker,
|
||||
new_authority_pubkey: offline_authority_pubkey,
|
||||
authority: 1,
|
||||
new_authority_signer: None,
|
||||
}],
|
||||
sign_only: false,
|
||||
dump_transaction_message: false,
|
||||
blockhash_query: BlockhashQuery::default(),
|
||||
@ -683,7 +710,12 @@ fn test_stake_authorize() {
|
||||
let (blockhash, _) = rpc_client.get_recent_blockhash().unwrap();
|
||||
config_offline.command = CliCommand::StakeAuthorize {
|
||||
stake_account_pubkey,
|
||||
new_authorizations: vec![(StakeAuthorize::Staker, nonced_authority_pubkey, 0)],
|
||||
new_authorizations: vec![StakeAuthorizationIndexed {
|
||||
authorization_type: StakeAuthorize::Staker,
|
||||
new_authority_pubkey: nonced_authority_pubkey,
|
||||
authority: 0,
|
||||
new_authority_signer: None,
|
||||
}],
|
||||
sign_only: true,
|
||||
dump_transaction_message: false,
|
||||
blockhash_query: BlockhashQuery::None(blockhash),
|
||||
@ -702,7 +734,12 @@ fn test_stake_authorize() {
|
||||
config.signers = vec![&offline_presigner];
|
||||
config.command = CliCommand::StakeAuthorize {
|
||||
stake_account_pubkey,
|
||||
new_authorizations: vec![(StakeAuthorize::Staker, nonced_authority_pubkey, 0)],
|
||||
new_authorizations: vec![StakeAuthorizationIndexed {
|
||||
authorization_type: StakeAuthorize::Staker,
|
||||
new_authority_pubkey: nonced_authority_pubkey,
|
||||
authority: 0,
|
||||
new_authority_signer: None,
|
||||
}],
|
||||
sign_only: false,
|
||||
dump_transaction_message: false,
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(blockhash_query::Source::Cluster, blockhash),
|
||||
@ -753,7 +790,12 @@ fn test_stake_authorize() {
|
||||
config_offline.signers.push(&nonced_authority);
|
||||
config_offline.command = CliCommand::StakeAuthorize {
|
||||
stake_account_pubkey,
|
||||
new_authorizations: vec![(StakeAuthorize::Staker, online_authority_pubkey, 1)],
|
||||
new_authorizations: vec![StakeAuthorizationIndexed {
|
||||
authorization_type: StakeAuthorize::Staker,
|
||||
new_authority_pubkey: online_authority_pubkey,
|
||||
authority: 1,
|
||||
new_authority_signer: None,
|
||||
}],
|
||||
sign_only: true,
|
||||
dump_transaction_message: false,
|
||||
blockhash_query: BlockhashQuery::None(nonce_hash),
|
||||
@ -773,7 +815,12 @@ fn test_stake_authorize() {
|
||||
config.signers = vec![&offline_presigner, &nonced_authority_presigner];
|
||||
config.command = CliCommand::StakeAuthorize {
|
||||
stake_account_pubkey,
|
||||
new_authorizations: vec![(StakeAuthorize::Staker, online_authority_pubkey, 1)],
|
||||
new_authorizations: vec![StakeAuthorizationIndexed {
|
||||
authorization_type: StakeAuthorize::Staker,
|
||||
new_authority_pubkey: online_authority_pubkey,
|
||||
authority: 1,
|
||||
new_authority_signer: None,
|
||||
}],
|
||||
sign_only: false,
|
||||
dump_transaction_message: false,
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(
|
||||
@ -861,6 +908,7 @@ fn test_stake_authorize_with_fee_payer() {
|
||||
seed: None,
|
||||
staker: None,
|
||||
withdrawer: None,
|
||||
withdrawer_signer: None,
|
||||
lockup: Lockup::default(),
|
||||
amount: SpendAmount::Some(50_000),
|
||||
sign_only: false,
|
||||
@ -880,7 +928,12 @@ fn test_stake_authorize_with_fee_payer() {
|
||||
config.signers = vec![&default_signer, &payer_keypair];
|
||||
config.command = CliCommand::StakeAuthorize {
|
||||
stake_account_pubkey,
|
||||
new_authorizations: vec![(StakeAuthorize::Staker, offline_pubkey, 0)],
|
||||
new_authorizations: vec![StakeAuthorizationIndexed {
|
||||
authorization_type: StakeAuthorize::Staker,
|
||||
new_authority_pubkey: offline_pubkey,
|
||||
authority: 0,
|
||||
new_authority_signer: None,
|
||||
}],
|
||||
sign_only: false,
|
||||
dump_transaction_message: false,
|
||||
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
|
||||
@ -902,7 +955,12 @@ fn test_stake_authorize_with_fee_payer() {
|
||||
let (blockhash, _) = rpc_client.get_recent_blockhash().unwrap();
|
||||
config_offline.command = CliCommand::StakeAuthorize {
|
||||
stake_account_pubkey,
|
||||
new_authorizations: vec![(StakeAuthorize::Staker, payer_pubkey, 0)],
|
||||
new_authorizations: vec![StakeAuthorizationIndexed {
|
||||
authorization_type: StakeAuthorize::Staker,
|
||||
new_authority_pubkey: payer_pubkey,
|
||||
authority: 0,
|
||||
new_authority_signer: None,
|
||||
}],
|
||||
sign_only: true,
|
||||
dump_transaction_message: false,
|
||||
blockhash_query: BlockhashQuery::None(blockhash),
|
||||
@ -921,7 +979,12 @@ fn test_stake_authorize_with_fee_payer() {
|
||||
config.signers = vec![&offline_presigner];
|
||||
config.command = CliCommand::StakeAuthorize {
|
||||
stake_account_pubkey,
|
||||
new_authorizations: vec![(StakeAuthorize::Staker, payer_pubkey, 0)],
|
||||
new_authorizations: vec![StakeAuthorizationIndexed {
|
||||
authorization_type: StakeAuthorize::Staker,
|
||||
new_authority_pubkey: payer_pubkey,
|
||||
authority: 0,
|
||||
new_authority_signer: None,
|
||||
}],
|
||||
sign_only: false,
|
||||
dump_transaction_message: false,
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(blockhash_query::Source::Cluster, blockhash),
|
||||
@ -985,6 +1048,7 @@ fn test_stake_split() {
|
||||
seed: None,
|
||||
staker: Some(offline_pubkey),
|
||||
withdrawer: Some(offline_pubkey),
|
||||
withdrawer_signer: None,
|
||||
lockup: Lockup::default(),
|
||||
amount: SpendAmount::Some(10 * minimum_stake_balance),
|
||||
sign_only: false,
|
||||
@ -1135,6 +1199,7 @@ fn test_stake_set_lockup() {
|
||||
seed: None,
|
||||
staker: Some(offline_pubkey),
|
||||
withdrawer: Some(config.signers[0].pubkey()),
|
||||
withdrawer_signer: None,
|
||||
lockup,
|
||||
amount: SpendAmount::Some(10 * minimum_stake_balance),
|
||||
sign_only: false,
|
||||
@ -1163,6 +1228,7 @@ fn test_stake_set_lockup() {
|
||||
config.command = CliCommand::StakeSetLockup {
|
||||
stake_account_pubkey,
|
||||
lockup,
|
||||
new_custodian_signer: None,
|
||||
custodian: 0,
|
||||
sign_only: false,
|
||||
dump_transaction_message: false,
|
||||
@ -1198,6 +1264,7 @@ fn test_stake_set_lockup() {
|
||||
config.command = CliCommand::StakeSetLockup {
|
||||
stake_account_pubkey,
|
||||
lockup,
|
||||
new_custodian_signer: None,
|
||||
custodian: 0,
|
||||
sign_only: false,
|
||||
dump_transaction_message: false,
|
||||
@ -1218,6 +1285,7 @@ fn test_stake_set_lockup() {
|
||||
config.command = CliCommand::StakeSetLockup {
|
||||
stake_account_pubkey,
|
||||
lockup,
|
||||
new_custodian_signer: None,
|
||||
custodian: 1,
|
||||
sign_only: false,
|
||||
dump_transaction_message: false,
|
||||
@ -1250,6 +1318,7 @@ fn test_stake_set_lockup() {
|
||||
config.command = CliCommand::StakeSetLockup {
|
||||
stake_account_pubkey,
|
||||
lockup,
|
||||
new_custodian_signer: None,
|
||||
custodian: 1,
|
||||
sign_only: false,
|
||||
dump_transaction_message: false,
|
||||
@ -1297,6 +1366,7 @@ fn test_stake_set_lockup() {
|
||||
config_offline.command = CliCommand::StakeSetLockup {
|
||||
stake_account_pubkey,
|
||||
lockup,
|
||||
new_custodian_signer: None,
|
||||
custodian: 0,
|
||||
sign_only: true,
|
||||
dump_transaction_message: false,
|
||||
@ -1315,6 +1385,7 @@ fn test_stake_set_lockup() {
|
||||
config.command = CliCommand::StakeSetLockup {
|
||||
stake_account_pubkey,
|
||||
lockup,
|
||||
new_custodian_signer: None,
|
||||
custodian: 0,
|
||||
sign_only: false,
|
||||
dump_transaction_message: false,
|
||||
@ -1409,6 +1480,7 @@ fn test_offline_nonced_create_stake_account_and_withdraw() {
|
||||
seed: None,
|
||||
staker: None,
|
||||
withdrawer: None,
|
||||
withdrawer_signer: None,
|
||||
lockup: Lockup::default(),
|
||||
amount: SpendAmount::Some(50_000),
|
||||
sign_only: true,
|
||||
@ -1432,6 +1504,7 @@ fn test_offline_nonced_create_stake_account_and_withdraw() {
|
||||
seed: None,
|
||||
staker: Some(offline_pubkey),
|
||||
withdrawer: None,
|
||||
withdrawer_signer: None,
|
||||
lockup: Lockup::default(),
|
||||
amount: SpendAmount::Some(50_000),
|
||||
sign_only: false,
|
||||
@ -1521,6 +1594,7 @@ fn test_offline_nonced_create_stake_account_and_withdraw() {
|
||||
seed: Some(seed.to_string()),
|
||||
staker: None,
|
||||
withdrawer: None,
|
||||
withdrawer_signer: None,
|
||||
lockup: Lockup::default(),
|
||||
amount: SpendAmount::Some(50_000),
|
||||
sign_only: true,
|
||||
@ -1542,6 +1616,7 @@ fn test_offline_nonced_create_stake_account_and_withdraw() {
|
||||
seed: Some(seed.to_string()),
|
||||
staker: Some(offline_pubkey),
|
||||
withdrawer: Some(offline_pubkey),
|
||||
withdrawer_signer: None,
|
||||
lockup: Lockup::default(),
|
||||
amount: SpendAmount::Some(50_000),
|
||||
sign_only: false,
|
||||
@ -1561,3 +1636,228 @@ fn test_offline_nonced_create_stake_account_and_withdraw() {
|
||||
Pubkey::create_with_seed(&stake_pubkey, seed, &stake::program::id()).unwrap();
|
||||
check_recent_balance(50_000, &rpc_client, &seed_address);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_stake_checked_instructions() {
|
||||
solana_logger::setup();
|
||||
|
||||
let mint_keypair = Keypair::new();
|
||||
let mint_pubkey = mint_keypair.pubkey();
|
||||
let faucet_addr = run_local_faucet(mint_keypair, None);
|
||||
let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr));
|
||||
|
||||
let rpc_client =
|
||||
RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed());
|
||||
let default_signer = Keypair::new();
|
||||
|
||||
let mut config = CliConfig::recent_for_tests();
|
||||
config.json_rpc_url = test_validator.rpc_url();
|
||||
config.signers = vec![&default_signer];
|
||||
|
||||
request_and_confirm_airdrop(&rpc_client, &config, &config.signers[0].pubkey(), 100_000)
|
||||
.unwrap();
|
||||
|
||||
// Create stake account with withdrawer
|
||||
let stake_keypair = Keypair::new();
|
||||
let stake_account_pubkey = stake_keypair.pubkey();
|
||||
let withdrawer_keypair = Keypair::new();
|
||||
let withdrawer_pubkey = withdrawer_keypair.pubkey();
|
||||
config.signers.push(&stake_keypair);
|
||||
config.command = CliCommand::CreateStakeAccount {
|
||||
stake_account: 1,
|
||||
seed: None,
|
||||
staker: None,
|
||||
withdrawer: Some(withdrawer_pubkey),
|
||||
withdrawer_signer: Some(1),
|
||||
lockup: Lockup::default(),
|
||||
amount: SpendAmount::Some(50_000),
|
||||
sign_only: false,
|
||||
dump_transaction_message: false,
|
||||
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
memo: None,
|
||||
fee_payer: 0,
|
||||
from: 0,
|
||||
};
|
||||
process_command(&config).unwrap_err(); // unsigned authority should fail
|
||||
|
||||
config.signers = vec![&default_signer, &stake_keypair, &withdrawer_keypair];
|
||||
config.command = CliCommand::CreateStakeAccount {
|
||||
stake_account: 1,
|
||||
seed: None,
|
||||
staker: None,
|
||||
withdrawer: Some(withdrawer_pubkey),
|
||||
withdrawer_signer: Some(1),
|
||||
lockup: Lockup::default(),
|
||||
amount: SpendAmount::Some(50_000),
|
||||
sign_only: false,
|
||||
dump_transaction_message: false,
|
||||
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
memo: None,
|
||||
fee_payer: 0,
|
||||
from: 0,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
|
||||
// Re-authorize account, checking new authority
|
||||
let staker_keypair = Keypair::new();
|
||||
let staker_pubkey = staker_keypair.pubkey();
|
||||
config.signers = vec![&default_signer];
|
||||
config.command = CliCommand::StakeAuthorize {
|
||||
stake_account_pubkey,
|
||||
new_authorizations: vec![StakeAuthorizationIndexed {
|
||||
authorization_type: StakeAuthorize::Staker,
|
||||
new_authority_pubkey: staker_pubkey,
|
||||
authority: 0,
|
||||
new_authority_signer: Some(0),
|
||||
}],
|
||||
sign_only: false,
|
||||
dump_transaction_message: false,
|
||||
blockhash_query: BlockhashQuery::default(),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
memo: None,
|
||||
fee_payer: 0,
|
||||
custodian: None,
|
||||
no_wait: false,
|
||||
};
|
||||
process_command(&config).unwrap_err(); // unsigned authority should fail
|
||||
|
||||
config.signers = vec![&default_signer, &staker_keypair];
|
||||
config.command = CliCommand::StakeAuthorize {
|
||||
stake_account_pubkey,
|
||||
new_authorizations: vec![StakeAuthorizationIndexed {
|
||||
authorization_type: StakeAuthorize::Staker,
|
||||
new_authority_pubkey: staker_pubkey,
|
||||
authority: 0,
|
||||
new_authority_signer: Some(1),
|
||||
}],
|
||||
sign_only: false,
|
||||
dump_transaction_message: false,
|
||||
blockhash_query: BlockhashQuery::default(),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
memo: None,
|
||||
fee_payer: 0,
|
||||
custodian: None,
|
||||
no_wait: false,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
let stake_account = rpc_client.get_account(&stake_account_pubkey).unwrap();
|
||||
let stake_state: StakeState = stake_account.state().unwrap();
|
||||
let current_authority = match stake_state {
|
||||
StakeState::Initialized(meta) => meta.authorized.staker,
|
||||
_ => panic!("Unexpected stake state!"),
|
||||
};
|
||||
assert_eq!(current_authority, staker_pubkey);
|
||||
|
||||
let new_withdrawer_keypair = Keypair::new();
|
||||
let new_withdrawer_pubkey = new_withdrawer_keypair.pubkey();
|
||||
config.signers = vec![&default_signer, &withdrawer_keypair];
|
||||
config.command = CliCommand::StakeAuthorize {
|
||||
stake_account_pubkey,
|
||||
new_authorizations: vec![StakeAuthorizationIndexed {
|
||||
authorization_type: StakeAuthorize::Withdrawer,
|
||||
new_authority_pubkey: new_withdrawer_pubkey,
|
||||
authority: 1,
|
||||
new_authority_signer: Some(1),
|
||||
}],
|
||||
sign_only: false,
|
||||
dump_transaction_message: false,
|
||||
blockhash_query: BlockhashQuery::default(),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
memo: None,
|
||||
fee_payer: 0,
|
||||
custodian: None,
|
||||
no_wait: false,
|
||||
};
|
||||
process_command(&config).unwrap_err(); // unsigned authority should fail
|
||||
|
||||
config.signers = vec![
|
||||
&default_signer,
|
||||
&withdrawer_keypair,
|
||||
&new_withdrawer_keypair,
|
||||
];
|
||||
config.command = CliCommand::StakeAuthorize {
|
||||
stake_account_pubkey,
|
||||
new_authorizations: vec![StakeAuthorizationIndexed {
|
||||
authorization_type: StakeAuthorize::Withdrawer,
|
||||
new_authority_pubkey: new_withdrawer_pubkey,
|
||||
authority: 1,
|
||||
new_authority_signer: Some(2),
|
||||
}],
|
||||
sign_only: false,
|
||||
dump_transaction_message: false,
|
||||
blockhash_query: BlockhashQuery::default(),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
memo: None,
|
||||
fee_payer: 0,
|
||||
custodian: None,
|
||||
no_wait: false,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
let stake_account = rpc_client.get_account(&stake_account_pubkey).unwrap();
|
||||
let stake_state: StakeState = stake_account.state().unwrap();
|
||||
let current_authority = match stake_state {
|
||||
StakeState::Initialized(meta) => meta.authorized.withdrawer,
|
||||
_ => panic!("Unexpected stake state!"),
|
||||
};
|
||||
assert_eq!(current_authority, new_withdrawer_pubkey);
|
||||
|
||||
// Set lockup, checking new custodian
|
||||
let custodian = Keypair::new();
|
||||
let custodian_pubkey = custodian.pubkey();
|
||||
let lockup = LockupArgs {
|
||||
unix_timestamp: Some(1_581_534_570),
|
||||
epoch: Some(200),
|
||||
custodian: Some(custodian_pubkey),
|
||||
};
|
||||
config.signers = vec![&default_signer, &new_withdrawer_keypair];
|
||||
config.command = CliCommand::StakeSetLockup {
|
||||
stake_account_pubkey,
|
||||
lockup,
|
||||
new_custodian_signer: Some(1),
|
||||
custodian: 1,
|
||||
sign_only: false,
|
||||
dump_transaction_message: false,
|
||||
blockhash_query: BlockhashQuery::default(),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
memo: None,
|
||||
fee_payer: 0,
|
||||
};
|
||||
process_command(&config).unwrap_err(); // unsigned new custodian should fail
|
||||
|
||||
config.signers = vec![&default_signer, &new_withdrawer_keypair, &custodian];
|
||||
config.command = CliCommand::StakeSetLockup {
|
||||
stake_account_pubkey,
|
||||
lockup,
|
||||
new_custodian_signer: Some(2),
|
||||
custodian: 1,
|
||||
sign_only: false,
|
||||
dump_transaction_message: false,
|
||||
blockhash_query: BlockhashQuery::default(),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
memo: None,
|
||||
fee_payer: 0,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
let stake_account = rpc_client.get_account(&stake_account_pubkey).unwrap();
|
||||
let stake_state: StakeState = stake_account.state().unwrap();
|
||||
let current_lockup = match stake_state {
|
||||
StakeState::Initialized(meta) => meta.lockup,
|
||||
_ => panic!("Unexpected stake state!"),
|
||||
};
|
||||
assert_eq!(
|
||||
current_lockup.unix_timestamp,
|
||||
lockup.unix_timestamp.unwrap()
|
||||
);
|
||||
assert_eq!(current_lockup.epoch, lockup.epoch.unwrap());
|
||||
assert_eq!(current_lockup.custodian, custodian_pubkey);
|
||||
}
|
||||
|
@ -83,13 +83,48 @@ fn test_vote_authorize_and_withdraw() {
|
||||
check_recent_balance(expected_balance, &rpc_client, &vote_account_pubkey);
|
||||
|
||||
// Authorize vote account withdrawal to another signer
|
||||
let withdraw_authority = Keypair::new();
|
||||
let first_withdraw_authority = Keypair::new();
|
||||
config.signers = vec![&default_signer];
|
||||
config.command = CliCommand::VoteAuthorize {
|
||||
vote_account_pubkey,
|
||||
new_authorized_pubkey: first_withdraw_authority.pubkey(),
|
||||
vote_authorize: VoteAuthorize::Withdrawer,
|
||||
memo: None,
|
||||
authorized: 0,
|
||||
new_authorized: None,
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
let vote_account = rpc_client
|
||||
.get_account(&vote_account_keypair.pubkey())
|
||||
.unwrap();
|
||||
let vote_state: VoteStateVersions = vote_account.state().unwrap();
|
||||
let authorized_withdrawer = vote_state.convert_to_current().authorized_withdrawer;
|
||||
assert_eq!(authorized_withdrawer, first_withdraw_authority.pubkey());
|
||||
|
||||
// Authorize vote account withdrawal to another signer with checked instruction
|
||||
let withdraw_authority = Keypair::new();
|
||||
config.signers = vec![&default_signer, &first_withdraw_authority];
|
||||
config.command = CliCommand::VoteAuthorize {
|
||||
vote_account_pubkey,
|
||||
new_authorized_pubkey: withdraw_authority.pubkey(),
|
||||
vote_authorize: VoteAuthorize::Withdrawer,
|
||||
memo: None,
|
||||
authorized: 1,
|
||||
new_authorized: Some(1),
|
||||
};
|
||||
process_command(&config).unwrap_err(); // unsigned by new authority should fail
|
||||
config.signers = vec![
|
||||
&default_signer,
|
||||
&first_withdraw_authority,
|
||||
&withdraw_authority,
|
||||
];
|
||||
config.command = CliCommand::VoteAuthorize {
|
||||
vote_account_pubkey,
|
||||
new_authorized_pubkey: withdraw_authority.pubkey(),
|
||||
vote_authorize: VoteAuthorize::Withdrawer,
|
||||
memo: None,
|
||||
authorized: 1,
|
||||
new_authorized: Some(2),
|
||||
};
|
||||
process_command(&config).unwrap();
|
||||
let vote_account = rpc_client
|
||||
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-client"
|
||||
version = "1.7.4"
|
||||
version = "1.7.5"
|
||||
description = "Solana Client"
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -24,14 +24,14 @@ semver = "0.11.0"
|
||||
serde = "1.0.122"
|
||||
serde_derive = "1.0.103"
|
||||
serde_json = "1.0.56"
|
||||
solana-account-decoder = { path = "../account-decoder", version = "=1.7.4" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "=1.7.4" }
|
||||
solana-faucet = { path = "../faucet", version = "=1.7.4" }
|
||||
solana-net-utils = { path = "../net-utils", version = "=1.7.4" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.7.4" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "=1.7.4" }
|
||||
solana-version = { path = "../version", version = "=1.7.4" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "=1.7.4" }
|
||||
solana-account-decoder = { path = "../account-decoder", version = "=1.7.5" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "=1.7.5" }
|
||||
solana-faucet = { path = "../faucet", version = "=1.7.5" }
|
||||
solana-net-utils = { path = "../net-utils", version = "=1.7.5" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.7.5" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "=1.7.5" }
|
||||
solana-version = { path = "../version", version = "=1.7.5" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "=1.7.5" }
|
||||
thiserror = "1.0"
|
||||
tokio = { version = "1", features = ["full"] }
|
||||
tungstenite = "0.10.1"
|
||||
@ -40,7 +40,7 @@ url = "2.1.1"
|
||||
[dev-dependencies]
|
||||
assert_matches = "1.3.0"
|
||||
jsonrpc-http-server = "17.0.0"
|
||||
solana-logger = { path = "../logger", version = "=1.7.4" }
|
||||
solana-logger = { path = "../logger", version = "=1.7.5" }
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
||||
|
@ -1,5 +1,5 @@
|
||||
use {
|
||||
crate::rpc_request,
|
||||
crate::{rpc_request, rpc_response},
|
||||
solana_faucet::faucet::FaucetError,
|
||||
solana_sdk::{
|
||||
signature::SignerError, transaction::TransactionError, transport::TransportError,
|
||||
@ -30,6 +30,24 @@ pub enum ClientErrorKind {
|
||||
Custom(String),
|
||||
}
|
||||
|
||||
impl ClientErrorKind {
|
||||
pub fn get_transaction_error(&self) -> Option<TransactionError> {
|
||||
match self {
|
||||
Self::RpcError(rpc_request::RpcError::RpcResponseError {
|
||||
data:
|
||||
rpc_request::RpcResponseErrorData::SendTransactionPreflightFailure(
|
||||
rpc_response::RpcSimulateTransactionResult {
|
||||
err: Some(tx_err), ..
|
||||
},
|
||||
),
|
||||
..
|
||||
}) => Some(tx_err.clone()),
|
||||
Self::TransactionError(tx_err) => Some(tx_err.clone()),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<TransportError> for ClientErrorKind {
|
||||
fn from(err: TransportError) -> Self {
|
||||
match err {
|
||||
@ -86,6 +104,10 @@ impl ClientError {
|
||||
pub fn kind(&self) -> &ClientErrorKind {
|
||||
&self.kind
|
||||
}
|
||||
|
||||
pub fn get_transaction_error(&self) -> Option<TransactionError> {
|
||||
self.kind.get_transaction_error()
|
||||
}
|
||||
}
|
||||
|
||||
impl From<ClientErrorKind> for ClientError {
|
||||
|
9
client/src/fees.rs
Normal file
9
client/src/fees.rs
Normal file
@ -0,0 +1,9 @@
|
||||
use crate::{fee_calculator::FeeCalculator, hash::Hash};
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct Fees {
|
||||
pub blockhash: Hash,
|
||||
pub fee_calculator: FeeCalculator,
|
||||
pub last_valid_block_height: u64,
|
||||
}
|
@ -1012,49 +1012,55 @@ impl RpcClient {
|
||||
&self,
|
||||
transaction: &Transaction,
|
||||
) -> ClientResult<Signature> {
|
||||
let signature = self.send_transaction(transaction)?;
|
||||
let recent_blockhash = if uses_durable_nonce(transaction).is_some() {
|
||||
self.get_recent_blockhash_with_commitment(CommitmentConfig::processed())?
|
||||
.value
|
||||
.0
|
||||
} else {
|
||||
transaction.message.recent_blockhash
|
||||
};
|
||||
let status = loop {
|
||||
let status = self.get_signature_status(&signature)?;
|
||||
if status.is_none() {
|
||||
if self
|
||||
.get_fee_calculator_for_blockhash_with_commitment(
|
||||
&recent_blockhash,
|
||||
CommitmentConfig::processed(),
|
||||
)?
|
||||
.value
|
||||
.is_none()
|
||||
{
|
||||
break status;
|
||||
}
|
||||
const SEND_RETRIES: usize = 1;
|
||||
const GET_STATUS_RETRIES: usize = usize::MAX;
|
||||
|
||||
'sending: for _ in 0..SEND_RETRIES {
|
||||
let signature = self.send_transaction(transaction)?;
|
||||
|
||||
let recent_blockhash = if uses_durable_nonce(transaction).is_some() {
|
||||
let (recent_blockhash, ..) = self
|
||||
.get_recent_blockhash_with_commitment(CommitmentConfig::processed())?
|
||||
.value;
|
||||
recent_blockhash
|
||||
} else {
|
||||
break status;
|
||||
transaction.message.recent_blockhash
|
||||
};
|
||||
|
||||
for status_retry in 0..GET_STATUS_RETRIES {
|
||||
match self.get_signature_status(&signature)? {
|
||||
Some(Ok(_)) => return Ok(signature),
|
||||
Some(Err(e)) => return Err(e.into()),
|
||||
None => {
|
||||
let fee_calculator = self
|
||||
.get_fee_calculator_for_blockhash_with_commitment(
|
||||
&recent_blockhash,
|
||||
CommitmentConfig::processed(),
|
||||
)?
|
||||
.value;
|
||||
if fee_calculator.is_none() {
|
||||
// Block hash is not found by some reason
|
||||
break 'sending;
|
||||
} else if cfg!(not(test))
|
||||
// Ignore sleep at last step.
|
||||
&& status_retry < GET_STATUS_RETRIES
|
||||
{
|
||||
// Retry twice a second
|
||||
sleep(Duration::from_millis(500));
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if cfg!(not(test)) {
|
||||
// Retry twice a second
|
||||
sleep(Duration::from_millis(500));
|
||||
}
|
||||
};
|
||||
if let Some(result) = status {
|
||||
match result {
|
||||
Ok(_) => Ok(signature),
|
||||
Err(err) => Err(err.into()),
|
||||
}
|
||||
} else {
|
||||
Err(RpcError::ForUser(
|
||||
"unable to confirm transaction. \
|
||||
This can happen in situations such as transaction expiration \
|
||||
and insufficient fee-payer funds"
|
||||
.to_string(),
|
||||
)
|
||||
.into())
|
||||
}
|
||||
|
||||
Err(RpcError::ForUser(
|
||||
"unable to confirm transaction. \
|
||||
This can happen in situations such as transaction expiration \
|
||||
and insufficient fee-payer funds"
|
||||
.to_string(),
|
||||
)
|
||||
.into())
|
||||
}
|
||||
|
||||
/// Note that `get_account` returns `Err(..)` if the account does not exist whereas
|
||||
@ -1241,6 +1247,34 @@ impl RpcClient {
|
||||
)
|
||||
}
|
||||
|
||||
pub fn get_fees(&self) -> ClientResult<Fees> {
|
||||
Ok(self.get_fees_with_commitment(self.commitment())?.value)
|
||||
}
|
||||
|
||||
pub fn get_fees_with_commitment(&self, commitment_config: CommitmentConfig) -> RpcResult<Fees> {
|
||||
let Response {
|
||||
context,
|
||||
value: fees,
|
||||
} = self.send::<Response<RpcFees>>(
|
||||
RpcRequest::GetFees,
|
||||
json!([self.maybe_map_commitment(commitment_config)?]),
|
||||
)?;
|
||||
let blockhash = fees.blockhash.parse().map_err(|_| {
|
||||
ClientError::new_with_request(
|
||||
RpcError::ParseError("Hash".to_string()).into(),
|
||||
RpcRequest::GetFees,
|
||||
)
|
||||
})?;
|
||||
Ok(Response {
|
||||
context,
|
||||
value: Fees {
|
||||
blockhash,
|
||||
fee_calculator: fees.fee_calculator,
|
||||
last_valid_block_height: fees.last_valid_block_height,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
pub fn get_recent_blockhash(&self) -> ClientResult<(Hash, FeeCalculator)> {
|
||||
let (blockhash, fee_calculator, _last_valid_slot) = self
|
||||
.get_recent_blockhash_with_commitment(self.commitment())?
|
||||
|
@ -81,6 +81,8 @@ pub struct RpcGetVoteAccountsConfig {
|
||||
pub vote_pubkey: Option<String>, // validator vote address, as a base-58 encoded string
|
||||
#[serde(flatten)]
|
||||
pub commitment: Option<CommitmentConfig>,
|
||||
pub keep_unstaked_delinquents: Option<bool>,
|
||||
pub delinquent_slot_distance: Option<u64>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
|
||||
|
@ -18,6 +18,7 @@ pub const JSON_RPC_SERVER_ERROR_LONG_TERM_STORAGE_SLOT_SKIPPED: i64 = -32009;
|
||||
pub const JSON_RPC_SERVER_ERROR_KEY_EXCLUDED_FROM_SECONDARY_INDEX: i64 = -32010;
|
||||
pub const JSON_RPC_SERVER_ERROR_TRANSACTION_HISTORY_NOT_AVAILABLE: i64 = -32011;
|
||||
pub const JSON_RPC_SCAN_ERROR: i64 = -32012;
|
||||
pub const JSON_RPC_SERVER_ERROR_TRANSACTION_SIGNATURE_LEN_MISMATCH: i64 = -32013;
|
||||
|
||||
#[derive(Error, Debug)]
|
||||
pub enum RpcCustomError {
|
||||
@ -51,6 +52,8 @@ pub enum RpcCustomError {
|
||||
TransactionHistoryNotAvailable,
|
||||
#[error("ScanError")]
|
||||
ScanError { message: String },
|
||||
#[error("TransactionSignatureLenMismatch")]
|
||||
TransactionSignatureLenMismatch,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
@ -151,6 +154,13 @@ impl From<RpcCustomError> for Error {
|
||||
message,
|
||||
data: None,
|
||||
},
|
||||
RpcCustomError::TransactionSignatureLenMismatch => Self {
|
||||
code: ErrorCode::ServerError(
|
||||
JSON_RPC_SERVER_ERROR_TRANSACTION_SIGNATURE_LEN_MISMATCH,
|
||||
),
|
||||
message: "Transaction signature length mismatch".to_string(),
|
||||
data: None,
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -4,6 +4,7 @@ use {
|
||||
solana_sdk::{
|
||||
clock::{Epoch, Slot, UnixTimestamp},
|
||||
fee_calculator::{FeeCalculator, FeeRateGovernor},
|
||||
hash::Hash,
|
||||
inflation::Inflation,
|
||||
transaction::{Result, TransactionError},
|
||||
},
|
||||
@ -57,6 +58,14 @@ pub struct DeprecatedRpcFees {
|
||||
pub last_valid_slot: Slot,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct Fees {
|
||||
pub blockhash: Hash,
|
||||
pub fee_calculator: FeeCalculator,
|
||||
pub last_valid_block_height: u64,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct RpcFeeCalculator {
|
||||
@ -394,8 +403,9 @@ pub struct RpcPerfSample {
|
||||
pub struct RpcInflationReward {
|
||||
pub epoch: Epoch,
|
||||
pub effective_slot: Slot,
|
||||
pub amount: u64, // lamports
|
||||
pub post_balance: u64, // lamports
|
||||
pub amount: u64, // lamports
|
||||
pub post_balance: u64, // lamports
|
||||
pub commission: Option<u8>, // Vote account commission when the reward was credited
|
||||
}
|
||||
|
||||
impl From<ConfirmedTransactionStatusWithSignature> for RpcConfirmedTransactionStatusWithSignature {
|
||||
|
@ -1,7 +1,7 @@
|
||||
[package]
|
||||
name = "solana-core"
|
||||
description = "Blockchain, Rebuilt for Scale"
|
||||
version = "1.7.4"
|
||||
version = "1.7.5"
|
||||
homepage = "https://solana.com/"
|
||||
documentation = "https://docs.rs/solana-core"
|
||||
readme = "../README.md"
|
||||
@ -43,33 +43,33 @@ retain_mut = "0.1.2"
|
||||
serde = "1.0.122"
|
||||
serde_bytes = "0.11"
|
||||
serde_derive = "1.0.103"
|
||||
solana-account-decoder = { path = "../account-decoder", version = "=1.7.4" }
|
||||
solana-banks-server = { path = "../banks-server", version = "=1.7.4" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "=1.7.4" }
|
||||
solana-client = { path = "../client", version = "=1.7.4" }
|
||||
solana-gossip = { path = "../gossip", version = "=1.7.4" }
|
||||
solana-ledger = { path = "../ledger", version = "=1.7.4" }
|
||||
solana-logger = { path = "../logger", version = "=1.7.4" }
|
||||
solana-merkle-tree = { path = "../merkle-tree", version = "=1.7.4" }
|
||||
solana-metrics = { path = "../metrics", version = "=1.7.4" }
|
||||
solana-measure = { path = "../measure", version = "=1.7.4" }
|
||||
solana-net-utils = { path = "../net-utils", version = "=1.7.4" }
|
||||
solana-perf = { path = "../perf", version = "=1.7.4" }
|
||||
solana-poh = { path = "../poh", version = "=1.7.4" }
|
||||
solana-program-test = { path = "../program-test", version = "=1.7.4" }
|
||||
solana-rpc = { path = "../rpc", version = "=1.7.4" }
|
||||
solana-runtime = { path = "../runtime", version = "=1.7.4" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.7.4" }
|
||||
solana-frozen-abi = { path = "../frozen-abi", version = "=1.7.4" }
|
||||
solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.7.4" }
|
||||
solana-streamer = { path = "../streamer", version = "=1.7.4" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "=1.7.4" }
|
||||
solana-version = { path = "../version", version = "=1.7.4" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "=1.7.4" }
|
||||
solana-account-decoder = { path = "../account-decoder", version = "=1.7.5" }
|
||||
solana-banks-server = { path = "../banks-server", version = "=1.7.5" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "=1.7.5" }
|
||||
solana-client = { path = "../client", version = "=1.7.5" }
|
||||
solana-gossip = { path = "../gossip", version = "=1.7.5" }
|
||||
solana-ledger = { path = "../ledger", version = "=1.7.5" }
|
||||
solana-logger = { path = "../logger", version = "=1.7.5" }
|
||||
solana-merkle-tree = { path = "../merkle-tree", version = "=1.7.5" }
|
||||
solana-metrics = { path = "../metrics", version = "=1.7.5" }
|
||||
solana-measure = { path = "../measure", version = "=1.7.5" }
|
||||
solana-net-utils = { path = "../net-utils", version = "=1.7.5" }
|
||||
solana-perf = { path = "../perf", version = "=1.7.5" }
|
||||
solana-poh = { path = "../poh", version = "=1.7.5" }
|
||||
solana-program-test = { path = "../program-test", version = "=1.7.5" }
|
||||
solana-rpc = { path = "../rpc", version = "=1.7.5" }
|
||||
solana-runtime = { path = "../runtime", version = "=1.7.5" }
|
||||
solana-sdk = { path = "../sdk", version = "=1.7.5" }
|
||||
solana-frozen-abi = { path = "../frozen-abi", version = "=1.7.5" }
|
||||
solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.7.5" }
|
||||
solana-streamer = { path = "../streamer", version = "=1.7.5" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "=1.7.5" }
|
||||
solana-version = { path = "../version", version = "=1.7.5" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "=1.7.5" }
|
||||
spl-token-v2-0 = { package = "spl-token", version = "=3.1.1", features = ["no-entrypoint"] }
|
||||
tempfile = "3.1.0"
|
||||
thiserror = "1.0"
|
||||
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.7.4" }
|
||||
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.7.5" }
|
||||
trees = "0.2.1"
|
||||
|
||||
[dev-dependencies]
|
||||
@ -80,8 +80,8 @@ num_cpus = "1.13.0"
|
||||
reqwest = { version = "0.11.2", default-features = false, features = ["blocking", "rustls-tls", "json"] }
|
||||
serde_json = "1.0.56"
|
||||
serial_test = "0.4.0"
|
||||
solana-stake-program = { path = "../programs/stake", version = "=1.7.4" }
|
||||
solana-version = { path = "../version", version = "=1.7.4" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "=1.7.5" }
|
||||
solana-version = { path = "../version", version = "=1.7.5" }
|
||||
symlink = "0.1.0"
|
||||
systemstat = "0.1.5"
|
||||
tokio_02 = { version = "0.2", package = "tokio", features = ["full"] }
|
||||
|
@ -3,10 +3,14 @@
|
||||
extern crate test;
|
||||
|
||||
use rand::{thread_rng, Rng};
|
||||
use solana_core::broadcast_stage::broadcast_metrics::TransmitShredsStats;
|
||||
use solana_core::broadcast_stage::{broadcast_shreds, get_broadcast_peers};
|
||||
use solana_gossip::cluster_info::{ClusterInfo, Node};
|
||||
use solana_gossip::contact_info::ContactInfo;
|
||||
use solana_core::{
|
||||
broadcast_stage::{broadcast_metrics::TransmitShredsStats, broadcast_shreds, BroadcastStage},
|
||||
cluster_nodes::ClusterNodes,
|
||||
};
|
||||
use solana_gossip::{
|
||||
cluster_info::{ClusterInfo, Node},
|
||||
contact_info::ContactInfo,
|
||||
};
|
||||
use solana_ledger::shred::Shred;
|
||||
use solana_sdk::pubkey;
|
||||
use solana_sdk::timing::timestamp;
|
||||
@ -36,7 +40,7 @@ fn broadcast_shreds_bench(bencher: &mut Bencher) {
|
||||
stakes.insert(id, thread_rng().gen_range(1, NUM_PEERS) as u64);
|
||||
}
|
||||
let cluster_info = Arc::new(cluster_info);
|
||||
let (peers, peers_and_stakes) = get_broadcast_peers(&cluster_info, Some(&stakes));
|
||||
let cluster_nodes = ClusterNodes::<BroadcastStage>::new(&cluster_info, &stakes);
|
||||
let shreds = Arc::new(shreds);
|
||||
let last_datapoint = Arc::new(AtomicU64::new(0));
|
||||
bencher.iter(move || {
|
||||
@ -44,8 +48,7 @@ fn broadcast_shreds_bench(bencher: &mut Bencher) {
|
||||
broadcast_shreds(
|
||||
&socket,
|
||||
&shreds,
|
||||
&peers_and_stakes,
|
||||
&peers,
|
||||
&cluster_nodes,
|
||||
&last_datapoint,
|
||||
&mut TransmitShredsStats::default(),
|
||||
)
|
||||
|
@ -976,16 +976,15 @@ impl BankingStage {
|
||||
fn transactions_from_packets(
|
||||
msgs: &Packets,
|
||||
transaction_indexes: &[usize],
|
||||
secp256k1_program_enabled: bool,
|
||||
libsecp256k1_0_5_upgrade_enabled: bool,
|
||||
) -> (Vec<HashedTransaction<'static>>, Vec<usize>) {
|
||||
transaction_indexes
|
||||
.iter()
|
||||
.filter_map(|tx_index| {
|
||||
let p = &msgs.packets[*tx_index];
|
||||
let tx: Transaction = limited_deserialize(&p.data[0..p.meta.size]).ok()?;
|
||||
if secp256k1_program_enabled {
|
||||
tx.verify_precompiles().ok()?;
|
||||
}
|
||||
tx.verify_precompiles(libsecp256k1_0_5_upgrade_enabled)
|
||||
.ok()?;
|
||||
let message_bytes = Self::packet_message(p)?;
|
||||
let message_hash = Message::hash_raw_message(message_bytes);
|
||||
Some((
|
||||
@ -1049,7 +1048,7 @@ impl BankingStage {
|
||||
let (transactions, transaction_to_packet_indexes) = Self::transactions_from_packets(
|
||||
msgs,
|
||||
&packet_indexes,
|
||||
bank.secp256k1_program_enabled(),
|
||||
bank.libsecp256k1_0_5_upgrade_enabled(),
|
||||
);
|
||||
packet_conversion_time.stop();
|
||||
|
||||
@ -1120,7 +1119,7 @@ impl BankingStage {
|
||||
let (transactions, transaction_to_packet_indexes) = Self::transactions_from_packets(
|
||||
msgs,
|
||||
&transaction_indexes,
|
||||
bank.secp256k1_program_enabled(),
|
||||
bank.libsecp256k1_0_5_upgrade_enabled(),
|
||||
);
|
||||
|
||||
let tx_count = transaction_to_packet_indexes.len();
|
||||
@ -2380,7 +2379,7 @@ mod tests {
|
||||
|
||||
let shreds = entries_to_test_shreds(entries, bank.slot(), 0, true, 0);
|
||||
blockstore.insert_shreds(shreds, None, false).unwrap();
|
||||
blockstore.set_roots(&[bank.slot()]).unwrap();
|
||||
blockstore.set_roots(std::iter::once(&bank.slot())).unwrap();
|
||||
|
||||
let (transaction_status_sender, transaction_status_receiver) = unbounded();
|
||||
let transaction_status_service = TransactionStatusService::new(
|
||||
|
@ -6,17 +6,15 @@ use self::{
|
||||
fail_entry_verification_broadcast_run::FailEntryVerificationBroadcastRun,
|
||||
standard_broadcast_run::StandardBroadcastRun,
|
||||
};
|
||||
use crate::result::{Error, Result};
|
||||
use crate::{
|
||||
cluster_nodes::ClusterNodes,
|
||||
result::{Error, Result},
|
||||
};
|
||||
use crossbeam_channel::{
|
||||
Receiver as CrossbeamReceiver, RecvTimeoutError as CrossbeamRecvTimeoutError,
|
||||
Sender as CrossbeamSender,
|
||||
};
|
||||
use solana_gossip::{
|
||||
cluster_info::{self, ClusterInfo, ClusterInfoError},
|
||||
contact_info::ContactInfo,
|
||||
crds_gossip_pull::CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS,
|
||||
weighted_shuffle::weighted_best,
|
||||
};
|
||||
use solana_gossip::cluster_info::{ClusterInfo, ClusterInfoError};
|
||||
use solana_ledger::{blockstore::Blockstore, shred::Shred};
|
||||
use solana_measure::measure::Measure;
|
||||
use solana_metrics::{inc_new_counter_error, inc_new_counter_info};
|
||||
@ -380,26 +378,16 @@ fn update_peer_stats(
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_broadcast_peers(
|
||||
cluster_info: &ClusterInfo,
|
||||
stakes: Option<&HashMap<Pubkey, u64>>,
|
||||
) -> (Vec<ContactInfo>, Vec<(u64, usize)>) {
|
||||
let mut peers = cluster_info.tvu_peers();
|
||||
let peers_and_stakes = cluster_info::stake_weight_peers(&mut peers, stakes);
|
||||
(peers, peers_and_stakes)
|
||||
}
|
||||
|
||||
/// broadcast messages from the leader to layer 1 nodes
|
||||
/// # Remarks
|
||||
pub fn broadcast_shreds(
|
||||
s: &UdpSocket,
|
||||
shreds: &[Shred],
|
||||
peers_and_stakes: &[(u64, usize)],
|
||||
peers: &[ContactInfo],
|
||||
cluster_nodes: &ClusterNodes<BroadcastStage>,
|
||||
last_datapoint_submit: &Arc<AtomicU64>,
|
||||
transmit_stats: &mut TransmitShredsStats,
|
||||
) -> Result<()> {
|
||||
let broadcast_len = peers_and_stakes.len();
|
||||
let broadcast_len = cluster_nodes.num_peers();
|
||||
if broadcast_len == 0 {
|
||||
update_peer_stats(1, 1, last_datapoint_submit);
|
||||
return Ok(());
|
||||
@ -407,10 +395,9 @@ pub fn broadcast_shreds(
|
||||
let mut shred_select = Measure::start("shred_select");
|
||||
let packets: Vec<_> = shreds
|
||||
.iter()
|
||||
.map(|shred| {
|
||||
let broadcast_index = weighted_best(peers_and_stakes, shred.seed());
|
||||
|
||||
(&shred.payload, &peers[broadcast_index].tvu)
|
||||
.filter_map(|shred| {
|
||||
let node = cluster_nodes.get_broadcast_peer(shred.seed())?;
|
||||
Some((&shred.payload, &node.tvu))
|
||||
})
|
||||
.collect();
|
||||
shred_select.stop();
|
||||
@ -429,7 +416,7 @@ pub fn broadcast_shreds(
|
||||
send_mmsg_time.stop();
|
||||
transmit_stats.send_mmsg_elapsed += send_mmsg_time.as_us();
|
||||
|
||||
let num_live_peers = num_live_peers(peers);
|
||||
let num_live_peers = cluster_nodes.num_peers_live(timestamp()) as i64;
|
||||
update_peer_stats(
|
||||
num_live_peers,
|
||||
broadcast_len as i64 + 1,
|
||||
@ -438,25 +425,6 @@ pub fn broadcast_shreds(
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn distance(a: u64, b: u64) -> u64 {
|
||||
if a > b {
|
||||
a - b
|
||||
} else {
|
||||
b - a
|
||||
}
|
||||
}
|
||||
|
||||
fn num_live_peers(peers: &[ContactInfo]) -> i64 {
|
||||
let mut num_live_peers = 1i64;
|
||||
peers.iter().for_each(|p| {
|
||||
// A peer is considered live if they generated their contact info recently
|
||||
if distance(timestamp(), p.wallclock) <= CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS {
|
||||
num_live_peers += 1;
|
||||
}
|
||||
});
|
||||
num_live_peers
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub mod test {
|
||||
use super::*;
|
||||
@ -540,19 +508,6 @@ pub mod test {
|
||||
assert_eq!(num_expected_coding_shreds, coding_index);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_num_live_peers() {
|
||||
let mut ci = ContactInfo {
|
||||
wallclock: std::u64::MAX,
|
||||
..ContactInfo::default()
|
||||
};
|
||||
assert_eq!(num_live_peers(&[ci.clone()]), 1);
|
||||
ci.wallclock = timestamp() - 1;
|
||||
assert_eq!(num_live_peers(&[ci.clone()]), 2);
|
||||
ci.wallclock = timestamp() - CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS - 1;
|
||||
assert_eq!(num_live_peers(&[ci]), 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_duplicate_retransmit_signal() {
|
||||
// Setup
|
||||
|
@ -1,4 +1,5 @@
|
||||
use super::*;
|
||||
use crate::cluster_nodes::ClusterNodes;
|
||||
use solana_ledger::shred::Shredder;
|
||||
use solana_sdk::hash::Hash;
|
||||
use solana_sdk::signature::Keypair;
|
||||
@ -134,13 +135,14 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun {
|
||||
) -> Result<()> {
|
||||
let ((stakes, shreds), _) = receiver.lock().unwrap().recv()?;
|
||||
// Broadcast data
|
||||
let (peers, peers_and_stakes) = get_broadcast_peers(cluster_info, stakes.as_deref());
|
||||
|
||||
let cluster_nodes = ClusterNodes::<BroadcastStage>::new(
|
||||
cluster_info,
|
||||
stakes.as_deref().unwrap_or(&HashMap::default()),
|
||||
);
|
||||
broadcast_shreds(
|
||||
sock,
|
||||
&shreds,
|
||||
&peers_and_stakes,
|
||||
&peers,
|
||||
&cluster_nodes,
|
||||
&Arc::new(AtomicU64::new(0)),
|
||||
&mut TransmitShredsStats::default(),
|
||||
)?;
|
||||
|
@ -4,7 +4,7 @@ use super::{
|
||||
broadcast_utils::{self, ReceiveResults},
|
||||
*,
|
||||
};
|
||||
use crate::broadcast_stage::broadcast_utils::UnfinishedSlotInfo;
|
||||
use crate::{broadcast_stage::broadcast_utils::UnfinishedSlotInfo, cluster_nodes::ClusterNodes};
|
||||
use solana_ledger::{
|
||||
entry::Entry,
|
||||
shred::{
|
||||
@ -27,16 +27,10 @@ pub struct StandardBroadcastRun {
|
||||
shred_version: u16,
|
||||
last_datapoint_submit: Arc<AtomicU64>,
|
||||
num_batches: usize,
|
||||
broadcast_peer_cache: Arc<RwLock<BroadcastPeerCache>>,
|
||||
cluster_nodes: Arc<RwLock<ClusterNodes<BroadcastStage>>>,
|
||||
last_peer_update: Arc<AtomicU64>,
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
struct BroadcastPeerCache {
|
||||
peers: Vec<ContactInfo>,
|
||||
peers_and_stakes: Vec<(u64, usize)>,
|
||||
}
|
||||
|
||||
impl StandardBroadcastRun {
|
||||
pub(super) fn new(keypair: Arc<Keypair>, shred_version: u16) -> Self {
|
||||
Self {
|
||||
@ -50,7 +44,7 @@ impl StandardBroadcastRun {
|
||||
shred_version,
|
||||
last_datapoint_submit: Arc::default(),
|
||||
num_batches: 0,
|
||||
broadcast_peer_cache: Arc::default(),
|
||||
cluster_nodes: Arc::default(),
|
||||
last_peer_update: Arc::default(),
|
||||
}
|
||||
}
|
||||
@ -354,13 +348,13 @@ impl StandardBroadcastRun {
|
||||
.compare_and_swap(now, last, Ordering::Relaxed)
|
||||
== last
|
||||
{
|
||||
let mut w_broadcast_peer_cache = self.broadcast_peer_cache.write().unwrap();
|
||||
let (peers, peers_and_stakes) = get_broadcast_peers(cluster_info, stakes);
|
||||
w_broadcast_peer_cache.peers = peers;
|
||||
w_broadcast_peer_cache.peers_and_stakes = peers_and_stakes;
|
||||
*self.cluster_nodes.write().unwrap() = ClusterNodes::<BroadcastStage>::new(
|
||||
cluster_info,
|
||||
stakes.unwrap_or(&HashMap::default()),
|
||||
);
|
||||
}
|
||||
get_peers_time.stop();
|
||||
let r_broadcast_peer_cache = self.broadcast_peer_cache.read().unwrap();
|
||||
let cluster_nodes = self.cluster_nodes.read().unwrap();
|
||||
|
||||
let mut transmit_stats = TransmitShredsStats::default();
|
||||
// Broadcast the shreds
|
||||
@ -368,12 +362,11 @@ impl StandardBroadcastRun {
|
||||
broadcast_shreds(
|
||||
sock,
|
||||
&shreds,
|
||||
&r_broadcast_peer_cache.peers_and_stakes,
|
||||
&r_broadcast_peer_cache.peers,
|
||||
&cluster_nodes,
|
||||
&self.last_datapoint_submit,
|
||||
&mut transmit_stats,
|
||||
)?;
|
||||
drop(r_broadcast_peer_cache);
|
||||
drop(cluster_nodes);
|
||||
transmit_time.stop();
|
||||
|
||||
transmit_stats.transmit_elapsed = transmit_time.as_us();
|
||||
|
441
core/src/cluster_nodes.rs
Normal file
441
core/src/cluster_nodes.rs
Normal file
@ -0,0 +1,441 @@
|
||||
use {
|
||||
crate::{broadcast_stage::BroadcastStage, retransmit_stage::RetransmitStage},
|
||||
itertools::Itertools,
|
||||
solana_gossip::{
|
||||
cluster_info::{compute_retransmit_peers, ClusterInfo},
|
||||
contact_info::ContactInfo,
|
||||
crds_gossip_pull::CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS,
|
||||
weighted_shuffle::{weighted_best, weighted_shuffle},
|
||||
},
|
||||
solana_sdk::pubkey::Pubkey,
|
||||
std::{any::TypeId, cmp::Reverse, collections::HashMap, marker::PhantomData},
|
||||
};
|
||||
|
||||
enum NodeId {
|
||||
// TVU node obtained through gossip (staked or not).
|
||||
ContactInfo(ContactInfo),
|
||||
// Staked node with no contact-info in gossip table.
|
||||
Pubkey(Pubkey),
|
||||
}
|
||||
|
||||
struct Node {
|
||||
node: NodeId,
|
||||
stake: u64,
|
||||
}
|
||||
|
||||
pub struct ClusterNodes<T> {
|
||||
pubkey: Pubkey, // The local node itself.
|
||||
// All staked nodes + other known tvu-peers + the node itself;
|
||||
// sorted by (stake, pubkey) in descending order.
|
||||
nodes: Vec<Node>,
|
||||
// Weights and indices for sampling peers. weighted_{shuffle,best} expect
|
||||
// weights >= 1. For backward compatibility we use max(1, stake) for
|
||||
// weights and exclude nodes with no contact-info.
|
||||
index: Vec<(/*weight:*/ u64, /*index:*/ usize)>,
|
||||
_phantom: PhantomData<T>,
|
||||
}
|
||||
|
||||
impl Node {
|
||||
#[inline]
|
||||
fn pubkey(&self) -> Pubkey {
|
||||
match &self.node {
|
||||
NodeId::Pubkey(pubkey) => *pubkey,
|
||||
NodeId::ContactInfo(node) => node.id,
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn contact_info(&self) -> Option<&ContactInfo> {
|
||||
match &self.node {
|
||||
NodeId::Pubkey(_) => None,
|
||||
NodeId::ContactInfo(node) => Some(node),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> ClusterNodes<T> {
|
||||
pub fn num_peers(&self) -> usize {
|
||||
self.index.len()
|
||||
}
|
||||
|
||||
// A peer is considered live if they generated their contact info recently.
|
||||
pub fn num_peers_live(&self, now: u64) -> usize {
|
||||
self.index
|
||||
.iter()
|
||||
.filter_map(|(_, index)| self.nodes[*index].contact_info())
|
||||
.filter(|node| {
|
||||
let elapsed = if node.wallclock < now {
|
||||
now - node.wallclock
|
||||
} else {
|
||||
node.wallclock - now
|
||||
};
|
||||
elapsed < CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS
|
||||
})
|
||||
.count()
|
||||
}
|
||||
}
|
||||
|
||||
impl ClusterNodes<BroadcastStage> {
|
||||
pub fn new(cluster_info: &ClusterInfo, stakes: &HashMap<Pubkey, u64>) -> Self {
|
||||
new_cluster_nodes(cluster_info, stakes)
|
||||
}
|
||||
|
||||
/// Returns the root of turbine broadcast tree, which the leader sends the
|
||||
/// shred to.
|
||||
pub fn get_broadcast_peer(&self, shred_seed: [u8; 32]) -> Option<&ContactInfo> {
|
||||
if self.index.is_empty() {
|
||||
None
|
||||
} else {
|
||||
let index = weighted_best(&self.index, shred_seed);
|
||||
match &self.nodes[index].node {
|
||||
NodeId::ContactInfo(node) => Some(node),
|
||||
NodeId::Pubkey(_) => panic!("this should not happen!"),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl ClusterNodes<RetransmitStage> {
|
||||
pub fn new(cluster_info: &ClusterInfo, stakes: &HashMap<Pubkey, u64>) -> Self {
|
||||
new_cluster_nodes(cluster_info, stakes)
|
||||
}
|
||||
|
||||
pub fn get_retransmit_peers(
|
||||
&self,
|
||||
shred_seed: [u8; 32],
|
||||
fanout: usize,
|
||||
slot_leader: Option<Pubkey>,
|
||||
) -> (
|
||||
Vec<&ContactInfo>, // neighbors
|
||||
Vec<&ContactInfo>, // children
|
||||
) {
|
||||
// Exclude leader from list of nodes.
|
||||
let index = self.index.iter().copied();
|
||||
let (weights, index): (Vec<u64>, Vec<usize>) = match slot_leader {
|
||||
None => {
|
||||
error!("unknown leader for shred slot");
|
||||
index.unzip()
|
||||
}
|
||||
Some(slot_leader) if slot_leader == self.pubkey => {
|
||||
error!("retransmit from slot leader: {}", slot_leader);
|
||||
index.unzip()
|
||||
}
|
||||
Some(slot_leader) => index
|
||||
.filter(|(_, i)| self.nodes[*i].pubkey() != slot_leader)
|
||||
.unzip(),
|
||||
};
|
||||
let index: Vec<_> = {
|
||||
let shuffle = weighted_shuffle(&weights, shred_seed);
|
||||
shuffle.into_iter().map(|i| index[i]).collect()
|
||||
};
|
||||
let self_index = index
|
||||
.iter()
|
||||
.position(|i| self.nodes[*i].pubkey() == self.pubkey)
|
||||
.unwrap();
|
||||
let (neighbors, children) = compute_retransmit_peers(fanout, self_index, &index);
|
||||
// Assert that the node itself is included in the set of neighbors, at
|
||||
// the right offset.
|
||||
debug_assert_eq!(
|
||||
self.nodes[neighbors[self_index % fanout]].pubkey(),
|
||||
self.pubkey
|
||||
);
|
||||
let get_contact_infos = |index: Vec<usize>| -> Vec<&ContactInfo> {
|
||||
index
|
||||
.into_iter()
|
||||
.map(|i| self.nodes[i].contact_info().unwrap())
|
||||
.collect()
|
||||
};
|
||||
(get_contact_infos(neighbors), get_contact_infos(children))
|
||||
}
|
||||
}
|
||||
|
||||
fn new_cluster_nodes<T: 'static>(
|
||||
cluster_info: &ClusterInfo,
|
||||
stakes: &HashMap<Pubkey, u64>,
|
||||
) -> ClusterNodes<T> {
|
||||
let self_pubkey = cluster_info.id();
|
||||
let nodes = get_nodes(cluster_info, stakes);
|
||||
let broadcast = TypeId::of::<T>() == TypeId::of::<BroadcastStage>();
|
||||
// For backward compatibility:
|
||||
// * nodes which do not have contact-info are excluded.
|
||||
// * stakes are floored at 1.
|
||||
// The sorting key here should be equivalent to
|
||||
// solana_gossip::deprecated::sorted_stakes_with_index.
|
||||
// Leader itself is excluded when sampling broadcast peers.
|
||||
let index = nodes
|
||||
.iter()
|
||||
.enumerate()
|
||||
.filter(|(_, node)| node.contact_info().is_some())
|
||||
.filter(|(_, node)| !broadcast || node.pubkey() != self_pubkey)
|
||||
.sorted_by_key(|(_, node)| Reverse((node.stake.max(1), node.pubkey())))
|
||||
.map(|(index, node)| (node.stake.max(1), index))
|
||||
.collect();
|
||||
ClusterNodes {
|
||||
pubkey: self_pubkey,
|
||||
nodes,
|
||||
index,
|
||||
_phantom: PhantomData::default(),
|
||||
}
|
||||
}
|
||||
|
||||
// All staked nodes + other known tvu-peers + the node itself;
|
||||
// sorted by (stake, pubkey) in descending order.
|
||||
fn get_nodes(cluster_info: &ClusterInfo, stakes: &HashMap<Pubkey, u64>) -> Vec<Node> {
|
||||
let self_pubkey = cluster_info.id();
|
||||
// The local node itself.
|
||||
std::iter::once({
|
||||
let stake = stakes.get(&self_pubkey).copied().unwrap_or_default();
|
||||
let node = NodeId::from(cluster_info.my_contact_info());
|
||||
Node { node, stake }
|
||||
})
|
||||
// All known tvu-peers from gossip.
|
||||
.chain(cluster_info.tvu_peers().into_iter().map(|node| {
|
||||
let stake = stakes.get(&node.id).copied().unwrap_or_default();
|
||||
let node = NodeId::from(node);
|
||||
Node { node, stake }
|
||||
}))
|
||||
// All staked nodes.
|
||||
.chain(
|
||||
stakes
|
||||
.iter()
|
||||
.filter(|(_, stake)| **stake > 0)
|
||||
.map(|(&pubkey, &stake)| Node {
|
||||
node: NodeId::from(pubkey),
|
||||
stake,
|
||||
}),
|
||||
)
|
||||
.sorted_by_key(|node| Reverse((node.stake, node.pubkey())))
|
||||
// Since sorted_by_key is stable, in case of duplicates, this
|
||||
// will keep nodes with contact-info.
|
||||
.dedup_by(|a, b| a.pubkey() == b.pubkey())
|
||||
.collect()
|
||||
}
|
||||
|
||||
impl From<ContactInfo> for NodeId {
|
||||
fn from(node: ContactInfo) -> Self {
|
||||
NodeId::ContactInfo(node)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<Pubkey> for NodeId {
|
||||
fn from(pubkey: Pubkey) -> Self {
|
||||
NodeId::Pubkey(pubkey)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Default for ClusterNodes<T> {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
pubkey: Pubkey::default(),
|
||||
nodes: Vec::default(),
|
||||
index: Vec::default(),
|
||||
_phantom: PhantomData::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use {
|
||||
super::*,
|
||||
rand::{seq::SliceRandom, Rng},
|
||||
solana_gossip::{
|
||||
crds_value::{CrdsData, CrdsValue},
|
||||
deprecated::{
|
||||
shuffle_peers_and_index, sorted_retransmit_peers_and_stakes,
|
||||
sorted_stakes_with_index,
|
||||
},
|
||||
},
|
||||
solana_sdk::timing::timestamp,
|
||||
std::iter::repeat_with,
|
||||
};
|
||||
|
||||
// Legacy methods copied for testing backward compatibility.
|
||||
|
||||
fn get_broadcast_peers(
|
||||
cluster_info: &ClusterInfo,
|
||||
stakes: Option<&HashMap<Pubkey, u64>>,
|
||||
) -> (Vec<ContactInfo>, Vec<(u64, usize)>) {
|
||||
let mut peers = cluster_info.tvu_peers();
|
||||
let peers_and_stakes = stake_weight_peers(&mut peers, stakes);
|
||||
(peers, peers_and_stakes)
|
||||
}
|
||||
|
||||
fn stake_weight_peers(
|
||||
peers: &mut Vec<ContactInfo>,
|
||||
stakes: Option<&HashMap<Pubkey, u64>>,
|
||||
) -> Vec<(u64, usize)> {
|
||||
peers.dedup();
|
||||
sorted_stakes_with_index(peers, stakes)
|
||||
}
|
||||
|
||||
fn make_cluster<R: Rng>(
|
||||
rng: &mut R,
|
||||
) -> (
|
||||
Vec<ContactInfo>,
|
||||
HashMap<Pubkey, u64>, // stakes
|
||||
ClusterInfo,
|
||||
) {
|
||||
let mut nodes: Vec<_> = repeat_with(|| ContactInfo::new_rand(rng, None))
|
||||
.take(1000)
|
||||
.collect();
|
||||
nodes.shuffle(rng);
|
||||
let this_node = nodes[0].clone();
|
||||
let mut stakes: HashMap<Pubkey, u64> = nodes
|
||||
.iter()
|
||||
.filter_map(|node| {
|
||||
if rng.gen_ratio(1, 7) {
|
||||
None // No stake for some of the nodes.
|
||||
} else {
|
||||
Some((node.id, rng.gen_range(0, 20)))
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
// Add some staked nodes with no contact-info.
|
||||
stakes.extend(repeat_with(|| (Pubkey::new_unique(), rng.gen_range(0, 20))).take(100));
|
||||
let cluster_info = ClusterInfo::new_with_invalid_keypair(this_node);
|
||||
{
|
||||
let now = timestamp();
|
||||
let mut gossip = cluster_info.gossip.write().unwrap();
|
||||
// First node is pushed to crds table by ClusterInfo constructor.
|
||||
for node in nodes.iter().skip(1) {
|
||||
let node = CrdsData::ContactInfo(node.clone());
|
||||
let node = CrdsValue::new_unsigned(node);
|
||||
assert_eq!(gossip.crds.insert(node, now), Ok(()));
|
||||
}
|
||||
}
|
||||
(nodes, stakes, cluster_info)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_cluster_nodes_retransmit() {
|
||||
let mut rng = rand::thread_rng();
|
||||
let (nodes, stakes, cluster_info) = make_cluster(&mut rng);
|
||||
let this_node = cluster_info.my_contact_info();
|
||||
// ClusterInfo::tvu_peers excludes the node itself.
|
||||
assert_eq!(cluster_info.tvu_peers().len(), nodes.len() - 1);
|
||||
let cluster_nodes = ClusterNodes::<RetransmitStage>::new(&cluster_info, &stakes);
|
||||
// All nodes with contact-info should be in the index.
|
||||
assert_eq!(cluster_nodes.index.len(), nodes.len());
|
||||
// Staked nodes with no contact-info should be included.
|
||||
assert!(cluster_nodes.nodes.len() > nodes.len());
|
||||
// Assert that all nodes keep their contact-info.
|
||||
// and, all staked nodes are also included.
|
||||
{
|
||||
let cluster_nodes: HashMap<_, _> = cluster_nodes
|
||||
.nodes
|
||||
.iter()
|
||||
.map(|node| (node.pubkey(), node))
|
||||
.collect();
|
||||
for node in &nodes {
|
||||
assert_eq!(cluster_nodes[&node.id].contact_info().unwrap().id, node.id);
|
||||
}
|
||||
for (pubkey, stake) in &stakes {
|
||||
if *stake > 0 {
|
||||
assert_eq!(cluster_nodes[pubkey].stake, *stake);
|
||||
}
|
||||
}
|
||||
}
|
||||
let (peers, stakes_and_index) =
|
||||
sorted_retransmit_peers_and_stakes(&cluster_info, Some(&stakes));
|
||||
assert_eq!(stakes_and_index.len(), peers.len());
|
||||
assert_eq!(cluster_nodes.index.len(), peers.len());
|
||||
for (i, node) in cluster_nodes
|
||||
.index
|
||||
.iter()
|
||||
.map(|(_, i)| &cluster_nodes.nodes[*i])
|
||||
.enumerate()
|
||||
{
|
||||
let (stake, index) = stakes_and_index[i];
|
||||
// Wallclock may be update by ClusterInfo::push_self.
|
||||
if node.pubkey() == this_node.id {
|
||||
assert_eq!(this_node.id, peers[index].id)
|
||||
} else {
|
||||
assert_eq!(node.contact_info().unwrap(), &peers[index]);
|
||||
}
|
||||
assert_eq!(node.stake.max(1), stake);
|
||||
}
|
||||
let slot_leader = nodes[1..].choose(&mut rng).unwrap().id;
|
||||
// Remove slot leader from peers indices.
|
||||
let stakes_and_index: Vec<_> = stakes_and_index
|
||||
.into_iter()
|
||||
.filter(|(_stake, index)| peers[*index].id != slot_leader)
|
||||
.collect();
|
||||
assert_eq!(peers.len(), stakes_and_index.len() + 1);
|
||||
let mut shred_seed = [0u8; 32];
|
||||
rng.fill(&mut shred_seed[..]);
|
||||
let (self_index, shuffled_peers_and_stakes) =
|
||||
shuffle_peers_and_index(&this_node.id, &peers, &stakes_and_index, shred_seed);
|
||||
let shuffled_index: Vec<_> = shuffled_peers_and_stakes
|
||||
.into_iter()
|
||||
.map(|(_, index)| index)
|
||||
.collect();
|
||||
assert_eq!(this_node.id, peers[shuffled_index[self_index]].id);
|
||||
for fanout in 1..200 {
|
||||
let (neighbors_indices, children_indices) =
|
||||
compute_retransmit_peers(fanout, self_index, &shuffled_index);
|
||||
let (neighbors, children) =
|
||||
cluster_nodes.get_retransmit_peers(shred_seed, fanout, Some(slot_leader));
|
||||
assert_eq!(children.len(), children_indices.len());
|
||||
for (node, index) in children.into_iter().zip(children_indices) {
|
||||
assert_eq!(*node, peers[index]);
|
||||
}
|
||||
assert_eq!(neighbors.len(), neighbors_indices.len());
|
||||
assert_eq!(neighbors[0].id, peers[neighbors_indices[0]].id);
|
||||
for (node, index) in neighbors.into_iter().zip(neighbors_indices).skip(1) {
|
||||
assert_eq!(*node, peers[index]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_cluster_nodes_broadcast() {
|
||||
let mut rng = rand::thread_rng();
|
||||
let (nodes, stakes, cluster_info) = make_cluster(&mut rng);
|
||||
// ClusterInfo::tvu_peers excludes the node itself.
|
||||
assert_eq!(cluster_info.tvu_peers().len(), nodes.len() - 1);
|
||||
let cluster_nodes = ClusterNodes::<BroadcastStage>::new(&cluster_info, &stakes);
|
||||
// All nodes with contact-info should be in the index.
|
||||
// Excluding this node itself.
|
||||
assert_eq!(cluster_nodes.index.len() + 1, nodes.len());
|
||||
// Staked nodes with no contact-info should be included.
|
||||
assert!(cluster_nodes.nodes.len() > nodes.len());
|
||||
// Assert that all nodes keep their contact-info.
|
||||
// and, all staked nodes are also included.
|
||||
{
|
||||
let cluster_nodes: HashMap<_, _> = cluster_nodes
|
||||
.nodes
|
||||
.iter()
|
||||
.map(|node| (node.pubkey(), node))
|
||||
.collect();
|
||||
for node in &nodes {
|
||||
assert_eq!(cluster_nodes[&node.id].contact_info().unwrap().id, node.id);
|
||||
}
|
||||
for (pubkey, stake) in &stakes {
|
||||
if *stake > 0 {
|
||||
assert_eq!(cluster_nodes[pubkey].stake, *stake);
|
||||
}
|
||||
}
|
||||
}
|
||||
let (peers, peers_and_stakes) = get_broadcast_peers(&cluster_info, Some(&stakes));
|
||||
assert_eq!(peers_and_stakes.len(), peers.len());
|
||||
assert_eq!(cluster_nodes.index.len(), peers.len());
|
||||
for (i, node) in cluster_nodes
|
||||
.index
|
||||
.iter()
|
||||
.map(|(_, i)| &cluster_nodes.nodes[*i])
|
||||
.enumerate()
|
||||
{
|
||||
let (stake, index) = peers_and_stakes[i];
|
||||
assert_eq!(node.contact_info().unwrap(), &peers[index]);
|
||||
assert_eq!(node.stake.max(1), stake);
|
||||
}
|
||||
for _ in 0..100 {
|
||||
let mut shred_seed = [0u8; 32];
|
||||
rng.fill(&mut shred_seed[..]);
|
||||
let index = weighted_best(&peers_and_stakes, shred_seed);
|
||||
let peer = cluster_nodes.get_broadcast_peer(shred_seed).unwrap();
|
||||
assert_eq!(*peer, peers[index]);
|
||||
}
|
||||
}
|
||||
}
|
@ -136,6 +136,9 @@ impl ClusterSlots {
|
||||
}
|
||||
|
||||
pub fn compute_weights(&self, slot: Slot, repair_peers: &[ContactInfo]) -> Vec<u64> {
|
||||
if repair_peers.is_empty() {
|
||||
return Vec::default();
|
||||
}
|
||||
let stakes = {
|
||||
let validator_stakes = self.validator_stakes.read().unwrap();
|
||||
repair_peers
|
||||
|
@ -104,13 +104,13 @@ pub(crate) struct ComputedBankState {
|
||||
pub my_latest_landed_vote: Option<Slot>,
|
||||
}
|
||||
|
||||
#[frozen_abi(digest = "Eay84NBbJqiMBfE7HHH2o6e51wcvoU79g8zCi5sw6uj3")]
|
||||
#[frozen_abi(digest = "GMs1FxKteU7K4ZFRofMBqNhBpM4xkPVxfYod6R8DQmpT")]
|
||||
#[derive(Clone, Serialize, Deserialize, Debug, PartialEq, AbiExample)]
|
||||
pub struct Tower {
|
||||
node_pubkey: Pubkey,
|
||||
threshold_depth: usize,
|
||||
threshold_size: f64,
|
||||
lockouts: VoteState,
|
||||
vote_state: VoteState,
|
||||
last_vote: Vote,
|
||||
#[serde(skip)]
|
||||
// The blockhash used in the last vote transaction, may or may not equal the
|
||||
@ -141,7 +141,7 @@ impl Default for Tower {
|
||||
node_pubkey: Pubkey::default(),
|
||||
threshold_depth: VOTE_THRESHOLD_DEPTH,
|
||||
threshold_size: VOTE_THRESHOLD_SIZE,
|
||||
lockouts: VoteState::default(),
|
||||
vote_state: VoteState::default(),
|
||||
last_vote: Vote::default(),
|
||||
last_timestamp: BlockTimestamp::default(),
|
||||
last_vote_tx_blockhash: Hash::default(),
|
||||
@ -151,7 +151,7 @@ impl Default for Tower {
|
||||
last_switch_threshold_check: Option::default(),
|
||||
};
|
||||
// VoteState::root_slot is ensured to be Some in Tower
|
||||
tower.lockouts.root_slot = Some(Slot::default());
|
||||
tower.vote_state.root_slot = Some(Slot::default());
|
||||
tower
|
||||
}
|
||||
}
|
||||
@ -373,7 +373,7 @@ impl Tower {
|
||||
}
|
||||
|
||||
pub fn tower_slots(&self) -> Vec<Slot> {
|
||||
self.lockouts.tower()
|
||||
self.vote_state.tower()
|
||||
}
|
||||
|
||||
pub fn last_vote_tx_blockhash(&self) -> Hash {
|
||||
@ -421,7 +421,7 @@ impl Tower {
|
||||
let last_voted_slot_in_bank = Self::last_voted_slot_in_bank(bank, vote_account_pubkey);
|
||||
|
||||
// Returns the new root if one is made after applying a vote for the given bank to
|
||||
// `self.lockouts`
|
||||
// `self.vote_state`
|
||||
self.record_bank_vote_and_update_lockouts(bank.slot(), bank.hash(), last_voted_slot_in_bank)
|
||||
}
|
||||
|
||||
@ -434,7 +434,7 @@ impl Tower {
|
||||
trace!("{} record_vote for {}", self.node_pubkey, vote_slot);
|
||||
let old_root = self.root();
|
||||
let mut new_vote = Self::apply_vote_and_generate_vote_diff(
|
||||
&mut self.lockouts,
|
||||
&mut self.vote_state,
|
||||
vote_slot,
|
||||
vote_hash,
|
||||
last_voted_slot_in_bank,
|
||||
@ -502,12 +502,12 @@ impl Tower {
|
||||
// snapshot (slot N). In other words, there should be no possibility a Tower doesn't have
|
||||
// root, unlike young vote accounts.
|
||||
pub fn root(&self) -> Slot {
|
||||
self.lockouts.root_slot.unwrap()
|
||||
self.vote_state.root_slot.unwrap()
|
||||
}
|
||||
|
||||
// a slot is recent if it's newer than the last vote we have
|
||||
pub fn is_recent(&self, slot: Slot) -> bool {
|
||||
if let Some(last_voted_slot) = self.lockouts.last_voted_slot() {
|
||||
if let Some(last_voted_slot) = self.vote_state.last_voted_slot() {
|
||||
if slot <= last_voted_slot {
|
||||
return false;
|
||||
}
|
||||
@ -516,7 +516,7 @@ impl Tower {
|
||||
}
|
||||
|
||||
pub fn has_voted(&self, slot: Slot) -> bool {
|
||||
for vote in &self.lockouts.votes {
|
||||
for vote in &self.vote_state.votes {
|
||||
if slot == vote.slot {
|
||||
return true;
|
||||
}
|
||||
@ -533,15 +533,15 @@ impl Tower {
|
||||
// slot to the current lockouts to pop any expired votes. If any of the
|
||||
// remaining voted slots are on a different fork from the checked slot,
|
||||
// it's still locked out.
|
||||
let mut lockouts = self.lockouts.clone();
|
||||
lockouts.process_slot_vote_unchecked(slot);
|
||||
for vote in &lockouts.votes {
|
||||
let mut vote_state = self.vote_state.clone();
|
||||
vote_state.process_slot_vote_unchecked(slot);
|
||||
for vote in &vote_state.votes {
|
||||
if slot != vote.slot && !ancestors.contains(&vote.slot) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(root_slot) = lockouts.root_slot {
|
||||
if let Some(root_slot) = vote_state.root_slot {
|
||||
if slot != root_slot {
|
||||
// This case should never happen because bank forks purges all
|
||||
// non-descendants of the root every time root is set
|
||||
@ -862,9 +862,9 @@ impl Tower {
|
||||
voted_stakes: &VotedStakes,
|
||||
total_stake: Stake,
|
||||
) -> bool {
|
||||
let mut lockouts = self.lockouts.clone();
|
||||
lockouts.process_slot_vote_unchecked(slot);
|
||||
let vote = lockouts.nth_recent_vote(self.threshold_depth);
|
||||
let mut vote_state = self.vote_state.clone();
|
||||
vote_state.process_slot_vote_unchecked(slot);
|
||||
let vote = vote_state.nth_recent_vote(self.threshold_depth);
|
||||
if let Some(vote) = vote {
|
||||
if let Some(fork_stake) = voted_stakes.get(&vote.slot) {
|
||||
let lockout = *fork_stake as f64 / total_stake as f64;
|
||||
@ -873,7 +873,7 @@ impl Tower {
|
||||
slot, vote.slot, lockout, fork_stake, total_stake
|
||||
);
|
||||
if vote.confirmation_count as usize > self.threshold_depth {
|
||||
for old_vote in &self.lockouts.votes {
|
||||
for old_vote in &self.vote_state.votes {
|
||||
if old_vote.slot == vote.slot
|
||||
&& old_vote.confirmation_count == vote.confirmation_count
|
||||
{
|
||||
@ -928,7 +928,7 @@ impl Tower {
|
||||
}
|
||||
|
||||
fn voted_slots(&self) -> Vec<Slot> {
|
||||
self.lockouts
|
||||
self.vote_state
|
||||
.votes
|
||||
.iter()
|
||||
.map(|lockout| lockout.slot)
|
||||
@ -964,11 +964,11 @@ impl Tower {
|
||||
assert_eq!(slot_history.check(replayed_root), Check::Found);
|
||||
|
||||
assert!(
|
||||
self.last_vote == Vote::default() && self.lockouts.votes.is_empty()
|
||||
|| self.last_vote != Vote::default() && !self.lockouts.votes.is_empty(),
|
||||
"last vote: {:?} lockouts.votes: {:?}",
|
||||
self.last_vote == Vote::default() && self.vote_state.votes.is_empty()
|
||||
|| self.last_vote != Vote::default() && !self.vote_state.votes.is_empty(),
|
||||
"last vote: {:?} vote_state.votes: {:?}",
|
||||
self.last_vote,
|
||||
self.lockouts.votes
|
||||
self.vote_state.votes
|
||||
);
|
||||
|
||||
if let Some(last_voted_slot) = self.last_voted_slot() {
|
||||
@ -1034,7 +1034,7 @@ impl Tower {
|
||||
let tower_root = self.root();
|
||||
// retained slots will be consisted only from divergent slots
|
||||
let mut retain_flags_for_each_vote_in_reverse: Vec<_> =
|
||||
Vec::with_capacity(self.lockouts.votes.len());
|
||||
Vec::with_capacity(self.vote_state.votes.len());
|
||||
|
||||
let mut still_in_future = true;
|
||||
let mut past_outside_history = false;
|
||||
@ -1112,10 +1112,10 @@ impl Tower {
|
||||
let mut retain_flags_for_each_vote =
|
||||
retain_flags_for_each_vote_in_reverse.into_iter().rev();
|
||||
|
||||
let original_votes_len = self.lockouts.votes.len();
|
||||
let original_votes_len = self.vote_state.votes.len();
|
||||
self.initialize_lockouts(move |_| retain_flags_for_each_vote.next().unwrap());
|
||||
|
||||
if self.lockouts.votes.is_empty() {
|
||||
if self.vote_state.votes.is_empty() {
|
||||
info!("All restored votes were behind; resetting root_slot and last_vote in tower!");
|
||||
// we might not have banks for those votes so just reset.
|
||||
// That's because the votes may well past replayed_root
|
||||
@ -1145,7 +1145,7 @@ impl Tower {
|
||||
bank: &Bank,
|
||||
) {
|
||||
if let Some((_stake, vote_account)) = bank.get_vote_account(vote_account_pubkey) {
|
||||
self.lockouts = vote_account
|
||||
self.vote_state = vote_account
|
||||
.vote_state()
|
||||
.as_ref()
|
||||
.expect("vote_account isn't a VoteState?")
|
||||
@ -1158,7 +1158,7 @@ impl Tower {
|
||||
bank.slot(),
|
||||
);
|
||||
assert_eq!(
|
||||
self.lockouts.node_pubkey, self.node_pubkey,
|
||||
self.vote_state.node_pubkey, self.node_pubkey,
|
||||
"vote account's node_pubkey doesn't match",
|
||||
);
|
||||
} else {
|
||||
@ -1172,13 +1172,13 @@ impl Tower {
|
||||
}
|
||||
|
||||
fn initialize_lockouts<F: FnMut(&Lockout) -> bool>(&mut self, should_retain: F) {
|
||||
self.lockouts.votes.retain(should_retain);
|
||||
self.vote_state.votes.retain(should_retain);
|
||||
}
|
||||
|
||||
// Updating root is needed to correctly restore from newly-saved tower for the next
|
||||
// boot
|
||||
fn initialize_root(&mut self, root: Slot) {
|
||||
self.lockouts.root_slot = Some(root);
|
||||
self.vote_state.root_slot = Some(root);
|
||||
}
|
||||
|
||||
pub fn get_filename(path: &Path, node_pubkey: &Pubkey) -> PathBuf {
|
||||
@ -1337,7 +1337,7 @@ pub fn reconcile_blockstore_roots_with_tower(
|
||||
"Reconciling slots as root based on tower root: {:?} ({}..{}) ",
|
||||
new_roots, tower_root, last_blockstore_root
|
||||
);
|
||||
blockstore.set_roots(&new_roots)?;
|
||||
blockstore.set_roots(new_roots.iter())?;
|
||||
} else {
|
||||
// This indicates we're in bad state; but still don't panic here.
|
||||
// That's because we might have a chance of recovering properly with
|
||||
@ -1439,9 +1439,6 @@ pub mod test {
|
||||
|
||||
while let Some(visit) = walk.get() {
|
||||
let slot = visit.node().data;
|
||||
self.progress
|
||||
.entry(slot)
|
||||
.or_insert_with(|| ForkProgress::new(Hash::default(), None, None, 0, 0));
|
||||
if self.bank_forks.read().unwrap().get(slot).is_some() {
|
||||
walk.forward();
|
||||
continue;
|
||||
@ -1449,6 +1446,9 @@ pub mod test {
|
||||
let parent = walk.get_parent().unwrap().data;
|
||||
let parent_bank = self.bank_forks.read().unwrap().get(parent).unwrap().clone();
|
||||
let new_bank = Bank::new_from_parent(&parent_bank, &Pubkey::default(), slot);
|
||||
self.progress
|
||||
.entry(slot)
|
||||
.or_insert_with(|| ForkProgress::new(Hash::default(), None, None, 0, 0));
|
||||
for (pubkey, vote) in cluster_votes.iter() {
|
||||
if vote.contains(&parent) {
|
||||
let keypairs = self.validator_keypairs.get(pubkey).unwrap();
|
||||
@ -1701,7 +1701,14 @@ pub mod test {
|
||||
let mut progress = ProgressMap::default();
|
||||
progress.insert(
|
||||
0,
|
||||
ForkProgress::new(bank0.last_blockhash(), None, None, 0, 0),
|
||||
ForkProgress::new_from_bank(
|
||||
&bank0,
|
||||
bank0.collector_id(),
|
||||
&Pubkey::default(),
|
||||
None,
|
||||
0,
|
||||
0,
|
||||
),
|
||||
);
|
||||
let bank_forks = BankForks::new(bank0);
|
||||
let heaviest_subtree_fork_choice =
|
||||
@ -1793,8 +1800,8 @@ pub mod test {
|
||||
}
|
||||
|
||||
for i in 0..5 {
|
||||
assert_eq!(tower.lockouts.votes[i].slot as usize, i);
|
||||
assert_eq!(tower.lockouts.votes[i].confirmation_count as usize, 6 - i);
|
||||
assert_eq!(tower.vote_state.votes[i].slot as usize, i);
|
||||
assert_eq!(tower.vote_state.votes[i].confirmation_count as usize, 6 - i);
|
||||
}
|
||||
}
|
||||
|
||||
@ -2107,7 +2114,7 @@ pub mod test {
|
||||
// If we set a root, then any lockout intervals below the root shouldn't
|
||||
// count toward the switch threshold. This means the other validator's
|
||||
// vote lockout no longer counts
|
||||
tower.lockouts.root_slot = Some(43);
|
||||
tower.vote_state.root_slot = Some(43);
|
||||
// Refresh ancestors and descendants for new root.
|
||||
let ancestors = vote_simulator.bank_forks.read().unwrap().ancestors();
|
||||
let descendants = vote_simulator
|
||||
@ -2347,7 +2354,7 @@ pub mod test {
|
||||
.is_empty());
|
||||
}
|
||||
|
||||
info!("local tower: {:#?}", tower.lockouts.votes);
|
||||
info!("local tower: {:#?}", tower.vote_state.votes);
|
||||
let observed = vote_simulator
|
||||
.bank_forks
|
||||
.read()
|
||||
@ -2439,14 +2446,14 @@ pub mod test {
|
||||
};
|
||||
let root_weight = root.lockout() as u128;
|
||||
let vote_account_expected_weight = tower
|
||||
.lockouts
|
||||
.vote_state
|
||||
.votes
|
||||
.iter()
|
||||
.map(|v| v.lockout() as u128)
|
||||
.sum::<u128>()
|
||||
+ root_weight;
|
||||
let expected_bank_weight = 2 * vote_account_expected_weight;
|
||||
assert_eq!(tower.lockouts.root_slot, Some(0));
|
||||
assert_eq!(tower.vote_state.root_slot, Some(0));
|
||||
let mut latest_validator_votes_for_frozen_banks =
|
||||
LatestValidatorVotesForFrozenBanks::default();
|
||||
let ComputedBankState {
|
||||
@ -2523,7 +2530,7 @@ pub mod test {
|
||||
fn test_is_locked_out_root_slot_child_pass() {
|
||||
let mut tower = Tower::new_for_tests(0, 0.67);
|
||||
let ancestors: HashSet<Slot> = vec![0].into_iter().collect();
|
||||
tower.lockouts.root_slot = Some(0);
|
||||
tower.vote_state.root_slot = Some(0);
|
||||
assert!(!tower.is_locked_out(1, &ancestors));
|
||||
}
|
||||
|
||||
@ -2531,7 +2538,7 @@ pub mod test {
|
||||
fn test_is_locked_out_root_slot_sibling_fail() {
|
||||
let mut tower = Tower::new_for_tests(0, 0.67);
|
||||
let ancestors: HashSet<Slot> = vec![0].into_iter().collect();
|
||||
tower.lockouts.root_slot = Some(0);
|
||||
tower.vote_state.root_slot = Some(0);
|
||||
tower.record_vote(1, Hash::default());
|
||||
assert!(tower.is_locked_out(2, &ancestors));
|
||||
}
|
||||
@ -2592,10 +2599,10 @@ pub mod test {
|
||||
tower.record_vote(1, Hash::default());
|
||||
assert!(!tower.is_locked_out(4, &ancestors));
|
||||
tower.record_vote(4, Hash::default());
|
||||
assert_eq!(tower.lockouts.votes[0].slot, 0);
|
||||
assert_eq!(tower.lockouts.votes[0].confirmation_count, 2);
|
||||
assert_eq!(tower.lockouts.votes[1].slot, 4);
|
||||
assert_eq!(tower.lockouts.votes[1].confirmation_count, 1);
|
||||
assert_eq!(tower.vote_state.votes[0].slot, 0);
|
||||
assert_eq!(tower.vote_state.votes[0].confirmation_count, 2);
|
||||
assert_eq!(tower.vote_state.votes[1].slot, 4);
|
||||
assert_eq!(tower.vote_state.votes[1].confirmation_count, 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@ -2955,7 +2962,7 @@ pub mod test {
|
||||
tower.record_vote(110, Hash::default());
|
||||
tower.record_vote(111, Hash::default());
|
||||
assert_eq!(tower.voted_slots(), vec![43, 110, 111]);
|
||||
assert_eq!(tower.lockouts.root_slot, Some(0));
|
||||
assert_eq!(tower.vote_state.root_slot, Some(0));
|
||||
}
|
||||
|
||||
// Prepare simulated validator restart!
|
||||
@ -3055,7 +3062,7 @@ pub mod test {
|
||||
tower.record_vote(110, Hash::default());
|
||||
tower.record_vote(111, Hash::default());
|
||||
assert_eq!(tower.voted_slots(), vec![110, 111]);
|
||||
assert_eq!(tower.lockouts.root_slot, Some(replayed_root_slot));
|
||||
assert_eq!(tower.vote_state.root_slot, Some(replayed_root_slot));
|
||||
}
|
||||
|
||||
#[test]
|
||||
@ -3145,7 +3152,7 @@ pub mod test {
|
||||
assert!(!blockstore.is_root(4));
|
||||
|
||||
let mut tower = Tower::new_with_key(&Pubkey::default());
|
||||
tower.lockouts.root_slot = Some(4);
|
||||
tower.vote_state.root_slot = Some(4);
|
||||
reconcile_blockstore_roots_with_tower(&tower, &blockstore).unwrap();
|
||||
|
||||
assert!(!blockstore.is_root(0));
|
||||
@ -3170,14 +3177,14 @@ pub mod test {
|
||||
blockstore.insert_shreds(shreds, None, false).unwrap();
|
||||
let (shreds, _) = make_slot_entries(4, 1, 42);
|
||||
blockstore.insert_shreds(shreds, None, false).unwrap();
|
||||
blockstore.set_roots(&[3]).unwrap();
|
||||
blockstore.set_roots(std::iter::once(&3)).unwrap();
|
||||
assert!(!blockstore.is_root(0));
|
||||
assert!(!blockstore.is_root(1));
|
||||
assert!(blockstore.is_root(3));
|
||||
assert!(!blockstore.is_root(4));
|
||||
|
||||
let mut tower = Tower::new_with_key(&Pubkey::default());
|
||||
tower.lockouts.root_slot = Some(4);
|
||||
tower.vote_state.root_slot = Some(4);
|
||||
reconcile_blockstore_roots_with_tower(&tower, &blockstore).unwrap();
|
||||
}
|
||||
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
|
||||
@ -3199,7 +3206,7 @@ pub mod test {
|
||||
assert!(!blockstore.is_root(3));
|
||||
|
||||
let mut tower = Tower::new_with_key(&Pubkey::default());
|
||||
tower.lockouts.root_slot = Some(4);
|
||||
tower.vote_state.root_slot = Some(4);
|
||||
assert_eq!(blockstore.last_root(), 0);
|
||||
reconcile_blockstore_roots_with_tower(&tower, &blockstore).unwrap();
|
||||
assert_eq!(blockstore.last_root(), 0);
|
||||
@ -3351,7 +3358,7 @@ pub mod test {
|
||||
#[test]
|
||||
fn test_adjust_lockouts_after_replay_all_not_found_even_if_rooted() {
|
||||
let mut tower = Tower::new_for_tests(10, 0.9);
|
||||
tower.lockouts.root_slot = Some(4);
|
||||
tower.vote_state.root_slot = Some(4);
|
||||
tower.record_vote(5, Hash::default());
|
||||
tower.record_vote(6, Hash::default());
|
||||
|
||||
@ -3373,7 +3380,7 @@ pub mod test {
|
||||
#[test]
|
||||
fn test_adjust_lockouts_after_replay_all_future_votes_only_root_found() {
|
||||
let mut tower = Tower::new_for_tests(10, 0.9);
|
||||
tower.lockouts.root_slot = Some(2);
|
||||
tower.vote_state.root_slot = Some(2);
|
||||
tower.record_vote(3, Hash::default());
|
||||
tower.record_vote(4, Hash::default());
|
||||
tower.record_vote(5, Hash::default());
|
||||
@ -3429,8 +3436,8 @@ pub mod test {
|
||||
#[test]
|
||||
fn test_adjust_lockouts_after_replay_time_warped() {
|
||||
let mut tower = Tower::new_for_tests(10, 0.9);
|
||||
tower.lockouts.votes.push_back(Lockout::new(1));
|
||||
tower.lockouts.votes.push_back(Lockout::new(0));
|
||||
tower.vote_state.votes.push_back(Lockout::new(1));
|
||||
tower.vote_state.votes.push_back(Lockout::new(0));
|
||||
let vote = Vote::new(vec![0], Hash::default());
|
||||
tower.last_vote = vote;
|
||||
|
||||
@ -3447,8 +3454,8 @@ pub mod test {
|
||||
#[test]
|
||||
fn test_adjust_lockouts_after_replay_diverged_ancestor() {
|
||||
let mut tower = Tower::new_for_tests(10, 0.9);
|
||||
tower.lockouts.votes.push_back(Lockout::new(1));
|
||||
tower.lockouts.votes.push_back(Lockout::new(2));
|
||||
tower.vote_state.votes.push_back(Lockout::new(1));
|
||||
tower.vote_state.votes.push_back(Lockout::new(2));
|
||||
let vote = Vote::new(vec![2], Hash::default());
|
||||
tower.last_vote = vote;
|
||||
|
||||
@ -3469,11 +3476,11 @@ pub mod test {
|
||||
|
||||
let mut tower = Tower::new_for_tests(10, 0.9);
|
||||
tower
|
||||
.lockouts
|
||||
.vote_state
|
||||
.votes
|
||||
.push_back(Lockout::new(MAX_ENTRIES - 1));
|
||||
tower.lockouts.votes.push_back(Lockout::new(0));
|
||||
tower.lockouts.votes.push_back(Lockout::new(1));
|
||||
tower.vote_state.votes.push_back(Lockout::new(0));
|
||||
tower.vote_state.votes.push_back(Lockout::new(1));
|
||||
let vote = Vote::new(vec![1], Hash::default());
|
||||
tower.last_vote = vote;
|
||||
|
||||
@ -3491,8 +3498,8 @@ pub mod test {
|
||||
#[should_panic(expected = "slot_in_tower(2) < checked_slot(1)")]
|
||||
fn test_adjust_lockouts_after_replay_reversed_votes() {
|
||||
let mut tower = Tower::new_for_tests(10, 0.9);
|
||||
tower.lockouts.votes.push_back(Lockout::new(2));
|
||||
tower.lockouts.votes.push_back(Lockout::new(1));
|
||||
tower.vote_state.votes.push_back(Lockout::new(2));
|
||||
tower.vote_state.votes.push_back(Lockout::new(1));
|
||||
let vote = Vote::new(vec![1], Hash::default());
|
||||
tower.last_vote = vote;
|
||||
|
||||
@ -3509,9 +3516,9 @@ pub mod test {
|
||||
#[should_panic(expected = "slot_in_tower(3) < checked_slot(3)")]
|
||||
fn test_adjust_lockouts_after_replay_repeated_non_root_votes() {
|
||||
let mut tower = Tower::new_for_tests(10, 0.9);
|
||||
tower.lockouts.votes.push_back(Lockout::new(2));
|
||||
tower.lockouts.votes.push_back(Lockout::new(3));
|
||||
tower.lockouts.votes.push_back(Lockout::new(3));
|
||||
tower.vote_state.votes.push_back(Lockout::new(2));
|
||||
tower.vote_state.votes.push_back(Lockout::new(3));
|
||||
tower.vote_state.votes.push_back(Lockout::new(3));
|
||||
let vote = Vote::new(vec![3], Hash::default());
|
||||
tower.last_vote = vote;
|
||||
|
||||
@ -3527,10 +3534,10 @@ pub mod test {
|
||||
#[test]
|
||||
fn test_adjust_lockouts_after_replay_vote_on_root() {
|
||||
let mut tower = Tower::new_for_tests(10, 0.9);
|
||||
tower.lockouts.root_slot = Some(42);
|
||||
tower.lockouts.votes.push_back(Lockout::new(42));
|
||||
tower.lockouts.votes.push_back(Lockout::new(43));
|
||||
tower.lockouts.votes.push_back(Lockout::new(44));
|
||||
tower.vote_state.root_slot = Some(42);
|
||||
tower.vote_state.votes.push_back(Lockout::new(42));
|
||||
tower.vote_state.votes.push_back(Lockout::new(43));
|
||||
tower.vote_state.votes.push_back(Lockout::new(44));
|
||||
let vote = Vote::new(vec![44], Hash::default());
|
||||
tower.last_vote = vote;
|
||||
|
||||
@ -3544,7 +3551,7 @@ pub mod test {
|
||||
#[test]
|
||||
fn test_adjust_lockouts_after_replay_vote_on_genesis() {
|
||||
let mut tower = Tower::new_for_tests(10, 0.9);
|
||||
tower.lockouts.votes.push_back(Lockout::new(0));
|
||||
tower.vote_state.votes.push_back(Lockout::new(0));
|
||||
let vote = Vote::new(vec![0], Hash::default());
|
||||
tower.last_vote = vote;
|
||||
|
||||
@ -3557,8 +3564,8 @@ pub mod test {
|
||||
#[test]
|
||||
fn test_adjust_lockouts_after_replay_future_tower() {
|
||||
let mut tower = Tower::new_for_tests(10, 0.9);
|
||||
tower.lockouts.votes.push_back(Lockout::new(13));
|
||||
tower.lockouts.votes.push_back(Lockout::new(14));
|
||||
tower.vote_state.votes.push_back(Lockout::new(13));
|
||||
tower.vote_state.votes.push_back(Lockout::new(14));
|
||||
let vote = Vote::new(vec![14], Hash::default());
|
||||
tower.last_vote = vote;
|
||||
tower.initialize_root(12);
|
||||
|
@ -12,6 +12,7 @@ pub mod banking_stage;
|
||||
pub mod broadcast_stage;
|
||||
pub mod cache_block_meta_service;
|
||||
pub mod cluster_info_vote_listener;
|
||||
pub mod cluster_nodes;
|
||||
pub mod cluster_slot_state_verifier;
|
||||
pub mod cluster_slots;
|
||||
pub mod cluster_slots_service;
|
||||
|
@ -316,7 +316,7 @@ mod test {
|
||||
assert!(optimistic_confirmation_verifier.unchecked_slots.is_empty());
|
||||
|
||||
// If we know set the root in blockstore, should return nothing
|
||||
blockstore.set_roots(&[1, 3]).unwrap();
|
||||
blockstore.set_roots(vec![1, 3].iter()).unwrap();
|
||||
optimistic_confirmation_verifier.add_new_optimistic_confirmed_slots(optimistic_slots);
|
||||
assert!(optimistic_confirmation_verifier
|
||||
.verify_for_unrooted_optimistic_slots(&bank7, &blockstore)
|
||||
|
@ -7,9 +7,10 @@ use crate::{
|
||||
repair_weight::RepairWeight,
|
||||
replay_stage::DUPLICATE_THRESHOLD,
|
||||
result::Result,
|
||||
serve_repair::{RepairType, ServeRepair},
|
||||
serve_repair::{RepairType, ServeRepair, REPAIR_PEERS_CACHE_CAPACITY},
|
||||
};
|
||||
use crossbeam_channel::{Receiver as CrossbeamReceiver, Sender as CrossbeamSender};
|
||||
use lru::LruCache;
|
||||
use solana_gossip::cluster_info::ClusterInfo;
|
||||
use solana_ledger::{
|
||||
blockstore::{Blockstore, SlotMeta},
|
||||
@ -193,6 +194,7 @@ impl RepairService {
|
||||
let mut last_stats = Instant::now();
|
||||
let duplicate_slot_repair_statuses: HashMap<Slot, DuplicateSlotRepairStatus> =
|
||||
HashMap::new();
|
||||
let mut peers_cache = LruCache::new(REPAIR_PEERS_CACHE_CAPACITY);
|
||||
|
||||
loop {
|
||||
if exit.load(Ordering::Relaxed) {
|
||||
@ -272,14 +274,13 @@ impl RepairService {
|
||||
)
|
||||
};
|
||||
|
||||
let mut cache = HashMap::new();
|
||||
let mut send_repairs_elapsed = Measure::start("send_repairs_elapsed");
|
||||
let mut outstanding_requests = outstanding_requests.write().unwrap();
|
||||
repairs.into_iter().for_each(|repair_request| {
|
||||
if let Ok((to, req)) = serve_repair.repair_request(
|
||||
cluster_slots,
|
||||
repair_request,
|
||||
&mut cache,
|
||||
&mut peers_cache,
|
||||
&mut repair_stats,
|
||||
&repair_info.repair_validators,
|
||||
&mut outstanding_requests,
|
||||
|
@ -138,6 +138,10 @@ pub struct ReplayTiming {
|
||||
start_leader_elapsed: u64,
|
||||
reset_bank_elapsed: u64,
|
||||
voting_elapsed: u64,
|
||||
vote_push_us: u64,
|
||||
vote_send_us: u64,
|
||||
generate_vote_us: u64,
|
||||
update_commitment_cache_us: u64,
|
||||
select_forks_elapsed: u64,
|
||||
compute_slot_stats_elapsed: u64,
|
||||
generate_new_bank_forks_elapsed: u64,
|
||||
@ -191,6 +195,17 @@ impl ReplayTiming {
|
||||
let now = timestamp();
|
||||
let elapsed_ms = now - self.last_print;
|
||||
if elapsed_ms > 1000 {
|
||||
datapoint_info!(
|
||||
"replay-loop-voting-stats",
|
||||
("vote_push_us", self.vote_push_us, i64),
|
||||
("vote_send_us", self.vote_send_us, i64),
|
||||
("generate_vote_us", self.generate_vote_us, i64),
|
||||
(
|
||||
"update_commitment_cache_us",
|
||||
self.update_commitment_cache_us,
|
||||
i64
|
||||
),
|
||||
);
|
||||
datapoint_info!(
|
||||
"replay-loop-timing-stats",
|
||||
("total_elapsed_us", elapsed_ms * 1000, i64),
|
||||
@ -583,6 +598,7 @@ impl ReplayStage {
|
||||
&mut unfrozen_gossip_verified_vote_hashes,
|
||||
&mut voted_signatures,
|
||||
&mut has_new_vote_been_rooted,
|
||||
&mut replay_timing,
|
||||
);
|
||||
};
|
||||
voting_time.stop();
|
||||
@ -1291,6 +1307,7 @@ impl ReplayStage {
|
||||
unfrozen_gossip_verified_vote_hashes: &mut UnfrozenGossipVerifiedVoteHashes,
|
||||
vote_signatures: &mut Vec<Signature>,
|
||||
has_new_vote_been_rooted: &mut bool,
|
||||
replay_timing: &mut ReplayTiming,
|
||||
) {
|
||||
if bank.is_empty() {
|
||||
inc_new_counter_info!("replay_stage-voted_empty_bank", 1);
|
||||
@ -1320,7 +1337,7 @@ impl ReplayStage {
|
||||
// get dropped.
|
||||
leader_schedule_cache.set_root(rooted_banks.last().unwrap());
|
||||
blockstore
|
||||
.set_roots(&rooted_slots)
|
||||
.set_roots(rooted_slots.iter())
|
||||
.expect("Ledger set roots failed");
|
||||
let highest_confirmed_root = Some(
|
||||
block_commitment_cache
|
||||
@ -1355,12 +1372,16 @@ impl ReplayStage {
|
||||
info!("new root {}", new_root);
|
||||
}
|
||||
|
||||
let mut update_commitment_cache_time = Measure::start("update_commitment_cache");
|
||||
Self::update_commitment_cache(
|
||||
bank.clone(),
|
||||
bank_forks.read().unwrap().root(),
|
||||
progress.get_fork_stats(bank.slot()).unwrap().total_stake,
|
||||
lockouts_sender,
|
||||
);
|
||||
update_commitment_cache_time.stop();
|
||||
replay_timing.update_commitment_cache_us += update_commitment_cache_time.as_us();
|
||||
|
||||
Self::push_vote(
|
||||
cluster_info,
|
||||
bank,
|
||||
@ -1371,6 +1392,7 @@ impl ReplayStage {
|
||||
switch_fork_decision,
|
||||
vote_signatures,
|
||||
*has_new_vote_been_rooted,
|
||||
replay_timing,
|
||||
);
|
||||
}
|
||||
|
||||
@ -1536,6 +1558,7 @@ impl ReplayStage {
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
fn push_vote(
|
||||
cluster_info: &ClusterInfo,
|
||||
bank: &Bank,
|
||||
@ -1546,7 +1569,9 @@ impl ReplayStage {
|
||||
switch_fork_decision: &SwitchForkDecision,
|
||||
vote_signatures: &mut Vec<Signature>,
|
||||
has_new_vote_been_rooted: bool,
|
||||
replay_timing: &mut ReplayTiming,
|
||||
) {
|
||||
let mut generate_time = Measure::start("generate_vote");
|
||||
let vote_tx = Self::generate_vote_tx(
|
||||
&cluster_info.keypair,
|
||||
bank,
|
||||
@ -1557,13 +1582,20 @@ impl ReplayStage {
|
||||
vote_signatures,
|
||||
has_new_vote_been_rooted,
|
||||
);
|
||||
generate_time.stop();
|
||||
replay_timing.generate_vote_us += generate_time.as_us();
|
||||
if let Some(vote_tx) = vote_tx {
|
||||
tower.refresh_last_vote_tx_blockhash(vote_tx.message.recent_blockhash);
|
||||
let mut send_time = Measure::start("send_vote");
|
||||
let _ = cluster_info.send_vote(
|
||||
&vote_tx,
|
||||
crate::banking_stage::next_leader_tpu(cluster_info, poh_recorder),
|
||||
);
|
||||
send_time.stop();
|
||||
let mut push_time = Measure::start("push_vote");
|
||||
cluster_info.push_vote(&tower.tower_slots(), vote_tx);
|
||||
push_time.stop();
|
||||
replay_timing.vote_push_us += push_time.as_us();
|
||||
}
|
||||
}
|
||||
|
||||
@ -2496,7 +2528,7 @@ mod tests {
|
||||
use solana_runtime::{
|
||||
accounts_background_service::AbsRequestSender,
|
||||
commitment::BlockCommitment,
|
||||
genesis_utils::{self, GenesisConfigInfo, ValidatorVoteKeypairs},
|
||||
genesis_utils::{GenesisConfigInfo, ValidatorVoteKeypairs},
|
||||
};
|
||||
use solana_sdk::{
|
||||
clock::NUM_CONSECUTIVE_LEADER_SLOTS,
|
||||
@ -2519,11 +2551,11 @@ mod tests {
|
||||
iter,
|
||||
sync::{atomic::AtomicU64, Arc, RwLock},
|
||||
};
|
||||
use trees::tr;
|
||||
use trees::{tr, Tree};
|
||||
|
||||
#[test]
|
||||
fn test_is_partition_detected() {
|
||||
let VoteSimulator { bank_forks, .. } = setup_forks();
|
||||
let (VoteSimulator { bank_forks, .. }, _) = setup_default_forks(1);
|
||||
let ancestors = bank_forks.read().unwrap().ancestors();
|
||||
// Last vote 1 is an ancestor of the heaviest slot 3, no partition
|
||||
assert!(!ReplayStage::is_partition_detected(&ancestors, 1, 3));
|
||||
@ -2540,8 +2572,8 @@ mod tests {
|
||||
struct ReplayBlockstoreComponents {
|
||||
blockstore: Arc<Blockstore>,
|
||||
validator_node_to_vote_keys: HashMap<Pubkey, Pubkey>,
|
||||
validator_authorized_voter_keypairs: HashMap<Pubkey, ValidatorVoteKeypairs>,
|
||||
my_vote_pubkey: Pubkey,
|
||||
validator_keypairs: HashMap<Pubkey, ValidatorVoteKeypairs>,
|
||||
my_pubkey: Pubkey,
|
||||
progress: ProgressMap,
|
||||
cluster_info: ClusterInfo,
|
||||
leader_schedule_cache: Arc<LeaderScheduleCache>,
|
||||
@ -2551,45 +2583,32 @@ mod tests {
|
||||
rpc_subscriptions: Arc<RpcSubscriptions>,
|
||||
}
|
||||
|
||||
fn replay_blockstore_components() -> ReplayBlockstoreComponents {
|
||||
fn replay_blockstore_components(forks: Option<Tree<Slot>>) -> ReplayBlockstoreComponents {
|
||||
// Setup blockstore
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
let blockstore = Arc::new(
|
||||
Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger"),
|
||||
);
|
||||
let validator_authorized_voter_keypairs: Vec<_> =
|
||||
(0..20).map(|_| ValidatorVoteKeypairs::new_rand()).collect();
|
||||
let (vote_simulator, blockstore) =
|
||||
setup_forks_from_tree(forks.unwrap_or_else(|| tr(0)), 20);
|
||||
|
||||
let validator_node_to_vote_keys: HashMap<Pubkey, Pubkey> =
|
||||
validator_authorized_voter_keypairs
|
||||
.iter()
|
||||
.map(|v| (v.node_keypair.pubkey(), v.vote_keypair.pubkey()))
|
||||
.collect();
|
||||
let GenesisConfigInfo { genesis_config, .. } =
|
||||
genesis_utils::create_genesis_config_with_vote_accounts(
|
||||
10_000,
|
||||
&validator_authorized_voter_keypairs,
|
||||
vec![100; validator_authorized_voter_keypairs.len()],
|
||||
);
|
||||
let VoteSimulator {
|
||||
validator_keypairs,
|
||||
progress,
|
||||
bank_forks,
|
||||
..
|
||||
} = vote_simulator;
|
||||
|
||||
let bank0 = Bank::new(&genesis_config);
|
||||
|
||||
// ProgressMap
|
||||
let mut progress = ProgressMap::default();
|
||||
progress.insert(
|
||||
0,
|
||||
ForkProgress::new_from_bank(
|
||||
&bank0,
|
||||
bank0.collector_id(),
|
||||
&Pubkey::default(),
|
||||
None,
|
||||
0,
|
||||
0,
|
||||
),
|
||||
);
|
||||
let blockstore = Arc::new(blockstore);
|
||||
let bank_forks = Arc::new(bank_forks);
|
||||
let validator_node_to_vote_keys: HashMap<Pubkey, Pubkey> = validator_keypairs
|
||||
.iter()
|
||||
.map(|(_, keypairs)| {
|
||||
(
|
||||
keypairs.node_keypair.pubkey(),
|
||||
keypairs.vote_keypair.pubkey(),
|
||||
)
|
||||
})
|
||||
.collect();
|
||||
|
||||
// ClusterInfo
|
||||
let my_keypairs = &validator_authorized_voter_keypairs[0];
|
||||
let my_keypairs = validator_keypairs.values().next().unwrap();
|
||||
let my_pubkey = my_keypairs.node_keypair.pubkey();
|
||||
let cluster_info = ClusterInfo::new(
|
||||
Node::new_localhost_with_pubkey(&my_pubkey).info,
|
||||
@ -2598,16 +2617,18 @@ mod tests {
|
||||
assert_eq!(my_pubkey, cluster_info.id());
|
||||
|
||||
// Leader schedule cache
|
||||
let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank0));
|
||||
let root_bank = bank_forks.read().unwrap().root_bank();
|
||||
let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&root_bank));
|
||||
|
||||
// PohRecorder
|
||||
let working_bank = bank_forks.read().unwrap().working_bank();
|
||||
let poh_recorder = Mutex::new(
|
||||
PohRecorder::new(
|
||||
bank0.tick_height(),
|
||||
bank0.last_blockhash(),
|
||||
bank0.slot(),
|
||||
working_bank.tick_height(),
|
||||
working_bank.last_blockhash(),
|
||||
working_bank.slot(),
|
||||
None,
|
||||
bank0.ticks_per_slot(),
|
||||
working_bank.ticks_per_slot(),
|
||||
&Pubkey::default(),
|
||||
&blockstore,
|
||||
&leader_schedule_cache,
|
||||
@ -2617,14 +2638,11 @@ mod tests {
|
||||
.0,
|
||||
);
|
||||
|
||||
// BankForks
|
||||
let bank_forks = Arc::new(RwLock::new(BankForks::new(bank0)));
|
||||
|
||||
// Tower
|
||||
let my_vote_pubkey = my_keypairs.vote_keypair.pubkey();
|
||||
let tower = Tower::new_from_bankforks(
|
||||
&bank_forks.read().unwrap(),
|
||||
&ledger_path,
|
||||
blockstore.ledger_path(),
|
||||
&cluster_info.id(),
|
||||
&my_vote_pubkey,
|
||||
);
|
||||
@ -2640,17 +2658,11 @@ mod tests {
|
||||
optimistically_confirmed_bank,
|
||||
));
|
||||
|
||||
let validator_authorized_voter_keypairs: HashMap<Pubkey, ValidatorVoteKeypairs> =
|
||||
validator_authorized_voter_keypairs
|
||||
.into_iter()
|
||||
.map(|keys| (keys.vote_keypair.pubkey(), keys))
|
||||
.collect();
|
||||
|
||||
ReplayBlockstoreComponents {
|
||||
blockstore,
|
||||
validator_node_to_vote_keys,
|
||||
validator_authorized_voter_keypairs,
|
||||
my_vote_pubkey,
|
||||
validator_keypairs,
|
||||
my_pubkey,
|
||||
progress,
|
||||
cluster_info,
|
||||
leader_schedule_cache,
|
||||
@ -2671,7 +2683,7 @@ mod tests {
|
||||
leader_schedule_cache,
|
||||
rpc_subscriptions,
|
||||
..
|
||||
} = replay_blockstore_components();
|
||||
} = replay_blockstore_components(None);
|
||||
|
||||
// Insert a non-root bank so that the propagation logic will update this
|
||||
// bank
|
||||
@ -4267,11 +4279,12 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_purge_unconfirmed_duplicate_slot() {
|
||||
let (vote_simulator, _) = setup_default_forks(2);
|
||||
let VoteSimulator {
|
||||
bank_forks,
|
||||
mut progress,
|
||||
..
|
||||
} = setup_forks();
|
||||
} = vote_simulator;
|
||||
let mut descendants = bank_forks.read().unwrap().descendants().clone();
|
||||
let mut ancestors = bank_forks.read().unwrap().ancestors();
|
||||
|
||||
@ -4331,7 +4344,7 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_purge_ancestors_descendants() {
|
||||
let VoteSimulator { bank_forks, .. } = setup_forks();
|
||||
let (VoteSimulator { bank_forks, .. }, _) = setup_default_forks(1);
|
||||
|
||||
// Purge branch rooted at slot 2
|
||||
let mut descendants = bank_forks.read().unwrap().descendants().clone();
|
||||
@ -4389,7 +4402,7 @@ mod tests {
|
||||
bank_forks,
|
||||
leader_schedule_cache,
|
||||
..
|
||||
} = replay_blockstore_components();
|
||||
} = replay_blockstore_components(None);
|
||||
|
||||
let root_bank = bank_forks.read().unwrap().root_bank();
|
||||
let my_pubkey = leader_schedule_cache
|
||||
@ -4583,13 +4596,16 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_gossip_vote_doesnt_affect_fork_choice() {
|
||||
let VoteSimulator {
|
||||
bank_forks,
|
||||
mut heaviest_subtree_fork_choice,
|
||||
mut latest_validator_votes_for_frozen_banks,
|
||||
vote_pubkeys,
|
||||
..
|
||||
} = setup_forks();
|
||||
let (
|
||||
VoteSimulator {
|
||||
bank_forks,
|
||||
mut heaviest_subtree_fork_choice,
|
||||
mut latest_validator_votes_for_frozen_banks,
|
||||
vote_pubkeys,
|
||||
..
|
||||
},
|
||||
_,
|
||||
) = setup_default_forks(1);
|
||||
|
||||
let vote_pubkey = vote_pubkeys[0];
|
||||
let mut unfrozen_gossip_verified_vote_hashes = UnfrozenGossipVerifiedVoteHashes::default();
|
||||
@ -4625,14 +4641,14 @@ mod tests {
|
||||
#[test]
|
||||
fn test_replay_stage_refresh_last_vote() {
|
||||
let ReplayBlockstoreComponents {
|
||||
mut validator_authorized_voter_keypairs,
|
||||
mut validator_keypairs,
|
||||
cluster_info,
|
||||
poh_recorder,
|
||||
bank_forks,
|
||||
mut tower,
|
||||
my_vote_pubkey,
|
||||
my_pubkey,
|
||||
..
|
||||
} = replay_blockstore_components();
|
||||
} = replay_blockstore_components(None);
|
||||
|
||||
let mut last_vote_refresh_time = LastVoteRefreshTime {
|
||||
last_refresh_time: Instant::now(),
|
||||
@ -4642,11 +4658,9 @@ mod tests {
|
||||
let mut voted_signatures = vec![];
|
||||
|
||||
let my_vote_keypair = vec![Arc::new(
|
||||
validator_authorized_voter_keypairs
|
||||
.remove(&my_vote_pubkey)
|
||||
.unwrap()
|
||||
.vote_keypair,
|
||||
validator_keypairs.remove(&my_pubkey).unwrap().vote_keypair,
|
||||
)];
|
||||
let my_vote_pubkey = my_vote_keypair[0].pubkey();
|
||||
let bank0 = bank_forks.read().unwrap().get(0).unwrap().clone();
|
||||
|
||||
fn fill_bank_with_ticks(bank: &Bank) {
|
||||
@ -4673,6 +4687,7 @@ mod tests {
|
||||
&SwitchForkDecision::SameFork,
|
||||
&mut voted_signatures,
|
||||
has_new_vote_been_rooted,
|
||||
&mut ReplayTiming::default(),
|
||||
);
|
||||
let mut cursor = Cursor::default();
|
||||
let (_, votes) = cluster_info.get_votes(&mut cursor);
|
||||
@ -4724,6 +4739,7 @@ mod tests {
|
||||
&SwitchForkDecision::SameFork,
|
||||
&mut voted_signatures,
|
||||
has_new_vote_been_rooted,
|
||||
&mut ReplayTiming::default(),
|
||||
);
|
||||
let (_, votes) = cluster_info.get_votes(&mut cursor);
|
||||
assert_eq!(votes.len(), 1);
|
||||
@ -4902,7 +4918,16 @@ mod tests {
|
||||
)
|
||||
}
|
||||
|
||||
fn setup_forks() -> VoteSimulator {
|
||||
fn setup_forks_from_tree(tree: Tree<Slot>, num_keys: usize) -> (VoteSimulator, Blockstore) {
|
||||
let mut vote_simulator = VoteSimulator::new(num_keys);
|
||||
vote_simulator.fill_bank_forks(tree.clone(), &HashMap::new());
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
let blockstore = Blockstore::open(&ledger_path).unwrap();
|
||||
blockstore.add_tree(tree, false, true, 2, Hash::default());
|
||||
(vote_simulator, blockstore)
|
||||
}
|
||||
|
||||
fn setup_default_forks(num_keys: usize) -> (VoteSimulator, Blockstore) {
|
||||
/*
|
||||
Build fork structure:
|
||||
|
||||
@ -4917,12 +4942,9 @@ mod tests {
|
||||
|
|
||||
slot 6
|
||||
*/
|
||||
let forks = tr(0) / (tr(1) / (tr(2) / (tr(4))) / (tr(3) / (tr(5) / (tr(6)))));
|
||||
|
||||
let mut vote_simulator = VoteSimulator::new(1);
|
||||
vote_simulator.fill_bank_forks(forks, &HashMap::new());
|
||||
|
||||
vote_simulator
|
||||
let tree = tr(0) / (tr(1) / (tr(2) / (tr(4))) / (tr(3) / (tr(5) / (tr(6)))));
|
||||
setup_forks_from_tree(tree, num_keys)
|
||||
}
|
||||
|
||||
fn check_map_eq<K: Eq + std::hash::Hash + std::fmt::Debug, T: PartialEq + std::fmt::Debug>(
|
||||
|
@ -3,6 +3,7 @@
|
||||
|
||||
use crate::{
|
||||
cluster_info_vote_listener::VerifiedVoteReceiver,
|
||||
cluster_nodes::ClusterNodes,
|
||||
cluster_slots::ClusterSlots,
|
||||
cluster_slots_service::{ClusterSlotsService, ClusterSlotsUpdateReceiver},
|
||||
completed_data_sets_service::CompletedDataSetsSender,
|
||||
@ -13,10 +14,7 @@ use crate::{
|
||||
use crossbeam_channel::{Receiver, Sender};
|
||||
use lru::LruCache;
|
||||
use solana_client::rpc_response::SlotUpdate;
|
||||
use solana_gossip::{
|
||||
cluster_info::{compute_retransmit_peers, ClusterInfo, DATA_PLANE_FANOUT},
|
||||
contact_info::ContactInfo,
|
||||
};
|
||||
use solana_gossip::cluster_info::{ClusterInfo, DATA_PLANE_FANOUT};
|
||||
use solana_ledger::shred::{get_shred_slot_index_type, ShredFetchStats};
|
||||
use solana_ledger::{
|
||||
blockstore::{Blockstore, CompletedSlotsReceiver},
|
||||
@ -33,7 +31,6 @@ use solana_runtime::{bank::Bank, bank_forks::BankForks};
|
||||
use solana_sdk::{clock::Slot, epoch_schedule::EpochSchedule, pubkey::Pubkey, timing::timestamp};
|
||||
use solana_streamer::streamer::PacketReceiver;
|
||||
use std::{
|
||||
cmp,
|
||||
collections::hash_set::HashSet,
|
||||
collections::{BTreeMap, BTreeSet, HashMap},
|
||||
net::UdpSocket,
|
||||
@ -217,12 +214,6 @@ fn update_retransmit_stats(
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
struct EpochStakesCache {
|
||||
peers: Vec<ContactInfo>,
|
||||
stakes_and_index: Vec<(u64, usize)>,
|
||||
}
|
||||
|
||||
use crate::packet_hasher::PacketHasher;
|
||||
// Map of shred (slot, index, is_data) => list of hash values seen for that key.
|
||||
pub type ShredFilter = LruCache<(Slot, u32, bool), Vec<u64>>;
|
||||
@ -283,33 +274,6 @@ fn check_if_first_shred_received(
|
||||
}
|
||||
}
|
||||
|
||||
// Drops shred slot leader from retransmit peers.
|
||||
// TODO: decide which bank should be used here.
|
||||
fn get_retransmit_peers(
|
||||
self_pubkey: Pubkey,
|
||||
shred_slot: Slot,
|
||||
leader_schedule_cache: &LeaderScheduleCache,
|
||||
bank: &Bank,
|
||||
stakes_cache: &EpochStakesCache,
|
||||
) -> Vec<(u64 /*stakes*/, usize /*index*/)> {
|
||||
match leader_schedule_cache.slot_leader_at(shred_slot, Some(bank)) {
|
||||
None => {
|
||||
error!("unknown leader for shred slot");
|
||||
stakes_cache.stakes_and_index.clone()
|
||||
}
|
||||
Some(pubkey) if pubkey == self_pubkey => {
|
||||
error!("retransmit from slot leader: {}", pubkey);
|
||||
stakes_cache.stakes_and_index.clone()
|
||||
}
|
||||
Some(pubkey) => stakes_cache
|
||||
.stakes_and_index
|
||||
.iter()
|
||||
.filter(|(_, i)| stakes_cache.peers[*i].id != pubkey)
|
||||
.copied()
|
||||
.collect(),
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
fn retransmit(
|
||||
bank_forks: &RwLock<BankForks>,
|
||||
@ -319,7 +283,7 @@ fn retransmit(
|
||||
sock: &UdpSocket,
|
||||
id: u32,
|
||||
stats: &RetransmitStats,
|
||||
epoch_stakes_cache: &RwLock<EpochStakesCache>,
|
||||
cluster_nodes: &RwLock<ClusterNodes<RetransmitStage>>,
|
||||
last_peer_update: &AtomicU64,
|
||||
shreds_received: &Mutex<ShredFilterAndHasher>,
|
||||
max_slots: &MaxSlots,
|
||||
@ -357,20 +321,17 @@ fn retransmit(
|
||||
&& last_peer_update.compare_and_swap(last, now, Ordering::Relaxed) == last
|
||||
{
|
||||
let epoch_staked_nodes = r_bank.epoch_staked_nodes(bank_epoch);
|
||||
let (peers, stakes_and_index) =
|
||||
cluster_info.sorted_retransmit_peers_and_stakes(epoch_staked_nodes.as_ref());
|
||||
{
|
||||
let mut epoch_stakes_cache = epoch_stakes_cache.write().unwrap();
|
||||
epoch_stakes_cache.peers = peers;
|
||||
epoch_stakes_cache.stakes_and_index = stakes_and_index;
|
||||
}
|
||||
*cluster_nodes.write().unwrap() = ClusterNodes::<RetransmitStage>::new(
|
||||
cluster_info,
|
||||
&epoch_staked_nodes.unwrap_or_default(),
|
||||
);
|
||||
{
|
||||
let mut sr = shreds_received.lock().unwrap();
|
||||
sr.0.clear();
|
||||
sr.1.reset();
|
||||
}
|
||||
}
|
||||
let r_epoch_stakes_cache = epoch_stakes_cache.read().unwrap();
|
||||
let cluster_nodes = cluster_nodes.read().unwrap();
|
||||
let mut peers_len = 0;
|
||||
epoch_cache_update.stop();
|
||||
|
||||
@ -411,52 +372,19 @@ fn retransmit(
|
||||
}
|
||||
|
||||
let mut compute_turbine_peers = Measure::start("turbine_start");
|
||||
let stakes_and_index = get_retransmit_peers(
|
||||
my_id,
|
||||
shred_slot,
|
||||
leader_schedule_cache,
|
||||
r_bank.deref(),
|
||||
r_epoch_stakes_cache.deref(),
|
||||
);
|
||||
let (my_index, shuffled_stakes_and_index) = ClusterInfo::shuffle_peers_and_index(
|
||||
&my_id,
|
||||
&r_epoch_stakes_cache.peers,
|
||||
&stakes_and_index,
|
||||
packet.meta.seed,
|
||||
);
|
||||
let slot_leader = leader_schedule_cache.slot_leader_at(shred_slot, Some(r_bank.deref()));
|
||||
let (neighbors, children) =
|
||||
cluster_nodes.get_retransmit_peers(packet.meta.seed, DATA_PLANE_FANOUT, slot_leader);
|
||||
// If the node is on the critical path (i.e. the first node in each
|
||||
// neighborhood), then we expect that the packet arrives at tvu socket
|
||||
// as opposed to tvu-forwards. If this is not the case, then the
|
||||
// turbine broadcast/retransmit tree is mismatched across nodes.
|
||||
let anchor_node = my_index % DATA_PLANE_FANOUT == 0;
|
||||
let anchor_node = neighbors[0].id == my_id;
|
||||
if packet.meta.forward == anchor_node {
|
||||
// TODO: Consider forwarding the packet to the root node here.
|
||||
retransmit_tree_mismatch += 1;
|
||||
}
|
||||
peers_len = cmp::max(peers_len, shuffled_stakes_and_index.len());
|
||||
// split off the indexes, we don't need the stakes anymore
|
||||
let indexes: Vec<_> = shuffled_stakes_and_index
|
||||
.into_iter()
|
||||
.map(|(_, index)| index)
|
||||
.collect();
|
||||
debug_assert_eq!(my_id, r_epoch_stakes_cache.peers[indexes[my_index]].id);
|
||||
|
||||
let (neighbors, children) = compute_retransmit_peers(DATA_PLANE_FANOUT, my_index, &indexes);
|
||||
let neighbors: Vec<_> = neighbors
|
||||
.into_iter()
|
||||
.filter_map(|index| {
|
||||
let peer = &r_epoch_stakes_cache.peers[index];
|
||||
if peer.id == my_id {
|
||||
None
|
||||
} else {
|
||||
Some(peer)
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
let children: Vec<_> = children
|
||||
.into_iter()
|
||||
.map(|index| &r_epoch_stakes_cache.peers[index])
|
||||
.collect();
|
||||
peers_len = peers_len.max(cluster_nodes.num_peers());
|
||||
compute_turbine_peers.stop();
|
||||
compute_turbine_peers_total += compute_turbine_peers.as_us();
|
||||
|
||||
@ -471,7 +399,13 @@ fn retransmit(
|
||||
// children and also tvu_forward socket of its neighbors. Otherwise it
|
||||
// should only forward to tvu_forward socket of its children.
|
||||
if anchor_node {
|
||||
ClusterInfo::retransmit_to(&neighbors, packet, sock, /*forward socket=*/ true);
|
||||
// First neighbor is this node itself, so skip it.
|
||||
ClusterInfo::retransmit_to(
|
||||
&neighbors[1..],
|
||||
packet,
|
||||
sock,
|
||||
/*forward socket=*/ true,
|
||||
);
|
||||
}
|
||||
ClusterInfo::retransmit_to(
|
||||
&children,
|
||||
@ -541,7 +475,7 @@ pub fn retransmitter(
|
||||
let r = r.clone();
|
||||
let cluster_info = cluster_info.clone();
|
||||
let stats = stats.clone();
|
||||
let epoch_stakes_cache = Arc::new(RwLock::new(EpochStakesCache::default()));
|
||||
let cluster_nodes = Arc::default();
|
||||
let last_peer_update = Arc::new(AtomicU64::new(0));
|
||||
let shreds_received = shreds_received.clone();
|
||||
let max_slots = max_slots.clone();
|
||||
@ -561,7 +495,7 @@ pub fn retransmitter(
|
||||
&sockets[s],
|
||||
s as u32,
|
||||
&stats,
|
||||
&epoch_stakes_cache,
|
||||
&cluster_nodes,
|
||||
&last_peer_update,
|
||||
&shreds_received,
|
||||
&max_slots,
|
||||
|
@ -55,6 +55,7 @@ impl RewardsRecorderService {
|
||||
lamports: reward_info.lamports,
|
||||
post_balance: reward_info.post_balance,
|
||||
reward_type: Some(reward_info.reward_type),
|
||||
commission: reward_info.commission,
|
||||
})
|
||||
.collect();
|
||||
|
||||
|
@ -6,7 +6,11 @@ use crate::{
|
||||
result::{Error, Result},
|
||||
};
|
||||
use bincode::serialize;
|
||||
use rand::distributions::{Distribution, WeightedIndex};
|
||||
use lru::LruCache;
|
||||
use rand::{
|
||||
distributions::{Distribution, WeightedError, WeightedIndex},
|
||||
Rng,
|
||||
};
|
||||
use solana_gossip::{
|
||||
cluster_info::{ClusterInfo, ClusterInfoError},
|
||||
contact_info::ContactInfo,
|
||||
@ -27,7 +31,7 @@ use solana_sdk::{
|
||||
};
|
||||
use solana_streamer::streamer::{PacketReceiver, PacketSender};
|
||||
use std::{
|
||||
collections::{hash_map::Entry, HashMap, HashSet},
|
||||
collections::HashSet,
|
||||
net::SocketAddr,
|
||||
sync::atomic::{AtomicBool, Ordering},
|
||||
sync::{Arc, RwLock},
|
||||
@ -37,6 +41,10 @@ use std::{
|
||||
|
||||
/// the number of slots to respond with when responding to `Orphan` requests
|
||||
pub const MAX_ORPHAN_REPAIR_RESPONSES: usize = 10;
|
||||
// Number of slots to cache their respective repair peers and sampling weights.
|
||||
pub(crate) const REPAIR_PEERS_CACHE_CAPACITY: usize = 128;
|
||||
// Limit cache entries ttl in order to avoid re-using outdated data.
|
||||
const REPAIR_PEERS_CACHE_TTL: Duration = Duration::from_secs(10);
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, Copy, Hash, PartialEq, Eq)]
|
||||
pub enum RepairType {
|
||||
@ -107,7 +115,38 @@ pub struct ServeRepair {
|
||||
cluster_info: Arc<ClusterInfo>,
|
||||
}
|
||||
|
||||
type RepairCache = HashMap<Slot, (Vec<ContactInfo>, WeightedIndex<u64>)>;
|
||||
// Cache entry for repair peers for a slot.
|
||||
pub(crate) struct RepairPeers {
|
||||
asof: Instant,
|
||||
peers: Vec<(Pubkey, /*ContactInfo.serve_repair:*/ SocketAddr)>,
|
||||
weighted_index: WeightedIndex<u64>,
|
||||
}
|
||||
|
||||
impl RepairPeers {
|
||||
fn new(asof: Instant, peers: &[ContactInfo], weights: &[u64]) -> Result<Self> {
|
||||
if peers.is_empty() {
|
||||
return Err(Error::from(ClusterInfoError::NoPeers));
|
||||
}
|
||||
if peers.len() != weights.len() {
|
||||
return Err(Error::from(WeightedError::InvalidWeight));
|
||||
}
|
||||
let weighted_index = WeightedIndex::new(weights)?;
|
||||
let peers = peers
|
||||
.iter()
|
||||
.map(|peer| (peer.id, peer.serve_repair))
|
||||
.collect();
|
||||
Ok(Self {
|
||||
asof,
|
||||
peers,
|
||||
weighted_index,
|
||||
})
|
||||
}
|
||||
|
||||
fn sample<R: Rng>(&self, rng: &mut R) -> (Pubkey, SocketAddr) {
|
||||
let index = self.weighted_index.sample(rng);
|
||||
self.peers[index]
|
||||
}
|
||||
}
|
||||
|
||||
impl ServeRepair {
|
||||
/// Without a valid keypair gossip will not function. Only useful for tests.
|
||||
@ -396,11 +435,11 @@ impl ServeRepair {
|
||||
Ok(out)
|
||||
}
|
||||
|
||||
pub fn repair_request(
|
||||
pub(crate) fn repair_request(
|
||||
&self,
|
||||
cluster_slots: &ClusterSlots,
|
||||
repair_request: RepairType,
|
||||
cache: &mut RepairCache,
|
||||
peers_cache: &mut LruCache<Slot, RepairPeers>,
|
||||
repair_stats: &mut RepairStats,
|
||||
repair_validators: &Option<HashSet<Pubkey>>,
|
||||
outstanding_requests: &mut OutstandingRepairs,
|
||||
@ -408,25 +447,21 @@ impl ServeRepair {
|
||||
// find a peer that appears to be accepting replication and has the desired slot, as indicated
|
||||
// by a valid tvu port location
|
||||
let slot = repair_request.slot();
|
||||
let (repair_peers, weighted_index) = match cache.entry(slot) {
|
||||
Entry::Occupied(entry) => entry.into_mut(),
|
||||
Entry::Vacant(entry) => {
|
||||
let repair_peers = match peers_cache.get(&slot) {
|
||||
Some(entry) if entry.asof.elapsed() < REPAIR_PEERS_CACHE_TTL => entry,
|
||||
_ => {
|
||||
peers_cache.pop(&slot);
|
||||
let repair_peers = self.repair_peers(repair_validators, slot);
|
||||
if repair_peers.is_empty() {
|
||||
return Err(Error::from(ClusterInfoError::NoPeers));
|
||||
}
|
||||
let weights = cluster_slots.compute_weights(slot, &repair_peers);
|
||||
debug_assert_eq!(weights.len(), repair_peers.len());
|
||||
let weighted_index = WeightedIndex::new(weights)?;
|
||||
entry.insert((repair_peers, weighted_index))
|
||||
let repair_peers = RepairPeers::new(Instant::now(), &repair_peers, &weights)?;
|
||||
peers_cache.put(slot, repair_peers);
|
||||
peers_cache.get(&slot).unwrap()
|
||||
}
|
||||
};
|
||||
let n = weighted_index.sample(&mut rand::thread_rng());
|
||||
let addr = repair_peers[n].serve_repair; // send the request to the peer's serve_repair port
|
||||
let (peer, addr) = repair_peers.sample(&mut rand::thread_rng());
|
||||
let nonce =
|
||||
outstanding_requests.add_request(repair_request, solana_sdk::timing::timestamp());
|
||||
let repair_peer_id = repair_peers[n].id;
|
||||
let out = self.map_repair_request(&repair_request, &repair_peer_id, repair_stats, nonce)?;
|
||||
let out = self.map_repair_request(&repair_request, &peer, repair_stats, nonce)?;
|
||||
Ok((addr, out))
|
||||
}
|
||||
|
||||
@ -772,7 +807,7 @@ mod tests {
|
||||
let rv = serve_repair.repair_request(
|
||||
&cluster_slots,
|
||||
RepairType::Shred(0, 0),
|
||||
&mut HashMap::new(),
|
||||
&mut LruCache::new(100),
|
||||
&mut RepairStats::default(),
|
||||
&None,
|
||||
&mut outstanding_requests,
|
||||
@ -800,7 +835,7 @@ mod tests {
|
||||
.repair_request(
|
||||
&cluster_slots,
|
||||
RepairType::Shred(0, 0),
|
||||
&mut HashMap::new(),
|
||||
&mut LruCache::new(100),
|
||||
&mut RepairStats::default(),
|
||||
&None,
|
||||
&mut outstanding_requests,
|
||||
@ -834,7 +869,7 @@ mod tests {
|
||||
.repair_request(
|
||||
&cluster_slots,
|
||||
RepairType::Shred(0, 0),
|
||||
&mut HashMap::new(),
|
||||
&mut LruCache::new(100),
|
||||
&mut RepairStats::default(),
|
||||
&None,
|
||||
&mut outstanding_requests,
|
||||
@ -1015,7 +1050,7 @@ mod tests {
|
||||
.repair_request(
|
||||
&cluster_slots,
|
||||
RepairType::Shred(0, 0),
|
||||
&mut HashMap::new(),
|
||||
&mut LruCache::new(100),
|
||||
&mut RepairStats::default(),
|
||||
&trusted_validators,
|
||||
&mut OutstandingRepairs::default(),
|
||||
@ -1032,7 +1067,7 @@ mod tests {
|
||||
.repair_request(
|
||||
&cluster_slots,
|
||||
RepairType::Shred(0, 0),
|
||||
&mut HashMap::new(),
|
||||
&mut LruCache::new(100),
|
||||
&mut RepairStats::default(),
|
||||
&trusted_validators,
|
||||
&mut OutstandingRepairs::default(),
|
||||
@ -1053,7 +1088,7 @@ mod tests {
|
||||
.repair_request(
|
||||
&cluster_slots,
|
||||
RepairType::Shred(0, 0),
|
||||
&mut HashMap::new(),
|
||||
&mut LruCache::new(100),
|
||||
&mut RepairStats::default(),
|
||||
&None,
|
||||
&mut OutstandingRepairs::default(),
|
||||
|
@ -1430,17 +1430,22 @@ fn report_target_features() {
|
||||
not(target_os = "macos")
|
||||
))]
|
||||
{
|
||||
// Validator binaries built on a machine with AVX support will generate invalid opcodes
|
||||
// when run on machines without AVX causing a non-obvious process abort. Instead detect
|
||||
// the mismatch and error cleanly.
|
||||
if is_x86_feature_detected!("avx") {
|
||||
info!("AVX detected");
|
||||
} else {
|
||||
error!(
|
||||
"Your machine does not have AVX support, please rebuild from source on your machine"
|
||||
);
|
||||
abort();
|
||||
}
|
||||
unsafe { check_avx() };
|
||||
}
|
||||
}
|
||||
|
||||
// Validator binaries built on a machine with AVX support will generate invalid opcodes
|
||||
// when run on machines without AVX causing a non-obvious process abort. Instead detect
|
||||
// the mismatch and error cleanly.
|
||||
#[target_feature(enable = "avx")]
|
||||
unsafe fn check_avx() {
|
||||
if is_x86_feature_detected!("avx") {
|
||||
info!("AVX detected");
|
||||
} else {
|
||||
error!(
|
||||
"Your machine does not have AVX support, please rebuild from source on your machine"
|
||||
);
|
||||
abort();
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-crate-features"
|
||||
version = "1.7.4"
|
||||
version = "1.7.5"
|
||||
description = "Solana Crate Features"
|
||||
authors = ["Solana Maintainers <maintainers@solana.foundation>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
|
@ -88,10 +88,6 @@ module.exports = {
|
||||
label: "Introduction",
|
||||
to: "introduction",
|
||||
},
|
||||
{
|
||||
label: "Tour de SOL",
|
||||
to: "tour-de-sol",
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
|
@ -56,7 +56,6 @@ $ solana-validator \
|
||||
## Testnet(测试网)
|
||||
|
||||
- Testnet是我们在实时群集上重点测试最新发布功能的地方,尤其侧重于网络性能,稳定性和验证程序行为。
|
||||
- 集群[Tour de SOL](tour-de-sol.md)计划在Testnet上运行,在该计划中,我们接受恶意行为和对网络的攻击,以帮助我们发现和消除错误或网络漏洞。
|
||||
- Testnet代币**不是真实的**
|
||||
- Testnet可能会重置账本。
|
||||
- Testnet包括用于空投的代币水龙头,用于应用程序测试
|
||||
|
@ -1,11 +0,0 @@
|
||||
---
|
||||
title: 介绍
|
||||
---
|
||||
|
||||
## 欢迎!
|
||||
|
||||
本指南包括了关于如何参加 Solana Tour de SOL 的信息。 遇到困难? 需要提出看法? 请发邮件到 ryan@solana.com
|
||||
|
||||
### [了解更多关于 Tour de SOL 的信息](https://solana.com/tds/)
|
||||
|
||||
如果您还没有注册,请先在 [https://solana.com/tds/](https://solana.com/tds/) 填写表格。
|
@ -1 +0,0 @@
|
||||
# 参与方法
|
@ -1,7 +0,0 @@
|
||||
---
|
||||
title: 创建验证节点的步骤
|
||||
---
|
||||
|
||||
要创建Solana验证节点,请遵循针对[Testnet集群](../../clusters.md)的常规[验证节点工作流程](../../running-validator/validator-start.md)。
|
||||
|
||||
请注意,运行每个Epoch的进程都会自动质押到Testnet验证节点。 如果您的验证节点运行正常,则将在两天内质押生效(如果长时间离线则自动取消质押)。
|
@ -1,44 +0,0 @@
|
||||
---
|
||||
title: 创建验证节点公钥
|
||||
---
|
||||
|
||||
您需要先注册才能参加到网络中。 请查看 [注册信息](../registration/how-to-register.md)。
|
||||
|
||||
为了获得 SOL 奖励,您需要在keybase.io帐户下发布验证者的身份公共密钥。
|
||||
|
||||
## **生成密钥对**
|
||||
|
||||
1. 如果还没有密钥对,请运行以下命令来为验证节点生成一个:
|
||||
|
||||
```bash
|
||||
solana-keygen new -o ~/validator-keypair.json
|
||||
```
|
||||
|
||||
2. 现在可以运行以下命令查看身份公共密钥:
|
||||
|
||||
```bash
|
||||
solana-keygen pubkey ~/validator-keypair.json
|
||||
```
|
||||
|
||||
> 注意:“validator-keypair.json”文件也是您的 \(ed25519\) 私钥。
|
||||
|
||||
验证节点身份密钥独特识别了您在网络中的验证节点。 **备份此信息至关重要。**
|
||||
|
||||
如果您不备份此信息,那么如果您无法访问验证节点的话,将无法对其进行恢复。 如果发生这种情况,您将失去SOL TOO的奖励。
|
||||
|
||||
要备份您的验证节点识别密钥, **请备份您的"validator-keypair.json" 文件到一个安全位置。**
|
||||
|
||||
## 将您的Solana公钥链接到Keybase帐户
|
||||
|
||||
您必须将Solana pubkey链接到Keybase.io帐户。 以下说明介绍了如何通过在服务器上安装Keybase来执行此操作。
|
||||
|
||||
1. 在您的机器上安装[Keybase](https://keybase.io/download)。
|
||||
2. 登录到服务器上的Keybase帐户。 如果您还没有Keybase帐户,请先创建一个。 以下是基本的[Keybase CLI命令列表](https://keybase.io/docs/command_line/basics)。
|
||||
3. 在公用文件夹中创建一个Solana目录:`mkdir /keybase/public/<KEYBASE_USERNAME>/solana`
|
||||
4. 在Keybase公共文件夹中按以下格式创建一个空文件,来发布验证者的身份公共密钥:`/keybase/public/<KEYBASE_USERNAME>/solana/validator-<BASE58_PUBKEY>`。 例如:
|
||||
|
||||
```bash
|
||||
touch /keybase/public/<KEYBASE_USERNAME>/solana/validator-<BASE58_PUBKEY>
|
||||
```
|
||||
|
||||
5. 要检查公钥是否已成功发布,请确保您在 `https://keybase.pub/<KEYBASE_USERNAME>/solana/validator-<BASE58_PUBKEY>` 看到它。
|
@ -1,11 +0,0 @@
|
||||
---
|
||||
title: 运行验证节点的要求
|
||||
---
|
||||
|
||||
## 硬件
|
||||
|
||||
请参考 [建议的硬件配置](../../running-validator/validator-reqs.md)。
|
||||
|
||||
## 软件
|
||||
|
||||
- 我们在Ubuntu 04/18上进行开发和运行。 在Ubuntu 04/16上运行时,某些用户会遇到一些问题
|
@ -1 +0,0 @@
|
||||
# 注册
|
@ -1,7 +0,0 @@
|
||||
---
|
||||
title: 保密规定
|
||||
---
|
||||
|
||||
请参看****第 8 章[** TOUR DE SOL 参与规则的 **](https://drive.google.com/file/d/15ueLG6VJoQ5Hx4rnpjFeuL3pG5DbrBbE/view) ** 保密规定。**
|
||||
|
||||
Solana 无意在 Tour de SOL 共享任何机密信息。 我们将会通过口头、电子邮件等方式将信息分享出去。 除非明确地调出信息,否则不应将信息视为机密信息,我们欢迎您的分享。
|
@ -1,25 +0,0 @@
|
||||
---
|
||||
title: 如何注册
|
||||
---
|
||||
|
||||
#### 1) 注册表
|
||||
|
||||
[请在此提交注册表](https://forms.gle/gQYLozj5u7yKU3HG6)
|
||||
|
||||
#### 2) KYC/AML(通过 Coinlist)
|
||||
|
||||
[请在这里注册 KYC/AML + 参与协议](https://tsm.coinlist.co/solana-staking)
|
||||
|
||||
_如果您先前已经完成了 SLP 或 TdS 的 KYC/AML,那么同一个实体/个人就不需要这个步骤了。 我们不接受美国 实体或个人。_
|
||||
|
||||
#### 3) 加入我们的Discord
|
||||
|
||||
所有 Tour de SOL 验证程序**都需要**加入,因为这是我们的主要通信渠道:https://discord.gg/N3mqAfa
|
||||
|
||||
### 下一步
|
||||
|
||||
- 查看我们的文档来熟悉如何[运行一个验证节点](../../running-validator.md)
|
||||
|
||||
- 完成注册后,您将收到一封电子邮件,说明您要完成了注册流程。
|
||||
|
||||
- 在 Discord 上相见!
|
@ -1,29 +0,0 @@
|
||||
---
|
||||
title: 奖励
|
||||
---
|
||||
|
||||
## 奖励计算 <a id="how-are-rewards-calculated"></a>
|
||||
|
||||
奖励将根据 [论坛的这个帖子](https://forums.solana.com/t/tour-de-sol-stage-1-preliminary-compensation-design/79) 和 [这个表格](https://docs.google.com/spreadsheets/d/11puBSw2THdO4wU-uyDEic-D03jg4ZAooVpcZU0w_4gI/edit#gid=218406032) 中描述的奖励规则来计算。
|
||||
|
||||
另外请查看 [TOUR DE SOL 参与条款](https://drive.google.com/file/d/15ueLG6VJoQ5Hx4rnpjFeuL3pG5DbrBbE/view) 中的“2\(f\) Tour de Sol 详情”,来了解更多奖励详情。
|
||||
|
||||
## 领取奖励的要求 <a id="what-are-the-requirements-to-receive-rewards"></a>
|
||||
|
||||
参与者必须已经签署 Tour de SOL 参与协议,并通过 CoinList 平台以个人身份通过KYC/AML,并在参加之前填写了W-8 BEN或W-9纳税表格\(取决于您的居住地\) 来参与到 Tour 中。 完成注册后,参与者可以参加到任何一个和所有的阶段。 最终注册日期将分阶段公开宣布。
|
||||
|
||||
最后,参与者必须签署Solana的标准[代币协议](https://drive.google.com/open?id=1O4cEUZzeSNoVcncbHcEegAqPgjT-7hcy)。 代币协议将在奖励发放日期之前由Solana提供。
|
||||
|
||||
另外请参阅 [TOUR DE SOL 参与条款](https://drive.google.com/file/d/15ueLG6VJoQ5Hx4rnpjFeuL3pG5DbrBbE/view) 中的“2\(i\) & 2\(j\) Tour de Sol 详情”部分,了解与领取奖励有关的更多详情。
|
||||
|
||||
## 税务要求 <a id="what-are-the-tax-implications-of-the-rewards"></a>
|
||||
|
||||
参与者正在与Solana签订服务协议,并获得与服务相关的酌情奖励。 他们不被视为公司的全职员工,因此如果适用的话,Solana会收集W-9和W-8 BEN表格以支持纳税报告义务。 Solana建议参与者咨询税务会计师,以了解任何潜在的税务要求。
|
||||
|
||||
此外,如 [TOUR DE SOL 参与条款](https://drive.google.com/file/d/15ueLG6VJoQ5Hx4rnpjFeuL3pG5DbrBbE/view) 第2i、2k和10c节所述:
|
||||
|
||||
> 2i - 要获得任何SOL奖励,参与者必须签署公司的SOL奖励协议标准格式,其中包括作为SOL奖励发行的管理SOL所有权和使用的条款和条件,包括但不限于适用的锁定证券法、黑名单日期和纳税报告信息要求。
|
||||
|
||||
> 2k - 要获得任何SOL奖励,入围者必须签署公司的SOL奖励协议标准格式,其中包括作为SOL奖励发行的用于管理SOL所有权和使用的条款和条件,包括但不限于适用的锁定证券法、黑名单日期和纳税报告信息要求。
|
||||
|
||||
> 10c - 您有责任遵守适用于任何协议的交易的所有法律和法规,包括但不限于《商品交易法》以及美国 商品期货交易委员会\(“CFTC”\)颁布的法规,美国 证券交易委员会\(“SEC”\) 颁布的联邦证券法律和法规以及适用于您从公司收取的任何报酬的税法。
|
@ -1,5 +0,0 @@
|
||||
---
|
||||
title: 参与条款
|
||||
---
|
||||
|
||||
详情请查看官方 [TOUR DE SOL 参与条款](https://drive.google.com/a/solana.com/file/d/15ueLG6VJoQ5Hx4rnpjFeuL3pG5DbrBbE/view?usp=sharing)。
|
@ -1,53 +0,0 @@
|
||||
---
|
||||
title: 注册常见问题
|
||||
---
|
||||
|
||||
对于任何参与问题,[TOUR DE SOL 参与条款](https://drive.google.com/a/solana.com/file/d/15ueLG6VJoQ5Hx4rnpjFeuL3pG5DbrBbE/view?usp=sharing) 都应被视为权威资料。
|
||||
|
||||
## 注册是强制性的吗?
|
||||
|
||||
是的。 注册是强制的。 注册正在进行中, 我们每月主办为期一个月的 Tour de SOL,新的参加者需要等到下一阶段开始时才能进入。 [注册信息在这里](how-to-register.md)。
|
||||
|
||||
## 谁有资格参加?
|
||||
|
||||
详情请见 [TOUR DE SOL Participation terms](https://drive.google.com/a/solana.com/file/d/15ueLG6VJoQ5Hx4rnpjFeuL3pG5DbrBbE/view?usp=sharing) 的“参与资格 1;KYC 要求”。
|
||||
|
||||
## 我是否必须完成KYC/AML认证才能参与?
|
||||
|
||||
是的。 完成KYC/AML是强制性的。 如果你在第一阶段前没有完成这个进程,你就无法参加到 Tour de SOL。
|
||||
|
||||
我们已经与 Coinlist 合作管理 Tour de SOL 的 KYC/AML。 您可以在这里找到 [参与教程](https://docs.google.com/presentation/d/1gz8e34piUzzwzCMKwVrKKbZiPXV64Uq2-Izt4-VcMR4/),[在这里完成认证](https://docs.google.com/presentation/d/1gz8e34piUzzwzCMKwVrKKbZiPXV64Uq2-Izt4-VcMR4/edit#slide=id.g5dff17f5e5_0_44)。
|
||||
|
||||
## 我作为 Tour de Sol 参与者的责任是什么?
|
||||
|
||||
详情请查看 [TOR DE SOL 参与条款中](https://drive.google.com/file/d/15ueLG6VJoQ5Hx4rnpjFeuL3pG5DbrBbE/view) 的“2c Tour de SOL详情”。
|
||||
|
||||
### 如何计算“Tour de Sol 活跃 Tour 事件时间的 50%”责任?
|
||||
|
||||
为了有资格在给定阶段获得奖励,验证者必须在该阶段 >= 50%的位置中提交投票。
|
||||
|
||||
如果验证者无法为某个阶段提交 >= 50%的投票,但仍然认为他们应该在该阶段获得奖励,那么他们可以向Solana提出重新考虑的请求。
|
||||
|
||||
## Tour de Sol 测试代币与 Solana 主网代币之间是否有关系?
|
||||
|
||||
没有。 详情请查看 [TOR DE SOL 参与条款中](https://drive.google.com/file/d/15ueLG6VJoQ5Hx4rnpjFeuL3pG5DbrBbE/view) 的“2d Tour de SOL Details”。
|
||||
|
||||
## 验证节点会被取消 Tour de Sol 资格吗?
|
||||
|
||||
会的。 如果某个验证节点从事违禁行为和/或未能提供上述第\#4点所述的最低限度服务,那么它将被取消资格。
|
||||
|
||||
另见 [ TOUR DE SOL 参与条款](https://drive.google.com/file/d/15ueLG6VJoQ5Hx4rnpjFeuL3pG5DbrBbE/view) 的“4 违禁行为”,来详细地查看违禁行为。
|
||||
|
||||
### 更多关于违禁行为的问题:
|
||||
|
||||
#### 如“ 4 禁止行为”一节所述,有在居住地管辖范围以外的其他管辖区提供 Tour 服务的例子吗? 是否意味着服务器必须放在我居住地的管辖范围内?
|
||||
|
||||
不是的。 服务器可以位于与参与者的居住地不同的其他管辖区中。 签署[TOUR DE SOL 参与条款](https://drive.google.com/file/d/15ueLG6VJoQ5Hx4rnpjFeuL3pG5DbrBbE/view)后,参与者已同意:如果居住在美国,他们就在美国提供服务;如果不在美国境内,他们就从美国境外提供服务。
|
||||
|
||||
## 奖励是怎么计算的?
|
||||
|
||||
详情请查看 [奖励部分](rewards.md)。
|
||||
|
||||
## 我们怎么知道能否公开分享哪些信息?
|
||||
|
||||
请查看 [保密协议](confidentiality.md)。
|
@ -1,7 +0,0 @@
|
||||
---
|
||||
title: 提交 Bug
|
||||
---
|
||||
|
||||
请在[此Github代码库中的issue](https://github.com/solana-labs/solana/issues)提交所有的漏洞和反馈。
|
||||
|
||||
由于[Discord频道](useful-links.md)的信息流比较快,因此其中报告的问题很可能会在信息流中丢失。 在Github代码库中归档问题是确保记录并解决问题的唯一方法。
|
@ -1,15 +0,0 @@
|
||||
---
|
||||
title: 有用的链接 & 讨论
|
||||
description: 阅读本指南以后
|
||||
---
|
||||
|
||||
- [网络浏览器](http://explorer.solana.com/)
|
||||
- [TdS 性能指示板](https://metrics.solana.com:3000/d/monitor-edge/cluster-telemetry-edge?refresh=1m&from=now-15m&to=now&var-testnet=tds)
|
||||
- 验证节点频道
|
||||
- [\#validator-support](https://discord.gg/rZsenD) 通用群组用于讨论 Tour de SOL 故障以外的验证节点相关疑问。
|
||||
- [\#tourdesol-validators](https://discord.gg/BdujK2) 群组供 Tour de SOL 参与者进行交流。
|
||||
- [\#tourdesol-annound](https://discord.gg/Q5TxEC),关于 Tour de SOL 关键信息的唯一官方发布频道。
|
||||
- [核心软件代码库](https://github.com/solana-labs/solana)
|
||||
- [在此仓库中提交 bug 和反馈](https://github.com/solana-labs/solana/issues)
|
||||
|
||||
> 找不到您想要的东西? 请发送电子邮件到 ryan@solana.com 或在 Discord 联系 @rshea\#2622。
|
@ -15,10 +15,6 @@
|
||||
"message": "介绍",
|
||||
"description": "The label of footer link with label=Introduction linking to introduction"
|
||||
},
|
||||
"link.item.label.Tour de SOL": {
|
||||
"message": "Tour de SOL",
|
||||
"description": "The label of footer link with label=Tour de SOL linking to tour-de-sol"
|
||||
},
|
||||
"link.item.label.Discord": {
|
||||
"message": "Discord",
|
||||
"description": "The label of footer link with label=Discord linking to https://discordapp.com/invite/pquxPsq"
|
||||
@ -39,4 +35,4 @@
|
||||
"message": "Copyright © 2021 Solana Foundation",
|
||||
"description": "The footer copyright"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -101,35 +101,6 @@ module.exports = {
|
||||
"running-validator/validator-stake",
|
||||
"running-validator/validator-monitor",
|
||||
"running-validator/validator-info",
|
||||
{
|
||||
type: "category",
|
||||
label: "Incenvitized Testnet",
|
||||
items: [
|
||||
"tour-de-sol",
|
||||
{
|
||||
type: "category",
|
||||
label: "Registration",
|
||||
items: [
|
||||
"tour-de-sol/registration/how-to-register",
|
||||
"tour-de-sol/registration/terms-of-participation",
|
||||
"tour-de-sol/registration/rewards",
|
||||
"tour-de-sol/registration/confidentiality",
|
||||
"tour-de-sol/registration/validator-registration-and-rewards-faq",
|
||||
],
|
||||
},
|
||||
{
|
||||
type: "category",
|
||||
label: "Participation",
|
||||
items: [
|
||||
"tour-de-sol/participation/validator-technical-requirements",
|
||||
"tour-de-sol/participation/validator-public-key-registration",
|
||||
"tour-de-sol/participation/steps-to-create-a-validator",
|
||||
],
|
||||
},
|
||||
"tour-de-sol/useful-links",
|
||||
"tour-de-sol/submitting-bugs",
|
||||
],
|
||||
},
|
||||
"running-validator/validator-troubleshoot",
|
||||
],
|
||||
Clusters: [
|
||||
@ -213,22 +184,23 @@ module.exports = {
|
||||
label: "Accepted",
|
||||
items: [
|
||||
"proposals/accepted-design-proposals",
|
||||
"proposals/bankless-leader",
|
||||
"proposals/block-confirmation",
|
||||
"proposals/cluster-test-framework",
|
||||
"proposals/embedding-move",
|
||||
"proposals/interchain-transaction-verification",
|
||||
"proposals/ledger-replication-to-implement",
|
||||
"proposals/optimistic-confirmation-and-slashing",
|
||||
"proposals/vote-signing-to-implement",
|
||||
"proposals/cluster-test-framework",
|
||||
"proposals/validator-proposal",
|
||||
"proposals/simple-payment-and-state-verification",
|
||||
"proposals/interchain-transaction-verification",
|
||||
"proposals/snapshot-verification",
|
||||
"proposals/bankless-leader",
|
||||
"proposals/slashing",
|
||||
"proposals/tick-verification",
|
||||
"proposals/block-confirmation",
|
||||
"proposals/rust-clients",
|
||||
"proposals/optimistic_confirmation",
|
||||
"proposals/embedding-move",
|
||||
"proposals/rip-curl",
|
||||
"proposals/rust-clients",
|
||||
"proposals/simple-payment-and-state-verification",
|
||||
"proposals/slashing",
|
||||
"proposals/snapshot-verification",
|
||||
"proposals/tick-verification",
|
||||
"proposals/transactions-v2",
|
||||
"proposals/validator-proposal",
|
||||
"proposals/vote-signing-to-implement",
|
||||
],
|
||||
},
|
||||
],
|
||||
|
@ -62,9 +62,6 @@ The `--trusted-validator`s is operated by Solana
|
||||
- Testnet is where we stress test recent release features on a live
|
||||
cluster, particularly focused on network performance, stability and validator
|
||||
behavior.
|
||||
- [Tour de SOL](tour-de-sol.md) initiative runs on Testnet, where we
|
||||
encourage malicious behavior and attacks on the network to help us find and
|
||||
squash bugs or network vulnerabilities.
|
||||
- Testnet tokens are **not real**
|
||||
- Testnet may be subject to ledger resets.
|
||||
- Testnet includes a token faucet for airdrops for application testing
|
||||
|
@ -31,7 +31,7 @@ updates of a particular `MINOR` version release.
|
||||
#### Release Channels
|
||||
|
||||
- `edge` software that contains cutting-edge features with no backward compatibility policy
|
||||
- `beta` software that runs on the Solana Tour de SOL testnet cluster
|
||||
- `beta` software that runs on the Solana Testnet cluster
|
||||
- `stable` software that run on the Solana Mainnet Beta and Devnet clusters
|
||||
|
||||
#### Major Releases (x.0.0)
|
||||
@ -43,7 +43,7 @@ that were enabled in the previous `MAJOR` version.
|
||||
#### Minor Releases (1.x.0)
|
||||
|
||||
New features and proposal implementations are added to _new_ `MINOR` version
|
||||
releases (e.g. 1.4.0) and are first run on Solana's Tour de SOL testnet cluster. While running
|
||||
releases (e.g. 1.4.0) and are first run on Solana's Testnet cluster. While running
|
||||
on the testnet, `MINOR` versions are considered to be in the `beta` release channel. After
|
||||
those changes have been patched as needed and proven to be reliable, the `MINOR` version will
|
||||
be upgraded to the `stable` release channel and deployed to the Mainnet Beta cluster.
|
||||
|
@ -402,6 +402,7 @@ The result field will be an object with the following fields:
|
||||
- `lamports: <i64>`- number of reward lamports credited or debited by the account, as a i64
|
||||
- `postBalance: <u64>` - account balance in lamports after the reward was applied
|
||||
- `rewardType: <string|undefined>` - type of reward: "fee", "rent", "voting", "staking"
|
||||
- `commission: <u8|undefined>` - vote account commission when the reward was credited, only present for voting and staking rewards
|
||||
- `blockTime: <i64 | null>` - estimated production time, as Unix timestamp (seconds since the Unix epoch). null if not available
|
||||
- `blockHeight: <u64 | null>` - the number of blocks beneath this block
|
||||
|
||||
@ -1372,6 +1373,7 @@ The result field will be a JSON array with the following fields:
|
||||
- `effectiveSlot: <u64>`, the slot in which the rewards are effective
|
||||
- `amount: <u64>`, reward amount in lamports
|
||||
- `postBalance: <u64>`, post balance of the account in lamports
|
||||
- `commission: <u8|undefined>` - vote account commission when the reward was credited
|
||||
|
||||
#### Example
|
||||
|
||||
@ -1383,7 +1385,7 @@ curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d '
|
||||
"id": 1,
|
||||
"method": "getInflationReward",
|
||||
"params": [
|
||||
["6dmNQ5jwLeLk5REvio1JcMshcbvkYMwy26sJ8pbkvStu", "BGsqMegLpV6n6Ve146sSX2dTjUMj3M92HnU8BbNRMhF2"], 2
|
||||
["6dmNQ5jwLeLk5REvio1JcMshcbvkYMwy26sJ8pbkvStu", "BGsqMegLpV6n6Ve146sSX2dTjUMj3M92HnU8BbNRMhF2"], {"epoch": 2}
|
||||
]
|
||||
}
|
||||
'
|
||||
@ -2820,6 +2822,7 @@ Returns transaction details for a confirmed transaction
|
||||
- `lamports: <i64>`- number of reward lamports credited or debited by the account, as a i64
|
||||
- `postBalance: <u64>` - account balance in lamports after the reward was applied
|
||||
- `rewardType: <string>` - type of reward: currently only "rent", other types may be added in the future
|
||||
- `commission: <u8|undefined>` - vote account commission when the reward was credited, only present for voting and staking rewards
|
||||
|
||||
|
||||
#### Example:
|
||||
@ -3013,7 +3016,7 @@ curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d '
|
||||
|
||||
Result:
|
||||
```json
|
||||
{"jsonrpc":"2.0","result":{"solana-core": "1.7.4"},"id":1}
|
||||
{"jsonrpc":"2.0","result":{"solana-core": "1.7.5"},"id":1}
|
||||
```
|
||||
|
||||
### getVoteAccounts
|
||||
@ -3025,6 +3028,8 @@ Returns the account info and associated stake for all the voting accounts in the
|
||||
- `<object>` - (optional) Configuration object containing the following field:
|
||||
- (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
|
||||
- (optional) `votePubkey: <string>` - Only return results for this validator vote address (base-58 encoded)
|
||||
- (optional) `keepUnstakedDelinquents: <bool>` - Do not filter out delinquent validators with no stake
|
||||
- (optional) `delinquentSlotDistance: <u64>` - Specify the number of slots behind the tip that a validator must fall to be considered delinquent. **NOTE:** For the sake of consistency between ecosystem products, _it is **not** recommended that this argument be specified._
|
||||
|
||||
#### Results:
|
||||
|
||||
@ -3373,6 +3378,8 @@ Result:
|
||||
|
||||
#### Notification Format:
|
||||
|
||||
The notification format is the same as seen in the [getAccountInfo](jsonrpc-api.md#getAccountInfo) RPC HTTP method.
|
||||
|
||||
Base58 encoding:
|
||||
```json
|
||||
{
|
||||
@ -3505,7 +3512,14 @@ Result:
|
||||
|
||||
#### Notification Format:
|
||||
|
||||
Base58 encoding:
|
||||
The notification will be an RpcResponse JSON object with value equal to:
|
||||
|
||||
- `signature: <string>` - The transaction signature base58 encoded.
|
||||
- `err: <object | null>` - Error if transaction failed, null if transaction succeeded. [TransactionError definitions](https://github.com/solana-labs/solana/blob/master/sdk/src/transaction.rs#L24)
|
||||
- `logs: <array | null>` - Array of log messages the transaction instructions output during execution, null if simulation failed before the transaction was able to execute (for example due to an invalid blockhash or signature verification failure)
|
||||
|
||||
Example:
|
||||
|
||||
```json
|
||||
{
|
||||
"jsonrpc": "2.0",
|
||||
@ -3545,7 +3559,6 @@ Unsubscribe from transaction logging
|
||||
Request:
|
||||
```json
|
||||
{"jsonrpc":"2.0", "id":1, "method":"logsUnsubscribe", "params":[0]}
|
||||
|
||||
```
|
||||
|
||||
Result:
|
||||
@ -3622,6 +3635,8 @@ Result:
|
||||
|
||||
#### Notification Format:
|
||||
|
||||
The notification format is a <b>single</b> program account object as seen in the [getProgramAccounts](jsonrpc-api.md#getProgramAccounts) RPC HTTP method.
|
||||
|
||||
Base58 encoding:
|
||||
```json
|
||||
{
|
||||
@ -3756,7 +3771,12 @@ Result:
|
||||
```
|
||||
|
||||
#### Notification Format:
|
||||
```bash
|
||||
|
||||
The notification will be an RpcResponse JSON object with value containing an object with:
|
||||
- `err: <object | null>` - Error if transaction failed, null if transaction succeeded. [TransactionError definitions](https://github.com/solana-labs/solana/blob/master/sdk/src/transaction.rs#L24)
|
||||
|
||||
Example:
|
||||
```json
|
||||
{
|
||||
"jsonrpc": "2.0",
|
||||
"method": "signatureNotification",
|
||||
@ -3826,7 +3846,14 @@ Result:
|
||||
|
||||
#### Notification Format:
|
||||
|
||||
```bash
|
||||
The notification will be an object with the following fields:
|
||||
|
||||
- `parent: <u64>` - The parent slot
|
||||
- `root: <u64>` - The current root slot
|
||||
- `slot: <u64>` - The newly set slot value
|
||||
|
||||
Example:
|
||||
```json
|
||||
{
|
||||
"jsonrpc": "2.0",
|
||||
"method": "slotNotification",
|
||||
@ -3897,6 +3924,20 @@ Result:
|
||||
|
||||
#### Notification Format:
|
||||
|
||||
The notification will be an object with the following fields:
|
||||
|
||||
- `parent: <u64>` - The parent slot
|
||||
- `slot: <u64>` - The newly updated slot
|
||||
- `timestamp: <i64>` - The Unix timestamp of the update
|
||||
- `type: <string>` - The update type, one of:
|
||||
- "firstShredReceived"
|
||||
- "completed"
|
||||
- "createdBank"
|
||||
- "frozen"
|
||||
- "dead"
|
||||
- "optimisticConfirmation"
|
||||
- "root"
|
||||
|
||||
```bash
|
||||
{
|
||||
"jsonrpc": "2.0",
|
||||
@ -3913,15 +3954,6 @@ Result:
|
||||
}
|
||||
```
|
||||
|
||||
Types:
|
||||
- "firstShredReceived"
|
||||
- "completed"
|
||||
- "createdBank"
|
||||
- "frozen"
|
||||
- "dead"
|
||||
- "optimisticConfirmation"
|
||||
- "root"
|
||||
|
||||
### slotsUpdatesUnsubscribe
|
||||
|
||||
Unsubscribe from slot-update notifications
|
||||
@ -3976,7 +4008,7 @@ Result:
|
||||
|
||||
The result is the latest root slot number.
|
||||
|
||||
```bash
|
||||
```json
|
||||
{
|
||||
"jsonrpc": "2.0",
|
||||
"method": "rootNotification",
|
||||
@ -4045,7 +4077,10 @@ Result:
|
||||
|
||||
#### Notification Format:
|
||||
|
||||
The result is the latest vote, containing its hash, a list of voted slots, and an optional timestamp.
|
||||
The notification will be an object with the following fields:
|
||||
- `hash: <string>` - The vote hash
|
||||
- `slots: <array>` - The slots covered by the vote, as an array of u64 integers
|
||||
- `timestamp: <i64 | null>` - The timestamp of the vote
|
||||
|
||||
```json
|
||||
{
|
||||
@ -4134,6 +4169,7 @@ The result field will be an object with the following fields:
|
||||
- `lamports: <i64>`- number of reward lamports credited or debited by the account, as a i64
|
||||
- `postBalance: <u64>` - account balance in lamports after the reward was applied
|
||||
- `rewardType: <string|undefined>` - type of reward: "fee", "rent", "voting", "staking"
|
||||
- `commission: <u8|undefined>` - vote account commission when the reward was credited, only present for voting and staking rewards
|
||||
- `blockTime: <i64 | null>` - estimated production time, as Unix timestamp (seconds since the Unix epoch). null if not available
|
||||
|
||||
#### Example:
|
||||
|
@ -97,7 +97,9 @@ other instructions in the same transaction. Read more information on
|
||||
## RecentBlockhashes
|
||||
|
||||
The RecentBlockhashes sysvar contains the active recent blockhashes as well as
|
||||
their associated fee calculators. It is updated every slot.
|
||||
their associated fee calculators. It is updated every slot. Entries are ordered
|
||||
by descending block height, so the first entry holds the most recent block hash,
|
||||
and the last entry holds an old block hash.
|
||||
|
||||
- Address: `SysvarRecentB1ockHashes11111111111111111111`
|
||||
- Layout:
|
||||
|
@ -48,7 +48,7 @@ To become a Solana validator, one must deposit/lock-up some amount of SOL in a c
|
||||
|
||||
initial deposit.
|
||||
|
||||
Solana's trustless sense of time and ordering provided by its PoH data structure, along with its [turbine](https://www.youtube.com/watch?v=qt_gDRXHrHQ&t=1s) data broadcast and transmission design, should provide sub-second transaction confirmation times that scale with the log of the number of nodes in the cluster. This means we shouldn't have to restrict the number of validating nodes with a prohibitive 'minimum deposits' and expect nodes to be able to become validators with nominal amounts of SOL staked. At the same time, Solana's focus on high-throughput should create incentive for validation clients to provide high-performant and reliable hardware. Combined with potential a minimum network speed threshold to join as a validation-client, we expect a healthy validation delegation market to emerge. To this end, Solana's testnet will lead into a "Tour de SOL" validation-client competition, focusing on throughput and uptime to rank and reward testnet validators.
|
||||
Solana's trustless sense of time and ordering provided by its PoH data structure, along with its [turbine](https://www.youtube.com/watch?v=qt_gDRXHrHQ&t=1s) data broadcast and transmission design, should provide sub-second transaction confirmation times that scale with the log of the number of nodes in the cluster. This means we shouldn't have to restrict the number of validating nodes with a prohibitive 'minimum deposits' and expect nodes to be able to become validators with nominal amounts of SOL staked. At the same time, Solana's focus on high-throughput should create incentive for validation clients to provide high-performant and reliable hardware. Combined with potential a minimum network speed threshold to join as a validation-client, we expect a healthy validation delegation market to emerge.
|
||||
|
||||
## Penalties
|
||||
|
||||
|
@ -31,7 +31,7 @@ The inflation rate actually observed on the Solana network after accounting for
|
||||
|
||||
The rate of return (aka _interest_) earned on SOL staked on the network. It is often quoted as an annualized rate (e.g. "the network _staking yield_ is currently $10\%$ per year").
|
||||
|
||||
- _Staking yield_ is of great interest to validators and token-holders holders who wish to delegate their tokens to avoid token dilution due to inflation (the extent of which is discussed below).
|
||||
- _Staking yield_ is of great interest to validators and token holders who wish to delegate their tokens to avoid token dilution due to inflation (the extent of which is discussed below).
|
||||
- $100\%$ of inflationary issuances are to be distributed to staked token-holders in proportion to their staked SOL and to validators who charge a commission on the rewards earned by their delegated SOL..
|
||||
- There may be future consideration for an additional split of inflation issuance with the introduction of _Archivers_ into the economy. _Archivers_ are network participants who provide a decentralized storage service and should also be incentivized with token distribution from inflation issuances for this service. - Similarly, early designs specified a fixed percentage of inflationary issuance to be delivered to the Foundation treasury for operational expenses and future grants. However, inflation will be launching without any portion allocated to the Foundation.
|
||||
- _Staking yield_ can be calculated from the _Inflation Schedule_ along with the fraction of the _Total Current Supply_ that is staked at any given time. The explicit relationship is given by:
|
||||
|
@ -30,6 +30,7 @@ To run an api node:
|
||||
```bash
|
||||
solana-validator \
|
||||
--ledger <LEDGER_PATH> \
|
||||
--identity <VALIDATOR_IDENTITY_KEYPAIR> \
|
||||
--entrypoint <CLUSTER_ENTRYPOINT> \
|
||||
--expected-genesis-hash <EXPECTED_GENESIS_HASH> \
|
||||
--rpc-port 8899 \
|
||||
|
@ -105,24 +105,6 @@ function Home() {
|
||||
{features && features.length > 0 && (
|
||||
<section className={styles.features}>
|
||||
<div className="container">
|
||||
<div className="alert alert--primary" role="alert">
|
||||
<b>Announcing the Solana Season Hackathon</b>
|
||||
<br />
|
||||
Jumpstart your next project on Solana & join the fastest growing
|
||||
ecosystem in crypto
|
||||
<br />
|
||||
<p align="right">
|
||||
<a
|
||||
href="https://solana.com/solanaszn"
|
||||
rel="noopener noreferrer"
|
||||
target="_blank"
|
||||
>
|
||||
<button className="button button--outline button--secondary">
|
||||
Register Now
|
||||
</button>
|
||||
</a>
|
||||
</p>
|
||||
</div>
|
||||
<div className="row cards__container">
|
||||
{features.map((props, idx) => (
|
||||
<Feature key={idx} {...props} />
|
||||
|
302
docs/src/proposals/transactions-v2.md
Normal file
302
docs/src/proposals/transactions-v2.md
Normal file
@ -0,0 +1,302 @@
|
||||
# Transactions v2 - Address maps
|
||||
|
||||
## Problem
|
||||
|
||||
Messages transmitted to Solana validators must not exceed the IPv6 MTU size to
|
||||
ensure fast and reliable network transmission of cluster info over UDP.
|
||||
Solana's networking stack uses a conservative MTU size of 1280 bytes which,
|
||||
after accounting for headers, leaves 1232 bytes for packet data like serialized
|
||||
transactions.
|
||||
|
||||
Developers building applications on Solana must design their on-chain program
|
||||
interfaces within the above transaction size limit constraint. One common
|
||||
work-around is to store state temporarily on-chain and consume that state in
|
||||
later transactions. This is the approach used by the BPF loader program for
|
||||
deploying Solana programs.
|
||||
|
||||
However, this workaround doesn't work well when developers compose many on-chain
|
||||
programs in a single atomic transaction. With more composition comes more
|
||||
account inputs, each of which takes up 32 bytes. There is currently no available
|
||||
workaround for increasing the number of accounts used in a single transaction
|
||||
since each transaction must list all accounts that it needs to properly lock
|
||||
accounts for parallel execution. Therefore the current cap is about 35 accounts
|
||||
after accounting for signatures and other transaction metadata.
|
||||
|
||||
## Proposed Solution
|
||||
|
||||
Introduce a new on-chain program which stores account address maps and add a new
|
||||
transaction format which supports concise account references through the
|
||||
on-chain address maps.
|
||||
|
||||
### Address Map Program
|
||||
|
||||
Here we describe a program-based solution to the problem, whereby a protocol
|
||||
developer or end-user can create collections of related addresses on-chain for
|
||||
concise use in a transaction's account inputs. This approach is similar to page
|
||||
tables used in operating systems to succinctly map virtual addresses to physical
|
||||
memory.
|
||||
|
||||
After addresses are stored on-chain in an address map account, they may be
|
||||
succinctly referenced in a transaction using a 1-byte u8 index rather than a
|
||||
full 32-byte address. This will require a new transaction format to make use of
|
||||
these succinct references as well as runtime handling for looking up and loading
|
||||
accounts from the on-chain mappings.
|
||||
|
||||
#### State
|
||||
|
||||
Address map accounts must be rent-exempt but may be closed with a one epoch
|
||||
deactivation period. Address maps must be activated before use.
|
||||
|
||||
Since transactions use a u8 offset to look up mapped addresses, accounts can
|
||||
store up to 2^8 addresses each. Anyone may create an address map account of any
|
||||
size as long as it's big enough to store the necessary metadata. In addition to
|
||||
stored addresses, address map accounts must also track the latest count of
|
||||
stored addresses and an authority which must be a present signer for all
|
||||
appended map entries.
|
||||
|
||||
Map additions require one slot to activate so each map should track how many
|
||||
addresses are still pending activation in their on-chain state:
|
||||
|
||||
```rust
|
||||
struct AddressMap {
|
||||
// authority must sign for each addition and to close the map account
|
||||
authority: Pubkey,
|
||||
// record a deactivation epoch to help validators know when to remove
|
||||
// the map from their caches.
|
||||
deactivation_epoch: Epoch,
|
||||
// entries may not be modified once activated
|
||||
activated: bool,
|
||||
// list of entries, max capacity of u8::MAX
|
||||
entries: Vec<Pubkey>,
|
||||
}
|
||||
```
|
||||
|
||||
#### Cleanup
|
||||
|
||||
Once an address map gets stale and is no longer used, it can be reclaimed by the
|
||||
authority withdrawing lamports but the remaining balance must be greater than
|
||||
two epochs of rent. This ensures that it takes at least one full epoch to
|
||||
deactivate a map.
|
||||
|
||||
Maps may not be recreated because each new map must be created at a derived
|
||||
address using a monotonically increasing counter as a derivation seed.
|
||||
|
||||
#### Cost
|
||||
|
||||
Since address map accounts require caching and special handling in the runtime,
|
||||
they should incur higher costs for storage. Cost structure design will be added
|
||||
later.
|
||||
|
||||
### Versioned Transactions
|
||||
|
||||
In order to allow accounts to be referenced more succinctly, the structure of
|
||||
serialized transactions must be modified. The new transaction format should not
|
||||
affect transaction processing in the Solana VM beyond the increased capacity for
|
||||
accounts and program invocations. Invoked programs will be unaware of which
|
||||
transaction format was used.
|
||||
|
||||
The new transaction format must be distinguished from the current transaction
|
||||
format. Current transactions can fit at most 19 signatures (64-bytes each) but
|
||||
the message header encodes `num_required_signatures` as a `u8`. Since the upper
|
||||
bit of the `u8` will never be set for a valid transaction, we can enable it to
|
||||
denote whether a transaction should be decoded with the versioned format or not.
|
||||
|
||||
#### New Transaction Format
|
||||
|
||||
```rust
|
||||
#[derive(Serialize, Deserialize)]
|
||||
pub struct Transaction {
|
||||
#[serde(with = "short_vec")]
|
||||
pub signatures: Vec<Signature>,
|
||||
/// The message to sign.
|
||||
pub message: Message,
|
||||
}
|
||||
|
||||
// Uses custom serialization. If the first bit is set, a versioned message is
|
||||
// encoded starting from the next byte. If the first bit is not set, all bytes
|
||||
// are used to encode the original unversioned `Message` format.
|
||||
pub enum Message {
|
||||
Unversioned(UnversionedMessage),
|
||||
Versioned(VersionedMessage),
|
||||
}
|
||||
|
||||
// use bincode varint encoding to use u8 instead of u32 for enum tags
|
||||
#[derive(Serialize, Deserialize)]
|
||||
pub enum VersionedMessage {
|
||||
Current(Box<MessageV2>)
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
pub struct MessageV2 {
|
||||
// unchanged
|
||||
pub header: MessageHeader,
|
||||
|
||||
// unchanged
|
||||
#[serde(with = "short_vec")]
|
||||
pub account_keys: Vec<Pubkey>,
|
||||
|
||||
/// The last `address_maps.len()` number of readonly unsigned account_keys
|
||||
/// should be loaded as address maps
|
||||
#[serde(with = "short_vec")]
|
||||
pub address_maps: Vec<AddressMap>,
|
||||
|
||||
// unchanged
|
||||
pub recent_blockhash: Hash,
|
||||
|
||||
// unchanged. Account indices are still `u8` encoded so the max number of accounts
|
||||
// in account_keys + address_maps is limited to 256.
|
||||
#[serde(with = "short_vec")]
|
||||
pub instructions: Vec<CompiledInstruction>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
pub struct AddressMap {
|
||||
/// The last num_readonly_entries of entries are read-only
|
||||
pub num_readonly_entries: u8,
|
||||
|
||||
/// List of map entries to load
|
||||
#[serde(with = "short_vec")]
|
||||
pub entries: Vec<u8>,
|
||||
}
|
||||
```
|
||||
|
||||
#### Size changes
|
||||
|
||||
- 1 byte for `prefix` field
|
||||
- 1 byte for version enum discriminant
|
||||
- 1 byte for `address_maps` length
|
||||
- Each map requires 2 bytes for `entries` length and `num_readonly`
|
||||
- Each map entry is 1 byte (u8)
|
||||
|
||||
#### Cost changes
|
||||
|
||||
Using an address map in a transaction should incur an extra cost due to
|
||||
the extra work validators need to do to load and cache them.
|
||||
|
||||
#### Metadata changes
|
||||
|
||||
Each account accessed via an address map should be stored in the transaction
|
||||
metadata for quick reference. This will avoid the need for clients to make
|
||||
multiple RPC round trips to fetch all accounts referenced in a v2 transaction.
|
||||
It will also make it easier to use the ledger tool to analyze account access
|
||||
patterns.
|
||||
|
||||
#### RPC changes
|
||||
|
||||
Fetched transaction responses will likely require a new version field to
|
||||
indicate to clients which transaction structure to use for deserialization.
|
||||
Clients using pre-existing RPC methods will receive error responses when
|
||||
attempting to fetch a versioned transaction which will indicate that they
|
||||
must upgrade.
|
||||
|
||||
The RPC API should also support an option for returning fully expanded
|
||||
transactions to abstract away the address map details from downstream clients.
|
||||
|
||||
### Limitations
|
||||
|
||||
- Max of 256 accounts may be specified in a transaction because u8 is used by compiled
|
||||
instructions to index into transaction message account keys.
|
||||
- Address maps can hold up to 256 addresses because references to map entries
|
||||
are encoded as `u8` in transactions.
|
||||
- Transaction signers may not be referenced with an address map, the full
|
||||
address of each signer must be serialized in the transaction. This ensures that
|
||||
the performance of transaction signature checks is not affected.
|
||||
- Hardware wallets will probably not be able to display details about accounts
|
||||
referenced through address maps due to inability to verify on-chain data.
|
||||
- Only single level address maps can be used. Recursive maps will not be supported.
|
||||
|
||||
## Security Concerns
|
||||
|
||||
### Resource consumption
|
||||
|
||||
Enabling more account inputs in a transaction allows for more program
|
||||
invocations, write-locks, and data reads / writes. Before address maps are
|
||||
enabled, transaction-wide compute limits and increased costs for write locks and
|
||||
data reads are required.
|
||||
|
||||
### Front running
|
||||
|
||||
If the addresses listed within an address map account are modifiable, front
|
||||
running attacks could modify which mapped accounts are resolved for a later
|
||||
transaction. For this reason, we propose that any stored address is immutable
|
||||
and that address map accounts themselves may not be recreated.
|
||||
|
||||
Additionally, a malicious actor could try to fork the chain immediately after a
|
||||
new address map account is added to a block. If successful, they could add a
|
||||
different unexpected map entry in the fork. In order to deter this attack,
|
||||
clients should wait for address maps to be finalized before using them in a
|
||||
transaction. Clients may also append integrity check instructions to the
|
||||
transaction which verify that the correct accounts are used.
|
||||
|
||||
### Denial of service
|
||||
|
||||
Address map accounts will be read very frequently and will therefore be a
|
||||
more high profile target for denial of service attacks through write locks
|
||||
similar to sysvar accounts.
|
||||
|
||||
For this reason, special handling should be given to address map lookups.
|
||||
Address maps lookups should not be affected by account read/write locks.
|
||||
|
||||
### Duplicate accounts
|
||||
|
||||
Transactions may not load an account more than once whether directly through
|
||||
`account_keys` or indirectly through `address_maps`.
|
||||
|
||||
## Other Proposals
|
||||
|
||||
1) Account prefixes
|
||||
|
||||
Needing to pre-register accounts in an on-chain address map is cumbersome
|
||||
because it adds an extra step for transaction processing. Instead, Solana
|
||||
transactions could use variable length address prefixes to specify accounts.
|
||||
These prefix shortcuts can save on data usage without needing to setup on-chain
|
||||
state.
|
||||
|
||||
However, this model requires nodes to keep a mapping of prefixes to active account
|
||||
addresses. Attackers can create accounts with the same prefix as a popular account
|
||||
to disrupt transactions.
|
||||
|
||||
2) Transaction builder program
|
||||
|
||||
Solana can provide a new on-chain program which allows "Big" transactions to be
|
||||
constructed on-chain by normal transactions. Once the transaction is
|
||||
constructed, a final "Execute" transaction can trigger a node to process the big
|
||||
transaction as a normal transaction without needing to fit it into an MTU sized
|
||||
packet.
|
||||
|
||||
The UX of this approach is tricky. A user could in theory sign a big transaction
|
||||
but it wouldn't be great if they had to use their wallet to sign multiple
|
||||
transactions to build that transaction that they already signed and approved. This
|
||||
could be a use-case for transaction relay services, though. A user could pay a
|
||||
relayer to construct the large pre-signed transaction on-chain for them.
|
||||
|
||||
In order to prevent the large transaction from being reconstructed and replayed,
|
||||
its message hash will need to be added to the status cache when executed.
|
||||
|
||||
3) Epoch account indexes
|
||||
|
||||
Similarly to leader schedule calculation, validators could create a global index
|
||||
of the most accessed accounts in the previous epoch and make that index
|
||||
available to transactions in the following epoch.
|
||||
|
||||
This approach has a downside of only updating the index at epoch boundaries
|
||||
which means there would be a few day delay before popular new accounts could be
|
||||
referenced. It also needs to be consistently generated by all validators by
|
||||
using some criteria like adding accounts in order by access count.
|
||||
|
||||
4) Address lists
|
||||
|
||||
Extend the transaction structure to support addresses that, when loaded, expand
|
||||
to a list of addresses. After expansion, all account inputs are concatenated to
|
||||
form a single list of account keys which can be indexed into by instructions.
|
||||
Address lists would likely need to be immutable to prevent attacks. They would
|
||||
also need to be limited in length to limit resource consumption.
|
||||
|
||||
This proposal can be thought of as a special case of the proposed index account
|
||||
approach. Since the full account list would be expanded, there's no need to add
|
||||
additional offsets that use up the limited space in a serialized transaction.
|
||||
However, the expected size of an address list may need to be encoded into the
|
||||
transaction to aid the sanitization of account indexes. We would also need to
|
||||
encode how many addresses in the list should be loaded as readonly vs
|
||||
read-write. Lastly, special attention must be given to watch out for addresses
|
||||
that exist in multiple account lists.
|
@ -2,6 +2,6 @@
|
||||
title: Running a Validator
|
||||
---
|
||||
|
||||
This section describes how run a Solana validator node.
|
||||
This section describes how to run a Solana validator node.
|
||||
|
||||
There are several clusters available to connect to, see [choosing a Cluster](cli/choose-a-cluster.md) for an overview of each.
|
||||
There are several clusters available to connect to; see [choosing a Cluster](cli/choose-a-cluster.md) for an overview of each.
|
||||
|
@ -11,12 +11,7 @@ testnet participants, [https://discord.gg/pquxPsq](https://discord.gg/pquxPsq).
|
||||
- [Testnet Metrics Dashboard](https://metrics.solana.com:3000/d/monitor-edge/cluster-telemetry-edge?refresh=60s&orgId=2)
|
||||
- Validator chat channels
|
||||
- [\#validator-support](https://discord.gg/rZsenD) General support channel for any Validator related queries.
|
||||
- [\#tourdesol](https://discord.gg/BdujK2) Discussion and support channel for Tour de SOL participants ([What is Tour de SOL?](https://solana.com/tds/)).
|
||||
- [\#tourdesol-announcements](https://discord.gg/Q5TxEC) The single source of truth for critical information relating to Tour de SOL
|
||||
- [\#tourdesol-stage0](https://discord.gg/Xf8tES) Discussion for events within Tour de SOL Stage 0. Stage 0 includes all the dry-run
|
||||
- [\#testnet-announcements](https://discord.gg/Q5TxEC) The single source of truth for critical information relating to Testnet
|
||||
- [Core software repo](https://github.com/solana-labs/solana)
|
||||
- [Tour de SOL Docs](https://docs.solana.com/tour-de-sol)
|
||||
- [TdS repo](https://github.com/solana-labs/tour-de-sol)
|
||||
- [TdS metrics dashboard](https://metrics.solana.com:3000/d/monitor-edge/cluster-telemetry-edge?refresh=1m&from=now-15m&to=now&var-testnet=tds)
|
||||
|
||||
Can't find what you're looking for? Send an email to ryan@solana.com or reach out to @rshea\#2622 on Discord.
|
||||
|
@ -1,11 +0,0 @@
|
||||
---
|
||||
title: Introduction
|
||||
---
|
||||
|
||||
## Welcome!
|
||||
|
||||
This guide contains information about how to participate in Solana's Tour de SOL. Questions? Comments? Keep on reading!
|
||||
|
||||
### Learn more about Tour de SOL
|
||||
|
||||
If you haven't registered yet, complete the form at [https://solana.com/validator-registration/](https://solana.com/validator-registration/) first.
|
@ -1 +0,0 @@
|
||||
# Participation
|
@ -1,11 +0,0 @@
|
||||
---
|
||||
title: Steps to create a validator
|
||||
---
|
||||
|
||||
To create a Solana validator, follow the normal [validator workflow](../../running-validator/validator-start.md)
|
||||
targeting the [Testnet cluster](../../clusters.md).
|
||||
|
||||
Note that Testnet validators are automatically staked by a process that runs
|
||||
every Epoch. If your validator is running correctly then in a couple of days it
|
||||
will be staked (and automatically destaked if offline for a prolonged period of
|
||||
time).
|
@ -1,45 +0,0 @@
|
||||
---
|
||||
title: Create a validator public key
|
||||
---
|
||||
|
||||
In order to participate you need to first register. See [Registration info](../registration/how-to-register.md).
|
||||
|
||||
In order to obtain your allotment of SOL you need to publish your
|
||||
validator's identity public key under your keybase.io account.
|
||||
|
||||
## **Generate Keypair**
|
||||
|
||||
1. If you haven't already, generate your validator's identity keypair by running:
|
||||
|
||||
```bash
|
||||
solana-keygen new -o ~/validator-keypair.json
|
||||
```
|
||||
|
||||
2. The identity public key can now be viewed by running:
|
||||
|
||||
```bash
|
||||
solana-keygen pubkey ~/validator-keypair.json
|
||||
```
|
||||
|
||||
> Note: The "validator-keypair.json” file is also your \(ed25519\) private key.
|
||||
|
||||
Your validator identity keypair uniquely identifies your validator within the network. **It is crucial to back-up this information.**
|
||||
|
||||
If you don’t back up this information, you WILL NOT BE ABLE TO RECOVER YOUR VALIDATOR, if you lose access to it. If this happens, YOU WILL LOSE YOUR ALLOCATION OF SOL TOO.
|
||||
|
||||
To back-up your validator identity keypair, **back-up your "validator-keypair.json” file to a secure location.**
|
||||
|
||||
## Link your Solana pubkey to a Keybase account
|
||||
|
||||
You must link your Solana pubkey to a Keybase.io account. The following instructions describe how to do that by installing Keybase on your server.
|
||||
|
||||
1. Install [Keybase](https://keybase.io/download) on your machine.
|
||||
2. Log in to your Keybase account on your server. Create a Keybase account first if you don’t already have one. Here’s a [list of basic Keybase CLI commands](https://keybase.io/docs/command_line/basics).
|
||||
3. Create a Solana directory in your public file folder: `mkdir /keybase/public/<KEYBASE_USERNAME>/solana`
|
||||
4. Publish your validator's identity public key by creating an empty file in your Keybase public file folder in the following format: `/keybase/public/<KEYBASE_USERNAME>/solana/validator-<BASE58_PUBKEY>`. For example:
|
||||
|
||||
```bash
|
||||
touch /keybase/public/<KEYBASE_USERNAME>/solana/validator-<BASE58_PUBKEY>
|
||||
```
|
||||
|
||||
5. To check your public key was published, ensure you can successfully browse to `https://keybase.pub/<KEYBASE_USERNAME>/solana/validator-<BASE58_PUBKEY>`
|
@ -1,11 +0,0 @@
|
||||
---
|
||||
title: Requirements to run a validator
|
||||
---
|
||||
|
||||
## Hardware
|
||||
|
||||
See [suggested hardware configuration here](../../running-validator/validator-reqs.md).
|
||||
|
||||
## Software
|
||||
|
||||
- We build and run on Ubuntu 20.04
|
@ -1 +0,0 @@
|
||||
# Registration
|
@ -1,7 +0,0 @@
|
||||
---
|
||||
title: Confidentiality
|
||||
---
|
||||
|
||||
**Section 8 of the** [**TOUR DE SOL PARTICIPATION TERMS**](https://drive.google.com/file/d/15ueLG6VJoQ5Hx4rnpjFeuL3pG5DbrBbE/view) **references confidentiality.**
|
||||
|
||||
Solana doesn’t intend to share any confidential information during the Tour de SOL. However, if we do, it will be called out as such within our communications verbally, by email, etc. Unless information is specifically called out as such, the information should not be considered confidential and we welcome you to share it.
|
@ -1,8 +0,0 @@
|
||||
---
|
||||
title: How To Register
|
||||
---
|
||||
|
||||
For registration, KYC, and the participation agreement, please visit the
|
||||
validator registration page on the Solana.com website:
|
||||
|
||||
https://solana.com/validator-registration
|
@ -1,31 +0,0 @@
|
||||
---
|
||||
title: Compensation
|
||||
---
|
||||
|
||||
## Compensation Calculation
|
||||
|
||||
Compensation will be calculated according to the compensation design described in [this forum post](https://forums.solana.com/t/tour-de-sol-updates-to-tour-de-sol-and-bug-bounty-compensation-structure/1132).
|
||||
|
||||
Please also see section “2\(f\) Tour de Sol Details” of the [TOUR DE SOL PARTICIPATION TERMS](https://drive.google.com/file/d/15ueLG6VJoQ5Hx4rnpjFeuL3pG5DbrBbE/view) for additional compensation details.
|
||||
|
||||
## Requirements to Receive Rewards
|
||||
|
||||
Participants must have signed the Tour de SOL participation agreement,
|
||||
passed KYC/AML, as well as filled out the W-8 BEN or W-9 tax form
|
||||
\(depending on your residency\) prior to participating in the Tour.
|
||||
|
||||
A participant may participate in any and all stages that begin after they
|
||||
complete registration. The final registration dates will be announced
|
||||
publicly on a stage-by-stage basis.
|
||||
|
||||
## Tax Implications
|
||||
|
||||
Participants are entering into a service agreement with Solana with discretionary compensation associated with the services. They are not considered to be a full-time employee of the company and therefore Solana is collecting W-9 and W-8 BEN forms in order to support tax reporting obligations, if applicable. Solana recommends participants consult with a tax accountant to understand any potential tax implications.
|
||||
|
||||
Also, as stated in Sections 2i, 2k and 10c of the [TOUR DE SOL PARTICIPATION TERMS](https://drive.google.com/file/d/15ueLG6VJoQ5Hx4rnpjFeuL3pG5DbrBbE/view):
|
||||
|
||||
> 2i - To receive any SOL Reward, a Finalist must sign the Company’s standard form of SOL Reward Agreement which will include the terms and conditions governing the ownership and use of the SOL that are issued as the SOL Reward, including but not limited to applicable lockups required by securities laws, blackout dates and tax reporting information.
|
||||
|
||||
> 2k - To receive any SOL Reward, a Finalist must sign the Company’s standard form of SOL Reward Agreement which will include the terms and conditions governing the ownership and use of the SOL that are issued as the SOL Reward, including but not limited to applicable lockups required by securities laws, blackout dates and tax reporting information.
|
||||
|
||||
> 10c - You are responsible for complying with all laws and regulations applicable to your transactions on any Protocol, including, but not limited to, the Commodity Exchange Act and the regulations promulgated thereunder by the U.S. Commodity Futures Trading Commission \(“CFTC”\), the federal securities laws and the regulations promulgated thereunder by the U.S. Securities and Exchange Commission \(“SEC”\), and the tax laws applicable to any remuneration received by you from Company.
|
@ -1,5 +0,0 @@
|
||||
---
|
||||
title: Terms of Participation
|
||||
---
|
||||
|
||||
Please see the official [TOUR DE SOL PARTICIPATION TERMS](https://drive.google.com/a/solana.com/file/d/15ueLG6VJoQ5Hx4rnpjFeuL3pG5DbrBbE/view?usp=sharing) for complete details.
|
@ -1,49 +0,0 @@
|
||||
---
|
||||
title: Registration FAQ
|
||||
---
|
||||
|
||||
The [TOUR DE SOL PARTICIPATION TERMS](https://drive.google.com/a/solana.com/file/d/15ueLG6VJoQ5Hx4rnpjFeuL3pG5DbrBbE/view?usp=sharing) should be considered the authoritative resource for any participation questions.
|
||||
|
||||
## Is registration mandatory?
|
||||
|
||||
Yes. Registration is mandatory. Registration is rolling; we host month-long Tour de SOL stages on a monthly basis and new participants will need to wait until the start of the next stage to be on-boarded. [Registration information lives here](how-to-register.md).
|
||||
|
||||
## Who’s eligible to participate?
|
||||
|
||||
Please see section “1 Eligibility; KYC Requirements” of the [TOUR DE SOL PARTICIPATION TERMS](https://drive.google.com/a/solana.com/file/d/15ueLG6VJoQ5Hx4rnpjFeuL3pG5DbrBbE/view?usp=sharing) for details.
|
||||
|
||||
## Do I have to complete the KYC/AML process to participate?
|
||||
|
||||
Yes. Completing the KYC/AML process is mandatory.
|
||||
|
||||
## What are my responsibilities as a Tour de Sol participant?
|
||||
|
||||
Please see section “2c Tour de SOL Details” of the [TOUR DE SOL PARTICIPATION TERMS](https://drive.google.com/file/d/15ueLG6VJoQ5Hx4rnpjFeuL3pG5DbrBbE/view) for details.
|
||||
|
||||
## How is the “90% of the active Tour event time” responsibility calculated?
|
||||
|
||||
To be eligible for rewards in a given stage, a validator must submit votes in >= 90% of that stage’s slots.
|
||||
|
||||
## Is there a relationship between the Tour de Sol testnet tokens and Solana mainnet tokens?
|
||||
|
||||
No. Please see section ”2d Tour de SOL Details” of the [TOUR DE SOL PARTICIPATION TERMS](https://drive.google.com/file/d/15ueLG6VJoQ5Hx4rnpjFeuL3pG5DbrBbE/view) for details.
|
||||
|
||||
## Can a validator be disqualified from the Tour de Sol?
|
||||
|
||||
Yes. A validator may be disqualified from the Tour de SOL if they engage in prohibited conduct and/or fail to provide the minimum level of services described in question \#4 above.
|
||||
|
||||
Please also see Section “4 Prohibited Conduct” of the [TOUR DE SOL PARTICIPATION TERMS](https://drive.google.com/file/d/15ueLG6VJoQ5Hx4rnpjFeuL3pG5DbrBbE/view) for a more detailed description of prohibited conduct.
|
||||
|
||||
### More questions related to Prohibited Conduct:
|
||||
|
||||
#### As referenced in section “4 Prohibited Conduct”, what would be an example of providing Tour Services from a jurisdiction other than the jurisdiction of my residence? Does this mean my server has to reside in the jurisdiction of my residence?
|
||||
|
||||
No. Servers can be in other jurisdictions that differ from a participant’s residency. By signing the [TOUR DE SOL PARTICIPATION TERMS](https://drive.google.com/file/d/15ueLG6VJoQ5Hx4rnpjFeuL3pG5DbrBbE/view) the participant has represented that they are delivering their services from the US if they reside there or from outside the US if they are not residing within the US.
|
||||
|
||||
## How are rewards calculated?
|
||||
|
||||
Please see the [Rewards section](rewards.md) for details
|
||||
|
||||
## How will we know what information we can and cannot share publicly?
|
||||
|
||||
Please see [Confidentiality](confidentiality.md).
|
@ -1,7 +0,0 @@
|
||||
---
|
||||
title: Submitting Bugs
|
||||
---
|
||||
|
||||
Please submit all bugs and feedback as [issues in this Github repo](https://github.com/solana-labs/solana/issues).
|
||||
|
||||
Given the fast pace of communication in the [Discord channels](useful-links.md), it’s likely issues reported in them may be lost in the information flow. Filing the issues in the Github repo is the only way to ensure the issues get logged and addressed.
|
@ -1,15 +0,0 @@
|
||||
---
|
||||
title: Useful Links & Discussion
|
||||
description: Where to go after you've read this guide
|
||||
---
|
||||
|
||||
- [Network Explorer](http://explorer.solana.com/)
|
||||
- [TdS metrics dashboard](https://metrics.solana.com:3000/d/monitor-edge/cluster-telemetry-edge?refresh=1m&from=now-15m&to=now&var-testnet=tds)
|
||||
- [Core software repo](https://github.com/solana-labs/solana)
|
||||
- [Submit bugs and feedback in this repo](https://github.com/solana-labs/solana/issues)
|
||||
|
||||
### Validator Discussions
|
||||
|
||||
The Solana Discord server is where all the action happens. Join the server by
|
||||
visiting https://solana.com/discord. Check out the "Testnet (Tour de SOL)" channel
|
||||
group.
|
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user