Compare commits

71 Commits (SHA1):

f1cda98aeb d5cbac41cb f19209b23d fd670d0ae0 948d869c49 2bab4ace8b 1ddff68642 c0b250285a
4509579e10 9b8d59cc2b bd91fc2985 9a3ebe0b20 6c08dc9c9d 740c1df045 0a5905a02c 237eceb6c1
dabbdcf988 3fceaf694c 34df5ad364 573aed2b4b 6ef65d8513 7c6fb3d554 886eaac211 6ca69a1525
1cc2f67391 5d547130f0 facb209720 5e215ac854 d3dd9ec6e2 12ed7c6845 f9d68b5d86 8f9f11e37f
6a5f67f78c a505e92487 701300334a b9cf02fd6a 71cb8de0dd 13e5b479eb 2ad435587a 8f54d409e2
b4345c039a e61545ad18 961d1f0ee5 b260f686a3 3cfc38850b f12a933a54 135763e019 aaec7de881
420ea2f143 cb2dd56317 a420d1e91e 0073448afc 086cdd8ef7 dd57cbd6a4 8937a1db3b 89a914f7c1
cf9936a314 6f95524be3 8021d368fe d7c43f0c0b 6765453f8a adb0824da5 f86dcec94b 8f28989520
1823d7bdec 892a3b6dc4 cc987b8884 32d616da1e 6d62d0cd42 c7d6e2b4a5 d6f1e4b10a
@@ -3,3 +3,16 @@ root: ./docs/src
structure:
  readme: introduction.md
  summary: SUMMARY.md
+
+redirects:
+  wallet: ./wallet-guide/README.md
+  wallet/app-wallets: ./wallet-guide/apps.md
+  wallet/app-wallets/trust-wallet: ./wallet-guide/trust-wallet.md
+  wallet/app-wallets/ledger-live: ./wallet-guide/ledger-live.md
+  wallet/cli-wallets: ./wallet-guide/cli.md
+  wallet/cli-wallets/paper-wallet: ./paper-wallet/README.md
+  wallet/cli-wallets/paper-wallet/paper-wallet-usage: ./paper-wallet/paper-wallet-usage.md
+  wallet/cli-wallets/remote-wallet: ./hardware-wallets/README.md
+  wallet/cli-wallets/remote-wallet/ledger: ./hardware-wallets/ledger.md
+  wallet/cli-wallets/file-system-wallet: ./file-system-wallet/README.md
+  wallet/support: ./wallet-guide/support.md
Cargo.lock (generated): 795 changes. File diff suppressed because it is too large.
@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-accounts-bench"
-version = "1.1.3"
+version = "1.1.7"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -10,10 +10,13 @@ homepage = "https://solana.com/"
[dependencies]
log = "0.4.6"
rayon = "1.3.0"
-solana-logger = { path = "../logger", version = "1.1.3" }
-solana-runtime = { path = "../runtime", version = "1.1.3" }
-solana-measure = { path = "../measure", version = "1.1.3" }
-solana-sdk = { path = "../sdk", version = "1.1.3" }
+solana-logger = { path = "../logger", version = "1.1.7" }
+solana-runtime = { path = "../runtime", version = "1.1.7" }
+solana-measure = { path = "../measure", version = "1.1.7" }
+solana-sdk = { path = "../sdk", version = "1.1.7" }
rand = "0.6.5"
clap = "2.33.0"
crossbeam-channel = "0.4"
+
+[package.metadata.docs.rs]
+targets = ["x86_64-unknown-linux-gnu"]
@@ -1,6 +1,6 @@
[package]
name = "solana-archiver-lib"
-version = "1.1.3"
+version = "1.1.7"
description = "Solana Archiver Library"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -15,26 +15,29 @@ ed25519-dalek = "=1.0.0-pre.1"
log = "0.4.8"
rand = "0.6.5"
rand_chacha = "0.1.1"
-solana-client = { path = "../client", version = "1.1.3" }
-solana-storage-program = { path = "../programs/storage", version = "1.1.3" }
+solana-client = { path = "../client", version = "1.1.7" }
+solana-storage-program = { path = "../programs/storage", version = "1.1.7" }
thiserror = "1.0"
serde = "1.0.105"
serde_json = "1.0.48"
serde_derive = "1.0.103"
-solana-net-utils = { path = "../net-utils", version = "1.1.3" }
-solana-chacha = { path = "../chacha", version = "1.1.3" }
-solana-chacha-sys = { path = "../chacha-sys", version = "1.1.3" }
-solana-ledger = { path = "../ledger", version = "1.1.3" }
-solana-logger = { path = "../logger", version = "1.1.3" }
-solana-perf = { path = "../perf", version = "1.1.3" }
-solana-sdk = { path = "../sdk", version = "1.1.3" }
-solana-core = { path = "../core", version = "1.1.3" }
-solana-streamer = { path = "../streamer", version = "1.1.3" }
-solana-archiver-utils = { path = "../archiver-utils", version = "1.1.3" }
-solana-metrics = { path = "../metrics", version = "1.1.3" }
+solana-net-utils = { path = "../net-utils", version = "1.1.7" }
+solana-chacha = { path = "../chacha", version = "1.1.7" }
+solana-chacha-sys = { path = "../chacha-sys", version = "1.1.7" }
+solana-ledger = { path = "../ledger", version = "1.1.7" }
+solana-logger = { path = "../logger", version = "1.1.7" }
+solana-perf = { path = "../perf", version = "1.1.7" }
+solana-sdk = { path = "../sdk", version = "1.1.7" }
+solana-core = { path = "../core", version = "1.1.7" }
+solana-streamer = { path = "../streamer", version = "1.1.7" }
+solana-archiver-utils = { path = "../archiver-utils", version = "1.1.7" }
+solana-metrics = { path = "../metrics", version = "1.1.7" }

[dev-dependencies]
hex = "0.4.2"

[lib]
name = "solana_archiver_lib"
+
+[package.metadata.docs.rs]
+targets = ["x86_64-unknown-linux-gnu"]
@@ -53,7 +53,7 @@ use std::{
result,
sync::atomic::{AtomicBool, Ordering},
sync::mpsc::{channel, Receiver, Sender},
-sync::{Arc, RwLock},
+sync::Arc,
thread::{sleep, spawn, JoinHandle},
time::Duration,
};
@@ -185,9 +185,9 @@ impl Archiver {

info!("Archiver: id: {}", keypair.pubkey());
info!("Creating cluster info....");
-let mut cluster_info = ClusterInfo::new(node.info.clone(), keypair.clone());
+let cluster_info = ClusterInfo::new(node.info.clone(), keypair.clone());
cluster_info.set_entrypoint(cluster_entrypoint.clone());
-let cluster_info = Arc::new(RwLock::new(cluster_info));
+let cluster_info = Arc::new(cluster_info);
let cluster_slots = Arc::new(ClusterSlots::default());
// Note for now, this ledger will not contain any of the existing entries
// in the ledger located at ledger_path, and will only append on newly received
@@ -200,7 +200,7 @@ impl Archiver {

info!("Connecting to the cluster via {:?}", cluster_entrypoint);
let (nodes, _) =
-match solana_core::gossip_service::discover_cluster(&cluster_entrypoint.gossip, 1) {
+match solana_core::gossip_service::discover_cluster(&cluster_entrypoint.gossip, 2) {
Ok(nodes_and_archivers) => nodes_and_archivers,
Err(e) => {
//shutdown services before exiting
@@ -308,7 +308,7 @@ impl Archiver {
fn run(
meta: &mut ArchiverMeta,
blockstore: &Arc<Blockstore>,
-cluster_info: Arc<RwLock<ClusterInfo>>,
+cluster_info: Arc<ClusterInfo>,
archiver_keypair: &Arc<Keypair>,
storage_keypair: &Arc<Keypair>,
exit: &Arc<AtomicBool>,
@@ -365,12 +365,12 @@ impl Archiver {
}

fn redeem_rewards(
-cluster_info: &Arc<RwLock<ClusterInfo>>,
+cluster_info: &ClusterInfo,
archiver_keypair: &Arc<Keypair>,
storage_keypair: &Arc<Keypair>,
client_commitment: CommitmentConfig,
) {
-let nodes = cluster_info.read().unwrap().tvu_peers();
+let nodes = cluster_info.tvu_peers();
let client = solana_core::gossip_service::get_client(&nodes);

if let Ok(Some(account)) =
@@ -405,7 +405,7 @@ impl Archiver {
#[allow(clippy::too_many_arguments)]
fn setup(
meta: &mut ArchiverMeta,
-cluster_info: Arc<RwLock<ClusterInfo>>,
+cluster_info: Arc<ClusterInfo>,
blockstore: &Arc<Blockstore>,
exit: &Arc<AtomicBool>,
node_info: &ContactInfo,
@@ -491,7 +491,7 @@ impl Archiver {
blockstore: &Arc<Blockstore>,
exit: &Arc<AtomicBool>,
node_info: &ContactInfo,
-cluster_info: Arc<RwLock<ClusterInfo>>,
+cluster_info: Arc<ClusterInfo>,
) {
info!(
"window created, waiting for ledger download starting at slot {:?}",
@@ -519,11 +519,8 @@ impl Archiver {
contact_info.tvu = "0.0.0.0:0".parse().unwrap();
contact_info.wallclock = timestamp();
// copy over the adopted shred_version from the entrypoint
-contact_info.shred_version = cluster_info.read().unwrap().my_data().shred_version;
-{
-let mut cluster_info_w = cluster_info.write().unwrap();
-cluster_info_w.insert_self(contact_info);
-}
+contact_info.shred_version = cluster_info.my_shred_version();
+cluster_info.update_contact_info(|current| *current = contact_info);
}

fn encrypt_ledger(meta: &mut ArchiverMeta, blockstore: &Arc<Blockstore>) -> Result<()> {
@@ -626,12 +623,12 @@ impl Archiver {

fn submit_mining_proof(
meta: &ArchiverMeta,
-cluster_info: &Arc<RwLock<ClusterInfo>>,
+cluster_info: &ClusterInfo,
archiver_keypair: &Arc<Keypair>,
storage_keypair: &Arc<Keypair>,
) {
// No point if we've got no storage account...
-let nodes = cluster_info.read().unwrap().tvu_peers();
+let nodes = cluster_info.tvu_peers();
let client = solana_core::gossip_service::get_client(&nodes);
let storage_balance = client
.poll_get_balance_with_commitment(&storage_keypair.pubkey(), meta.client_commitment);
@@ -689,13 +686,10 @@ impl Archiver {
}

fn get_segment_config(
-cluster_info: &Arc<RwLock<ClusterInfo>>,
+cluster_info: &ClusterInfo,
client_commitment: CommitmentConfig,
) -> Result<u64> {
-let rpc_peers = {
-let cluster_info = cluster_info.read().unwrap();
-cluster_info.all_rpc_peers()
-};
+let rpc_peers = cluster_info.all_rpc_peers();
debug!("rpc peers: {:?}", rpc_peers);
if !rpc_peers.is_empty() {
let rpc_client = {
@@ -721,7 +715,7 @@ impl Archiver {

/// Waits until the first segment is ready, and returns the current segment
fn poll_for_segment(
-cluster_info: &Arc<RwLock<ClusterInfo>>,
+cluster_info: &ClusterInfo,
slots_per_segment: u64,
previous_blockhash: &Hash,
exit: &Arc<AtomicBool>,
@@ -741,17 +735,14 @@ impl Archiver {

/// Poll for a different blockhash and associated max_slot than `previous_blockhash`
fn poll_for_blockhash_and_slot(
-cluster_info: &Arc<RwLock<ClusterInfo>>,
+cluster_info: &ClusterInfo,
slots_per_segment: u64,
previous_blockhash: &Hash,
exit: &Arc<AtomicBool>,
) -> Result<(Hash, u64)> {
info!("waiting for the next turn...");
loop {
-let rpc_peers = {
-let cluster_info = cluster_info.read().unwrap();
-cluster_info.all_rpc_peers()
-};
+let rpc_peers = cluster_info.all_rpc_peers();
debug!("rpc peers: {:?}", rpc_peers);
if !rpc_peers.is_empty() {
let rpc_client = {
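Every archiver hunk above makes the same mechanical change: `ClusterInfo` is no longer wrapped in an external `RwLock`, so callers hold a plain `Arc<ClusterInfo>` and the per-call `read().unwrap()` / `write().unwrap()` boilerplate disappears (`cluster_info.read().unwrap().tvu_peers()` becomes `cluster_info.tvu_peers()`). A minimal sketch of the pattern, using an illustrative stand-in type rather than the real solana-core `ClusterInfo`:

```rust
use std::sync::{Arc, RwLock};

// Illustrative stand-in for the gossip table; not the real solana-core ClusterInfo.
#[derive(Default)]
struct ClusterNodes {
    // The lock now lives inside the struct instead of wrapping it.
    peers: RwLock<Vec<String>>,
}

impl ClusterNodes {
    // Methods take &self and do their own short-lived locking internally,
    // so callers never see the RwLock.
    fn tvu_peers(&self) -> Vec<String> {
        self.peers.read().unwrap().clone()
    }

    fn insert_peer(&self, peer: String) {
        self.peers.write().unwrap().push(peer);
    }
}

fn main() {
    // Before: Arc<RwLock<ClusterNodes>> and nodes.read().unwrap().tvu_peers()
    // After:  Arc<ClusterNodes> and nodes.tvu_peers()
    let nodes = Arc::new(ClusterNodes::default());
    nodes.insert_peer("127.0.0.1:8001".to_string());
    assert_eq!(nodes.tvu_peers().len(), 1);
}
```

Moving the lock inside the type keeps each critical section short and lets the signatures in the hunks above shrink from `&Arc<RwLock<ClusterInfo>>` to `&ClusterInfo`.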
@@ -1,6 +1,6 @@
[package]
name = "solana-archiver-utils"
-version = "1.1.3"
+version = "1.1.7"
description = "Solana Archiver Utils"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -11,15 +11,18 @@ edition = "2018"
[dependencies]
log = "0.4.8"
rand = "0.6.5"
-solana-chacha = { path = "../chacha", version = "1.1.3" }
-solana-chacha-sys = { path = "../chacha-sys", version = "1.1.3" }
-solana-ledger = { path = "../ledger", version = "1.1.3" }
-solana-logger = { path = "../logger", version = "1.1.3" }
-solana-perf = { path = "../perf", version = "1.1.3" }
-solana-sdk = { path = "../sdk", version = "1.1.3" }
+solana-chacha = { path = "../chacha", version = "1.1.7" }
+solana-chacha-sys = { path = "../chacha-sys", version = "1.1.7" }
+solana-ledger = { path = "../ledger", version = "1.1.7" }
+solana-logger = { path = "../logger", version = "1.1.7" }
+solana-perf = { path = "../perf", version = "1.1.7" }
+solana-sdk = { path = "../sdk", version = "1.1.7" }

[dev-dependencies]
hex = "0.4.2"

[lib]
name = "solana_archiver_utils"
+
+[package.metadata.docs.rs]
+targets = ["x86_64-unknown-linux-gnu"]
@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-archiver"
-version = "1.1.3"
+version = "1.1.7"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -10,11 +10,14 @@ homepage = "https://solana.com/"
[dependencies]
clap = "2.33.0"
console = "0.10.0"
-solana-clap-utils = { path = "../clap-utils", version = "1.1.3" }
-solana-core = { path = "../core", version = "1.1.3" }
-solana-logger = { path = "../logger", version = "1.1.3" }
-solana-metrics = { path = "../metrics", version = "1.1.3" }
-solana-archiver-lib = { path = "../archiver-lib", version = "1.1.3" }
-solana-net-utils = { path = "../net-utils", version = "1.1.3" }
-solana-sdk = { path = "../sdk", version = "1.1.3" }
+solana-clap-utils = { path = "../clap-utils", version = "1.1.7" }
+solana-core = { path = "../core", version = "1.1.7" }
+solana-logger = { path = "../logger", version = "1.1.7" }
+solana-metrics = { path = "../metrics", version = "1.1.7" }
+solana-archiver-lib = { path = "../archiver-lib", version = "1.1.7" }
+solana-net-utils = { path = "../net-utils", version = "1.1.7" }
+solana-sdk = { path = "../sdk", version = "1.1.7" }

+
+[package.metadata.docs.rs]
+targets = ["x86_64-unknown-linux-gnu"]
@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-banking-bench"
-version = "1.1.3"
+version = "1.1.7"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -10,13 +10,16 @@ homepage = "https://solana.com/"
[dependencies]
log = "0.4.6"
rayon = "1.3.0"
-solana-core = { path = "../core", version = "1.1.3" }
-solana-streamer = { path = "../streamer", version = "1.1.3" }
-solana-perf = { path = "../perf", version = "1.1.3" }
-solana-ledger = { path = "../ledger", version = "1.1.3" }
-solana-logger = { path = "../logger", version = "1.1.3" }
-solana-runtime = { path = "../runtime", version = "1.1.3" }
-solana-measure = { path = "../measure", version = "1.1.3" }
-solana-sdk = { path = "../sdk", version = "1.1.3" }
+solana-core = { path = "../core", version = "1.1.7" }
+solana-streamer = { path = "../streamer", version = "1.1.7" }
+solana-perf = { path = "../perf", version = "1.1.7" }
+solana-ledger = { path = "../ledger", version = "1.1.7" }
+solana-logger = { path = "../logger", version = "1.1.7" }
+solana-runtime = { path = "../runtime", version = "1.1.7" }
+solana-measure = { path = "../measure", version = "1.1.7" }
+solana-sdk = { path = "../sdk", version = "1.1.7" }
rand = "0.6.5"
crossbeam-channel = "0.4"
+
+[package.metadata.docs.rs]
+targets = ["x86_64-unknown-linux-gnu"]
@@ -28,7 +28,7 @@ use solana_sdk::{
transaction::Transaction,
};
use std::{
-sync::{atomic::Ordering, mpsc::Receiver, Arc, Mutex, RwLock},
+sync::{atomic::Ordering, mpsc::Receiver, Arc, Mutex},
thread::sleep,
time::{Duration, Instant},
};
@@ -152,7 +152,7 @@ fn main() {
let (exit, poh_recorder, poh_service, signal_receiver) =
create_test_recorder(&bank, &blockstore, None);
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
-let cluster_info = Arc::new(RwLock::new(cluster_info));
+let cluster_info = Arc::new(cluster_info);
let banking_stage = BankingStage::new(
&cluster_info,
&poh_recorder,
@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-exchange"
-version = "1.1.3"
+version = "1.1.7"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -18,17 +18,20 @@ rand = "0.6.5"
rayon = "1.3.0"
serde_json = "1.0.48"
serde_yaml = "0.8.11"
-solana-clap-utils = { path = "../clap-utils", version = "1.1.3" }
-solana-core = { path = "../core", version = "1.1.3" }
-solana-genesis = { path = "../genesis", version = "1.1.3" }
-solana-client = { path = "../client", version = "1.1.3" }
-solana-faucet = { path = "../faucet", version = "1.1.3" }
-solana-exchange-program = { path = "../programs/exchange", version = "1.1.3" }
-solana-logger = { path = "../logger", version = "1.1.3" }
-solana-metrics = { path = "../metrics", version = "1.1.3" }
-solana-net-utils = { path = "../net-utils", version = "1.1.3" }
-solana-runtime = { path = "../runtime", version = "1.1.3" }
-solana-sdk = { path = "../sdk", version = "1.1.3" }
+solana-clap-utils = { path = "../clap-utils", version = "1.1.7" }
+solana-core = { path = "../core", version = "1.1.7" }
+solana-genesis = { path = "../genesis", version = "1.1.7" }
+solana-client = { path = "../client", version = "1.1.7" }
+solana-faucet = { path = "../faucet", version = "1.1.7" }
+solana-exchange-program = { path = "../programs/exchange", version = "1.1.7" }
+solana-logger = { path = "../logger", version = "1.1.7" }
+solana-metrics = { path = "../metrics", version = "1.1.7" }
+solana-net-utils = { path = "../net-utils", version = "1.1.7" }
+solana-runtime = { path = "../runtime", version = "1.1.7" }
+solana-sdk = { path = "../sdk", version = "1.1.7" }

[dev-dependencies]
-solana-local-cluster = { path = "../local-cluster", version = "1.1.3" }
+solana-local-cluster = { path = "../local-cluster", version = "1.1.7" }
+
+[package.metadata.docs.rs]
+targets = ["x86_64-unknown-linux-gnu"]
@@ -2,14 +2,17 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-streamer"
-version = "1.1.3"
+version = "1.1.7"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"

[dependencies]
clap = "2.33.0"
-solana-clap-utils = { path = "../clap-utils", version = "1.1.3" }
-solana-streamer = { path = "../streamer", version = "1.1.3" }
-solana-logger = { path = "../logger", version = "1.1.3" }
-solana-net-utils = { path = "../net-utils", version = "1.1.3" }
+solana-clap-utils = { path = "../clap-utils", version = "1.1.7" }
+solana-streamer = { path = "../streamer", version = "1.1.7" }
+solana-logger = { path = "../logger", version = "1.1.7" }
+solana-net-utils = { path = "../net-utils", version = "1.1.7" }
+
+[package.metadata.docs.rs]
+targets = ["x86_64-unknown-linux-gnu"]
@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-tps"
-version = "1.1.3"
+version = "1.1.7"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -14,24 +14,27 @@ log = "0.4.8"
rayon = "1.3.0"
serde_json = "1.0.48"
serde_yaml = "0.8.11"
-solana-clap-utils = { path = "../clap-utils", version = "1.1.3" }
-solana-core = { path = "../core", version = "1.1.3" }
-solana-genesis = { path = "../genesis", version = "1.1.3" }
-solana-client = { path = "../client", version = "1.1.3" }
-solana-faucet = { path = "../faucet", version = "1.1.3" }
-solana-librapay = { path = "../programs/librapay", version = "1.1.3", optional = true }
-solana-logger = { path = "../logger", version = "1.1.3" }
-solana-metrics = { path = "../metrics", version = "1.1.3" }
-solana-measure = { path = "../measure", version = "1.1.3" }
-solana-net-utils = { path = "../net-utils", version = "1.1.3" }
-solana-runtime = { path = "../runtime", version = "1.1.3" }
-solana-sdk = { path = "../sdk", version = "1.1.3" }
-solana-move-loader-program = { path = "../programs/move_loader", version = "1.1.3", optional = true }
+solana-clap-utils = { path = "../clap-utils", version = "1.1.7" }
+solana-core = { path = "../core", version = "1.1.7" }
+solana-genesis = { path = "../genesis", version = "1.1.7" }
+solana-client = { path = "../client", version = "1.1.7" }
+solana-faucet = { path = "../faucet", version = "1.1.7" }
+solana-librapay = { path = "../programs/librapay", version = "1.1.7", optional = true }
+solana-logger = { path = "../logger", version = "1.1.7" }
+solana-metrics = { path = "../metrics", version = "1.1.7" }
+solana-measure = { path = "../measure", version = "1.1.7" }
+solana-net-utils = { path = "../net-utils", version = "1.1.7" }
+solana-runtime = { path = "../runtime", version = "1.1.7" }
+solana-sdk = { path = "../sdk", version = "1.1.7" }
+solana-move-loader-program = { path = "../programs/move_loader", version = "1.1.7", optional = true }

[dev-dependencies]
serial_test = "0.4.0"
serial_test_derive = "0.4.0"
-solana-local-cluster = { path = "../local-cluster", version = "1.1.3" }
+solana-local-cluster = { path = "../local-cluster", version = "1.1.7" }

[features]
move = ["solana-librapay", "solana-move-loader-program"]
+
+[package.metadata.docs.rs]
+targets = ["x86_64-unknown-linux-gnu"]
@@ -1,6 +1,6 @@
[package]
name = "solana-chacha-cuda"
-version = "1.1.3"
+version = "1.1.7"
description = "Solana Chacha Cuda APIs"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -10,15 +10,18 @@ edition = "2018"

[dependencies]
log = "0.4.8"
-solana-archiver-utils = { path = "../archiver-utils", version = "1.1.3" }
-solana-chacha = { path = "../chacha", version = "1.1.3" }
-solana-ledger = { path = "../ledger", version = "1.1.3" }
-solana-logger = { path = "../logger", version = "1.1.3" }
-solana-perf = { path = "../perf", version = "1.1.3" }
-solana-sdk = { path = "../sdk", version = "1.1.3" }
+solana-archiver-utils = { path = "../archiver-utils", version = "1.1.7" }
+solana-chacha = { path = "../chacha", version = "1.1.7" }
+solana-ledger = { path = "../ledger", version = "1.1.7" }
+solana-logger = { path = "../logger", version = "1.1.7" }
+solana-perf = { path = "../perf", version = "1.1.7" }
+solana-sdk = { path = "../sdk", version = "1.1.7" }

[dev-dependencies]
hex-literal = "0.2.1"

[lib]
name = "solana_chacha_cuda"
+
+[package.metadata.docs.rs]
+targets = ["x86_64-unknown-linux-gnu"]
@@ -1,6 +1,6 @@
[package]
name = "solana-chacha-sys"
-version = "1.1.3"
+version = "1.1.7"
description = "Solana chacha-sys"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -10,3 +10,6 @@ edition = "2018"

[build-dependencies]
cc = "1.0.49"
+
+[package.metadata.docs.rs]
+targets = ["x86_64-unknown-linux-gnu"]
@@ -1,6 +1,6 @@
[package]
name = "solana-chacha"
-version = "1.1.3"
+version = "1.1.7"
description = "Solana Chacha APIs"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -12,14 +12,17 @@ edition = "2018"
log = "0.4.8"
rand = "0.6.5"
rand_chacha = "0.1.1"
-solana-chacha-sys = { path = "../chacha-sys", version = "1.1.3" }
-solana-ledger = { path = "../ledger", version = "1.1.3" }
-solana-logger = { path = "../logger", version = "1.1.3" }
-solana-perf = { path = "../perf", version = "1.1.3" }
-solana-sdk = { path = "../sdk", version = "1.1.3" }
+solana-chacha-sys = { path = "../chacha-sys", version = "1.1.7" }
+solana-ledger = { path = "../ledger", version = "1.1.7" }
+solana-logger = { path = "../logger", version = "1.1.7" }
+solana-perf = { path = "../perf", version = "1.1.7" }
+solana-sdk = { path = "../sdk", version = "1.1.7" }

[dev-dependencies]
hex-literal = "0.2.1"

[lib]
name = "solana_chacha"
+
+[package.metadata.docs.rs]
+targets = ["x86_64-unknown-linux-gnu"]
@@ -18,3 +18,6 @@ steps:
  - command: "ci/publish-docs.sh"
    timeout_in_minutes: 15
    name: "publish docs"
+  - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-move.sh"
+    name: "move"
+    timeout_in_minutes: 20
@@ -2,12 +2,15 @@
# other than those in docs/ are modified

steps:
  - command: "ci/shellcheck.sh"
    name: "shellcheck"
    timeout_in_minutes: 5

+  - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_nightly_docker_image ci/test-coverage.sh"
+    name: "coverage"
+    timeout_in_minutes: 30
+  - wait
  - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-stable.sh"
    name: "stable"
    timeout_in_minutes: 60
    artifact_paths: "log-*.txt"
  - wait

  - command: "ci/test-stable-perf.sh"
    name: "stable-perf"
    timeout_in_minutes: 40
@@ -17,21 +20,7 @@ steps:
  - command: "ci/test-bench.sh"
    name: "bench"
    timeout_in_minutes: 30
-  - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-stable.sh"
-    name: "stable"
-    timeout_in_minutes: 60
-    artifact_paths: "log-*.txt"
-    agents:
-      - "queue=rpc-test-capable"
-  - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-move.sh"
-    name: "move"
-    timeout_in_minutes: 20
  - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-local-cluster.sh"
    name: "local-cluster"
    timeout_in_minutes: 45
    artifact_paths: "log-*.txt"
-  - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_nightly_docker_image ci/test-coverage.sh"
-    name: "coverage"
-    timeout_in_minutes: 30
-    agents:
-      - "queue=rpc-test-capable"
@@ -8,6 +8,9 @@ steps:
  - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_nightly_docker_image ci/test-checks.sh"
    name: "checks"
    timeout_in_minutes: 20
+  - command: "ci/shellcheck.sh"
+    name: "shellcheck"
+    timeout_in_minutes: 5

  - wait

@@ -95,9 +95,8 @@ fi
source ci/upload-ci-artifact.sh

for file in solana-release-$TARGET.tar.bz2 solana-release-$TARGET.yml solana-install-init-"$TARGET"* $MAYBE_TARBALLS; do
-  upload-ci-artifact "$file"
-
  if [[ -n $DO_NOT_PUBLISH_TAR ]]; then
+    upload-ci-artifact "$file"
    echo "Skipped $file due to DO_NOT_PUBLISH_TAR"
    continue
  fi
@@ -38,10 +38,15 @@ test -d target/release/bpf && find target/release/bpf -name '*.d' -delete
# Clear the BPF sysroot files, they are not automatically rebuilt
rm -rf target/xargo # Issue #3105

+# Limit compiler jobs to reduce memory usage
+# on machines with 1gb/thread of memory
+NPROC=$(nproc)
+NPROC=$((NPROC>16 ? 16 : NPROC))
+
echo "Executing $testName"
case $testName in
test-stable)
-  _ cargo +"$rust_stable" test --all --exclude solana-local-cluster ${V:+--verbose} -- --nocapture
+  _ cargo +"$rust_stable" test --jobs "$NPROC" --all --exclude solana-local-cluster ${V:+--verbose} -- --nocapture
  _ cargo +"$rust_stable" test --manifest-path bench-tps/Cargo.toml --features=move ${V:+--verbose} test_bench_tps_local_cluster_move -- --nocapture
;;
test-stable-perf)
@@ -1,6 +1,6 @@
[package]
name = "solana-clap-utils"
-version = "1.1.3"
+version = "1.1.7"
description = "Solana utilities for the clap"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -11,8 +11,8 @@ edition = "2018"
[dependencies]
clap = "2.33.0"
rpassword = "4.0"
-solana-remote-wallet = { path = "../remote-wallet", version = "1.1.3" }
-solana-sdk = { path = "../sdk", version = "1.1.3" }
+solana-remote-wallet = { path = "../remote-wallet", version = "1.1.7" }
+solana-sdk = { path = "../sdk", version = "1.1.7" }
thiserror = "1.0.11"
tiny-bip39 = "0.7.0"
url = "2.1.0"
@@ -20,3 +20,6 @@ chrono = "0.4"

[lib]
name = "solana_clap_utils"
+
+[package.metadata.docs.rs]
+targets = ["x86_64-unknown-linux-gnu"]
@@ -116,7 +116,7 @@ pub fn pubkeys_sigs_of(matches: &ArgMatches<'_>, name: &str) -> Option<Vec<(Pubk
pub fn signer_of(
matches: &ArgMatches<'_>,
name: &str,
-wallet_manager: Option<&Arc<RemoteWalletManager>>,
+wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<(Option<Box<dyn Signer>>, Option<Pubkey>), Box<dyn std::error::Error>> {
if let Some(location) = matches.value_of(name) {
let signer = signer_from_path(matches, location, name, wallet_manager)?;
@@ -130,7 +130,7 @@ pub fn signer_of(
pub fn pubkey_of_signer(
matches: &ArgMatches<'_>,
name: &str,
-wallet_manager: Option<&Arc<RemoteWalletManager>>,
+wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<Option<Pubkey>, Box<dyn std::error::Error>> {
if let Some(location) = matches.value_of(name) {
Ok(Some(pubkey_from_path(
@@ -147,7 +147,7 @@ pub fn pubkey_of_signer(
pub fn pubkeys_of_multiple_signers(
matches: &ArgMatches<'_>,
name: &str,
-wallet_manager: Option<&Arc<RemoteWalletManager>>,
+wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<Option<Vec<Pubkey>>, Box<dyn std::error::Error>> {
if let Some(pubkey_matches) = matches.values_of(name) {
let mut pubkeys: Vec<Pubkey> = vec![];
@@ -163,7 +163,7 @@ pub fn pubkeys_of_multiple_signers(
pub fn resolve_signer(
matches: &ArgMatches<'_>,
name: &str,
-wallet_manager: Option<&Arc<RemoteWalletManager>>,
+wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<Option<String>, Box<dyn std::error::Error>> {
Ok(resolve_signer_from_path(
matches,
@@ -8,7 +8,7 @@ use clap::ArgMatches;
use rpassword::prompt_password_stderr;
use solana_remote_wallet::{
remote_keypair::generate_remote_keypair,
-remote_wallet::{RemoteWalletError, RemoteWalletManager},
+remote_wallet::{maybe_wallet_manager, RemoteWalletError, RemoteWalletManager},
};
use solana_sdk::{
pubkey::Pubkey,
@@ -47,13 +47,6 @@ pub fn parse_keypair_path(path: &str) -> KeypairUrl {
}
}

-pub fn check_for_usb<S>(mut items: impl Iterator<Item = S>) -> bool
-where
-S: Into<String>,
-{
-items.any(|arg| matches!(parse_keypair_path(&arg.into()), KeypairUrl::Usb(_)))
-}
-
pub fn presigner_from_pubkey_sigs(
pubkey: &Pubkey,
signers: &[(Pubkey, Signature)],
@@ -71,7 +64,7 @@ pub fn signer_from_path(
matches: &ArgMatches,
path: &str,
keypair_name: &str,
-wallet_manager: Option<&Arc<RemoteWalletManager>>,
+wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<Box<dyn Signer>, Box<dyn error::Error>> {
match parse_keypair_path(path) {
KeypairUrl::Ask => {
@@ -95,6 +88,9 @@ pub fn signer_from_path(
Ok(Box::new(read_keypair(&mut stdin)?))
}
KeypairUrl::Usb(path) => {
+if wallet_manager.is_none() {
+*wallet_manager = maybe_wallet_manager()?;
+}
if let Some(wallet_manager) = wallet_manager {
Ok(Box::new(generate_remote_keypair(
path,
@@ -129,7 +125,7 @@ pub fn pubkey_from_path(
matches: &ArgMatches,
path: &str,
keypair_name: &str,
-wallet_manager: Option<&Arc<RemoteWalletManager>>,
+wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<Pubkey, Box<dyn error::Error>> {
match parse_keypair_path(path) {
KeypairUrl::Pubkey(pubkey) => Ok(pubkey),
@@ -141,7 +137,7 @@ pub fn resolve_signer_from_path(
matches: &ArgMatches,
path: &str,
keypair_name: &str,
-wallet_manager: Option<&Arc<RemoteWalletManager>>,
+wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<Option<String>, Box<dyn error::Error>> {
match parse_keypair_path(path) {
KeypairUrl::Ask => {
@@ -165,6 +161,9 @@ pub fn resolve_signer_from_path(
read_keypair(&mut stdin).map(|_| None)
}
KeypairUrl::Usb(path) => {
+if wallet_manager.is_none() {
+*wallet_manager = maybe_wallet_manager()?;
+}
if let Some(wallet_manager) = wallet_manager {
let path = generate_remote_keypair(
path,
@@ -263,20 +262,4 @@ mod tests {
sanitize_seed_phrase(seed_phrase)
);
}
-
-#[test]
-fn test_check_for_usb() {
-let args: Vec<&str> = vec![];
-assert_eq!(check_for_usb(args.into_iter()), false);
-let args = vec!["usb://"];
-assert_eq!(check_for_usb(args.into_iter()), true);
-let args = vec!["other"];
-assert_eq!(check_for_usb(args.into_iter()), false);
-let args = vec!["other", "usb://", "another"];
-assert_eq!(check_for_usb(args.into_iter()), true);
-let args = vec!["other", "another"];
-assert_eq!(check_for_usb(args.into_iter()), false);
-let args = vec!["usb://", "usb://"];
-assert_eq!(check_for_usb(args.into_iter()), true);
-}
}
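The keypair hunks above change how the remote-wallet manager is threaded through the CLI: instead of receiving an already-initialized `Option<&Arc<RemoteWalletManager>>`, each path-resolution function now takes `&mut Option<Arc<RemoteWalletManager>>` and calls `maybe_wallet_manager()` itself the first time a `usb://` path is seen, which is why the standalone `check_for_usb` helper and its test can be deleted. A small sketch of that lazy-initialization shape, using a hypothetical stand-in type rather than the real solana-remote-wallet API:

```rust
use std::sync::Arc;

// Hypothetical stand-in for solana_remote_wallet::RemoteWalletManager.
struct WalletManager;

// Stand-in for maybe_wallet_manager(): probe for attached devices once,
// returning None when no hardware wallet is present.
fn maybe_wallet_manager() -> Result<Option<Arc<WalletManager>>, String> {
    Ok(None)
}

// Taking `&mut Option<Arc<..>>` lets the callee initialize the manager on
// first use and lets the caller reuse it for every later signer lookup,
// so USB devices are enumerated at most once per invocation.
fn signer_from_path(
    path: &str,
    wallet_manager: &mut Option<Arc<WalletManager>>,
) -> Result<String, String> {
    if path.starts_with("usb://") {
        if wallet_manager.is_none() {
            *wallet_manager = maybe_wallet_manager()?;
        }
        match wallet_manager {
            Some(_manager) => Ok(format!("remote signer for {}", path)),
            None => Err("no wallet devices found".to_string()),
        }
    } else {
        Ok(format!("file signer for {}", path))
    }
}

fn main() {
    let mut wallet_manager = None;
    // The same Option is threaded through every lookup.
    println!("{:?}", signer_from_path("id.json", &mut wallet_manager));
    println!("{:?}", signer_from_path("usb://ledger", &mut wallet_manager));
}
```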
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-cli-config"
description = "Blockchain, Rebuilt for Scale"
-version = "1.1.3"
+version = "1.1.7"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -15,3 +15,6 @@ serde = "1.0.105"
serde_derive = "1.0.103"
serde_yaml = "0.8.11"
url = "2.1.1"
+
+[package.metadata.docs.rs]
+targets = ["x86_64-unknown-linux-gnu"]
@@ -60,17 +60,38 @@ impl Config {
ws_url
.set_scheme(if is_secure { "wss" } else { "ws" })
.expect("unable to set scheme");
-let ws_port = match json_rpc_url.port() {
-Some(port) => port + 1,
-None => {
-if is_secure {
-8901
-} else {
-8900
-}
-}
-};
-ws_url.set_port(Some(ws_port)).expect("unable to set port");
+if let Some(port) = json_rpc_url.port() {
+ws_url.set_port(Some(port + 1)).expect("unable to set port");
+}
ws_url.to_string()
}
}
+
+#[cfg(test)]
+mod test {
+use super::*;
+
+#[test]
+fn compute_websocket_url() {
+assert_eq!(
+Config::compute_websocket_url(&"http://devnet.solana.com"),
+"ws://devnet.solana.com/".to_string()
+);
+
+assert_eq!(
+Config::compute_websocket_url(&"https://devnet.solana.com"),
+"wss://devnet.solana.com/".to_string()
+);
+
+assert_eq!(
+Config::compute_websocket_url(&"http://example.com:8899"),
+"ws://example.com:8900/".to_string()
+);
+assert_eq!(
+Config::compute_websocket_url(&"https://example.com:1234"),
+"wss://example.com:1235/".to_string()
+);
+
+assert_eq!(Config::compute_websocket_url(&"garbage"), String::new());
+}
+}
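The `compute_websocket_url` hunk above simplifies the port logic: the scheme is switched to `ws`/`wss`, and a port is set only when the JSON RPC URL carries an explicit one (explicit port + 1); URLs without a port keep the scheme default instead of being forced to 8900/8901. A standalone sketch of that derivation using the `url` crate listed in the dependencies, mirroring the expectations in the new test (an approximation of the behavior, not the solana-cli-config source):

```rust
use url::Url;

// Derive a websocket endpoint from a JSON RPC URL: ws/wss scheme, and
// explicit port + 1 only when the RPC URL names a port.
fn websocket_url(json_rpc_url: &str) -> String {
    let json_rpc_url = match Url::parse(json_rpc_url) {
        Ok(url) => url,
        Err(_) => return String::new(), // e.g. "garbage" yields an empty string
    };
    let is_secure = json_rpc_url.scheme().eq_ignore_ascii_case("https");
    let mut ws_url = json_rpc_url.clone();
    ws_url
        .set_scheme(if is_secure { "wss" } else { "ws" })
        .expect("unable to set scheme");
    if let Some(port) = json_rpc_url.port() {
        ws_url.set_port(Some(port + 1)).expect("unable to set port");
    }
    ws_url.to_string()
}

fn main() {
    assert_eq!(websocket_url("http://devnet.solana.com"), "ws://devnet.solana.com/");
    assert_eq!(websocket_url("https://example.com:1234"), "wss://example.com:1235/");
    assert_eq!(websocket_url("garbage"), String::new());
}
```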
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-cli"
description = "Blockchain, Rebuilt for Scale"
-version = "1.1.3"
+version = "1.1.7"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -18,6 +18,7 @@ ctrlc = { version = "3.1.4", features = ["termination"] }
console = "0.10.0"
dirs = "2.0.2"
log = "0.4.8"
+Inflector = "0.11.4"
indicatif = "0.14.0"
humantime = "2.0.0"
num-traits = "0.2"
@@ -26,30 +27,33 @@ reqwest = { version = "0.10.4", default-features = false, features = ["blocking"
serde = "1.0.105"
serde_derive = "1.0.103"
serde_json = "1.0.48"
-solana-budget-program = { path = "../programs/budget", version = "1.1.3" }
-solana-clap-utils = { path = "../clap-utils", version = "1.1.3" }
-solana-cli-config = { path = "../cli-config", version = "1.1.3" }
-solana-client = { path = "../client", version = "1.1.3" }
-solana-config-program = { path = "../programs/config", version = "1.1.3" }
-solana-faucet = { path = "../faucet", version = "1.1.3" }
-solana-logger = { path = "../logger", version = "1.1.3" }
-solana-net-utils = { path = "../net-utils", version = "1.1.3" }
-solana-remote-wallet = { path = "../remote-wallet", version = "1.1.3" }
-solana-runtime = { path = "../runtime", version = "1.1.3" }
-solana-sdk = { path = "../sdk", version = "1.1.3" }
-solana-stake-program = { path = "../programs/stake", version = "1.1.3" }
-solana-storage-program = { path = "../programs/storage", version = "1.1.3" }
-solana-vote-program = { path = "../programs/vote", version = "1.1.3" }
-solana-vote-signer = { path = "../vote-signer", version = "1.1.3" }
-titlecase = "1.1.0"
+solana-budget-program = { path = "../programs/budget", version = "1.1.7" }
+solana-clap-utils = { path = "../clap-utils", version = "1.1.7" }
+solana-cli-config = { path = "../cli-config", version = "1.1.7" }
+solana-client = { path = "../client", version = "1.1.7" }
+solana-config-program = { path = "../programs/config", version = "1.1.7" }
+solana-faucet = { path = "../faucet", version = "1.1.7" }
+solana-logger = { path = "../logger", version = "1.1.7" }
+solana-net-utils = { path = "../net-utils", version = "1.1.7" }
+solana-remote-wallet = { path = "../remote-wallet", version = "1.1.7" }
+solana-runtime = { path = "../runtime", version = "1.1.7" }
+solana-sdk = { path = "../sdk", version = "1.1.7" }
+solana-stake-program = { path = "../programs/stake", version = "1.1.7" }
+solana-storage-program = { path = "../programs/storage", version = "1.1.7" }
+solana-transaction-status = { path = "../transaction-status", version = "1.1.7" }
+solana-vote-program = { path = "../programs/vote", version = "1.1.7" }
+solana-vote-signer = { path = "../vote-signer", version = "1.1.7" }
thiserror = "1.0.13"
url = "2.1.1"

[dev-dependencies]
-solana-core = { path = "../core", version = "1.1.3" }
-solana-budget-program = { path = "../programs/budget", version = "1.1.3" }
+solana-core = { path = "../core", version = "1.1.7" }
+solana-budget-program = { path = "../programs/budget", version = "1.1.7" }
tempfile = "3.1.0"

[[bin]]
name = "solana"
path = "src/main.rs"
+
+[package.metadata.docs.rs]
+targets = ["x86_64-unknown-linux-gnu"]
cli/src/cli.rs (246 changes)
@@ -1,4 +1,5 @@
use crate::{
+cli_output::{CliAccount, OutputFormat},
cluster_query::*,
display::{println_name_value, println_signers},
nonce::{self, *},
@@ -21,6 +22,7 @@ use solana_clap_utils::{
use solana_client::{
client_error::{ClientErrorKind, Result as ClientResult},
rpc_client::RpcClient,
+rpc_response::{RpcAccount, RpcKeyedAccount},
};
#[cfg(not(test))]
use solana_faucet::faucet::request_airdrop_transaction;
@@ -49,6 +51,7 @@ use solana_stake_program::{
stake_state::{Lockup, StakeAuthorize},
};
use solana_storage_program::storage_instruction::StorageAccountType;
+use solana_transaction_status::{EncodedTransaction, TransactionEncoding};
use solana_vote_program::vote_state::VoteAuthorize;
use std::{
error,
@@ -84,7 +87,7 @@ pub(crate) fn generate_unique_signers(
bulk_signers: Vec<Option<Box<dyn Signer>>>,
matches: &ArgMatches<'_>,
default_signer_path: &str,
-wallet_manager: Option<&Arc<RemoteWalletManager>>,
+wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<CliSignerInfo, Box<dyn error::Error>> {
let mut unique_signers = vec![];

@@ -224,6 +227,11 @@ pub enum CliCommand {
use_lamports_unit: bool,
commitment_config: CommitmentConfig,
},
+TransactionHistory {
+address: Pubkey,
+end_slot: Option<Slot>, // None == latest slot
+slot_limit: u64,
+},
// Nonce commands
AuthorizeNonceAccount {
nonce_account: Pubkey,
@@ -331,6 +339,7 @@ pub enum CliCommand {
destination_account_pubkey: Pubkey,
lamports: u64,
withdraw_authority: SignerIndex,
+custodian: Option<SignerIndex>,
sign_only: bool,
blockhash_query: BlockhashQuery,
nonce_account: Option<Pubkey>,
@@ -397,6 +406,7 @@ pub enum CliCommand {
},
Cancel(Pubkey),
Confirm(Signature),
+DecodeTransaction(Transaction),
Pay(PayCommand),
ResolveSigner(Option<String>),
ShowAccount {
@@ -472,6 +482,7 @@ pub struct CliConfig<'a> {
pub keypair_path: String,
pub rpc_client: Option<RpcClient>,
pub verbose: bool,
+pub output_format: OutputFormat,
}

impl CliConfig<'_> {
@@ -563,6 +574,7 @@ impl Default for CliConfig<'_> {
keypair_path: Self::default_keypair_path(),
rpc_client: None,
verbose: false,
+output_format: OutputFormat::Display,
}
}
}
@@ -570,7 +582,7 @@ impl Default for CliConfig<'_> {
pub fn parse_command(
matches: &ArgMatches<'_>,
default_signer_path: &str,
-wallet_manager: Option<&Arc<RemoteWalletManager>>,
+wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, Box<dyn error::Error>> {
let response = match matches.subcommand() {
// Cluster Query Commands
@@ -612,6 +624,9 @@ pub fn parse_command(
}),
("stakes", Some(matches)) => parse_show_stakes(matches, wallet_manager),
("validators", Some(matches)) => parse_show_validators(matches),
+("transaction-history", Some(matches)) => {
+parse_transaction_history(matches, wallet_manager)
+}
// Nonce Commands
("authorize-nonce-account", Some(matches)) => {
parse_authorize_nonce_account(matches, default_signer_path, wallet_manager)
@@ -794,11 +809,23 @@ pub fn parse_command(
command: CliCommand::Confirm(signature),
signers: vec![],
}),
-_ => {
-eprintln!("{}", matches.usage());
-Err(CliError::BadParameter("Invalid signature".to_string()))
-}
+_ => Err(CliError::BadParameter("Invalid signature".to_string())),
},
+("decode-transaction", Some(matches)) => {
+let encoded_transaction = EncodedTransaction::Binary(
+matches.value_of("base85_transaction").unwrap().to_string(),
+);
+if let Some(transaction) = encoded_transaction.decode() {
+Ok(CliCommandInfo {
+command: CliCommand::DecodeTransaction(transaction),
+signers: vec![],
+})
+} else {
+Err(CliError::BadParameter(
+"Unable to decode transaction".to_string(),
+))
+}
+}
("pay", Some(matches)) => {
let lamports = lamports_of_sol(matches, "amount").unwrap();
let to = pubkey_of_signer(matches, "to", wallet_manager)?.unwrap();
@@ -1050,7 +1077,7 @@ pub fn return_signers(tx: &Transaction) -> ProcessResult {
pub fn parse_create_address_with_seed(
matches: &ArgMatches<'_>,
default_signer_path: &str,
-wallet_manager: Option<&Arc<RemoteWalletManager>>,
+wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let from_pubkey = pubkey_of_signer(matches, "from", wallet_manager)?;
let signers = if from_pubkey.is_some() {
@@ -1161,13 +1188,47 @@ fn process_balance(
}
}

-fn process_confirm(rpc_client: &RpcClient, signature: &Signature) -> ProcessResult {
-match rpc_client.get_signature_status(&signature) {
+fn process_confirm(
+rpc_client: &RpcClient,
+config: &CliConfig,
+signature: &Signature,
+) -> ProcessResult {
+match rpc_client.get_signature_status_with_commitment_and_history(
+&signature,
+CommitmentConfig::max(),
+true,
+) {
Ok(status) => {
-if let Some(result) = status {
-match result {
+if let Some(transaction_status) = status {
+if config.verbose {
+match rpc_client
+.get_confirmed_transaction(signature, TransactionEncoding::Binary)
+{
+Ok(confirmed_transaction) => {
+println!(
+"\nTransaction executed in slot {}:",
+confirmed_transaction.slot
+);
+crate::display::println_transaction(
+&confirmed_transaction
+.transaction
+.transaction
+.decode()
+.expect("Successful decode"),
+&confirmed_transaction.transaction.meta,
+"  ",
+);
+}
+Err(err) => {
+println!("Unable to get confirmed transaction details: {}", err)
+}
+}
+println!();
+}
+
+match transaction_status {
Ok(_) => Ok("Confirmed".to_string()),
-Err(err) => Ok(format!("Transaction failed with error: {}", err)),
+Err(err) => Ok(format!("Transaction failed: {}", err)),
}
} else {
Ok("Not found".to_string())
@@ -1177,33 +1238,40 @@ fn process_confirm(rpc_client: &RpcClient, signature: &Signature) -> ProcessResu
}
}

+fn process_decode_transaction(transaction: &Transaction) -> ProcessResult {
+crate::display::println_transaction(transaction, &None, "");
+Ok("".to_string())
+}
+
fn process_show_account(
rpc_client: &RpcClient,
-_config: &CliConfig,
+config: &CliConfig,
account_pubkey: &Pubkey,
output_file: &Option<String>,
use_lamports_unit: bool,
) -> ProcessResult {
let account = rpc_client.get_account(account_pubkey)?;
+let data = account.data.clone();
+let cli_account = CliAccount {
+keyed_account: RpcKeyedAccount {
+pubkey: account_pubkey.to_string(),
+account: RpcAccount::encode(account),
+},
+use_lamports_unit,
+};

-println!();
-println_name_value("Public Key:", &account_pubkey.to_string());
-println_name_value(
-"Balance:",
-&build_balance_message(account.lamports, use_lamports_unit, true),
-);
-println_name_value("Owner:", &account.owner.to_string());
-println_name_value("Executable:", &account.executable.to_string());
-println_name_value("Rent Epoch:", &account.rent_epoch.to_string());
+config.output_format.formatted_print(&cli_account);

-if let Some(output_file) = output_file {
-let mut f = File::create(output_file)?;
-f.write_all(&account.data)?;
-println!();
-println!("Wrote account data to {}", output_file);
-} else if !account.data.is_empty() {
-use pretty_hex::*;
-println!("{:?}", account.data.hex_dump());
+if config.output_format == OutputFormat::Display {
+if let Some(output_file) = output_file {
+let mut f = File::create(output_file)?;
+f.write_all(&data)?;
+println!();
+println!("Wrote account data to {}", output_file);
+} else if !data.is_empty() {
+use pretty_hex::*;
+println!("{:?}", data.hex_dump());
+}
}

Ok("".to_string())
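`process_show_account` (and the other `process_*` helpers that gain a `config` parameter) now routes structured data through `config.output_format` and only emits the extra human-oriented output, such as the hex dump or the raw `--output` file note, when the format is `Display`. A toy sketch of that gating, with illustrative types rather than the solana-cli `CliAccount`/`OutputFormat` definitions:

```rust
#[derive(PartialEq)]
enum OutputFormat {
    Display,
    Json,
}

// Illustrative account view; the real CLI wraps an RpcKeyedAccount.
struct AccountView {
    pubkey: String,
    lamports: u64,
    data: Vec<u8>,
}

fn show_account(format: &OutputFormat, account: &AccountView) {
    // The structured view always goes through the selected formatter.
    match format {
        OutputFormat::Display => {
            println!("Public Key: {}", account.pubkey);
            println!("Balance: {} lamports", account.lamports);
        }
        OutputFormat::Json => println!(
            "{{\"pubkey\":\"{}\",\"lamports\":{}}}",
            account.pubkey, account.lamports
        ),
    }
    // Human-only extras are gated on Display so JSON output stays machine-parseable.
    if *format == OutputFormat::Display && !account.data.is_empty() {
        println!("{} bytes of account data", account.data.len());
    }
}

fn main() {
    let account = AccountView {
        pubkey: "11111111111111111111111111111111".to_string(),
        lamports: 42,
        data: vec![0, 1, 2],
    };
    show_account(&OutputFormat::Json, &account);
    show_account(&OutputFormat::Display, &account);
}
```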
@@ -1577,7 +1645,7 @@ fn process_witness(
}

pub fn process_command(config: &CliConfig) -> ProcessResult {
-if config.verbose {
+if config.verbose && config.output_format == OutputFormat::Display {
println_name_value("RPC URL:", &config.json_rpc_url);
println_name_value("Default Signer Path:", &config.keypair_path);
if config.keypair_path.starts_with("usb://") {
@@ -1622,7 +1690,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
CliCommand::GetBlockTime { slot } => process_get_block_time(&rpc_client, *slot),
CliCommand::GetGenesisHash => process_get_genesis_hash(&rpc_client),
CliCommand::GetEpochInfo { commitment_config } => {
-process_get_epoch_info(&rpc_client, *commitment_config)
+process_get_epoch_info(&rpc_client, config, *commitment_config)
}
CliCommand::GetEpoch { commitment_config } => {
process_get_epoch(&rpc_client, *commitment_config)
@@ -1662,13 +1730,19 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
vote_account_pubkeys,
} => process_show_stakes(
&rpc_client,
+config,
*use_lamports_unit,
vote_account_pubkeys.as_deref(),
),
CliCommand::ShowValidators {
use_lamports_unit,
commitment_config,
-} => process_show_validators(&rpc_client, *use_lamports_unit, *commitment_config),
+} => process_show_validators(&rpc_client, config, *use_lamports_unit, *commitment_config),
+CliCommand::TransactionHistory {
+address,
+end_slot,
+slot_limit,
+} => process_transaction_history(&rpc_client, address, *end_slot, *slot_limit),

// Nonce Commands

@@ -1711,7 +1785,12 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
CliCommand::ShowNonceAccount {
nonce_account_pubkey,
use_lamports_unit,
-} => process_show_nonce_account(&rpc_client, &nonce_account_pubkey, *use_lamports_unit),
+} => process_show_nonce_account(
+&rpc_client,
+config,
+&nonce_account_pubkey,
+*use_lamports_unit,
+),
// Withdraw lamports from a nonce account
CliCommand::WithdrawFromNonceAccount {
nonce_account,
@@ -1890,6 +1969,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
destination_account_pubkey,
lamports,
withdraw_authority,
+custodian,
sign_only,
blockhash_query,
ref nonce_account,
@@ -1902,6 +1982,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
&destination_account_pubkey,
*lamports,
*withdraw_authority,
+*custodian,
*sign_only,
blockhash_query,
nonce_account.as_ref(),
@@ -1940,7 +2021,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {

// Return all or single validator info
CliCommand::GetValidatorInfo(info_pubkey) => {
-process_get_validator_info(&rpc_client, *info_pubkey)
+process_get_validator_info(&rpc_client, config, *info_pubkey)
}
// Publish validator info
CliCommand::SetValidatorInfo {
@@ -2051,7 +2132,8 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
// Cancel a contract by contract Pubkey
CliCommand::Cancel(pubkey) => process_cancel(&rpc_client, config, &pubkey),
// Confirm the last client transaction by signature
-CliCommand::Confirm(signature) => process_confirm(&rpc_client, signature),
+CliCommand::Confirm(signature) => process_confirm(&rpc_client, config, signature),
+CliCommand::DecodeTransaction(transaction) => process_decode_transaction(transaction),
// If client has positive balance, pay lamports to another address
CliCommand::Pay(PayCommand {
lamports,
@@ -2333,6 +2415,18 @@ pub fn app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> App<'ab, '
.help("The transaction signature to confirm"),
),
)
+.subcommand(
+SubCommand::with_name("decode-transaction")
+.about("Decode a base-85 binary transaction")
+.arg(
+Arg::with_name("base85_transaction")
+.index(1)
+.value_name("BASE58_TRANSACTION")
+.takes_value(true)
+.required(true)
+.help("The transaction to decode"),
+),
+)
.subcommand(
SubCommand::with_name("create-address-with-seed")
.about("Generate a derived account address with a seed")
@@ -2550,7 +2644,7 @@ pub fn app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> App<'ab, '
)
.arg(
Arg::with_name("output_file")
-.long("output")
+.long("output-file")
.short("o")
.value_name("FILEPATH")
.takes_value(true)
@@ -2604,11 +2698,11 @@ mod tests {
write_keypair_file(&default_keypair, &default_keypair_file).unwrap();

let signer_info =
-generate_unique_signers(vec![], &matches, &default_keypair_file, None).unwrap();
+generate_unique_signers(vec![], &matches, &default_keypair_file, &mut None).unwrap();
assert_eq!(signer_info.signers.len(), 0);

let signer_info =
-generate_unique_signers(vec![None, None], &matches, &default_keypair_file, None)
+generate_unique_signers(vec![None, None], &matches, &default_keypair_file, &mut None)
.unwrap();
assert_eq!(signer_info.signers.len(), 1);
assert_eq!(signer_info.index_of(None), Some(0));
@@ -2620,7 +2714,7 @@ mod tests {
let keypair0_clone_pubkey = keypair0.pubkey();
let signers = vec![None, Some(keypair0.into()), Some(keypair0_clone.into())];
let signer_info =
-generate_unique_signers(signers, &matches, &default_keypair_file, None).unwrap();
+generate_unique_signers(signers, &matches, &default_keypair_file, &mut None).unwrap();
assert_eq!(signer_info.signers.len(), 2);
assert_eq!(signer_info.index_of(None), Some(0));
assert_eq!(signer_info.index_of(Some(keypair0_pubkey)), Some(1));
@@ -2631,7 +2725,7 @@ mod tests {
let keypair0_clone = keypair_from_seed(&[1u8; 32]).unwrap();
let signers = vec![Some(keypair0.into()), Some(keypair0_clone.into())];
let signer_info =
-generate_unique_signers(signers, &matches, &default_keypair_file, None).unwrap();
+generate_unique_signers(signers, &matches, &default_keypair_file, &mut None).unwrap();
assert_eq!(signer_info.signers.len(), 1);
assert_eq!(signer_info.index_of(Some(keypair0_pubkey)), Some(0));

@@ -2652,7 +2746,7 @@ mod tests {
Some(keypair1.into()),
];
let signer_info =
-generate_unique_signers(signers, &matches, &default_keypair_file, None).unwrap();
+generate_unique_signers(signers, &matches, &default_keypair_file, &mut None).unwrap();
assert_eq!(signer_info.signers.len(), 2);
assert_eq!(signer_info.index_of(Some(keypair0_pubkey)), Some(0));
assert_eq!(signer_info.index_of(Some(keypair1_pubkey)), Some(1));
@@ -2678,7 +2772,7 @@ mod tests {
.clone()
.get_matches_from(vec!["test", "airdrop", "50", &pubkey_string]);
assert_eq!(
-parse_command(&test_airdrop, "", None).unwrap(),
+parse_command(&test_airdrop, "", &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::Airdrop {
faucet_host: None,
@@ -2701,7 +2795,7 @@ mod tests {
&keypair.pubkey().to_string(),
]);
assert_eq!(
-parse_command(&test_balance, "", None).unwrap(),
+parse_command(&test_balance, "", &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::Balance {
pubkey: Some(keypair.pubkey()),
@@ -2717,7 +2811,7 @@ mod tests {
"--lamports",
]);
assert_eq!(
-parse_command(&test_balance, "", None).unwrap(),
+parse_command(&test_balance, "", &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::Balance {
pubkey: Some(keypair.pubkey()),
@@ -2731,7 +2825,7 @@ mod tests {
.clone()
.get_matches_from(vec!["test", "balance", "--lamports"]);
assert_eq!(
-parse_command(&test_balance, &keypair_file, None).unwrap(),
+parse_command(&test_balance, &keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::Balance {
pubkey: None,
@@ -2747,7 +2841,7 @@ mod tests {
.clone()
.get_matches_from(vec!["test", "cancel", &pubkey_string]);
assert_eq!(
-parse_command(&test_cancel, &keypair_file, None).unwrap(),
+parse_command(&test_cancel, &keypair_file, &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::Cancel(pubkey),
signers: vec![read_keypair_file(&keypair_file).unwrap().into()],
@@ -2762,7 +2856,7 @@ mod tests {
.clone()
.get_matches_from(vec!["test", "confirm", &signature_string]);
assert_eq!(
-parse_command(&test_confirm, "", None).unwrap(),
+parse_command(&test_confirm, "", &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::Confirm(signature),
signers: vec![],
@@ -2771,7 +2865,7 @@ mod tests {
let test_bad_signature = test_commands
.clone()
.get_matches_from(vec!["test", "confirm", "deadbeef"]);
-assert!(parse_command(&test_bad_signature, "", None).is_err());
+assert!(parse_command(&test_bad_signature, "", &mut None).is_err());

// Test CreateAddressWithSeed
let from_pubkey = Some(Pubkey::new_rand());
@@ -2791,7 +2885,7 @@ mod tests {
&from_str,
]);
assert_eq!(
-parse_command(&test_create_address_with_seed, "", None).unwrap(),
+parse_command(&test_create_address_with_seed, "", &mut None).unwrap(),
CliCommandInfo {
command: CliCommand::CreateAddressWithSeed {
from_pubkey,
@@ -2809,7 +2903,7 @@ mod tests {
"STAKE",
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_create_address_with_seed, &keypair_file, None).unwrap(),
|
||||
parse_command(&test_create_address_with_seed, &keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::CreateAddressWithSeed {
|
||||
from_pubkey: None,
|
||||
@@ -2826,7 +2920,7 @@ mod tests {
|
||||
.clone()
|
||||
.get_matches_from(vec!["test", "deploy", "/Users/test/program.o"]);
|
||||
assert_eq!(
|
||||
parse_command(&test_deploy, &keypair_file, None).unwrap(),
|
||||
parse_command(&test_deploy, &keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::Deploy("/Users/test/program.o".to_string()),
|
||||
signers: vec![read_keypair_file(&keypair_file).unwrap().into()],
|
||||
@@ -2839,7 +2933,7 @@ mod tests {
|
||||
.clone()
|
||||
.get_matches_from(vec!["test", "resolve-signer", &keypair_file]);
|
||||
assert_eq!(
|
||||
parse_command(&test_resolve_signer, "", None).unwrap(),
|
||||
parse_command(&test_resolve_signer, "", &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::ResolveSigner(Some(keypair_file.clone())),
|
||||
signers: vec![],
|
||||
@@ -2851,7 +2945,7 @@ mod tests {
|
||||
.clone()
|
||||
.get_matches_from(vec!["test", "resolve-signer", &pubkey_string]);
|
||||
assert_eq!(
|
||||
parse_command(&test_resolve_signer, "", None).unwrap(),
|
||||
parse_command(&test_resolve_signer, "", &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::ResolveSigner(Some(pubkey.to_string())),
|
||||
signers: vec![],
|
||||
@@ -2864,7 +2958,7 @@ mod tests {
|
||||
.clone()
|
||||
.get_matches_from(vec!["test", "pay", &pubkey_string, "50"]);
|
||||
assert_eq!(
|
||||
parse_command(&test_pay, &keypair_file, None).unwrap(),
|
||||
parse_command(&test_pay, &keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::Pay(PayCommand {
|
||||
lamports: 50_000_000_000,
|
||||
@@ -2887,7 +2981,7 @@ mod tests {
|
||||
&witness1_string,
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_pay_multiple_witnesses, &keypair_file, None).unwrap(),
|
||||
parse_command(&test_pay_multiple_witnesses, &keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::Pay(PayCommand {
|
||||
lamports: 50_000_000_000,
|
||||
@@ -2907,7 +3001,7 @@ mod tests {
|
||||
&witness0_string,
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_pay_single_witness, &keypair_file, None).unwrap(),
|
||||
parse_command(&test_pay_single_witness, &keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::Pay(PayCommand {
|
||||
lamports: 50_000_000_000,
|
||||
@@ -2931,7 +3025,7 @@ mod tests {
|
||||
&witness0_string,
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_pay_timestamp, &keypair_file, None).unwrap(),
|
||||
parse_command(&test_pay_timestamp, &keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::Pay(PayCommand {
|
||||
lamports: 50_000_000_000,
|
||||
@@ -2957,7 +3051,7 @@ mod tests {
|
||||
"--sign-only",
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_pay, &keypair_file, None).unwrap(),
|
||||
parse_command(&test_pay, &keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::Pay(PayCommand {
|
||||
lamports: 50_000_000_000,
|
||||
@@ -2980,7 +3074,7 @@ mod tests {
|
||||
&blockhash_string,
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_pay, &keypair_file, None).unwrap(),
|
||||
parse_command(&test_pay, &keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::Pay(PayCommand {
|
||||
lamports: 50_000_000_000,
|
||||
@@ -3009,7 +3103,7 @@ mod tests {
|
||||
&pubkey_string,
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_pay, &keypair_file, None).unwrap(),
|
||||
parse_command(&test_pay, &keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::Pay(PayCommand {
|
||||
lamports: 50_000_000_000,
|
||||
@@ -3042,7 +3136,7 @@ mod tests {
|
||||
&keypair_file,
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_pay, &keypair_file, None).unwrap(),
|
||||
parse_command(&test_pay, &keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::Pay(PayCommand {
|
||||
lamports: 50_000_000_000,
|
||||
@@ -3080,7 +3174,7 @@ mod tests {
|
||||
&signer_arg,
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_pay, &keypair_file, None).unwrap(),
|
||||
parse_command(&test_pay, &keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::Pay(PayCommand {
|
||||
lamports: 50_000_000_000,
|
||||
@@ -3118,7 +3212,7 @@ mod tests {
|
||||
"--signer",
|
||||
&signer_arg,
|
||||
]);
|
||||
assert!(parse_command(&test_pay, &keypair_file, None).is_err());
|
||||
assert!(parse_command(&test_pay, &keypair_file, &mut None).is_err());
|
||||
|
||||
// Test Send-Signature Subcommand
|
||||
let test_send_signature = test_commands.clone().get_matches_from(vec![
|
||||
@@ -3128,7 +3222,7 @@ mod tests {
|
||||
&pubkey_string,
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_send_signature, &keypair_file, None).unwrap(),
|
||||
parse_command(&test_send_signature, &keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::Witness(pubkey, pubkey),
|
||||
signers: vec![read_keypair_file(&keypair_file).unwrap().into()],
|
||||
@@ -3149,7 +3243,7 @@ mod tests {
|
||||
&witness1_string,
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_pay_multiple_witnesses, &keypair_file, None).unwrap(),
|
||||
parse_command(&test_pay_multiple_witnesses, &keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::Pay(PayCommand {
|
||||
lamports: 50_000_000_000,
|
||||
@@ -3173,7 +3267,7 @@ mod tests {
|
||||
"2018-09-19T17:30:59",
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_send_timestamp, &keypair_file, None).unwrap(),
|
||||
parse_command(&test_send_timestamp, &keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::TimeElapsed(pubkey, pubkey, dt),
|
||||
signers: vec![read_keypair_file(&keypair_file).unwrap().into()],
|
||||
@@ -3187,7 +3281,7 @@ mod tests {
|
||||
"--date",
|
||||
"20180919T17:30:59",
|
||||
]);
|
||||
assert!(parse_command(&test_bad_timestamp, &keypair_file, None).is_err());
|
||||
assert!(parse_command(&test_bad_timestamp, &keypair_file, &mut None).is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -3287,6 +3381,7 @@ mod tests {
|
||||
destination_account_pubkey: to_pubkey,
|
||||
lamports: 100,
|
||||
withdraw_authority: 0,
|
||||
custodian: None,
|
||||
sign_only: false,
|
||||
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
|
||||
nonce_account: None,
|
||||
@@ -3426,10 +3521,7 @@ mod tests {
|
||||
config.command = CliCommand::Confirm(any_signature);
|
||||
assert_eq!(
|
||||
process_command(&config).unwrap(),
|
||||
format!(
|
||||
"Transaction failed with error: {}",
|
||||
TransactionError::AccountInUse
|
||||
)
|
||||
format!("Transaction failed: {}", TransactionError::AccountInUse)
|
||||
);
|
||||
|
||||
// Failure cases
|
||||
@@ -3565,7 +3657,7 @@ mod tests {
|
||||
.clone()
|
||||
.get_matches_from(vec!["test", "transfer", &to_string, "42"]);
|
||||
assert_eq!(
|
||||
parse_command(&test_transfer, &default_keypair_file, None).unwrap(),
|
||||
parse_command(&test_transfer, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::Transfer {
|
||||
lamports: 42_000_000_000,
|
||||
@@ -3591,7 +3683,7 @@ mod tests {
|
||||
"42",
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_transfer, &default_keypair_file, None).unwrap(),
|
||||
parse_command(&test_transfer, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::Transfer {
|
||||
lamports: 42_000_000_000,
|
||||
@@ -3621,7 +3713,7 @@ mod tests {
|
||||
"--sign-only",
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_transfer, &default_keypair_file, None).unwrap(),
|
||||
parse_command(&test_transfer, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::Transfer {
|
||||
lamports: 42_000_000_000,
|
||||
@@ -3656,7 +3748,7 @@ mod tests {
|
||||
&blockhash_string,
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_transfer, &default_keypair_file, None).unwrap(),
|
||||
parse_command(&test_transfer, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::Transfer {
|
||||
lamports: 42_000_000_000,
|
||||
@@ -3695,7 +3787,7 @@ mod tests {
|
||||
&nonce_authority_file,
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_transfer, &default_keypair_file, None).unwrap(),
|
||||
parse_command(&test_transfer, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::Transfer {
|
||||
lamports: 42_000_000_000,
|
||||
|
841
cli/src/cli_output.rs
Normal file
@@ -0,0 +1,841 @@
|
||||
use crate::{cli::build_balance_message, display::writeln_name_value};
|
||||
use chrono::{DateTime, NaiveDateTime, SecondsFormat, Utc};
|
||||
use console::{style, Emoji};
|
||||
use inflector::cases::titlecase::to_title_case;
|
||||
use serde::Serialize;
|
||||
use serde_json::{Map, Value};
|
||||
use solana_client::rpc_response::{RpcEpochInfo, RpcKeyedAccount, RpcVoteAccountInfo};
|
||||
use solana_sdk::{
|
||||
clock::{self, Epoch, Slot, UnixTimestamp},
|
||||
stake_history::StakeHistoryEntry,
|
||||
};
|
||||
use solana_stake_program::stake_state::{Authorized, Lockup};
|
||||
use solana_vote_program::{
|
||||
authorized_voters::AuthorizedVoters,
|
||||
vote_state::{BlockTimestamp, Lockout},
|
||||
};
|
||||
use std::{collections::BTreeMap, fmt, time::Duration};
|
||||
|
||||
static WARNING: Emoji = Emoji("⚠️", "!");
|
||||
|
||||
#[derive(PartialEq)]
|
||||
pub enum OutputFormat {
|
||||
Display,
|
||||
Json,
|
||||
JsonCompact,
|
||||
}
|
||||
|
||||
impl OutputFormat {
|
||||
pub fn formatted_print<T>(&self, item: &T)
|
||||
where
|
||||
T: Serialize + fmt::Display,
|
||||
{
|
||||
match self {
|
||||
OutputFormat::Display => {
|
||||
println!("{}", item);
|
||||
}
|
||||
OutputFormat::Json => {
|
||||
println!("{}", serde_json::to_string_pretty(item).unwrap());
|
||||
}
|
||||
OutputFormat::JsonCompact => {
|
||||
println!("{}", serde_json::to_value(item).unwrap());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
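
A minimal usage sketch of the bounds above (the CliExample type and main function are illustrative only, not part of the diff): any value that is both Serialize and Display can be routed through formatted_print, so each new Cli* struct below only needs to implement those two traits.

use serde::Serialize;
use std::fmt;

#[derive(Serialize)]
struct CliExample {
    slot: u64, // hypothetical field, for illustration only
}

impl fmt::Display for CliExample {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "Slot: {}", self.slot)
    }
}

fn main() {
    let item = CliExample { slot: 42 };
    OutputFormat::Display.formatted_print(&item);     // prints "Slot: 42"
    OutputFormat::JsonCompact.formatted_print(&item); // prints {"slot":42}
}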
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
pub struct CliAccount {
|
||||
#[serde(flatten)]
|
||||
pub keyed_account: RpcKeyedAccount,
|
||||
#[serde(skip_serializing)]
|
||||
pub use_lamports_unit: bool,
|
||||
}
|
||||
|
||||
impl fmt::Display for CliAccount {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
writeln!(f)?;
|
||||
writeln_name_value(f, "Public Key:", &self.keyed_account.pubkey)?;
|
||||
writeln_name_value(
|
||||
f,
|
||||
"Balance:",
|
||||
&build_balance_message(
|
||||
self.keyed_account.account.lamports,
|
||||
self.use_lamports_unit,
|
||||
true,
|
||||
),
|
||||
)?;
|
||||
writeln_name_value(f, "Owner:", &self.keyed_account.account.owner)?;
|
||||
writeln_name_value(
|
||||
f,
|
||||
"Executable:",
|
||||
&self.keyed_account.account.executable.to_string(),
|
||||
)?;
|
||||
writeln_name_value(
|
||||
f,
|
||||
"Rent Epoch:",
|
||||
&self.keyed_account.account.rent_epoch.to_string(),
|
||||
)?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Default, Serialize, Deserialize)]
|
||||
pub struct CliBlockProduction {
|
||||
pub epoch: Epoch,
|
||||
pub start_slot: Slot,
|
||||
pub end_slot: Slot,
|
||||
pub total_slots: usize,
|
||||
pub total_blocks_produced: usize,
|
||||
pub total_slots_skipped: usize,
|
||||
pub leaders: Vec<CliBlockProductionEntry>,
|
||||
pub individual_slot_status: Vec<CliSlotStatus>,
|
||||
#[serde(skip_serializing)]
|
||||
pub verbose: bool,
|
||||
}
|
||||
|
||||
impl fmt::Display for CliBlockProduction {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
writeln!(f)?;
|
||||
writeln!(
|
||||
f,
|
||||
"{}",
|
||||
style(format!(
|
||||
" {:<44} {:>15} {:>15} {:>15} {:>23}",
|
||||
"Identity Pubkey",
|
||||
"Leader Slots",
|
||||
"Blocks Produced",
|
||||
"Skipped Slots",
|
||||
"Skipped Slot Percentage",
|
||||
))
|
||||
.bold()
|
||||
)?;
|
||||
for leader in &self.leaders {
|
||||
writeln!(
|
||||
f,
|
||||
" {:<44} {:>15} {:>15} {:>15} {:>22.2}%",
|
||||
leader.identity_pubkey,
|
||||
leader.leader_slots,
|
||||
leader.blocks_produced,
|
||||
leader.skipped_slots,
|
||||
leader.skipped_slots as f64 / leader.leader_slots as f64 * 100.
|
||||
)?;
|
||||
}
|
||||
writeln!(f)?;
|
||||
writeln!(
|
||||
f,
|
||||
" {:<44} {:>15} {:>15} {:>15} {:>22.2}%",
|
||||
format!("Epoch {} total:", self.epoch),
|
||||
self.total_slots,
|
||||
self.total_blocks_produced,
|
||||
self.total_slots_skipped,
|
||||
self.total_slots_skipped as f64 / self.total_slots as f64 * 100.
|
||||
)?;
|
||||
writeln!(
|
||||
f,
|
||||
" (using data from {} slots: {} to {})",
|
||||
self.total_slots, self.start_slot, self.end_slot
|
||||
)?;
|
||||
if self.verbose {
|
||||
writeln!(f)?;
|
||||
writeln!(f)?;
|
||||
writeln!(
|
||||
f,
|
||||
"{}",
|
||||
style(format!(" {:<15} {:<44}", "Slot", "Identity Pubkey")).bold(),
|
||||
)?;
|
||||
for status in &self.individual_slot_status {
|
||||
if status.skipped {
|
||||
writeln!(
|
||||
f,
|
||||
"{}",
|
||||
style(format!(
|
||||
" {:<15} {:<44} SKIPPED",
|
||||
status.slot, status.leader
|
||||
))
|
||||
.red()
|
||||
)?;
|
||||
} else {
|
||||
writeln!(
|
||||
f,
|
||||
"{}",
|
||||
style(format!(" {:<15} {:<44}", status.slot, status.leader))
|
||||
)?;
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Default, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct CliBlockProductionEntry {
|
||||
pub identity_pubkey: String,
|
||||
pub leader_slots: u64,
|
||||
pub blocks_produced: u64,
|
||||
pub skipped_slots: u64,
|
||||
}
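
To make the skipped-slot percentage printed above concrete, a short sketch with made-up numbers (none of these values come from the diff):

let entry = CliBlockProductionEntry {
    identity_pubkey: "ExampleValidatorIdentity".to_string(), // placeholder
    leader_slots: 80,
    blocks_produced: 72,
    skipped_slots: 8,
};
// The Display impl above computes skipped_slots / leader_slots * 100:
let skipped_pct = entry.skipped_slots as f64 / entry.leader_slots as f64 * 100.;
assert!((skipped_pct - 10.0).abs() < f64::EPSILON); // 8 of 80 slots = 10%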
|
||||
|
||||
#[derive(Default, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct CliSlotStatus {
|
||||
pub slot: Slot,
|
||||
pub leader: String,
|
||||
pub skipped: bool,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct CliEpochInfo {
|
||||
#[serde(flatten)]
|
||||
pub epoch_info: RpcEpochInfo,
|
||||
}
|
||||
|
||||
impl From<RpcEpochInfo> for CliEpochInfo {
|
||||
fn from(epoch_info: RpcEpochInfo) -> Self {
|
||||
Self { epoch_info }
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for CliEpochInfo {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
writeln!(f)?;
|
||||
writeln_name_value(f, "Slot:", &self.epoch_info.absolute_slot.to_string())?;
|
||||
writeln_name_value(f, "Epoch:", &self.epoch_info.epoch.to_string())?;
|
||||
let start_slot = self.epoch_info.absolute_slot - self.epoch_info.slot_index;
|
||||
let end_slot = start_slot + self.epoch_info.slots_in_epoch;
|
||||
writeln_name_value(
|
||||
f,
|
||||
"Epoch Slot Range:",
|
||||
&format!("[{}..{})", start_slot, end_slot),
|
||||
)?;
|
||||
writeln_name_value(
|
||||
f,
|
||||
"Epoch Completed Percent:",
|
||||
&format!(
|
||||
"{:>3.3}%",
|
||||
self.epoch_info.slot_index as f64 / self.epoch_info.slots_in_epoch as f64 * 100_f64
|
||||
),
|
||||
)?;
|
||||
let remaining_slots_in_epoch = self.epoch_info.slots_in_epoch - self.epoch_info.slot_index;
|
||||
writeln_name_value(
|
||||
f,
|
||||
"Epoch Completed Slots:",
|
||||
&format!(
|
||||
"{}/{} ({} remaining)",
|
||||
self.epoch_info.slot_index,
|
||||
self.epoch_info.slots_in_epoch,
|
||||
remaining_slots_in_epoch
|
||||
),
|
||||
)?;
|
||||
writeln_name_value(
|
||||
f,
|
||||
"Epoch Completed Time:",
|
||||
&format!(
|
||||
"{}/{} ({} remaining)",
|
||||
slot_to_human_time(self.epoch_info.slot_index),
|
||||
slot_to_human_time(self.epoch_info.slots_in_epoch),
|
||||
slot_to_human_time(remaining_slots_in_epoch)
|
||||
),
|
||||
)
|
||||
}
|
||||
}
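
The slot arithmetic above is easy to sanity-check with made-up numbers (values illustrative only):

let (absolute_slot, slot_index, slots_in_epoch) = (1_000_u64, 200_u64, 432_u64);
let start_slot = absolute_slot - slot_index;  // 800
let end_slot = start_slot + slots_in_epoch;   // 1_232
let remaining = slots_in_epoch - slot_index;  // 232
let completed_pct = slot_index as f64 / slots_in_epoch as f64 * 100_f64;
assert_eq!((start_slot, end_slot, remaining), (800, 1_232, 232));
assert!(completed_pct > 46.2 && completed_pct < 46.4); // ~46.3% of the epoch done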
|
||||
|
||||
fn slot_to_human_time(slot: Slot) -> String {
|
||||
humantime::format_duration(Duration::from_secs(
|
||||
slot * clock::DEFAULT_TICKS_PER_SLOT / clock::DEFAULT_TICKS_PER_SECOND,
|
||||
))
|
||||
.to_string()
|
||||
}
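
As a rough check of the conversion above -- assuming the default clock constants are 64 ticks per slot and 160 ticks per second (an assumption about the values behind DEFAULT_TICKS_PER_SLOT and DEFAULT_TICKS_PER_SECOND, not stated in the diff) -- an epoch of 432,000 slots maps to exactly two days:

// 432_000 slots * 64 ticks/slot / 160 ticks/second = 172_800 seconds = 48 hours
let secs = 432_000_u64 * 64 / 160;
assert_eq!(secs, 172_800);
println!("{}", humantime::format_duration(std::time::Duration::from_secs(secs)));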
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct CliValidators {
|
||||
pub total_active_stake: u64,
|
||||
pub total_current_stake: u64,
|
||||
pub total_deliquent_stake: u64,
|
||||
pub current_validators: Vec<CliValidator>,
|
||||
pub delinquent_validators: Vec<CliValidator>,
|
||||
#[serde(skip_serializing)]
|
||||
pub use_lamports_unit: bool,
|
||||
}
|
||||
|
||||
impl fmt::Display for CliValidators {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
fn write_vote_account(
|
||||
f: &mut fmt::Formatter,
|
||||
validator: &CliValidator,
|
||||
total_active_stake: u64,
|
||||
use_lamports_unit: bool,
|
||||
delinquent: bool,
|
||||
) -> fmt::Result {
|
||||
fn non_zero_or_dash(v: u64) -> String {
|
||||
if v == 0 {
|
||||
"-".into()
|
||||
} else {
|
||||
format!("{}", v)
|
||||
}
|
||||
}
|
||||
|
||||
writeln!(
|
||||
f,
|
||||
"{} {:<44} {:<44} {:>9}% {:>8} {:>10} {:>7} {}",
|
||||
if delinquent {
|
||||
WARNING.to_string()
|
||||
} else {
|
||||
" ".to_string()
|
||||
},
|
||||
validator.identity_pubkey,
|
||||
validator.vote_account_pubkey,
|
||||
validator.commission,
|
||||
non_zero_or_dash(validator.last_vote),
|
||||
non_zero_or_dash(validator.root_slot),
|
||||
validator.credits,
|
||||
if validator.activated_stake > 0 {
|
||||
format!(
|
||||
"{} ({:.2}%)",
|
||||
build_balance_message(validator.activated_stake, use_lamports_unit, true),
|
||||
100. * validator.activated_stake as f64 / total_active_stake as f64
|
||||
)
|
||||
} else {
|
||||
"-".into()
|
||||
},
|
||||
)
|
||||
}
|
||||
writeln_name_value(
|
||||
f,
|
||||
"Active Stake:",
|
||||
&build_balance_message(self.total_active_stake, self.use_lamports_unit, true),
|
||||
)?;
|
||||
if self.total_deliquent_stake > 0 {
|
||||
writeln_name_value(
|
||||
f,
|
||||
"Current Stake:",
|
||||
&format!(
|
||||
"{} ({:0.2}%)",
|
||||
&build_balance_message(self.total_current_stake, self.use_lamports_unit, true),
|
||||
100. * self.total_current_stake as f64 / self.total_active_stake as f64
|
||||
),
|
||||
)?;
|
||||
writeln_name_value(
|
||||
f,
|
||||
"Delinquent Stake:",
|
||||
&format!(
|
||||
"{} ({:0.2}%)",
|
||||
&build_balance_message(
|
||||
self.total_deliquent_stake,
|
||||
self.use_lamports_unit,
|
||||
true
|
||||
),
|
||||
100. * self.total_deliquent_stake as f64 / self.total_active_stake as f64
|
||||
),
|
||||
)?;
|
||||
}
|
||||
writeln!(f)?;
|
||||
writeln!(
|
||||
f,
|
||||
"{}",
|
||||
style(format!(
|
||||
" {:<44} {:<44} {} {} {} {:>7} {}",
|
||||
"Identity Pubkey",
|
||||
"Vote Account Pubkey",
|
||||
"Commission",
|
||||
"Last Vote",
|
||||
"Root Block",
|
||||
"Credits",
|
||||
"Active Stake",
|
||||
))
|
||||
.bold()
|
||||
)?;
|
||||
for validator in &self.current_validators {
|
||||
write_vote_account(
|
||||
f,
|
||||
validator,
|
||||
self.total_active_stake,
|
||||
self.use_lamports_unit,
|
||||
false,
|
||||
)?;
|
||||
}
|
||||
for validator in &self.delinquent_validators {
|
||||
write_vote_account(
|
||||
f,
|
||||
validator,
|
||||
self.total_active_stake,
|
||||
self.use_lamports_unit,
|
||||
true,
|
||||
)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct CliValidator {
|
||||
pub identity_pubkey: String,
|
||||
pub vote_account_pubkey: String,
|
||||
pub commission: u8,
|
||||
pub last_vote: u64,
|
||||
pub root_slot: u64,
|
||||
pub credits: u64,
|
||||
pub activated_stake: u64,
|
||||
}
|
||||
|
||||
impl CliValidator {
|
||||
pub fn new(vote_account: &RpcVoteAccountInfo, current_epoch: Epoch) -> Self {
|
||||
Self {
|
||||
identity_pubkey: vote_account.node_pubkey.to_string(),
|
||||
vote_account_pubkey: vote_account.vote_pubkey.to_string(),
|
||||
commission: vote_account.commission,
|
||||
last_vote: vote_account.last_vote,
|
||||
root_slot: vote_account.root_slot,
|
||||
credits: vote_account
|
||||
.epoch_credits
|
||||
.iter()
|
||||
.find_map(|(epoch, credits, _)| {
|
||||
if *epoch == current_epoch {
|
||||
Some(*credits)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
.unwrap_or(0),
|
||||
activated_stake: vote_account.activated_stake,
|
||||
}
|
||||
}
|
||||
}
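
A small sketch of the epoch-credits lookup in CliValidator::new above. epoch_credits is assumed here to be a list of (epoch, credits, previous_credits) tuples; find_map selects the credits entry for the current epoch and falls back to 0 (the tuple layout and numbers below are illustrative):

let epoch_credits: Vec<(u64, u64, u64)> = vec![(10, 100, 40), (11, 180, 100)];
let current_epoch: u64 = 11;
let credits = epoch_credits
    .iter()
    .find_map(|(epoch, credits, _)| {
        if *epoch == current_epoch {
            Some(*credits)
        } else {
            None
        }
    })
    .unwrap_or(0);
assert_eq!(credits, 180); // credits recorded for epoch 11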
|
||||
|
||||
#[derive(Default, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct CliNonceAccount {
|
||||
pub balance: u64,
|
||||
pub minimum_balance_for_rent_exemption: u64,
|
||||
pub nonce: Option<String>,
|
||||
pub lamports_per_signature: Option<u64>,
|
||||
pub authority: Option<String>,
|
||||
#[serde(skip_serializing)]
|
||||
pub use_lamports_unit: bool,
|
||||
}
|
||||
|
||||
impl fmt::Display for CliNonceAccount {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
writeln!(
|
||||
f,
|
||||
"Balance: {}",
|
||||
build_balance_message(self.balance, self.use_lamports_unit, true)
|
||||
)?;
|
||||
writeln!(
|
||||
f,
|
||||
"Minimum Balance Required: {}",
|
||||
build_balance_message(
|
||||
self.minimum_balance_for_rent_exemption,
|
||||
self.use_lamports_unit,
|
||||
true
|
||||
)
|
||||
)?;
|
||||
let nonce = self.nonce.as_deref().unwrap_or("uninitialized");
|
||||
writeln!(f, "Nonce: {}", nonce)?;
|
||||
if let Some(fees) = self.lamports_per_signature {
|
||||
writeln!(f, "Fee: {} lamports per signature", fees)?;
|
||||
} else {
|
||||
writeln!(f, "Fees: uninitialized")?;
|
||||
}
|
||||
let authority = self.authority.as_deref().unwrap_or("uninitialized");
|
||||
writeln!(f, "Authority: {}", authority)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
pub struct CliStakeVec(Vec<CliKeyedStakeState>);
|
||||
|
||||
impl CliStakeVec {
|
||||
pub fn new(list: Vec<CliKeyedStakeState>) -> Self {
|
||||
Self(list)
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for CliStakeVec {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
for state in &self.0 {
|
||||
writeln!(f)?;
|
||||
write!(f, "{}", state)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct CliKeyedStakeState {
|
||||
pub stake_pubkey: String,
|
||||
#[serde(flatten)]
|
||||
pub stake_state: CliStakeState,
|
||||
}
|
||||
|
||||
impl fmt::Display for CliKeyedStakeState {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
writeln!(f, "Stake Pubkey: {}", self.stake_pubkey)?;
|
||||
write!(f, "{}", self.stake_state)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Default, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct CliStakeState {
|
||||
pub stake_type: CliStakeType,
|
||||
pub total_stake: u64,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub delegated_stake: Option<u64>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub delegated_vote_account_address: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub activation_epoch: Option<Epoch>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub deactivation_epoch: Option<Epoch>,
|
||||
#[serde(flatten, skip_serializing_if = "Option::is_none")]
|
||||
pub authorized: Option<CliAuthorized>,
|
||||
#[serde(flatten, skip_serializing_if = "Option::is_none")]
|
||||
pub lockup: Option<CliLockup>,
|
||||
#[serde(skip_serializing)]
|
||||
pub use_lamports_unit: bool,
|
||||
}
|
||||
|
||||
impl fmt::Display for CliStakeState {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
fn show_authorized(f: &mut fmt::Formatter, authorized: &CliAuthorized) -> fmt::Result {
|
||||
writeln!(f, "Stake Authority: {}", authorized.staker)?;
|
||||
writeln!(f, "Withdraw Authority: {}", authorized.withdrawer)?;
|
||||
Ok(())
|
||||
}
|
||||
fn show_lockup(f: &mut fmt::Formatter, lockup: &CliLockup) -> fmt::Result {
|
||||
writeln!(
|
||||
f,
|
||||
"Lockup Timestamp: {} (UnixTimestamp: {})",
|
||||
DateTime::<Utc>::from_utc(
|
||||
NaiveDateTime::from_timestamp(lockup.unix_timestamp, 0),
|
||||
Utc
|
||||
)
|
||||
.to_rfc3339_opts(SecondsFormat::Secs, true),
|
||||
lockup.unix_timestamp
|
||||
)?;
|
||||
writeln!(f, "Lockup Epoch: {}", lockup.epoch)?;
|
||||
writeln!(f, "Lockup Custodian: {}", lockup.custodian)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
match self.stake_type {
|
||||
CliStakeType::RewardsPool => writeln!(f, "Stake account is a rewards pool")?,
|
||||
CliStakeType::Uninitialized => writeln!(f, "Stake account is uninitialized")?,
|
||||
CliStakeType::Initialized => {
|
||||
writeln!(
|
||||
f,
|
||||
"Total Stake: {}",
|
||||
build_balance_message(self.total_stake, self.use_lamports_unit, true)
|
||||
)?;
|
||||
writeln!(f, "Stake account is undelegated")?;
|
||||
show_authorized(f, self.authorized.as_ref().unwrap())?;
|
||||
show_lockup(f, self.lockup.as_ref().unwrap())?;
|
||||
}
|
||||
CliStakeType::Stake => {
|
||||
writeln!(
|
||||
f,
|
||||
"Total Stake: {}",
|
||||
build_balance_message(self.total_stake, self.use_lamports_unit, true)
|
||||
)?;
|
||||
writeln!(
|
||||
f,
|
||||
"Delegated Stake: {}",
|
||||
build_balance_message(
|
||||
self.delegated_stake.unwrap(),
|
||||
self.use_lamports_unit,
|
||||
true
|
||||
)
|
||||
)?;
|
||||
if let Some(delegated_vote_account_address) = &self.delegated_vote_account_address {
|
||||
writeln!(
|
||||
f,
|
||||
"Delegated Vote Account Address: {}",
|
||||
delegated_vote_account_address
|
||||
)?;
|
||||
}
|
||||
writeln!(
|
||||
f,
|
||||
"Stake activates starting from epoch: {}",
|
||||
self.activation_epoch.unwrap()
|
||||
)?;
|
||||
if let Some(deactivation_epoch) = self.deactivation_epoch {
|
||||
writeln!(
|
||||
f,
|
||||
"Stake deactivates starting from epoch: {}",
|
||||
deactivation_epoch
|
||||
)?;
|
||||
}
|
||||
show_authorized(f, self.authorized.as_ref().unwrap())?;
|
||||
show_lockup(f, self.lockup.as_ref().unwrap())?;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
pub enum CliStakeType {
|
||||
Stake,
|
||||
RewardsPool,
|
||||
Uninitialized,
|
||||
Initialized,
|
||||
}
|
||||
|
||||
impl Default for CliStakeType {
|
||||
fn default() -> Self {
|
||||
Self::Uninitialized
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct CliStakeHistory {
|
||||
pub entries: Vec<CliStakeHistoryEntry>,
|
||||
#[serde(skip_serializing)]
|
||||
pub use_lamports_unit: bool,
|
||||
}
|
||||
|
||||
impl fmt::Display for CliStakeHistory {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
writeln!(f)?;
|
||||
writeln!(
|
||||
f,
|
||||
"{}",
|
||||
style(format!(
|
||||
" {:<5} {:>20} {:>20} {:>20}",
|
||||
"Epoch", "Effective Stake", "Activating Stake", "Deactivating Stake",
|
||||
))
|
||||
.bold()
|
||||
)?;
|
||||
for entry in &self.entries {
|
||||
writeln!(
|
||||
f,
|
||||
" {:>5} {:>20} {:>20} {:>20} {}",
|
||||
entry.epoch,
|
||||
build_balance_message(entry.effective_stake, self.use_lamports_unit, false),
|
||||
build_balance_message(entry.activating_stake, self.use_lamports_unit, false),
|
||||
build_balance_message(entry.deactivating_stake, self.use_lamports_unit, false),
|
||||
if self.use_lamports_unit {
|
||||
"lamports"
|
||||
} else {
|
||||
"SOL"
|
||||
}
|
||||
)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl From<&(Epoch, StakeHistoryEntry)> for CliStakeHistoryEntry {
|
||||
fn from((epoch, entry): &(Epoch, StakeHistoryEntry)) -> Self {
|
||||
Self {
|
||||
epoch: *epoch,
|
||||
effective_stake: entry.effective,
|
||||
activating_stake: entry.activating,
|
||||
deactivating_stake: entry.deactivating,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct CliStakeHistoryEntry {
|
||||
pub epoch: Epoch,
|
||||
pub effective_stake: u64,
|
||||
pub activating_stake: u64,
|
||||
pub deactivating_stake: u64,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct CliAuthorized {
|
||||
pub staker: String,
|
||||
pub withdrawer: String,
|
||||
}
|
||||
|
||||
impl From<&Authorized> for CliAuthorized {
|
||||
fn from(authorized: &Authorized) -> Self {
|
||||
Self {
|
||||
staker: authorized.staker.to_string(),
|
||||
withdrawer: authorized.withdrawer.to_string(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct CliLockup {
|
||||
pub unix_timestamp: UnixTimestamp,
|
||||
pub epoch: Epoch,
|
||||
pub custodian: String,
|
||||
}
|
||||
|
||||
impl From<&Lockup> for CliLockup {
|
||||
fn from(lockup: &Lockup) -> Self {
|
||||
Self {
|
||||
unix_timestamp: lockup.unix_timestamp,
|
||||
epoch: lockup.epoch,
|
||||
custodian: lockup.custodian.to_string(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
pub struct CliValidatorInfoVec(Vec<CliValidatorInfo>);
|
||||
|
||||
impl CliValidatorInfoVec {
|
||||
pub fn new(list: Vec<CliValidatorInfo>) -> Self {
|
||||
Self(list)
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for CliValidatorInfoVec {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
if self.0.is_empty() {
|
||||
writeln!(f, "No validator info accounts found")?;
|
||||
}
|
||||
for validator_info in &self.0 {
|
||||
writeln!(f)?;
|
||||
write!(f, "{}", validator_info)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct CliValidatorInfo {
|
||||
pub identity_pubkey: String,
|
||||
pub info_pubkey: String,
|
||||
pub info: Map<String, Value>,
|
||||
}
|
||||
|
||||
impl fmt::Display for CliValidatorInfo {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
writeln_name_value(f, "Validator Identity Pubkey:", &self.identity_pubkey)?;
|
||||
writeln_name_value(f, " Info Pubkey:", &self.info_pubkey)?;
|
||||
for (key, value) in self.info.iter() {
|
||||
writeln_name_value(
|
||||
f,
|
||||
&format!(" {}:", to_title_case(key)),
|
||||
&value.as_str().unwrap_or("?"),
|
||||
)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct CliVoteAccount {
|
||||
pub account_balance: u64,
|
||||
pub validator_identity: String,
|
||||
#[serde(flatten)]
|
||||
pub authorized_voters: CliAuthorizedVoters,
|
||||
pub authorized_withdrawer: String,
|
||||
pub credits: u64,
|
||||
pub commission: u8,
|
||||
pub root_slot: Option<Slot>,
|
||||
pub recent_timestamp: BlockTimestamp,
|
||||
pub votes: Vec<CliLockout>,
|
||||
pub epoch_voting_history: Vec<CliEpochVotingHistory>,
|
||||
#[serde(skip_serializing)]
|
||||
pub use_lamports_unit: bool,
|
||||
}
|
||||
|
||||
impl fmt::Display for CliVoteAccount {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
writeln!(
|
||||
f,
|
||||
"Account Balance: {}",
|
||||
build_balance_message(self.account_balance, self.use_lamports_unit, true)
|
||||
)?;
|
||||
writeln!(f, "Validator Identity: {}", self.validator_identity)?;
|
||||
writeln!(f, "Authorized Voters: {}", self.authorized_voters)?;
|
||||
writeln!(f, "Authorized Withdrawer: {}", self.authorized_withdrawer)?;
|
||||
writeln!(f, "Credits: {}", self.credits)?;
|
||||
writeln!(f, "Commission: {}%", self.commission)?;
|
||||
writeln!(
|
||||
f,
|
||||
"Root Slot: {}",
|
||||
match self.root_slot {
|
||||
Some(slot) => slot.to_string(),
|
||||
None => "~".to_string(),
|
||||
}
|
||||
)?;
|
||||
writeln!(f, "Recent Timestamp: {:?}", self.recent_timestamp)?;
|
||||
if !self.votes.is_empty() {
|
||||
writeln!(f, "Recent Votes:")?;
|
||||
for vote in &self.votes {
|
||||
writeln!(
|
||||
f,
|
||||
"- slot: {}\n confirmation count: {}",
|
||||
vote.slot, vote.confirmation_count
|
||||
)?;
|
||||
}
|
||||
writeln!(f, "Epoch Voting History:")?;
|
||||
for epoch_info in &self.epoch_voting_history {
|
||||
writeln!(
|
||||
f,
|
||||
"- epoch: {}\n slots in epoch: {}\n credits earned: {}",
|
||||
epoch_info.epoch, epoch_info.slots_in_epoch, epoch_info.credits_earned,
|
||||
)?;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct CliAuthorizedVoters {
|
||||
authorized_voters: BTreeMap<Epoch, String>,
|
||||
}
|
||||
|
||||
impl fmt::Display for CliAuthorizedVoters {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(f, "{:?}", self.authorized_voters)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<&AuthorizedVoters> for CliAuthorizedVoters {
|
||||
fn from(authorized_voters: &AuthorizedVoters) -> Self {
|
||||
let mut voter_map: BTreeMap<Epoch, String> = BTreeMap::new();
|
||||
for (epoch, voter) in authorized_voters.iter() {
|
||||
voter_map.insert(*epoch, voter.to_string());
|
||||
}
|
||||
Self {
|
||||
authorized_voters: voter_map,
|
||||
}
|
||||
}
|
||||
}
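
The BTreeMap above is what keeps the Debug output of authorized voters ordered by epoch; a tiny illustrative sketch (the voter strings are placeholders, not real pubkeys):

use std::collections::BTreeMap;

let mut voter_map: BTreeMap<u64, String> = BTreeMap::new();
voter_map.insert(7, "VoterB".to_string());
voter_map.insert(3, "VoterA".to_string());
// Iteration (and hence the {:?} output used by Display above) is ordered by epoch key:
assert_eq!(format!("{:?}", voter_map), r#"{3: "VoterA", 7: "VoterB"}"#);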
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct CliEpochVotingHistory {
|
||||
pub epoch: Epoch,
|
||||
pub slots_in_epoch: u64,
|
||||
pub credits_earned: u64,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct CliLockout {
|
||||
pub slot: Slot,
|
||||
pub confirmation_count: u32,
|
||||
}
|
||||
|
||||
impl From<&Lockout> for CliLockout {
|
||||
fn from(lockout: &Lockout) -> Self {
|
||||
Self {
|
||||
slot: lockout.slot,
|
||||
confirmation_count: lockout.confirmation_count,
|
||||
}
|
||||
}
|
||||
}
|
@@ -1,7 +1,8 @@
|
||||
use crate::{
|
||||
cli::{
|
||||
build_balance_message, check_account_for_fee, CliCommand, CliCommandInfo, CliConfig,
|
||||
CliError, ProcessResult,
|
||||
cli::{check_account_for_fee, CliCommand, CliCommandInfo, CliConfig, CliError, ProcessResult},
|
||||
cli_output::{
|
||||
CliBlockProduction, CliBlockProductionEntry, CliEpochInfo, CliKeyedStakeState,
|
||||
CliSlotStatus, CliStakeVec, CliValidator, CliValidators,
|
||||
},
|
||||
display::println_name_value,
|
||||
};
|
||||
@@ -13,7 +14,7 @@ use solana_clap_utils::{input_parsers::*, input_validators::*, keypair::signer_f
|
||||
use solana_client::{
|
||||
pubsub_client::{PubsubClient, SlotInfoMessage},
|
||||
rpc_client::RpcClient,
|
||||
rpc_response::RpcVoteAccountInfo,
|
||||
rpc_request::MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE,
|
||||
};
|
||||
use solana_remote_wallet::remote_wallet::RemoteWalletManager;
|
||||
use solana_sdk::{
|
||||
@@ -42,7 +43,6 @@ use std::{
|
||||
|
||||
static CHECK_MARK: Emoji = Emoji("✅ ", "");
|
||||
static CROSS_MARK: Emoji = Emoji("❌ ", "");
|
||||
static WARNING: Emoji = Emoji("⚠️", "!");
|
||||
|
||||
pub trait ClusterQuerySubCommands {
|
||||
fn cluster_query_subcommands(self) -> Self;
|
||||
@@ -278,12 +278,46 @@ impl ClusterQuerySubCommands for App<'_, '_> {
|
||||
.help("Display balance in lamports instead of SOL"),
|
||||
),
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("transaction-history")
|
||||
.about("Show historical transactions affecting the given address, \
|
||||
ordered based on the slot in which they were confirmed in \
|
||||
from lowest to highest slot")
|
||||
.arg(
|
||||
Arg::with_name("address")
|
||||
.index(1)
|
||||
.value_name("ADDRESS")
|
||||
.required(true)
|
||||
.validator(is_valid_pubkey)
|
||||
.help("Account address"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("end_slot")
|
||||
.takes_value(false)
|
||||
.value_name("SLOT")
|
||||
.index(2)
|
||||
.validator(is_slot)
|
||||
.help(
|
||||
"Slot to start from [default: latest slot at maximum commitment]"
|
||||
),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("limit")
|
||||
.long("limit")
|
||||
.takes_value(true)
|
||||
.value_name("NUMBER OF SLOTS")
|
||||
.validator(is_slot)
|
||||
.help(
|
||||
"Limit the search to this many slots"
|
||||
),
|
||||
),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
pub fn parse_catchup(
|
||||
matches: &ArgMatches<'_>,
|
||||
wallet_manager: Option<&Arc<RemoteWalletManager>>,
|
||||
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
|
||||
) -> Result<CliCommandInfo, CliError> {
|
||||
let node_pubkey = pubkey_of_signer(matches, "node_pubkey", wallet_manager)?.unwrap();
|
||||
let node_json_rpc_url = value_t!(matches, "node_json_rpc_url", String).ok();
|
||||
@@ -307,7 +341,7 @@ pub fn parse_catchup(
|
||||
pub fn parse_cluster_ping(
|
||||
matches: &ArgMatches<'_>,
|
||||
default_signer_path: &str,
|
||||
wallet_manager: Option<&Arc<RemoteWalletManager>>,
|
||||
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
|
||||
) -> Result<CliCommandInfo, CliError> {
|
||||
let lamports = value_t_or_exit!(matches, "lamports", u64);
|
||||
let interval = Duration::from_secs(value_t_or_exit!(matches, "interval", u64));
|
||||
@@ -409,7 +443,7 @@ pub fn parse_get_transaction_count(matches: &ArgMatches<'_>) -> Result<CliComman
|
||||
|
||||
pub fn parse_show_stakes(
|
||||
matches: &ArgMatches<'_>,
|
||||
wallet_manager: Option<&Arc<RemoteWalletManager>>,
|
||||
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
|
||||
) -> Result<CliCommandInfo, CliError> {
|
||||
let use_lamports_unit = matches.is_present("lamports");
|
||||
let vote_account_pubkeys =
|
||||
@@ -441,6 +475,25 @@ pub fn parse_show_validators(matches: &ArgMatches<'_>) -> Result<CliCommandInfo,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn parse_transaction_history(
|
||||
matches: &ArgMatches<'_>,
|
||||
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
|
||||
) -> Result<CliCommandInfo, CliError> {
|
||||
let address = pubkey_of_signer(matches, "address", wallet_manager)?.unwrap();
|
||||
let end_slot = value_t!(matches, "end_slot", Slot).ok();
|
||||
let slot_limit = value_t!(matches, "limit", u64)
|
||||
.unwrap_or(MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE);
|
||||
|
||||
Ok(CliCommandInfo {
|
||||
command: CliCommand::TransactionHistory {
|
||||
address,
|
||||
end_slot,
|
||||
slot_limit,
|
||||
},
|
||||
signers: vec![],
|
||||
})
|
||||
}
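
The signature changes throughout this diff -- Option<&Arc<RemoteWalletManager>> becoming &mut Option<Arc<RemoteWalletManager>> -- all serve the same pattern: a parser can initialize the wallet manager lazily, on first use, and later parsers reuse it through the same mutable slot. A hedged sketch of that pattern (RemoteWalletManager is the type imported above; create_manager stands in for whatever really constructs it and is not taken from the diff):

use std::sync::Arc;

fn get_or_init(
    wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
    create_manager: impl FnOnce() -> Option<Arc<RemoteWalletManager>>,
) -> Option<Arc<RemoteWalletManager>> {
    if wallet_manager.is_none() {
        // Only the first caller pays the cost of constructing the manager.
        *wallet_manager = create_manager();
    }
    // Cloning an Option<Arc<_>> just bumps the reference count.
    wallet_manager.clone()
}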
|
||||
|
||||
/// Creates a new progress bar for processing that will take an unknown amount of time
|
||||
fn new_spinner_progress_bar() -> ProgressBar {
|
||||
let progress_bar = ProgressBar::new(42);
|
||||
@@ -603,51 +656,15 @@ pub fn process_get_block_time(rpc_client: &RpcClient, slot: Slot) -> ProcessResu
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
fn slot_to_human_time(slot: Slot) -> String {
|
||||
humantime::format_duration(Duration::from_secs(
|
||||
slot * clock::DEFAULT_TICKS_PER_SLOT / clock::DEFAULT_TICKS_PER_SECOND,
|
||||
))
|
||||
.to_string()
|
||||
}
|
||||
|
||||
pub fn process_get_epoch_info(
|
||||
rpc_client: &RpcClient,
|
||||
config: &CliConfig,
|
||||
commitment_config: CommitmentConfig,
|
||||
) -> ProcessResult {
|
||||
let epoch_info = rpc_client.get_epoch_info_with_commitment(commitment_config.clone())?;
|
||||
println!();
|
||||
println_name_value("Slot:", &epoch_info.absolute_slot.to_string());
|
||||
println_name_value("Epoch:", &epoch_info.epoch.to_string());
|
||||
let start_slot = epoch_info.absolute_slot - epoch_info.slot_index;
|
||||
let end_slot = start_slot + epoch_info.slots_in_epoch;
|
||||
println_name_value(
|
||||
"Epoch Slot Range:",
|
||||
&format!("[{}..{})", start_slot, end_slot),
|
||||
);
|
||||
println_name_value(
|
||||
"Epoch Completed Percent:",
|
||||
&format!(
|
||||
"{:>3.3}%",
|
||||
epoch_info.slot_index as f64 / epoch_info.slots_in_epoch as f64 * 100_f64
|
||||
),
|
||||
);
|
||||
let remaining_slots_in_epoch = epoch_info.slots_in_epoch - epoch_info.slot_index;
|
||||
println_name_value(
|
||||
"Epoch Completed Slots:",
|
||||
&format!(
|
||||
"{}/{} ({} remaining)",
|
||||
epoch_info.slot_index, epoch_info.slots_in_epoch, remaining_slots_in_epoch
|
||||
),
|
||||
);
|
||||
println_name_value(
|
||||
"Epoch Completed Time:",
|
||||
&format!(
|
||||
"{}/{} ({} remaining)",
|
||||
slot_to_human_time(epoch_info.slot_index),
|
||||
slot_to_human_time(epoch_info.slots_in_epoch),
|
||||
slot_to_human_time(remaining_slots_in_epoch)
|
||||
),
|
||||
);
|
||||
let epoch_info: CliEpochInfo = rpc_client
|
||||
.get_epoch_info_with_commitment(commitment_config.clone())?
|
||||
.into();
|
||||
config.output_format.formatted_print(&epoch_info);
|
||||
Ok("".to_string())
|
||||
}
|
||||
|
||||
@@ -740,9 +757,9 @@ pub fn process_show_block_production(
|
||||
let start_slot_index = (start_slot - first_slot_in_epoch) as usize;
|
||||
let end_slot_index = (end_slot - first_slot_in_epoch) as usize;
|
||||
let total_slots = end_slot_index - start_slot_index + 1;
|
||||
let total_blocks = confirmed_blocks.len();
|
||||
assert!(total_blocks <= total_slots);
|
||||
let total_slots_skipped = total_slots - total_blocks;
|
||||
let total_blocks_produced = confirmed_blocks.len();
|
||||
assert!(total_blocks_produced <= total_slots);
|
||||
let total_slots_skipped = total_slots - total_blocks_produced;
|
||||
let mut leader_slot_count = HashMap::new();
|
||||
let mut leader_skipped_slots = HashMap::new();
|
||||
|
||||
@@ -766,7 +783,7 @@ pub fn process_show_block_production(
|
||||
|
||||
progress_bar.set_message(&format!(
|
||||
"Processing {} slots containing {} blocks and {} empty slots...",
|
||||
total_slots, total_blocks, total_slots_skipped
|
||||
total_slots, total_blocks_produced, total_slots_skipped
|
||||
));
|
||||
|
||||
let mut confirmed_blocks_index = 0;
|
||||
@@ -785,71 +802,52 @@ pub fn process_show_block_production(
|
||||
continue;
|
||||
}
|
||||
if slot_of_next_confirmed_block == slot {
|
||||
individual_slot_status
|
||||
.push(style(format!(" {:<15} {:<44}", slot, leader)).to_string());
|
||||
individual_slot_status.push(CliSlotStatus {
|
||||
slot,
|
||||
leader: (*leader).to_string(),
|
||||
skipped: false,
|
||||
});
|
||||
break;
|
||||
}
|
||||
}
|
||||
*skipped_slots += 1;
|
||||
individual_slot_status.push(
|
||||
style(format!(" {:<15} {:<44} SKIPPED", slot, leader))
|
||||
.red()
|
||||
.to_string(),
|
||||
);
|
||||
individual_slot_status.push(CliSlotStatus {
|
||||
slot,
|
||||
leader: (*leader).to_string(),
|
||||
skipped: true,
|
||||
});
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
progress_bar.finish_and_clear();
|
||||
println!(
|
||||
"\n{}",
|
||||
style(format!(
|
||||
" {:<44} {:>15} {:>15} {:>15} {:>23}",
|
||||
"Identity Pubkey",
|
||||
"Leader Slots",
|
||||
"Blocks Produced",
|
||||
"Skipped Slots",
|
||||
"Skipped Slot Percentage",
|
||||
))
|
||||
.bold()
|
||||
);
|
||||
|
||||
let mut table = vec![];
|
||||
for (leader, leader_slots) in leader_slot_count.iter() {
|
||||
let skipped_slots = leader_skipped_slots.get(leader).unwrap();
|
||||
let blocks_produced = leader_slots - skipped_slots;
|
||||
table.push(format!(
|
||||
" {:<44} {:>15} {:>15} {:>15} {:>22.2}%",
|
||||
leader,
|
||||
leader_slots,
|
||||
blocks_produced,
|
||||
skipped_slots,
|
||||
*skipped_slots as f64 / *leader_slots as f64 * 100.
|
||||
));
|
||||
}
|
||||
table.sort();
|
||||
|
||||
println!(
|
||||
"{}\n\n {:<44} {:>15} {:>15} {:>15} {:>22.2}%",
|
||||
table.join("\n"),
|
||||
format!("Epoch {} total:", epoch),
|
||||
let mut leaders: Vec<CliBlockProductionEntry> = leader_slot_count
|
||||
.iter()
|
||||
.map(|(leader, leader_slots)| {
|
||||
let skipped_slots = leader_skipped_slots.get(leader).unwrap();
|
||||
let blocks_produced = leader_slots - skipped_slots;
|
||||
CliBlockProductionEntry {
|
||||
identity_pubkey: (**leader).to_string(),
|
||||
leader_slots: *leader_slots,
|
||||
blocks_produced,
|
||||
skipped_slots: *skipped_slots,
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
leaders.sort_by(|a, b| a.identity_pubkey.partial_cmp(&b.identity_pubkey).unwrap());
|
||||
let block_production = CliBlockProduction {
|
||||
epoch,
|
||||
start_slot,
|
||||
end_slot,
|
||||
total_slots,
|
||||
total_blocks,
|
||||
total_blocks_produced,
|
||||
total_slots_skipped,
|
||||
total_slots_skipped as f64 / total_slots as f64 * 100.
|
||||
);
|
||||
println!(
|
||||
" (using data from {} slots: {} to {})",
|
||||
total_slots, start_slot, end_slot
|
||||
);
|
||||
|
||||
if config.verbose {
|
||||
println!(
|
||||
"\n\n{}\n{}",
|
||||
style(format!(" {:<15} {:<44}", "Slot", "Identity Pubkey")).bold(),
|
||||
individual_slot_status.join("\n")
|
||||
);
|
||||
}
|
||||
leaders,
|
||||
individual_slot_status,
|
||||
verbose: config.verbose,
|
||||
};
|
||||
config.output_format.formatted_print(&block_production);
|
||||
Ok("".to_string())
|
||||
}
|
||||
|
||||
@@ -1131,10 +1129,11 @@ pub fn process_show_gossip(rpc_client: &RpcClient) -> ProcessResult {
|
||||
|
||||
pub fn process_show_stakes(
|
||||
rpc_client: &RpcClient,
|
||||
config: &CliConfig,
|
||||
use_lamports_unit: bool,
|
||||
vote_account_pubkeys: Option<&[Pubkey]>,
|
||||
) -> ProcessResult {
|
||||
use crate::stake::print_stake_state;
|
||||
use crate::stake::build_stake_state;
|
||||
use solana_stake_program::stake_state::StakeState;
|
||||
|
||||
let progress_bar = new_spinner_progress_bar();
|
||||
@@ -1142,13 +1141,20 @@ pub fn process_show_stakes(
|
||||
let all_stake_accounts = rpc_client.get_program_accounts(&solana_stake_program::id())?;
|
||||
progress_bar.finish_and_clear();
|
||||
|
||||
let mut stake_accounts: Vec<CliKeyedStakeState> = vec![];
|
||||
for (stake_pubkey, stake_account) in all_stake_accounts {
|
||||
if let Ok(stake_state) = stake_account.state() {
|
||||
match stake_state {
|
||||
StakeState::Initialized(_) => {
|
||||
if vote_account_pubkeys.is_none() {
|
||||
println!("\nstake pubkey: {}", stake_pubkey);
|
||||
print_stake_state(stake_account.lamports, &stake_state, use_lamports_unit);
|
||||
stake_accounts.push(CliKeyedStakeState {
|
||||
stake_pubkey: stake_pubkey.to_string(),
|
||||
stake_state: build_stake_state(
|
||||
stake_account.lamports,
|
||||
&stake_state,
|
||||
use_lamports_unit,
|
||||
),
|
||||
});
|
||||
}
|
||||
}
|
||||
StakeState::Stake(_, stake) => {
|
||||
@@ -1157,19 +1163,29 @@ pub fn process_show_stakes(
|
||||
.unwrap()
|
||||
.contains(&stake.delegation.voter_pubkey)
|
||||
{
|
||||
println!("\nstake pubkey: {}", stake_pubkey);
|
||||
print_stake_state(stake_account.lamports, &stake_state, use_lamports_unit);
|
||||
stake_accounts.push(CliKeyedStakeState {
|
||||
stake_pubkey: stake_pubkey.to_string(),
|
||||
stake_state: build_stake_state(
|
||||
stake_account.lamports,
|
||||
&stake_state,
|
||||
use_lamports_unit,
|
||||
),
|
||||
});
|
||||
}
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
}
|
||||
config
|
||||
.output_format
|
||||
.formatted_print(&CliStakeVec::new(stake_accounts));
|
||||
Ok("".to_string())
|
||||
}
|
||||
|
||||
pub fn process_show_validators(
|
||||
rpc_client: &RpcClient,
|
||||
config: &CliConfig,
|
||||
use_lamports_unit: bool,
|
||||
commitment_config: CommitmentConfig,
|
||||
) -> ProcessResult {
|
||||
@@ -1179,129 +1195,66 @@ pub fn process_show_validators(
|
||||
.current
|
||||
.iter()
|
||||
.chain(vote_accounts.delinquent.iter())
|
||||
.fold(0, |acc, vote_account| acc + vote_account.activated_stake)
|
||||
as f64;
|
||||
.fold(0, |acc, vote_account| acc + vote_account.activated_stake);
|
||||
|
||||
let total_deliquent_stake = vote_accounts
|
||||
.delinquent
|
||||
.iter()
|
||||
.fold(0, |acc, vote_account| acc + vote_account.activated_stake)
|
||||
as f64;
|
||||
.fold(0, |acc, vote_account| acc + vote_account.activated_stake);
|
||||
let total_current_stake = total_active_stake - total_deliquent_stake;
|
||||
|
||||
println_name_value(
|
||||
"Active Stake:",
|
||||
&build_balance_message(total_active_stake as u64, use_lamports_unit, true),
|
||||
);
|
||||
if total_deliquent_stake > 0. {
|
||||
println_name_value(
|
||||
"Current Stake:",
|
||||
&format!(
|
||||
"{} ({:0.2}%)",
|
||||
&build_balance_message(total_current_stake as u64, use_lamports_unit, true),
|
||||
100. * total_current_stake / total_active_stake
|
||||
),
|
||||
);
|
||||
println_name_value(
|
||||
"Delinquent Stake:",
|
||||
&format!(
|
||||
"{} ({:0.2}%)",
|
||||
&build_balance_message(total_deliquent_stake as u64, use_lamports_unit, true),
|
||||
100. * total_deliquent_stake / total_active_stake
|
||||
),
|
||||
);
|
||||
}
|
||||
println!();
|
||||
|
||||
println!(
|
||||
"{}",
|
||||
style(format!(
|
||||
" {:<44} {:<44} {} {} {} {:>7} {}",
|
||||
"Identity Pubkey",
|
||||
"Vote Account Pubkey",
|
||||
"Commission",
|
||||
"Last Vote",
|
||||
"Root Block",
|
||||
"Credits",
|
||||
"Active Stake",
|
||||
))
|
||||
.bold()
|
||||
);
|
||||
|
||||
fn print_vote_account(
|
||||
vote_account: RpcVoteAccountInfo,
|
||||
current_epoch: Epoch,
|
||||
total_active_stake: f64,
|
||||
use_lamports_unit: bool,
|
||||
delinquent: bool,
|
||||
) {
|
||||
fn non_zero_or_dash(v: u64) -> String {
|
||||
if v == 0 {
|
||||
"-".into()
|
||||
} else {
|
||||
format!("{}", v)
|
||||
}
|
||||
}
|
||||
|
||||
println!(
|
||||
"{} {:<44} {:<44} {:>9}% {:>8} {:>10} {:>7} {}",
|
||||
if delinquent {
|
||||
WARNING.to_string()
|
||||
} else {
|
||||
" ".to_string()
|
||||
},
|
||||
vote_account.node_pubkey,
|
||||
vote_account.vote_pubkey,
|
||||
vote_account.commission,
|
||||
non_zero_or_dash(vote_account.last_vote),
|
||||
non_zero_or_dash(vote_account.root_slot),
|
||||
vote_account
|
||||
.epoch_credits
|
||||
.iter()
|
||||
.find_map(|(epoch, credits, _)| if *epoch == current_epoch {
|
||||
Some(*credits)
|
||||
} else {
|
||||
None
|
||||
})
|
||||
.unwrap_or(0),
|
||||
if vote_account.activated_stake > 0 {
|
||||
format!(
|
||||
"{} ({:.2}%)",
|
||||
build_balance_message(vote_account.activated_stake, use_lamports_unit, true),
|
||||
100. * vote_account.activated_stake as f64 / total_active_stake
|
||||
)
|
||||
} else {
|
||||
"-".into()
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
let mut current = vote_accounts.current;
|
||||
current.sort_by(|a, b| b.activated_stake.cmp(&a.activated_stake));
|
||||
for vote_account in current.into_iter() {
|
||||
print_vote_account(
|
||||
vote_account,
|
||||
epoch_info.epoch,
|
||||
total_active_stake,
|
||||
use_lamports_unit,
|
||||
false,
|
||||
);
|
||||
}
|
||||
let current_validators: Vec<CliValidator> = current
|
||||
.iter()
|
||||
.map(|vote_account| CliValidator::new(vote_account, epoch_info.epoch))
|
||||
.collect();
|
||||
let mut delinquent = vote_accounts.delinquent;
|
||||
delinquent.sort_by(|a, b| b.activated_stake.cmp(&a.activated_stake));
|
||||
for vote_account in delinquent.into_iter() {
|
||||
print_vote_account(
|
||||
vote_account,
|
||||
epoch_info.epoch,
|
||||
total_active_stake,
|
||||
use_lamports_unit,
|
||||
true,
|
||||
);
|
||||
}
|
||||
let delinquent_validators: Vec<CliValidator> = delinquent
|
||||
.iter()
|
||||
.map(|vote_account| CliValidator::new(vote_account, epoch_info.epoch))
|
||||
.collect();
|
||||
|
||||
let cli_validators = CliValidators {
|
||||
total_active_stake,
|
||||
total_current_stake,
|
||||
total_deliquent_stake,
|
||||
current_validators,
|
||||
delinquent_validators,
|
||||
use_lamports_unit,
|
||||
};
|
||||
config.output_format.formatted_print(&cli_validators);
|
||||
Ok("".to_string())
|
||||
}
|
||||
|
||||
pub fn process_transaction_history(
|
||||
rpc_client: &RpcClient,
|
||||
address: &Pubkey,
|
||||
end_slot: Option<Slot>, // None == use latest slot
|
||||
slot_limit: u64,
|
||||
) -> ProcessResult {
|
||||
let end_slot = {
|
||||
if let Some(end_slot) = end_slot {
|
||||
end_slot
|
||||
} else {
|
||||
rpc_client.get_slot_with_commitment(CommitmentConfig::max())?
|
||||
}
|
||||
};
|
||||
let start_slot = end_slot.saturating_sub(slot_limit);
|
||||
|
||||
println!(
|
||||
"Transactions affecting {} within slots [{},{}]",
|
||||
address, start_slot, end_slot
|
||||
);
|
||||
let signatures =
|
||||
rpc_client.get_confirmed_signatures_for_address(address, start_slot, end_slot)?;
|
||||
for signature in &signatures {
|
||||
println!("{}", signature);
|
||||
}
|
||||
Ok(format!("{} transactions found", signatures.len(),))
|
||||
}
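
The slot window searched above is simply [end_slot - slot_limit, end_slot], clamped at zero near the genesis slot; a quick sketch with made-up numbers (these values are illustrative, not taken from the diff):

let end_slot: u64 = 25_000;
let slot_limit: u64 = 10_000;
let start_slot = end_slot.saturating_sub(slot_limit);
assert_eq!(start_slot, 15_000);

// saturating_sub keeps the window valid when end_slot < slot_limit:
assert_eq!(5_000_u64.saturating_sub(slot_limit), 0);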
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
@@ -1325,7 +1278,7 @@ mod tests {
|
||||
.clone()
|
||||
.get_matches_from(vec!["test", "cluster-version"]);
|
||||
assert_eq!(
|
||||
parse_command(&test_cluster_version, &default_keypair_file, None).unwrap(),
|
||||
parse_command(&test_cluster_version, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::ClusterVersion,
|
||||
signers: vec![],
|
||||
@@ -1334,7 +1287,7 @@ mod tests {
|
||||
|
||||
let test_fees = test_commands.clone().get_matches_from(vec!["test", "fees"]);
|
||||
assert_eq!(
|
||||
parse_command(&test_fees, &default_keypair_file, None).unwrap(),
|
||||
parse_command(&test_fees, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::Fees,
|
||||
signers: vec![],
|
||||
@@ -1347,7 +1300,7 @@ mod tests {
|
||||
.clone()
|
||||
.get_matches_from(vec!["test", "block-time", &slot.to_string()]);
|
||||
assert_eq!(
|
||||
parse_command(&test_get_block_time, &default_keypair_file, None).unwrap(),
|
||||
parse_command(&test_get_block_time, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::GetBlockTime { slot },
|
||||
signers: vec![],
|
||||
@@ -1358,7 +1311,7 @@ mod tests {
|
||||
.clone()
|
||||
.get_matches_from(vec!["test", "epoch-info"]);
|
||||
assert_eq!(
|
||||
parse_command(&test_get_epoch_info, &default_keypair_file, None).unwrap(),
|
||||
parse_command(&test_get_epoch_info, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::GetEpochInfo {
|
||||
commitment_config: CommitmentConfig::recent(),
|
||||
@@ -1371,7 +1324,7 @@ mod tests {
|
||||
.clone()
|
||||
.get_matches_from(vec!["test", "genesis-hash"]);
|
||||
assert_eq!(
|
||||
parse_command(&test_get_genesis_hash, &default_keypair_file, None).unwrap(),
|
||||
parse_command(&test_get_genesis_hash, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::GetGenesisHash,
|
||||
signers: vec![],
|
||||
@@ -1380,7 +1333,7 @@ mod tests {
|
||||
|
||||
let test_get_slot = test_commands.clone().get_matches_from(vec!["test", "slot"]);
|
||||
assert_eq!(
|
||||
parse_command(&test_get_slot, &default_keypair_file, None).unwrap(),
|
||||
parse_command(&test_get_slot, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::GetSlot {
|
||||
commitment_config: CommitmentConfig::recent(),
|
||||
@@ -1393,7 +1346,7 @@ mod tests {
|
||||
.clone()
|
||||
.get_matches_from(vec!["test", "epoch"]);
|
||||
assert_eq!(
|
||||
parse_command(&test_get_epoch, &default_keypair_file, None).unwrap(),
|
||||
parse_command(&test_get_epoch, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::GetEpoch {
|
||||
commitment_config: CommitmentConfig::recent(),
|
||||
@@ -1406,7 +1359,7 @@ mod tests {
|
||||
.clone()
|
||||
.get_matches_from(vec!["test", "total-supply"]);
|
||||
assert_eq!(
|
||||
parse_command(&test_total_supply, &default_keypair_file, None).unwrap(),
|
||||
parse_command(&test_total_supply, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::TotalSupply {
|
||||
commitment_config: CommitmentConfig::recent(),
|
||||
@@ -1419,7 +1372,7 @@ mod tests {
|
||||
.clone()
|
||||
.get_matches_from(vec!["test", "transaction-count"]);
|
||||
assert_eq!(
|
||||
parse_command(&test_transaction_count, &default_keypair_file, None).unwrap(),
|
||||
parse_command(&test_transaction_count, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::GetTransactionCount {
|
||||
commitment_config: CommitmentConfig::recent(),
|
||||
@@ -1440,7 +1393,7 @@ mod tests {
|
||||
"--confirmed",
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_ping, &default_keypair_file, None).unwrap(),
|
||||
parse_command(&test_ping, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::Ping {
|
||||
lamports: 1,
|
||||
|
@@ -1,6 +1,11 @@
use crate::cli::SettingType;
use console::style;
use solana_sdk::hash::Hash;
use solana_sdk::{
    hash::Hash, native_token::lamports_to_sol, program_utils::limited_deserialize,
    transaction::Transaction,
};
use solana_transaction_status::RpcTransactionStatusMeta;
use std::fmt;

// Pretty print a "name value"
pub fn println_name_value(name: &str, value: &str) {
@@ -12,6 +17,15 @@ pub fn println_name_value(name: &str, value: &str) {
    println!("{} {}", style(name).bold(), styled_value);
}

pub fn writeln_name_value(f: &mut fmt::Formatter, name: &str, value: &str) -> fmt::Result {
    let styled_value = if value == "" {
        style("(not set)").italic()
    } else {
        style(value)
    };
    writeln!(f, "{} {}", style(name).bold(), styled_value)
}

pub fn println_name_value_or(name: &str, value: &str, setting_type: SettingType) {
|
||||
let description = match setting_type {
|
||||
SettingType::Explicit => "",
|
||||
@@ -49,3 +63,106 @@ pub fn println_signers(
|
||||
}
|
||||
println!();
|
||||
}
|
||||
|
||||
pub fn println_transaction(
|
||||
transaction: &Transaction,
|
||||
transaction_status: &Option<RpcTransactionStatusMeta>,
|
||||
prefix: &str,
|
||||
) {
|
||||
let message = &transaction.message;
|
||||
println!("{}Recent Blockhash: {:?}", prefix, message.recent_blockhash);
|
||||
for (signature_index, signature) in transaction.signatures.iter().enumerate() {
|
||||
println!("{}Signature {}: {:?}", prefix, signature_index, signature);
|
||||
}
|
||||
println!("{}{:?}", prefix, message.header);
|
||||
for (account_index, account) in message.account_keys.iter().enumerate() {
|
||||
println!("{}Account {}: {:?}", prefix, account_index, account);
|
||||
}
|
||||
for (instruction_index, instruction) in message.instructions.iter().enumerate() {
|
||||
let program_pubkey = message.account_keys[instruction.program_id_index as usize];
|
||||
println!("{}Instruction {}", prefix, instruction_index);
|
||||
println!(
|
||||
"{} Program: {} ({})",
|
||||
prefix, program_pubkey, instruction.program_id_index
|
||||
);
|
||||
for (account_index, account) in instruction.accounts.iter().enumerate() {
|
||||
let account_pubkey = message.account_keys[*account as usize];
|
||||
println!(
|
||||
"{} Account {}: {} ({})",
|
||||
prefix, account_index, account_pubkey, account
|
||||
);
|
||||
}
|
||||
|
||||
let mut raw = true;
|
||||
if program_pubkey == solana_vote_program::id() {
|
||||
if let Ok(vote_instruction) = limited_deserialize::<
|
||||
solana_vote_program::vote_instruction::VoteInstruction,
|
||||
>(&instruction.data)
|
||||
{
|
||||
println!("{} {:?}", prefix, vote_instruction);
|
||||
raw = false;
|
||||
}
|
||||
} else if program_pubkey == solana_stake_program::id() {
|
||||
if let Ok(stake_instruction) = limited_deserialize::<
|
||||
solana_stake_program::stake_instruction::StakeInstruction,
|
||||
>(&instruction.data)
|
||||
{
|
||||
println!("{} {:?}", prefix, stake_instruction);
|
||||
raw = false;
|
||||
}
|
||||
} else if program_pubkey == solana_sdk::system_program::id() {
|
||||
if let Ok(system_instruction) = limited_deserialize::<
|
||||
solana_sdk::system_instruction::SystemInstruction,
|
||||
>(&instruction.data)
|
||||
{
|
||||
println!("{} {:?}", prefix, system_instruction);
|
||||
raw = false;
|
||||
}
|
||||
}
|
||||
|
||||
if raw {
|
||||
println!("{} Data: {:?}", prefix, instruction.data);
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(transaction_status) = transaction_status {
|
||||
println!(
|
||||
"{}Status: {}",
|
||||
prefix,
|
||||
match &transaction_status.status {
|
||||
Ok(_) => "Ok".into(),
|
||||
Err(err) => err.to_string(),
|
||||
}
|
||||
);
|
||||
println!("{} Fee: {}", prefix, transaction_status.fee);
|
||||
assert_eq!(
|
||||
transaction_status.pre_balances.len(),
|
||||
transaction_status.post_balances.len()
|
||||
);
|
||||
for (i, (pre, post)) in transaction_status
|
||||
.pre_balances
|
||||
.iter()
|
||||
.zip(transaction_status.post_balances.iter())
|
||||
.enumerate()
|
||||
{
|
||||
if pre == post {
|
||||
println!(
|
||||
"{} Account {} balance: {} SOL",
|
||||
prefix,
|
||||
i,
|
||||
lamports_to_sol(*pre)
|
||||
);
|
||||
} else {
|
||||
println!(
|
||||
"{} Account {} balance: {} SOL -> {} SOL",
|
||||
prefix,
|
||||
i,
|
||||
lamports_to_sol(*pre),
|
||||
lamports_to_sol(*post)
|
||||
);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
println!("{}Status: Unavailable", prefix);
|
||||
}
|
||||
}
|
||||
|
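The println_transaction helper added above walks every instruction in the message, and for the vote, stake, and system programs it tries limited_deserialize on the instruction data so the decoded enum variant can be shown; anything it cannot decode falls back to printing the raw byte vector. A reduced sketch of that decode-or-raw pattern for the system program only (the function name and the bool flag are illustrative, not part of the diff):

use solana_sdk::{program_utils::limited_deserialize, system_instruction::SystemInstruction};

// Try to decode system-program instruction data; otherwise print it raw.
fn print_instruction_data(prefix: &str, is_system_program: bool, data: &[u8]) {
    let mut raw = true;
    if is_system_program {
        if let Ok(ix) = limited_deserialize::<SystemInstruction>(data) {
            println!("{} {:?}", prefix, ix);
            raw = false;
        }
    }
    if raw {
        println!("{} Data: {:?}", prefix, data);
    }
}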
@@ -1,4 +1,8 @@
#[macro_use]
extern crate serde_derive;

pub mod cli;
pub mod cli_output;
pub mod cluster_query;
pub mod display;
pub mod nonce;
@@ -2,17 +2,16 @@ use clap::{crate_description, crate_name, AppSettings, Arg, ArgGroup, ArgMatches
|
||||
use console::style;
|
||||
|
||||
use solana_clap_utils::{
|
||||
input_validators::is_url,
|
||||
keypair::{check_for_usb, SKIP_SEED_PHRASE_VALIDATION_ARG},
|
||||
offline::SIGN_ONLY_ARG,
|
||||
input_validators::is_url, keypair::SKIP_SEED_PHRASE_VALIDATION_ARG, offline::SIGN_ONLY_ARG,
|
||||
DisplayError,
|
||||
};
|
||||
use solana_cli::{
|
||||
cli::{app, parse_command, process_command, CliCommandInfo, CliConfig, CliSigners},
|
||||
cli_output::OutputFormat,
|
||||
display::{println_name_value, println_name_value_or},
|
||||
};
|
||||
use solana_cli_config::{Config, CONFIG_FILE};
|
||||
use solana_remote_wallet::remote_wallet::{maybe_wallet_manager, RemoteWalletManager};
|
||||
use solana_remote_wallet::remote_wallet::RemoteWalletManager;
|
||||
use std::{error, sync::Arc};
|
||||
|
||||
fn parse_settings(matches: &ArgMatches<'_>) -> Result<bool, Box<dyn error::Error>> {
|
||||
@@ -104,7 +103,7 @@ fn parse_settings(matches: &ArgMatches<'_>) -> Result<bool, Box<dyn error::Error
|
||||
|
||||
pub fn parse_args<'a>(
|
||||
matches: &ArgMatches<'_>,
|
||||
wallet_manager: Option<Arc<RemoteWalletManager>>,
|
||||
mut wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
|
||||
) -> Result<(CliConfig<'a>, CliSigners), Box<dyn error::Error>> {
|
||||
let config = if let Some(config_file) = matches.value_of("config_file") {
|
||||
Config::load(config_file).unwrap_or_default()
|
||||
@@ -127,7 +126,16 @@ pub fn parse_args<'a>(
|
||||
);
|
||||
|
||||
let CliCommandInfo { command, signers } =
|
||||
parse_command(&matches, &default_signer_path, wallet_manager.as_ref())?;
|
||||
parse_command(&matches, &default_signer_path, &mut wallet_manager)?;
|
||||
|
||||
let output_format = matches
|
||||
.value_of("output_format")
|
||||
.map(|value| match value {
|
||||
"json" => OutputFormat::Json,
|
||||
"json-compact" => OutputFormat::JsonCompact,
|
||||
_ => unreachable!(),
|
||||
})
|
||||
.unwrap_or(OutputFormat::Display);
|
||||
|
||||
Ok((
|
||||
CliConfig {
|
||||
@@ -138,6 +146,7 @@ pub fn parse_args<'a>(
|
||||
keypair_path: default_signer_path,
|
||||
rpc_client: None,
|
||||
verbose: matches.is_present("verbose"),
|
||||
output_format,
|
||||
},
|
||||
signers,
|
||||
))
|
||||
@@ -199,6 +208,14 @@ fn main() -> Result<(), Box<dyn error::Error>> {
|
||||
.global(true)
|
||||
.help("Show additional information"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("output_format")
|
||||
.long("output")
|
||||
.global(true)
|
||||
.takes_value(true)
|
||||
.possible_values(&["json", "json-compact"])
|
||||
.help("Return information in specified output format. Supports: json, json-compact"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(SKIP_SEED_PHRASE_VALIDATION_ARG.name)
|
||||
.long(SKIP_SEED_PHRASE_VALIDATION_ARG.long)
|
||||
@@ -235,22 +252,14 @@ fn main() -> Result<(), Box<dyn error::Error>> {
        )
        .get_matches();

    do_main(&matches, check_for_usb(std::env::args()))
        .map_err(|err| DisplayError::new_as_boxed(err).into())
    do_main(&matches).map_err(|err| DisplayError::new_as_boxed(err).into())
}

fn do_main(
    matches: &ArgMatches<'_>,
    need_wallet_manager: bool,
) -> Result<(), Box<dyn error::Error>> {
fn do_main(matches: &ArgMatches<'_>) -> Result<(), Box<dyn error::Error>> {
    if parse_settings(&matches)? {
        let wallet_manager = if need_wallet_manager {
            maybe_wallet_manager()?
        } else {
            None
        };
        let mut wallet_manager = None;

        let (mut config, signers) = parse_args(&matches, wallet_manager)?;
        let (mut config, signers) = parse_args(&matches, &mut wallet_manager)?;
        config.signers = signers.iter().map(|s| s.as_ref()).collect();
        let result = process_command(&config)?;
        let (_, submatches) = matches.subcommand();
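The theme running through this diff is visible here: do_main no longer probes for a USB wallet up front via check_for_usb/maybe_wallet_manager, it simply starts with let mut wallet_manager = None; and every parse_* helper now takes &mut Option<Arc<RemoteWalletManager>> instead of Option<&Arc<RemoteWalletManager>>, so the remote wallet manager can be created lazily by whichever command first needs it. A sketch of what that lazy initialization looks like at a call site; ensure_wallet_manager is a hypothetical name, only maybe_wallet_manager appears in the diff:

use solana_remote_wallet::remote_wallet::{maybe_wallet_manager, RemoteWalletManager};
use std::{error, sync::Arc};

// Populate the shared Option on first use, then hand back a clone of the Arc.
fn ensure_wallet_manager(
    wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<Option<Arc<RemoteWalletManager>>, Box<dyn error::Error>> {
    if wallet_manager.is_none() {
        *wallet_manager = maybe_wallet_manager()?;
    }
    Ok(wallet_manager.clone())
}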
@@ -1,7 +1,10 @@
|
||||
use crate::cli::{
|
||||
build_balance_message, check_account_for_fee, check_unique_pubkeys, generate_unique_signers,
|
||||
log_instruction_custom_error, CliCommand, CliCommandInfo, CliConfig, CliError, ProcessResult,
|
||||
SignerIndex,
|
||||
use crate::{
|
||||
cli::{
|
||||
check_account_for_fee, check_unique_pubkeys, generate_unique_signers,
|
||||
log_instruction_custom_error, CliCommand, CliCommandInfo, CliConfig, CliError,
|
||||
ProcessResult, SignerIndex,
|
||||
},
|
||||
cli_output::CliNonceAccount,
|
||||
};
|
||||
use clap::{App, Arg, ArgMatches, SubCommand};
|
||||
use solana_clap_utils::{
|
||||
@@ -276,7 +279,7 @@ pub fn data_from_state(state: &State) -> Result<&Data, CliNonceError> {
|
||||
pub fn parse_authorize_nonce_account(
|
||||
matches: &ArgMatches<'_>,
|
||||
default_signer_path: &str,
|
||||
wallet_manager: Option<&Arc<RemoteWalletManager>>,
|
||||
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
|
||||
) -> Result<CliCommandInfo, CliError> {
|
||||
let nonce_account = pubkey_of_signer(matches, "nonce_account_pubkey", wallet_manager)?.unwrap();
|
||||
let new_authority = pubkey_of_signer(matches, "new_authority", wallet_manager)?.unwrap();
|
||||
@@ -304,7 +307,7 @@ pub fn parse_authorize_nonce_account(
|
||||
pub fn parse_nonce_create_account(
|
||||
matches: &ArgMatches<'_>,
|
||||
default_signer_path: &str,
|
||||
wallet_manager: Option<&Arc<RemoteWalletManager>>,
|
||||
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
|
||||
) -> Result<CliCommandInfo, CliError> {
|
||||
let (nonce_account, nonce_account_pubkey) =
|
||||
signer_of(matches, "nonce_account_keypair", wallet_manager)?;
|
||||
@@ -333,7 +336,7 @@ pub fn parse_nonce_create_account(
|
||||
|
||||
pub fn parse_get_nonce(
|
||||
matches: &ArgMatches<'_>,
|
||||
wallet_manager: Option<&Arc<RemoteWalletManager>>,
|
||||
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
|
||||
) -> Result<CliCommandInfo, CliError> {
|
||||
let nonce_account_pubkey =
|
||||
pubkey_of_signer(matches, "nonce_account_pubkey", wallet_manager)?.unwrap();
|
||||
@@ -347,7 +350,7 @@ pub fn parse_get_nonce(
|
||||
pub fn parse_new_nonce(
|
||||
matches: &ArgMatches<'_>,
|
||||
default_signer_path: &str,
|
||||
wallet_manager: Option<&Arc<RemoteWalletManager>>,
|
||||
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
|
||||
) -> Result<CliCommandInfo, CliError> {
|
||||
let nonce_account = pubkey_of_signer(matches, "nonce_account_pubkey", wallet_manager)?.unwrap();
|
||||
let (nonce_authority, nonce_authority_pubkey) =
|
||||
@@ -372,7 +375,7 @@ pub fn parse_new_nonce(
|
||||
|
||||
pub fn parse_show_nonce_account(
|
||||
matches: &ArgMatches<'_>,
|
||||
wallet_manager: Option<&Arc<RemoteWalletManager>>,
|
||||
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
|
||||
) -> Result<CliCommandInfo, CliError> {
|
||||
let nonce_account_pubkey =
|
||||
pubkey_of_signer(matches, "nonce_account_pubkey", wallet_manager)?.unwrap();
|
||||
@@ -390,7 +393,7 @@ pub fn parse_show_nonce_account(
|
||||
pub fn parse_withdraw_from_nonce_account(
|
||||
matches: &ArgMatches<'_>,
|
||||
default_signer_path: &str,
|
||||
wallet_manager: Option<&Arc<RemoteWalletManager>>,
|
||||
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
|
||||
) -> Result<CliCommandInfo, CliError> {
|
||||
let nonce_account = pubkey_of_signer(matches, "nonce_account_pubkey", wallet_manager)?.unwrap();
|
||||
let destination_account_pubkey =
|
||||
@@ -584,38 +587,26 @@ pub fn process_new_nonce(
|
||||
|
||||
pub fn process_show_nonce_account(
|
||||
rpc_client: &RpcClient,
|
||||
config: &CliConfig,
|
||||
nonce_account_pubkey: &Pubkey,
|
||||
use_lamports_unit: bool,
|
||||
) -> ProcessResult {
|
||||
let nonce_account = get_account(rpc_client, nonce_account_pubkey)?;
|
||||
let print_account = |data: Option<&nonce::state::Data>| {
|
||||
println!(
|
||||
"Balance: {}",
|
||||
build_balance_message(nonce_account.lamports, use_lamports_unit, true)
|
||||
);
|
||||
println!(
|
||||
"Minimum Balance Required: {}",
|
||||
build_balance_message(
|
||||
rpc_client.get_minimum_balance_for_rent_exemption(State::size())?,
|
||||
use_lamports_unit,
|
||||
true
|
||||
)
|
||||
);
|
||||
match data {
|
||||
Some(ref data) => {
|
||||
println!("Nonce: {}", data.blockhash);
|
||||
println!(
|
||||
"Fee: {} lamports per signature",
|
||||
data.fee_calculator.lamports_per_signature
|
||||
);
|
||||
println!("Authority: {}", data.authority);
|
||||
}
|
||||
None => {
|
||||
println!("Nonce: uninitialized");
|
||||
println!("Fees: uninitialized");
|
||||
println!("Authority: uninitialized");
|
||||
}
|
||||
    let mut nonce_account = CliNonceAccount {
        balance: nonce_account.lamports,
        minimum_balance_for_rent_exemption: rpc_client
            .get_minimum_balance_for_rent_exemption(State::size())?,
        use_lamports_unit,
        ..CliNonceAccount::default()
    };
    if let Some(ref data) = data {
        nonce_account.nonce = Some(data.blockhash.to_string());
        nonce_account.lamports_per_signature = Some(data.fee_calculator.lamports_per_signature);
        nonce_account.authority = Some(data.authority.to_string());
    }

    config.output_format.formatted_print(&nonce_account);
    Ok("".to_string())
    };
    match state_from_account(&nonce_account)? {
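Here the hand-rolled println! report is replaced by filling in a CliNonceAccount and handing it to config.output_format.formatted_print, so the nonce display honors the new --output json / json-compact flag added in main.rs. The OutputFormat dispatch itself lives in cli_output.rs, which is not shown in this section; an assumed sketch of its shape, for orientation only:

use serde::Serialize;
use std::fmt::Display;

enum OutputFormat {
    Display,
    Json,
    JsonCompact,
}

impl OutputFormat {
    // Assumed behavior: human-readable Display output, or serde_json for the json modes.
    fn formatted_print<T: Serialize + Display>(&self, item: &T) {
        match self {
            OutputFormat::Display => println!("{}", item),
            OutputFormat::Json => println!("{}", serde_json::to_string_pretty(item).unwrap()),
            OutputFormat::JsonCompact => println!("{}", serde_json::to_string(item).unwrap()),
        }
    }
}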
@@ -697,7 +688,12 @@ mod tests {
|
||||
&Pubkey::default().to_string(),
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_authorize_nonce_account, &default_keypair_file, None).unwrap(),
|
||||
parse_command(
|
||||
&test_authorize_nonce_account,
|
||||
&default_keypair_file,
|
||||
&mut None
|
||||
)
|
||||
.unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::AuthorizeNonceAccount {
|
||||
nonce_account: nonce_account_pubkey,
|
||||
@@ -718,7 +714,12 @@ mod tests {
|
||||
&authority_keypair_file,
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_authorize_nonce_account, &default_keypair_file, None).unwrap(),
|
||||
parse_command(
|
||||
&test_authorize_nonce_account,
|
||||
&default_keypair_file,
|
||||
&mut None
|
||||
)
|
||||
.unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::AuthorizeNonceAccount {
|
||||
nonce_account: read_keypair_file(&keypair_file).unwrap().pubkey(),
|
||||
@@ -740,7 +741,7 @@ mod tests {
|
||||
"50",
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_create_nonce_account, &default_keypair_file, None).unwrap(),
|
||||
parse_command(&test_create_nonce_account, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::CreateNonceAccount {
|
||||
nonce_account: 1,
|
||||
@@ -765,7 +766,7 @@ mod tests {
|
||||
&authority_keypair_file,
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_create_nonce_account, &default_keypair_file, None).unwrap(),
|
||||
parse_command(&test_create_nonce_account, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::CreateNonceAccount {
|
||||
nonce_account: 1,
|
||||
@@ -787,7 +788,7 @@ mod tests {
|
||||
&nonce_account_string,
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_get_nonce, &default_keypair_file, None).unwrap(),
|
||||
parse_command(&test_get_nonce, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::GetNonce(nonce_account_keypair.pubkey()),
|
||||
signers: vec![],
|
||||
@@ -801,7 +802,7 @@ mod tests {
|
||||
.get_matches_from(vec!["test", "new-nonce", &keypair_file]);
|
||||
let nonce_account = read_keypair_file(&keypair_file).unwrap();
|
||||
assert_eq!(
|
||||
parse_command(&test_new_nonce, &default_keypair_file, None).unwrap(),
|
||||
parse_command(&test_new_nonce, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::NewNonce {
|
||||
nonce_account: nonce_account.pubkey(),
|
||||
@@ -821,7 +822,7 @@ mod tests {
|
||||
]);
|
||||
let nonce_account = read_keypair_file(&keypair_file).unwrap();
|
||||
assert_eq!(
|
||||
parse_command(&test_new_nonce, &default_keypair_file, None).unwrap(),
|
||||
parse_command(&test_new_nonce, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::NewNonce {
|
||||
nonce_account: nonce_account.pubkey(),
|
||||
@@ -841,7 +842,7 @@ mod tests {
|
||||
&nonce_account_string,
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_show_nonce_account, &default_keypair_file, None).unwrap(),
|
||||
parse_command(&test_show_nonce_account, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::ShowNonceAccount {
|
||||
nonce_account_pubkey: nonce_account_keypair.pubkey(),
|
||||
@@ -863,7 +864,7 @@ mod tests {
|
||||
parse_command(
|
||||
&test_withdraw_from_nonce_account,
|
||||
&default_keypair_file,
|
||||
None
|
||||
&mut None
|
||||
)
|
||||
.unwrap(),
|
||||
CliCommandInfo {
|
||||
@@ -891,7 +892,7 @@ mod tests {
|
||||
parse_command(
|
||||
&test_withdraw_from_nonce_account,
|
||||
&default_keypair_file,
|
||||
None
|
||||
&mut None
|
||||
)
|
||||
.unwrap(),
|
||||
CliCommandInfo {
|
||||
|
cli/src/stake.rs
@@ -1,15 +1,14 @@
|
||||
use crate::{
|
||||
cli::{
|
||||
build_balance_message, check_account_for_fee, check_unique_pubkeys, fee_payer_arg,
|
||||
generate_unique_signers, log_instruction_custom_error, nonce_authority_arg, return_signers,
|
||||
CliCommand, CliCommandInfo, CliConfig, CliError, ProcessResult, SignerIndex, FEE_PAYER_ARG,
|
||||
check_account_for_fee, check_unique_pubkeys, fee_payer_arg, generate_unique_signers,
|
||||
log_instruction_custom_error, nonce_authority_arg, return_signers, CliCommand,
|
||||
CliCommandInfo, CliConfig, CliError, ProcessResult, SignerIndex, FEE_PAYER_ARG,
|
||||
},
|
||||
cli_output::{CliStakeHistory, CliStakeHistoryEntry, CliStakeState, CliStakeType},
|
||||
nonce::{check_nonce_account, nonce_arg, NONCE_ARG, NONCE_AUTHORITY_ARG},
|
||||
offline::{blockhash_query::BlockhashQuery, *},
|
||||
};
|
||||
use chrono::{DateTime, NaiveDateTime, SecondsFormat, Utc};
|
||||
use clap::{App, Arg, ArgGroup, ArgMatches, SubCommand};
|
||||
use console::style;
|
||||
use solana_clap_utils::{input_parsers::*, input_validators::*, offline::*, ArgConstant};
|
||||
use solana_client::rpc_client::RpcClient;
|
||||
use solana_remote_wallet::remote_wallet::RemoteWalletManager;
|
||||
@@ -314,6 +313,14 @@ impl StakeSubCommands for App<'_, '_> {
|
||||
.arg(nonce_arg())
|
||||
.arg(nonce_authority_arg())
|
||||
.arg(fee_payer_arg())
|
||||
.arg(
|
||||
Arg::with_name("custodian")
|
||||
.long("custodian")
|
||||
.takes_value(true)
|
||||
.value_name("KEYPAIR")
|
||||
.validator(is_valid_signer)
|
||||
.help("Authority to override account lockup")
|
||||
)
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("stake-set-lockup")
|
||||
@@ -403,7 +410,7 @@ impl StakeSubCommands for App<'_, '_> {
|
||||
pub fn parse_stake_create_account(
|
||||
matches: &ArgMatches<'_>,
|
||||
default_signer_path: &str,
|
||||
wallet_manager: Option<&Arc<RemoteWalletManager>>,
|
||||
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
|
||||
) -> Result<CliCommandInfo, CliError> {
|
||||
let seed = matches.value_of("seed").map(|s| s.to_string());
|
||||
let epoch = value_of(matches, "lockup_epoch").unwrap_or(0);
|
||||
@@ -455,7 +462,7 @@ pub fn parse_stake_create_account(
|
||||
pub fn parse_stake_delegate_stake(
|
||||
matches: &ArgMatches<'_>,
|
||||
default_signer_path: &str,
|
||||
wallet_manager: Option<&Arc<RemoteWalletManager>>,
|
||||
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
|
||||
) -> Result<CliCommandInfo, CliError> {
|
||||
let stake_account_pubkey =
|
||||
pubkey_of_signer(matches, "stake_account_pubkey", wallet_manager)?.unwrap();
|
||||
@@ -497,7 +504,7 @@ pub fn parse_stake_delegate_stake(
|
||||
pub fn parse_stake_authorize(
|
||||
matches: &ArgMatches<'_>,
|
||||
default_signer_path: &str,
|
||||
wallet_manager: Option<&Arc<RemoteWalletManager>>,
|
||||
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
|
||||
) -> Result<CliCommandInfo, CliError> {
|
||||
let stake_account_pubkey =
|
||||
pubkey_of_signer(matches, "stake_account_pubkey", wallet_manager)?.unwrap();
|
||||
@@ -580,7 +587,7 @@ pub fn parse_stake_authorize(
|
||||
pub fn parse_split_stake(
|
||||
matches: &ArgMatches<'_>,
|
||||
default_signer_path: &str,
|
||||
wallet_manager: Option<&Arc<RemoteWalletManager>>,
|
||||
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
|
||||
) -> Result<CliCommandInfo, CliError> {
|
||||
let stake_account_pubkey =
|
||||
pubkey_of_signer(matches, "stake_account_pubkey", wallet_manager)?.unwrap();
|
||||
@@ -625,7 +632,7 @@ pub fn parse_split_stake(
|
||||
pub fn parse_stake_deactivate_stake(
|
||||
matches: &ArgMatches<'_>,
|
||||
default_signer_path: &str,
|
||||
wallet_manager: Option<&Arc<RemoteWalletManager>>,
|
||||
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
|
||||
) -> Result<CliCommandInfo, CliError> {
|
||||
let stake_account_pubkey =
|
||||
pubkey_of_signer(matches, "stake_account_pubkey", wallet_manager)?.unwrap();
|
||||
@@ -662,7 +669,7 @@ pub fn parse_stake_deactivate_stake(
|
||||
pub fn parse_stake_withdraw_stake(
|
||||
matches: &ArgMatches<'_>,
|
||||
default_signer_path: &str,
|
||||
wallet_manager: Option<&Arc<RemoteWalletManager>>,
|
||||
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
|
||||
) -> Result<CliCommandInfo, CliError> {
|
||||
let stake_account_pubkey =
|
||||
pubkey_of_signer(matches, "stake_account_pubkey", wallet_manager)?.unwrap();
|
||||
@@ -677,11 +684,15 @@ pub fn parse_stake_withdraw_stake(
|
||||
let (nonce_authority, nonce_authority_pubkey) =
|
||||
signer_of(matches, NONCE_AUTHORITY_ARG.name, wallet_manager)?;
|
||||
let (fee_payer, fee_payer_pubkey) = signer_of(matches, FEE_PAYER_ARG.name, wallet_manager)?;
|
||||
let (custodian, custodian_pubkey) = signer_of(matches, "custodian", wallet_manager)?;
|
||||
|
||||
let mut bulk_signers = vec![withdraw_authority, fee_payer];
|
||||
if nonce_account.is_some() {
|
||||
bulk_signers.push(nonce_authority);
|
||||
}
|
||||
if custodian.is_some() {
|
||||
bulk_signers.push(custodian);
|
||||
}
|
||||
let signer_info =
|
||||
generate_unique_signers(bulk_signers, matches, default_signer_path, wallet_manager)?;
|
||||
|
||||
@@ -696,6 +707,7 @@ pub fn parse_stake_withdraw_stake(
|
||||
nonce_account,
|
||||
nonce_authority: signer_info.index_of(nonce_authority_pubkey).unwrap(),
|
||||
fee_payer: signer_info.index_of(fee_payer_pubkey).unwrap(),
|
||||
custodian: custodian_pubkey.and_then(|_| signer_info.index_of(custodian_pubkey)),
|
||||
},
|
||||
signers: signer_info.signers,
|
||||
})
|
||||
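The new --custodian plumbing above follows the CLI's usual optional-signer pattern: the keypair, if provided, is appended to bulk_signers, and only its index into the deduplicated signer list is stored on the WithdrawStake command (custodian: Option<SignerIndex>); process_withdraw_stake later resolves that index back into a pubkey for stake_instruction::withdraw. A small sketch of that index-to-pubkey step, with illustrative names:

use solana_sdk::{pubkey::Pubkey, signature::Signer};

// `signers` stands in for config.signers; the index is resolved back to a
// pubkey only when a --custodian signer was actually provided.
fn custodian_pubkey(signers: &[&dyn Signer], custodian: Option<usize>) -> Option<Pubkey> {
    custodian.map(|index| signers[index].pubkey())
}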
@@ -704,7 +716,7 @@ pub fn parse_stake_withdraw_stake(
|
||||
pub fn parse_stake_set_lockup(
|
||||
matches: &ArgMatches<'_>,
|
||||
default_signer_path: &str,
|
||||
wallet_manager: Option<&Arc<RemoteWalletManager>>,
|
||||
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
|
||||
) -> Result<CliCommandInfo, CliError> {
|
||||
let stake_account_pubkey =
|
||||
pubkey_of_signer(matches, "stake_account_pubkey", wallet_manager)?.unwrap();
|
||||
@@ -749,7 +761,7 @@ pub fn parse_stake_set_lockup(
|
||||
|
||||
pub fn parse_show_stake_account(
|
||||
matches: &ArgMatches<'_>,
|
||||
wallet_manager: Option<&Arc<RemoteWalletManager>>,
|
||||
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
|
||||
) -> Result<CliCommandInfo, CliError> {
|
||||
let stake_account_pubkey =
|
||||
pubkey_of_signer(matches, "stake_account_pubkey", wallet_manager)?.unwrap();
|
||||
@@ -1014,6 +1026,7 @@ pub fn process_withdraw_stake(
|
||||
destination_account_pubkey: &Pubkey,
|
||||
lamports: u64,
|
||||
withdraw_authority: SignerIndex,
|
||||
custodian: Option<SignerIndex>,
|
||||
sign_only: bool,
|
||||
blockhash_query: &BlockhashQuery,
|
||||
nonce_account: Option<&Pubkey>,
|
||||
@@ -1023,12 +1036,14 @@ pub fn process_withdraw_stake(
|
||||
let (recent_blockhash, fee_calculator) =
|
||||
blockhash_query.get_blockhash_and_fee_calculator(rpc_client)?;
|
||||
let withdraw_authority = config.signers[withdraw_authority];
|
||||
let custodian = custodian.map(|index| config.signers[index]);
|
||||
|
||||
let ixs = vec![stake_instruction::withdraw(
|
||||
stake_account_pubkey,
|
||||
&withdraw_authority.pubkey(),
|
||||
destination_account_pubkey,
|
||||
lamports,
|
||||
custodian.map(|signer| signer.pubkey()).as_ref(),
|
||||
)];
|
||||
|
||||
let fee_payer = config.signers[fee_payer];
|
||||
@@ -1257,79 +1272,61 @@ pub fn process_stake_set_lockup(
|
||||
}
|
||||
}
|
||||
|
||||
pub fn print_stake_state(stake_lamports: u64, stake_state: &StakeState, use_lamports_unit: bool) {
|
||||
fn show_authorized(authorized: &Authorized) {
|
||||
println!("Stake Authority: {}", authorized.staker);
|
||||
println!("Withdraw Authority: {}", authorized.withdrawer);
|
||||
}
|
||||
fn show_lockup(lockup: &Lockup) {
|
||||
println!(
|
||||
"Lockup Timestamp: {} (UnixTimestamp: {})",
|
||||
DateTime::<Utc>::from_utc(NaiveDateTime::from_timestamp(lockup.unix_timestamp, 0), Utc)
|
||||
.to_rfc3339_opts(SecondsFormat::Secs, true),
|
||||
lockup.unix_timestamp
|
||||
);
|
||||
println!("Lockup Epoch: {}", lockup.epoch);
|
||||
println!("Lockup Custodian: {}", lockup.custodian);
|
||||
}
|
||||
pub fn build_stake_state(
|
||||
stake_lamports: u64,
|
||||
stake_state: &StakeState,
|
||||
use_lamports_unit: bool,
|
||||
) -> CliStakeState {
|
||||
match stake_state {
|
||||
StakeState::Stake(
|
||||
Meta {
|
||||
authorized, lockup, ..
|
||||
},
|
||||
stake,
|
||||
) => {
|
||||
println!(
|
||||
"Total Stake: {}",
|
||||
build_balance_message(stake_lamports, use_lamports_unit, true)
|
||||
);
|
||||
println!("Credits Observed: {}", stake.credits_observed);
|
||||
println!(
|
||||
"Delegated Stake: {}",
|
||||
build_balance_message(stake.delegation.stake, use_lamports_unit, true)
|
||||
);
|
||||
if stake.delegation.voter_pubkey != Pubkey::default() {
|
||||
println!(
|
||||
"Delegated Vote Account Address: {}",
|
||||
stake.delegation.voter_pubkey
|
||||
);
|
||||
}
|
||||
println!(
|
||||
"Stake activates starting from epoch: {}",
|
||||
if stake.delegation.activation_epoch < std::u64::MAX {
|
||||
stake.delegation.activation_epoch
|
||||
} else {
|
||||
0
|
||||
}
|
||||
);
|
||||
if stake.delegation.deactivation_epoch < std::u64::MAX {
|
||||
println!(
|
||||
"Stake deactivates starting from epoch: {}",
|
||||
stake.delegation.deactivation_epoch
|
||||
);
|
||||
}
|
||||
show_authorized(&authorized);
|
||||
show_lockup(&lockup);
|
||||
}
|
||||
StakeState::RewardsPool => println!("Stake account is a rewards pool"),
|
||||
StakeState::Uninitialized => println!("Stake account is uninitialized"),
|
||||
) => CliStakeState {
|
||||
stake_type: CliStakeType::Stake,
|
||||
total_stake: stake_lamports,
|
||||
delegated_stake: Some(stake.delegation.stake),
|
||||
delegated_vote_account_address: if stake.delegation.voter_pubkey != Pubkey::default() {
|
||||
Some(stake.delegation.voter_pubkey.to_string())
|
||||
} else {
|
||||
None
|
||||
},
|
||||
activation_epoch: Some(if stake.delegation.activation_epoch < std::u64::MAX {
|
||||
stake.delegation.activation_epoch
|
||||
} else {
|
||||
0
|
||||
}),
|
||||
deactivation_epoch: if stake.delegation.deactivation_epoch < std::u64::MAX {
|
||||
Some(stake.delegation.deactivation_epoch)
|
||||
} else {
|
||||
None
|
||||
},
|
||||
authorized: Some(authorized.into()),
|
||||
lockup: Some(lockup.into()),
|
||||
use_lamports_unit,
|
||||
},
|
||||
StakeState::RewardsPool => CliStakeState {
|
||||
stake_type: CliStakeType::RewardsPool,
|
||||
..CliStakeState::default()
|
||||
},
|
||||
StakeState::Uninitialized => CliStakeState::default(),
|
||||
StakeState::Initialized(Meta {
|
||||
authorized, lockup, ..
|
||||
}) => {
|
||||
println!(
|
||||
"Total Stake: {}",
|
||||
build_balance_message(stake_lamports, use_lamports_unit, true)
|
||||
);
|
||||
println!("Stake account is undelegated");
|
||||
show_authorized(&authorized);
|
||||
show_lockup(&lockup);
|
||||
}
|
||||
}) => CliStakeState {
|
||||
stake_type: CliStakeType::Initialized,
|
||||
total_stake: stake_lamports,
|
||||
authorized: Some(authorized.into()),
|
||||
lockup: Some(lockup.into()),
|
||||
use_lamports_unit,
|
||||
..CliStakeState::default()
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
pub fn process_show_stake_account(
|
||||
rpc_client: &RpcClient,
|
||||
_config: &CliConfig,
|
||||
config: &CliConfig,
|
||||
stake_account_pubkey: &Pubkey,
|
||||
use_lamports_unit: bool,
|
||||
) -> ProcessResult {
|
||||
@@ -1343,7 +1340,8 @@ pub fn process_show_stake_account(
|
||||
}
|
||||
match stake_account.state() {
|
||||
Ok(stake_state) => {
|
||||
print_stake_state(stake_account.lamports, &stake_state, use_lamports_unit);
|
||||
let state = build_stake_state(stake_account.lamports, &stake_state, use_lamports_unit);
|
||||
config.output_format.formatted_print(&state);
|
||||
Ok("".to_string())
|
||||
}
|
||||
Err(err) => Err(CliError::RpcRequestError(format!(
|
||||
@@ -1356,7 +1354,7 @@ pub fn process_show_stake_account(
|
||||
|
||||
pub fn process_show_stake_history(
|
||||
rpc_client: &RpcClient,
|
||||
_config: &CliConfig,
|
||||
config: &CliConfig,
|
||||
use_lamports_unit: bool,
|
||||
) -> ProcessResult {
|
||||
let stake_history_account = rpc_client.get_account(&stake_history::id())?;
|
||||
@@ -1364,26 +1362,15 @@ pub fn process_show_stake_history(
|
||||
CliError::RpcRequestError("Failed to deserialize stake history".to_string())
|
||||
})?;
|
||||
|
||||
println!();
|
||||
println!(
|
||||
"{}",
|
||||
style(format!(
|
||||
" {:<5} {:>20} {:>20} {:>20}",
|
||||
"Epoch", "Effective Stake", "Activating Stake", "Deactivating Stake",
|
||||
))
|
||||
.bold()
|
||||
);
|
||||
|
||||
for (epoch, entry) in stake_history.deref() {
|
||||
println!(
|
||||
" {:>5} {:>20} {:>20} {:>20} {}",
|
||||
epoch,
|
||||
build_balance_message(entry.effective, use_lamports_unit, false),
|
||||
build_balance_message(entry.activating, use_lamports_unit, false),
|
||||
build_balance_message(entry.deactivating, use_lamports_unit, false),
|
||||
if use_lamports_unit { "lamports" } else { "SOL" }
|
||||
);
|
||||
let mut entries: Vec<CliStakeHistoryEntry> = vec![];
|
||||
for entry in stake_history.deref() {
|
||||
entries.push(entry.into());
|
||||
}
|
||||
let stake_history_output = CliStakeHistory {
|
||||
entries,
|
||||
use_lamports_unit,
|
||||
};
|
||||
config.output_format.formatted_print(&stake_history_output);
|
||||
Ok("".to_string())
|
||||
}
|
||||
|
||||
@@ -1525,6 +1512,9 @@ mod tests {
|
||||
let (stake_authority_keypair_file, mut tmp_file) = make_tmp_file();
|
||||
let stake_authority_keypair = Keypair::new();
|
||||
write_keypair(&stake_authority_keypair, tmp_file.as_file_mut()).unwrap();
|
||||
let (custodian_keypair_file, mut tmp_file) = make_tmp_file();
|
||||
let custodian_keypair = Keypair::new();
|
||||
write_keypair(&custodian_keypair, tmp_file.as_file_mut()).unwrap();
|
||||
|
||||
// stake-authorize subcommand
|
||||
let stake_account_string = stake_account_pubkey.to_string();
|
||||
@@ -1542,7 +1532,7 @@ mod tests {
|
||||
&new_withdraw_string,
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_stake_authorize, &default_keypair_file, None).unwrap(),
|
||||
parse_command(&test_stake_authorize, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::StakeAuthorize {
|
||||
stake_account_pubkey,
|
||||
@@ -1576,7 +1566,7 @@ mod tests {
|
||||
&withdraw_authority_keypair_file,
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_stake_authorize, &default_keypair_file, None).unwrap(),
|
||||
parse_command(&test_stake_authorize, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::StakeAuthorize {
|
||||
stake_account_pubkey,
|
||||
@@ -1614,7 +1604,7 @@ mod tests {
|
||||
&withdraw_authority_keypair_file,
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_stake_authorize, &default_keypair_file, None).unwrap(),
|
||||
parse_command(&test_stake_authorize, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::StakeAuthorize {
|
||||
stake_account_pubkey,
|
||||
@@ -1644,7 +1634,7 @@ mod tests {
|
||||
&new_stake_string,
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_stake_authorize, &default_keypair_file, None).unwrap(),
|
||||
parse_command(&test_stake_authorize, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::StakeAuthorize {
|
||||
stake_account_pubkey,
|
||||
@@ -1668,7 +1658,7 @@ mod tests {
|
||||
&stake_authority_keypair_file,
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_stake_authorize, &default_keypair_file, None).unwrap(),
|
||||
parse_command(&test_stake_authorize, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::StakeAuthorize {
|
||||
stake_account_pubkey,
|
||||
@@ -1698,7 +1688,7 @@ mod tests {
|
||||
&withdraw_authority_keypair_file,
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_stake_authorize, &default_keypair_file, None).unwrap(),
|
||||
parse_command(&test_stake_authorize, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::StakeAuthorize {
|
||||
stake_account_pubkey,
|
||||
@@ -1725,7 +1715,7 @@ mod tests {
|
||||
&new_withdraw_string,
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_stake_authorize, &default_keypair_file, None).unwrap(),
|
||||
parse_command(&test_stake_authorize, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::StakeAuthorize {
|
||||
stake_account_pubkey,
|
||||
@@ -1753,7 +1743,7 @@ mod tests {
|
||||
&withdraw_authority_keypair_file,
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_stake_authorize, &default_keypair_file, None).unwrap(),
|
||||
parse_command(&test_stake_authorize, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::StakeAuthorize {
|
||||
stake_account_pubkey,
|
||||
@@ -1791,7 +1781,7 @@ mod tests {
|
||||
"--sign-only",
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_authorize, &default_keypair_file, None).unwrap(),
|
||||
parse_command(&test_authorize, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::StakeAuthorize {
|
||||
stake_account_pubkey,
|
||||
@@ -1824,7 +1814,7 @@ mod tests {
|
||||
&pubkey.to_string(),
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_authorize, &default_keypair_file, None).unwrap(),
|
||||
parse_command(&test_authorize, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::StakeAuthorize {
|
||||
stake_account_pubkey,
|
||||
@@ -1870,7 +1860,7 @@ mod tests {
|
||||
&pubkey2.to_string(),
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_authorize, &default_keypair_file, None).unwrap(),
|
||||
parse_command(&test_authorize, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::StakeAuthorize {
|
||||
stake_account_pubkey,
|
||||
@@ -1902,7 +1892,7 @@ mod tests {
|
||||
&blockhash_string,
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_authorize, &default_keypair_file, None).unwrap(),
|
||||
parse_command(&test_authorize, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::StakeAuthorize {
|
||||
stake_account_pubkey,
|
||||
@@ -1939,7 +1929,7 @@ mod tests {
|
||||
&nonce_keypair_file,
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_authorize, &default_keypair_file, None).unwrap(),
|
||||
parse_command(&test_authorize, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::StakeAuthorize {
|
||||
stake_account_pubkey,
|
||||
@@ -1975,7 +1965,7 @@ mod tests {
|
||||
&fee_payer_keypair_file,
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_authorize, &default_keypair_file, None).unwrap(),
|
||||
parse_command(&test_authorize, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::StakeAuthorize {
|
||||
stake_account_pubkey,
|
||||
@@ -2009,7 +1999,7 @@ mod tests {
|
||||
&signer,
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_authorize, &default_keypair_file, None).unwrap(),
|
||||
parse_command(&test_authorize, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::StakeAuthorize {
|
||||
stake_account_pubkey,
|
||||
@@ -2050,7 +2040,7 @@ mod tests {
|
||||
"43",
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_create_stake_account, &default_keypair_file, None).unwrap(),
|
||||
parse_command(&test_create_stake_account, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::CreateStakeAccount {
|
||||
stake_account: 1,
|
||||
@@ -2091,7 +2081,12 @@ mod tests {
|
||||
]);
|
||||
|
||||
assert_eq!(
|
||||
parse_command(&test_create_stake_account2, &default_keypair_file, None).unwrap(),
|
||||
parse_command(
|
||||
&test_create_stake_account2,
|
||||
&default_keypair_file,
|
||||
&mut None
|
||||
)
|
||||
.unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::CreateStakeAccount {
|
||||
stake_account: 1,
|
||||
@@ -2144,7 +2139,12 @@ mod tests {
|
||||
]);
|
||||
|
||||
assert_eq!(
|
||||
parse_command(&test_create_stake_account2, &default_keypair_file, None).unwrap(),
|
||||
parse_command(
|
||||
&test_create_stake_account2,
|
||||
&default_keypair_file,
|
||||
&mut None
|
||||
)
|
||||
.unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::CreateStakeAccount {
|
||||
stake_account: 1,
|
||||
@@ -2180,7 +2180,7 @@ mod tests {
|
||||
&vote_account_string,
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_delegate_stake, &default_keypair_file, None).unwrap(),
|
||||
parse_command(&test_delegate_stake, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::DelegateStake {
|
||||
stake_account_pubkey,
|
||||
@@ -2209,7 +2209,7 @@ mod tests {
|
||||
&stake_authority_keypair_file,
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_delegate_stake, &default_keypair_file, None).unwrap(),
|
||||
parse_command(&test_delegate_stake, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::DelegateStake {
|
||||
stake_account_pubkey,
|
||||
@@ -2240,7 +2240,7 @@ mod tests {
|
||||
&vote_account_string,
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_delegate_stake, &default_keypair_file, None).unwrap(),
|
||||
parse_command(&test_delegate_stake, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::DelegateStake {
|
||||
stake_account_pubkey,
|
||||
@@ -2269,7 +2269,7 @@ mod tests {
|
||||
&blockhash_string,
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_delegate_stake, &default_keypair_file, None).unwrap(),
|
||||
parse_command(&test_delegate_stake, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::DelegateStake {
|
||||
stake_account_pubkey,
|
||||
@@ -2299,7 +2299,7 @@ mod tests {
|
||||
"--sign-only",
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_delegate_stake, &default_keypair_file, None).unwrap(),
|
||||
parse_command(&test_delegate_stake, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::DelegateStake {
|
||||
stake_account_pubkey,
|
||||
@@ -2333,7 +2333,7 @@ mod tests {
|
||||
&key1.to_string(),
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_delegate_stake, &default_keypair_file, None).unwrap(),
|
||||
parse_command(&test_delegate_stake, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::DelegateStake {
|
||||
stake_account_pubkey,
|
||||
@@ -2379,7 +2379,7 @@ mod tests {
|
||||
&key2.to_string(),
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_delegate_stake, &default_keypair_file, None).unwrap(),
|
||||
parse_command(&test_delegate_stake, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::DelegateStake {
|
||||
stake_account_pubkey,
|
||||
@@ -2416,7 +2416,7 @@ mod tests {
|
||||
&fee_payer_keypair_file,
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_delegate_stake, &default_keypair_file, None).unwrap(),
|
||||
parse_command(&test_delegate_stake, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::DelegateStake {
|
||||
stake_account_pubkey,
|
||||
@@ -2446,13 +2446,14 @@ mod tests {
|
||||
]);
|
||||
|
||||
assert_eq!(
|
||||
parse_command(&test_withdraw_stake, &default_keypair_file, None).unwrap(),
|
||||
parse_command(&test_withdraw_stake, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::WithdrawStake {
|
||||
stake_account_pubkey,
|
||||
destination_account_pubkey: stake_account_pubkey,
|
||||
lamports: 42_000_000_000,
|
||||
withdraw_authority: 0,
|
||||
custodian: None,
|
||||
sign_only: false,
|
||||
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
|
||||
nonce_account: None,
|
||||
@@ -2475,13 +2476,14 @@ mod tests {
|
||||
]);
|
||||
|
||||
assert_eq!(
|
||||
parse_command(&test_withdraw_stake, &default_keypair_file, None).unwrap(),
|
||||
parse_command(&test_withdraw_stake, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::WithdrawStake {
|
||||
stake_account_pubkey,
|
||||
destination_account_pubkey: stake_account_pubkey,
|
||||
lamports: 42_000_000_000,
|
||||
withdraw_authority: 1,
|
||||
custodian: None,
|
||||
sign_only: false,
|
||||
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
|
||||
nonce_account: None,
|
||||
@@ -2497,6 +2499,39 @@ mod tests {
|
||||
}
|
||||
);
|
||||
|
||||
// Test WithdrawStake Subcommand w/ custodian
|
||||
let test_withdraw_stake = test_commands.clone().get_matches_from(vec![
|
||||
"test",
|
||||
"withdraw-stake",
|
||||
&stake_account_string,
|
||||
&stake_account_string,
|
||||
"42",
|
||||
"--custodian",
|
||||
&custodian_keypair_file,
|
||||
]);
|
||||
|
||||
assert_eq!(
|
||||
parse_command(&test_withdraw_stake, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::WithdrawStake {
|
||||
stake_account_pubkey,
|
||||
destination_account_pubkey: stake_account_pubkey,
|
||||
lamports: 42_000_000_000,
|
||||
withdraw_authority: 0,
|
||||
custodian: Some(1),
|
||||
sign_only: false,
|
||||
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
fee_payer: 0,
|
||||
},
|
||||
signers: vec![
|
||||
read_keypair_file(&default_keypair_file).unwrap().into(),
|
||||
read_keypair_file(&custodian_keypair_file).unwrap().into()
|
||||
],
|
||||
}
|
||||
);
|
||||
|
||||
// Test WithdrawStake Subcommand w/ authority and offline nonce
|
||||
let test_withdraw_stake = test_commands.clone().get_matches_from(vec![
|
||||
"test",
|
||||
@@ -2519,13 +2554,14 @@ mod tests {
|
||||
]);
|
||||
|
||||
assert_eq!(
|
||||
parse_command(&test_withdraw_stake, &default_keypair_file, None).unwrap(),
|
||||
parse_command(&test_withdraw_stake, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::WithdrawStake {
|
||||
stake_account_pubkey,
|
||||
destination_account_pubkey: stake_account_pubkey,
|
||||
lamports: 42_000_000_000,
|
||||
withdraw_authority: 0,
|
||||
custodian: None,
|
||||
sign_only: false,
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(
|
||||
blockhash_query::Source::NonceAccount(nonce_account),
|
||||
@@ -2551,7 +2587,7 @@ mod tests {
|
||||
&stake_account_string,
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_deactivate_stake, &default_keypair_file, None).unwrap(),
|
||||
parse_command(&test_deactivate_stake, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::DeactivateStake {
|
||||
stake_account_pubkey,
|
||||
@@ -2575,7 +2611,7 @@ mod tests {
|
||||
&stake_authority_keypair_file,
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_deactivate_stake, &default_keypair_file, None).unwrap(),
|
||||
parse_command(&test_deactivate_stake, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::DeactivateStake {
|
||||
stake_account_pubkey,
|
||||
@@ -2606,7 +2642,7 @@ mod tests {
|
||||
&blockhash_string,
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_deactivate_stake, &default_keypair_file, None).unwrap(),
|
||||
parse_command(&test_deactivate_stake, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::DeactivateStake {
|
||||
stake_account_pubkey,
|
||||
@@ -2633,7 +2669,7 @@ mod tests {
|
||||
"--sign-only",
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_deactivate_stake, &default_keypair_file, None).unwrap(),
|
||||
parse_command(&test_deactivate_stake, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::DeactivateStake {
|
||||
stake_account_pubkey,
|
||||
@@ -2664,7 +2700,7 @@ mod tests {
|
||||
&key1.to_string(),
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_deactivate_stake, &default_keypair_file, None).unwrap(),
|
||||
parse_command(&test_deactivate_stake, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::DeactivateStake {
|
||||
stake_account_pubkey,
|
||||
@@ -2707,7 +2743,7 @@ mod tests {
|
||||
&key2.to_string(),
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_deactivate_stake, &default_keypair_file, None).unwrap(),
|
||||
parse_command(&test_deactivate_stake, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::DeactivateStake {
|
||||
stake_account_pubkey,
|
||||
@@ -2738,7 +2774,7 @@ mod tests {
|
||||
&fee_payer_keypair_file,
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_deactivate_stake, &default_keypair_file, None).unwrap(),
|
||||
parse_command(&test_deactivate_stake, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::DeactivateStake {
|
||||
stake_account_pubkey,
|
||||
@@ -2772,7 +2808,7 @@ mod tests {
|
||||
"50",
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_split_stake_account, &default_keypair_file, None).unwrap(),
|
||||
parse_command(&test_split_stake_account, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::SplitStake {
|
||||
stake_account_pubkey: stake_account_keypair.pubkey(),
|
||||
@@ -2833,7 +2869,7 @@ mod tests {
|
||||
&stake_signer,
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_split_stake_account, &default_keypair_file, None).unwrap(),
|
||||
parse_command(&test_split_stake_account, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::SplitStake {
|
||||
stake_account_pubkey: stake_account_keypair.pubkey(),
|
||||
|
@@ -102,7 +102,7 @@ impl StorageSubCommands for App<'_, '_> {
|
||||
pub fn parse_storage_create_archiver_account(
|
||||
matches: &ArgMatches<'_>,
|
||||
default_signer_path: &str,
|
||||
wallet_manager: Option<&Arc<RemoteWalletManager>>,
|
||||
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
|
||||
) -> Result<CliCommandInfo, CliError> {
|
||||
let account_owner =
|
||||
pubkey_of_signer(matches, "storage_account_owner", wallet_manager)?.unwrap();
|
||||
@@ -130,7 +130,7 @@ pub fn parse_storage_create_archiver_account(
|
||||
pub fn parse_storage_create_validator_account(
|
||||
matches: &ArgMatches<'_>,
|
||||
default_signer_path: &str,
|
||||
wallet_manager: Option<&Arc<RemoteWalletManager>>,
|
||||
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
|
||||
) -> Result<CliCommandInfo, CliError> {
|
||||
let account_owner =
|
||||
pubkey_of_signer(matches, "storage_account_owner", wallet_manager)?.unwrap();
|
||||
@@ -158,7 +158,7 @@ pub fn parse_storage_create_validator_account(
|
||||
pub fn parse_storage_claim_reward(
|
||||
matches: &ArgMatches<'_>,
|
||||
default_signer_path: &str,
|
||||
wallet_manager: Option<&Arc<RemoteWalletManager>>,
|
||||
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
|
||||
) -> Result<CliCommandInfo, CliError> {
|
||||
let node_account_pubkey =
|
||||
pubkey_of_signer(matches, "node_account_pubkey", wallet_manager)?.unwrap();
|
||||
@@ -180,7 +180,7 @@ pub fn parse_storage_claim_reward(
|
||||
|
||||
pub fn parse_storage_get_account_command(
|
||||
matches: &ArgMatches<'_>,
|
||||
wallet_manager: Option<&Arc<RemoteWalletManager>>,
|
||||
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
|
||||
) -> Result<CliCommandInfo, CliError> {
|
||||
let storage_account_pubkey =
|
||||
pubkey_of_signer(matches, "storage_account_pubkey", wallet_manager)?.unwrap();
|
||||
@@ -330,7 +330,7 @@ mod tests {
|
||||
parse_command(
|
||||
&test_create_archiver_storage_account,
|
||||
&default_keypair_file,
|
||||
None
|
||||
&mut None
|
||||
)
|
||||
.unwrap(),
|
||||
CliCommandInfo {
|
||||
@@ -362,7 +362,7 @@ mod tests {
|
||||
parse_command(
|
||||
&test_create_validator_storage_account,
|
||||
&default_keypair_file,
|
||||
None
|
||||
&mut None
|
||||
)
|
||||
.unwrap(),
|
||||
CliCommandInfo {
|
||||
@@ -385,7 +385,7 @@ mod tests {
|
||||
&storage_account_string,
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_claim_storage_reward, &default_keypair_file, None).unwrap(),
|
||||
parse_command(&test_claim_storage_reward, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::ClaimStorageReward {
|
||||
node_account_pubkey: pubkey,
|
||||
|
@@ -1,6 +1,6 @@
|
||||
use crate::{
|
||||
cli::{check_account_for_fee, CliCommand, CliCommandInfo, CliConfig, CliError, ProcessResult},
|
||||
display::println_name_value,
|
||||
cli_output::{CliValidatorInfo, CliValidatorInfoVec},
|
||||
};
|
||||
use bincode::deserialize;
|
||||
use clap::{App, AppSettings, Arg, ArgMatches, SubCommand};
|
||||
@@ -25,7 +25,6 @@ use solana_sdk::{
|
||||
transaction::Transaction,
|
||||
};
|
||||
use std::{error, sync::Arc};
|
||||
use titlecase::titlecase;
|
||||
|
||||
pub const MAX_SHORT_FIELD_LENGTH: usize = 70;
|
||||
pub const MAX_LONG_FIELD_LENGTH: usize = 300;
|
||||
@@ -229,7 +228,7 @@ impl ValidatorInfoSubCommands for App<'_, '_> {
|
||||
pub fn parse_validator_info_command(
|
||||
matches: &ArgMatches<'_>,
|
||||
default_signer_path: &str,
|
||||
wallet_manager: Option<&Arc<RemoteWalletManager>>,
|
||||
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
|
||||
) -> Result<CliCommandInfo, CliError> {
|
||||
let info_pubkey = pubkey_of(matches, "info_pubkey");
|
||||
// Prepare validator info
|
||||
@@ -375,7 +374,11 @@ pub fn process_set_validator_info(
|
||||
Ok("".to_string())
|
||||
}
|
||||
|
||||
pub fn process_get_validator_info(rpc_client: &RpcClient, pubkey: Option<Pubkey>) -> ProcessResult {
|
||||
pub fn process_get_validator_info(
|
||||
rpc_client: &RpcClient,
|
||||
config: &CliConfig,
|
||||
pubkey: Option<Pubkey>,
|
||||
) -> ProcessResult {
|
||||
let validator_info: Vec<(Pubkey, Account)> = if let Some(validator_info_pubkey) = pubkey {
|
||||
vec![(
|
||||
validator_info_pubkey,
|
||||
@@ -394,23 +397,22 @@ pub fn process_get_validator_info(rpc_client: &RpcClient, pubkey: Option<Pubkey>
|
||||
.collect()
|
||||
};
|
||||
|
||||
let mut validator_info_list: Vec<CliValidatorInfo> = vec![];
|
||||
if validator_info.is_empty() {
|
||||
println!("No validator info accounts found");
|
||||
}
|
||||
for (validator_info_pubkey, validator_info_account) in validator_info.iter() {
|
||||
let (validator_pubkey, validator_info) =
|
||||
parse_validator_info(&validator_info_pubkey, &validator_info_account)?;
|
||||
println!();
|
||||
println_name_value("Validator Identity Pubkey:", &validator_pubkey.to_string());
|
||||
println_name_value(" Info Pubkey:", &validator_info_pubkey.to_string());
|
||||
for (key, value) in validator_info.iter() {
|
||||
println_name_value(
|
||||
&format!(" {}:", titlecase(key)),
|
||||
&value.as_str().unwrap_or("?"),
|
||||
);
|
||||
}
|
||||
validator_info_list.push(CliValidatorInfo {
|
||||
identity_pubkey: validator_pubkey.to_string(),
|
||||
info_pubkey: validator_info_pubkey.to_string(),
|
||||
info: validator_info,
|
||||
});
|
||||
}
|
||||
|
||||
config
|
||||
.output_format
|
||||
.formatted_print(&CliValidatorInfoVec::new(validator_info_list));
|
||||
Ok("".to_string())
|
||||
}
|
||||
|
||||
|
@@ -1,7 +1,10 @@
|
||||
use crate::cli::{
|
||||
build_balance_message, check_account_for_fee, check_unique_pubkeys, generate_unique_signers,
|
||||
log_instruction_custom_error, CliCommand, CliCommandInfo, CliConfig, CliError, ProcessResult,
|
||||
SignerIndex,
|
||||
use crate::{
|
||||
cli::{
|
||||
check_account_for_fee, check_unique_pubkeys, generate_unique_signers,
|
||||
log_instruction_custom_error, CliCommand, CliCommandInfo, CliConfig, CliError,
|
||||
ProcessResult, SignerIndex,
|
||||
},
|
||||
cli_output::{CliEpochVotingHistory, CliLockout, CliVoteAccount},
|
||||
};
|
||||
use clap::{value_t_or_exit, App, Arg, ArgMatches, SubCommand};
|
||||
use solana_clap_utils::{input_parsers::*, input_validators::*};
|
||||
@@ -240,7 +243,7 @@ impl VoteSubCommands for App<'_, '_> {
|
||||
pub fn parse_create_vote_account(
|
||||
matches: &ArgMatches<'_>,
|
||||
default_signer_path: &str,
|
||||
wallet_manager: Option<&Arc<RemoteWalletManager>>,
|
||||
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
|
||||
) -> Result<CliCommandInfo, CliError> {
|
||||
let (vote_account, _) = signer_of(matches, "vote_account", wallet_manager)?;
|
||||
let seed = matches.value_of("seed").map(|s| s.to_string());
|
||||
@@ -273,7 +276,7 @@ pub fn parse_create_vote_account(
|
||||
pub fn parse_vote_authorize(
|
||||
matches: &ArgMatches<'_>,
|
||||
default_signer_path: &str,
|
||||
wallet_manager: Option<&Arc<RemoteWalletManager>>,
|
||||
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
|
||||
vote_authorize: VoteAuthorize,
|
||||
) -> Result<CliCommandInfo, CliError> {
|
||||
let vote_account_pubkey =
|
||||
@@ -303,7 +306,7 @@ pub fn parse_vote_authorize(
|
||||
pub fn parse_vote_update_validator(
|
||||
matches: &ArgMatches<'_>,
|
||||
default_signer_path: &str,
|
||||
wallet_manager: Option<&Arc<RemoteWalletManager>>,
|
||||
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
|
||||
) -> Result<CliCommandInfo, CliError> {
|
||||
let vote_account_pubkey =
|
||||
pubkey_of_signer(matches, "vote_account_pubkey", wallet_manager)?.unwrap();
|
||||
@@ -330,7 +333,7 @@ pub fn parse_vote_update_validator(
|
||||
|
||||
pub fn parse_vote_get_account_command(
|
||||
matches: &ArgMatches<'_>,
|
||||
wallet_manager: Option<&Arc<RemoteWalletManager>>,
|
||||
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
|
||||
) -> Result<CliCommandInfo, CliError> {
|
||||
let vote_account_pubkey =
|
||||
pubkey_of_signer(matches, "vote_account_pubkey", wallet_manager)?.unwrap();
|
||||
@@ -353,7 +356,7 @@ pub fn parse_vote_get_account_command(
|
||||
pub fn parse_withdraw_from_vote_account(
|
||||
matches: &ArgMatches<'_>,
|
||||
default_signer_path: &str,
|
||||
wallet_manager: Option<&Arc<RemoteWalletManager>>,
|
||||
wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
|
||||
) -> Result<CliCommandInfo, CliError> {
|
||||
let vote_account_pubkey =
|
||||
pubkey_of_signer(matches, "vote_account_pubkey", wallet_manager)?.unwrap();
|
||||
@@ -520,7 +523,7 @@ pub fn process_vote_update_validator(
|
||||
(&new_identity_pubkey, "new_identity_account".to_string()),
|
||||
)?;
|
||||
let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;
|
||||
let ixs = vec![vote_instruction::update_node(
|
||||
let ixs = vec![vote_instruction::update_validator_identity(
|
||||
vote_account_pubkey,
|
||||
&authorized_withdrawer.pubkey(),
|
||||
&new_identity_pubkey,
|
||||
@@ -569,7 +572,7 @@ fn get_vote_account(
|
||||
|
||||
pub fn process_show_vote_account(
|
||||
rpc_client: &RpcClient,
|
||||
_config: &CliConfig,
|
||||
config: &CliConfig,
|
||||
vote_account_pubkey: &Pubkey,
|
||||
use_lamports_unit: bool,
|
||||
commitment_config: CommitmentConfig,
|
||||
@@ -579,45 +582,38 @@ pub fn process_show_vote_account(
|
||||
|
||||
let epoch_schedule = rpc_client.get_epoch_schedule()?;
|
||||
|
||||
println!(
|
||||
"Account Balance: {}",
|
||||
build_balance_message(vote_account.lamports, use_lamports_unit, true)
|
||||
);
|
||||
println!("Validator Identity: {}", vote_state.node_pubkey);
|
||||
println!("Authorized Voter: {:?}", vote_state.authorized_voters());
|
||||
println!(
|
||||
"Authorized Withdrawer: {}",
|
||||
vote_state.authorized_withdrawer
|
||||
);
|
||||
println!("Credits: {}", vote_state.credits());
|
||||
println!("Commission: {}%", vote_state.commission);
|
||||
println!(
|
||||
"Root Slot: {}",
|
||||
match vote_state.root_slot {
|
||||
Some(slot) => slot.to_string(),
|
||||
None => "~".to_string(),
|
||||
}
|
||||
);
|
||||
println!("Recent Timestamp: {:?}", vote_state.last_timestamp);
|
||||
let mut votes: Vec<CliLockout> = vec![];
|
||||
let mut epoch_voting_history: Vec<CliEpochVotingHistory> = vec![];
|
||||
if !vote_state.votes.is_empty() {
|
||||
println!("recent votes:");
|
||||
for vote in &vote_state.votes {
|
||||
println!(
|
||||
"- slot: {}\n confirmation count: {}",
|
||||
vote.slot, vote.confirmation_count
|
||||
);
|
||||
votes.push(vote.into());
|
||||
}
|
||||
|
||||
println!("Epoch Voting History:");
|
||||
for (epoch, credits, prev_credits) in vote_state.epoch_credits() {
|
||||
let credits_earned = credits - prev_credits;
|
||||
let slots_in_epoch = epoch_schedule.get_slots_in_epoch(*epoch);
|
||||
println!(
|
||||
"- epoch: {}\n slots in epoch: {}\n credits earned: {}",
|
||||
epoch, slots_in_epoch, credits_earned,
|
||||
);
|
||||
epoch_voting_history.push(CliEpochVotingHistory {
|
||||
epoch: *epoch,
|
||||
slots_in_epoch,
|
||||
credits_earned,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
let vote_account_data = CliVoteAccount {
|
||||
account_balance: vote_account.lamports,
|
||||
validator_identity: vote_state.node_pubkey.to_string(),
|
||||
authorized_voters: vote_state.authorized_voters().into(),
|
||||
authorized_withdrawer: vote_state.authorized_withdrawer.to_string(),
|
||||
credits: vote_state.credits(),
|
||||
commission: vote_state.commission,
|
||||
root_slot: vote_state.root_slot,
|
||||
recent_timestamp: vote_state.last_timestamp.clone(),
|
||||
votes,
|
||||
epoch_voting_history,
|
||||
use_lamports_unit,
|
||||
};
|
||||
|
||||
config.output_format.formatted_print(&vote_account_data);
|
||||
Ok("".to_string())
|
||||
}
|
||||
|
||||
@@ -687,7 +683,7 @@ mod tests {
|
||||
&pubkey2_string,
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_authorize_voter, &default_keypair_file, None).unwrap(),
|
||||
parse_command(&test_authorize_voter, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::VoteAuthorize {
|
||||
vote_account_pubkey: pubkey,
|
||||
@@ -710,7 +706,7 @@ mod tests {
|
||||
&pubkey2_string,
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_authorize_voter, &default_keypair_file, None).unwrap(),
|
||||
parse_command(&test_authorize_voter, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::VoteAuthorize {
|
||||
vote_account_pubkey: pubkey,
|
||||
@@ -740,7 +736,7 @@ mod tests {
|
||||
"10",
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_create_vote_account, &default_keypair_file, None).unwrap(),
|
||||
parse_command(&test_create_vote_account, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::CreateVoteAccount {
|
||||
seed: None,
|
||||
@@ -768,7 +764,7 @@ mod tests {
|
||||
&identity_keypair_file,
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_create_vote_account2, &default_keypair_file, None).unwrap(),
|
||||
parse_command(&test_create_vote_account2, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::CreateVoteAccount {
|
||||
seed: None,
|
||||
@@ -800,7 +796,7 @@ mod tests {
|
||||
&authed.to_string(),
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_create_vote_account3, &default_keypair_file, None).unwrap(),
|
||||
parse_command(&test_create_vote_account3, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::CreateVoteAccount {
|
||||
seed: None,
|
||||
@@ -830,7 +826,7 @@ mod tests {
|
||||
&authed.to_string(),
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_create_vote_account4, &default_keypair_file, None).unwrap(),
|
||||
parse_command(&test_create_vote_account4, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::CreateVoteAccount {
|
||||
seed: None,
|
||||
@@ -855,7 +851,7 @@ mod tests {
|
||||
&keypair_file,
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_update_validator, &default_keypair_file, None).unwrap(),
|
||||
parse_command(&test_update_validator, &default_keypair_file, &mut None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::VoteUpdateValidator {
|
||||
vote_account_pubkey: pubkey,
|
||||
@@ -881,7 +877,7 @@ mod tests {
|
||||
parse_command(
|
||||
&test_withdraw_from_vote_account,
|
||||
&default_keypair_file,
|
||||
None
|
||||
&mut None
|
||||
)
|
||||
.unwrap(),
|
||||
CliCommandInfo {
|
||||
@@ -912,7 +908,7 @@ mod tests {
|
||||
parse_command(
|
||||
&test_withdraw_from_vote_account,
|
||||
&default_keypair_file,
|
||||
None
|
||||
&mut None
|
||||
)
|
||||
.unwrap(),
|
||||
CliCommandInfo {
|
||||
|
@@ -1451,6 +1451,7 @@ fn test_offline_nonced_create_stake_account_and_withdraw() {
|
||||
destination_account_pubkey: recipient_pubkey,
|
||||
lamports: 42,
|
||||
withdraw_authority: 0,
|
||||
custodian: None,
|
||||
sign_only: true,
|
||||
blockhash_query: BlockhashQuery::None(nonce_hash),
|
||||
nonce_account: Some(nonce_pubkey),
|
||||
@@ -1466,6 +1467,7 @@ fn test_offline_nonced_create_stake_account_and_withdraw() {
|
||||
destination_account_pubkey: recipient_pubkey,
|
||||
lamports: 42,
|
||||
withdraw_authority: 0,
|
||||
custodian: None,
|
||||
sign_only: false,
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(
|
||||
blockhash_query::Source::NonceAccount(nonce_pubkey),
|
||||
|
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-client"
|
||||
version = "1.1.3"
|
||||
version = "1.1.7"
|
||||
description = "Solana Client"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@@ -19,10 +19,10 @@ reqwest = { version = "0.10.4", default-features = false, features = ["blocking"
|
||||
serde = "1.0.105"
|
||||
serde_derive = "1.0.103"
|
||||
serde_json = "1.0.48"
|
||||
solana-transaction-status = { path = "../transaction-status", version = "1.1.3" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.1.3" }
|
||||
solana-sdk = { path = "../sdk", version = "1.1.3" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "1.1.3" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "1.1.7" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.1.7" }
|
||||
solana-sdk = { path = "../sdk", version = "1.1.7" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "1.1.7" }
|
||||
thiserror = "1.0"
|
||||
tungstenite = "0.10.1"
|
||||
url = "2.1.1"
|
||||
@@ -31,4 +31,7 @@ url = "2.1.1"
|
||||
assert_matches = "1.3.0"
|
||||
jsonrpc-core = "14.0.5"
|
||||
jsonrpc-http-server = "14.0.6"
|
||||
solana-logger = { path = "../logger", version = "1.1.3" }
|
||||
solana-logger = { path = "../logger", version = "1.1.7" }
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
||||
|
@@ -23,7 +23,9 @@ use solana_sdk::{
|
||||
signers::Signers,
|
||||
transaction::{self, Transaction, TransactionError},
|
||||
};
|
||||
use solana_transaction_status::{ConfirmedBlock, TransactionEncoding, TransactionStatus};
|
||||
use solana_transaction_status::{
|
||||
ConfirmedBlock, ConfirmedTransaction, TransactionEncoding, TransactionStatus,
|
||||
};
|
||||
use solana_vote_program::vote_state::MAX_LOCKOUT_HISTORY;
|
||||
use std::{
|
||||
error,
|
||||
@@ -158,6 +160,28 @@ impl RpcClient {
|
||||
.map(|status_meta| status_meta.status))
|
||||
}
|
||||
|
||||
pub fn get_signature_status_with_commitment_and_history(
|
||||
&self,
|
||||
signature: &Signature,
|
||||
commitment_config: CommitmentConfig,
|
||||
search_transaction_history: bool,
|
||||
) -> ClientResult<Option<transaction::Result<()>>> {
|
||||
let signature_status = self.client.send(
|
||||
&RpcRequest::GetSignatureStatuses,
|
||||
json!([[signature.to_string()], {
|
||||
"searchTransactionHistory": search_transaction_history
|
||||
}]),
|
||||
5,
|
||||
)?;
|
||||
let result: Response<Vec<Option<TransactionStatus>>> =
|
||||
serde_json::from_value(signature_status)
|
||||
.map_err(|err| ClientError::new_with_command(err.into(), "GetSignatureStatuses"))?;
|
||||
Ok(result.value[0]
|
||||
.clone()
|
||||
.filter(|result| result.satisfies_commitment(commitment_config))
|
||||
.map(|status_meta| status_meta.status))
|
||||
}
|
||||
|
||||
pub fn get_slot(&self) -> ClientResult<Slot> {
|
||||
self.get_slot_with_commitment(CommitmentConfig::default())
|
||||
}
|
||||
@@ -255,6 +279,55 @@ impl RpcClient {
|
||||
.map_err(|err| ClientError::new_with_command(err.into(), "GetConfirmedBlocks"))
|
||||
}
|
||||
|
||||
pub fn get_confirmed_signatures_for_address(
|
||||
&self,
|
||||
address: &Pubkey,
|
||||
start_slot: Slot,
|
||||
end_slot: Slot,
|
||||
) -> ClientResult<Vec<Signature>> {
|
||||
let response = self
|
||||
.client
|
||||
.send(
|
||||
&RpcRequest::GetConfirmedSignaturesForAddress,
|
||||
json!([address.to_string(), start_slot, end_slot]),
|
||||
0,
|
||||
)
|
||||
.map_err(|err| err.into_with_command("GetConfirmedSignaturesForAddress"))?;
|
||||
|
||||
let signatures_base58_str: Vec<String> =
|
||||
serde_json::from_value(response).map_err(|err| {
|
||||
ClientError::new_with_command(err.into(), "GetConfirmedSignaturesForAddress")
|
||||
})?;
|
||||
|
||||
let mut signatures = vec![];
|
||||
for signature_base58_str in signatures_base58_str {
|
||||
signatures.push(
|
||||
signature_base58_str.parse::<Signature>().map_err(|err| {
|
||||
Into::<ClientError>::into(RpcError::ParseError(err.to_string()))
|
||||
})?,
|
||||
);
|
||||
}
|
||||
Ok(signatures)
|
||||
}
|
||||
|
||||
pub fn get_confirmed_transaction(
|
||||
&self,
|
||||
signature: &Signature,
|
||||
encoding: TransactionEncoding,
|
||||
) -> ClientResult<ConfirmedTransaction> {
|
||||
let response = self
|
||||
.client
|
||||
.send(
|
||||
&RpcRequest::GetConfirmedTransaction,
|
||||
json!([signature.to_string(), encoding]),
|
||||
0,
|
||||
)
|
||||
.map_err(|err| err.into_with_command("GetConfirmedTransaction"))?;
|
||||
|
||||
serde_json::from_value(response)
|
||||
.map_err(|err| ClientError::new_with_command(err.into(), "GetConfirmedTransaction"))
|
||||
}
|
||||
|
||||
pub fn get_block_time(&self, slot: Slot) -> ClientResult<UnixTimestamp> {
|
||||
let response = self
|
||||
.client
|
||||
@@ -998,14 +1071,15 @@ impl RpcClient {
|
||||
let mut confirmations = 0;
|
||||
|
||||
let progress_bar = new_spinner_progress_bar();
|
||||
progress_bar.set_message(&format!(
|
||||
"[{}/{}] Waiting for confirmations",
|
||||
confirmations,
|
||||
MAX_LOCKOUT_HISTORY + 1,
|
||||
));
|
||||
|
||||
let mut send_retries = 20;
|
||||
let signature = loop {
|
||||
progress_bar.set_message(&format!(
|
||||
"[{}/{}] Finalizing transaction {}",
|
||||
confirmations,
|
||||
MAX_LOCKOUT_HISTORY + 1,
|
||||
transaction.signatures[0],
|
||||
));
|
||||
let mut status_retries = 15;
|
||||
let (signature, status) = loop {
|
||||
let signature = self.send_transaction(transaction)?;
|
||||
@@ -1066,9 +1140,10 @@ impl RpcClient {
|
||||
return Ok(signature);
|
||||
}
|
||||
progress_bar.set_message(&format!(
|
||||
"[{}/{}] Waiting for confirmations",
|
||||
"[{}/{}] Finalizing transaction {}",
|
||||
confirmations + 1,
|
||||
MAX_LOCKOUT_HISTORY + 1,
|
||||
signature,
|
||||
));
|
||||
sleep(Duration::from_millis(500));
|
||||
confirmations = self.get_num_blocks_since_signature_confirmation(&signature)?;
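As a rough usage sketch of the history-aware status query introduced in the hunk above (the method name and signature are taken from the diff; the surrounding function, error handling, and commitment choice are illustrative assumptions):

```rust
use solana_client::rpc_client::RpcClient;
use solana_sdk::{commitment_config::CommitmentConfig, signature::Signature};

// Hypothetical caller: checks an older signature, asking the node to also
// search its transaction history rather than only the recent status cache.
fn check_old_signature(rpc: &RpcClient, sig: &Signature) {
    match rpc.get_signature_status_with_commitment_and_history(
        sig,
        CommitmentConfig::default(),
        true, // search_transaction_history
    ) {
        Ok(Some(Ok(()))) => println!("{} succeeded", sig),
        Ok(Some(Err(err))) => println!("{} failed: {:?}", sig, err),
        Ok(None) => println!("{} not found", sig),
        Err(err) => eprintln!("rpc error: {}", err),
    }
}
```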
@@ -11,6 +11,8 @@ pub enum RpcRequest {
|
||||
GetClusterNodes,
|
||||
GetConfirmedBlock,
|
||||
GetConfirmedBlocks,
|
||||
GetConfirmedSignaturesForAddress,
|
||||
GetConfirmedTransaction,
|
||||
GetEpochInfo,
|
||||
GetEpochSchedule,
|
||||
GetGenesisHash,
|
||||
@@ -40,6 +42,9 @@ pub enum RpcRequest {
|
||||
MinimumLedgerSlot,
|
||||
}
|
||||
|
||||
pub const MAX_GET_SIGNATURE_STATUSES_QUERY_ITEMS: usize = 256;
|
||||
pub const MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE: u64 = 10_000;
|
||||
|
||||
impl RpcRequest {
|
||||
pub(crate) fn build_request_json(&self, id: u64, params: Value) -> Value {
|
||||
let jsonrpc = "2.0";
|
||||
@@ -52,6 +57,8 @@ impl RpcRequest {
|
||||
RpcRequest::GetClusterNodes => "getClusterNodes",
|
||||
RpcRequest::GetConfirmedBlock => "getConfirmedBlock",
|
||||
RpcRequest::GetConfirmedBlocks => "getConfirmedBlocks",
|
||||
RpcRequest::GetConfirmedSignaturesForAddress => "getConfirmedSignaturesForAddress",
|
||||
RpcRequest::GetConfirmedTransaction => "getConfirmedTransaction",
|
||||
RpcRequest::GetEpochInfo => "getEpochInfo",
|
||||
RpcRequest::GetEpochSchedule => "getEpochSchedule",
|
||||
RpcRequest::GetGenesisHash => "getGenesisHash",
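The new `GetConfirmedSignaturesForAddress` request above comes with a `MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE` of 10,000 slots; a hedged sketch of paging a wider range in windows under that limit (the helper name and chunking loop are illustrative assumptions; only the client method and the constant's value come from the diff):

```rust
use solana_client::rpc_client::RpcClient;
use solana_sdk::{clock::Slot, pubkey::Pubkey, signature::Signature};

// Illustrative helper: splits a large slot range into windows no wider than
// the 10_000-slot limit before calling get_confirmed_signatures_for_address.
fn signatures_for_range(
    rpc: &RpcClient,
    address: &Pubkey,
    mut start: Slot,
    end: Slot,
) -> Result<Vec<Signature>, Box<dyn std::error::Error>> {
    const WINDOW: u64 = 10_000; // assumed to mirror the server-side slot-range limit
    let mut all = Vec::new();
    while start <= end {
        let chunk_end = end.min(start.saturating_add(WINDOW - 1));
        all.extend(rpc.get_confirmed_signatures_for_address(address, start, chunk_end)?);
        if chunk_end == end {
            break;
        }
        start = chunk_end + 1;
    }
    Ok(all)
}
```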
@@ -1,7 +1,7 @@
|
||||
[package]
|
||||
name = "solana-core"
|
||||
description = "Blockchain, Rebuilt for Scale"
|
||||
version = "1.1.3"
|
||||
version = "1.1.7"
|
||||
documentation = "https://docs.rs/solana"
|
||||
homepage = "https://solana.com/"
|
||||
readme = "../README.md"
|
||||
@@ -41,35 +41,35 @@ regex = "1.3.6"
|
||||
serde = "1.0.105"
|
||||
serde_derive = "1.0.103"
|
||||
serde_json = "1.0.48"
|
||||
solana-budget-program = { path = "../programs/budget", version = "1.1.3" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.1.3" }
|
||||
solana-client = { path = "../client", version = "1.1.3" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "1.1.3" }
|
||||
solana-faucet = { path = "../faucet", version = "1.1.3" }
|
||||
solana-budget-program = { path = "../programs/budget", version = "1.1.7" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.1.7" }
|
||||
solana-client = { path = "../client", version = "1.1.7" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "1.1.7" }
|
||||
solana-faucet = { path = "../faucet", version = "1.1.7" }
|
||||
ed25519-dalek = "=1.0.0-pre.1"
|
||||
solana-ledger = { path = "../ledger", version = "1.1.3" }
|
||||
solana-logger = { path = "../logger", version = "1.1.3" }
|
||||
solana-merkle-tree = { path = "../merkle-tree", version = "1.1.3" }
|
||||
solana-metrics = { path = "../metrics", version = "1.1.3" }
|
||||
solana-measure = { path = "../measure", version = "1.1.3" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.1.3" }
|
||||
solana-chacha-cuda = { path = "../chacha-cuda", version = "1.1.3" }
|
||||
solana-perf = { path = "../perf", version = "1.1.3" }
|
||||
solana-runtime = { path = "../runtime", version = "1.1.3" }
|
||||
solana-sdk = { path = "../sdk", version = "1.1.3" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "1.1.3" }
|
||||
solana-storage-program = { path = "../programs/storage", version = "1.1.3" }
|
||||
solana-streamer = { path = "../streamer", version = "1.1.3" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "1.1.3" }
|
||||
solana-vote-signer = { path = "../vote-signer", version = "1.1.3" }
|
||||
solana-sys-tuner = { path = "../sys-tuner", version = "1.1.3" }
|
||||
solana-ledger = { path = "../ledger", version = "1.1.7" }
|
||||
solana-logger = { path = "../logger", version = "1.1.7" }
|
||||
solana-merkle-tree = { path = "../merkle-tree", version = "1.1.7" }
|
||||
solana-metrics = { path = "../metrics", version = "1.1.7" }
|
||||
solana-measure = { path = "../measure", version = "1.1.7" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.1.7" }
|
||||
solana-chacha-cuda = { path = "../chacha-cuda", version = "1.1.7" }
|
||||
solana-perf = { path = "../perf", version = "1.1.7" }
|
||||
solana-runtime = { path = "../runtime", version = "1.1.7" }
|
||||
solana-sdk = { path = "../sdk", version = "1.1.7" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "1.1.7" }
|
||||
solana-storage-program = { path = "../programs/storage", version = "1.1.7" }
|
||||
solana-streamer = { path = "../streamer", version = "1.1.7" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "1.1.7" }
|
||||
solana-vote-signer = { path = "../vote-signer", version = "1.1.7" }
|
||||
solana-sys-tuner = { path = "../sys-tuner", version = "1.1.7" }
|
||||
tempfile = "3.1.0"
|
||||
thiserror = "1.0"
|
||||
tokio = "0.1"
|
||||
tokio-codec = "0.1"
|
||||
tokio-fs = "0.1"
|
||||
tokio-io = "0.1"
|
||||
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.1.3" }
|
||||
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.1.7" }
|
||||
trees = "0.2.1"
|
||||
|
||||
[dev-dependencies]
|
||||
@@ -103,3 +103,6 @@ name = "cluster_info"
|
||||
[[bench]]
|
||||
name = "chacha"
|
||||
required-features = ["chacha"]
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
||||
|
@@ -190,7 +190,7 @@ fn bench_banking(bencher: &mut Bencher, tx_type: TransactionType) {
|
||||
let (exit, poh_recorder, poh_service, signal_receiver) =
|
||||
create_test_recorder(&bank, &blockstore, None);
|
||||
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
|
||||
let cluster_info = Arc::new(RwLock::new(cluster_info));
|
||||
let cluster_info = Arc::new(cluster_info);
|
||||
let _banking_stage = BankingStage::new(
|
||||
&cluster_info,
|
||||
&poh_recorder,
|
||||
|
@@ -10,7 +10,11 @@ use solana_ledger::shred::Shred;
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
use solana_sdk::timing::timestamp;
|
||||
use std::sync::RwLock;
|
||||
use std::{collections::HashMap, net::UdpSocket, sync::Arc, time::Instant};
|
||||
use std::{
|
||||
collections::HashMap,
|
||||
net::UdpSocket,
|
||||
sync::{atomic::AtomicU64, Arc},
|
||||
};
|
||||
use test::Bencher;
|
||||
|
||||
#[bench]
|
||||
@@ -32,9 +36,10 @@ fn broadcast_shreds_bench(bencher: &mut Bencher) {
|
||||
stakes.insert(id, thread_rng().gen_range(1, NUM_PEERS) as u64);
|
||||
}
|
||||
let stakes = Arc::new(stakes);
|
||||
let cluster_info = Arc::new(RwLock::new(cluster_info));
|
||||
let cluster_info = Arc::new(cluster_info);
|
||||
let (peers, peers_and_stakes) = get_broadcast_peers(&cluster_info, Some(stakes.clone()));
|
||||
let shreds = Arc::new(shreds);
|
||||
let last_datapoint = Arc::new(AtomicU64::new(0));
|
||||
bencher.iter(move || {
|
||||
let shreds = shreds.clone();
|
||||
broadcast_shreds(
|
||||
@@ -42,7 +47,7 @@ fn broadcast_shreds_bench(bencher: &mut Bencher) {
|
||||
&shreds,
|
||||
&peers_and_stakes,
|
||||
&peers,
|
||||
&mut Instant::now(),
|
||||
&last_datapoint,
|
||||
&mut 0,
|
||||
)
|
||||
.unwrap();
|
||||
|
@@ -45,7 +45,7 @@ fn bench_retransmitter(bencher: &mut Bencher) {
|
||||
peer_sockets.push(socket);
|
||||
}
|
||||
let peer_sockets = Arc::new(peer_sockets);
|
||||
let cluster_info = Arc::new(RwLock::new(cluster_info));
|
||||
let cluster_info = Arc::new(cluster_info);
|
||||
|
||||
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(100_000);
|
||||
let bank0 = Bank::new(&genesis_config);
|
||||
|
@@ -15,7 +15,7 @@ use std::{
|
||||
sync::{
|
||||
atomic::{AtomicBool, Ordering},
|
||||
mpsc::RecvTimeoutError,
|
||||
Arc, RwLock,
|
||||
Arc,
|
||||
},
|
||||
thread::{self, Builder, JoinHandle},
|
||||
time::Duration,
|
||||
@@ -30,7 +30,7 @@ impl AccountsHashVerifier {
|
||||
snapshot_package_receiver: SnapshotPackageReceiver,
|
||||
snapshot_package_sender: Option<SnapshotPackageSender>,
|
||||
exit: &Arc<AtomicBool>,
|
||||
cluster_info: &Arc<RwLock<ClusterInfo>>,
|
||||
cluster_info: &Arc<ClusterInfo>,
|
||||
trusted_validators: Option<HashSet<Pubkey>>,
|
||||
halt_on_trusted_validators_accounts_hash_mismatch: bool,
|
||||
fault_injection_rate_slots: u64,
|
||||
@@ -72,7 +72,7 @@ impl AccountsHashVerifier {
|
||||
|
||||
fn process_snapshot(
|
||||
snapshot_package: SnapshotPackage,
|
||||
cluster_info: &Arc<RwLock<ClusterInfo>>,
|
||||
cluster_info: &ClusterInfo,
|
||||
trusted_validators: &Option<HashSet<Pubkey>>,
|
||||
halt_on_trusted_validator_accounts_hash_mismatch: bool,
|
||||
snapshot_package_sender: &Option<SnapshotPackageSender>,
|
||||
@@ -111,14 +111,11 @@ impl AccountsHashVerifier {
|
||||
if sender.send(snapshot_package).is_err() {}
|
||||
}
|
||||
|
||||
cluster_info
|
||||
.write()
|
||||
.unwrap()
|
||||
.push_accounts_hashes(hashes.clone());
|
||||
cluster_info.push_accounts_hashes(hashes.clone());
|
||||
}
|
||||
|
||||
fn should_halt(
|
||||
cluster_info: &Arc<RwLock<ClusterInfo>>,
|
||||
cluster_info: &ClusterInfo,
|
||||
trusted_validators: &Option<HashSet<Pubkey>>,
|
||||
slot_to_hash: &mut HashMap<Slot, Hash>,
|
||||
) -> bool {
|
||||
@@ -126,11 +123,9 @@ impl AccountsHashVerifier {
|
||||
let mut highest_slot = 0;
|
||||
if let Some(trusted_validators) = trusted_validators.as_ref() {
|
||||
for trusted_validator in trusted_validators {
|
||||
let cluster_info_r = cluster_info.read().unwrap();
|
||||
if let Some(accounts_hashes) =
|
||||
cluster_info_r.get_accounts_hash_for_node(trusted_validator)
|
||||
let is_conflicting = cluster_info.get_accounts_hash_for_node(trusted_validator, |accounts_hashes|
|
||||
{
|
||||
for (slot, hash) in accounts_hashes {
|
||||
accounts_hashes.iter().any(|(slot, hash)| {
|
||||
if let Some(reference_hash) = slot_to_hash.get(slot) {
|
||||
if *hash != *reference_hash {
|
||||
error!("Trusted validator {} produced conflicting hashes for slot: {} ({} != {})",
|
||||
@@ -139,16 +134,21 @@ impl AccountsHashVerifier {
|
||||
hash,
|
||||
reference_hash,
|
||||
);
|
||||
|
||||
return true;
|
||||
true
|
||||
} else {
|
||||
verified_count += 1;
|
||||
false
|
||||
}
|
||||
} else {
|
||||
highest_slot = std::cmp::max(*slot, highest_slot);
|
||||
slot_to_hash.insert(*slot, *hash);
|
||||
false
|
||||
}
|
||||
}
|
||||
})
|
||||
}).unwrap_or(false);
|
||||
|
||||
if is_conflicting {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
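The `should_halt` rework above swaps the external `cluster_info.read().unwrap()` access for a closure-taking accessor; a minimal sketch of that pattern on a stand-in type (the struct and method below are illustrative, not the real `ClusterInfo` API):

```rust
use std::collections::HashMap;
use std::sync::RwLock;

// Stand-in type showing the closure-based accessor style: the RwLock lives
// inside the struct, and callers pass a closure that runs while the read
// guard is held, so no lock guard ever escapes to the caller.
struct HashStore {
    hashes: RwLock<HashMap<u64, Vec<(u64, [u8; 32])>>>,
}

impl HashStore {
    fn get_hashes_for_node<F, R>(&self, node: u64, map: F) -> Option<R>
    where
        F: FnOnce(&Vec<(u64, [u8; 32])>) -> R,
    {
        self.hashes.read().unwrap().get(&node).map(map)
    }
}
```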
@@ -181,7 +181,7 @@ mod tests {
|
||||
|
||||
let contact_info = ContactInfo::new_localhost(&keypair.pubkey(), 0);
|
||||
let cluster_info = ClusterInfo::new_with_invalid_keypair(contact_info);
|
||||
let cluster_info = Arc::new(RwLock::new(cluster_info));
|
||||
let cluster_info = Arc::new(cluster_info);
|
||||
|
||||
let mut trusted_validators = HashSet::new();
|
||||
let mut slot_to_hash = HashMap::new();
|
||||
@@ -196,8 +196,7 @@ mod tests {
|
||||
let hash2 = hash(&[2]);
|
||||
{
|
||||
let message = make_accounts_hashes_message(&validator1, vec![(0, hash1)]).unwrap();
|
||||
let mut cluster_info_w = cluster_info.write().unwrap();
|
||||
cluster_info_w.push_message(message);
|
||||
cluster_info.push_message(message);
|
||||
}
|
||||
slot_to_hash.insert(0, hash2);
|
||||
trusted_validators.insert(validator1.pubkey());
|
||||
@@ -217,7 +216,7 @@ mod tests {
|
||||
|
||||
let contact_info = ContactInfo::new_localhost(&keypair.pubkey(), 0);
|
||||
let cluster_info = ClusterInfo::new_with_invalid_keypair(contact_info);
|
||||
let cluster_info = Arc::new(RwLock::new(cluster_info));
|
||||
let cluster_info = Arc::new(cluster_info);
|
||||
|
||||
let trusted_validators = HashSet::new();
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
@@ -244,9 +243,8 @@ mod tests {
|
||||
0,
|
||||
);
|
||||
}
|
||||
let cluster_info_r = cluster_info.read().unwrap();
|
||||
let cluster_hashes = cluster_info_r
|
||||
.get_accounts_hash_for_node(&keypair.pubkey())
|
||||
let cluster_hashes = cluster_info
|
||||
.get_accounts_hash_for_node(&keypair.pubkey(), |c| c.clone())
|
||||
.unwrap();
|
||||
info!("{:?}", cluster_hashes);
|
||||
assert_eq!(hashes.len(), MAX_SNAPSHOT_HASHES);
|
||||
|
@@ -41,7 +41,7 @@ use std::{
|
||||
net::UdpSocket,
|
||||
sync::atomic::AtomicBool,
|
||||
sync::mpsc::Receiver,
|
||||
sync::{Arc, Mutex, RwLock},
|
||||
sync::{Arc, Mutex},
|
||||
thread::{self, Builder, JoinHandle},
|
||||
time::Duration,
|
||||
time::Instant,
|
||||
@@ -76,7 +76,7 @@ impl BankingStage {
|
||||
/// Create the stage using `bank`. Exit when `verified_receiver` is dropped.
|
||||
#[allow(clippy::new_ret_no_self)]
|
||||
pub fn new(
|
||||
cluster_info: &Arc<RwLock<ClusterInfo>>,
|
||||
cluster_info: &Arc<ClusterInfo>,
|
||||
poh_recorder: &Arc<Mutex<PohRecorder>>,
|
||||
verified_receiver: CrossbeamReceiver<Vec<Packets>>,
|
||||
verified_vote_receiver: CrossbeamReceiver<Vec<Packets>>,
|
||||
@@ -93,7 +93,7 @@ impl BankingStage {
|
||||
}
|
||||
|
||||
fn new_num_threads(
|
||||
cluster_info: &Arc<RwLock<ClusterInfo>>,
|
||||
cluster_info: &Arc<ClusterInfo>,
|
||||
poh_recorder: &Arc<Mutex<PohRecorder>>,
|
||||
verified_receiver: CrossbeamReceiver<Vec<Packets>>,
|
||||
verified_vote_receiver: CrossbeamReceiver<Vec<Packets>>,
|
||||
@@ -104,7 +104,7 @@ impl BankingStage {
|
||||
// Single thread to generate entries from many banks.
|
||||
// This thread talks to poh_service and broadcasts the entries once they have been recorded.
|
||||
// Once an entry has been recorded, its blockhash is registered with the bank.
|
||||
let my_pubkey = cluster_info.read().unwrap().id();
|
||||
let my_pubkey = cluster_info.id();
|
||||
// Many banks that process transactions in parallel.
|
||||
let bank_thread_hdls: Vec<JoinHandle<()>> = (0..num_threads)
|
||||
.map(|i| {
|
||||
@@ -287,7 +287,7 @@ impl BankingStage {
|
||||
my_pubkey: &Pubkey,
|
||||
socket: &std::net::UdpSocket,
|
||||
poh_recorder: &Arc<Mutex<PohRecorder>>,
|
||||
cluster_info: &Arc<RwLock<ClusterInfo>>,
|
||||
cluster_info: &ClusterInfo,
|
||||
buffered_packets: &mut Vec<PacketsAndOffsets>,
|
||||
enable_forwarding: bool,
|
||||
batch_limit: usize,
|
||||
@@ -331,10 +331,7 @@ impl BankingStage {
|
||||
next_leader.map_or((), |leader_pubkey| {
|
||||
let leader_addr = {
|
||||
cluster_info
|
||||
.read()
|
||||
.unwrap()
|
||||
.lookup(&leader_pubkey)
|
||||
.map(|leader| leader.tpu_forwards)
|
||||
.lookup_contact_info(&leader_pubkey, |leader| leader.tpu_forwards)
|
||||
};
|
||||
|
||||
leader_addr.map_or((), |leader_addr| {
|
||||
@@ -358,7 +355,7 @@ impl BankingStage {
|
||||
my_pubkey: Pubkey,
|
||||
verified_receiver: &CrossbeamReceiver<Vec<Packets>>,
|
||||
poh_recorder: &Arc<Mutex<PohRecorder>>,
|
||||
cluster_info: &Arc<RwLock<ClusterInfo>>,
|
||||
cluster_info: &ClusterInfo,
|
||||
recv_start: &mut Instant,
|
||||
enable_forwarding: bool,
|
||||
id: u32,
|
||||
@@ -1049,7 +1046,7 @@ mod tests {
|
||||
let (exit, poh_recorder, poh_service, _entry_receiever) =
|
||||
create_test_recorder(&bank, &blockstore, None);
|
||||
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
|
||||
let cluster_info = Arc::new(RwLock::new(cluster_info));
|
||||
let cluster_info = Arc::new(cluster_info);
|
||||
let banking_stage = BankingStage::new(
|
||||
&cluster_info,
|
||||
&poh_recorder,
|
||||
@@ -1089,7 +1086,7 @@ mod tests {
|
||||
let (exit, poh_recorder, poh_service, entry_receiver) =
|
||||
create_test_recorder(&bank, &blockstore, Some(poh_config));
|
||||
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
|
||||
let cluster_info = Arc::new(RwLock::new(cluster_info));
|
||||
let cluster_info = Arc::new(cluster_info);
|
||||
let banking_stage = BankingStage::new(
|
||||
&cluster_info,
|
||||
&poh_recorder,
|
||||
@@ -1152,7 +1149,7 @@ mod tests {
|
||||
let (exit, poh_recorder, poh_service, entry_receiver) =
|
||||
create_test_recorder(&bank, &blockstore, Some(poh_config));
|
||||
let cluster_info = ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
|
||||
let cluster_info = Arc::new(RwLock::new(cluster_info));
|
||||
let cluster_info = Arc::new(cluster_info);
|
||||
let banking_stage = BankingStage::new(
|
||||
&cluster_info,
|
||||
&poh_recorder,
|
||||
@@ -1293,7 +1290,7 @@ mod tests {
|
||||
create_test_recorder(&bank, &blockstore, Some(poh_config));
|
||||
let cluster_info =
|
||||
ClusterInfo::new_with_invalid_keypair(Node::new_localhost().info);
|
||||
let cluster_info = Arc::new(RwLock::new(cluster_info));
|
||||
let cluster_info = Arc::new(cluster_info);
|
||||
let _banking_stage = BankingStage::new_num_threads(
|
||||
&cluster_info,
|
||||
&poh_recorder,
|
||||
|
@@ -1,6 +1,6 @@
|
||||
//! A stage to broadcast data from a leader node to validators
|
||||
use self::{
|
||||
broadcast_fake_shreds_run::BroadcastFakeShredsRun,
|
||||
broadcast_fake_shreds_run::BroadcastFakeShredsRun, broadcast_metrics::*,
|
||||
fail_entry_verification_broadcast_run::FailEntryVerificationBroadcastRun,
|
||||
standard_broadcast_run::StandardBroadcastRun,
|
||||
};
|
||||
@@ -20,28 +20,31 @@ use solana_ledger::{blockstore::Blockstore, shred::Shred, staking_utils};
|
||||
use solana_measure::measure::Measure;
|
||||
use solana_metrics::{inc_new_counter_error, inc_new_counter_info};
|
||||
use solana_runtime::bank::Bank;
|
||||
use solana_sdk::timing::duration_as_s;
|
||||
use solana_sdk::timing::timestamp;
|
||||
use solana_sdk::{clock::Slot, pubkey::Pubkey};
|
||||
use solana_streamer::sendmmsg::send_mmsg;
|
||||
use std::sync::atomic::AtomicU64;
|
||||
use std::{
|
||||
collections::HashMap,
|
||||
net::UdpSocket,
|
||||
sync::atomic::{AtomicBool, Ordering},
|
||||
sync::mpsc::{channel, Receiver, RecvError, RecvTimeoutError, Sender},
|
||||
sync::{Arc, Mutex, RwLock},
|
||||
sync::{Arc, Mutex},
|
||||
thread::{self, Builder, JoinHandle},
|
||||
time::{Duration, Instant},
|
||||
};
|
||||
|
||||
mod broadcast_fake_shreds_run;
|
||||
pub(crate) mod broadcast_metrics;
|
||||
pub(crate) mod broadcast_utils;
|
||||
mod fail_entry_verification_broadcast_run;
|
||||
mod standard_broadcast_run;
|
||||
|
||||
pub const NUM_INSERT_THREADS: usize = 2;
|
||||
pub type RetransmitSlotsSender = CrossbeamSender<HashMap<Slot, Arc<Bank>>>;
|
||||
pub type RetransmitSlotsReceiver = CrossbeamReceiver<HashMap<Slot, Arc<Bank>>>;
|
||||
pub(crate) const NUM_INSERT_THREADS: usize = 2;
|
||||
pub(crate) type RetransmitSlotsSender = CrossbeamSender<HashMap<Slot, Arc<Bank>>>;
|
||||
pub(crate) type RetransmitSlotsReceiver = CrossbeamReceiver<HashMap<Slot, Arc<Bank>>>;
|
||||
pub(crate) type RecordReceiver = Receiver<(Arc<Vec<Shred>>, Option<BroadcastShredBatchInfo>)>;
|
||||
pub(crate) type TransmitReceiver = Receiver<(TransmitShreds, Option<BroadcastShredBatchInfo>)>;
|
||||
|
||||
#[derive(Debug, PartialEq, Eq, Clone)]
|
||||
pub enum BroadcastStageReturnType {
|
||||
@@ -59,14 +62,14 @@ impl BroadcastStageType {
|
||||
pub fn new_broadcast_stage(
|
||||
&self,
|
||||
sock: Vec<UdpSocket>,
|
||||
cluster_info: Arc<RwLock<ClusterInfo>>,
|
||||
cluster_info: Arc<ClusterInfo>,
|
||||
receiver: Receiver<WorkingBankEntry>,
|
||||
retransmit_slots_receiver: RetransmitSlotsReceiver,
|
||||
exit_sender: &Arc<AtomicBool>,
|
||||
blockstore: &Arc<Blockstore>,
|
||||
shred_version: u16,
|
||||
) -> BroadcastStage {
|
||||
let keypair = cluster_info.read().unwrap().keypair.clone();
|
||||
let keypair = cluster_info.keypair.clone();
|
||||
match self {
|
||||
BroadcastStageType::Standard => BroadcastStage::new(
|
||||
sock,
|
||||
@@ -107,18 +110,18 @@ trait BroadcastRun {
|
||||
&mut self,
|
||||
blockstore: &Arc<Blockstore>,
|
||||
receiver: &Receiver<WorkingBankEntry>,
|
||||
socket_sender: &Sender<TransmitShreds>,
|
||||
blockstore_sender: &Sender<Arc<Vec<Shred>>>,
|
||||
socket_sender: &Sender<(TransmitShreds, Option<BroadcastShredBatchInfo>)>,
|
||||
blockstore_sender: &Sender<(Arc<Vec<Shred>>, Option<BroadcastShredBatchInfo>)>,
|
||||
) -> Result<()>;
|
||||
fn transmit(
|
||||
&mut self,
|
||||
receiver: &Arc<Mutex<Receiver<TransmitShreds>>>,
|
||||
cluster_info: &Arc<RwLock<ClusterInfo>>,
|
||||
receiver: &Arc<Mutex<TransmitReceiver>>,
|
||||
cluster_info: &ClusterInfo,
|
||||
sock: &UdpSocket,
|
||||
) -> Result<()>;
|
||||
fn record(
|
||||
&self,
|
||||
receiver: &Arc<Mutex<Receiver<Arc<Vec<Shred>>>>>,
|
||||
&mut self,
|
||||
receiver: &Arc<Mutex<RecordReceiver>>,
|
||||
blockstore: &Arc<Blockstore>,
|
||||
) -> Result<()>;
|
||||
}
|
||||
@@ -150,8 +153,8 @@ impl BroadcastStage {
|
||||
fn run(
|
||||
blockstore: &Arc<Blockstore>,
|
||||
receiver: &Receiver<WorkingBankEntry>,
|
||||
socket_sender: &Sender<TransmitShreds>,
|
||||
blockstore_sender: &Sender<Arc<Vec<Shred>>>,
|
||||
socket_sender: &Sender<(TransmitShreds, Option<BroadcastShredBatchInfo>)>,
|
||||
blockstore_sender: &Sender<(Arc<Vec<Shred>>, Option<BroadcastShredBatchInfo>)>,
|
||||
mut broadcast_stage_run: impl BroadcastRun,
|
||||
) -> BroadcastStageReturnType {
|
||||
loop {
|
||||
@@ -202,7 +205,7 @@ impl BroadcastStage {
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
fn new(
|
||||
socks: Vec<UdpSocket>,
|
||||
cluster_info: Arc<RwLock<ClusterInfo>>,
|
||||
cluster_info: Arc<ClusterInfo>,
|
||||
receiver: Receiver<WorkingBankEntry>,
|
||||
retransmit_slots_receiver: RetransmitSlotsReceiver,
|
||||
exit_sender: &Arc<AtomicBool>,
|
||||
@@ -250,7 +253,7 @@ impl BroadcastStage {
|
||||
let blockstore_receiver = Arc::new(Mutex::new(blockstore_receiver));
|
||||
for _ in 0..NUM_INSERT_THREADS {
|
||||
let blockstore_receiver = blockstore_receiver.clone();
|
||||
let bs_record = broadcast_stage_run.clone();
|
||||
let mut bs_record = broadcast_stage_run.clone();
|
||||
let btree = blockstore.clone();
|
||||
let t = Builder::new()
|
||||
.name("solana-broadcaster-record".to_string())
|
||||
@@ -289,7 +292,7 @@ impl BroadcastStage {
|
||||
fn check_retransmit_signals(
|
||||
blockstore: &Blockstore,
|
||||
retransmit_slots_receiver: &RetransmitSlotsReceiver,
|
||||
socket_sender: &Sender<TransmitShreds>,
|
||||
socket_sender: &Sender<(TransmitShreds, Option<BroadcastShredBatchInfo>)>,
|
||||
) -> Result<()> {
|
||||
let timer = Duration::from_millis(100);
|
||||
|
||||
@@ -310,7 +313,7 @@ impl BroadcastStage {
|
||||
);
|
||||
|
||||
if !data_shreds.is_empty() {
|
||||
socket_sender.send((stakes.clone(), data_shreds))?;
|
||||
socket_sender.send(((stakes.clone(), data_shreds), None))?;
|
||||
}
|
||||
|
||||
let coding_shreds = Arc::new(
|
||||
@@ -320,7 +323,7 @@ impl BroadcastStage {
|
||||
);
|
||||
|
||||
if !coding_shreds.is_empty() {
|
||||
socket_sender.send((stakes.clone(), coding_shreds))?;
|
||||
socket_sender.send(((stakes.clone(), coding_shreds), None))?;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -335,23 +338,30 @@ impl BroadcastStage {
|
||||
}
|
||||
}
|
||||
|
||||
fn update_peer_stats(num_live_peers: i64, broadcast_len: i64, last_datapoint_submit: &mut Instant) {
|
||||
if duration_as_s(&Instant::now().duration_since(*last_datapoint_submit)) >= 1.0 {
|
||||
fn update_peer_stats(
|
||||
num_live_peers: i64,
|
||||
broadcast_len: i64,
|
||||
last_datapoint_submit: &Arc<AtomicU64>,
|
||||
) {
|
||||
let now = timestamp();
|
||||
let last = last_datapoint_submit.load(Ordering::Relaxed);
|
||||
if now - last > 1000
|
||||
&& last_datapoint_submit.compare_and_swap(last, now, Ordering::Relaxed) == last
|
||||
{
|
||||
datapoint_info!(
|
||||
"cluster_info-num_nodes",
|
||||
("live_count", num_live_peers, i64),
|
||||
("broadcast_count", broadcast_len, i64)
|
||||
);
|
||||
*last_datapoint_submit = Instant::now();
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_broadcast_peers<S: std::hash::BuildHasher>(
|
||||
cluster_info: &Arc<RwLock<ClusterInfo>>,
|
||||
cluster_info: &ClusterInfo,
|
||||
stakes: Option<Arc<HashMap<Pubkey, u64, S>>>,
|
||||
) -> (Vec<ContactInfo>, Vec<(u64, usize)>) {
|
||||
use crate::cluster_info;
|
||||
let mut peers = cluster_info.read().unwrap().tvu_peers();
|
||||
let mut peers = cluster_info.tvu_peers();
|
||||
let peers_and_stakes = cluster_info::stake_weight_peers(&mut peers, stakes);
|
||||
(peers, peers_and_stakes)
|
||||
}
|
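The reworked `update_peer_stats` above replaces a `&mut Instant` with a shared `AtomicU64` so several broadcast threads can rate-limit themselves to one datapoint per second without a lock; a small sketch of the same compare-and-swap gating (the function name and interval argument are illustrative assumptions):

```rust
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Arc;

// Returns true for at most one caller per `interval_ms` across every thread
// sharing `last_submit`; losers of the compare_and_swap race return false.
fn should_submit(last_submit: &Arc<AtomicU64>, now_ms: u64, interval_ms: u64) -> bool {
    let last = last_submit.load(Ordering::Relaxed);
    now_ms.saturating_sub(last) > interval_ms
        && last_submit.compare_and_swap(last, now_ms, Ordering::Relaxed) == last
}
```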
@@ -363,7 +373,7 @@ pub fn broadcast_shreds(
|
||||
shreds: &Arc<Vec<Shred>>,
|
||||
peers_and_stakes: &[(u64, usize)],
|
||||
peers: &[ContactInfo],
|
||||
last_datapoint_submit: &mut Instant,
|
||||
last_datapoint_submit: &Arc<AtomicU64>,
|
||||
send_mmsg_total: &mut u64,
|
||||
) -> Result<()> {
|
||||
let broadcast_len = peers_and_stakes.len();
|
||||
@@ -402,11 +412,19 @@ pub fn broadcast_shreds(
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn distance(a: u64, b: u64) -> u64 {
|
||||
if a > b {
|
||||
a - b
|
||||
} else {
|
||||
b - a
|
||||
}
|
||||
}
|
||||
|
||||
fn num_live_peers(peers: &[ContactInfo]) -> i64 {
|
||||
let mut num_live_peers = 1i64;
|
||||
peers.iter().for_each(|p| {
|
||||
// A peer is considered live if they generated their contact info recently
|
||||
if timestamp() - p.wallclock <= CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS {
|
||||
if distance(timestamp(), p.wallclock) <= CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS {
|
||||
num_live_peers += 1;
|
||||
}
|
||||
});
|
||||
@@ -432,11 +450,7 @@ pub mod test {
|
||||
signature::{Keypair, Signer},
|
||||
};
|
||||
use std::{
|
||||
path::Path,
|
||||
sync::atomic::AtomicBool,
|
||||
sync::mpsc::channel,
|
||||
sync::{Arc, RwLock},
|
||||
thread::sleep,
|
||||
path::Path, sync::atomic::AtomicBool, sync::mpsc::channel, sync::Arc, thread::sleep,
|
||||
};
|
||||
|
||||
pub fn make_transmit_shreds(
|
||||
@@ -471,13 +485,13 @@ pub mod test {
|
||||
}
|
||||
|
||||
fn check_all_shreds_received(
|
||||
transmit_receiver: &Receiver<TransmitShreds>,
|
||||
transmit_receiver: &TransmitReceiver,
|
||||
mut data_index: u64,
|
||||
mut coding_index: u64,
|
||||
num_expected_data_shreds: u64,
|
||||
num_expected_coding_shreds: u64,
|
||||
) {
|
||||
while let Ok(new_retransmit_slots) = transmit_receiver.try_recv() {
|
||||
while let Ok((new_retransmit_slots, _)) = transmit_receiver.try_recv() {
|
||||
if new_retransmit_slots.1[0].is_data() {
|
||||
for data_shred in new_retransmit_slots.1.iter() {
|
||||
assert_eq!(data_shred.index() as u64, data_index);
|
||||
@@ -496,6 +510,17 @@ pub mod test {
|
||||
assert_eq!(num_expected_coding_shreds, coding_index);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_num_live_peers() {
|
||||
let mut ci = ContactInfo::default();
|
||||
ci.wallclock = std::u64::MAX;
|
||||
assert_eq!(num_live_peers(&[ci.clone()]), 1);
|
||||
ci.wallclock = timestamp() - 1;
|
||||
assert_eq!(num_live_peers(&[ci.clone()]), 2);
|
||||
ci.wallclock = timestamp() - CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS - 1;
|
||||
assert_eq!(num_live_peers(&[ci]), 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_duplicate_retransmit_signal() {
|
||||
// Setup
|
||||
@@ -569,16 +594,16 @@ pub mod test {
|
||||
let broadcast_buddy = Node::new_localhost_with_pubkey(&buddy_keypair.pubkey());
|
||||
|
||||
// Fill the cluster_info with the buddy's info
|
||||
let mut cluster_info = ClusterInfo::new_with_invalid_keypair(leader_info.info.clone());
|
||||
let cluster_info = ClusterInfo::new_with_invalid_keypair(leader_info.info.clone());
|
||||
cluster_info.insert_info(broadcast_buddy.info);
|
||||
let cluster_info = Arc::new(RwLock::new(cluster_info));
|
||||
let cluster_info = Arc::new(cluster_info);
|
||||
|
||||
let exit_sender = Arc::new(AtomicBool::new(false));
|
||||
|
||||
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
|
||||
let bank = Arc::new(Bank::new(&genesis_config));
|
||||
|
||||
let leader_keypair = cluster_info.read().unwrap().keypair.clone();
|
||||
let leader_keypair = cluster_info.keypair.clone();
|
||||
// Start up the broadcast stage
|
||||
let broadcast_service = BroadcastStage::new(
|
||||
leader_info.sockets.broadcast,
|
||||
|
@@ -28,8 +28,8 @@ impl BroadcastRun for BroadcastFakeShredsRun {
|
||||
&mut self,
|
||||
blockstore: &Arc<Blockstore>,
|
||||
receiver: &Receiver<WorkingBankEntry>,
|
||||
socket_sender: &Sender<TransmitShreds>,
|
||||
blockstore_sender: &Sender<Arc<Vec<Shred>>>,
|
||||
socket_sender: &Sender<(TransmitShreds, Option<BroadcastShredBatchInfo>)>,
|
||||
blockstore_sender: &Sender<(Arc<Vec<Shred>>, Option<BroadcastShredBatchInfo>)>,
|
||||
) -> Result<()> {
|
||||
// 1) Pull entries from banking stage
|
||||
let receive_results = broadcast_utils::recv_slot_entries(receiver)?;
|
||||
@@ -83,26 +83,32 @@ impl BroadcastRun for BroadcastFakeShredsRun {
|
||||
}
|
||||
|
||||
let data_shreds = Arc::new(data_shreds);
|
||||
blockstore_sender.send(data_shreds.clone())?;
|
||||
blockstore_sender.send((data_shreds.clone(), None))?;
|
||||
|
||||
// 3) Start broadcast step
|
||||
//some indicates fake shreds
|
||||
socket_sender.send((Some(Arc::new(HashMap::new())), Arc::new(fake_data_shreds)))?;
|
||||
socket_sender.send((Some(Arc::new(HashMap::new())), Arc::new(fake_coding_shreds)))?;
|
||||
socket_sender.send((
|
||||
(Some(Arc::new(HashMap::new())), Arc::new(fake_data_shreds)),
|
||||
None,
|
||||
))?;
|
||||
socket_sender.send((
|
||||
(Some(Arc::new(HashMap::new())), Arc::new(fake_coding_shreds)),
|
||||
None,
|
||||
))?;
|
||||
//none indicates real shreds
|
||||
socket_sender.send((None, data_shreds))?;
|
||||
socket_sender.send((None, Arc::new(coding_shreds)))?;
|
||||
socket_sender.send(((None, data_shreds), None))?;
|
||||
socket_sender.send(((None, Arc::new(coding_shreds)), None))?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
fn transmit(
|
||||
&mut self,
|
||||
receiver: &Arc<Mutex<Receiver<TransmitShreds>>>,
|
||||
cluster_info: &Arc<RwLock<ClusterInfo>>,
|
||||
receiver: &Arc<Mutex<TransmitReceiver>>,
|
||||
cluster_info: &ClusterInfo,
|
||||
sock: &UdpSocket,
|
||||
) -> Result<()> {
|
||||
for (stakes, data_shreds) in receiver.lock().unwrap().iter() {
|
||||
let peers = cluster_info.read().unwrap().tvu_peers();
|
||||
for ((stakes, data_shreds), _) in receiver.lock().unwrap().iter() {
|
||||
let peers = cluster_info.tvu_peers();
|
||||
peers.iter().enumerate().for_each(|(i, peer)| {
|
||||
if i <= self.partition && stakes.is_some() {
|
||||
// Send fake shreds to the first N peers
|
||||
@@ -119,11 +125,11 @@ impl BroadcastRun for BroadcastFakeShredsRun {
|
||||
Ok(())
|
||||
}
|
||||
fn record(
|
||||
&self,
|
||||
receiver: &Arc<Mutex<Receiver<Arc<Vec<Shred>>>>>,
|
||||
&mut self,
|
||||
receiver: &Arc<Mutex<RecordReceiver>>,
|
||||
blockstore: &Arc<Blockstore>,
|
||||
) -> Result<()> {
|
||||
for data_shreds in receiver.lock().unwrap().iter() {
|
||||
for (data_shreds, _) in receiver.lock().unwrap().iter() {
|
||||
blockstore.insert_shreds(data_shreds.to_vec(), None, true)?;
|
||||
}
|
||||
Ok(())
|
||||
@@ -139,7 +145,7 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_tvu_peers_ordering() {
|
||||
let mut cluster = ClusterInfo::new_with_invalid_keypair(ContactInfo::new_localhost(
|
||||
let cluster = ClusterInfo::new_with_invalid_keypair(ContactInfo::new_localhost(
|
||||
&Pubkey::new_rand(),
|
||||
0,
|
||||
));
|
288
core/src/broadcast_stage/broadcast_metrics.rs
Normal file
@@ -0,0 +1,288 @@
|
||||
use super::*;
|
||||
|
||||
pub(crate) trait BroadcastStats {
|
||||
fn update(&mut self, new_stats: &Self);
|
||||
fn report_stats(&mut self, slot: Slot, slot_start: Instant);
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub(crate) struct BroadcastShredBatchInfo {
|
||||
pub(crate) slot: Slot,
|
||||
pub(crate) num_expected_batches: Option<usize>,
|
||||
pub(crate) slot_start_ts: Instant,
|
||||
}
|
||||
|
||||
#[derive(Default, Clone)]
|
||||
pub(crate) struct ProcessShredsStats {
|
||||
// Per-slot elapsed time
|
||||
pub(crate) shredding_elapsed: u64,
|
||||
pub(crate) receive_elapsed: u64,
|
||||
}
|
||||
impl ProcessShredsStats {
|
||||
pub(crate) fn update(&mut self, new_stats: &ProcessShredsStats) {
|
||||
self.shredding_elapsed += new_stats.shredding_elapsed;
|
||||
self.receive_elapsed += new_stats.receive_elapsed;
|
||||
}
|
||||
pub(crate) fn reset(&mut self) {
|
||||
*self = Self::default();
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Default, Clone)]
|
||||
pub(crate) struct TransmitShredsStats {
|
||||
pub(crate) transmit_elapsed: u64,
|
||||
pub(crate) send_mmsg_elapsed: u64,
|
||||
pub(crate) get_peers_elapsed: u64,
|
||||
pub(crate) num_shreds: usize,
|
||||
}
|
||||
|
||||
impl BroadcastStats for TransmitShredsStats {
|
||||
fn update(&mut self, new_stats: &TransmitShredsStats) {
|
||||
self.transmit_elapsed += new_stats.transmit_elapsed;
|
||||
self.send_mmsg_elapsed += new_stats.send_mmsg_elapsed;
|
||||
self.get_peers_elapsed += new_stats.get_peers_elapsed;
|
||||
self.num_shreds += new_stats.num_shreds;
|
||||
}
|
||||
fn report_stats(&mut self, slot: Slot, slot_start: Instant) {
|
||||
datapoint_info!(
|
||||
"broadcast-transmit-shreds-stats",
|
||||
("slot", slot as i64, i64),
|
||||
(
|
||||
"end_to_end_elapsed",
|
||||
// `slot_start` signals when the first batch of shreds was
|
||||
// received, used to measure duration of broadcast
|
||||
slot_start.elapsed().as_micros() as i64,
|
||||
i64
|
||||
),
|
||||
("transmit_elapsed", self.transmit_elapsed as i64, i64),
|
||||
("send_mmsg_elapsed", self.send_mmsg_elapsed as i64, i64),
|
||||
("get_peers_elapsed", self.get_peers_elapsed as i64, i64),
|
||||
("num_shreds", self.num_shreds as i64, i64),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Default, Clone)]
|
||||
pub(crate) struct InsertShredsStats {
|
||||
pub(crate) insert_shreds_elapsed: u64,
|
||||
pub(crate) num_shreds: usize,
|
||||
}
|
||||
impl BroadcastStats for InsertShredsStats {
|
||||
fn update(&mut self, new_stats: &InsertShredsStats) {
|
||||
self.insert_shreds_elapsed += new_stats.insert_shreds_elapsed;
|
||||
self.num_shreds += new_stats.num_shreds;
|
||||
}
|
||||
fn report_stats(&mut self, slot: Slot, slot_start: Instant) {
|
||||
datapoint_info!(
|
||||
"broadcast-insert-shreds-stats",
|
||||
("slot", slot as i64, i64),
|
||||
(
|
||||
"end_to_end_elapsed",
|
||||
// `slot_start` signals when the first batch of shreds was
|
||||
// received, used to measure duration of broadcast
|
||||
slot_start.elapsed().as_micros() as i64,
|
||||
i64
|
||||
),
|
||||
(
|
||||
"insert_shreds_elapsed",
|
||||
self.insert_shreds_elapsed as i64,
|
||||
i64
|
||||
),
|
||||
("num_shreds", self.num_shreds as i64, i64),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// Tracks metrics of type `T` across multiple threads
|
||||
#[derive(Default)]
|
||||
pub(crate) struct BatchCounter<T: BroadcastStats + Default> {
|
||||
// The number of batches processed across all threads so far
|
||||
num_batches: usize,
|
||||
// Filled in when the last batch of shreds is received,
|
||||
// signals how many batches of shreds to expect
|
||||
num_expected_batches: Option<usize>,
|
||||
broadcast_shred_stats: T,
|
||||
}
|
||||
|
||||
impl<T: BroadcastStats + Default> BatchCounter<T> {
|
||||
#[cfg(test)]
|
||||
pub(crate) fn num_batches(&self) -> usize {
|
||||
self.num_batches
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
pub(crate) struct SlotBroadcastStats<T: BroadcastStats + Default>(HashMap<Slot, BatchCounter<T>>);
|
||||
|
||||
impl<T: BroadcastStats + Default> SlotBroadcastStats<T> {
|
||||
#[cfg(test)]
|
||||
pub(crate) fn get(&self, slot: Slot) -> Option<&BatchCounter<T>> {
|
||||
self.0.get(&slot)
|
||||
}
|
||||
pub(crate) fn update(&mut self, new_stats: &T, batch_info: &Option<BroadcastShredBatchInfo>) {
|
||||
if let Some(batch_info) = batch_info {
|
||||
let mut should_delete = false;
|
||||
{
|
||||
let slot_batch_counter = self.0.entry(batch_info.slot).or_default();
|
||||
slot_batch_counter.broadcast_shred_stats.update(new_stats);
|
||||
// Only count the ones where `broadcast_shred_batch_info`.is_some(), because
|
||||
// there could potentially be other `retransmit` slots inserted into the
|
||||
// transmit pipeline (signaled by ReplayStage) that are not created by the
|
||||
// main shredding/broadcast pipeline
|
||||
slot_batch_counter.num_batches += 1;
|
||||
if let Some(num_expected_batches) = batch_info.num_expected_batches {
|
||||
slot_batch_counter.num_expected_batches = Some(num_expected_batches);
|
||||
}
|
||||
if let Some(num_expected_batches) = slot_batch_counter.num_expected_batches {
|
||||
if slot_batch_counter.num_batches == num_expected_batches {
|
||||
slot_batch_counter
|
||||
.broadcast_shred_stats
|
||||
.report_stats(batch_info.slot, batch_info.slot_start_ts);
|
||||
should_delete = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
if should_delete {
|
||||
self.0
|
||||
.remove(&batch_info.slot)
|
||||
.expect("delete should be successful");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
|
||||
#[derive(Default)]
|
||||
struct TestStats {
|
||||
sender: Option<Sender<(usize, Slot, Instant)>>,
|
||||
count: usize,
|
||||
}
|
||||
|
||||
impl BroadcastStats for TestStats {
|
||||
fn update(&mut self, new_stats: &TestStats) {
|
||||
self.count += new_stats.count;
|
||||
self.sender = new_stats.sender.clone();
|
||||
}
|
||||
fn report_stats(&mut self, slot: Slot, slot_start: Instant) {
|
||||
self.sender
|
||||
.as_ref()
|
||||
.unwrap()
|
||||
.send((self.count, slot, slot_start))
|
||||
.unwrap()
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_update() {
|
||||
let start = Instant::now();
|
||||
let mut slot_broadcast_stats = SlotBroadcastStats::default();
|
||||
slot_broadcast_stats.update(
|
||||
&TransmitShredsStats {
|
||||
transmit_elapsed: 1,
|
||||
get_peers_elapsed: 1,
|
||||
send_mmsg_elapsed: 1,
|
||||
num_shreds: 1,
|
||||
},
|
||||
&Some(BroadcastShredBatchInfo {
|
||||
slot: 0,
|
||||
num_expected_batches: Some(2),
|
||||
slot_start_ts: start.clone(),
|
||||
}),
|
||||
);
|
||||
|
||||
// Singular update
|
||||
let slot_0_stats = slot_broadcast_stats.0.get(&0).unwrap();
|
||||
assert_eq!(slot_0_stats.num_batches, 1);
|
||||
assert_eq!(slot_0_stats.num_expected_batches.unwrap(), 2);
|
||||
assert_eq!(slot_0_stats.broadcast_shred_stats.transmit_elapsed, 1);
|
||||
assert_eq!(slot_0_stats.broadcast_shred_stats.get_peers_elapsed, 1);
|
||||
assert_eq!(slot_0_stats.broadcast_shred_stats.send_mmsg_elapsed, 1);
|
||||
assert_eq!(slot_0_stats.broadcast_shred_stats.num_shreds, 1);
|
||||
|
||||
slot_broadcast_stats.update(
|
||||
&TransmitShredsStats {
|
||||
transmit_elapsed: 1,
|
||||
get_peers_elapsed: 1,
|
||||
send_mmsg_elapsed: 1,
|
||||
num_shreds: 1,
|
||||
},
|
||||
&None,
|
||||
);
|
||||
|
||||
// If BroadcastShredBatchInfo == None, then update should be ignored
|
||||
let slot_0_stats = slot_broadcast_stats.0.get(&0).unwrap();
|
||||
assert_eq!(slot_0_stats.num_batches, 1);
|
||||
assert_eq!(slot_0_stats.num_expected_batches.unwrap(), 2);
|
||||
assert_eq!(slot_0_stats.broadcast_shred_stats.transmit_elapsed, 1);
|
||||
assert_eq!(slot_0_stats.broadcast_shred_stats.get_peers_elapsed, 1);
|
||||
assert_eq!(slot_0_stats.broadcast_shred_stats.send_mmsg_elapsed, 1);
|
||||
assert_eq!(slot_0_stats.broadcast_shred_stats.num_shreds, 1);
|
||||
|
||||
// If another batch is given, then total number of batches == num_expected_batches == 2,
|
||||
// so the batch should be purged from the HashMap
|
||||
slot_broadcast_stats.update(
|
||||
&TransmitShredsStats {
|
||||
transmit_elapsed: 1,
|
||||
get_peers_elapsed: 1,
|
||||
send_mmsg_elapsed: 1,
|
||||
num_shreds: 1,
|
||||
},
|
||||
&Some(BroadcastShredBatchInfo {
|
||||
slot: 0,
|
||||
num_expected_batches: None,
|
||||
slot_start_ts: start.clone(),
|
||||
}),
|
||||
);
|
||||
|
||||
assert!(slot_broadcast_stats.0.get(&0).is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_update_multi_threaded() {
|
||||
for round in 0..50 {
|
||||
let start = Instant::now();
|
||||
let slot_broadcast_stats = Arc::new(Mutex::new(SlotBroadcastStats::default()));
|
||||
let num_threads = 5;
|
||||
let slot = 0;
|
||||
let (sender, receiver) = channel();
|
||||
let thread_handles: Vec<_> = (0..num_threads)
|
||||
.into_iter()
|
||||
.map(|i| {
|
||||
let slot_broadcast_stats = slot_broadcast_stats.clone();
|
||||
let sender = Some(sender.clone());
|
||||
let test_stats = TestStats { sender, count: 1 };
|
||||
let mut broadcast_batch_info = BroadcastShredBatchInfo {
|
||||
slot,
|
||||
num_expected_batches: None,
|
||||
slot_start_ts: start.clone(),
|
||||
};
|
||||
if i == round % num_threads {
|
||||
broadcast_batch_info.num_expected_batches = Some(num_threads);
|
||||
}
|
||||
Builder::new()
|
||||
.name("test_update_multi_threaded".to_string())
|
||||
.spawn(move || {
|
||||
slot_broadcast_stats
|
||||
.lock()
|
||||
.unwrap()
|
||||
.update(&test_stats, &Some(broadcast_batch_info))
|
||||
})
|
||||
.unwrap()
|
||||
})
|
||||
.collect();
|
||||
|
||||
for t in thread_handles {
|
||||
t.join().unwrap();
|
||||
}
|
||||
|
||||
assert!(slot_broadcast_stats.lock().unwrap().0.get(&slot).is_none());
|
||||
let (returned_count, returned_slot, returned_instant) = receiver.recv().unwrap();
|
||||
assert_eq!(returned_count, num_threads);
|
||||
assert_eq!(returned_slot, slot);
|
||||
assert_eq!(returned_instant, start);
|
||||
}
|
||||
}
|
||||
}
|
@@ -23,8 +23,8 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun {
|
||||
&mut self,
|
||||
blockstore: &Arc<Blockstore>,
|
||||
receiver: &Receiver<WorkingBankEntry>,
|
||||
socket_sender: &Sender<TransmitShreds>,
|
||||
blockstore_sender: &Sender<Arc<Vec<Shred>>>,
|
||||
socket_sender: &Sender<(TransmitShreds, Option<BroadcastShredBatchInfo>)>,
|
||||
blockstore_sender: &Sender<(Arc<Vec<Shred>>, Option<BroadcastShredBatchInfo>)>,
|
||||
) -> Result<()> {
|
||||
// 1) Pull entries from banking stage
|
||||
let mut receive_results = broadcast_utils::recv_slot_entries(receiver)?;
|
||||
@@ -61,23 +61,23 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun {
|
||||
);
|
||||
|
||||
let data_shreds = Arc::new(data_shreds);
|
||||
blockstore_sender.send(data_shreds.clone())?;
|
||||
blockstore_sender.send((data_shreds.clone(), None))?;
|
||||
// 3) Start broadcast step
|
||||
let bank_epoch = bank.get_leader_schedule_epoch(bank.slot());
|
||||
let stakes = staking_utils::staked_nodes_at_epoch(&bank, bank_epoch);
|
||||
|
||||
let stakes = stakes.map(Arc::new);
|
||||
socket_sender.send((stakes.clone(), data_shreds))?;
|
||||
socket_sender.send((stakes, Arc::new(coding_shreds)))?;
|
||||
socket_sender.send(((stakes.clone(), data_shreds), None))?;
|
||||
socket_sender.send(((stakes, Arc::new(coding_shreds)), None))?;
|
||||
Ok(())
|
||||
}
|
||||
fn transmit(
|
||||
&mut self,
|
||||
receiver: &Arc<Mutex<Receiver<TransmitShreds>>>,
|
||||
cluster_info: &Arc<RwLock<ClusterInfo>>,
|
||||
receiver: &Arc<Mutex<TransmitReceiver>>,
|
||||
cluster_info: &ClusterInfo,
|
||||
sock: &UdpSocket,
|
||||
) -> Result<()> {
|
||||
let (stakes, shreds) = receiver.lock().unwrap().recv()?;
|
||||
let ((stakes, shreds), _) = receiver.lock().unwrap().recv()?;
|
||||
// Broadcast data
|
||||
let (peers, peers_and_stakes) = get_broadcast_peers(cluster_info, stakes);
|
||||
|
||||
@@ -87,18 +87,18 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun {
|
||||
&shreds,
|
||||
&peers_and_stakes,
|
||||
&peers,
|
||||
&mut Instant::now(),
|
||||
&Arc::new(AtomicU64::new(0)),
|
||||
&mut send_mmsg_total,
|
||||
)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
fn record(
|
||||
&self,
|
||||
receiver: &Arc<Mutex<Receiver<Arc<Vec<Shred>>>>>,
|
||||
&mut self,
|
||||
receiver: &Arc<Mutex<RecordReceiver>>,
|
||||
blockstore: &Arc<Blockstore>,
|
||||
) -> Result<()> {
|
||||
let all_shreds = receiver.lock().unwrap().recv()?;
|
||||
let (all_shreds, _) = receiver.lock().unwrap().recv()?;
|
||||
blockstore
|
||||
.insert_shreds(all_shreds.to_vec(), None, true)
|
||||
.expect("Failed to insert shreds in blockstore");
|
||||
|
@@ -1,5 +1,7 @@
|
||||
use super::broadcast_utils::{self, ReceiveResults};
|
||||
use super::*;
|
||||
use super::{
|
||||
broadcast_utils::{self, ReceiveResults},
|
||||
*,
|
||||
};
|
||||
use crate::broadcast_stage::broadcast_utils::UnfinishedSlotInfo;
|
||||
use solana_ledger::{
|
||||
entry::Entry,
|
||||
@@ -9,44 +11,33 @@ use solana_sdk::{pubkey::Pubkey, signature::Keypair, timing::duration_as_us};
|
||||
use std::collections::HashMap;
|
||||
use std::time::Duration;
|
||||
|
||||
#[derive(Default)]
|
||||
struct BroadcastStats {
|
||||
// Per-slot elapsed time
|
||||
shredding_elapsed: u64,
|
||||
insert_shreds_elapsed: u64,
|
||||
broadcast_elapsed: u64,
|
||||
receive_elapsed: u64,
|
||||
seed_elapsed: u64,
|
||||
send_mmsg_elapsed: u64,
|
||||
}
|
||||
|
||||
impl BroadcastStats {
|
||||
fn reset(&mut self) {
|
||||
*self = Self::default();
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct StandardBroadcastRun {
|
||||
stats: Arc<RwLock<BroadcastStats>>,
|
||||
process_shreds_stats: ProcessShredsStats,
|
||||
transmit_shreds_stats: Arc<Mutex<SlotBroadcastStats<TransmitShredsStats>>>,
|
||||
insert_shreds_stats: Arc<Mutex<SlotBroadcastStats<InsertShredsStats>>>,
|
||||
unfinished_slot: Option<UnfinishedSlotInfo>,
|
||||
current_slot_and_parent: Option<(u64, u64)>,
|
||||
slot_broadcast_start: Option<Instant>,
|
||||
keypair: Arc<Keypair>,
|
||||
shred_version: u16,
|
||||
last_datapoint_submit: Instant,
|
||||
last_datapoint_submit: Arc<AtomicU64>,
|
||||
num_batches: usize,
|
||||
}
|
||||
|
||||
impl StandardBroadcastRun {
|
||||
pub(super) fn new(keypair: Arc<Keypair>, shred_version: u16) -> Self {
|
||||
Self {
|
||||
stats: Arc::new(RwLock::new(BroadcastStats::default())),
|
||||
process_shreds_stats: ProcessShredsStats::default(),
|
||||
transmit_shreds_stats: Arc::new(Mutex::new(SlotBroadcastStats::default())),
|
||||
insert_shreds_stats: Arc::new(Mutex::new(SlotBroadcastStats::default())),
|
||||
unfinished_slot: None,
|
||||
current_slot_and_parent: None,
|
||||
slot_broadcast_start: None,
|
||||
keypair,
|
||||
shred_version,
|
||||
last_datapoint_submit: Instant::now(),
|
||||
last_datapoint_submit: Arc::new(AtomicU64::new(0)),
|
||||
num_batches: 0,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -129,7 +120,7 @@ impl StandardBroadcastRun {
|
||||
#[cfg(test)]
|
||||
fn test_process_receive_results(
|
||||
&mut self,
|
||||
cluster_info: &Arc<RwLock<ClusterInfo>>,
|
||||
cluster_info: &ClusterInfo,
|
||||
sock: &UdpSocket,
|
||||
blockstore: &Arc<Blockstore>,
|
||||
receive_results: ReceiveResults,
|
||||
@@ -141,6 +132,7 @@ impl StandardBroadcastRun {
|
||||
let brecv = Arc::new(Mutex::new(brecv));
|
||||
//data
|
||||
let _ = self.transmit(&srecv, cluster_info, sock);
|
||||
let _ = self.record(&brecv, blockstore);
|
||||
//coding
|
||||
let _ = self.transmit(&srecv, cluster_info, sock);
|
||||
let _ = self.record(&brecv, blockstore);
|
||||
@@ -150,8 +142,8 @@ impl StandardBroadcastRun {
|
||||
fn process_receive_results(
|
||||
&mut self,
|
||||
blockstore: &Arc<Blockstore>,
|
||||
socket_sender: &Sender<TransmitShreds>,
|
||||
blockstore_sender: &Sender<Arc<Vec<Shred>>>,
|
||||
socket_sender: &Sender<(TransmitShreds, Option<BroadcastShredBatchInfo>)>,
|
||||
blockstore_sender: &Sender<(Arc<Vec<Shred>>, Option<BroadcastShredBatchInfo>)>,
|
||||
receive_results: ReceiveResults,
|
||||
) -> Result<()> {
|
||||
let mut receive_elapsed = receive_results.time_elapsed;
|
||||
@@ -159,11 +151,13 @@ impl StandardBroadcastRun {
|
||||
let bank = receive_results.bank.clone();
|
||||
let last_tick_height = receive_results.last_tick_height;
|
||||
inc_new_counter_info!("broadcast_service-entries_received", num_entries);
|
||||
|
||||
let old_broadcast_start = self.slot_broadcast_start;
|
||||
let old_num_batches = self.num_batches;
|
||||
if self.current_slot_and_parent.is_none()
|
||||
|| bank.slot() != self.current_slot_and_parent.unwrap().0
|
||||
{
|
||||
self.slot_broadcast_start = Some(Instant::now());
|
||||
self.num_batches = 0;
|
||||
let slot = bank.slot();
|
||||
let parent_slot = bank.parent_slot();
|
||||
|
||||
@@ -178,19 +172,19 @@ impl StandardBroadcastRun {
|
||||
self.check_for_interrupted_slot(bank.ticks_per_slot() as u8);
|
||||
|
||||
// 2) Convert entries to shreds and coding shreds
|
||||
|
||||
let (shredder, next_shred_index) = self.init_shredder(
|
||||
blockstore,
|
||||
(bank.tick_height() % bank.ticks_per_slot()) as u8,
|
||||
);
|
||||
let mut data_shreds = self.entries_to_data_shreds(
|
||||
let is_last_in_slot = last_tick_height == bank.max_tick_height();
|
||||
let data_shreds = self.entries_to_data_shreds(
|
||||
&shredder,
|
||||
next_shred_index,
|
||||
&receive_results.entries,
|
||||
last_tick_height == bank.max_tick_height(),
|
||||
is_last_in_slot,
|
||||
);
|
||||
//Insert the first shred so blockstore stores that the leader started this block
|
||||
//This must be done before the blocks are sent out over the wire.
|
||||
// Insert the first shred so blockstore stores that the leader started this block
|
||||
// This must be done before the blocks are sent out over the wire.
|
||||
if !data_shreds.is_empty() && data_shreds[0].index() == 0 {
|
||||
let first = vec![data_shreds[0].clone()];
|
||||
blockstore
|
||||
@@ -198,27 +192,56 @@ impl StandardBroadcastRun {
|
||||
.expect("Failed to insert shreds in blockstore");
|
||||
}
|
||||
let last_data_shred = data_shreds.len();
|
||||
if let Some(last_shred) = last_unfinished_slot_shred {
|
||||
data_shreds.push(last_shred);
|
||||
}
|
||||
let to_shreds_elapsed = to_shreds_start.elapsed();
|
||||
|
||||
let bank_epoch = bank.get_leader_schedule_epoch(bank.slot());
|
||||
let stakes = staking_utils::staked_nodes_at_epoch(&bank, bank_epoch);
|
||||
let stakes = stakes.map(Arc::new);
|
||||
let data_shreds = Arc::new(data_shreds);
|
||||
socket_sender.send((stakes.clone(), data_shreds.clone()))?;
|
||||
blockstore_sender.send(data_shreds.clone())?;
|
||||
let coding_shreds = shredder.data_shreds_to_coding_shreds(&data_shreds[0..last_data_shred]);
|
||||
let coding_shreds = Arc::new(coding_shreds);
|
||||
socket_sender.send((stakes, coding_shreds.clone()))?;
|
||||
blockstore_sender.send(coding_shreds)?;
|
||||
self.update_broadcast_stats(BroadcastStats {
|
||||
shredding_elapsed: duration_as_us(&to_shreds_elapsed),
|
||||
receive_elapsed: duration_as_us(&receive_elapsed),
|
||||
..BroadcastStats::default()
|
||||
|
||||
// Broadcast the last shred of the interrupted slot if necessary
|
||||
if let Some(last_shred) = last_unfinished_slot_shred {
|
||||
let batch_info = Some(BroadcastShredBatchInfo {
|
||||
slot: last_shred.slot(),
|
||||
num_expected_batches: Some(old_num_batches + 1),
|
||||
slot_start_ts: old_broadcast_start.expect(
|
||||
"Old broadcast start time for previous slot must exist if the previous slot
|
||||
was interrupted",
|
||||
),
|
||||
});
|
||||
let last_shred = Arc::new(vec![last_shred]);
|
||||
socket_sender.send(((stakes.clone(), last_shred.clone()), batch_info.clone()))?;
|
||||
blockstore_sender.send((last_shred, batch_info))?;
|
||||
}
|
||||
|
||||
// Increment by two batches, one for the data batch, one for the coding batch.
|
||||
self.num_batches += 2;
|
||||
let num_expected_batches = {
|
||||
if is_last_in_slot {
|
||||
Some(self.num_batches)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
};
|
||||
let batch_info = Some(BroadcastShredBatchInfo {
|
||||
slot: bank.slot(),
|
||||
num_expected_batches,
|
||||
slot_start_ts: self
|
||||
.slot_broadcast_start
|
||||
.clone()
|
||||
.expect("Start timestamp must exist for a slot if we're broadcasting the slot"),
|
||||
});
|
||||
|
||||
let data_shreds = Arc::new(data_shreds);
|
||||
socket_sender.send(((stakes.clone(), data_shreds.clone()), batch_info.clone()))?;
|
||||
blockstore_sender.send((data_shreds.clone(), batch_info.clone()))?;
|
||||
let coding_shreds = shredder.data_shreds_to_coding_shreds(&data_shreds[0..last_data_shred]);
|
||||
let coding_shreds = Arc::new(coding_shreds);
|
||||
socket_sender.send(((stakes, coding_shreds.clone()), batch_info.clone()))?;
|
||||
blockstore_sender.send((coding_shreds, batch_info))?;
|
||||
self.process_shreds_stats.update(&ProcessShredsStats {
|
||||
shredding_elapsed: duration_as_us(&to_shreds_elapsed),
|
||||
receive_elapsed: duration_as_us(&receive_elapsed),
|
||||
});
|
||||
if last_tick_height == bank.max_tick_height() {
|
||||
self.report_and_reset_stats();
|
||||
self.unfinished_slot = None;
|
||||
@@ -227,10 +250,15 @@ impl StandardBroadcastRun {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn insert(&self, blockstore: &Arc<Blockstore>, shreds: Arc<Vec<Shred>>) -> Result<()> {
|
||||
fn insert(
|
||||
&mut self,
|
||||
blockstore: &Arc<Blockstore>,
|
||||
shreds: Arc<Vec<Shred>>,
|
||||
broadcast_shred_batch_info: Option<BroadcastShredBatchInfo>,
|
||||
) -> Result<()> {
|
||||
// Insert shreds into blockstore
|
||||
let insert_shreds_start = Instant::now();
|
||||
//The first shred is inserted synchronously
|
||||
// The first shred is inserted synchronously
|
||||
let data_shreds = if !shreds.is_empty() && shreds[0].index() == 0 {
|
||||
shreds[1..].to_vec()
|
||||
} else {
|
||||
@@ -240,74 +268,80 @@ impl StandardBroadcastRun {
|
||||
.insert_shreds(data_shreds, None, true)
|
||||
.expect("Failed to insert shreds in blockstore");
|
||||
let insert_shreds_elapsed = insert_shreds_start.elapsed();
|
||||
self.update_broadcast_stats(BroadcastStats {
|
||||
let new_insert_shreds_stats = InsertShredsStats {
|
||||
insert_shreds_elapsed: duration_as_us(&insert_shreds_elapsed),
|
||||
..BroadcastStats::default()
|
||||
});
|
||||
num_shreds: shreds.len(),
|
||||
};
|
||||
self.update_insertion_metrics(&new_insert_shreds_stats, &broadcast_shred_batch_info);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn update_insertion_metrics(
|
||||
&mut self,
|
||||
new_insertion_shreds_stats: &InsertShredsStats,
|
||||
broadcast_shred_batch_info: &Option<BroadcastShredBatchInfo>,
|
||||
) {
|
||||
let mut insert_shreds_stats = self.insert_shreds_stats.lock().unwrap();
|
||||
insert_shreds_stats.update(new_insertion_shreds_stats, broadcast_shred_batch_info);
|
||||
}
|
||||
|
||||
fn broadcast(
|
||||
&mut self,
|
||||
sock: &UdpSocket,
|
||||
cluster_info: &Arc<RwLock<ClusterInfo>>,
|
||||
cluster_info: &ClusterInfo,
|
||||
stakes: Option<Arc<HashMap<Pubkey, u64>>>,
|
||||
shreds: Arc<Vec<Shred>>,
|
||||
broadcast_shred_batch_info: Option<BroadcastShredBatchInfo>,
|
||||
) -> Result<()> {
|
||||
let seed_start = Instant::now();
|
||||
let seed_elapsed = seed_start.elapsed();
|
||||
trace!("Broadcasting {:?} shreds", shreds.len());
|
||||
// Get the list of peers to broadcast to
|
||||
let get_peers_start = Instant::now();
|
||||
let (peers, peers_and_stakes) = get_broadcast_peers(cluster_info, stakes);
|
||||
let get_peers_elapsed = get_peers_start.elapsed();
|
||||
|
||||
// Broadcast the shreds
|
||||
let broadcast_start = Instant::now();
|
||||
trace!("Broadcasting {:?} shreds", shreds.len());
|
||||
|
||||
let (peers, peers_and_stakes) = get_broadcast_peers(cluster_info, stakes);
|
||||
|
||||
let transmit_start = Instant::now();
|
||||
let mut send_mmsg_total = 0;
|
||||
broadcast_shreds(
|
||||
sock,
|
||||
&shreds,
|
||||
&peers_and_stakes,
|
||||
&peers,
|
||||
&mut self.last_datapoint_submit,
|
||||
&self.last_datapoint_submit,
|
||||
&mut send_mmsg_total,
|
||||
)?;
|
||||
|
||||
let broadcast_elapsed = broadcast_start.elapsed();
|
||||
|
||||
self.update_broadcast_stats(BroadcastStats {
|
||||
broadcast_elapsed: duration_as_us(&broadcast_elapsed),
|
||||
seed_elapsed: duration_as_us(&seed_elapsed),
|
||||
let transmit_elapsed = transmit_start.elapsed();
|
||||
let new_transmit_shreds_stats = TransmitShredsStats {
|
||||
transmit_elapsed: duration_as_us(&transmit_elapsed),
|
||||
get_peers_elapsed: duration_as_us(&get_peers_elapsed),
|
||||
send_mmsg_elapsed: send_mmsg_total,
|
||||
..BroadcastStats::default()
|
||||
});
|
||||
num_shreds: shreds.len(),
|
||||
};
|
||||
|
||||
// Process metrics
|
||||
self.update_transmit_metrics(&new_transmit_shreds_stats, &broadcast_shred_batch_info);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn update_broadcast_stats(&self, stats: BroadcastStats) {
|
||||
let mut wstats = self.stats.write().unwrap();
|
||||
wstats.receive_elapsed += stats.receive_elapsed;
|
||||
wstats.shredding_elapsed += stats.shredding_elapsed;
|
||||
wstats.insert_shreds_elapsed += stats.insert_shreds_elapsed;
|
||||
wstats.broadcast_elapsed += stats.broadcast_elapsed;
|
||||
wstats.seed_elapsed += stats.seed_elapsed;
|
||||
wstats.send_mmsg_elapsed += stats.send_mmsg_elapsed;
|
||||
fn update_transmit_metrics(
|
||||
&mut self,
|
||||
new_transmit_shreds_stats: &TransmitShredsStats,
|
||||
broadcast_shred_batch_info: &Option<BroadcastShredBatchInfo>,
|
||||
) {
|
||||
let mut transmit_shreds_stats = self.transmit_shreds_stats.lock().unwrap();
|
||||
transmit_shreds_stats.update(new_transmit_shreds_stats, broadcast_shred_batch_info);
|
||||
}
|
||||
|
||||
fn report_and_reset_stats(&mut self) {
|
||||
let stats = self.stats.read().unwrap();
|
||||
let stats = &self.process_shreds_stats;
|
||||
assert!(self.unfinished_slot.is_some());
|
||||
datapoint_info!(
|
||||
"broadcast-bank-stats",
|
||||
"broadcast-process-shreds-stats",
|
||||
("slot", self.unfinished_slot.unwrap().slot as i64, i64),
|
||||
("shredding_time", stats.shredding_elapsed as i64, i64),
|
||||
("insertion_time", stats.insert_shreds_elapsed as i64, i64),
|
||||
("broadcast_time", stats.broadcast_elapsed as i64, i64),
|
||||
("receive_time", stats.receive_elapsed as i64, i64),
|
||||
("send_mmsg", stats.send_mmsg_elapsed as i64, i64),
|
||||
("seed", stats.seed_elapsed as i64, i64),
|
||||
(
|
||||
"num_shreds",
|
||||
"num_data_shreds",
|
||||
i64::from(self.unfinished_slot.unwrap().next_shred_index),
|
||||
i64
|
||||
),
|
||||
@@ -317,8 +351,7 @@ impl StandardBroadcastRun {
|
||||
i64
|
||||
),
|
||||
);
|
||||
drop(stats);
|
||||
self.stats.write().unwrap().reset();
|
||||
self.process_shreds_stats.reset();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -327,8 +360,8 @@ impl BroadcastRun for StandardBroadcastRun {
|
||||
&mut self,
|
||||
blockstore: &Arc<Blockstore>,
|
||||
receiver: &Receiver<WorkingBankEntry>,
|
||||
socket_sender: &Sender<TransmitShreds>,
|
||||
blockstore_sender: &Sender<Arc<Vec<Shred>>>,
|
||||
socket_sender: &Sender<(TransmitShreds, Option<BroadcastShredBatchInfo>)>,
|
||||
blockstore_sender: &Sender<(Arc<Vec<Shred>>, Option<BroadcastShredBatchInfo>)>,
|
||||
) -> Result<()> {
|
||||
let receive_results = broadcast_utils::recv_slot_entries(receiver)?;
|
||||
self.process_receive_results(
|
||||
@@ -340,20 +373,20 @@ impl BroadcastRun for StandardBroadcastRun {
|
||||
}
|
||||
fn transmit(
|
||||
&mut self,
|
||||
receiver: &Arc<Mutex<Receiver<TransmitShreds>>>,
|
||||
cluster_info: &Arc<RwLock<ClusterInfo>>,
|
||||
receiver: &Arc<Mutex<TransmitReceiver>>,
|
||||
cluster_info: &ClusterInfo,
|
||||
sock: &UdpSocket,
|
||||
) -> Result<()> {
|
||||
let (stakes, shreds) = receiver.lock().unwrap().recv()?;
|
||||
self.broadcast(sock, cluster_info, stakes, shreds)
|
||||
let ((stakes, shreds), slot_start_ts) = receiver.lock().unwrap().recv()?;
|
||||
self.broadcast(sock, cluster_info, stakes, shreds, slot_start_ts)
|
||||
}
|
||||
fn record(
|
||||
&self,
|
||||
receiver: &Arc<Mutex<Receiver<Arc<Vec<Shred>>>>>,
|
||||
&mut self,
|
||||
receiver: &Arc<Mutex<RecordReceiver>>,
|
||||
blockstore: &Arc<Blockstore>,
|
||||
) -> Result<()> {
|
||||
let shreds = receiver.lock().unwrap().recv()?;
|
||||
self.insert(blockstore, shreds)
|
||||
let (shreds, slot_start_ts) = receiver.lock().unwrap().recv()?;
|
||||
self.insert(blockstore, shreds, slot_start_ts)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -371,7 +404,7 @@ mod test {
|
||||
genesis_config::GenesisConfig,
|
||||
signature::{Keypair, Signer},
|
||||
};
|
||||
use std::sync::{Arc, RwLock};
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
fn setup(
|
||||
@@ -379,7 +412,7 @@ mod test {
|
||||
) -> (
|
||||
Arc<Blockstore>,
|
||||
GenesisConfig,
|
||||
Arc<RwLock<ClusterInfo>>,
|
||||
Arc<ClusterInfo>,
|
||||
Arc<Bank>,
|
||||
Arc<Keypair>,
|
||||
UdpSocket,
|
||||
@@ -392,9 +425,9 @@ mod test {
|
||||
let leader_keypair = Arc::new(Keypair::new());
|
||||
let leader_pubkey = leader_keypair.pubkey();
|
||||
let leader_info = Node::new_localhost_with_pubkey(&leader_pubkey);
|
||||
let cluster_info = Arc::new(RwLock::new(ClusterInfo::new_with_invalid_keypair(
|
||||
let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(
|
||||
leader_info.info.clone(),
|
||||
)));
|
||||
));
|
||||
let socket = UdpSocket::bind("0.0.0.0:0").unwrap();
|
||||
let mut genesis_config = create_genesis_config(10_000).genesis_config;
|
||||
genesis_config.ticks_per_slot = max_ticks_per_n_shreds(num_shreds_per_slot) + 1;
|
||||
@@ -469,12 +502,29 @@ mod test {
|
||||
// Make sure the slot is not complete
|
||||
assert!(!blockstore.is_full(0));
|
||||
// Modify the stats, should reset later
|
||||
standard_broadcast_run
|
||||
.stats
|
||||
.write()
|
||||
.unwrap()
|
||||
.receive_elapsed = 10;
|
||||
|
||||
standard_broadcast_run.process_shreds_stats.receive_elapsed = 10;
|
||||
// Broadcast stats should exist, and 2 batches should have been sent,
|
||||
// one for data, one for coding
|
||||
assert_eq!(
|
||||
standard_broadcast_run
|
||||
.transmit_shreds_stats
|
||||
.lock()
|
||||
.unwrap()
|
||||
.get(unfinished_slot.slot)
|
||||
.unwrap()
|
||||
.num_batches(),
|
||||
2
|
||||
);
|
||||
assert_eq!(
|
||||
standard_broadcast_run
|
||||
.insert_shreds_stats
|
||||
.lock()
|
||||
.unwrap()
|
||||
.get(unfinished_slot.slot)
|
||||
.unwrap()
|
||||
.num_batches(),
|
||||
2
|
||||
);
|
||||
// Try to fetch ticks from blockstore, nothing should break
|
||||
assert_eq!(blockstore.get_slot_entries(0, 0).unwrap(), ticks0);
|
||||
assert_eq!(
|
||||
@@ -485,7 +535,7 @@ mod test {
|
||||
// Step 2: Make a transmission for another bank that interrupts the transmission for
|
||||
// slot 0
|
||||
let bank2 = Arc::new(Bank::new_from_parent(&bank0, &leader_keypair.pubkey(), 2));
|
||||
|
||||
let interrupted_slot = unfinished_slot.slot;
|
||||
// Interrupting the slot should cause the unfinished_slot and stats to reset
|
||||
let num_shreds = 1;
|
||||
assert!(num_shreds < num_shreds_per_slot);
|
||||
@@ -509,10 +559,24 @@ mod test {
|
||||
|
||||
// Check that the stats were reset as well
|
||||
assert_eq!(
|
||||
standard_broadcast_run.stats.read().unwrap().receive_elapsed,
|
||||
standard_broadcast_run.process_shreds_stats.receive_elapsed,
|
||||
0
|
||||
);
|
||||
|
||||
// Broadcast stats for interrupted slot should be cleared
|
||||
assert!(standard_broadcast_run
|
||||
.transmit_shreds_stats
|
||||
.lock()
|
||||
.unwrap()
|
||||
.get(interrupted_slot)
|
||||
.is_none());
|
||||
assert!(standard_broadcast_run
|
||||
.insert_shreds_stats
|
||||
.lock()
|
||||
.unwrap()
|
||||
.get(interrupted_slot)
|
||||
.is_none());
|
||||
|
||||
// Try to fetch the incomplete ticks from blockstore, should succeed
|
||||
assert_eq!(blockstore.get_slot_entries(0, 0).unwrap(), ticks0);
|
||||
assert_eq!(
|
||||
|
File diff suppressed because it is too large
Load Diff
@@ -197,7 +197,7 @@ pub struct ClusterInfoVoteListener {
|
||||
impl ClusterInfoVoteListener {
|
||||
pub fn new(
|
||||
exit: &Arc<AtomicBool>,
|
||||
cluster_info: Arc<RwLock<ClusterInfo>>,
|
||||
cluster_info: Arc<ClusterInfo>,
|
||||
sigverify_disabled: bool,
|
||||
sender: CrossbeamSender<Vec<Packets>>,
|
||||
poh_recorder: &Arc<Mutex<PohRecorder>>,
|
||||
@@ -262,7 +262,7 @@ impl ClusterInfoVoteListener {
|
||||
|
||||
fn recv_loop(
|
||||
exit: Arc<AtomicBool>,
|
||||
cluster_info: &Arc<RwLock<ClusterInfo>>,
|
||||
cluster_info: &ClusterInfo,
|
||||
sigverify_disabled: bool,
|
||||
verified_vote_packets_sender: VerifiedVotePacketsSender,
|
||||
verified_vote_transactions_sender: VerifiedVoteTransactionsSender,
|
||||
@@ -272,40 +272,12 @@ impl ClusterInfoVoteListener {
|
||||
if exit.load(Ordering::Relaxed) {
|
||||
return Ok(());
|
||||
}
|
||||
let (labels, votes, new_ts) = cluster_info.read().unwrap().get_votes(last_ts);
|
||||
let (labels, votes, new_ts) = cluster_info.get_votes(last_ts);
|
||||
inc_new_counter_debug!("cluster_info_vote_listener-recv_count", votes.len());
|
||||
|
||||
last_ts = new_ts;
|
||||
let msgs = packet::to_packets(&votes);
|
||||
if !msgs.is_empty() {
|
||||
let r = if sigverify_disabled {
|
||||
sigverify::ed25519_verify_disabled(&msgs)
|
||||
} else {
|
||||
sigverify::ed25519_verify_cpu(&msgs)
|
||||
};
|
||||
|
||||
assert_eq!(
|
||||
r.iter()
|
||||
.map(|packets_results| packets_results.len())
|
||||
.sum::<usize>(),
|
||||
votes.len()
|
||||
);
|
||||
|
||||
let (vote_txs, packets) = izip!(
|
||||
labels.into_iter(),
|
||||
votes.into_iter(),
|
||||
r.iter().flatten(),
|
||||
msgs
|
||||
)
|
||||
.filter_map(|(label, vote, verify_result, packet)| {
|
||||
if *verify_result != 0 {
|
||||
Some((vote, (label, packet)))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
.unzip();
|
||||
|
||||
if !votes.is_empty() {
|
||||
let (vote_txs, packets) = Self::verify_votes(votes, labels, sigverify_disabled);
|
||||
verified_vote_transactions_sender.send(vote_txs)?;
|
||||
verified_vote_packets_sender.send(packets)?;
|
||||
}
|
||||
@@ -314,6 +286,42 @@ impl ClusterInfoVoteListener {
|
||||
}
|
||||
}
|
||||
|
||||
fn verify_votes(
|
||||
votes: Vec<Transaction>,
|
||||
labels: Vec<CrdsValueLabel>,
|
||||
sigverify_disabled: bool,
|
||||
) -> (Vec<Transaction>, Vec<(CrdsValueLabel, Packets)>) {
|
||||
let msgs = packet::to_packets_chunked(&votes, 1);
|
||||
let r = if sigverify_disabled {
|
||||
sigverify::ed25519_verify_disabled(&msgs)
|
||||
} else {
|
||||
sigverify::ed25519_verify_cpu(&msgs)
|
||||
};
|
||||
|
||||
assert_eq!(
|
||||
r.iter()
|
||||
.map(|packets_results| packets_results.len())
|
||||
.sum::<usize>(),
|
||||
votes.len()
|
||||
);
|
||||
|
||||
let (vote_txs, packets) = izip!(
|
||||
labels.into_iter(),
|
||||
votes.into_iter(),
|
||||
r.iter().flatten(),
|
||||
msgs,
|
||||
)
|
||||
.filter_map(|(label, vote, verify_result, packet)| {
|
||||
if *verify_result != 0 {
|
||||
Some((vote, (label, packet)))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
.unzip();
|
||||
(vote_txs, packets)
|
||||
}
|
||||
|
||||
fn bank_send_loop(
|
||||
exit: Arc<AtomicBool>,
|
||||
verified_vote_packets_receiver: VerifiedVotePacketsReceiver,
|
||||
@@ -525,6 +533,7 @@ mod tests {
|
||||
genesis_utils::{self, GenesisConfigInfo, ValidatorVoteKeypairs},
|
||||
};
|
||||
use solana_sdk::hash::Hash;
|
||||
use solana_sdk::signature::Signature;
|
||||
use solana_sdk::signature::{Keypair, Signer};
|
||||
use solana_vote_program::vote_transaction;
|
||||
|
||||
@@ -971,4 +980,61 @@ mod tests {
|
||||
validator_voting_keypairs,
|
||||
)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_verify_votes_empty() {
|
||||
solana_logger::setup();
|
||||
let votes = vec![];
|
||||
let labels = vec![];
|
||||
let (vote_txs, packets) = ClusterInfoVoteListener::verify_votes(votes, labels, false);
|
||||
assert!(vote_txs.is_empty());
|
||||
assert!(packets.is_empty());
|
||||
}
|
||||
|
||||
fn verify_packets_len(packets: &Vec<(CrdsValueLabel, Packets)>, ref_value: usize) {
|
||||
let num_packets: usize = packets.iter().map(|p| p.1.packets.len()).sum();
|
||||
assert_eq!(num_packets, ref_value);
|
||||
}
|
||||
|
||||
fn test_vote_tx() -> Transaction {
|
||||
let node_keypair = Keypair::new();
|
||||
let vote_keypair = Keypair::new();
|
||||
let auth_voter_keypair = Keypair::new();
|
||||
let vote_tx = vote_transaction::new_vote_transaction(
|
||||
vec![0],
|
||||
Hash::default(),
|
||||
Hash::default(),
|
||||
&node_keypair,
|
||||
&vote_keypair,
|
||||
&auth_voter_keypair,
|
||||
);
|
||||
|
||||
vote_tx
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_verify_votes_1_pass() {
|
||||
let vote_tx = test_vote_tx();
|
||||
let votes = vec![vote_tx.clone()];
|
||||
let labels = vec![CrdsValueLabel::Vote(0, Pubkey::new_rand())];
|
||||
let (vote_txs, packets) = ClusterInfoVoteListener::verify_votes(votes, labels, false);
|
||||
assert_eq!(vote_txs.len(), 1);
|
||||
verify_packets_len(&packets, 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_bad_vote() {
|
||||
let vote_tx = test_vote_tx();
|
||||
let mut bad_vote = vote_tx.clone();
|
||||
bad_vote.signatures[0] = Signature::default();
|
||||
let votes = vec![vote_tx.clone(), bad_vote, vote_tx];
|
||||
let label = CrdsValueLabel::Vote(0, Pubkey::new_rand());
|
||||
let labels: Vec<_> = (0..votes.len())
|
||||
.into_iter()
|
||||
.map(|_| label.clone())
|
||||
.collect();
|
||||
let (vote_txs, packets) = ClusterInfoVoteListener::verify_votes(votes, labels, false);
|
||||
assert_eq!(vote_txs.len(), 2);
|
||||
verify_packets_len(&packets, 2);
|
||||
}
|
||||
}
|
||||
|
@@ -27,15 +27,10 @@ impl ClusterSlots {
|
||||
pub fn lookup(&self, slot: Slot) -> Option<Arc<RwLock<SlotPubkeys>>> {
|
||||
self.cluster_slots.read().unwrap().get(&slot).cloned()
|
||||
}
|
||||
pub fn update(
|
||||
&self,
|
||||
root: Slot,
|
||||
cluster_info: &RwLock<ClusterInfo>,
|
||||
bank_forks: &RwLock<BankForks>,
|
||||
) {
|
||||
pub fn update(&self, root: Slot, cluster_info: &ClusterInfo, bank_forks: &RwLock<BankForks>) {
|
||||
self.update_peers(cluster_info, bank_forks);
|
||||
let since = *self.since.read().unwrap();
|
||||
let epoch_slots = cluster_info.read().unwrap().get_epoch_slots_since(since);
|
||||
let epoch_slots = cluster_info.get_epoch_slots_since(since);
|
||||
self.update_internal(root, epoch_slots);
|
||||
}
|
||||
fn update_internal(&self, root: Slot, epoch_slots: (Vec<EpochSlots>, Option<u64>)) {
|
||||
@@ -95,7 +90,7 @@ impl ClusterSlots {
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn update_peers(&self, cluster_info: &RwLock<ClusterInfo>, bank_forks: &RwLock<BankForks>) {
|
||||
fn update_peers(&self, cluster_info: &ClusterInfo, bank_forks: &RwLock<BankForks>) {
|
||||
let root_bank = bank_forks.read().unwrap().root_bank().clone();
|
||||
let root_epoch = root_bank.epoch();
|
||||
let my_epoch = *self.epoch.read().unwrap();
|
||||
@@ -111,7 +106,7 @@ impl ClusterSlots {
|
||||
.clone();
|
||||
|
||||
*self.validator_stakes.write().unwrap() = validator_stakes;
|
||||
let id = cluster_info.read().unwrap().id();
|
||||
let id = cluster_info.id();
|
||||
*self.self_id.write().unwrap() = id;
|
||||
*self.epoch.write().unwrap() = Some(root_epoch);
|
||||
}
|
||||
|
@@ -13,9 +13,11 @@ use std::{
|
||||
time::Duration,
|
||||
};
|
||||
|
||||
pub type BlockCommitmentArray = [u64; MAX_LOCKOUT_HISTORY + 1];
|
||||
|
||||
#[derive(Clone, Debug, Default, Eq, PartialEq, Serialize, Deserialize)]
|
||||
pub struct BlockCommitment {
|
||||
pub commitment: [u64; MAX_LOCKOUT_HISTORY],
|
||||
pub commitment: BlockCommitmentArray,
|
||||
}
|
||||
|
||||
impl BlockCommitment {
|
||||
@@ -28,8 +30,17 @@ impl BlockCommitment {
|
||||
assert!(confirmation_count > 0 && confirmation_count <= MAX_LOCKOUT_HISTORY);
|
||||
self.commitment[confirmation_count - 1]
|
||||
}
|
||||
|
||||
pub fn increase_rooted_stake(&mut self, stake: u64) {
|
||||
self.commitment[MAX_LOCKOUT_HISTORY] += stake;
|
||||
}
|
||||
|
||||
pub fn get_rooted_stake(&self) -> u64 {
|
||||
self.commitment[MAX_LOCKOUT_HISTORY]
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub(crate) fn new(commitment: [u64; MAX_LOCKOUT_HISTORY]) -> Self {
|
||||
pub(crate) fn new(commitment: BlockCommitmentArray) -> Self {
|
||||
Self { commitment }
|
||||
}
|
||||
}
|
||||
@@ -110,6 +121,16 @@ impl BlockCommitmentCache {
|
||||
0
|
||||
})
|
||||
}
|
||||
|
||||
pub fn is_confirmed_rooted(&self, slot: Slot) -> bool {
|
||||
self.get_block_commitment(slot)
|
||||
.map(|block_commitment| {
|
||||
(block_commitment.get_rooted_stake() as f64 / self.total_stake as f64)
|
||||
> VOTE_THRESHOLD_SIZE
|
||||
})
|
||||
.unwrap_or(false)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub fn new_for_tests() -> Self {
|
||||
let mut block_commitment: HashMap<Slot, BlockCommitment> = HashMap::new();
|
||||
@@ -259,7 +280,7 @@ impl AggregateCommitmentService {
|
||||
commitment
|
||||
.entry(*a)
|
||||
.or_insert_with(BlockCommitment::default)
|
||||
.increase_confirmation_stake(MAX_LOCKOUT_HISTORY, lamports);
|
||||
.increase_rooted_stake(lamports);
|
||||
} else {
|
||||
ancestors_index = i;
|
||||
break;
|
||||
@@ -351,7 +372,7 @@ mod tests {
|
||||
|
||||
for a in ancestors {
|
||||
let mut expected = BlockCommitment::default();
|
||||
expected.increase_confirmation_stake(MAX_LOCKOUT_HISTORY, lamports);
|
||||
expected.increase_rooted_stake(lamports);
|
||||
assert_eq!(*commitment.get(&a).unwrap(), expected);
|
||||
}
|
||||
}
|
||||
@@ -376,7 +397,7 @@ mod tests {
|
||||
for a in ancestors {
|
||||
if a <= root {
|
||||
let mut expected = BlockCommitment::default();
|
||||
expected.increase_confirmation_stake(MAX_LOCKOUT_HISTORY, lamports);
|
||||
expected.increase_rooted_stake(lamports);
|
||||
assert_eq!(*commitment.get(&a).unwrap(), expected);
|
||||
} else {
|
||||
let mut expected = BlockCommitment::default();
|
||||
@@ -408,7 +429,7 @@ mod tests {
|
||||
for (i, a) in ancestors.iter().enumerate() {
|
||||
if *a <= root {
|
||||
let mut expected = BlockCommitment::default();
|
||||
expected.increase_confirmation_stake(MAX_LOCKOUT_HISTORY, lamports);
|
||||
expected.increase_rooted_stake(lamports);
|
||||
assert_eq!(*commitment.get(&a).unwrap(), expected);
|
||||
} else if i <= 4 {
|
||||
let mut expected = BlockCommitment::default();
|
||||
|
@@ -278,7 +278,7 @@ impl CrdsGossipPull {
|
||||
failed
|
||||
}
|
||||
// build a set of filters of the current crds table
|
||||
// num_filters - used to increase the likely hood of a value in crds being added to some filter
|
||||
// num_filters - used to increase the likelyhood of a value in crds being added to some filter
|
||||
pub fn build_crds_filters(&self, crds: &Crds, bloom_size: usize) -> Vec<CrdsFilter> {
|
||||
let num = cmp::max(
|
||||
CRDS_GOSSIP_DEFAULT_BLOOM_ITEMS,
|
||||
|
@@ -84,6 +84,9 @@ impl Uncompressed {
|
||||
(min_slot - self.first_slot) as usize
|
||||
};
|
||||
for i in start..self.num {
|
||||
if i >= self.slots.len() as usize {
|
||||
break;
|
||||
}
|
||||
if self.slots.get(i as u64) {
|
||||
rv.push(self.first_slot + i as Slot);
|
||||
}
|
||||
@@ -256,6 +259,14 @@ mod tests {
|
||||
assert_eq!(slots.to_slots(1), vec![1]);
|
||||
assert!(slots.to_slots(2).is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_epoch_slots_to_slots_overflow() {
|
||||
let mut slots = Uncompressed::new(1);
|
||||
slots.num = 100;
|
||||
assert!(slots.to_slots(0).is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_epoch_slots_uncompressed_add_2() {
|
||||
let mut slots = Uncompressed::new(1);
|
||||
|
@@ -22,7 +22,7 @@ pub struct GossipService {
|
||||
|
||||
impl GossipService {
|
||||
pub fn new(
|
||||
cluster_info: &Arc<RwLock<ClusterInfo>>,
|
||||
cluster_info: &Arc<ClusterInfo>,
|
||||
bank_forks: Option<Arc<RwLock<BankForks>>>,
|
||||
gossip_socket: UdpSocket,
|
||||
exit: &Arc<AtomicBool>,
|
||||
@@ -31,7 +31,7 @@ impl GossipService {
|
||||
let gossip_socket = Arc::new(gossip_socket);
|
||||
trace!(
|
||||
"GossipService: id: {}, listening on: {:?}",
|
||||
&cluster_info.read().unwrap().my_data().id,
|
||||
&cluster_info.id(),
|
||||
gossip_socket.local_addr().unwrap()
|
||||
);
|
||||
let t_receiver = streamer::receiver(
|
||||
@@ -89,7 +89,7 @@ pub fn discover(
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let (gossip_service, ip_echo, spy_ref) = make_gossip_node(entrypoint, &exit, my_gossip_addr);
|
||||
|
||||
let id = spy_ref.read().unwrap().keypair.pubkey();
|
||||
let id = spy_ref.id();
|
||||
info!("Entrypoint: {:?}", entrypoint);
|
||||
info!("Node Id: {:?}", id);
|
||||
if let Some(my_gossip_addr) = my_gossip_addr {
|
||||
@@ -113,7 +113,7 @@ pub fn discover(
|
||||
info!(
|
||||
"discover success in {}s...\n{}",
|
||||
secs,
|
||||
spy_ref.read().unwrap().contact_info_trace()
|
||||
spy_ref.contact_info_trace()
|
||||
);
|
||||
return Ok((tvu_peers, storage_peers));
|
||||
}
|
||||
@@ -121,15 +121,12 @@ pub fn discover(
|
||||
if !tvu_peers.is_empty() {
|
||||
info!(
|
||||
"discover failed to match criteria by timeout...\n{}",
|
||||
spy_ref.read().unwrap().contact_info_trace()
|
||||
spy_ref.contact_info_trace()
|
||||
);
|
||||
return Ok((tvu_peers, storage_peers));
|
||||
}
|
||||
|
||||
info!(
|
||||
"discover failed...\n{}",
|
||||
spy_ref.read().unwrap().contact_info_trace()
|
||||
);
|
||||
info!("discover failed...\n{}", spy_ref.contact_info_trace());
|
||||
Err(std::io::Error::new(
|
||||
std::io::ErrorKind::Other,
|
||||
"Discover failed",
|
||||
@@ -176,7 +173,7 @@ pub fn get_multi_client(nodes: &[ContactInfo]) -> (ThinClient, usize) {
|
||||
}
|
||||
|
||||
fn spy(
|
||||
spy_ref: Arc<RwLock<ClusterInfo>>,
|
||||
spy_ref: Arc<ClusterInfo>,
|
||||
num_nodes: Option<usize>,
|
||||
timeout: Option<u64>,
|
||||
find_node_by_pubkey: Option<Pubkey>,
|
||||
@@ -194,13 +191,8 @@ fn spy(
|
||||
}
|
||||
}
|
||||
|
||||
tvu_peers = spy_ref
|
||||
.read()
|
||||
.unwrap()
|
||||
.all_tvu_peers()
|
||||
.into_iter()
|
||||
.collect::<Vec<_>>();
|
||||
storage_peers = spy_ref.read().unwrap().all_storage_peers();
|
||||
tvu_peers = spy_ref.all_tvu_peers().into_iter().collect::<Vec<_>>();
|
||||
storage_peers = spy_ref.all_storage_peers();
|
||||
|
||||
let mut nodes: Vec<_> = tvu_peers.iter().chain(storage_peers.iter()).collect();
|
||||
nodes.sort();
|
||||
@@ -232,10 +224,7 @@ fn spy(
|
||||
met_criteria = true;
|
||||
}
|
||||
if i % 20 == 0 {
|
||||
info!(
|
||||
"discovering...\n{}",
|
||||
spy_ref.read().unwrap().contact_info_trace()
|
||||
);
|
||||
info!("discovering...\n{}", spy_ref.contact_info_trace());
|
||||
}
|
||||
sleep(Duration::from_millis(
|
||||
crate::cluster_info::GOSSIP_SLEEP_MILLIS,
|
||||
@@ -256,18 +245,18 @@ fn make_gossip_node(
|
||||
entrypoint: Option<&SocketAddr>,
|
||||
exit: &Arc<AtomicBool>,
|
||||
gossip_addr: Option<&SocketAddr>,
|
||||
) -> (GossipService, Option<TcpListener>, Arc<RwLock<ClusterInfo>>) {
|
||||
) -> (GossipService, Option<TcpListener>, Arc<ClusterInfo>) {
|
||||
let keypair = Arc::new(Keypair::new());
|
||||
let (node, gossip_socket, ip_echo) = if let Some(gossip_addr) = gossip_addr {
|
||||
ClusterInfo::gossip_node(&keypair.pubkey(), gossip_addr)
|
||||
} else {
|
||||
ClusterInfo::spy_node(&keypair.pubkey())
|
||||
};
|
||||
let mut cluster_info = ClusterInfo::new(node, keypair);
|
||||
let cluster_info = ClusterInfo::new(node, keypair);
|
||||
if let Some(entrypoint) = entrypoint {
|
||||
cluster_info.set_entrypoint(ContactInfo::new_gossip_entry_point(entrypoint));
|
||||
}
|
||||
let cluster_info = Arc::new(RwLock::new(cluster_info));
|
||||
let cluster_info = Arc::new(cluster_info);
|
||||
let gossip_service = GossipService::new(&cluster_info.clone(), None, gossip_socket, &exit);
|
||||
(gossip_service, ip_echo, cluster_info)
|
||||
}
|
||||
@@ -277,7 +266,7 @@ mod tests {
|
||||
use super::*;
|
||||
use crate::cluster_info::{ClusterInfo, Node};
|
||||
use std::sync::atomic::AtomicBool;
|
||||
use std::sync::{Arc, RwLock};
|
||||
use std::sync::Arc;
|
||||
|
||||
#[test]
|
||||
#[ignore]
|
||||
@@ -286,7 +275,7 @@ mod tests {
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let tn = Node::new_localhost();
|
||||
let cluster_info = ClusterInfo::new_with_invalid_keypair(tn.info.clone());
|
||||
let c = Arc::new(RwLock::new(cluster_info));
|
||||
let c = Arc::new(cluster_info);
|
||||
let d = GossipService::new(&c, None, tn.sockets.gossip, &exit);
|
||||
exit.store(true, Ordering::Relaxed);
|
||||
d.join().unwrap();
|
||||
@@ -300,16 +289,16 @@ mod tests {
|
||||
let contact_info = ContactInfo::new_localhost(&keypair.pubkey(), 0);
|
||||
let peer0_info = ContactInfo::new_localhost(&peer0, 0);
|
||||
let peer1_info = ContactInfo::new_localhost(&peer1, 0);
|
||||
let mut cluster_info = ClusterInfo::new(contact_info.clone(), Arc::new(keypair));
|
||||
let cluster_info = ClusterInfo::new(contact_info.clone(), Arc::new(keypair));
|
||||
cluster_info.insert_info(peer0_info.clone());
|
||||
cluster_info.insert_info(peer1_info);
|
||||
|
||||
let spy_ref = Arc::new(RwLock::new(cluster_info));
|
||||
let spy_ref = Arc::new(cluster_info);
|
||||
|
||||
let (met_criteria, secs, tvu_peers, _) = spy(spy_ref.clone(), None, Some(1), None, None);
|
||||
assert_eq!(met_criteria, false);
|
||||
assert_eq!(secs, 1);
|
||||
assert_eq!(tvu_peers, spy_ref.read().unwrap().tvu_peers());
|
||||
assert_eq!(tvu_peers, spy_ref.tvu_peers());
|
||||
|
||||
// Find num_nodes
|
||||
let (met_criteria, _, _, _) = spy(spy_ref.clone(), Some(1), None, None, None);
|
||||
|
@@ -81,7 +81,7 @@ impl RepairService {
|
||||
blockstore: Arc<Blockstore>,
|
||||
exit: Arc<AtomicBool>,
|
||||
repair_socket: Arc<UdpSocket>,
|
||||
cluster_info: Arc<RwLock<ClusterInfo>>,
|
||||
cluster_info: Arc<ClusterInfo>,
|
||||
repair_strategy: RepairStrategy,
|
||||
cluster_slots: Arc<ClusterSlots>,
|
||||
) -> Self {
|
||||
@@ -106,17 +106,24 @@ impl RepairService {
|
||||
blockstore: &Blockstore,
|
||||
exit: &AtomicBool,
|
||||
repair_socket: &UdpSocket,
|
||||
cluster_info: &Arc<RwLock<ClusterInfo>>,
|
||||
cluster_info: &Arc<ClusterInfo>,
|
||||
repair_strategy: RepairStrategy,
|
||||
cluster_slots: &Arc<ClusterSlots>,
|
||||
) {
|
||||
let serve_repair = ServeRepair::new(cluster_info.clone());
|
||||
let id = cluster_info.read().unwrap().id();
|
||||
let id = cluster_info.id();
|
||||
if let RepairStrategy::RepairAll { .. } = repair_strategy {
|
||||
Self::initialize_lowest_slot(id, blockstore, cluster_info);
|
||||
}
|
||||
let mut repair_stats = RepairStats::default();
|
||||
let mut last_stats = Instant::now();
|
||||
if let RepairStrategy::RepairAll {
|
||||
ref completed_slots_receiver,
|
||||
..
|
||||
} = repair_strategy
|
||||
{
|
||||
Self::initialize_epoch_slots(blockstore, cluster_info, completed_slots_receiver);
|
||||
}
|
||||
loop {
|
||||
if exit.load(Ordering::Relaxed) {
|
||||
break;
|
||||
@@ -141,14 +148,7 @@ impl RepairService {
|
||||
let new_root = blockstore.last_root();
|
||||
let lowest_slot = blockstore.lowest_slot();
|
||||
Self::update_lowest_slot(&id, lowest_slot, &cluster_info);
|
||||
Self::update_completed_slots(
|
||||
&id,
|
||||
new_root,
|
||||
&cluster_slots,
|
||||
blockstore,
|
||||
completed_slots_receiver,
|
||||
&cluster_info,
|
||||
);
|
||||
Self::update_completed_slots(completed_slots_receiver, &cluster_info);
|
||||
cluster_slots.update(new_root, cluster_info, bank_forks);
|
||||
Self::generate_repairs(blockstore, new_root, MAX_REPAIR_LENGTH)
|
||||
}
|
||||
@@ -308,54 +308,57 @@ impl RepairService {
|
||||
}
|
||||
}
|
||||
|
||||
fn initialize_lowest_slot(
|
||||
id: Pubkey,
|
||||
blockstore: &Blockstore,
|
||||
cluster_info: &RwLock<ClusterInfo>,
|
||||
) {
|
||||
fn initialize_lowest_slot(id: Pubkey, blockstore: &Blockstore, cluster_info: &ClusterInfo) {
|
||||
// Safe to set into gossip because by this time, the leader schedule cache should
|
||||
// also be updated with the latest root (done in blockstore_processor) and thus
|
||||
// will provide a schedule to window_service for any incoming shreds up to the
|
||||
// last_confirmed_epoch.
|
||||
cluster_info
|
||||
.write()
|
||||
.unwrap()
|
||||
.push_lowest_slot(id, blockstore.lowest_slot());
|
||||
cluster_info.push_lowest_slot(id, blockstore.lowest_slot());
|
||||
}
|
||||
|
||||
fn update_completed_slots(
|
||||
id: &Pubkey,
|
||||
root: Slot,
|
||||
cluster_slots: &ClusterSlots,
|
||||
blockstore: &Blockstore,
|
||||
completed_slots_receiver: &CompletedSlotsReceiver,
|
||||
cluster_info: &RwLock<ClusterInfo>,
|
||||
cluster_info: &ClusterInfo,
|
||||
) {
|
||||
let mine = cluster_slots.collect(id);
|
||||
let mut slots: Vec<Slot> = vec![];
|
||||
while let Ok(mut more) = completed_slots_receiver.try_recv() {
|
||||
more.retain(|x| !mine.contains(x));
|
||||
slots.append(&mut more);
|
||||
}
|
||||
blockstore
|
||||
.live_slots_iterator(root)
|
||||
.for_each(|(slot, slot_meta)| {
|
||||
if slot_meta.is_full() && !mine.contains(&slot) {
|
||||
slots.push(slot)
|
||||
}
|
||||
});
|
||||
slots.sort();
|
||||
slots.dedup();
|
||||
if !slots.is_empty() {
|
||||
cluster_info.write().unwrap().push_epoch_slots(&slots);
|
||||
cluster_info.push_epoch_slots(&slots);
|
||||
}
|
||||
}
|
||||
|
||||
fn update_lowest_slot(id: &Pubkey, lowest_slot: Slot, cluster_info: &RwLock<ClusterInfo>) {
|
||||
cluster_info
|
||||
.write()
|
||||
.unwrap()
|
||||
.push_lowest_slot(*id, lowest_slot);
|
||||
fn update_lowest_slot(id: &Pubkey, lowest_slot: Slot, cluster_info: &ClusterInfo) {
|
||||
cluster_info.push_lowest_slot(*id, lowest_slot);
|
||||
}
|
||||
|
||||
fn initialize_epoch_slots(
|
||||
blockstore: &Blockstore,
|
||||
cluster_info: &ClusterInfo,
|
||||
completed_slots_receiver: &CompletedSlotsReceiver,
|
||||
) {
|
||||
let root = blockstore.last_root();
|
||||
let mut slots: Vec<_> = blockstore
|
||||
.live_slots_iterator(root)
|
||||
.filter_map(|(slot, slot_meta)| {
|
||||
if slot_meta.is_full() {
|
||||
Some(slot)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
|
||||
while let Ok(mut more) = completed_slots_receiver.try_recv() {
|
||||
slots.append(&mut more);
|
||||
}
|
||||
slots.sort();
|
||||
slots.dedup();
|
||||
if !slots.is_empty() {
|
||||
cluster_info.push_epoch_slots(&slots);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn join(self) -> thread::Result<()> {
|
||||
@@ -589,17 +592,13 @@ mod test {
|
||||
#[test]
|
||||
pub fn test_update_lowest_slot() {
|
||||
let node_info = Node::new_localhost_with_pubkey(&Pubkey::default());
|
||||
let cluster_info = RwLock::new(ClusterInfo::new_with_invalid_keypair(
|
||||
node_info.info.clone(),
|
||||
));
|
||||
let cluster_info = ClusterInfo::new_with_invalid_keypair(node_info.info.clone());
|
||||
RepairService::update_lowest_slot(&Pubkey::default(), 5, &cluster_info);
|
||||
let lowest = cluster_info
|
||||
.read()
|
||||
.unwrap()
|
||||
.get_lowest_slot_for_node(&Pubkey::default(), None)
|
||||
.unwrap()
|
||||
.0
|
||||
.clone();
|
||||
.get_lowest_slot_for_node(&Pubkey::default(), None, |lowest_slot, _| {
|
||||
lowest_slot.clone()
|
||||
})
|
||||
.unwrap();
|
||||
assert_eq!(lowest.lowest, 5);
|
||||
}
|
||||
}
|
||||
|
@@ -114,7 +114,7 @@ impl ReplayStage {
|
||||
config: ReplayStageConfig,
|
||||
blockstore: Arc<Blockstore>,
|
||||
bank_forks: Arc<RwLock<BankForks>>,
|
||||
cluster_info: Arc<RwLock<ClusterInfo>>,
|
||||
cluster_info: Arc<ClusterInfo>,
|
||||
ledger_signal_receiver: Receiver<bool>,
|
||||
poh_recorder: Arc<Mutex<PohRecorder>>,
|
||||
vote_tracker: Arc<VoteTracker>,
|
||||
@@ -283,7 +283,11 @@ impl ReplayStage {
|
||||
|
||||
for r in failure_reasons {
|
||||
if let HeaviestForkFailures::NoPropagatedConfirmation(slot) = r {
|
||||
progress.log_propagated_stats(slot, &bank_forks);
|
||||
if let Some(latest_leader_slot) =
|
||||
progress.get_latest_leader_slot(slot)
|
||||
{
|
||||
progress.log_propagated_stats(latest_leader_slot, &bank_forks);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -685,7 +689,7 @@ impl ReplayStage {
|
||||
progress: &mut ProgressMap,
|
||||
vote_account_pubkey: &Pubkey,
|
||||
authorized_voter_keypairs: &[Arc<Keypair>],
|
||||
cluster_info: &Arc<RwLock<ClusterInfo>>,
|
||||
cluster_info: &Arc<ClusterInfo>,
|
||||
blockstore: &Arc<Blockstore>,
|
||||
leader_schedule_cache: &Arc<LeaderScheduleCache>,
|
||||
root_bank_sender: &Sender<Vec<Arc<Bank>>>,
|
||||
@@ -758,7 +762,7 @@ impl ReplayStage {
|
||||
}
|
||||
|
||||
fn push_vote(
|
||||
cluster_info: &Arc<RwLock<ClusterInfo>>,
|
||||
cluster_info: &ClusterInfo,
|
||||
bank: &Arc<Bank>,
|
||||
vote_account_pubkey: &Pubkey,
|
||||
authorized_voter_keypairs: &[Arc<Keypair>],
|
||||
@@ -811,7 +815,7 @@ impl ReplayStage {
|
||||
}
|
||||
Some(authorized_voter_keypair) => authorized_voter_keypair,
|
||||
};
|
||||
let node_keypair = cluster_info.read().unwrap().keypair.clone();
|
||||
let node_keypair = cluster_info.keypair.clone();
|
||||
|
||||
// Send our last few votes along with the new one
|
||||
let vote_ix = vote_instruction::vote(
|
||||
@@ -825,10 +829,7 @@ impl ReplayStage {
|
||||
let blockhash = bank.last_blockhash();
|
||||
vote_tx.partial_sign(&[node_keypair.as_ref()], blockhash);
|
||||
vote_tx.partial_sign(&[authorized_voter_keypair.as_ref()], blockhash);
|
||||
cluster_info
|
||||
.write()
|
||||
.unwrap()
|
||||
.push_vote(tower_index, vote_tx);
|
||||
cluster_info.push_vote(tower_index, vote_tx);
|
||||
}
|
||||
|
||||
fn update_commitment_cache(
|
||||
|
@@ -38,7 +38,7 @@ const MAX_PACKET_BATCH_SIZE: usize = 100;
|
||||
fn retransmit(
|
||||
bank_forks: &Arc<RwLock<BankForks>>,
|
||||
leader_schedule_cache: &Arc<LeaderScheduleCache>,
|
||||
cluster_info: &Arc<RwLock<ClusterInfo>>,
|
||||
cluster_info: &ClusterInfo,
|
||||
r: &Arc<Mutex<PacketReceiver>>,
|
||||
sock: &UdpSocket,
|
||||
id: u32,
|
||||
@@ -63,11 +63,8 @@ fn retransmit(
|
||||
let mut peers_len = 0;
|
||||
let stakes = staking_utils::staked_nodes_at_epoch(&r_bank, bank_epoch);
|
||||
let stakes = stakes.map(Arc::new);
|
||||
let (peers, stakes_and_index) = cluster_info
|
||||
.read()
|
||||
.unwrap()
|
||||
.sorted_retransmit_peers_and_stakes(stakes);
|
||||
let me = cluster_info.read().unwrap().my_data();
|
||||
let (peers, stakes_and_index) = cluster_info.sorted_retransmit_peers_and_stakes(stakes);
|
||||
let my_id = cluster_info.id();
|
||||
let mut discard_total = 0;
|
||||
let mut repair_total = 0;
|
||||
let mut retransmit_total = 0;
|
||||
@@ -88,7 +85,7 @@ fn retransmit(
|
||||
|
||||
let mut compute_turbine_peers = Measure::start("turbine_start");
|
||||
let (my_index, mut shuffled_stakes_and_index) = ClusterInfo::shuffle_peers_and_index(
|
||||
&me.id,
|
||||
&my_id,
|
||||
&peers,
|
||||
&stakes_and_index,
|
||||
packet.meta.seed,
|
||||
@@ -154,7 +151,7 @@ pub fn retransmitter(
|
||||
sockets: Arc<Vec<UdpSocket>>,
|
||||
bank_forks: Arc<RwLock<BankForks>>,
|
||||
leader_schedule_cache: &Arc<LeaderScheduleCache>,
|
||||
cluster_info: Arc<RwLock<ClusterInfo>>,
|
||||
cluster_info: Arc<ClusterInfo>,
|
||||
r: Arc<Mutex<PacketReceiver>>,
|
||||
) -> Vec<JoinHandle<()>> {
|
||||
(0..sockets.len())
|
||||
@@ -206,7 +203,7 @@ impl RetransmitStage {
|
||||
bank_forks: Arc<RwLock<BankForks>>,
|
||||
leader_schedule_cache: &Arc<LeaderScheduleCache>,
|
||||
blockstore: Arc<Blockstore>,
|
||||
cluster_info: &Arc<RwLock<ClusterInfo>>,
|
||||
cluster_info: &Arc<ClusterInfo>,
|
||||
retransmit_sockets: Arc<Vec<UdpSocket>>,
|
||||
repair_socket: Arc<UdpSocket>,
|
||||
verified_receiver: CrossbeamReceiver<Vec<Packets>>,
|
||||
@@ -316,11 +313,11 @@ mod tests {
|
||||
.unwrap();
|
||||
|
||||
let other = ContactInfo::new_localhost(&Pubkey::new_rand(), 0);
|
||||
let mut cluster_info = ClusterInfo::new_with_invalid_keypair(other);
|
||||
let cluster_info = ClusterInfo::new_with_invalid_keypair(other);
|
||||
cluster_info.insert_info(me);
|
||||
|
||||
let retransmit_socket = Arc::new(vec![UdpSocket::bind("0.0.0.0:0").unwrap()]);
|
||||
let cluster_info = Arc::new(RwLock::new(cluster_info));
|
||||
let cluster_info = Arc::new(cluster_info);
|
||||
|
||||
let (retransmit_sender, retransmit_receiver) = channel();
|
||||
let t_retransmit = retransmitter(
|
||||
|
121
core/src/rpc.rs
121
core/src/rpc.rs
@@ -1,17 +1,23 @@
|
||||
//! The `rpc` module implements the Solana RPC interface.
|
||||
|
||||
use crate::{
|
||||
cluster_info::ClusterInfo, commitment::BlockCommitmentCache, contact_info::ContactInfo,
|
||||
storage_stage::StorageState, validator::ValidatorExit,
|
||||
cluster_info::ClusterInfo,
|
||||
commitment::{BlockCommitmentArray, BlockCommitmentCache},
|
||||
contact_info::ContactInfo,
|
||||
storage_stage::StorageState,
|
||||
validator::ValidatorExit,
|
||||
};
|
||||
use bincode::serialize;
|
||||
use jsonrpc_core::{Error, Metadata, Result};
|
||||
use jsonrpc_derive::rpc;
|
||||
use solana_client::rpc_response::*;
|
||||
use solana_faucet::faucet::request_airdrop_transaction;
|
||||
use solana_ledger::{
|
||||
bank_forks::BankForks, blockstore::Blockstore, rooted_slot_iterator::RootedSlotIterator,
|
||||
use solana_client::{
|
||||
rpc_request::{
|
||||
MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE, MAX_GET_SIGNATURE_STATUSES_QUERY_ITEMS,
|
||||
},
|
||||
rpc_response::*,
|
||||
};
|
||||
use solana_faucet::faucet::request_airdrop_transaction;
|
||||
use solana_ledger::{bank_forks::BankForks, blockstore::Blockstore};
|
||||
use solana_perf::packet::PACKET_DATA_SIZE;
|
||||
use solana_runtime::bank::Bank;
|
||||
use solana_sdk::{
|
||||
@@ -38,9 +44,6 @@ use std::{
|
||||
time::{Duration, Instant},
|
||||
};
|
||||
|
||||
const MAX_QUERY_ITEMS: usize = 256;
|
||||
const MAX_SLOT_RANGE: u64 = 10_000;
|
||||
|
||||
type RpcResponse<T> = Result<Response<T>>;
|
||||
|
||||
fn new_response<T>(bank: &Bank, value: T) -> RpcResponse<T> {
|
||||
@@ -219,7 +222,7 @@ impl JsonRpcRequestProcessor {
|
||||
}
|
||||
}
|
||||
|
||||
fn get_block_commitment(&self, block: Slot) -> RpcBlockCommitment<[u64; MAX_LOCKOUT_HISTORY]> {
|
||||
fn get_block_commitment(&self, block: Slot) -> RpcBlockCommitment<BlockCommitmentArray> {
|
||||
let r_block_commitment = self.block_commitment_cache.read().unwrap();
|
||||
RpcBlockCommitment {
|
||||
commitment: r_block_commitment
|
||||
@@ -378,18 +381,12 @@ impl JsonRpcRequestProcessor {
|
||||
if end_slot < start_slot {
|
||||
return Ok(vec![]);
|
||||
}
|
||||
|
||||
let start_slot = (start_slot..end_slot).find(|&slot| self.blockstore.is_root(slot));
|
||||
if let Some(start_slot) = start_slot {
|
||||
let mut slots: Vec<Slot> = RootedSlotIterator::new(start_slot, &self.blockstore)
|
||||
.unwrap()
|
||||
.map(|(slot, _)| slot)
|
||||
.collect();
|
||||
slots.retain(|&x| x <= end_slot);
|
||||
Ok(slots)
|
||||
} else {
|
||||
Ok(vec![])
|
||||
}
|
||||
Ok(self
|
||||
.blockstore
|
||||
.rooted_slot_iterator(start_slot)
|
||||
.map_err(|_| Error::internal_error())?
|
||||
.filter(|&slot| slot <= end_slot)
|
||||
.collect())
|
||||
}
|
||||
|
||||
pub fn get_block_time(&self, slot: Slot) -> Result<Option<UnixTimestamp>> {
|
||||
@@ -493,7 +490,9 @@ impl JsonRpcRequestProcessor {
|
||||
.map(|(slot, status)| {
|
||||
let r_block_commitment_cache = self.block_commitment_cache.read().unwrap();
|
||||
|
||||
let confirmations = if r_block_commitment_cache.root() >= slot {
|
||||
let confirmations = if r_block_commitment_cache.root() >= slot
|
||||
&& r_block_commitment_cache.is_confirmed_rooted(slot)
|
||||
{
|
||||
None
|
||||
} else {
|
||||
r_block_commitment_cache
|
||||
@@ -540,10 +539,17 @@ impl JsonRpcRequestProcessor {
|
||||
Ok(vec![])
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_first_available_block(&self) -> Result<Slot> {
|
||||
Ok(self
|
||||
.blockstore
|
||||
.get_first_available_block()
|
||||
.unwrap_or_default())
|
||||
}
|
||||
}
|
||||
|
||||
fn get_tpu_addr(cluster_info: &Arc<RwLock<ClusterInfo>>) -> Result<SocketAddr> {
|
||||
let contact_info = cluster_info.read().unwrap().my_data();
|
||||
fn get_tpu_addr(cluster_info: &ClusterInfo) -> Result<SocketAddr> {
|
||||
let contact_info = cluster_info.my_contact_info();
|
||||
Ok(contact_info.tpu)
|
||||
}
|
||||
|
||||
@@ -558,7 +564,7 @@ fn verify_signature(input: &str) -> Result<Signature> {
|
||||
#[derive(Clone)]
|
||||
pub struct Meta {
|
||||
pub request_processor: Arc<RwLock<JsonRpcRequestProcessor>>,
|
||||
pub cluster_info: Arc<RwLock<ClusterInfo>>,
|
||||
pub cluster_info: Arc<ClusterInfo>,
|
||||
pub genesis_hash: Hash,
|
||||
}
|
||||
impl Metadata for Meta {}
|
||||
@@ -651,7 +657,7 @@ pub trait RpcSol {
|
||||
&self,
|
||||
meta: Self::Metadata,
|
||||
block: Slot,
|
||||
) -> Result<RpcBlockCommitment<[u64; MAX_LOCKOUT_HISTORY]>>;
|
||||
) -> Result<RpcBlockCommitment<BlockCommitmentArray>>;
|
||||
|
||||
#[rpc(meta, name = "getGenesisHash")]
|
||||
fn get_genesis_hash(&self, meta: Self::Metadata) -> Result<String>;
|
||||
@@ -798,6 +804,9 @@ pub trait RpcSol {
|
||||
start_slot: Slot,
|
||||
end_slot: Slot,
|
||||
) -> Result<Vec<String>>;
|
||||
|
||||
#[rpc(meta, name = "getFirstAvailableBlock")]
|
||||
fn get_first_available_block(&self, meta: Self::Metadata) -> Result<Slot>;
|
||||
}
|
||||
|
||||
pub struct RpcSolImpl;
|
||||
@@ -904,7 +913,7 @@ impl RpcSol for RpcSolImpl {
|
||||
}
|
||||
|
||||
fn get_cluster_nodes(&self, meta: Self::Metadata) -> Result<Vec<RpcContactInfo>> {
|
||||
let cluster_info = meta.cluster_info.read().unwrap();
|
||||
let cluster_info = &meta.cluster_info;
|
||||
fn valid_address_or_none(addr: &SocketAddr) -> Option<SocketAddr> {
|
||||
if ContactInfo::is_valid_address(addr) {
|
||||
Some(*addr)
|
||||
@@ -912,12 +921,12 @@ impl RpcSol for RpcSolImpl {
|
||||
None
|
||||
}
|
||||
}
|
||||
let shred_version = cluster_info.my_data().shred_version;
|
||||
let my_shred_version = cluster_info.my_shred_version();
|
||||
Ok(cluster_info
|
||||
.all_peers()
|
||||
.iter()
|
||||
.filter_map(|(contact_info, _)| {
|
||||
if shred_version == contact_info.shred_version
|
||||
if my_shred_version == contact_info.shred_version
|
||||
&& ContactInfo::is_valid_address(&contact_info.gossip)
|
||||
{
|
||||
Some(RpcContactInfo {
|
||||
@@ -955,7 +964,7 @@ impl RpcSol for RpcSolImpl {
|
||||
&self,
|
||||
meta: Self::Metadata,
|
||||
block: Slot,
|
||||
) -> Result<RpcBlockCommitment<[u64; MAX_LOCKOUT_HISTORY]>> {
|
||||
) -> Result<RpcBlockCommitment<BlockCommitmentArray>> {
|
||||
Ok(meta
|
||||
.request_processor
|
||||
.read()
|
||||
@@ -1067,10 +1076,10 @@ impl RpcSol for RpcSolImpl {
|
||||
signature_strs: Vec<String>,
|
||||
config: Option<RpcSignatureStatusConfig>,
|
||||
) -> RpcResponse<Vec<Option<TransactionStatus>>> {
|
||||
if signature_strs.len() > MAX_QUERY_ITEMS {
|
||||
if signature_strs.len() > MAX_GET_SIGNATURE_STATUSES_QUERY_ITEMS {
|
||||
return Err(Error::invalid_params(format!(
|
||||
"Too many inputs provided; max {}",
|
||||
MAX_QUERY_ITEMS
|
||||
MAX_GET_SIGNATURE_STATUSES_QUERY_ITEMS
|
||||
)));
|
||||
}
|
||||
let mut signatures: Vec<Signature> = vec![];
|
||||
@@ -1363,16 +1372,16 @@ impl RpcSol for RpcSolImpl {
|
||||
end_slot: Slot,
|
||||
) -> Result<Vec<String>> {
|
||||
let pubkey = verify_pubkey(pubkey_str)?;
|
||||
if end_slot <= start_slot {
|
||||
if end_slot < start_slot {
|
||||
return Err(Error::invalid_params(format!(
|
||||
"start_slot {} must be smaller than end_slot {}",
|
||||
"start_slot {} must be less than or equal to end_slot {}",
|
||||
start_slot, end_slot
|
||||
)));
|
||||
}
|
||||
if end_slot - start_slot > MAX_SLOT_RANGE {
|
||||
if end_slot - start_slot > MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE {
|
||||
return Err(Error::invalid_params(format!(
|
||||
"Slot range too large; max {}",
|
||||
MAX_SLOT_RANGE
|
||||
MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS_SLOT_RANGE
|
||||
)));
|
||||
}
|
||||
meta.request_processor
|
||||
@@ -1386,6 +1395,13 @@ impl RpcSol for RpcSolImpl {
|
||||
.collect()
|
||||
})
|
||||
}
|
||||
|
||||
fn get_first_available_block(&self, meta: Self::Metadata) -> Result<Slot> {
|
||||
meta.request_processor
|
||||
.read()
|
||||
.unwrap()
|
||||
.get_first_available_block()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
@@ -1557,17 +1573,12 @@ pub mod tests {
|
||||
StorageState::default(),
|
||||
validator_exit,
|
||||
)));
|
||||
let cluster_info = Arc::new(RwLock::new(ClusterInfo::new_with_invalid_keypair(
|
||||
ContactInfo::default(),
|
||||
)));
|
||||
let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(ContactInfo::default()));
|
||||
|
||||
cluster_info
|
||||
.write()
|
||||
.unwrap()
|
||||
.insert_info(ContactInfo::new_with_pubkey_socketaddr(
|
||||
&leader_pubkey,
|
||||
&socketaddr!("127.0.0.1:1234"),
|
||||
));
|
||||
cluster_info.insert_info(ContactInfo::new_with_pubkey_socketaddr(
|
||||
&leader_pubkey,
|
||||
&socketaddr!("127.0.0.1:1234"),
|
||||
));
|
||||
|
||||
let mut io = MetaIoHandler::default();
|
||||
let rpc = RpcSolImpl;
|
||||
@@ -2077,7 +2088,7 @@ pub mod tests {
|
||||
.expect("actual response deserialization");
|
||||
let result = result.as_ref().unwrap();
|
||||
assert_eq!(expected_res, result.status);
|
||||
assert_eq!(None, result.confirmations);
|
||||
assert_eq!(Some(2), result.confirmations);
|
||||
|
||||
// Test getSignatureStatus request on unprocessed tx
|
||||
let tx = system_transaction::transfer(&alice, &bob_pubkey, 10, blockhash);
|
||||
@@ -2260,9 +2271,7 @@ pub mod tests {
|
||||
);
|
||||
Arc::new(RwLock::new(request_processor))
|
||||
},
|
||||
cluster_info: Arc::new(RwLock::new(ClusterInfo::new_with_invalid_keypair(
|
||||
ContactInfo::default(),
|
||||
))),
|
||||
cluster_info: Arc::new(ClusterInfo::new_with_invalid_keypair(ContactInfo::default())),
|
||||
genesis_hash: Hash::default(),
|
||||
};
|
||||
|
||||
@@ -2279,9 +2288,9 @@ pub mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_rpc_get_tpu_addr() {
|
||||
let cluster_info = Arc::new(RwLock::new(ClusterInfo::new_with_invalid_keypair(
|
||||
let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(
|
||||
ContactInfo::new_with_socketaddr(&socketaddr!("127.0.0.1:1234")),
|
||||
)));
|
||||
));
|
||||
assert_eq!(
|
||||
get_tpu_addr(&cluster_info),
|
||||
Ok(socketaddr!("127.0.0.1:1234"))
|
||||
@@ -2428,8 +2437,8 @@ pub mod tests {
|
||||
let validator_exit = create_validator_exit(&exit);
|
||||
let bank_forks = new_bank_forks().0;
|
||||
|
||||
let commitment_slot0 = BlockCommitment::new([8; MAX_LOCKOUT_HISTORY]);
|
||||
let commitment_slot1 = BlockCommitment::new([9; MAX_LOCKOUT_HISTORY]);
|
||||
let commitment_slot0 = BlockCommitment::new([8; MAX_LOCKOUT_HISTORY + 1]);
|
||||
let commitment_slot1 = BlockCommitment::new([9; MAX_LOCKOUT_HISTORY + 1]);
|
||||
let mut block_commitment: HashMap<u64, BlockCommitment> = HashMap::new();
|
||||
block_commitment
|
||||
.entry(0)
|
||||
@@ -2521,7 +2530,7 @@ pub mod tests {
|
||||
let res = io.handle_request_sync(&req, meta);
|
||||
let result: Response = serde_json::from_str(&res.expect("actual response"))
|
||||
.expect("actual response deserialization");
|
||||
let commitment_response: RpcBlockCommitment<[u64; MAX_LOCKOUT_HISTORY]> =
|
||||
let commitment_response: RpcBlockCommitment<BlockCommitmentArray> =
|
||||
if let Response::Single(res) = result {
|
||||
if let Output::Success(res) = res {
|
||||
serde_json::from_value(res.result).unwrap()
|
||||
|
@@ -320,6 +320,7 @@ mod tests {
|
||||
};
|
||||
use jsonrpc_core::{futures::sync::mpsc, Response};
|
||||
use jsonrpc_pubsub::{PubSubHandler, Session};
|
||||
use serial_test_derive::serial;
|
||||
use solana_budget_program::{self, budget_instruction};
|
||||
use solana_ledger::bank_forks::BankForks;
|
||||
use solana_ledger::genesis_utils::{create_genesis_config, GenesisConfigInfo};
|
||||
@@ -357,6 +358,7 @@ mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn test_signature_subscribe() {
|
||||
let GenesisConfigInfo {
|
||||
genesis_config,
|
||||
@@ -403,6 +405,7 @@ mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn test_signature_unsubscribe() {
|
||||
let GenesisConfigInfo {
|
||||
genesis_config,
|
||||
@@ -451,6 +454,7 @@ mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn test_account_subscribe() {
|
||||
let GenesisConfigInfo {
|
||||
mut genesis_config,
|
||||
@@ -561,6 +565,7 @@ mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn test_account_unsubscribe() {
|
||||
let bob_pubkey = Pubkey::new_rand();
|
||||
let session = create_session();
|
||||
@@ -721,6 +726,7 @@ mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn test_slot_subscribe() {
|
||||
let rpc = RpcSolPubSubImpl::default();
|
||||
let session = create_session();
|
||||
@@ -745,6 +751,7 @@ mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn test_slot_unsubscribe() {
|
||||
let rpc = RpcSolPubSubImpl::default();
|
||||
let session = create_session();
|
||||
|
@@ -15,8 +15,9 @@ use solana_ledger::{
|
||||
blockstore::Blockstore,
|
||||
snapshot_utils,
|
||||
};
|
||||
use solana_sdk::hash::Hash;
|
||||
use solana_sdk::{hash::Hash, pubkey::Pubkey};
|
||||
use std::{
|
||||
collections::HashSet,
|
||||
net::SocketAddr,
|
||||
path::{Path, PathBuf},
|
||||
sync::{mpsc::channel, Arc, RwLock},
|
||||
@@ -24,6 +25,10 @@ use std::{
|
||||
};
|
||||
use tokio::prelude::Future;
|
||||
|
||||
// If trusted validators are specified, consider this validator healthy if its latest account hash
|
||||
// is no further behind than this distance from the latest trusted validator account hash
|
||||
const HEALTH_CHECK_SLOT_DISTANCE: u64 = 150;
|
||||
|
||||
pub struct JsonRpcService {
|
||||
thread_hdl: JoinHandle<()>,
|
||||
|
||||
@@ -37,15 +42,24 @@ struct RpcRequestMiddleware {
|
||||
ledger_path: PathBuf,
|
||||
snapshot_archive_path_regex: Regex,
|
||||
snapshot_config: Option<SnapshotConfig>,
|
||||
cluster_info: Arc<ClusterInfo>,
|
||||
trusted_validators: Option<HashSet<Pubkey>>,
|
||||
}
|
||||
|
||||
impl RpcRequestMiddleware {
|
||||
pub fn new(ledger_path: PathBuf, snapshot_config: Option<SnapshotConfig>) -> Self {
|
||||
pub fn new(
|
||||
ledger_path: PathBuf,
|
||||
snapshot_config: Option<SnapshotConfig>,
|
||||
cluster_info: Arc<ClusterInfo>,
|
||||
trusted_validators: Option<HashSet<Pubkey>>,
|
||||
) -> Self {
|
||||
Self {
|
||||
ledger_path,
|
||||
snapshot_archive_path_regex: Regex::new(r"/snapshot-\d+-[[:alnum:]]+\.tar\.bz2$")
|
||||
.unwrap(),
|
||||
snapshot_config,
|
||||
cluster_info,
|
||||
trusted_validators,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -114,6 +128,63 @@ impl RpcRequestMiddleware {
|
||||
),
|
||||
}
|
||||
}
|
||||
|
||||
fn health_check(&self) -> &'static str {
|
||||
let response = if let Some(trusted_validators) = &self.trusted_validators {
|
||||
let (latest_account_hash_slot, latest_trusted_validator_account_hash_slot) = {
|
||||
(
|
||||
self.cluster_info
|
||||
.get_accounts_hash_for_node(&self.cluster_info.id(), |hashes| {
|
||||
hashes
|
||||
.iter()
|
||||
.max_by(|a, b| a.0.cmp(&b.0))
|
||||
.map(|slot_hash| slot_hash.0)
|
||||
})
|
||||
.flatten()
|
||||
.unwrap_or(0),
|
||||
trusted_validators
|
||||
.iter()
|
||||
.map(|trusted_validator| {
|
||||
self.cluster_info
|
||||
.get_accounts_hash_for_node(&trusted_validator, |hashes| {
|
||||
hashes
|
||||
.iter()
|
||||
.max_by(|a, b| a.0.cmp(&b.0))
|
||||
.map(|slot_hash| slot_hash.0)
|
||||
})
|
||||
.flatten()
|
||||
.unwrap_or(0)
|
||||
})
|
||||
.max()
|
||||
.unwrap_or(0),
|
||||
)
|
||||
};
|
||||
|
||||
// This validator is considered healthy if its latest account hash slot is within
|
||||
// `HEALTH_CHECK_SLOT_DISTANCE` of the latest trusted validator's account hash slot
|
||||
if latest_account_hash_slot > 0
|
||||
&& latest_trusted_validator_account_hash_slot > 0
|
||||
&& latest_account_hash_slot
|
||||
> latest_trusted_validator_account_hash_slot
|
||||
.saturating_sub(HEALTH_CHECK_SLOT_DISTANCE)
|
||||
{
|
||||
"ok"
|
||||
} else {
|
||||
warn!(
|
||||
"health check: me={}, latest trusted_validator={}",
|
||||
latest_account_hash_slot, latest_trusted_validator_account_hash_slot
|
||||
);
|
||||
"behind"
|
||||
}
|
||||
} else {
|
||||
// No trusted validator point of reference available, so this validator is healthy
|
||||
// because it's running
|
||||
"ok"
|
||||
};
|
||||
|
||||
info!("health check: {}", response);
|
||||
response
|
||||
}
|
||||
}
|
||||
|
||||
impl RequestMiddleware for RpcRequestMiddleware {
|
||||
@@ -148,6 +219,16 @@ impl RequestMiddleware for RpcRequestMiddleware {
|
||||
}
|
||||
if self.is_get_path(request.uri().path()) {
|
||||
self.get(request.uri().path())
|
||||
} else if request.uri().path() == "/health" {
|
||||
RequestMiddlewareAction::Respond {
|
||||
should_validate_hosts: true,
|
||||
response: Box::new(jsonrpc_core::futures::future::ok(
|
||||
hyper::Response::builder()
|
||||
.status(hyper::StatusCode::OK)
|
||||
.body(hyper::Body::from(self.health_check()))
|
||||
.unwrap(),
|
||||
)),
|
||||
}
|
||||
} else {
|
||||
RequestMiddlewareAction::Proceed {
|
||||
should_continue_on_invalid_cors: false,
|
||||
@@ -166,11 +247,12 @@ impl JsonRpcService {
|
||||
bank_forks: Arc<RwLock<BankForks>>,
|
||||
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
|
||||
blockstore: Arc<Blockstore>,
|
||||
cluster_info: Arc<RwLock<ClusterInfo>>,
|
||||
cluster_info: Arc<ClusterInfo>,
|
||||
genesis_hash: Hash,
|
||||
ledger_path: &Path,
|
||||
storage_state: StorageState,
|
||||
validator_exit: Arc<RwLock<Option<ValidatorExit>>>,
|
||||
trusted_validators: Option<HashSet<Pubkey>>,
|
||||
) -> Self {
|
||||
info!("rpc bound to {:?}", rpc_addr);
|
||||
info!("rpc configuration: {:?}", config);
|
||||
@@ -196,20 +278,35 @@ impl JsonRpcService {
|
||||
let rpc = RpcSolImpl;
|
||||
io.extend_with(rpc.to_delegate());
|
||||
|
||||
let server =
|
||||
ServerBuilder::with_meta_extractor(io, move |_req: &hyper::Request<hyper::Body>| Meta {
|
||||
let request_middleware = RpcRequestMiddleware::new(
|
||||
ledger_path,
|
||||
snapshot_config,
|
||||
cluster_info.clone(),
|
||||
trusted_validators,
|
||||
);
|
||||
let server = ServerBuilder::with_meta_extractor(
|
||||
io,
|
||||
move |_req: &hyper::Request<hyper::Body>| Meta {
|
||||
request_processor: request_processor.clone(),
|
||||
cluster_info: cluster_info.clone(),
|
||||
genesis_hash
|
||||
}).threads(4)
|
||||
.cors(DomainsValidation::AllowOnly(vec![
|
||||
AccessControlAllowOrigin::Any,
|
||||
]))
|
||||
.cors_max_age(86400)
|
||||
.request_middleware(RpcRequestMiddleware::new(ledger_path, snapshot_config))
|
||||
.start_http(&rpc_addr);
|
||||
genesis_hash,
|
||||
},
|
||||
)
|
||||
.threads(num_cpus::get())
|
||||
.cors(DomainsValidation::AllowOnly(vec![
|
||||
AccessControlAllowOrigin::Any,
|
||||
]))
|
||||
.cors_max_age(86400)
|
||||
.request_middleware(request_middleware)
|
||||
.start_http(&rpc_addr);
|
||||
|
||||
if let Err(e) = server {
|
||||
warn!("JSON RPC service unavailable error: {:?}. \nAlso, check that port {} is not already in use by another application", e, rpc_addr.port());
|
||||
warn!(
|
||||
"JSON RPC service unavailable error: {:?}. \n\
|
||||
Also, check that port {} is not already in use by another application",
|
||||
e,
|
||||
rpc_addr.port()
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -248,7 +345,11 @@ impl JsonRpcService {
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::{contact_info::ContactInfo, rpc::tests::create_validator_exit};
|
||||
use crate::{
|
||||
contact_info::ContactInfo,
|
||||
crds_value::{CrdsData, CrdsValue, SnapshotHash},
|
||||
rpc::tests::create_validator_exit,
|
||||
};
|
||||
use solana_ledger::{
|
||||
genesis_utils::{create_genesis_config, GenesisConfigInfo},
|
||||
get_tmp_ledger_path,
|
||||
@@ -268,9 +369,7 @@ mod tests {
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let validator_exit = create_validator_exit(&exit);
|
||||
let bank = Bank::new(&genesis_config);
|
||||
let cluster_info = Arc::new(RwLock::new(ClusterInfo::new_with_invalid_keypair(
|
||||
ContactInfo::default(),
|
||||
)));
|
||||
let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(ContactInfo::default()));
|
||||
let ip_addr = IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0));
|
||||
let rpc_addr = SocketAddr::new(
|
||||
ip_addr,
|
||||
@@ -292,6 +391,7 @@ mod tests {
|
||||
&PathBuf::from("farf"),
|
||||
StorageState::default(),
|
||||
validator_exit,
|
||||
None,
|
||||
);
|
||||
let thread = rpc_service.thread_hdl.thread();
|
||||
assert_eq!(thread.name().unwrap(), "solana-jsonrpc");
|
||||
@@ -312,7 +412,9 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_is_get_path() {
|
||||
let rrm = RpcRequestMiddleware::new(PathBuf::from("/"), None);
|
||||
let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(ContactInfo::default()));
|
||||
|
||||
let rrm = RpcRequestMiddleware::new(PathBuf::from("/"), None, cluster_info.clone(), None);
|
||||
let rrm_with_snapshot_config = RpcRequestMiddleware::new(
|
||||
PathBuf::from("/"),
|
||||
Some(SnapshotConfig {
|
||||
@@ -320,6 +422,8 @@ mod tests {
|
||||
snapshot_package_output_path: PathBuf::from("/"),
|
||||
snapshot_path: PathBuf::from("/"),
|
||||
}),
|
||||
cluster_info,
|
||||
None,
|
||||
);
|
||||
|
||||
assert!(rrm.is_get_path("/genesis.tar.bz2"));
|
||||
@@ -341,4 +445,84 @@ mod tests {
|
||||
assert!(!rrm.is_get_path(".."));
|
||||
assert!(!rrm.is_get_path("🎣"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_health_check_with_no_trusted_validators() {
|
||||
let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(ContactInfo::default()));
|
||||
|
||||
let rm = RpcRequestMiddleware::new(PathBuf::from("/"), None, cluster_info.clone(), None);
|
||||
assert_eq!(rm.health_check(), "ok");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_health_check_with_trusted_validators() {
|
||||
let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(ContactInfo::default()));
|
||||
|
||||
let trusted_validators = vec![Pubkey::new_rand(), Pubkey::new_rand(), Pubkey::new_rand()];
|
||||
let rm = RpcRequestMiddleware::new(
|
||||
PathBuf::from("/"),
|
||||
None,
|
||||
cluster_info.clone(),
|
||||
Some(trusted_validators.clone().into_iter().collect()),
|
||||
);
|
||||
|
||||
// No account hashes for this node or any trusted validators == "behind"
|
||||
assert_eq!(rm.health_check(), "behind");
|
||||
|
||||
// No account hashes for any trusted validators == "behind"
|
||||
cluster_info.push_accounts_hashes(vec![(1000, Hash::default()), (900, Hash::default())]);
|
||||
assert_eq!(rm.health_check(), "behind");
|
||||
|
||||
// This node is ahead of the trusted validators == "ok"
|
||||
cluster_info
|
||||
.gossip
|
||||
.write()
|
||||
.unwrap()
|
||||
.crds
|
||||
.insert(
|
||||
CrdsValue::new_unsigned(CrdsData::AccountsHashes(SnapshotHash::new(
|
||||
trusted_validators[0].clone(),
|
||||
vec![
|
||||
(1, Hash::default()),
|
||||
(1001, Hash::default()),
|
||||
(2, Hash::default()),
|
||||
],
|
||||
))),
|
||||
1,
|
||||
)
|
||||
.unwrap();
|
||||
assert_eq!(rm.health_check(), "ok");
|
||||
|
||||
// Node is slightly behind the trusted validators == "ok"
|
||||
cluster_info
|
||||
.gossip
|
||||
.write()
|
||||
.unwrap()
|
||||
.crds
|
||||
.insert(
|
||||
CrdsValue::new_unsigned(CrdsData::AccountsHashes(SnapshotHash::new(
|
||||
trusted_validators[1].clone(),
|
||||
vec![(1000 + HEALTH_CHECK_SLOT_DISTANCE - 1, Hash::default())],
|
||||
))),
|
||||
1,
|
||||
)
|
||||
.unwrap();
|
||||
assert_eq!(rm.health_check(), "ok");
|
||||
|
||||
// Node is far behind the trusted validators == "behind"
|
||||
cluster_info
|
||||
.gossip
|
||||
.write()
|
||||
.unwrap()
|
||||
.crds
|
||||
.insert(
|
||||
CrdsValue::new_unsigned(CrdsData::AccountsHashes(SnapshotHash::new(
|
||||
trusted_validators[2].clone(),
|
||||
vec![(1000 + HEALTH_CHECK_SLOT_DISTANCE, Hash::default())],
|
||||
))),
|
||||
1,
|
||||
)
|
||||
.unwrap();
|
||||
assert_eq!(rm.health_check(), "behind");
|
||||
}
|
||||
}
|
||||
|
@@ -619,6 +619,7 @@ pub(crate) mod tests {
|
||||
use crate::commitment::BlockCommitment;
|
||||
use jsonrpc_core::futures::{self, stream::Stream};
|
||||
use jsonrpc_pubsub::typed::Subscriber;
|
||||
use serial_test_derive::serial;
|
||||
use solana_budget_program;
|
||||
use solana_ledger::genesis_utils::{create_genesis_config, GenesisConfigInfo};
|
||||
use solana_sdk::{
|
||||
@@ -654,6 +655,7 @@ pub(crate) mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn test_check_account_subscribe() {
|
||||
let GenesisConfigInfo {
|
||||
genesis_config,
|
||||
@@ -726,6 +728,7 @@ pub(crate) mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn test_check_program_subscribe() {
|
||||
let GenesisConfigInfo {
|
||||
genesis_config,
|
||||
@@ -806,6 +809,7 @@ pub(crate) mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn test_check_signature_subscribe() {
|
||||
let GenesisConfigInfo {
|
||||
genesis_config,
|
||||
@@ -947,6 +951,7 @@ pub(crate) mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn test_check_slot_subscribe() {
|
||||
let (subscriber, _id_receiver, transport_receiver) =
|
||||
Subscriber::new_test("slotNotification");
|
||||
@@ -988,6 +993,7 @@ pub(crate) mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn test_check_root_subscribe() {
|
||||
let (subscriber, _id_receiver, mut transport_receiver) =
|
||||
Subscriber::new_test("rootNotification");
|
||||
@@ -1028,6 +1034,7 @@ pub(crate) mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn test_add_and_remove_subscription() {
|
||||
let mut subscriptions: HashMap<u64, HashMap<SubscriptionId, (Sink<()>, Confirmations)>> =
|
||||
HashMap::new();
|
||||
|
@@ -72,7 +72,7 @@ pub struct ServeRepair {
|
||||
/// set the keypair that will be used to sign repair responses
|
||||
keypair: Arc<Keypair>,
|
||||
my_info: ContactInfo,
|
||||
cluster_info: Arc<RwLock<ClusterInfo>>,
|
||||
cluster_info: Arc<ClusterInfo>,
|
||||
}
|
||||
|
||||
type RepairCache = HashMap<Slot, (Vec<ContactInfo>, Vec<(u64, usize)>)>;
|
||||
@@ -80,16 +80,13 @@ type RepairCache = HashMap<Slot, (Vec<ContactInfo>, Vec<(u64, usize)>)>;
|
||||
impl ServeRepair {
|
||||
/// Without a valid keypair gossip will not function. Only useful for tests.
|
||||
pub fn new_with_invalid_keypair(contact_info: ContactInfo) -> Self {
|
||||
Self::new(Arc::new(RwLock::new(
|
||||
ClusterInfo::new_with_invalid_keypair(contact_info),
|
||||
Self::new(Arc::new(ClusterInfo::new_with_invalid_keypair(
|
||||
contact_info,
|
||||
)))
|
||||
}
|
||||
|
||||
pub fn new(cluster_info: Arc<RwLock<ClusterInfo>>) -> Self {
|
||||
let (keypair, my_info) = {
|
||||
let r_cluster_info = cluster_info.read().unwrap();
|
||||
(r_cluster_info.keypair.clone(), r_cluster_info.my_data())
|
||||
};
|
||||
pub fn new(cluster_info: Arc<ClusterInfo>) -> Self {
|
||||
let (keypair, my_info) = { (cluster_info.keypair.clone(), cluster_info.my_contact_info()) };
|
||||
Self {
|
||||
keypair,
|
||||
my_info,
|
||||
@@ -362,11 +359,7 @@ impl ServeRepair {
|
||||
// find a peer that appears to be accepting replication and has the desired slot, as indicated
|
||||
// by a valid tvu port location
|
||||
if cache.get(&repair_request.slot()).is_none() {
|
||||
let repair_peers: Vec<_> = self
|
||||
.cluster_info
|
||||
.read()
|
||||
.unwrap()
|
||||
.repair_peers(repair_request.slot());
|
||||
let repair_peers: Vec<_> = self.cluster_info.repair_peers(repair_request.slot());
|
||||
if repair_peers.is_empty() {
|
||||
return Err(ClusterInfoError::NoPeers.into());
|
||||
}
|
||||
@@ -654,7 +647,7 @@ mod tests {
|
||||
fn window_index_request() {
|
||||
let cluster_slots = ClusterSlots::default();
|
||||
let me = ContactInfo::new_localhost(&Pubkey::new_rand(), timestamp());
|
||||
let cluster_info = Arc::new(RwLock::new(ClusterInfo::new_with_invalid_keypair(me)));
|
||||
let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(me));
|
||||
let serve_repair = ServeRepair::new(cluster_info.clone());
|
||||
let rv = serve_repair.repair_request(
|
||||
&cluster_slots,
|
||||
@@ -680,7 +673,7 @@ mod tests {
|
||||
wallclock: 0,
|
||||
shred_version: 0,
|
||||
};
|
||||
cluster_info.write().unwrap().insert_info(nxt.clone());
|
||||
cluster_info.insert_info(nxt.clone());
|
||||
let rv = serve_repair
|
||||
.repair_request(
|
||||
&cluster_slots,
|
||||
@@ -708,7 +701,7 @@ mod tests {
|
||||
wallclock: 0,
|
||||
shred_version: 0,
|
||||
};
|
||||
cluster_info.write().unwrap().insert_info(nxt);
|
||||
cluster_info.insert_info(nxt);
|
||||
let mut one = false;
|
||||
let mut two = false;
|
||||
while !one || !two {
|
||||
|
@@ -5,7 +5,7 @@ use std::{
|
||||
sync::{
|
||||
atomic::{AtomicBool, Ordering},
|
||||
mpsc::RecvTimeoutError,
|
||||
Arc, RwLock,
|
||||
Arc,
|
||||
},
|
||||
thread::{self, Builder, JoinHandle},
|
||||
time::Duration,
|
||||
@@ -20,7 +20,7 @@ impl SnapshotPackagerService {
|
||||
snapshot_package_receiver: SnapshotPackageReceiver,
|
||||
starting_snapshot_hash: Option<(Slot, Hash)>,
|
||||
exit: &Arc<AtomicBool>,
|
||||
cluster_info: &Arc<RwLock<ClusterInfo>>,
|
||||
cluster_info: &Arc<ClusterInfo>,
|
||||
) -> Self {
|
||||
let exit = exit.clone();
|
||||
let cluster_info = cluster_info.clone();
|
||||
@@ -32,10 +32,7 @@ impl SnapshotPackagerService {
|
||||
if let Some(starting_snapshot_hash) = starting_snapshot_hash {
|
||||
hashes.push(starting_snapshot_hash);
|
||||
}
|
||||
cluster_info
|
||||
.write()
|
||||
.unwrap()
|
||||
.push_snapshot_hashes(hashes.clone());
|
||||
cluster_info.push_snapshot_hashes(hashes.clone());
|
||||
loop {
|
||||
if exit.load(Ordering::Relaxed) {
|
||||
break;
|
||||
@@ -58,10 +55,7 @@ impl SnapshotPackagerService {
|
||||
while hashes.len() > MAX_SNAPSHOT_HASHES {
|
||||
hashes.remove(0);
|
||||
}
|
||||
cluster_info
|
||||
.write()
|
||||
.unwrap()
|
||||
.push_snapshot_hashes(hashes.clone());
|
||||
cluster_info.push_snapshot_hashes(hashes.clone());
|
||||
}
|
||||
}
|
||||
Err(RecvTimeoutError::Disconnected) => break,
|
||||
|
@@ -184,7 +184,7 @@ impl StorageStage {
|
||||
storage_keypair: &Arc<Keypair>,
|
||||
exit: &Arc<AtomicBool>,
|
||||
bank_forks: &Arc<RwLock<BankForks>>,
|
||||
cluster_info: &Arc<RwLock<ClusterInfo>>,
|
||||
cluster_info: &Arc<ClusterInfo>,
|
||||
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
|
||||
) -> Self {
|
||||
let (instruction_sender, instruction_receiver) = channel();
|
||||
@@ -286,7 +286,7 @@ impl StorageStage {
|
||||
|
||||
fn send_transaction(
|
||||
bank_forks: &Arc<RwLock<BankForks>>,
|
||||
cluster_info: &Arc<RwLock<ClusterInfo>>,
|
||||
cluster_info: &ClusterInfo,
|
||||
instruction: Instruction,
|
||||
keypair: &Arc<Keypair>,
|
||||
storage_keypair: &Arc<Keypair>,
|
||||
@@ -323,7 +323,7 @@ impl StorageStage {
|
||||
for _ in 0..5 {
|
||||
transactions_socket.send_to(
|
||||
&bincode::serialize(&transaction).unwrap(),
|
||||
cluster_info.read().unwrap().my_data().tpu,
|
||||
cluster_info.my_contact_info().tpu,
|
||||
)?;
|
||||
sleep(Duration::from_millis(100));
|
||||
if Self::poll_for_signature_confirmation(
|
||||
@@ -652,10 +652,10 @@ impl StorageStage {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn test_cluster_info(id: &Pubkey) -> Arc<RwLock<ClusterInfo>> {
|
||||
pub fn test_cluster_info(id: &Pubkey) -> Arc<ClusterInfo> {
|
||||
let contact_info = ContactInfo::new_localhost(id, 0);
|
||||
let cluster_info = ClusterInfo::new_with_invalid_keypair(contact_info);
|
||||
Arc::new(RwLock::new(cluster_info))
|
||||
Arc::new(cluster_info)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
|
@@ -36,7 +36,7 @@ pub struct Tpu {
|
||||
impl Tpu {
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub fn new(
|
||||
cluster_info: &Arc<RwLock<ClusterInfo>>,
|
||||
cluster_info: &Arc<ClusterInfo>,
|
||||
poh_recorder: &Arc<Mutex<PohRecorder>>,
|
||||
entry_receiver: Receiver<WorkingBankEntry>,
|
||||
retransmit_slots_receiver: RetransmitSlotsReceiver,
|
||||
|
@@ -84,7 +84,7 @@ impl Tvu {
|
||||
authorized_voter_keypairs: Vec<Arc<Keypair>>,
|
||||
storage_keypair: &Arc<Keypair>,
|
||||
bank_forks: &Arc<RwLock<BankForks>>,
|
||||
cluster_info: &Arc<RwLock<ClusterInfo>>,
|
||||
cluster_info: &Arc<ClusterInfo>,
|
||||
sockets: Sockets,
|
||||
blockstore: Arc<Blockstore>,
|
||||
storage_state: &StorageState,
|
||||
@@ -103,11 +103,7 @@ impl Tvu {
|
||||
retransmit_slots_sender: RetransmitSlotsSender,
|
||||
tvu_config: TvuConfig,
|
||||
) -> Self {
|
||||
let keypair: Arc<Keypair> = cluster_info
|
||||
.read()
|
||||
.expect("Unable to read from cluster_info during Tvu creation")
|
||||
.keypair
|
||||
.clone();
|
||||
let keypair: Arc<Keypair> = cluster_info.keypair.clone();
|
||||
|
||||
let Sockets {
|
||||
repair: repair_socket,
|
||||
@@ -170,7 +166,7 @@ impl Tvu {
|
||||
accounts_hash_receiver,
|
||||
snapshot_package_sender,
|
||||
exit,
|
||||
cluster_info,
|
||||
&cluster_info,
|
||||
tvu_config.trusted_validators.clone(),
|
||||
tvu_config.halt_on_trusted_validators_accounts_hash_mismatch,
|
||||
tvu_config.accounts_hash_fault_injection_slots,
|
||||
@@ -257,12 +253,14 @@ pub mod tests {
|
||||
use super::*;
|
||||
use crate::banking_stage::create_test_recorder;
|
||||
use crate::cluster_info::{ClusterInfo, Node};
|
||||
use serial_test_derive::serial;
|
||||
use solana_ledger::create_new_tmp_ledger;
|
||||
use solana_ledger::genesis_utils::{create_genesis_config, GenesisConfigInfo};
|
||||
use solana_runtime::bank::Bank;
|
||||
use std::sync::atomic::Ordering;
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn test_tvu_exit() {
|
||||
solana_logger::setup();
|
||||
let leader = Node::new_localhost();
|
||||
@@ -275,9 +273,9 @@ pub mod tests {
|
||||
let bank_forks = BankForks::new(0, Bank::new(&genesis_config));
|
||||
|
||||
//start cluster_info1
|
||||
let mut cluster_info1 = ClusterInfo::new_with_invalid_keypair(target1.info.clone());
|
||||
let cluster_info1 = ClusterInfo::new_with_invalid_keypair(target1.info.clone());
|
||||
cluster_info1.insert_info(leader.info.clone());
|
||||
let cref1 = Arc::new(RwLock::new(cluster_info1));
|
||||
let cref1 = Arc::new(cluster_info1);
|
||||
|
||||
let (blockstore_path, _) = create_new_tmp_ledger!(&genesis_config);
|
||||
let (blockstore, l_receiver, completed_slots_receiver) =
|
||||
|
@@ -229,10 +229,7 @@ impl Validator {
|
||||
}
|
||||
}
|
||||
|
||||
let cluster_info = Arc::new(RwLock::new(ClusterInfo::new(
|
||||
node.info.clone(),
|
||||
keypair.clone(),
|
||||
)));
|
||||
let cluster_info = Arc::new(ClusterInfo::new(node.info.clone(), keypair.clone()));
|
||||
|
||||
let storage_state = StorageState::new(
|
||||
&bank.last_blockhash(),
|
||||
@@ -265,6 +262,7 @@ impl Validator {
|
||||
ledger_path,
|
||||
storage_state.clone(),
|
||||
validator_exit.clone(),
|
||||
config.trusted_validators.clone(),
|
||||
),
|
||||
PubSubService::new(
|
||||
&subscriptions,
|
||||
@@ -363,10 +361,7 @@ impl Validator {
|
||||
// Insert the entrypoint info, should only be None if this node
|
||||
// is the bootstrap validator
|
||||
if let Some(entrypoint_info) = entrypoint_info_option {
|
||||
cluster_info
|
||||
.write()
|
||||
.unwrap()
|
||||
.set_entrypoint(entrypoint_info.clone());
|
||||
cluster_info.set_entrypoint(entrypoint_info.clone());
|
||||
}
|
||||
|
||||
let (snapshot_packager_service, snapshot_package_sender) =
|
||||
@@ -634,11 +629,7 @@ fn new_banks_from_blockstore(
|
||||
)
|
||||
}
|
||||
|
||||
fn wait_for_supermajority(
|
||||
config: &ValidatorConfig,
|
||||
bank: &Arc<Bank>,
|
||||
cluster_info: &Arc<RwLock<ClusterInfo>>,
|
||||
) {
|
||||
fn wait_for_supermajority(config: &ValidatorConfig, bank: &Bank, cluster_info: &ClusterInfo) {
|
||||
if config.wait_for_supermajority != Some(bank.slot()) {
|
||||
return;
|
||||
}
|
||||
@@ -647,10 +638,9 @@ fn wait_for_supermajority(
|
||||
"Waiting for 80% of activated stake at slot {} to be in gossip...",
|
||||
bank.slot()
|
||||
);
|
||||
loop {
|
||||
let gossip_stake_percent = get_stake_percent_in_gossip(&bank, &cluster_info);
|
||||
for i in 1.. {
|
||||
let gossip_stake_percent = get_stake_percent_in_gossip(&bank, &cluster_info, i % 10 == 0);
|
||||
|
||||
info!("{}% of activated stake in gossip", gossip_stake_percent,);
|
||||
if gossip_stake_percent >= 80 {
|
||||
break;
|
||||
}
|
||||
@@ -781,33 +771,86 @@ fn report_target_features() {
|
||||
}
|
||||
|
||||
// Get the activated stake percentage (based on the provided bank) that is visible in gossip
|
||||
fn get_stake_percent_in_gossip(bank: &Arc<Bank>, cluster_info: &Arc<RwLock<ClusterInfo>>) -> u64 {
|
||||
let mut gossip_stake = 0;
|
||||
fn get_stake_percent_in_gossip(bank: &Bank, cluster_info: &ClusterInfo, log: bool) -> u64 {
|
||||
let mut online_stake = 0;
|
||||
let mut wrong_shred_stake = 0;
|
||||
let mut wrong_shred_nodes = vec![];
|
||||
let mut offline_stake = 0;
|
||||
let mut offline_nodes = vec![];
|
||||
|
||||
let mut total_activated_stake = 0;
|
||||
let tvu_peers = cluster_info.read().unwrap().tvu_peers();
|
||||
let me = cluster_info.read().unwrap().my_data();
|
||||
let all_tvu_peers = cluster_info.all_tvu_peers();
|
||||
let my_shred_version = cluster_info.my_shred_version();
|
||||
let my_id = cluster_info.id();
|
||||
|
||||
for (activated_stake, vote_account) in bank.vote_accounts().values() {
|
||||
let vote_state =
|
||||
solana_vote_program::vote_state::VoteState::from(&vote_account).unwrap_or_default();
|
||||
total_activated_stake += activated_stake;
|
||||
if tvu_peers
|
||||
|
||||
if *activated_stake == 0 {
|
||||
continue;
|
||||
}
|
||||
|
||||
if let Some(peer) = all_tvu_peers
|
||||
.iter()
|
||||
.filter(|peer| peer.shred_version == me.shred_version)
|
||||
.any(|peer| peer.id == vote_state.node_pubkey)
|
||||
.find(|peer| peer.id == vote_state.node_pubkey)
|
||||
{
|
||||
trace!(
|
||||
"observed {} in gossip, (activated_stake={})",
|
||||
vote_state.node_pubkey,
|
||||
activated_stake
|
||||
);
|
||||
gossip_stake += activated_stake;
|
||||
} else if vote_state.node_pubkey == cluster_info.read().unwrap().id() {
|
||||
gossip_stake += activated_stake;
|
||||
if peer.shred_version == my_shred_version {
|
||||
trace!(
|
||||
"observed {} in gossip, (activated_stake={})",
|
||||
vote_state.node_pubkey,
|
||||
activated_stake
|
||||
);
|
||||
online_stake += activated_stake;
|
||||
} else {
|
||||
wrong_shred_stake += activated_stake;
|
||||
wrong_shred_nodes.push((*activated_stake, vote_state.node_pubkey));
|
||||
}
|
||||
} else if vote_state.node_pubkey == my_id {
|
||||
online_stake += activated_stake; // This node is online
|
||||
} else {
|
||||
offline_stake += activated_stake;
|
||||
offline_nodes.push((*activated_stake, vote_state.node_pubkey));
|
||||
}
|
||||
}
|
||||
|
||||
gossip_stake * 100 / total_activated_stake
|
||||
if log {
|
||||
info!(
|
||||
"{}% of active stake visible in gossip",
|
||||
online_stake * 100 / total_activated_stake
|
||||
);
|
||||
|
||||
if !wrong_shred_nodes.is_empty() {
|
||||
info!(
|
||||
"{}% of active stake has the wrong shred version in gossip",
|
||||
wrong_shred_stake * 100 / total_activated_stake,
|
||||
);
|
||||
for (stake, identity) in wrong_shred_nodes {
|
||||
info!(
|
||||
" {}% - {}",
|
||||
stake * 100 / total_activated_stake,
|
||||
identity
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
if !offline_nodes.is_empty() {
|
||||
info!(
|
||||
"{}% of active stake is not visible in gossip",
|
||||
offline_stake * 100 / total_activated_stake
|
||||
);
|
||||
for (stake, identity) in offline_nodes {
|
||||
info!(
|
||||
" {}% - {}",
|
||||
stake * 100 / total_activated_stake,
|
||||
identity
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
online_stake * 100 / total_activated_stake
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
|
@@ -249,7 +249,7 @@ impl WindowService {
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub fn new<F>(
|
||||
blockstore: Arc<Blockstore>,
|
||||
cluster_info: Arc<RwLock<ClusterInfo>>,
|
||||
cluster_info: Arc<ClusterInfo>,
|
||||
verified_receiver: CrossbeamReceiver<Vec<Packets>>,
|
||||
retransmit: PacketSender,
|
||||
repair_socket: Arc<UdpSocket>,
|
||||
@@ -294,7 +294,7 @@ impl WindowService {
|
||||
);
|
||||
|
||||
let t_window = Self::start_recv_window_thread(
|
||||
cluster_info.read().unwrap().id(),
|
||||
cluster_info.id(),
|
||||
exit,
|
||||
&blockstore,
|
||||
insert_sender,
|
||||
@@ -514,7 +514,7 @@ mod test {
|
||||
net::UdpSocket,
|
||||
sync::atomic::{AtomicBool, Ordering},
|
||||
sync::mpsc::channel,
|
||||
sync::{Arc, RwLock},
|
||||
sync::Arc,
|
||||
thread::sleep,
|
||||
time::Duration,
|
||||
};
|
||||
@@ -630,9 +630,9 @@ mod test {
|
||||
|
||||
let blockstore = Arc::new(blockstore);
|
||||
let (retransmit_sender, _retransmit_receiver) = channel();
|
||||
let cluster_info = Arc::new(RwLock::new(ClusterInfo::new_with_invalid_keypair(
|
||||
let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(
|
||||
ContactInfo::new_localhost(&Pubkey::default(), 0),
|
||||
)));
|
||||
));
|
||||
let cluster_slots = Arc::new(ClusterSlots::default());
|
||||
let repair_sock = Arc::new(UdpSocket::bind(socketaddr_any!()).unwrap());
|
||||
let window = WindowService::new(
|
||||
|
@@ -24,7 +24,6 @@ mod tests {
|
||||
signature::{Keypair, Signer},
|
||||
system_transaction,
|
||||
};
|
||||
use std::sync::RwLock;
|
||||
use std::{fs, path::PathBuf, sync::atomic::AtomicBool, sync::mpsc::channel, sync::Arc};
|
||||
use tempfile::TempDir;
|
||||
|
||||
@@ -312,9 +311,7 @@ mod tests {
|
||||
// channel hold hard links to these deleted snapshots. We verify this is the case below.
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
|
||||
let cluster_info = Arc::new(RwLock::new(ClusterInfo::new_with_invalid_keypair(
|
||||
ContactInfo::default(),
|
||||
)));
|
||||
let cluster_info = Arc::new(ClusterInfo::new_with_invalid_keypair(ContactInfo::default()));
|
||||
|
||||
let snapshot_packager_service =
|
||||
SnapshotPackagerService::new(receiver, None, &exit, &cluster_info);
|
||||
|
@@ -72,7 +72,7 @@ fn run_simulation(stakes: &[u64], fanout: usize) {
|
||||
|
||||
// describe the leader
|
||||
let leader_info = ContactInfo::new_localhost(&Pubkey::new_rand(), 0);
|
||||
let mut cluster_info = ClusterInfo::new_with_invalid_keypair(leader_info.clone());
|
||||
let cluster_info = ClusterInfo::new_with_invalid_keypair(leader_info.clone());
|
||||
|
||||
// setup staked nodes
|
||||
let mut staked_nodes = HashMap::new();
|
||||
@@ -105,7 +105,7 @@ fn run_simulation(stakes: &[u64], fanout: usize) {
|
||||
senders.lock().unwrap().insert(node.id, s);
|
||||
})
|
||||
});
|
||||
let c_info = cluster_info.clone();
|
||||
let c_info = cluster_info.clone_with_id(&cluster_info.id());
|
||||
|
||||
let staked_nodes = Arc::new(staked_nodes);
|
||||
let shreds_len = 100;
|
||||
@@ -140,7 +140,6 @@ fn run_simulation(stakes: &[u64], fanout: usize) {
|
||||
// start turbine simulation
|
||||
let now = Instant::now();
|
||||
batches.par_iter_mut().for_each(|batch| {
|
||||
let mut cluster = c_info.clone();
|
||||
let mut remaining = batch.len();
|
||||
let senders: HashMap<_, _> = senders.lock().unwrap().clone();
|
||||
while remaining > 0 {
|
||||
@@ -150,7 +149,7 @@ fn run_simulation(stakes: &[u64], fanout: usize) {
|
||||
"Timed out with {:?} remaining nodes",
|
||||
remaining
|
||||
);
|
||||
cluster.gossip.set_self(&*id);
|
||||
let cluster = c_info.clone_with_id(id);
|
||||
if !*layer1_done {
|
||||
recv.iter().for_each(|i| {
|
||||
retransmit(
|
||||
|
@@ -10,19 +10,16 @@ use solana_sdk::signature::{Keypair, Signer};
|
||||
use solana_sdk::timing::timestamp;
|
||||
use std::net::UdpSocket;
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::sync::{Arc, RwLock};
|
||||
use std::sync::Arc;
|
||||
use std::thread::sleep;
|
||||
use std::time::Duration;
|
||||
|
||||
fn test_node(exit: &Arc<AtomicBool>) -> (Arc<RwLock<ClusterInfo>>, GossipService, UdpSocket) {
|
||||
fn test_node(exit: &Arc<AtomicBool>) -> (Arc<ClusterInfo>, GossipService, UdpSocket) {
|
||||
let keypair = Arc::new(Keypair::new());
|
||||
let mut test_node = Node::new_localhost_with_pubkey(&keypair.pubkey());
|
||||
let cluster_info = Arc::new(RwLock::new(ClusterInfo::new(
|
||||
test_node.info.clone(),
|
||||
keypair,
|
||||
)));
|
||||
let cluster_info = Arc::new(ClusterInfo::new(test_node.info.clone(), keypair));
|
||||
let gossip_service = GossipService::new(&cluster_info, None, test_node.sockets.gossip, exit);
|
||||
let _ = cluster_info.read().unwrap().my_data();
|
||||
let _ = cluster_info.my_contact_info();
|
||||
(
|
||||
cluster_info,
|
||||
gossip_service,
|
||||
@@ -36,7 +33,7 @@ fn test_node(exit: &Arc<AtomicBool>) -> (Arc<RwLock<ClusterInfo>>, GossipService
|
||||
/// tests that actually use this function are below
|
||||
fn run_gossip_topo<F>(num: usize, topo: F)
|
||||
where
|
||||
F: Fn(&Vec<(Arc<RwLock<ClusterInfo>>, GossipService, UdpSocket)>) -> (),
|
||||
F: Fn(&Vec<(Arc<ClusterInfo>, GossipService, UdpSocket)>) -> (),
|
||||
{
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let listen: Vec<_> = (0..num).map(|_| test_node(&exit)).collect();
|
||||
@@ -44,10 +41,7 @@ where
|
||||
let mut done = true;
|
||||
for i in 0..(num * 32) {
|
||||
done = true;
|
||||
let total: usize = listen
|
||||
.iter()
|
||||
.map(|v| v.0.read().unwrap().gossip_peers().len())
|
||||
.sum();
|
||||
let total: usize = listen.iter().map(|v| v.0.gossip_peers().len()).sum();
|
||||
if (total + num) * 10 > num * num * 9 {
|
||||
done = true;
|
||||
break;
|
||||
@@ -71,11 +65,10 @@ fn gossip_ring() {
|
||||
for n in 0..num {
|
||||
let y = n % listen.len();
|
||||
let x = (n + 1) % listen.len();
|
||||
let mut xv = listen[x].0.write().unwrap();
|
||||
let yv = listen[y].0.read().unwrap();
|
||||
let mut d = yv.lookup(&yv.id()).unwrap().clone();
|
||||
let yv = &listen[y].0;
|
||||
let mut d = yv.lookup_contact_info(&yv.id(), |ci| ci.clone()).unwrap();
|
||||
d.wallclock = timestamp();
|
||||
xv.insert_info(d);
|
||||
listen[x].0.insert_info(d);
|
||||
}
|
||||
});
|
||||
}
|
||||
@@ -90,11 +83,10 @@ fn gossip_ring_large() {
|
||||
for n in 0..num {
|
||||
let y = n % listen.len();
|
||||
let x = (n + 1) % listen.len();
|
||||
let mut xv = listen[x].0.write().unwrap();
|
||||
let yv = listen[y].0.read().unwrap();
|
||||
let mut d = yv.lookup(&yv.id()).unwrap().clone();
|
||||
let yv = &listen[y].0;
|
||||
let mut d = yv.lookup_contact_info(&yv.id(), |ci| ci.clone()).unwrap();
|
||||
d.wallclock = timestamp();
|
||||
xv.insert_info(d);
|
||||
listen[x].0.insert_info(d);
|
||||
}
|
||||
});
|
||||
}
|
||||
@@ -107,10 +99,10 @@ fn gossip_star() {
|
||||
for n in 0..(num - 1) {
|
||||
let x = 0;
|
||||
let y = (n + 1) % listen.len();
|
||||
let mut xv = listen[x].0.write().unwrap();
|
||||
let yv = listen[y].0.read().unwrap();
|
||||
let mut yd = yv.lookup(&yv.id()).unwrap().clone();
|
||||
let yv = &listen[y].0;
|
||||
let mut yd = yv.lookup_contact_info(&yv.id(), |ci| ci.clone()).unwrap();
|
||||
yd.wallclock = timestamp();
|
||||
let xv = &listen[x].0;
|
||||
xv.insert_info(yd);
|
||||
trace!("star leader {}", &xv.id());
|
||||
}
|
||||
@@ -124,13 +116,13 @@ fn gossip_rstar() {
|
||||
run_gossip_topo(10, |listen| {
|
||||
let num = listen.len();
|
||||
let xd = {
|
||||
let xv = listen[0].0.read().unwrap();
|
||||
xv.lookup(&xv.id()).unwrap().clone()
|
||||
let xv = &listen[0].0;
|
||||
xv.lookup_contact_info(&xv.id(), |ci| ci.clone()).unwrap()
|
||||
};
|
||||
trace!("rstar leader {}", xd.id);
|
||||
for n in 0..(num - 1) {
|
||||
let y = (n + 1) % listen.len();
|
||||
let mut yv = listen[y].0.write().unwrap();
|
||||
let yv = &listen[y].0;
|
||||
yv.insert_info(xd.clone());
|
||||
trace!("rstar insert {} into {}", xd.id, yv.id());
|
||||
}
|
||||
@@ -147,10 +139,10 @@ pub fn cluster_info_retransmit() {
|
||||
let (c2, dr2, tn2) = test_node(&exit);
|
||||
trace!("c3:");
|
||||
let (c3, dr3, tn3) = test_node(&exit);
|
||||
let c1_data = c1.read().unwrap().my_data().clone();
|
||||
let c1_contact_info = c1.my_contact_info();
|
||||
|
||||
c2.write().unwrap().insert_info(c1_data.clone());
|
||||
c3.write().unwrap().insert_info(c1_data.clone());
|
||||
c2.insert_info(c1_contact_info.clone());
|
||||
c3.insert_info(c1_contact_info);
|
||||
|
||||
let num = 3;
|
||||
|
||||
@@ -158,9 +150,9 @@ pub fn cluster_info_retransmit() {
|
||||
trace!("waiting to converge:");
|
||||
let mut done = false;
|
||||
for _ in 0..30 {
|
||||
done = c1.read().unwrap().gossip_peers().len() == num - 1
|
||||
&& c2.read().unwrap().gossip_peers().len() == num - 1
|
||||
&& c3.read().unwrap().gossip_peers().len() == num - 1;
|
||||
done = c1.gossip_peers().len() == num - 1
|
||||
&& c2.gossip_peers().len() == num - 1
|
||||
&& c3.gossip_peers().len() == num - 1;
|
||||
if done {
|
||||
break;
|
||||
}
|
||||
@@ -169,7 +161,7 @@ pub fn cluster_info_retransmit() {
|
||||
assert!(done);
|
||||
let mut p = Packet::default();
|
||||
p.meta.size = 10;
|
||||
let peers = c1.read().unwrap().retransmit_peers();
|
||||
let peers = c1.retransmit_peers();
|
||||
let retransmit_peers: Vec<_> = peers.iter().collect();
|
||||
ClusterInfo::retransmit_to(&retransmit_peers, &mut p, None, &tn1, false).unwrap();
|
||||
let res: Vec<_> = [tn1, tn2, tn3]
|
||||
|
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-crate-features"
|
||||
version = "1.1.3"
|
||||
version = "1.1.7"
|
||||
description = "Solana Crate Features"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@@ -26,3 +26,6 @@ syn_0_15 = { package = "syn", version = "0.15.42", features = ["extra-traits", "
|
||||
syn_1_0 = { package = "syn", version = "1.0.3", features = ["extra-traits", "fold", "full"] }
|
||||
tokio = { version = "0.1.22",features=["bytes", "codec", "default", "fs", "io", "mio", "num_cpus", "reactor", "rt-full", "sync", "tcp", "timer", "tokio-codec", "tokio-current-thread", "tokio-executor", "tokio-io", "tokio-io", "tokio-reactor", "tokio-tcp", "tokio-tcp", "tokio-threadpool", "tokio-timer", "tokio-udp", "tokio-uds", "udp", "uds"] }
|
||||
winapi = { version = "0.3.8", features=["basetsd", "consoleapi", "errhandlingapi", "fileapi", "handleapi", "impl-debug", "impl-default", "knownfolders", "libloaderapi", "memoryapi", "minwinbase", "minwindef", "ntdef", "ntsecapi", "ntstatus", "objbase", "processenv", "processthreadsapi", "profileapi", "shlobj", "std", "synchapi", "sysinfoapi", "timezoneapi", "utilapiset", "winbase", "wincon", "windef", "winerror", "winnls", "winnt", "winreg", "winsock2", "winuser", "ws2def", "ws2ipdef", "ws2tcpip", "wtypesbase"] }
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
||||
|
@@ -3,7 +3,9 @@ set -e
|
||||
|
||||
cd "$(dirname "$0")"
|
||||
|
||||
usage=$(cargo -q run -p solana-cli -- -C ~/.foo --help | sed -e 's|'"$HOME"'|~|g' -e 's/[[:space:]]\+$//')
|
||||
: "${rust_stable:=}" # Pacify shellcheck
|
||||
|
||||
usage=$(cargo +"$rust_stable" -q run -p solana-cli -- -C ~/.foo --help | sed -e 's|'"$HOME"'|~|g' -e 's/[[:space:]]\+$//')
|
||||
|
||||
out=${1:-src/cli/usage.md}
|
||||
|
||||
@@ -31,6 +33,6 @@ in_subcommands=0
|
||||
while read -r subcommand rest; do
|
||||
[[ $subcommand == "SUBCOMMANDS:" ]] && in_subcommands=1 && continue
|
||||
if ((in_subcommands)); then
|
||||
section "$(cargo -q run -p solana-cli -- help "$subcommand" | sed -e 's|'"$HOME"'|~|g' -e 's/[[:space:]]\+$//')" "####" >> "$out"
|
||||
section "$(cargo +"$rust_stable" -q run -p solana-cli -- help "$subcommand" | sed -e 's|'"$HOME"'|~|g' -e 's/[[:space:]]\+$//')" "####" >> "$out"
|
||||
fi
|
||||
done <<<"$usage">>"$out"
|
||||
|
@@ -1,17 +1,17 @@
|
||||
# Table of contents
|
||||
|
||||
* [Introduction](introduction.md)
|
||||
* [Wallet Guide](wallet/README.md)
|
||||
* [App Wallets](wallet/app-wallets.md)
|
||||
* [Trust Wallet](wallet/trust-wallet.md)
|
||||
* [Ledger Live](wallet/ledger-live.md)
|
||||
* [Command-line Wallets](wallet/cli-wallets.md)
|
||||
* [Wallet Guide](wallet-guide/README.md)
|
||||
* [App Wallets](wallet-guide/apps.md)
|
||||
* [Trust Wallet](wallet-guide/trust-wallet.md)
|
||||
* [Ledger Live](wallet-guide/ledger-live.md)
|
||||
* [Command-line Wallets](wallet-guide/cli.md)
|
||||
* [Paper Wallet](paper-wallet/README.md)
|
||||
* [Paper Wallet Usage](paper-wallet/paper-wallet-usage.md)
|
||||
* [Hardware Wallets](remote-wallet/README.md)
|
||||
* [Ledger Hardware Wallet](remote-wallet/ledger.md)
|
||||
* [Hardware Wallets](hardware-wallets/README.md)
|
||||
* [Ledger Hardware Wallet](hardware-wallets/ledger.md)
|
||||
* [File System Wallet](file-system-wallet/README.md)
|
||||
* [Support / Troubleshooting](wallet/support.md)
|
||||
* [Support / Troubleshooting](wallet-guide/support.md)
|
||||
* [Command Line Guide](cli/README.md)
|
||||
* [Install the Solana Command Line Tool Suite](cli/install-solana-cli-tools.md)
|
||||
* [Command Line Conventions](cli/conventions.md)
|
||||
@@ -22,6 +22,7 @@
|
||||
* [Offline Signing](offline-signing/README.md)
|
||||
* [Durable Transaction Nonces](offline-signing/durable-nonce.md)
|
||||
* [Command-line Reference](cli/usage.md)
|
||||
* [Solana Clusters](clusters.md)
|
||||
* [Develop Applications](apps/README.md)
|
||||
* [Example: Web Wallet](apps/webwallet.md)
|
||||
* [Example: Tic-Tac-Toe](apps/tictactoe.md)
|
||||
@@ -29,6 +30,13 @@
|
||||
* [Anatomy of a Transaction](transaction.md)
|
||||
* [JSON RPC API](apps/jsonrpc-api.md)
|
||||
* [JavaScript API](apps/javascript-api.md)
|
||||
* [Run a Validator](running-validator/README.md)
|
||||
* [Validator Requirements](running-validator/validator-reqs.md)
|
||||
* [Start a Validator](running-validator/validator-start.md)
|
||||
* [Stake](running-validator/validator-stake.md)
|
||||
* [Monitor a Validator](running-validator/validator-monitor.md)
|
||||
* [Publish Validator Info](running-validator/validator-info.md)
|
||||
* [Troubleshoot](running-validator/validator-troubleshoot.md)
|
||||
* [Participate in Tour de SOL](tour-de-sol/README.md)
|
||||
* [Useful Links & Discussion](tour-de-sol/useful-links.md)
|
||||
* [Registration](tour-de-sol/registration/README.md)
|
||||
@@ -39,24 +47,9 @@
|
||||
* [Registration FAQ](tour-de-sol/registration/validator-registration-and-rewards-faq.md)
|
||||
* [Participation](tour-de-sol/participation/README.md)
|
||||
* [Requirements to run a validator](tour-de-sol/participation/validator-technical-requirements.md)
|
||||
* [Steps to create a validator](tour-de-sol/participation/steps-to-create-a-validator/README.md)
|
||||
* [Install the Solana software](tour-de-sol/participation/steps-to-create-a-validator/install-the-solana-software.md)
|
||||
* [Create a validator public key](tour-de-sol/participation/steps-to-create-a-validator/validator-public-key-registration.md)
|
||||
* [Create and configure a Solana validator](tour-de-sol/participation/steps-to-create-a-validator/connecting-your-validator.md)
|
||||
* [Confirm the Solana network is running](tour-de-sol/participation/steps-to-create-a-validator/confirm-the-solana-network-is-running.md)
|
||||
* [Connect to the Solana network](tour-de-sol/participation/steps-to-create-a-validator/connect-to-the-solana-network.md)
|
||||
* [Validator catch-up](tour-de-sol/participation/steps-to-create-a-validator/monitoring-your-validator.md)
|
||||
* [Staking](tour-de-sol/participation/steps-to-create-a-validator/delegating-stake.md)
|
||||
* [Publish information about your validator](tour-de-sol/participation/steps-to-create-a-validator/publishing-information-about-your-validator.md)
|
||||
* [Dry Run 6](tour-de-sol/participation/dry-run-6.md)
|
||||
* [Create a validator public key](tour-de-sol/participation/validator-public-key-registration.md)
|
||||
* [Steps to create a validator](tour-de-sol/participation/steps-to-create-a-validator.md)
|
||||
* [Submit Bug Reports](tour-de-sol/submitting-bugs.md)
|
||||
* [Run a Validator](running-validator/README.md)
|
||||
* [Validator Requirements](running-validator/validator-reqs.md)
|
||||
* [Start a Validator](running-validator/validator-start.md)
|
||||
* [Stake](running-validator/validator-stake.md)
|
||||
* [Monitor a Validator](running-validator/validator-monitor.md)
|
||||
* [Publish Validator Info](running-validator/validator-info.md)
|
||||
* [Troubleshoot](running-validator/validator-troubleshoot.md)
|
||||
* [Benchmark a Cluster](cluster/bench-tps.md)
|
||||
* [Performance Metrics](cluster/performance-metrics.md)
|
||||
* [Solana's Architecture](cluster/README.md)
|
||||
|
@@ -27,6 +27,7 @@ To interact with a Solana node inside a JavaScript application, use the [solana-
|
||||
* [getEpochSchedule](jsonrpc-api.md#getepochschedule)
|
||||
* [getFeeCalculatorForBlockhash](jsonrpc-api.md#getfeecalculatorforblockhash)
|
||||
* [getFeeRateGovernor](jsonrpc-api.md#getfeerategovernor)
|
||||
* [getFirstAvailableBlock](jsonrpc-api.md#getfirstavailableblock)
|
||||
* [getGenesisHash](jsonrpc-api.md#getgenesishash)
|
||||
* [getIdentity](jsonrpc-api.md#getidentity)
|
||||
* [getInflation](jsonrpc-api.md#getinflation)
|
||||
@@ -115,6 +116,16 @@ Many methods that take a commitment parameter return an RpcResponse JSON object
|
||||
* `value` : The value returned by the operation itself.

## Health Check

Although not a JSON RPC API, a `GET /health` request at the RPC HTTP Endpoint provides a
health-check mechanism for use by load balancers or other network
infrastructure. This request will always return an HTTP 200 OK response with a body of
"ok" or "behind", based on the following conditions:

1. If one or more `--trusted-validator` arguments are provided to `solana-validator`, "ok" is returned
   when the node is within `HEALTH_CHECK_SLOT_DISTANCE` slots of the highest trusted validator,
   otherwise "behind" is returned.
2. "ok" is always returned if no trusted validators are provided.
## JSON RPC API Reference
|
||||
|
||||
### getAccountInfo
|
||||
@@ -128,14 +139,15 @@ Returns all information associated with the account of provided Pubkey
|
||||
|
||||
#### Results:
|
||||
|
||||
The result value will be an RpcResponse JSON object containing an AccountInfo JSON object.
|
||||
The result will be an RpcResponse JSON object with `value` equal to:
|
||||
|
||||
* `RpcResponse<AccountInfo>`, RpcResponse JSON object with `value` field set to AccountInfo, a JSON object containing:
|
||||
* `lamports: <u64>`, number of lamports assigned to this account, as a u64
|
||||
* `owner: <string>`, base-58 encoded Pubkey of the program this account has been assigned to
|
||||
* `data: <string>`, base-58 encoded data associated with the account
|
||||
* `executable: <bool>`, boolean indicating if the account contains a program \(and is strictly read-only\)
|
||||
* `rentEpoch`: <u64>, the epoch at which this account will next owe rent, as u64
|
||||
* `<null>` - if the requested account doesn't exist
|
||||
* `<object>` - otherwise, a JSON object containing:
|
||||
* `lamports: <u64>`, number of lamports assigned to this account, as a u64
|
||||
* `owner: <string>`, base-58 encoded Pubkey of the program this account has been assigned to
|
||||
* `data: <string>`, base-58 encoded data associated with the account
|
||||
* `executable: <bool>`, boolean indicating if the account contains a program \(and is strictly read-only\)
|
||||
* `rentEpoch`: <u64>, the epoch at which this account will next owe rent, as u64
|
||||
|
||||
#### Example:
|
||||
|
||||
@@ -270,22 +282,24 @@ Returns identity and transaction information about a confirmed block in the ledg
|
||||
|
||||
The result field will be an object with the following fields:
|
||||
|
||||
* `blockhash: <string>` - the blockhash of this block, as base-58 encoded string
|
||||
* `previousBlockhash: <string>` - the blockhash of this block's parent, as base-58 encoded string
|
||||
* `parentSlot: <u64>` - the slot index of this block's parent
|
||||
* `transactions: <array>` - an array of JSON objects containing:
|
||||
* `transaction: <object|string>` - [Transaction](#transaction-structure) object, either in JSON format or base-58 encoded binary data, depending on encoding parameter
|
||||
* `meta: <object>` - transaction status metadata object, containing `null` or:
|
||||
* `err: <object | null>` - Error if transaction failed, null if transaction succeeded. [TransactionError definitions](https://github.com/solana-labs/solana/blob/master/sdk/src/transaction.rs#L14)
|
||||
* `fee: <u64>` - fee this transaction was charged, as u64 integer
|
||||
* `preBalances: <array>` - array of u64 account balances from before the transaction was processed
|
||||
* `postBalances: <array>` - array of u64 account balances after the transaction was processed
|
||||
* DEPRECATED: `status: <object>` - Transaction status
|
||||
* `"Ok": <null>` - Transaction was successful
|
||||
* `"Err": <ERR>` - Transaction failed with TransactionError
|
||||
* `rewards: <array>` - an array of JSON objects containing:
|
||||
* `pubkey: <string>` - The public key, as base-58 encoded string, of the account that received the reward
|
||||
* `lamports: <i64>`- number of reward lamports credited or debited by the account, as a i64
|
||||
* `<null>` - if specified block is not confirmed
|
||||
* `<object>` - if block is confirmed, an object with the following fields:
|
||||
* `blockhash: <string>` - the blockhash of this block, as base-58 encoded string
|
||||
* `previousBlockhash: <string>` - the blockhash of this block's parent, as base-58 encoded string; if the parent block is not available due to ledger cleanup, this field will return "11111111111111111111111111111111"
|
||||
* `parentSlot: <u64>` - the slot index of this block's parent
|
||||
* `transactions: <array>` - an array of JSON objects containing:
|
||||
* `transaction: <object|string>` - [Transaction](#transaction-structure) object, either in JSON format or base-58 encoded binary data, depending on encoding parameter
|
||||
* `meta: <object>` - transaction status metadata object, containing `null` or:
|
||||
* `err: <object | null>` - Error if transaction failed, null if transaction succeeded. [TransactionError definitions](https://github.com/solana-labs/solana/blob/master/sdk/src/transaction.rs#L14)
|
||||
* `fee: <u64>` - fee this transaction was charged, as u64 integer
|
||||
* `preBalances: <array>` - array of u64 account balances from before the transaction was processed
|
||||
* `postBalances: <array>` - array of u64 account balances after the transaction was processed
|
||||
* DEPRECATED: `status: <object>` - Transaction status
|
||||
* `"Ok": <null>` - Transaction was successful
|
||||
* `"Err": <ERR>` - Transaction failed with TransactionError
|
||||
* `rewards: <array>` - an array of JSON objects containing:
|
||||
* `pubkey: <string>` - The public key, as base-58 encoded string, of the account that received the reward
|
||||
* `lamports: <i64>`- number of reward lamports credited or debited by the account, as a i64
|
||||
|
||||
#### Example:
|
||||
|
||||
@@ -385,17 +399,18 @@ Returns transaction details for a confirmed transaction
|
||||
|
||||
#### Results:
|
||||
|
||||
The result field will be an object with the following fields:
|
||||
* `slot: <u64>` - the slot this transaction was processed in
|
||||
* `transaction: <object|string>` - [Transaction](#transaction-structure) object, either in JSON format or base-58 encoded binary data, depending on encoding parameter
|
||||
* `meta: <object>` - transaction status metadata object, containing `null` or:
|
||||
* `err: <object | null>` - Error if transaction failed, null if transaction succeeded. [TransactionError definitions](https://github.com/solana-labs/solana/blob/master/sdk/src/transaction.rs#L14)
|
||||
* `fee: <u64>` - fee this transaction was charged, as u64 integer
|
||||
* `preBalances: <array>` - array of u64 account balances from before the transaction was processed
|
||||
* `postBalances: <array>` - array of u64 account balances after the transaction was processed
|
||||
* DEPRECATED: `status: <object>` - Transaction status
|
||||
* `"Ok": <null>` - Transaction was successful
|
||||
* `"Err": <ERR>` - Transaction failed with TransactionError
|
||||
* `<null>` - if transaction is not found or not confirmed
|
||||
* `<object>` - if transaction is confirmed, an object with the following fields:
|
||||
* `slot: <u64>` - the slot this transaction was processed in
|
||||
* `transaction: <object|string>` - [Transaction](#transaction-structure) object, either in JSON format or base-58 encoded binary data, depending on encoding parameter
|
||||
* `meta: <object | null>` - transaction status metadata object:
|
||||
* `err: <object | null>` - Error if transaction failed, null if transaction succeeded. [TransactionError definitions](https://github.com/solana-labs/solana/blob/master/sdk/src/transaction.rs#L14)
|
||||
* `fee: <u64>` - fee this transaction was charged, as u64 integer
|
||||
* `preBalances: <array>` - array of u64 account balances from before the transaction was processed
|
||||
* `postBalances: <array>` - array of u64 account balances after the transaction was processed
|
||||
* DEPRECATED: `status: <object>` - Transaction status
|
||||
* `"Ok": <null>` - Transaction was successful
|
||||
* `"Err": <ERR>` - Transaction failed with TransactionError
|
||||
|
||||
#### Example:
|
||||
|
||||
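A minimal request sketch, assuming a local RPC node and a placeholder `<TRANSACTION_SIGNATURE>` (base-58 encoded):

```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getConfirmedTransaction", "params":["<TRANSACTION_SIGNATURE>", "json"]}' http://localhost:8899
```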
@@ -478,9 +493,11 @@ Returns the fee calculator associated with the query blockhash, or `null` if the
|
||||
|
||||
#### Results:
|
||||
|
||||
The `result` field will be `null` if the query blockhash has expired, otherwise an `object` with the following fields:
|
||||
The result will be an RpcResponse JSON object with `value` equal to:
|
||||
|
||||
* `feeCalculator: <object>`, `FeeCalculator` object describing the cluster fee rate at the queried blockhash
|
||||
* `<null>` - if the query blockhash has expired
|
||||
* `<object>` - otherwise, a JSON object containing:
|
||||
* `feeCalculator: <object>`, `FeeCalculator` object describing the cluster fee rate at the queried blockhash
|
||||
|
||||
#### Example:
|
||||
|
||||
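A minimal request sketch, assuming a local RPC node and a placeholder `<BLOCKHASH>` (base-58 encoded):

```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getFeeCalculatorForBlockhash", "params":["<BLOCKHASH>"]}' http://localhost:8899
```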
@@ -520,6 +537,28 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "m
|
||||
{"jsonrpc":"2.0","result":{"context":{"slot":54},"value":{"feeRateGovernor":{"burnPercent":50,"maxLamportsPerSignature":100000,"minLamportsPerSignature":5000,"targetLamportsPerSignature":10000,"targetSignaturesPerSlot":20000}}},"id":1}
|
||||
```
|
||||
|
||||
### getFirstAvailableBlock
|
||||
|
||||
Returns the slot of the lowest confirmed block that has not been purged from the ledger
|
||||
|
||||
#### Parameters:
|
||||
|
||||
None
|
||||
|
||||
#### Results:
|
||||
|
||||
* `<u64>` - Slot
|
||||
|
||||
#### Example:
|
||||
|
||||
```bash
|
||||
// Request
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getFirstAvailableBlock"}' http://localhost:8899
|
||||
|
||||
// Result
|
||||
{"jsonrpc":"2.0","result":250000,"id":1}
|
||||
```
|
||||
|
||||
### getGenesisHash
|
||||
|
||||
Returns the genesis hash
|
||||
@@ -605,9 +644,10 @@ Returns the leader schedule for an epoch
|
||||
|
||||
#### Results:
|
||||
|
||||
The result field will be a dictionary of leader public keys \(as base-58 encoded
|
||||
strings\) and their corresponding leader slot indices as values (indices are to
|
||||
the first slot in the requested epoch)
|
||||
* `<null>` - if requested epoch is not found
|
||||
* `<object>` - otherwise, the result field will be a dictionary of leader public keys
|
||||
\(as base-58 encoded strings\) and their corresponding leader slot indices as values
|
||||
(indices are to the first slot in the requested epoch)
|
||||
|
||||
#### Example:
|
||||
|
||||
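A minimal request sketch for the current epoch (no parameters), assuming a local RPC node:

```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getLeaderSchedule"}' http://localhost:8899
```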
@@ -723,7 +763,7 @@ An array of:
|
||||
* `<null>` - Unknown transaction
|
||||
* `<object>`
|
||||
* `slot: <u64>` - The slot the transaction was processed
|
||||
* `confirmations: <usize | null>` - Number of blocks since signature confirmation, null if rooted
|
||||
* `confirmations: <usize | null>` - Number of blocks since signature confirmation, null if rooted, as well as finalized by a supermajority of the cluster
|
||||
* `err: <object | null>` - Error if transaction failed, null if transaction succeeded. [TransactionError definitions](https://github.com/solana-labs/solana/blob/master/sdk/src/transaction.rs#L14)
|
||||
* DEPRECATED: `status: <object>` - Transaction status
|
||||
* `"Ok": <null>` - Transaction was successful
|
||||
@@ -940,7 +980,7 @@ The result field will be a JSON object with the following fields:
|
||||
// Request
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getVersion"}' http://localhost:8899
|
||||
// Result
|
||||
{"jsonrpc":"2.0","result":{"solana-core": "1.1.3"},"id":1}
|
||||
{"jsonrpc":"2.0","result":{"solana-core": "1.1.7"},"id":1}
|
||||
```
|
||||
|
||||
### getVoteAccounts
|
||||
|
@@ -14,5 +14,5 @@ secure access to your Solana accounts.
|
||||
To get started using the Solana Command Line (CLI) tools:
|
||||
- [Install the Solana Tools](install-solana-cli-tools.md)
|
||||
- [Choose a Cluster](choose-a-cluster.md)
|
||||
- [Create a Wallet](../wallet/cli-wallets.md)
|
||||
- [Create a Wallet](../wallet-guide/cli.md)
|
||||
- [Check out our CLI conventions](conventions.md)
|
||||
|
@@ -1,60 +1,9 @@
|
||||
# Connecting to a Cluster
|
||||
See [Solana Clusters](../clusters.md) for general information about the
|
||||
available clusters.
|
||||
|
||||
Solana maintains several different clusters with different purposes. Each
|
||||
cluster features a Solana-run gossip node that serves as an entrypoint to the
|
||||
cluster.
|
||||
|
||||
## Before you Begin
|
||||
|
||||
- Make sure you have first
|
||||
[installed the Solana command line tools](install-solana-cli-tools.md)
|
||||
|
||||
## Choose a Cluster
|
||||
|
||||
#### Mainnet Beta
|
||||
A permissionless, persistent cluster for early token holders and launch partners.
|
||||
No smart contracts or inflation.
|
||||
* Tokens that are issued on Mainnet Beta are **real** SOL
|
||||
* If you have paid money to purchase/be issued tokens, such as through our
|
||||
CoinList auction, these tokens will be transferred on Mainnet Beta.
|
||||
* Note: If you are using a non-command-line wallet such as
|
||||
[Trust Wallet](../wallet/trust-wallet.md),
|
||||
the wallet will always be connecting to Mainnet Beta.
|
||||
* Gossip entrypoint for Mainnet Beta: `mainnet-beta.solana.com:8001`
|
||||
* RPC URL for Mainnet Beta: `https://api.mainnet-beta.solana.com`
|
||||
|
||||
#### Devnet
|
||||
* Devnet serves as a playground for anyone who wants to take Solana for a
|
||||
test drive, as a user, token holder, app developer, or validator.
|
||||
* Application developers should target Devnet.
|
||||
* Potential validators should first target Devnet.
|
||||
* Key differences between Devnet and Mainnet Beta:
|
||||
* Devnet tokens are **not real**
|
||||
* Devnet includes a token faucet for airdrops for application testing
|
||||
* Devnet may be subject to ledger resets
|
||||
* Devnet typically runs a newer software version than Mainnet Beta
|
||||
* Devnet may be maintained by different validators than Mainnet Beta
|
||||
* Gossip entrypoint for Devnet: `devnet.solana.com:8001`
|
||||
* RPC URL for Devnet: `https://devnet.solana.com`
|
||||
|
||||
#### Testnet (Tour de SOL Cluster)
|
||||
* Testnet is where we stress test recent release features on a live
|
||||
cluster, particularly focused on network performance, stability and validator
|
||||
behavior.
|
||||
* [Tour de SOL](../tour-de-sol/README.md) initiative runs on Testnet, where we
|
||||
encourage malicious behavior and attacks on the network to help us find and
|
||||
squash bugs or network vulnerabilities.
|
||||
* Testnet tokens are **not real**
|
||||
* Testnet may be subject to ledger resets.
|
||||
* Testnet typically runs a newer software release than both Devnet and
|
||||
Mainnet Beta
|
||||
* Testnet may be maintained by different validators than Mainnet Beta
|
||||
* Gossip entrypoint for Testnet: `testnet.solana.com:8001`
|
||||
* RPC URL for Testnet: `https://testnet.solana.com`
|
||||
|
||||
## Configure the Command-line
|
||||
|
||||
You can check what cluster the Solana CLI is currently targeting by
|
||||
## Configure the command-line tool
|
||||
You can check what cluster the Solana command-line tool (CLI) is currently targeting by
|
||||
running the following command:
|
||||
|
||||
```bash
|
||||
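# Presumably the command referenced above; it prints the current CLI
# configuration, including the configured cluster RPC URL.
solana config get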
@@ -65,21 +14,11 @@ Use `solana config set` command to target a particular cluster. After setting
|
||||
a cluster target, any future subcommands will send/receive information from that
|
||||
cluster.
|
||||
|
||||
##### Targeting Mainnet Beta
|
||||
```bash
|
||||
solana config set --url https://api.mainnet-beta.solana.com
|
||||
```
|
||||
|
||||
##### Targeting Devnet
|
||||
For example, to target the Devnet cluster, run:
|
||||
```bash
|
||||
solana config set --url https://devnet.solana.com
|
||||
```
|
||||
|
||||
##### Targeting Testnet
|
||||
```bash
|
||||
solana config set --url https://testnet.solana.com
|
||||
```
|
||||
|
||||
## Ensure Versions Match
|
||||
|
||||
Though not strictly necessary, the CLI will generally work best when its version
|
||||
|
@@ -29,7 +29,7 @@ the base58 encoding of your public key, such as
|
||||
|
||||
Many commands using the CLI tools require a value for a `<KEYPAIR>`. The value
|
||||
you should use for the keypair depends on what type of
|
||||
[command line wallet you created](../wallet/cli-wallets.md).
|
||||
[command line wallet you created](../wallet-guide/cli.md).
|
||||
|
||||
For example, to display any wallet's address
|
||||
(also known as the keypair's pubkey), the CLI help document shows:
|
||||
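The help output itself is not reproduced here; presumably the usage is of the form below, where `<KEYPAIR>` is the placeholder the conventions in this section explain how to fill in:

```text
solana-keygen pubkey <KEYPAIR>
```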
@@ -66,7 +66,7 @@ solana-keygen pubkey /home/solana/my_wallet.json
|
||||
#### Hardware Wallet
|
||||
|
||||
If you chose a hardware wallet, use your
|
||||
[keypair URL](../remote-wallet/README.md#specify-a-hardware-wallet-key),
|
||||
[keypair URL](../hardware-wallets/README.md#specify-a-hardware-wallet-key),
|
||||
such as `usb://ledger?key=0`.
|
||||
```bash
|
||||
solana-keygen pubkey usb://ledger?key=0
|
||||
|
@@ -2,7 +2,7 @@
|
||||
This page describes how to receive and send SOL tokens using the command line
|
||||
tools with a command line wallet such as a [paper wallet](../paper-wallet/README.md),
|
||||
a [file system wallet](../file-system-wallet/README.md), or a
|
||||
[hardware wallet](../remote-wallet/README.md). Before you begin, make sure
|
||||
[hardware wallet](../hardware-wallets/README.md). Before you begin, make sure
|
||||
you have created a wallet and have access to its address (pubkey) and the
|
||||
signing keypair. Check out our
|
||||
[conventions for entering keypairs for different wallet types](../cli/conventions.md#keypair-conventions).
|
||||
@@ -58,7 +58,7 @@ pubkey: GKvqsuNcnwWqPzzuhLmGi4rzzh55FhJtGizkhHaEJqiV
|
||||
You can also create a second (or more) wallet of any type:
|
||||
[paper](../paper-wallet/paper-wallet-usage.md#creating-multiple-paper-wallet-addresses),
|
||||
[file system](../file-system-wallet/README.md#creating-multiple-file-system-wallet-addresses),
|
||||
or [hardware](../remote-wallet/README.md#multiple-addresses-on-a-single-hardware-wallet).
|
||||
or [hardware](../hardware-wallets/README.md#multiple-addresses-on-a-single-hardware-wallet).
|
||||
|
||||
#### Transfer tokens from your first wallet to the second address
|
||||
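As a sketch of the command this heading introduces (assuming the `solana transfer` subcommand; `<KEYPAIR>`, `<RECIPIENT_ADDRESS>`, and `<AMOUNT>` are placeholders, and exact flags may vary by CLI version):

```bash
solana transfer --from <KEYPAIR> <RECIPIENT_ADDRESS> <AMOUNT> --fee-payer <KEYPAIR>
```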
|
||||
@@ -131,7 +131,7 @@ To receive tokens, you will need an address for others to send tokens to. In
|
||||
Solana, the wallet address is the public key of a keypair. There are a variety
|
||||
of techniques for generating keypairs. The method you choose will depend on how
|
||||
you choose to store keypairs. Keypairs are stored in wallets. Before receiving
|
||||
tokens, you will need to [create a wallet](../wallet/cli-wallets.md).
|
||||
tokens, you will need to [create a wallet](../wallet-guide/cli.md).
|
||||
Once completed, you should have a public key
|
||||
for each keypair you generated. The public key is a long string of base58
|
||||
characters. Its length varies from 32 to 44 characters.
|
||||
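For example, a file system wallet keypair can be generated and its address displayed as follows (a minimal sketch; `~/my-solana-wallet.json` is a hypothetical path):

```bash
solana-keygen new --outfile ~/my-solana-wallet.json   # writes a new keypair file
solana-keygen pubkey ~/my-solana-wallet.json          # prints the wallet address (base58 public key)
```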
|
@@ -129,4 +129,4 @@ In this example the client connects to our public testnet. To run validators on
|
||||
$ NDEBUG=1 ./multinode-demo/bench-tps.sh --entrypoint devnet.solana.com:8001 --faucet devnet.solana.com:9900 --duration 60 --tx_count 50
|
||||
```
|
||||
|
||||
You can observe the effects of your client's transactions on our [dashboard](https://metrics.solana.com:3000/d/testnet/testnet-hud?orgId=2&from=now-30m&to=now&refresh=5s&var-testnet=testnet)
|
||||
You can observe the effects of your client's transactions on our [metrics dashboard](https://metrics.solana.com:3000/d/monitor/cluster-telemetry?var-testnet=devnet)
|
||||
|
@@ -6,7 +6,7 @@ Solana takes a very different approach, which it calls _Proof of History_ or _Po
|
||||
|
||||
Solana technically never sends a _block_, but uses the term to describe the sequence of entries that validators vote on to achieve _confirmation_. In that way, Solana's confirmation times can be compared apples to apples to block-based systems. The current implementation sets block time to 800ms.
|
||||
|
||||
What's happening under the hood is that entries are streamed to validators as quickly as a leader node can batch a set of valid transactions into an entry. Validators process those entries long before it is time to vote on their validity. By processing the transactions optimistically, there is effectively no delay between the time the last entry is received and the time when the node can vote. In the event consensus is **not** achieved, a node simply rolls back its state. This optimistic processing technique was introduced in 1981 and called [Optimistic Concurrency Control](http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.65.4735). It can be applied to blockchain architecture where a cluster votes on a hash that represents the full ledger up to some _block height_. In Solana, it is implemented trivially using the last entry's PoH hash.
|
||||
What's happening under the hood is that entries are streamed to validators as quickly as a leader node can batch a set of valid transactions into an entry. Validators process those entries long before it is time to vote on their validity. By processing the transactions optimistically, there is effectively no delay between the time the last entry is received and the time when the node can vote. In the event consensus is **not** achieved, a node simply rolls back its state. This optimistic processing technique was introduced in 1981 and called [Optimistic Concurrency Control](http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.75.4735). It can be applied to blockchain architecture where a cluster votes on a hash that represents the full ledger up to some _block height_. In Solana, it is implemented trivially using the last entry's PoH hash.
|
||||
|
||||
## Relationship to VDFs
|
||||
|
||||
|
119
docs/src/clusters.md
Normal file
@@ -0,0 +1,119 @@
|
||||
# Solana Clusters
|
||||
Solana maintains several different clusters with different purposes.
|
||||
|
||||
Before you begin, make sure you have first
|
||||
[installed the Solana command line tools](cli/install-solana-cli-tools.md)
|
||||
|
||||
Explorers:
|
||||
* [https://explorer.solana.com/](https://explorer.solana.com/).
|
||||
* [http://solanabeach.io/](http://solanabeach.io/).
|
||||
|
||||
## Devnet
|
||||
* Devnet serves as a playground for anyone who wants to take Solana for a
|
||||
test drive, as a user, token holder, app developer, or validator.
|
||||
* Application developers should target Devnet.
|
||||
* Potential validators should first target Devnet.
|
||||
* Key differences between Devnet and Mainnet Beta:
|
||||
* Devnet tokens are **not real**
|
||||
* Devnet includes a token faucet for airdrops for application testing
|
||||
* Devnet may be subject to ledger resets
|
||||
* Devnet typically runs a newer software version than Mainnet Beta
|
||||
* Devnet may be maintained by different validators than Mainnet Beta
|
||||
* Gossip entrypoint for Devnet: `devnet.solana.com:8001`
|
||||
* RPC URL for Devnet: `https://devnet.solana.com`
|
||||
|
||||
|
||||
##### Example `solana` command-line configuration
|
||||
```bash
|
||||
solana config set --url https://devnet.solana.com
|
||||
```
|
||||
|
||||
##### Example `solana-validator` command-line
|
||||
```bash
|
||||
$ solana-validator \
|
||||
--identity ~/validator-keypair.json \
|
||||
--vote-account ~/vote-account-keypair.json \
|
||||
--trusted-validator 47Sbuv6jL7CViK9F2NMW51aQGhfdpUu7WNvKyH645Rfi \
|
||||
--no-untrusted-rpc \
|
||||
--ledger ~/validator-ledger \
|
||||
--rpc-port 8899 \
|
||||
--dynamic-port-range 8000-8010 \
|
||||
--entrypoint devnet.solana.com:8001 \
|
||||
--expected-genesis-hash HzyuivuNXMHJKjM6q6BE2qBsR3etqW21BSvuJTpJFj9A \
|
||||
--expected-shred-version 61357 \
|
||||
--limit-ledger-size
|
||||
```
|
||||
|
||||
|
||||
## Testnet
|
||||
* Testnet is where we stress test recent release features on a live
|
||||
cluster, particularly focused on network performance, stability and validator
|
||||
behavior.
|
||||
* [Tour de SOL](tour-de-sol/README.md) initiative runs on Testnet, where we
|
||||
encourage malicious behavior and attacks on the network to help us find and
|
||||
squash bugs or network vulnerabilities.
|
||||
* Testnet tokens are **not real**
|
||||
* Testnet may be subject to ledger resets.
|
||||
* Testnet typically runs a newer software release than both Devnet and
|
||||
Mainnet Beta
|
||||
* Testnet may be maintained by different validators than Mainnet Beta
|
||||
* Gossip entrypoint for Testnet: `35.203.170.30:8001`
|
||||
* RPC URL for Testnet: `https://testnet.solana.com`
|
||||
|
||||
|
||||
##### Example `solana` command-line configuration
|
||||
```bash
|
||||
solana config set --url https://testnet.solana.com
|
||||
```
|
||||
|
||||
##### Example `solana-validator` command-line
|
||||
```bash
|
||||
$ solana-validator \
|
||||
--identity ~/validator-keypair.json \
|
||||
--vote-account ~/vote-account-keypair.json \
|
||||
--trusted-validator 5D1fNXzvv5NjV1ysLjirC4WY92RNsVH18vjmcszZd8on \
|
||||
--no-untrusted-rpc \
|
||||
--ledger ~/validator-ledger \
|
||||
--rpc-port 8899 \
|
||||
--dynamic-port-range 8000-8010 \
|
||||
--entrypoint 35.203.170.30:8001 \
|
||||
--expected-genesis-hash 4uhcVJyU9pJkvQyS88uRDiswHXSCkY3zQawwpjk2NsNY \
|
||||
--expected-shred-version 28769 \
|
||||
--limit-ledger-size
|
||||
```
|
||||
|
||||
## Mainnet Beta
|
||||
A permissionless, persistent cluster for early token holders and launch partners.
|
||||
Currently smart contracts, rewards, and inflation are disabled.
|
||||
* Tokens that are issued on Mainnet Beta are **real** SOL
|
||||
* If you have paid money to purchase/be issued tokens, such as through our
|
||||
CoinList auction, these tokens will be transferred on Mainnet Beta.
|
||||
* Note: If you are using a non-command-line wallet such as
|
||||
[Trust Wallet](wallet-guide/trust-wallet.md),
|
||||
the wallet will always be connecting to Mainnet Beta.
|
||||
* Gossip entrypoint for Mainnet Beta: `mainnet-beta.solana.com:8001`
|
||||
* RPC URL for Mainnet Beta: `https://api.mainnet-beta.solana.com`
|
||||
|
||||
##### Example `solana` command-line configuration
|
||||
```bash
|
||||
solana config set --url https://api.mainnet-beta.solana.com
|
||||
```
|
||||
|
||||
##### Example `solana-validator` command-line
|
||||
```bash
|
||||
$ solana-validator \
|
||||
--identity ~/validator-keypair.json \
|
||||
--vote-account ~/vote-account-keypair.json \
|
||||
--trusted-validator 7Np41oeYqPefeNQEHSv1UDhYrehxin3NStELsSKCT4K2 \
|
||||
--trusted-validator GdnSyH3YtwcxFvQrVVJMm1JhTS4QVX7MFsX56uJLUfiZ \
|
||||
--trusted-validator DE1bawNcRJB9rVm3buyMVfr8mBEoyyu73NBovf2oXJsJ \
|
||||
--trusted-validator CakcnaRDHka2gXyfbEd2d3xsvkJkqsLw2akB3zsN1D2S \
|
||||
--no-untrusted-rpc \
|
||||
--ledger ~/validator-ledger \
|
||||
--rpc-port 8899 \
|
||||
--dynamic-port-range 8000-8010 \
|
||||
--entrypoint mainnet-beta.solana.com:8001 \
|
||||
--expected-genesis-hash 5eykt4UsFv8P8NJdTREpY1vzqKqZKvdpKuc147dw2N9d \
|
||||
--expected-shred-version 54208 \
|
||||
--limit-ledger-size
|
||||
```
|
90
docs/src/hardware-wallets/ledger.md
Normal file
@@ -0,0 +1,90 @@
|
||||
# Ledger Hardware Wallet
|
||||
|
||||
The Ledger Nano S hardware wallet offers secure storage of your Solana private
|
||||
keys. The Solana Ledger app enables derivation of essentially infinite keys, and
|
||||
secure transaction signing.
|
||||
|
||||
## Before You Begin
|
||||
|
||||
- [Set up a Ledger Nano S with the Solana App](../wallet-guide/ledger-live.md)
|
||||
- [Install the Solana command-line tools](../cli/install-solana-cli-tools.md)
|
||||
|
||||
## Use Ledger Device with Solana CLI
|
||||
|
||||
1. Ensure the Ledger Live application is closed
|
||||
2. Plug your Ledger device into your computer's USB port
|
||||
3. Enter your pin and start the Solana app on the Ledger device
|
||||
4. Press both buttons to advance past the "Pending Ledger review" screen
|
||||
5. Ensure the screen reads "Application is ready"
|
||||
6. On your computer, run:
|
||||
|
||||
```bash
|
||||
solana-keygen pubkey usb://ledger
|
||||
```
|
||||
|
||||
This confirms your Ledger device is connected properly and in the correct state
|
||||
to interact with the Solana CLI. The command returns your Ledger's unique
|
||||
*wallet ID*. When you have multiple Nano S devices connected to the same
|
||||
computer, you can use your wallet ID to specify which Ledger hardware wallet
|
||||
you want to use. Run the same command again, but this time, with its fully
|
||||
qualified URL:
|
||||
|
||||
```bash
|
||||
solana-keygen pubkey usb://ledger/<WALLET_ID>
|
||||
```
|
||||
|
||||
where you replace `<WALLET_ID>` with the output of the first command.
|
||||
Confirm it prints the same wallet ID as before.
|
||||
|
||||
To learn more about keypair URLs, see
|
||||
[Specify A Hardware Wallet Key](README.md#specify-a-hardware-wallet-key)
|
||||
|
||||
Read more about [sending and receiving tokens](../cli/transfer-tokens.md) and
|
||||
[delegating stake](../cli/delegate-stake.md). You can use your Ledger keypair URL
|
||||
anywhere you see an option or argument that accepts a `<KEYPAIR>`.
|
||||
|
||||
### Install the Solana Beta App
|
||||
|
||||
For those of you that enjoy life on the edge, you can use the command-line to
|
||||
install the latest Solana Ledger app release before it has been validated by
|
||||
the Ledger team and made available via Ledger Live. Note that because the app
|
||||
is not installed via Ledger Live, you will need to approve installation from an
|
||||
"unsafe" manager, as well as see the message, "This app is not genuine" each
|
||||
time you open the app. Once the app is available on Ledger Live, you can
|
||||
reinstall the app from there, and the message will no longer be displayed.
|
||||
|
||||
1. Connect your Ledger device via USB and enter your pin to unlock it
|
||||
2. Download and run the Solana Ledger app installer:
|
||||
```text
|
||||
curl -sSLf https://github.com/solana-labs/ledger-app-solana/releases/download/v0.2.0/install.sh | sh
|
||||
```
|
||||
3. When prompted, approve the "unsafe" manager on your device
|
||||
4. When prompted, approve the installation on your device
|
||||
5. An installation window appears and your device will display "Processing..."
|
||||
6. The app installation is confirmed
|
||||
|
||||
If you encounter the following error:
|
||||
|
||||
```text
|
||||
Traceback (most recent call last):
|
||||
File "/Library/Developer/CommandLineTools/Library/Frameworks/Python3.framework/Versions/3.7/lib/python3.7/runpy.py", line 193, in _run_module_as_main
|
||||
"__main__", mod_spec)
|
||||
File "/Library/Developer/CommandLineTools/Library/Frameworks/Python3.framework/Versions/3.7/lib/python3.7/runpy.py", line 85, in _run_code
|
||||
exec(code, run_globals)
|
||||
File "ledger-env/lib/python3.7/site-packages/ledgerblue/loadApp.py", line 197, in <module>
|
||||
dongle = getDongle(args.apdu)
|
||||
File "ledger-env/lib/python3.7/site-packages/ledgerblue/comm.py", line 216, in getDongle
|
||||
dev.open_path(hidDevicePath)
|
||||
File "hid.pyx", line 72, in hid.device.open_path
|
||||
OSError: open failed
|
||||
```
|
||||
|
||||
To fix, check the following:
|
||||
|
||||
1. Ensure your Ledger device is connected to USB
|
||||
2. Ensure your Ledger device is unlocked and not waiting for you to enter your pin
|
||||
3. Ensure the Ledger Live application is not open
|
||||
|
||||
## Support
|
||||
|
||||
Check out our [Wallet Support Page](../wallet-guide/support.md) for ways to get help.
|
@@ -6,7 +6,7 @@ Solana is an open source project implementing a new, high-performance, permissio
|
||||
|
||||
## Why Solana?
|
||||
|
||||
It is possible for a centralized database to process 710,000 transactions per second on a standard gigabit network if the transactions are, on average, no more than 176 bytes. A centralized database can also replicate itself and maintain high availability without significantly compromising that transaction rate using the distributed system technique known as Optimistic Concurrency Control [\[H.T.Kung, J.T.Robinson (1981)\]](http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.65.4735). At Solana, we are demonstrating that these same theoretical limits apply just as well to blockchain on an adversarial network. The key ingredient? Finding a way to share time when nodes cannot trust one-another. Once nodes can trust time, suddenly ~40 years of distributed systems research becomes applicable to blockchain!
|
||||
It is possible for a centralized database to process 710,000 transactions per second on a standard gigabit network if the transactions are, on average, no more than 176 bytes. A centralized database can also replicate itself and maintain high availability without significantly compromising that transaction rate using the distributed system technique known as Optimistic Concurrency Control [\[H.T.Kung, J.T.Robinson (1981)\]](http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.75.4735). At Solana, we are demonstrating that these same theoretical limits apply just as well to blockchain on an adversarial network. The key ingredient? Finding a way to share time when nodes cannot trust one-another. Once nodes can trust time, suddenly ~40 years of distributed systems research becomes applicable to blockchain!
|
||||
|
||||
> Perhaps the most striking difference between algorithms obtained by our method and ones based upon timeout is that using timeout produces a traditional distributed algorithm in which the processes operate asynchronously, while our method produces a globally synchronous one in which every process does the same thing at (approximately) the same time. Our method seems to contradict the whole purpose of distributed processing, which is to permit different processes to operate independently and perform different functions. However, if a distributed system is really a single system, then the processes must be synchronized in some way. Conceptually, the easiest way to synchronize processes is to get them all to do the same thing at the same time. Therefore, our method is used to implement a kernel that performs the necessary synchronization--for example, making sure that two different processes do not try to modify a file at the same time. Processes might spend only a small fraction of their time executing the synchronizing kernel; the rest of the time, they can operate independently--e.g., accessing different files. This is an approach we have advocated even when fault-tolerance is not required. The method's basic simplicity makes it easier to understand the precise properties of a system, which is crucial if one is to know just how fault-tolerant the system is. [\[L.Lamport (1984)\]](http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.71.1078)
|
||||
|
||||
|
@@ -168,4 +168,4 @@ your own accounts for different purposes.
|
||||
|
||||
## Support
|
||||
|
||||
Check out our [Wallet Support Page](../wallet/support.md) for ways to get help.
|
||||
Check out our [Wallet Support Page](../wallet-guide/support.md) for ways to get help.
|
||||
|
@@ -1,48 +0,0 @@
|
||||
# Ledger Hardware Wallet
|
||||
|
||||
The Ledger Nano S hardware wallet offers secure storage of your Solana private
|
||||
keys. The Solana Ledger app enables derivation of essentially infinite keys, and
|
||||
secure transaction signing.
|
||||
|
||||
## Before You Begin
|
||||
|
||||
- [Set up a Ledger Nano S with the Solana App](../wallet/ledger-live.md)
|
||||
- [Install the Solana command-line tools](../cli/install-solana-cli-tools.md)
|
||||
|
||||
## Use Ledger Device with Solana CLI
|
||||
|
||||
1. Ensure the Ledger Live application is closed
|
||||
2. Plug your Ledger device into your computer's USB port
|
||||
3. Enter your pin and start the Solana app on the Ledger device
|
||||
4. Press both buttons to advance past the "Pending Ledger review" screen
|
||||
5. Ensure the screen reads "Application is ready"
|
||||
6. On your computer, run:
|
||||
|
||||
```bash
|
||||
solana-keygen pubkey usb://ledger
|
||||
```
|
||||
|
||||
This confirms your Ledger device is connected properly and in the correct state
|
||||
to interact with the Solana CLI. The command returns your Ledger's unique
|
||||
*wallet ID*. When you have multiple Nano S devices connected to the same
|
||||
computer, you can use your wallet key to specify which Ledger hardware wallet
|
||||
you want to use. Run the same command again, but this time, with its fully
|
||||
qualified URL:
|
||||
|
||||
```bash
|
||||
solana-keygen pubkey usb://ledger/<WALLET_ID>
|
||||
```
|
||||
|
||||
where you replace `<WALLET_ID>` with the output of the first command.
|
||||
Confirm it prints the same wallet ID as before.
|
||||
|
||||
To learn more about keypair URLs, see
|
||||
[Specify A Hardware Wallet Key](README.md#specify-a-hardware-wallet-key)
|
||||
|
||||
Read more about [sending and receiving tokens](../cli/transfer-tokens.md) and
|
||||
[delegating stake](../cli/delegate-stake.md). You can use your Ledger keypair URL
|
||||
anywhere you see an option or argument that accepts a `<KEYPAIR>`.
|
||||
|
||||
## Support
|
||||
|
||||
Check out our [Wallet Support Page](../wallet/support.md) for ways to get help.
|
@@ -1,32 +1,5 @@
|
||||
# Running a Validator
|
||||
|
||||
This document describes how to participate in the Solana testnet as a validator
|
||||
node.
|
||||
This section describes how to run a Solana validator node.
|
||||
|
||||
Please note some of the information and instructions described here may change
|
||||
in future releases, and documentation will be updated for mainnet participation.
|
||||
|
||||
## Overview
|
||||
|
||||
Solana currently maintains several testnets, each featuring a validator that can
|
||||
serve as the entrypoint to the cluster for your validator.
|
||||
|
||||
Current testnet entrypoints:
|
||||
|
||||
* Developer testnet, devnet.solana.com
|
||||
|
||||
Solana may launch special testnets for validator participation; we will provide
|
||||
you with a specific entrypoint URL to use.
|
||||
|
||||
Prior to mainnet, the testnets may be running different versions of solana
|
||||
software, which may feature breaking changes. For information on choosing a
|
||||
testnet and finding software version info, jump to [Choosing a Testnet](validator-testnet.md).
|
||||
|
||||
The testnets are configured to reset the ledger daily, or sooner, should the
|
||||
hourly automated cluster sanity test fail.
|
||||
|
||||
There is a network explorer that shows the status of solana testnets available
|
||||
at [http://explorer.solana.com/](https://explorer.solana.com/).
|
||||
|
||||
Also we'd love it if you choose to register your validator node with us at
|
||||
[https://forms.gle/LfFscZqJELbuUP139](https://forms.gle/LfFscZqJELbuUP139).
|
||||
There are several clusters available to connect to, see [choosing a Cluster](../cli/choose-a-cluster.md) for an overview of each.
|
||||
|
@@ -86,7 +86,7 @@ To monitor your validator during its warmup period:
|
||||
* `solana validators` displays the current active stake of all validators, including yours
|
||||
* `solana stake-history` shows the history of stake warming up and cooling down over recent epochs
|
||||
* Look for log messages on your validator indicating your next leader slot: `[2019-09-27T20:16:00.319721164Z INFO solana_core::replay_stage] <VALIDATOR_IDENTITY_PUBKEY> voted and reset PoH at tick height ####. My next leader slot is ####`
|
||||
* Once your stake is warmed up, you will see a stake balance listed for your validator on the [Solana Network Explorer](http://explorer.solana.com/validators)
|
||||
* Once your stake is warmed up, you will see a stake balance listed for your validator by running `solana validators`
|
||||
|
||||
## Monitor Your Staked Validator
|
||||
|
||||
|
@@ -9,10 +9,10 @@ set the `--url` argument for cli commands. For example:
|
||||
solana config set --url http://devnet.solana.com
|
||||
```
|
||||
|
||||
\(You can always override the set configuration by explicitly passing the
|
||||
`--url` argument with a command, eg: `solana --url http://tds.solana.com balance`\)
|
||||
While this section demonstrates how to connect to the Devnet cluster, the steps
|
||||
are similar for the other [Solana Clusters](../clusters.md).
|
||||
|
||||
## Confirm The Testnet Is Reachable
|
||||
## Confirm The Cluster Is Reachable
|
||||
|
||||
Before attaching a validator node, sanity check that the cluster is accessible
|
||||
to your machine by fetching the transaction count:
|
||||
@@ -21,9 +21,6 @@ to your machine by fetching the transaction count:
|
||||
solana transaction-count
|
||||
```
|
||||
|
||||
Inspect the network explorer at
|
||||
[https://explorer.solana.com/](https://explorer.solana.com/) for activity.
|
||||
|
||||
View the [metrics dashboard](https://metrics.solana.com:3000/d/monitor/cluster-telemetry) for more
|
||||
detail on cluster activity.
|
||||
|
||||
@@ -111,7 +108,7 @@ network. **It is crucial to back-up this information.**
|
||||
|
||||
If you don’t back up this information, you WILL NOT BE ABLE TO RECOVER YOUR
|
||||
VALIDATOR if you lose access to it. If this happens, YOU WILL LOSE YOUR
|
||||
ALLOCATION OF LAMPORTS TOO.
|
||||
ALLOCATION OF SOL TOO.
|
||||
|
||||
To back up your validator identity keypair, **back up your
|
||||
"validator-keypair.json” file or your seed phrase to a secure location.**
|
||||
@@ -138,8 +135,10 @@ Wallet Config Updated: /home/solana/.config/solana/wallet/config.yml
|
||||
Airdrop yourself some SOL to get started:
|
||||
|
||||
```bash
|
||||
solana airdrop 1000
|
||||
solana airdrop 10
|
||||
```
|
||||
Note that airdrops are only available on Devnet. Testnet SOL can be obtained by
|
||||
participating in the [Tour de SOL](../tour-de-sol/README.md) program.
|
||||
|
||||
To view your current balance:
|
||||
|
||||
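The balance check itself is presumably just the following, using the keypair configured above:

```bash
solana balance
```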
@@ -187,12 +186,16 @@ account state divergence.
|
||||
|
||||
## Connect Your Validator
|
||||
|
||||
Connect to a testnet cluster by running:
|
||||
Connect to the cluster by running:
|
||||
|
||||
```bash
|
||||
solana-validator --identity ~/validator-keypair.json --vote-account ~/vote-account-keypair.json \
|
||||
--ledger ~/validator-ledger --rpc-port 8899 --entrypoint devnet.solana.com:8001 \
|
||||
--limit-ledger-size
|
||||
solana-validator \
|
||||
--identity ~/validator-keypair.json \
|
||||
--vote-account ~/vote-account-keypair.json \
|
||||
--ledger ~/validator-ledger \
|
||||
--rpc-port 8899 \
|
||||
--entrypoint devnet.solana.com:8001 \
|
||||
--limit-ledger-size
|
||||
```
|
||||
|
||||
To force validator logging to the console add a `--log -` argument, otherwise
|
||||
@@ -219,9 +222,9 @@ If your validator is connected, its public key and IP address will appear in the
|
||||
By default the validator will dynamically select available network ports in the
|
||||
8000-10000 range, and may be overridden with `--dynamic-port-range`. For
|
||||
example, `solana-validator --dynamic-port-range 11000-11010 ...` will restrict
|
||||
the validator to ports 11000-11011.
|
||||
the validator to ports 11000-11010.
|
||||
|
||||
### Limiting ledger size to conserve disk space
|
||||
|
||||
The `--limit-ledger-size` arg will instruct the validator to only retain the
|
||||
The `--limit-ledger-size` argument will instruct the validator to only retain the
|
||||
last couple of hours of the ledger. To retain the full ledger, simply remove that argument.
|
||||
|
@@ -1,5 +1 @@
|
||||
---
|
||||
description: Setup configuration
|
||||
---
|
||||
|
||||
# Participation
|
||||
|
@@ -1,30 +0,0 @@
|
||||
# Dry Run 6
|
||||
|
||||
Dry Run 6 will test the performance of a heterogeneous cluster of validators under heavier transaction load. It will be structured as a series of increasing rounds of transactions per second \(TPS\). At the end of each round the validators that survive and produce a block in at least 80% of their leader slots will receive additional stake for the next round. The rounds will continue until the cluster stops making progress.
|
||||
|
||||

|
||||
|
||||
## Cluster Parameters:
|
||||
|
||||
* GPUs optional
|
||||
* Solana software version: v0.22.2
|
||||
* Target transaction rate of first round: 2,000 TPS
|
||||
* Transaction rate round increment: 2,000 TPS
|
||||
* Epoch duration: 4096 slots \(approximately 27 minutes\)
|
||||
* Transaction signature fee: 0 lamport
|
||||
* Genesis allotment to each validator: 2 SOL \(1 SOL for initial delegation, 1 SOL for transaction fees\)
|
||||
* Round duration: 20 minutes of transactions, followed by 2-3 epochs for stake warm up
|
||||
|
||||
## Timeline
|
||||
|
||||
### 24 hour onboarding period: Connect, delegate, stake warmup \(approximately 24 hours\)
|
||||
|
||||
When the cluster boots, validators will have over 24 hours to connect and delegate 1 SOL of stake to themselves
|
||||
|
||||
### Ramp TPS rounds begin! \(approx. 1-2 hours per round\)
|
||||
|
||||
Now that all validator stake is active, the Ramp TPS program will begin running the rounds of increasing transactions-per-second until the cluster fails to maintain consensus or TPS cannot be pushed any higher. Each round starts with 20 minutes of solid transactions. After 20 minutes, all validators that remain with the cluster will receive an additional stake delegation. This new stake will also take 3-4 epochs to warm up, and once warm-up is complete the next round commences with an increased transaction rate.
|
||||
|
||||
## References
|
||||
|
||||
* [Ramp TPS program](https://github.com/solana-labs/tour-de-sol/tree/master/ramp-tps)
|
@@ -0,0 +1,9 @@
|
||||
# Steps to create a validator
|
||||
|
||||
To create a Solana validator, follow the normal [validator workflow](../../running-validator/validator-start.md)
|
||||
targeting the [Testnet cluster](../../clusters.md).
|
||||
|
||||
Note that Testnet validators are automatically staked by a process that runs
|
||||
every Epoch. If your validator is running correctly then in a couple of days it
|
||||
will be staked (and automatically destaked if offline for a prolonged period of
|
||||
time).
|
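A minimal sketch for confirming your validator has been staked, assuming the CLI is pointed at Testnet and `<IDENTITY_PUBKEY>` is your validator identity public key:

```bash
solana config set --url https://testnet.solana.com
solana validators | grep <IDENTITY_PUBKEY>
```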
@@ -1,12 +0,0 @@
|
||||
# Steps to create a validator
|
||||
|
||||
To create a Solana validator, complete the following steps, using the instructions that follow this section.
|
||||
|
||||
* [Install the Solana software](install-the-solana-software.md)
|
||||
* [Create your validator public key](validator-public-key-registration.md)
|
||||
* [Create and configure your validator](connecting-your-validator.md)
|
||||
* [Confirm the Solana network is running](confirm-the-solana-network-is-running.md)
|
||||
* [Connect your validator to the Solana network](connect-to-the-solana-network.md)
|
||||
* [Validator catch-up](monitoring-your-validator.md)
|
||||
* [Execute core validator functions](delegating-stake.md)
|
||||
* [Publish validator info \(optional\)](publishing-information-about-your-validator.md)
|
@@ -1,23 +0,0 @@
|
||||
# Confirm the Solana network is running
|
||||
|
||||
Before you connect your validator to the Solana network, confirm the network is running. To do this, view the existing nodes in the network using:
|
||||
|
||||
```bash
|
||||
solana-gossip spy --entrypoint tds.solana.com:8001
|
||||
```
|
||||
|
||||
If you see more than 1 node listed in the output of the above command, the network is running.
|
||||
|
||||
To view the current active stake of all validators, run:
|
||||
|
||||
```bash
|
||||
solana show-validators
|
||||
```
|
||||
|
||||
Finally the `ping` command can be used to check that the cluster is able to process transactions:
|
||||
|
||||
```bash
|
||||
solana ping
|
||||
```
|
||||
|
||||
This command sends a tiny transaction every 2 seconds and reports how long it takes to confirm it.
|
Some files were not shown because too many files have changed in this diff.