Compare commits
104 Commits
SHA1 | Author | Date | |
---|---|---|---|
3db644320e | |||
e29fffbdb8 | |||
f5e6259199 | |||
0c2f002774 | |||
9815dcc159 | |||
87a46ffc30 | |||
19215ddaa2 | |||
e35bd54d99 | |||
6672d640f5 | |||
b8b84a4c95 | |||
9677b602e7 | |||
771e1e3a71 | |||
c12ea7a112 | |||
6047796c24 | |||
11a0a9ac47 | |||
5fbe5aa22d | |||
8a879a52ef | |||
c7669d4afe | |||
1f81206210 | |||
5a2a34b035 | |||
2ef7579b6c | |||
fed7cfef58 | |||
8b2ad77699 | |||
abcabc18ac | |||
4e0a3862a6 | |||
2eaf47d563 | |||
31f7b3782e | |||
d6169f92c1 | |||
7df72d36c4 | |||
5318cdac8f | |||
00434d5e6e | |||
ebf644ddef | |||
5e4fe9c67b | |||
1b65f9189e | |||
57d91c9da0 | |||
a6e6ec63f1 | |||
b8b1e57df4 | |||
969afe54c2 | |||
5a8d4dcbd7 | |||
685ef72288 | |||
521fd755ac | |||
74fe6163c6 | |||
6e592dba17 | |||
625a9fd932 | |||
5d37a0d108 | |||
68cb6aa1af | |||
9d0cb47367 | |||
569d0ccb4d | |||
ffe17566f1 | |||
293ad196f3 | |||
729e1159aa | |||
f9d354f711 | |||
f9849b515b | |||
fdc0276ed1 | |||
08569c81e9 | |||
3ba89f8363 | |||
9161dbc08e | |||
a1b2fa295a | |||
4f33eaa9dd | |||
3718bab078 | |||
dfc48705a4 | |||
f59115b503 | |||
cac467118e | |||
d0718075a7 | |||
ad55cc79b3 | |||
5111cc10ca | |||
a1736606dc | |||
bae659b9c7 | |||
c480c2225d | |||
52771c472e | |||
5ce21827c8 | |||
a2c4a70fbf | |||
d6e5f78834 | |||
74eb408460 | |||
a4c6576ba4 | |||
1fcc391a8d | |||
2970f960a4 | |||
d06bea7fb2 | |||
45a57e8513 | |||
3622e513aa | |||
c4e1faa853 | |||
905428bee6 | |||
9596e7772c | |||
5294fe6292 | |||
571cf53827 | |||
35ae76532a | |||
57dce86d5e | |||
797cb01bb8 | |||
9eded7a227 | |||
a8d32103d1 | |||
49d4925856 | |||
f5fad5b43d | |||
4c40f9dbc9 | |||
17db734783 | |||
6ce9f97254 | |||
1688dd6b5c | |||
07ffcab857 | |||
de6cf6b7e3 | |||
32cf04c77d | |||
96df4c772f | |||
640c2f88bd | |||
82f78a5610 | |||
cf8f8afbc6 | |||
e6bc92f6c9 |
Cargo.lock (generated): 875 changed lines. File diff suppressed because it is too large.
@ -44,12 +44,14 @@ members = [
    "runtime",
    "sdk",
    "sdk-c",
    "scripts",
    "upload-perf",
    "net-utils",
    "fixed-buf",
    "vote-signer",
    "cli",
    "rayon-threadlimit",
    "watchtower",
]

exclude = [

@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-archiver"
version = "0.21.0"
version = "0.21.6"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@ -10,10 +10,10 @@ homepage = "https://solana.com/"
[dependencies]
clap = "2.33.0"
console = "0.9.1"
solana-clap-utils = { path = "../clap-utils", version = "0.21.0" }
solana-core = { path = "../core", version = "0.21.0" }
solana-logger = { path = "../logger", version = "0.21.0" }
solana-metrics = { path = "../metrics", version = "0.21.0" }
solana-net-utils = { path = "../net-utils", version = "0.21.0" }
solana-sdk = { path = "../sdk", version = "0.21.0" }
solana-clap-utils = { path = "../clap-utils", version = "0.21.6" }
solana-core = { path = "../core", version = "0.21.6" }
solana-logger = { path = "../logger", version = "0.21.6" }
solana-metrics = { path = "../metrics", version = "0.21.6" }
solana-net-utils = { path = "../net-utils", version = "0.21.6" }
solana-sdk = { path = "../sdk", version = "0.21.6" }

@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-banking-bench"
version = "0.21.0"
version = "0.21.6"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@ -10,11 +10,11 @@ homepage = "https://solana.com/"
[dependencies]
log = "0.4.6"
rayon = "1.2.0"
solana-core = { path = "../core", version = "0.21.0" }
solana-ledger = { path = "../ledger", version = "0.21.0" }
solana-logger = { path = "../logger", version = "0.21.0" }
solana-runtime = { path = "../runtime", version = "0.21.0" }
solana-measure = { path = "../measure", version = "0.21.0" }
solana-sdk = { path = "../sdk", version = "0.21.0" }
solana-core = { path = "../core", version = "0.21.6" }
solana-ledger = { path = "../ledger", version = "0.21.6" }
solana-logger = { path = "../logger", version = "0.21.6" }
solana-runtime = { path = "../runtime", version = "0.21.6" }
solana-measure = { path = "../measure", version = "0.21.6" }
solana-sdk = { path = "../sdk", version = "0.21.6" }
rand = "0.6.5"
crossbeam-channel = "0.3"

@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-exchange"
version = "0.21.0"
version = "0.21.6"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@ -23,19 +23,19 @@ serde = "1.0.102"
serde_derive = "1.0.102"
serde_json = "1.0.41"
serde_yaml = "0.8.11"
solana-clap-utils = { path = "../clap-utils", version = "0.21.0" }
solana-core = { path = "../core", version = "0.21.0" }
solana-genesis = { path = "../genesis", version = "0.21.0" }
solana-client = { path = "../client", version = "0.21.0" }
solana-drone = { path = "../drone", version = "0.21.0" }
solana-exchange-program = { path = "../programs/exchange", version = "0.21.0" }
solana-logger = { path = "../logger", version = "0.21.0" }
solana-metrics = { path = "../metrics", version = "0.21.0" }
solana-net-utils = { path = "../net-utils", version = "0.21.0" }
solana-runtime = { path = "../runtime", version = "0.21.0" }
solana-sdk = { path = "../sdk", version = "0.21.0" }
solana-clap-utils = { path = "../clap-utils", version = "0.21.6" }
solana-core = { path = "../core", version = "0.21.6" }
solana-genesis = { path = "../genesis", version = "0.21.6" }
solana-client = { path = "../client", version = "0.21.6" }
solana-drone = { path = "../drone", version = "0.21.6" }
solana-exchange-program = { path = "../programs/exchange", version = "0.21.6" }
solana-logger = { path = "../logger", version = "0.21.6" }
solana-metrics = { path = "../metrics", version = "0.21.6" }
solana-net-utils = { path = "../net-utils", version = "0.21.6" }
solana-runtime = { path = "../runtime", version = "0.21.6" }
solana-sdk = { path = "../sdk", version = "0.21.6" }
untrusted = "0.7.0"
ws = "0.9.1"

[dev-dependencies]
solana-local-cluster = { path = "../local-cluster", version = "0.21.0" }
solana-local-cluster = { path = "../local-cluster", version = "0.21.6" }

@ -2,14 +2,14 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-streamer"
version = "0.21.0"
version = "0.21.6"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"

[dependencies]
clap = "2.33.0"
solana-clap-utils = { path = "../clap-utils", version = "0.21.0" }
solana-core = { path = "../core", version = "0.21.0" }
solana-logger = { path = "../logger", version = "0.21.0" }
solana-net-utils = { path = "../net-utils", version = "0.21.0" }
solana-clap-utils = { path = "../clap-utils", version = "0.21.6" }
solana-core = { path = "../core", version = "0.21.6" }
solana-logger = { path = "../logger", version = "0.21.6" }
solana-net-utils = { path = "../net-utils", version = "0.21.6" }

@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-tps"
version = "0.21.0"
version = "0.21.6"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@ -16,24 +16,24 @@ serde = "1.0.102"
serde_derive = "1.0.102"
serde_json = "1.0.41"
serde_yaml = "0.8.11"
solana-clap-utils = { path = "../clap-utils", version = "0.21.0" }
solana-core = { path = "../core", version = "0.21.0" }
solana-genesis = { path = "../genesis", version = "0.21.0" }
solana-client = { path = "../client", version = "0.21.0" }
solana-drone = { path = "../drone", version = "0.21.0" }
solana-librapay-api = { path = "../programs/librapay_api", version = "0.21.0", optional = true }
solana-logger = { path = "../logger", version = "0.21.0" }
solana-metrics = { path = "../metrics", version = "0.21.0" }
solana-measure = { path = "../measure", version = "0.21.0" }
solana-net-utils = { path = "../net-utils", version = "0.21.0" }
solana-runtime = { path = "../runtime", version = "0.21.0" }
solana-sdk = { path = "../sdk", version = "0.21.0" }
solana-move-loader-program = { path = "../programs/move_loader", version = "0.21.0", optional = true }
solana-clap-utils = { path = "../clap-utils", version = "0.21.6" }
solana-core = { path = "../core", version = "0.21.6" }
solana-genesis = { path = "../genesis", version = "0.21.6" }
solana-client = { path = "../client", version = "0.21.6" }
solana-drone = { path = "../drone", version = "0.21.6" }
solana-librapay-api = { path = "../programs/librapay_api", version = "0.21.6", optional = true }
solana-logger = { path = "../logger", version = "0.21.6" }
solana-metrics = { path = "../metrics", version = "0.21.6" }
solana-measure = { path = "../measure", version = "0.21.6" }
solana-net-utils = { path = "../net-utils", version = "0.21.6" }
solana-runtime = { path = "../runtime", version = "0.21.6" }
solana-sdk = { path = "../sdk", version = "0.21.6" }
solana-move-loader-program = { path = "../programs/move_loader", version = "0.21.6", optional = true }

[dev-dependencies]
serial_test = "0.2.0"
serial_test_derive = "0.2.0"
solana-local-cluster = { path = "../local-cluster", version = "0.21.0" }
solana-local-cluster = { path = "../local-cluster", version = "0.21.6" }

[features]
move = ["solana-librapay-api", "solana-move-loader-program"]
@ -24,6 +24,7 @@ use std::{
cmp,
collections::VecDeque,
net::SocketAddr,
process::exit,
sync::{
atomic::{AtomicBool, AtomicIsize, AtomicUsize, Ordering},
Arc, RwLock,

@ -88,8 +89,13 @@ where
let clients: Vec<_> = clients.into_iter().map(Arc::new).collect();
let client = &clients[0];

let start = gen_keypairs.len() - (tx_count * 2) as usize;
let keypairs = &gen_keypairs[start..];
let mut source_keypair_chunks: Vec<Vec<&Keypair>> = Vec::new();
let mut dest_keypair_chunks: Vec<VecDeque<&Keypair>> = Vec::new();
assert!(gen_keypairs.len() >= 2 * tx_count);
for chunk in gen_keypairs.chunks_exact(2 * tx_count) {
source_keypair_chunks.push(chunk[..tx_count].iter().collect());
dest_keypair_chunks.push(chunk[tx_count..].iter().collect());
}

let first_tx_count = loop {
match client.get_transaction_count() {

@ -126,9 +132,23 @@ where
let shared_txs: SharedTransactions = Arc::new(RwLock::new(VecDeque::new()));

let recent_blockhash = Arc::new(RwLock::new(get_recent_blockhash(client.as_ref()).0));
let shared_tx_active_thread_count = Arc::new(AtomicIsize::new(0));
let total_tx_sent_count = Arc::new(AtomicUsize::new(0));

let blockhash_thread = {
let exit_signal = exit_signal.clone();
let recent_blockhash = recent_blockhash.clone();
let client = client.clone();
let id = id.pubkey();
Builder::new()
.name("solana-blockhash-poller".to_string())
.spawn(move || {
poll_blockhash(&exit_signal, &recent_blockhash, &client, &id);
})
.unwrap()
};

let s_threads: Vec<_> = (0..threads)
.map(|_| {
let exit_signal = exit_signal.clone();

@ -154,58 +174,40 @@ where
// generate and send transactions for the specified duration
let start = Instant::now();
let keypair_chunks = source_keypair_chunks.len() as u64;
let mut reclaim_lamports_back_to_source_account = false;
let mut i = keypair0_balance;
let mut blockhash = Hash::default();
let mut blockhash_time;
while start.elapsed() < duration {
// ping-pong between source and destination accounts for each loop iteration
// this seems to be faster than trying to determine the balance of individual
// accounts
let len = tx_count as usize;
blockhash_time = Instant::now();
if let Ok((new_blockhash, _fee_calculator)) = client.get_new_blockhash(&blockhash) {
blockhash = new_blockhash;
} else {
if blockhash_time.elapsed().as_secs() > 30 {
panic!("Blockhash is not updating");
}
sleep(Duration::from_millis(100));
continue;
}
datapoint_debug!(
"bench-tps-get_blockhash",
("duration", duration_as_us(&blockhash_time.elapsed()), i64)
);

blockhash_time = Instant::now();
let balance = client.get_balance(&id.pubkey()).unwrap_or(0);
metrics_submit_lamport_balance(balance);
datapoint_debug!(
"bench-tps-get_balance",
("duration", duration_as_us(&blockhash_time.elapsed()), i64)
);

let chunk_index = (i % keypair_chunks) as usize;
generate_txs(
&shared_txs,
&blockhash,
&keypairs[..len],
&keypairs[len..],
&recent_blockhash,
&source_keypair_chunks[chunk_index],
&dest_keypair_chunks[chunk_index],
threads,
reclaim_lamports_back_to_source_account,
&libra_args,
);
// In sustained mode overlap the transfers with generation
// this has higher average performance but lower peak performance
// in tested environments.
if !sustained {

// In sustained mode, overlap the transfers with generation. This has higher average
// performance but lower peak performance in tested environments.
if sustained {
// Ensure that we don't generate more transactions than we can handle.
while shared_txs.read().unwrap().len() > 2 * threads {
sleep(Duration::from_millis(1));
}
} else {
while shared_tx_active_thread_count.load(Ordering::Relaxed) > 0 {
sleep(Duration::from_millis(1));
}
}

// Rotate destination keypairs so that the next round of transactions will have different
// transaction signatures even when blockhash is reused.
dest_keypair_chunks[chunk_index].rotate_left(1);

i += 1;
if should_switch_directions(num_lamports_per_account, i) {
if should_switch_directions(num_lamports_per_account, keypair_chunks, i) {
reclaim_lamports_back_to_source_account = !reclaim_lamports_back_to_source_account;
}
}

@ -228,6 +230,11 @@ where
}
}

info!("Waiting for blockhash thread...");
if let Err(err) = blockhash_thread.join() {
info!(" join() failed with: {:?}", err);
}

let balance = client.get_balance(&id.pubkey()).unwrap_or(0);
metrics_submit_lamport_balance(balance);

@ -252,8 +259,8 @@ fn metrics_submit_lamport_balance(lamport_balance: u64) {

#[cfg(feature = "move")]
fn generate_move_txs(
source: &[Keypair],
dest: &[Keypair],
source: &[&Keypair],
dest: &VecDeque<&Keypair>,
reclaim: bool,
move_keypairs: &[Keypair],
libra_pay_program_id: &Pubkey,

@ -297,8 +304,8 @@ fn generate_move_txs(
}

fn generate_system_txs(
source: &[Keypair],
dest: &[Keypair],
source: &[&Keypair],
dest: &VecDeque<&Keypair>,
reclaim: bool,
blockhash: &Hash,
) -> Vec<(Transaction, u64)> {

@ -321,15 +328,19 @@ fn generate_system_txs(

fn generate_txs(
shared_txs: &SharedTransactions,
blockhash: &Hash,
source: &[Keypair],
dest: &[Keypair],
blockhash: &Arc<RwLock<Hash>>,
source: &[&Keypair],
dest: &VecDeque<&Keypair>,
threads: usize,
reclaim: bool,
libra_args: &Option<LibraKeys>,
) {
let blockhash = *blockhash.read().unwrap();
let tx_count = source.len();
info!("Signing transactions... {} (reclaim={})", tx_count, reclaim);
info!(
"Signing transactions... {} (reclaim={}, blockhash={})",
tx_count, reclaim, &blockhash
);
let signing_start = Instant::now();

let transactions = if let Some((

@ -353,11 +364,11 @@ fn generate_txs(
&_libra_keys,
_libra_pay_program_id,
&_libra_genesis_keypair.pubkey(),
blockhash,
&blockhash,
)
}
} else {
generate_system_txs(source, dest, reclaim, blockhash)
generate_system_txs(source, dest, reclaim, &blockhash)
};

let duration = signing_start.elapsed();

@ -386,6 +397,48 @@ fn generate_txs(
}
}

fn poll_blockhash<T: Client>(
exit_signal: &Arc<AtomicBool>,
blockhash: &Arc<RwLock<Hash>>,
client: &Arc<T>,
id: &Pubkey,
) {
let mut blockhash_last_updated = Instant::now();
let mut last_error_log = Instant::now();
loop {
let blockhash_updated = {
let old_blockhash = *blockhash.read().unwrap();
if let Ok((new_blockhash, _fee)) = client.get_new_blockhash(&old_blockhash) {
*blockhash.write().unwrap() = new_blockhash;
blockhash_last_updated = Instant::now();
true
} else {
if blockhash_last_updated.elapsed().as_secs() > 120 {
eprintln!("Blockhash is stuck");
exit(1)
} else if blockhash_last_updated.elapsed().as_secs() > 30
&& last_error_log.elapsed().as_secs() >= 1
{
last_error_log = Instant::now();
error!("Blockhash is not updating");
}
false
}
};

if blockhash_updated {
let balance = client.get_balance(id).unwrap_or(0);
metrics_submit_lamport_balance(balance);
}

if exit_signal.load(Ordering::Relaxed) {
break;
}

sleep(Duration::from_millis(50));
}
}

fn do_tx_transfers<T: Client>(
exit_signal: &Arc<AtomicBool>,
shared_txs: &SharedTransactions,

@ -398,11 +451,10 @@ fn do_tx_transfers<T: Client>(
if thread_batch_sleep_ms > 0 {
sleep(Duration::from_millis(thread_batch_sleep_ms as u64));
}
let txs;
{
let txs = {
let mut shared_txs_wl = shared_txs.write().expect("write lock in do_tx_transfers");
txs = shared_txs_wl.pop_front();
}
shared_txs_wl.pop_front()
};
if let Some(txs0) = txs {
shared_tx_thread_count.fetch_add(1, Ordering::Relaxed);
info!(

@ -758,11 +810,15 @@ fn compute_and_report_stats(
);
}

// First transfer 3/4 of the lamports to the dest accounts
// then ping-pong 1/4 of the lamports back to the other account
// this leaves 1/4 lamport buffer in each account
fn should_switch_directions(num_lamports_per_account: u64, i: u64) -> bool {
i % (num_lamports_per_account / 4) == 0 && (i >= (3 * num_lamports_per_account) / 4)
// First transfer 2/3 of the lamports to the dest accounts
// then ping-pong 1/3 of the lamports back to the other account
// this leaves 1/3 lamport buffer in each account
fn should_switch_directions(num_lamports_per_account: u64, keypair_chunks: u64, i: u64) -> bool {
if i < keypair_chunks * (2 * num_lamports_per_account) / 3 {
return false;
}

i % (keypair_chunks * num_lamports_per_account / 3) == 0
}
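Not part of the diff: a standalone Rust sketch that restates the reworked `should_switch_directions` rule above and checks the same values the updated unit tests use. With `num_lamports_per_account = 30` and `keypair_chunks = 2`, the first switch happens at i = 40 (two thirds of 2 * 30) and then at every further multiple of 20 (one third of 2 * 30).

```rust
// Standalone sketch of the new direction-switching rule shown in the hunk above.
fn should_switch_directions(num_lamports_per_account: u64, keypair_chunks: u64, i: u64) -> bool {
    // No switching until 2/3 of the (chunk-scaled) per-account lamports have moved.
    if i < keypair_chunks * (2 * num_lamports_per_account) / 3 {
        return false;
    }
    // After that, switch every (keypair_chunks * num_lamports_per_account / 3) iterations.
    i % (keypair_chunks * num_lamports_per_account / 3) == 0
}

fn main() {
    assert!(!should_switch_directions(30, 2, 20)); // below the 2/3 threshold (40)
    assert!(should_switch_directions(30, 2, 40));  // threshold reached
    assert!(!should_switch_directions(30, 2, 90)); // 90 is not a multiple of 20
    assert!(should_switch_directions(30, 2, 100)); // next qualifying multiple of 20
    println!("direction-switch schedule matches the updated tests");
}
```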
pub fn generate_keypairs(seed_keypair: &Keypair, count: u64) -> (Vec<Keypair>, u64) {

@ -897,9 +953,12 @@ fn fund_move_keys<T: Client>(
info!("funded libra funding key {}", i);
}

let tx_count = keypairs.len();
let amount = total / (tx_count as u64);
for (i, keys) in keypairs[..tx_count].chunks(NUM_FUNDING_KEYS).enumerate() {
let keypair_count = keypairs.len();
let amount = total / (keypair_count as u64);
for (i, keys) in keypairs[..keypair_count]
.chunks(NUM_FUNDING_KEYS)
.enumerate()
{
for (j, key) in keys.iter().enumerate() {
let tx = librapay_transaction::transfer(
libra_pay_program_id,

@ -949,18 +1008,18 @@ pub fn generate_and_fund_keypairs<T: Client>(
client: &T,
drone_addr: Option<SocketAddr>,
funding_key: &Keypair,
tx_count: usize,
keypair_count: usize,
lamports_per_account: u64,
use_move: bool,
) -> Result<(Vec<Keypair>, Option<LibraKeys>, u64)> {
info!("Creating {} keypairs...", tx_count * 2);
let (mut keypairs, extra) = generate_keypairs(funding_key, tx_count as u64 * 2);
info!("Creating {} keypairs...", keypair_count);
let (mut keypairs, extra) = generate_keypairs(funding_key, keypair_count as u64);
info!("Get lamports...");

// Sample the first keypair, see if it has lamports, if so then resume.
// This logic is to prevent lamport loss on repeated solana-bench-tps executions
let last_keypair_balance = client
.get_balance(&keypairs[tx_count * 2 - 1].pubkey())
.get_balance(&keypairs[keypair_count - 1].pubkey())
.unwrap_or(0);

#[cfg(feature = "move")]

@ -999,7 +1058,7 @@ pub fn generate_and_fund_keypairs<T: Client>(
// Still fund the solana ones which will be used for fees.
let seed = [0u8; 32];
let mut rnd = GenKeys::new(seed);
let move_keypairs = rnd.gen_n_keypairs(tx_count as u64 * 2);
let move_keypairs = rnd.gen_n_keypairs(keypair_count as u64);
fund_move_keys(
client,
funding_key,

@ -1032,7 +1091,7 @@ pub fn generate_and_fund_keypairs<T: Client>(
}

// 'generate_keypairs' generates extra keys to be able to have size-aligned funding batches for fund_keys.
keypairs.truncate(2 * tx_count);
keypairs.truncate(keypair_count);

Ok((keypairs, move_keypairs_ret, last_keypair_balance))
}

@ -1048,17 +1107,21 @@ mod tests {

#[test]
fn test_switch_directions() {
assert_eq!(should_switch_directions(20, 0), false);
assert_eq!(should_switch_directions(20, 1), false);
assert_eq!(should_switch_directions(20, 14), false);
assert_eq!(should_switch_directions(20, 15), true);
assert_eq!(should_switch_directions(20, 16), false);
assert_eq!(should_switch_directions(20, 19), false);
assert_eq!(should_switch_directions(20, 20), true);
assert_eq!(should_switch_directions(20, 21), false);
assert_eq!(should_switch_directions(20, 99), false);
assert_eq!(should_switch_directions(20, 100), true);
assert_eq!(should_switch_directions(20, 101), false);
assert_eq!(should_switch_directions(30, 1, 0), false);
assert_eq!(should_switch_directions(30, 1, 1), false);
assert_eq!(should_switch_directions(30, 1, 20), true);
assert_eq!(should_switch_directions(30, 1, 21), false);
assert_eq!(should_switch_directions(30, 1, 30), true);
assert_eq!(should_switch_directions(30, 1, 90), true);
assert_eq!(should_switch_directions(30, 1, 91), false);

assert_eq!(should_switch_directions(30, 2, 0), false);
assert_eq!(should_switch_directions(30, 2, 1), false);
assert_eq!(should_switch_directions(30, 2, 20), false);
assert_eq!(should_switch_directions(30, 2, 40), true);
assert_eq!(should_switch_directions(30, 2, 90), false);
assert_eq!(should_switch_directions(30, 2, 100), true);
assert_eq!(should_switch_directions(30, 2, 101), false);
}

#[test]
@ -1072,8 +1135,9 @@ mod tests {
config.tx_count = 10;
config.duration = Duration::from_secs(5);

let keypair_count = config.tx_count * config.keypair_multiplier;
let (keypairs, _move_keypairs, _keypair_balance) =
generate_and_fund_keypairs(&clients[0], None, &config.id, config.tx_count, 20, false)
generate_and_fund_keypairs(&clients[0], None, &config.id, keypair_count, 20, false)
.unwrap();

do_bench_tps(clients, config, keypairs, 0, None);

@ -1084,11 +1148,11 @@ mod tests {
let (genesis_config, id) = create_genesis_config(10_000);
let bank = Bank::new(&genesis_config);
let client = BankClient::new(bank);
let tx_count = 10;
let keypair_count = 20;
let lamports = 20;

let (keypairs, _move_keypairs, _keypair_balance) =
generate_and_fund_keypairs(&client, None, &id, tx_count, lamports, false).unwrap();
generate_and_fund_keypairs(&client, None, &id, keypair_count, lamports, false).unwrap();

for kp in &keypairs {
assert_eq!(

@ -1107,11 +1171,11 @@ mod tests {
genesis_config.fee_calculator = fee_calculator;
let bank = Bank::new(&genesis_config);
let client = BankClient::new(bank);
let tx_count = 10;
let keypair_count = 20;
let lamports = 20;

let (keypairs, _move_keypairs, _keypair_balance) =
generate_and_fund_keypairs(&client, None, &id, tx_count, lamports, false).unwrap();
generate_and_fund_keypairs(&client, None, &id, keypair_count, lamports, false).unwrap();

let max_fee = client
.get_recent_blockhash_with_commitment(CommitmentConfig::recent())
@ -15,6 +15,7 @@ pub struct Config {
pub num_nodes: usize,
pub duration: Duration,
pub tx_count: usize,
pub keypair_multiplier: usize,
pub thread_batch_sleep_ms: usize,
pub sustained: bool,
pub client_ids_and_stake_file: String,

@ -36,6 +37,7 @@ impl Default for Config {
num_nodes: 1,
duration: Duration::new(std::u64::MAX, 0),
tx_count: 50_000,
keypair_multiplier: 8,
thread_batch_sleep_ms: 1000,
sustained: false,
client_ids_and_stake_file: String::new(),

@ -122,6 +124,13 @@ pub fn build_args<'a, 'b>(version: &'b str) -> App<'a, 'b> {
.takes_value(true)
.help("Number of transactions to send per batch")
)
.arg(
Arg::with_name("keypair_multiplier")
.long("keypair-multiplier")
.value_name("NUM")
.takes_value(true)
.help("Multiply by transaction count to determine number of keypairs to create")
)
.arg(
Arg::with_name("thread-batch-sleep-ms")
.short("z")

@ -208,7 +217,15 @@ pub fn extract_args<'a>(matches: &ArgMatches<'a>) -> Config {
}

if let Some(s) = matches.value_of("tx_count") {
args.tx_count = s.to_string().parse().expect("can't parse tx_account");
args.tx_count = s.to_string().parse().expect("can't parse tx_count");
}

if let Some(s) = matches.value_of("keypair_multiplier") {
args.keypair_multiplier = s
.to_string()
.parse()
.expect("can't parse keypair-multiplier");
assert!(args.keypair_multiplier >= 2);
}

if let Some(t) = matches.value_of("thread-batch-sleep-ms") {
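Not part of the diff: a standalone sketch of the arithmetic behind the new `keypair_multiplier` option. The bench derives `keypair_count = tx_count * keypair_multiplier` and consumes keypairs in chunks of `2 * tx_count` (one source set plus one destination set per round), which is why the parser asserts the multiplier is at least 2.

```rust
// Standalone sketch of the keypair_count arithmetic used by the bench.
fn main() {
    let tx_count: usize = 10;
    let keypair_multiplier: usize = 8; // new Config default; must be >= 2
    let keypair_count = tx_count * keypair_multiplier;

    // Each round uses tx_count source keypairs and tx_count destination keypairs,
    // so the generated keypairs split into keypair_count / (2 * tx_count) chunk pairs.
    let chunk_pairs = keypair_count / (2 * tx_count);
    assert_eq!(keypair_count, 80);
    assert_eq!(chunk_pairs, 4);
    println!("{} keypairs -> {} source/destination chunk pairs", keypair_count, chunk_pairs);
}
```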
@ -24,6 +24,7 @@ fn main() {
id,
num_nodes,
tx_count,
keypair_multiplier,
client_ids_and_stake_file,
write_to_client_file,
read_from_client_file,

@ -34,9 +35,10 @@ fn main() {
..
} = &cli_config;

let keypair_count = *tx_count * keypair_multiplier;
if *write_to_client_file {
info!("Generating {} keypairs", *tx_count * 2);
let (keypairs, _) = generate_keypairs(&id, *tx_count as u64 * 2);
info!("Generating {} keypairs", keypair_count);
let (keypairs, _) = generate_keypairs(&id, keypair_count as u64);
let num_accounts = keypairs.len() as u64;
let max_fee =
FeeCalculator::new(*target_lamports_per_signature, 0).max_lamports_per_signature;

@ -102,10 +104,10 @@ fn main() {
last_balance = primordial_account.balance;
});

if keypairs.len() < tx_count * 2 {
if keypairs.len() < keypair_count {
eprintln!(
"Expected {} accounts in {}, only received {} (--tx_count mismatch?)",
tx_count * 2,
keypair_count,
client_ids_and_stake_file,
keypairs.len(),
);

@ -121,7 +123,7 @@ fn main() {
&client,
Some(*drone_addr),
&id,
*tx_count,
keypair_count,
*num_lamports_per_account,
*use_move,
)
@ -47,11 +47,12 @@ fn test_bench_tps_local_cluster(config: Config) {

let lamports_per_account = 100;

let keypair_count = config.tx_count * config.keypair_multiplier;
let (keypairs, move_keypairs, _keypair_balance) = generate_and_fund_keypairs(
&client,
Some(drone_addr),
&config.id,
config.tx_count,
keypair_count,
lamports_per_account,
config.use_move,
)
@ -36,6 +36,10 @@
* [Troubleshooting](running-validator/validator-troubleshoot.md)
* [FAQ](running-validator/validator-faq.md)
* [Running an Archiver](running-archiver.md)
* [Paper Wallet](paper-wallet/README.md)
* [Installation](paper-wallet/installation.md)
* [Creating and Using a Seed Phrase](paper-wallet/keypair.md)
* [Paper Wallet Usage](paper-wallet/usage.md)
* [API Reference](api-reference/README.md)
* [Transaction](api-reference/transaction-api.md)
* [Instruction](api-reference/instruction-api.md)
@ -177,7 +177,7 @@ $ solana send-timestamp <PUBKEY> <PROCESS_ID> --date 2018-12-24T23:59:00
## Usage
### solana-cli
```text
solana-cli 0.21.0
solana-cli 0.21.6
Blockchain, Rebuilt for Scale

USAGE:

@ -398,8 +398,8 @@ OPTIONS:
-k, --keypair <PATH> /path/to/id.json

ARGS:
<STORAGE ACCOUNT OWNER PUBKEY>
<STORAGE ACCOUNT PUBKEY>
<STORAGE ACCOUNT OWNER PUBKEY>
<STORAGE ACCOUNT PUBKEY>
```

#### solana-create-stake-account

@ -448,8 +448,8 @@ OPTIONS:
-k, --keypair <PATH> /path/to/id.json

ARGS:
<STORAGE ACCOUNT OWNER PUBKEY>
<STORAGE ACCOUNT PUBKEY>
<STORAGE ACCOUNT OWNER PUBKEY>
<STORAGE ACCOUNT PUBKEY>
```

#### solana-create-vote-account

@ -467,7 +467,7 @@ FLAGS:
OPTIONS:
--authorized-voter <PUBKEY> Public key of the authorized voter (defaults to vote account)
--authorized-withdrawer <PUBKEY> Public key of the authorized withdrawer (defaults to cli config pubkey)
--commission <NUM> The commission taken on reward redemption (0-255), default: 0
--commission <NUM> The commission taken on reward redemption (0-100), default: 0
-C, --config <PATH> Configuration file to use [default:
~/.config/solana/cli/config.yml]
-u, --url <URL> JSON RPC URL for the solana cluster

@ -674,7 +674,7 @@ USAGE:
solana pay [FLAGS] [OPTIONS] <PUBKEY> <AMOUNT> [--] [UNIT]

FLAGS:
--cancelable
--cancelable
-h, --help Prints help information
-V, --version Prints version information
@ -18,8 +18,10 @@ To interact with a Solana node inside a JavaScript application, use the [solana-
* [getAccountInfo](jsonrpc-api.md#getaccountinfo)
* [getBalance](jsonrpc-api.md#getbalance)
* [getBlockCommitment](jsonrpc-api.md#getblockcommitment)
* [getBlockTime](jsonrpc-api.md#getblocktime)
* [getClusterNodes](jsonrpc-api.md#getclusternodes)
* [getConfirmedBlock](jsonrpc-api.md#getconfirmedblock)
* [getConfirmedBlocks](jsonrpc-api.md#getconfirmedblocks)
* [getEpochInfo](jsonrpc-api.md#getepochinfo)
* [getEpochSchedule](jsonrpc-api.md#getepochschedule)
* [getGenesisHash](jsonrpc-api.md#getgenesishash)

@ -155,7 +157,7 @@ The result value will be an RpcResponse JSON object containing an AccountInfo JS
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getAccountInfo", "params":["2gVkYWexTHR5Hb2aLeQN3tnngvWzisFKXDUPrgMHpdST"]}' http://localhost:8899

// Result
{"jsonrpc":"2.0","result":{"context":{"slot":1},"value":{"executable":false,"owner":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lamports":1,"data":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0.21.0,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]}},"id":1}
{"jsonrpc":"2.0","result":{"context":{"slot":1},"value":{"executable":false,"owner":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lamports":1,"data":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0.21.6,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]}},"id":1}
```

### getBalance

@ -209,6 +211,31 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "m
{"jsonrpc":"2.0","result":[{"commitment":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,10,32]},42],"id":1}
```

### getBlockTime

Returns the estimated production time of a block. Validators report their UTC
time to the ledger on a regular interval. A block's time is calculated as an
offset from the median value of the most recent validator time report.

#### Parameters:

* `u64` - block, identified by Slot

#### Results:

* `null` - block has not yet been produced
* `i64` - estimated production time, as Unix timestamp (seconds since the Unix epoch)

#### Example:

```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getBlockTime","params":[5]}' http://localhost:8899

// Result
{"jsonrpc":"2.0","result":1574721591,"id":1}
```

### getClusterNodes

Returns information about all the nodes participating in the cluster

@ -269,6 +296,31 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc": "2.0","id":1,"m
{"jsonrpc":"2.0","result":{"blockhash":[165,245,120,183,32,205,89,222,249,114,229,49,250,231,149,122,156,232,181,83,238,194,157,153,7,213,180,54,177,6,25,101],"parentSlot":429,"previousBlockhash":[21,108,181,90,139,241,212,203,45,78,232,29,161,31,159,188,110,82,81,11,250,74,47,140,188,28,23,96,251,164,208,166],"transactions":[[{"message":{"accountKeys":[[5],[219,181,202,40,52,148,34,136,186,59,137,160,250,225,234,17,244,160,88,116,24,176,30,227,68,11,199,38,141,68,131,228],[233,48,179,56,91,40,254,206,53,48,196,176,119,248,158,109,121,77,11,69,108,160,128,27,228,122,146,249,53,184,68,87],[6,167,213,23,25,47,10,175,198,242,101,227,251,119,204,122,218,130,197,41,208,190,59,19,110,45,0,85,32,0,0,0],[6,167,213,23,24,199,116,201,40,86,99,152,105,29,94,182,139,94,184,163,155,75,109,92,115,85,91,33,0,0,0,0],[7,97,72,29,53,116,116,187,124,77,118,36,235,211,189,179,216,53,94,115,209,16,67,252,13,163,83,128,0,0,0,0]],"header":{"numReadonlySignedAccounts":0,"numReadonlyUnsignedAccounts":3,"numRequiredSignatures":2},"instructions":[[1],{"accounts":[[3],1,2,3],"data":[[52],2,0,0,0,1,0,0,0,0,0,0,0,173,1,0,0,0,0,0,0,86,55,9,248,142,238,135,114,103,83,247,124,67,68,163,233,55,41,59,129,64,50,110,221,234,234,27,213,205,193,219,50],"program_id_index":4}],"recentBlockhash":[21,108,181,90,139,241,212,203,45,78,232,29,161,31,159,188,110,82,81,11,250,74,47,140,188,28,23,96,251,164,208,166]},"signatures":[[2],[119,9,95,108,35,95,7,1,69,101,65,45,5,204,61,114,172,88,123,238,32,201,135,229,57,50,13,21,106,216,129,183,238,43,37,101,148,81,56,232,88,136,80,65,46,189,39,106,94,13,238,54,186,48,118,186,0,62,121,122,172,171,66,5],[78,40,77,250,10,93,6,157,48,173,100,40,251,9,7,218,7,184,43,169,76,240,254,34,235,48,41,175,119,126,75,107,106,248,45,161,119,48,174,213,57,69,111,225,245,60,148,73,124,82,53,6,203,126,120,180,111,169,89,64,29,23,237,13]]},{"fee":100000,"status":{"Ok":null}}]]},"id":1}
```

### getConfirmedBlocks

Returns a list of confirmed blocks

#### Parameters:

* `integer` - start_slot, as u64 integer
* `integer` - (optional) end_slot, as u64 integer

#### Results:

The result field will be an array of u64 integers listing confirmed blocks
between start_slot and either end_slot, if provided, or latest confirmed block,
inclusive.

#### Example:

```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc": "2.0","id":1,"method":"getConfirmedBlocks","params":[5, 10]}' localhost:8899

// Result
{"jsonrpc":"2.0","result":[5,6,7,8,9,10],"id":1}
```

### getEpochInfo

Returns information about the current epoch

@ -347,15 +399,18 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "m

### getLeaderSchedule

Returns the leader schedule for the current epoch
Returns the leader schedule for an epoch

#### Parameters:

* `slot` - (optional) Fetch the leader schedule for the epoch that corresponds to the provided slot. If unspecified, the leader schedule for the current epoch is fetched
* `object` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)

#### Results:

The result field will be an array of leader public keys \(as base-58 encoded strings\) for each slot in the current epoch
The result field will be a dictionary of leader public keys \(as base-58 encoded
strings\) and their corresponding leader slot indices as values (indices are to
the first slot in the requested epoch)

#### Example:

@ -364,7 +419,7 @@ The result field will be an array of leader public keys \(as base-58 encoded str
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getLeaderSchedule"}' http://localhost:8899

// Result
{"jsonrpc":"2.0","result":[...],"id":1}
{"jsonrpc":"2.0","result":{"4Qkev8aNZcqFNSRhQzwyLMFSsi94jHqE8WNVTJzTP99F":[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63]},"id":1}
```
||||
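Not part of the diff: a standalone Rust sketch (plain std collections, not an RPC client) of how the new `getLeaderSchedule` result shape can be read. Keys are leader pubkeys and values are slot indices relative to the first slot of the requested epoch; the pubkey string below is copied from the example response above.

```rust
use std::collections::HashMap;

// Find which leader owns a given slot index in a schedule of the new shape.
fn leader_for_slot_index<'a>(
    schedule: &'a HashMap<String, Vec<u64>>,
    slot_index: u64,
) -> Option<&'a str> {
    for (pubkey, indices) in schedule {
        if indices.contains(&slot_index) {
            return Some(pubkey.as_str());
        }
    }
    None
}

fn main() {
    // Single-leader schedule mirroring the example response above.
    let mut schedule = HashMap::new();
    schedule.insert(
        "4Qkev8aNZcqFNSRhQzwyLMFSsi94jHqE8WNVTJzTP99F".to_string(),
        (0..64).collect::<Vec<u64>>(),
    );
    assert_eq!(
        leader_for_slot_index(&schedule, 42),
        Some("4Qkev8aNZcqFNSRhQzwyLMFSsi94jHqE8WNVTJzTP99F")
    );
}
```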
### getMinimumBalanceForRentExemption

@ -687,7 +742,7 @@ The result field will be a JSON object of `current` and `delinquent` accounts, e
* `nodePubkey` - Node public key, as base-58 encoded string
* `activatedStake` - the stake, in lamports, delegated to this vote account and active in this epoch
* `epochVoteAccount` - bool, whether the vote account is staked for this epoch
* `commission`, an 8-bit integer used as a fraction \(commission/MAX\_U8\) for rewards payout
* `commission`, percentage (0-100) of rewards payout owed to the vote account
* `lastVote` - Most recent slot voted on by this vote account

#### Example:

@ -798,7 +853,7 @@ Subscribe to an account to receive notifications when the lamports or data for a
#### Notification Format:

```bash
{"jsonrpc": "2.0","method": "accountNotification", "params": {"result": {"executable":false,"owner":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lamports":1,"data":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0.21.0,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]},"subscription":0}}
{"jsonrpc": "2.0","method": "accountNotification", "params": {"result": {"executable":false,"owner":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lamports":1,"data":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0.21.6,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]},"subscription":0}}
```

### accountUnsubscribe

@ -856,7 +911,7 @@ Subscribe to a program to receive notifications when the lamports or data for a
* `object` - account info JSON object \(see [getAccountInfo](jsonrpc-api.md#getaccountinfo) for field details\)

```bash
{"jsonrpc":"2.0","method":"programNotification","params":{{"result":["8Rshv2oMkPu5E4opXTRyuyBeZBqQ4S477VG26wUTFxUM",{"executable":false,"lamports":1,"owner":[129,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"data":[1,1,1,0,0,0,0,0,0,0.21.0,0,0,0,0,0,0,50,48,49,56,45,49,50,45,50,52,84,50,51,58,53,57,58,48,48,90,235,233,39,152,15,44,117,176,41,89,100,86,45,61,2,44,251,46,212,37,35,118,163,189,247,84,27,235,178,62,55,89,0,0,0,0,50,0,0,0,0,0,0,0,235,233,39,152,15,44,117,176,41,89,100,86,45,61,2,44,251,46,212,37,35,118,163,189,247,84,27,235,178,62,45,4,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]}],"subscription":0}}
{"jsonrpc":"2.0","method":"programNotification","params":{{"result":["8Rshv2oMkPu5E4opXTRyuyBeZBqQ4S477VG26wUTFxUM",{"executable":false,"lamports":1,"owner":[129,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"data":[1,1,1,0,0,0,0,0,0,0.21.6,0,0,0,0,0,0,50,48,49,56,45,49,50,45,50,52,84,50,51,58,53,57,58,48,48,90,235,233,39,152,15,44,117,176,41,89,100,86,45,61,2,44,251,46,212,37,35,118,163,189,247,84,27,235,178,62,55,89,0,0,0,0,50,0,0,0,0,0,0,0,235,233,39,152,15,44,117,176,41,89,100,86,45,61,2,44,251,46,212,37,35,118,163,189,247,84,27,235,178,62,45,4,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]}],"subscription":0}}
```

### programUnsubscribe
@ -16,7 +16,7 @@ The total stake allocated to a Vote account can be calculated by the sum of all

## Vote and Stake accounts

The rewards process is split into two on-chain programs. The Vote program solves the problem of making stakes slashable. The Stake account acts as custodian of the rewards pool, and provides passive delegation. The Stake program is responsible for paying out each staker once the staker proves to the Stake program that its delegate has participated in validating the ledger.
The rewards process is split into two on-chain programs. The Vote program solves the problem of making stakes slashable. The Stake program acts as custodian of the rewards pool and provides for passive delegation. The Stake program is responsible for paying rewards to staker and voter when shown that a staker's delegate has participated in validating the ledger.

### VoteState

@ -228,4 +228,4 @@ Only lamports in excess of effective+activating stake may be withdrawn at any ti

### Lock-up

Stake accounts support the notion of lock-up, wherein the stake account balance is unavailable for withdrawal until a specified time. Lock-up is specified as a slot height, i.e. the minimum slot height that must be reached by the network before the stake account balance is available for withdrawal, except to a specified custodian. This information is gathered when the stake account is created.
Stake accounts support the notion of lock-up, wherein the stake account balance is unavailable for withdrawal until a specified time. Lock-up is specified as an epoch height, i.e. the minimum epoch height that must be reached by the network before the stake account balance is available for withdrawal, unless the transaction is also signed by a specified custodian. This information is gathered when the stake account is created, and stored in the Lockup field of the stake account's state.
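Not part of the diff: a minimal sketch, using hypothetical simplified types rather than the actual Stake program code, of the lock-up rule the revised paragraph describes. Withdrawals are rejected until the lock-up epoch is reached unless the transaction is also signed by the designated custodian.

```rust
// Hypothetical, simplified stand-ins for the Lockup data stored in a stake account.
type Epoch = u64;
type PubkeyBytes = [u8; 32];

struct Lockup {
    epoch: Epoch,             // first epoch at which normal withdrawals are allowed
    custodian: PubkeyBytes,   // signer allowed to bypass the lock-up
}

// A withdrawal passes once the lock-up epoch is reached, or earlier when the
// designated custodian has signed the transaction.
fn withdrawal_allowed(lockup: &Lockup, current_epoch: Epoch, signer: Option<&PubkeyBytes>) -> bool {
    current_epoch >= lockup.epoch || signer == Some(&lockup.custodian)
}

fn main() {
    let custodian = [7u8; 32];
    let lockup = Lockup { epoch: 100, custodian };
    assert!(!withdrawal_allowed(&lockup, 50, None));              // still locked
    assert!(withdrawal_allowed(&lockup, 50, Some(&custodian)));   // custodian bypass
    assert!(withdrawal_allowed(&lockup, 120, None));              // lock-up expired
}
```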
@ -81,8 +81,8 @@ With a FEC rate: `16:4`

With FEC rate of `16:16`
* `G = 12800`
* `S = SUM of i=0 -> 32 for binomial(prob_failure = 0.2775, trials = 64, failures = i) = 0.002132`
* `B = (1 - 0.002132) ^ (12800 / 32) = 0.42583`
* `S = SUM of i=0 -> 32 for binomial(prob_failure = 0.2775, trials = 64, failures = i) = 0.0.21.6`
* `B = (1 - 0.0.21.6) ^ (12800 / 32) = 0.42583`

With FEC rate of `32:32`
* `G = 12800`
book/src/paper-wallet/README.md (new file, 24 lines)
@ -0,0 +1,24 @@
# Paper Wallet

This document describes how to create and use a paper wallet with the Solana CLI
tools.

{% hint style="info" %}
We do not intend to advise on how to *securely* create or manage paper wallets.
Please research the security concerns carefully.
{% endhint %}

## Overview

Solana provides a key generation tool to derive keys from BIP39 compliant seed
phrases. Solana CLI commands for running a validator and staking tokens all
support keypair input via seed phrases.

To learn more about the BIP39 standard, visit the Bitcoin BIPs Github repository
[here](https://github.com/bitcoin/bips/blob/master/bip-0039.mediawiki).

{% page-ref page="installation.md" %}

{% page-ref page="keypair.md" %}

{% page-ref page="usage.md" %}
book/src/paper-wallet/installation.md (new file, 51 lines)
@ -0,0 +1,51 @@
# Installation Guide
Follow this guide to setup Solana's key generation tool called `solana-keygen`

{% hint style="warn" %}
After installation, ensure your version is `0.21.6` or higher by running `solana-keygen -V`
{% endhint %}

## Download
First, download the latest release tarball from GitHub.

1. Setup download url

```bash
solana_downloads=https://github.com/solana-labs/solana/releases/latest/download
```

2. Specify the download file based on your machine

**MacOS**
```bash
solana_release=solana-release-x86_64-apple-darwin.tar.bz2
```

**Linux**
```bash
solana_release=solana-release-x86_64-unknown-linux-gnu.tar.bz2
```

3. Download

```bash
curl -L -sSf -o solana-release.tar.bz2 $solana_downloads/$solana_release
```

## Extract
Next, extract the tarball
```bash
tar xf solana-release.tar.bz2
```

## Add to "PATH"
Now add the tool to your PATH environment variable with the following command
```bash
export PATH="$(pwd)/solana-release/bin:${PATH}"
```

## Check
Finally, check that `solana-keygen` can be run by running
```bash
solana-keygen -V
```
book/src/paper-wallet/keypair.md (new file, 70 lines)
@ -0,0 +1,70 @@
# Creating a Paper Wallet

Using the `solana-keygen` tool, it is possible to generate new seed phrases as
well as derive a keypair from an existing seed phrase and (optional) passphrase.
The seed phrase and passphrase can be used together as a paper wallet. As long
as you keep your seed phrase and passphrase stored safely, you can use them to
access your account.

{% hint style="info" %}
For more information about how seed phrases work, review this
[Bitcoin Wiki page](https://en.bitcoin.it/wiki/Seed_phrase).
{% endhint %}

## Seed Phrase Generation

Generating a new keypair can be done using the `solana-keygen new` command. The
command will generate a random seed phrase, ask you to enter an optional
passphrase, and then will display the derived public key and the generated seed
phrase for your paper wallet.

```bash
solana-keygen new --no-outfile
```

{% hint style="warning" %}
If the `--no-outfile` flag is **omitted**, the default behavior is to write the
keypair to `~/.config/solana/id.json`
{% endhint %}

{% hint style="info" %}
For added security, increase the seed phrase word count using the `--word-count`
argument
{% endhint %}

For full usage details run:

```bash
solana-keygen new --help
```

## Public Key Derivation

Public keys can be derived from a seed phrase and a passphrase if you choose to
use one. This is useful for using using an offline-generated seed phrase to
derive a valid public key. The `solana-keygen pubkey` command will walk you
through entering your seed phrase and a passphrase if you chose to use one.

```bash
solana-keygen pubkey ASK
```

{% hint style="info" %}
Note that you could potentially use different passphrases for the same seed
phrase. Each unique passphrase will yield a different keypair.
{% endhint %}

The `solana-keygen` tool assumes the use of the BIP39 standard English word
list. If you chose to deviate from the word list or used a different language
for your seed phrase, you can still derive a valid public key but will need to
explicitly skip seed phrase validation.

```bash
solana-keygen pubkey ASK --skip-seed-phrase-validation
```

For full usage details run:

```bash
solana-keygen pubkey --help
```
book/src/paper-wallet/usage.md (new file, 73 lines)
@ -0,0 +1,73 @@
# Paper Wallet Usage

Solana commands can be run without ever saving a keypair to disk on a machine.
If avoiding writing a private key to disk is a security concern of yours, you've
come to the right place.

{% hint style="warning" %}
Even using this secure input method, it's still possible that a private key gets
written to disk by unencrypted memory swaps. It is the user's responsibility to
protect against this scenario.
{% endhint %}

## Running a Validator

In order to run a validator, you will need to specify an "identity keypair"
which will be used to fund all of the vote transactions signed by your validator.
Rather than specifying a path with `--identity-keypair <PATH>` you can use the
`--ask-seed-phrase` option.

```bash
solana-validator --ask-seed-phrase identity-keypair --ledger ...

[identity-keypair] seed phrase: 🔒
[identity-keypair] If this seed phrase has an associated passphrase, enter it now. Otherwise, press ENTER to continue:
```

The `--ask-seed-phrase` option accepts multiple keypairs. If you wish to use this
input method for your voting keypair as well you can do the following:

```bash
solana-validator --ask-seed-phrase identity-keypair voting-keypair --ledger ...

[identity-keypair] seed phrase: 🔒
[identity-keypair] If this seed phrase has an associated passphrase, enter it now. Otherwise, press ENTER to continue:
[voting-keypair] seed phrase: 🔒
[voting-keypair] If this seed phrase has an associated passphrase, enter it now. Otherwise, press ENTER to continue:
```

Refer to the following page for a comprehensive guide on running a validator:
{% page-ref page="../running-validator/README.md" %}

## Delegating Stake

Solana CLI tooling supports secure keypair input for stake delegation. To do so,
first create a stake account with some SOL. Use the special `ASK` keyword to
trigger a seed phrase input prompt for the stake account and use
`--ask-seed-phrase keypair` to securely input the funding keypair.

```bash
solana create-stake-account ASK 1 SOL --ask-seed-phrase keypair

[stake_account] seed phrase: 🔒
[stake_account] If this seed phrase has an associated passphrase, enter it now. Otherwise, press ENTER to continue:
[keypair] seed phrase: 🔒
[keypair] If this seed phrase has an associated passphrase, enter it now. Otherwise, press ENTER to continue:
```

Then, to delegate that stake to a validator, use `--ask-seed-phrase keypair` to
securely input the funding keypair.

```bash
solana delegate-stake --ask-seed-phrase keypair <STAKE_ACCOUNT_PUBKEY> <VOTE_ACCOUNT_PUBKEY>

[keypair] seed phrase: 🔒
[keypair] If this seed phrase has an associated passphrase, enter it now. Otherwise, press ENTER to continue:
```

Refer to the following page for a comprehensive guide on delegating stake:
{% page-ref page="../running-validator/validator-stake.md" %}

---

{% page-ref page="../api-reference/cli.md" %}
@ -1,6 +1,6 @@
[package]
name = "solana-chacha-sys"
version = "0.21.0"
version = "0.21.6"
description = "Solana chacha-sys"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -86,17 +86,19 @@ nodes=(
--rpc-port 18899"
)

for i in $(seq 1 $extraNodes); do
portStart=$((8100 + i * 50))
portEnd=$((portStart + 49))
nodes+=(
"multinode-demo/validator.sh \
--no-restart \
--dynamic-port-range $portStart-$portEnd
--label dyn$i \
--init-complete-file init-complete-node$((2 + i)).log"
)
done
if [[ extraNodes -gt 0 ]]; then
for i in $(seq 1 $extraNodes); do
portStart=$((8100 + i * 50))
portEnd=$((portStart + 49))
nodes+=(
"multinode-demo/validator.sh \
--no-restart \
--dynamic-port-range $portStart-$portEnd
--label dyn$i \
--init-complete-file init-complete-node$((2 + i)).log"
)
done
fi
numNodes=$((2 + extraNodes))

pids=()

@ -313,7 +315,7 @@ flag_error() {

if ! $skipSetup; then
clear_config_dir "$SOLANA_CONFIG_DIR"
multinode-demo/setup.sh
multinode-demo/setup.sh --hashes-per-tick sleep
else
verifyLedger
fi

@ -365,7 +367,7 @@ while [[ $iteration -le $iterations ]]; do
echo "--- Wallet sanity ($iteration)"
(
set -x
timeout 90s scripts/wallet-sanity.sh --url http://127.0.0.1"$walletRpcPort"
timeout 60s scripts/wallet-sanity.sh --url http://127.0.0.1"$walletRpcPort"
) || flag_error

iteration=$((iteration + 1))
|
||||
fi
|
||||
|
||||
if [[ -z $CHANNEL_OR_TAG ]]; then
|
||||
echo +++ Unable to determine channel to publish into, exiting.
|
||||
echo +++ Unable to determine channel or tag to publish into, exiting.
|
||||
exit 0
|
||||
fi
|
||||
|
||||
|
@ -246,7 +246,7 @@ sanity() {
|
||||
(
|
||||
set -x
|
||||
NO_INSTALL_CHECK=1 \
|
||||
ci/testnet-sanity.sh beta-testnet-solana-com gce us-west1-b
|
||||
ci/testnet-sanity.sh beta-testnet-solana-com gce -P us-west1-b
|
||||
maybe_deploy_software --deploy-if-newer
|
||||
)
|
||||
;;
|
||||
@ -260,7 +260,7 @@ sanity() {
|
||||
testnet)
|
||||
(
|
||||
set -x
|
||||
ci/testnet-sanity.sh testnet-solana-com gce us-west1-b
|
||||
ci/testnet-sanity.sh testnet-solana-com gce -P us-west1-b
|
||||
)
|
||||
;;
|
||||
testnet-perf)
|
||||
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-clap-utils"
|
||||
version = "0.21.0"
|
||||
version = "0.21.6"
|
||||
description = "Solana utilities for the clap"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -12,7 +12,7 @@ edition = "2018"
|
||||
clap = "2.33.0"
|
||||
rpassword = "4.0"
|
||||
semver = "0.9.0"
|
||||
solana-sdk = { path = "../sdk", version = "0.21.0" }
|
||||
solana-sdk = { path = "../sdk", version = "0.21.6" }
|
||||
tiny-bip39 = "0.6.2"
|
||||
url = "2.1.0"
|
||||
|
||||
|
@ -3,8 +3,9 @@ use clap::ArgMatches;
|
||||
use solana_sdk::{
|
||||
native_token::sol_to_lamports,
|
||||
pubkey::Pubkey,
|
||||
signature::{read_keypair_file, Keypair, KeypairUtil},
|
||||
signature::{read_keypair_file, Keypair, KeypairUtil, Signature},
|
||||
};
|
||||
use std::str::FromStr;
|
||||
|
||||
// Return parsed values from matches at `name`
|
||||
pub fn values_of<T>(matches: &ArgMatches<'_>, name: &str) -> Option<Vec<T>>
|
||||
@ -35,7 +36,7 @@ pub fn keypair_of(matches: &ArgMatches<'_>, name: &str) -> Option<Keypair> {
|
||||
if let Some(value) = matches.value_of(name) {
|
||||
if value == ASK_KEYWORD {
|
||||
let skip_validation = matches.is_present(SKIP_SEED_PHRASE_VALIDATION_ARG.name);
|
||||
keypair_from_seed_phrase(name, skip_validation).ok()
|
||||
keypair_from_seed_phrase(name, skip_validation, true).ok()
|
||||
} else {
|
||||
read_keypair_file(value).ok()
|
||||
}
|
||||
@ -50,6 +51,20 @@ pub fn pubkey_of(matches: &ArgMatches<'_>, name: &str) -> Option<Pubkey> {
|
||||
value_of(matches, name).or_else(|| keypair_of(matches, name).map(|keypair| keypair.pubkey()))
|
||||
}
|
||||
|
||||
// Return pubkey/signature pairs for a string of the form pubkey=signature
|
||||
pub fn pubkeys_sigs_of(matches: &ArgMatches<'_>, name: &str) -> Option<Vec<(Pubkey, Signature)>> {
|
||||
matches.values_of(name).map(|values| {
|
||||
values
|
||||
.map(|pubkey_signer_string| {
|
||||
let mut signer = pubkey_signer_string.split('=');
|
||||
let key = Pubkey::from_str(signer.next().unwrap()).unwrap();
|
||||
let sig = Signature::from_str(signer.next().unwrap()).unwrap();
|
||||
(key, sig)
|
||||
})
|
||||
.collect()
|
||||
})
|
||||
}
|
||||
|
||||
pub fn amount_of(matches: &ArgMatches<'_>, name: &str, unit: &str) -> Option<u64> {
|
||||
if matches.value_of(unit) == Some("lamports") {
|
||||
value_of(matches, name)
|
||||
@ -172,4 +187,25 @@ mod tests {
|
||||
|
||||
fs::remove_file(&outfile).unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_pubkeys_sigs_of() {
|
||||
let key1 = Pubkey::new_rand();
|
||||
let key2 = Pubkey::new_rand();
|
||||
let sig1 = Keypair::new().sign_message(&[0u8]);
|
||||
let sig2 = Keypair::new().sign_message(&[1u8]);
|
||||
let signer1 = format!("{}={}", key1, sig1);
|
||||
let signer2 = format!("{}={}", key2, sig2);
|
||||
let matches = app().clone().get_matches_from(vec![
|
||||
"test",
|
||||
"--multiple",
|
||||
&signer1,
|
||||
"--multiple",
|
||||
&signer2,
|
||||
]);
|
||||
assert_eq!(
|
||||
pubkeys_sigs_of(&matches, "multiple"),
|
||||
Some(vec![(key1, sig1), (key2, sig2)])
|
||||
);
|
||||
}
|
||||
}
|
||||
|
@ -1,6 +1,8 @@
|
||||
use crate::keypair::ASK_KEYWORD;
|
||||
use solana_sdk::hash::Hash;
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
use solana_sdk::signature::read_keypair_file;
|
||||
use solana_sdk::signature::{read_keypair_file, Signature};
|
||||
use std::str::FromStr;
|
||||
|
||||
// Return an error if a pubkey cannot be parsed.
|
||||
pub fn is_pubkey(string: String) -> Result<(), String> {
|
||||
@ -10,6 +12,14 @@ pub fn is_pubkey(string: String) -> Result<(), String> {
|
||||
}
|
||||
}
|
||||
|
||||
// Return an error if a hash cannot be parsed.
|
||||
pub fn is_hash(string: String) -> Result<(), String> {
|
||||
match string.parse::<Hash>() {
|
||||
Ok(_) => Ok(()),
|
||||
Err(err) => Err(format!("{:?}", err)),
|
||||
}
|
||||
}
|
||||
|
||||
// Return an error if a keypair file cannot be parsed.
|
||||
pub fn is_keypair(string: String) -> Result<(), String> {
|
||||
read_keypair_file(&string)
|
||||
@ -32,6 +42,28 @@ pub fn is_pubkey_or_keypair(string: String) -> Result<(), String> {
|
||||
is_pubkey(string.clone()).or_else(|_| is_keypair(string))
|
||||
}
|
||||
|
||||
// Return an error if string cannot be parsed as pubkey=signature string
|
||||
pub fn is_pubkey_sig(string: String) -> Result<(), String> {
|
||||
let mut signer = string.split('=');
|
||||
match Pubkey::from_str(
|
||||
signer
|
||||
.next()
|
||||
.ok_or_else(|| "Malformed signer string".to_string())?,
|
||||
) {
|
||||
Ok(_) => {
|
||||
match Signature::from_str(
|
||||
signer
|
||||
.next()
|
||||
.ok_or_else(|| "Malformed signer string".to_string())?,
|
||||
) {
|
||||
Ok(_) => Ok(()),
|
||||
Err(err) => Err(format!("{:?}", err)),
|
||||
}
|
||||
}
|
||||
Err(err) => Err(format!("{:?}", err)),
|
||||
}
|
||||
}
|
||||
|
||||
// Return an error if a url cannot be parsed.
|
||||
pub fn is_url(string: String) -> Result<(), String> {
|
||||
match url::Url::parse(&string) {
|
||||
@ -65,3 +97,24 @@ pub fn is_port(port: String) -> Result<(), String> {
|
||||
.map(|_| ())
|
||||
.map_err(|e| format!("{:?}", e))
|
||||
}
|
||||
|
||||
pub fn is_valid_percentage(percentage: String) -> Result<(), String> {
|
||||
percentage
|
||||
.parse::<u8>()
|
||||
.map_err(|e| {
|
||||
format!(
|
||||
"Unable to parse input percentage, provided: {}, err: {:?}",
|
||||
percentage, e
|
||||
)
|
||||
})
|
||||
.and_then(|v| {
|
||||
if v > 100 {
|
||||
Err(format!(
|
||||
"Percentage must be in range of 0 to 100, provided: {}",
|
||||
v
|
||||
))
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
})
|
||||
}
|
||||
|
@ -2,11 +2,18 @@ use crate::ArgConstant;
|
||||
use bip39::{Language, Mnemonic, Seed};
|
||||
use clap::values_t;
|
||||
use rpassword::prompt_password_stderr;
|
||||
use solana_sdk::signature::{
|
||||
keypair_from_seed, keypair_from_seed_phrase_and_passphrase, read_keypair_file, Keypair,
|
||||
KeypairUtil,
|
||||
use solana_sdk::{
|
||||
pubkey::Pubkey,
|
||||
signature::{
|
||||
keypair_from_seed, keypair_from_seed_phrase_and_passphrase, read_keypair_file, Keypair,
|
||||
KeypairUtil,
|
||||
},
|
||||
};
|
||||
use std::{
|
||||
error,
|
||||
io::{stdin, stdout, Write},
|
||||
process::exit,
|
||||
};
|
||||
use std::error;
|
||||
|
||||
// Keyword used to indicate that the user should be asked for a keypair seed phrase
|
||||
pub const ASK_KEYWORD: &str = "ASK";
|
||||
@ -41,10 +48,25 @@ impl KeypairWithSource {
|
||||
}
|
||||
}
|
||||
|
||||
/// Prompts user for a passphrase and then asks for confirmation to check for mistakes
|
||||
pub fn prompt_passphrase(prompt: &str) -> Result<String, Box<dyn error::Error>> {
|
||||
let passphrase = prompt_password_stderr(&prompt)?;
|
||||
if !passphrase.is_empty() {
|
||||
let confirmed = rpassword::prompt_password_stderr("Enter same passphrase again: ")?;
|
||||
if confirmed != passphrase {
|
||||
return Err("Passphrases did not match".into());
|
||||
}
|
||||
}
|
||||
Ok(passphrase)
|
||||
}
|
||||
|
||||
/// Reads user input from stdin to retrieve a seed phrase and passphrase for keypair derivation
|
||||
/// Optionally skips validation of seed phrase
|
||||
/// Optionally confirms recovered public key
|
||||
pub fn keypair_from_seed_phrase(
|
||||
keypair_name: &str,
|
||||
skip_validation: bool,
|
||||
confirm_pubkey: bool,
|
||||
) -> Result<Keypair, Box<dyn error::Error>> {
|
||||
let seed_phrase = prompt_password_stderr(&format!("[{}] seed phrase: ", keypair_name))?;
|
||||
let seed_phrase = seed_phrase.trim();
|
||||
@ -53,15 +75,30 @@ pub fn keypair_from_seed_phrase(
|
||||
keypair_name,
|
||||
);
|
||||
|
||||
if skip_validation {
|
||||
let passphrase = prompt_password_stderr(&passphrase_prompt)?;
|
||||
keypair_from_seed_phrase_and_passphrase(&seed_phrase, &passphrase)
|
||||
let keypair = if skip_validation {
|
||||
let passphrase = prompt_passphrase(&passphrase_prompt)?;
|
||||
keypair_from_seed_phrase_and_passphrase(&seed_phrase, &passphrase)?
|
||||
} else {
|
||||
let mnemonic = Mnemonic::from_phrase(seed_phrase, Language::English)?;
|
||||
let passphrase = prompt_password_stderr(&passphrase_prompt)?;
|
||||
let sanitized = sanitize_seed_phrase(seed_phrase);
|
||||
let mnemonic = Mnemonic::from_phrase(sanitized, Language::English)?;
|
||||
let passphrase = prompt_passphrase(&passphrase_prompt)?;
|
||||
let seed = Seed::new(&mnemonic, &passphrase);
|
||||
keypair_from_seed(seed.as_bytes())
|
||||
keypair_from_seed(seed.as_bytes())?
|
||||
};
|
||||
|
||||
if confirm_pubkey {
|
||||
let pubkey = Pubkey::new(keypair.public.as_ref());
|
||||
print!("Recovered pubkey `{:?}`. Continue? (y/n): ", pubkey);
|
||||
let _ignored = stdout().flush();
|
||||
let mut input = String::new();
|
||||
stdin().read_line(&mut input).expect("Unexpected input");
|
||||
if input.to_lowercase().trim() != "y" {
|
||||
println!("Exiting");
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(keypair)
|
||||
}
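
For illustration, assuming the default `confirm_pubkey = true` path used by `--ask-seed-phrase`, a session would look roughly like this; the recovered pubkey is a placeholder and the keypair name depends on the calling command:

```bash
[identity-keypair] seed phrase: 🔒
[identity-keypair] If this seed phrase has an associated passphrase, enter it now. Otherwise, press ENTER to continue:
# <RECOVERED_PUBKEY> below is a made-up placeholder
Recovered pubkey `<RECOVERED_PUBKEY>`. Continue? (y/n): y
```
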
|
||||
|
||||
/// Checks CLI arguments to determine whether a keypair should be:
|
||||
@ -91,7 +128,7 @@ pub fn keypair_input(
|
||||
}
|
||||
|
||||
let skip_validation = matches.is_present(SKIP_SEED_PHRASE_VALIDATION_ARG.name);
|
||||
keypair_from_seed_phrase(keypair_name, skip_validation)
|
||||
keypair_from_seed_phrase(keypair_name, skip_validation, true)
|
||||
.map(|keypair| KeypairWithSource::new(keypair, Source::SeedPhrase))
|
||||
} else if let Some(keypair_file) = matches.value_of(keypair_match_name) {
|
||||
read_keypair_file(keypair_file).map(|keypair| KeypairWithSource::new(keypair, Source::File))
|
||||
@ -100,6 +137,13 @@ pub fn keypair_input(
|
||||
}
|
||||
}
|
||||
|
||||
fn sanitize_seed_phrase(seed_phrase: &str) -> String {
|
||||
seed_phrase
|
||||
.split_whitespace()
|
||||
.collect::<Vec<&str>>()
|
||||
.join(" ")
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
@ -111,4 +155,13 @@ mod tests {
|
||||
let KeypairWithSource { source, .. } = keypair_input(&arg_matches, "").unwrap();
|
||||
assert_eq!(source, Source::Generated);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_sanitize_seed_phrase() {
|
||||
let seed_phrase = " Mary had\ta\u{2009}little \n\t lamb";
|
||||
assert_eq!(
|
||||
"Mary had a little lamb".to_owned(),
|
||||
sanitize_seed_phrase(seed_phrase)
|
||||
);
|
||||
}
|
||||
}
|
||||
|
@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
edition = "2018"
|
||||
name = "solana-cli"
|
||||
description = "Blockchain, Rebuilt for Scale"
|
||||
version = "0.21.0"
|
||||
version = "0.21.6"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@ -20,6 +20,7 @@ dirs = "2.0.2"
|
||||
lazy_static = "1.4.0"
|
||||
log = "0.4.8"
|
||||
indicatif = "0.13.0"
|
||||
humantime = "1.3.0"
|
||||
num-traits = "0.2"
|
||||
pretty-hex = "0.1.1"
|
||||
reqwest = { version = "0.9.22", default-features = false, features = ["rustls-tls"] }
|
||||
@ -27,24 +28,24 @@ serde = "1.0.102"
|
||||
serde_derive = "1.0.102"
|
||||
serde_json = "1.0.41"
|
||||
serde_yaml = "0.8.11"
|
||||
solana-budget-program = { path = "../programs/budget", version = "0.21.0" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "0.21.0" }
|
||||
solana-client = { path = "../client", version = "0.21.0" }
|
||||
solana-config-program = { path = "../programs/config", version = "0.21.0" }
|
||||
solana-drone = { path = "../drone", version = "0.21.0" }
|
||||
solana-logger = { path = "../logger", version = "0.21.0" }
|
||||
solana-net-utils = { path = "../net-utils", version = "0.21.0" }
|
||||
solana-runtime = { path = "../runtime", version = "0.21.0" }
|
||||
solana-sdk = { path = "../sdk", version = "0.21.0" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "0.21.0" }
|
||||
solana-storage-program = { path = "../programs/storage", version = "0.21.0" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "0.21.0" }
|
||||
solana-vote-signer = { path = "../vote-signer", version = "0.21.0" }
|
||||
solana-budget-program = { path = "../programs/budget", version = "0.21.6" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "0.21.6" }
|
||||
solana-client = { path = "../client", version = "0.21.6" }
|
||||
solana-config-program = { path = "../programs/config", version = "0.21.6" }
|
||||
solana-drone = { path = "../drone", version = "0.21.6" }
|
||||
solana-logger = { path = "../logger", version = "0.21.6" }
|
||||
solana-net-utils = { path = "../net-utils", version = "0.21.6" }
|
||||
solana-runtime = { path = "../runtime", version = "0.21.6" }
|
||||
solana-sdk = { path = "../sdk", version = "0.21.6" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "0.21.6" }
|
||||
solana-storage-program = { path = "../programs/storage", version = "0.21.6" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "0.21.6" }
|
||||
solana-vote-signer = { path = "../vote-signer", version = "0.21.6" }
|
||||
url = "2.1.0"
|
||||
|
||||
[dev-dependencies]
|
||||
solana-core = { path = "../core", version = "0.21.0" }
|
||||
solana-budget-program = { path = "../programs/budget", version = "0.21.0" }
|
||||
solana-core = { path = "../core", version = "0.21.6" }
|
||||
solana-budget-program = { path = "../programs/budget", version = "0.21.6" }
|
||||
tempfile = "3.1.0"
|
||||
|
||||
[[bin]]
|
||||
|
519 cli/src/cli.rs
@ -1,5 +1,10 @@
|
||||
use crate::{
|
||||
cluster_query::*, display::println_name_value, stake::*, storage::*, validator_info::*, vote::*,
|
||||
cluster_query::*,
|
||||
display::{println_name_value, println_signers},
|
||||
stake::*,
|
||||
storage::*,
|
||||
validator_info::*,
|
||||
vote::*,
|
||||
};
|
||||
use chrono::prelude::*;
|
||||
use clap::{App, AppSettings, Arg, ArgMatches, SubCommand};
|
||||
@ -15,6 +20,7 @@ use solana_drone::drone::request_airdrop_transaction;
|
||||
use solana_drone::drone_mock::request_airdrop_transaction;
|
||||
use solana_sdk::{
|
||||
bpf_loader,
|
||||
clock::{Epoch, Slot},
|
||||
commitment_config::CommitmentConfig,
|
||||
fee_calculator::FeeCalculator,
|
||||
hash::Hash,
|
||||
@ -74,6 +80,9 @@ pub enum CliCommand {
|
||||
},
|
||||
ClusterVersion,
|
||||
Fees,
|
||||
GetBlockTime {
|
||||
slot: Slot,
|
||||
},
|
||||
GetEpochInfo {
|
||||
commitment_config: CommitmentConfig,
|
||||
},
|
||||
@ -91,6 +100,10 @@ pub enum CliCommand {
|
||||
timeout: Duration,
|
||||
commitment_config: CommitmentConfig,
|
||||
},
|
||||
ShowBlockProduction {
|
||||
epoch: Option<Epoch>,
|
||||
slot_limit: Option<u64>,
|
||||
},
|
||||
ShowGossip,
|
||||
ShowValidators {
|
||||
use_lamports_unit: bool,
|
||||
@ -105,8 +118,20 @@ pub enum CliCommand {
|
||||
lockup: Lockup,
|
||||
lamports: u64,
|
||||
},
|
||||
DeactivateStake(Pubkey),
|
||||
DelegateStake(Pubkey, Pubkey, bool),
|
||||
DeactivateStake {
|
||||
stake_account_pubkey: Pubkey,
|
||||
sign_only: bool,
|
||||
signers: Option<Vec<(Pubkey, Signature)>>,
|
||||
blockhash: Option<Hash>,
|
||||
},
|
||||
DelegateStake {
|
||||
stake_account_pubkey: Pubkey,
|
||||
vote_account_pubkey: Pubkey,
|
||||
force: bool,
|
||||
sign_only: bool,
|
||||
signers: Option<Vec<(Pubkey, Signature)>>,
|
||||
blockhash: Option<Hash>,
|
||||
},
|
||||
RedeemVoteCredits(Pubkey, Pubkey),
|
||||
ShowStakeHistory {
|
||||
use_lamports_unit: bool,
|
||||
@ -152,7 +177,16 @@ pub enum CliCommand {
|
||||
aggregate: bool,
|
||||
span: Option<u64>,
|
||||
},
|
||||
VoteAuthorize(Pubkey, Pubkey, VoteAuthorize),
|
||||
VoteAuthorize {
|
||||
vote_account_pubkey: Pubkey,
|
||||
new_authorized_pubkey: Pubkey,
|
||||
vote_authorize: VoteAuthorize,
|
||||
},
|
||||
VoteUpdateValidator {
|
||||
vote_account_pubkey: Pubkey,
|
||||
new_identity_pubkey: Pubkey,
|
||||
authorized_voter: KeypairEq,
|
||||
},
|
||||
// Wallet Commands
|
||||
Address,
|
||||
Airdrop {
|
||||
@ -174,6 +208,9 @@ pub enum CliCommand {
|
||||
timestamp_pubkey: Option<Pubkey>,
|
||||
witnesses: Option<Vec<Pubkey>>,
|
||||
cancelable: bool,
|
||||
sign_only: bool,
|
||||
signers: Option<Vec<(Pubkey, Signature)>>,
|
||||
blockhash: Option<Hash>,
|
||||
},
|
||||
ShowAccount {
|
||||
pubkey: Pubkey,
|
||||
@ -266,6 +303,7 @@ pub fn parse_command(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, Box<dyn
|
||||
command: CliCommand::Fees,
|
||||
require_keypair: false,
|
||||
}),
|
||||
("get-block-time", Some(matches)) => parse_get_block_time(matches),
|
||||
("get-epoch-info", Some(matches)) => parse_get_epoch_info(matches),
|
||||
("get-genesis-hash", Some(_matches)) => Ok(CliCommandInfo {
|
||||
command: CliCommand::GetGenesisHash,
|
||||
@ -274,6 +312,7 @@ pub fn parse_command(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, Box<dyn
|
||||
("get-slot", Some(matches)) => parse_get_slot(matches),
|
||||
("get-transaction-count", Some(matches)) => parse_get_transaction_count(matches),
|
||||
("ping", Some(matches)) => parse_cluster_ping(matches),
|
||||
("show-block-production", Some(matches)) => parse_show_block_production(matches),
|
||||
("show-gossip", Some(_matches)) => Ok(CliCommandInfo {
|
||||
command: CliCommand::ShowGossip,
|
||||
require_keypair: false,
|
||||
@ -321,6 +360,7 @@ pub fn parse_command(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, Box<dyn
|
||||
},
|
||||
// Vote Commands
|
||||
("create-vote-account", Some(matches)) => parse_vote_create_account(matches),
|
||||
("vote-update-validator", Some(matches)) => parse_vote_update_validator(matches),
|
||||
("vote-authorize-voter", Some(matches)) => {
|
||||
parse_vote_authorize(matches, VoteAuthorize::Voter)
|
||||
}
|
||||
@ -413,6 +453,9 @@ pub fn parse_command(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, Box<dyn
|
||||
let timestamp_pubkey = value_of(&matches, "timestamp_pubkey");
|
||||
let witnesses = values_of(&matches, "witness");
|
||||
let cancelable = matches.is_present("cancelable");
|
||||
let sign_only = matches.is_present("sign_only");
|
||||
let signers = pubkeys_sigs_of(&matches, "signer");
|
||||
let blockhash = value_of(&matches, "blockhash");
|
||||
|
||||
Ok(CliCommandInfo {
|
||||
command: CliCommand::Pay {
|
||||
@ -422,8 +465,11 @@ pub fn parse_command(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, Box<dyn
|
||||
timestamp_pubkey,
|
||||
witnesses,
|
||||
cancelable,
|
||||
sign_only,
|
||||
signers,
|
||||
blockhash,
|
||||
},
|
||||
require_keypair: true,
|
||||
require_keypair: !sign_only,
|
||||
})
|
||||
}
|
||||
("show-account", Some(matches)) => {
|
||||
@ -481,20 +527,20 @@ pub type ProcessResult = Result<String, Box<dyn std::error::Error>>;
|
||||
|
||||
pub fn check_account_for_fee(
|
||||
rpc_client: &RpcClient,
|
||||
config: &CliConfig,
|
||||
account_pubkey: &Pubkey,
|
||||
fee_calculator: &FeeCalculator,
|
||||
message: &Message,
|
||||
) -> Result<(), Box<dyn error::Error>> {
|
||||
check_account_for_multiple_fees(rpc_client, config, fee_calculator, &[message])
|
||||
check_account_for_multiple_fees(rpc_client, account_pubkey, fee_calculator, &[message])
|
||||
}
|
||||
|
||||
fn check_account_for_multiple_fees(
|
||||
rpc_client: &RpcClient,
|
||||
config: &CliConfig,
|
||||
account_pubkey: &Pubkey,
|
||||
fee_calculator: &FeeCalculator,
|
||||
messages: &[&Message],
|
||||
) -> Result<(), Box<dyn error::Error>> {
|
||||
let balance = rpc_client.retry_get_balance(&config.keypair.pubkey(), 5)?;
|
||||
let balance = rpc_client.retry_get_balance(account_pubkey, 5)?;
|
||||
if let Some(lamports) = balance {
|
||||
if lamports
|
||||
>= messages
|
||||
@ -522,6 +568,48 @@ pub fn check_unique_pubkeys(
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_blockhash_fee_calculator(
|
||||
rpc_client: &RpcClient,
|
||||
sign_only: bool,
|
||||
blockhash: Option<Hash>,
|
||||
) -> Result<(Hash, FeeCalculator), Box<dyn std::error::Error>> {
|
||||
Ok(if let Some(blockhash) = blockhash {
|
||||
if sign_only {
|
||||
(blockhash, FeeCalculator::default())
|
||||
} else {
|
||||
(blockhash, rpc_client.get_recent_blockhash()?.1)
|
||||
}
|
||||
} else {
|
||||
rpc_client.get_recent_blockhash()?
|
||||
})
|
||||
}
|
||||
|
||||
pub fn return_signers(tx: &Transaction) -> ProcessResult {
|
||||
println_signers(tx);
|
||||
let signers: Vec<_> = tx
|
||||
.signatures
|
||||
.iter()
|
||||
.zip(tx.message.account_keys.clone())
|
||||
.map(|(signature, pubkey)| format!("{}={}", pubkey, signature))
|
||||
.collect();
|
||||
|
||||
Ok(json!({
|
||||
"blockhash": tx.message.recent_blockhash.to_string(),
|
||||
"signers": &signers,
|
||||
})
|
||||
.to_string())
|
||||
}
|
||||
|
||||
pub fn replace_signatures(tx: &mut Transaction, signers: &[(Pubkey, Signature)]) -> ProcessResult {
|
||||
tx.replace_signatures(signers).map_err(|_| {
|
||||
CliError::BadParameter(
|
||||
"Transaction construction failed, incorrect signature or public key provided"
|
||||
.to_string(),
|
||||
)
|
||||
})?;
|
||||
Ok("".to_string())
|
||||
}
|
||||
|
||||
fn process_airdrop(
|
||||
rpc_client: &RpcClient,
|
||||
config: &CliConfig,
|
||||
@ -671,7 +759,12 @@ fn process_deploy(
|
||||
let mut finalize_tx = Transaction::new(&signers, message, blockhash);
|
||||
messages.push(&finalize_tx.message);
|
||||
|
||||
check_account_for_multiple_fees(rpc_client, config, &fee_calculator, &messages)?;
|
||||
check_account_for_multiple_fees(
|
||||
rpc_client,
|
||||
&config.keypair.pubkey(),
|
||||
&fee_calculator,
|
||||
&messages,
|
||||
)?;
|
||||
|
||||
trace!("Creating program account");
|
||||
let result = rpc_client.send_and_confirm_transaction(&mut create_account_tx, &signers);
|
||||
@ -694,6 +787,7 @@ fn process_deploy(
|
||||
.to_string())
|
||||
}
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
fn process_pay(
|
||||
rpc_client: &RpcClient,
|
||||
config: &CliConfig,
|
||||
@ -703,12 +797,17 @@ fn process_pay(
|
||||
timestamp_pubkey: Option<Pubkey>,
|
||||
witnesses: &Option<Vec<Pubkey>>,
|
||||
cancelable: bool,
|
||||
sign_only: bool,
|
||||
signers: &Option<Vec<(Pubkey, Signature)>>,
|
||||
blockhash: Option<Hash>,
|
||||
) -> ProcessResult {
|
||||
check_unique_pubkeys(
|
||||
(&config.keypair.pubkey(), "cli keypair".to_string()),
|
||||
(to, "to".to_string()),
|
||||
)?;
|
||||
let (blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;
|
||||
|
||||
let (blockhash, fee_calculator) =
|
||||
get_blockhash_fee_calculator(rpc_client, sign_only, blockhash)?;
|
||||
|
||||
let cancelable = if cancelable {
|
||||
Some(config.keypair.pubkey())
|
||||
@ -718,9 +817,22 @@ fn process_pay(
|
||||
|
||||
if timestamp == None && *witnesses == None {
|
||||
let mut tx = system_transaction::transfer(&config.keypair, to, lamports, blockhash);
|
||||
check_account_for_fee(rpc_client, config, &fee_calculator, &tx.message)?;
|
||||
let result = rpc_client.send_and_confirm_transaction(&mut tx, &[&config.keypair]);
|
||||
log_instruction_custom_error::<SystemError>(result)
|
||||
if let Some(signers) = signers {
|
||||
replace_signatures(&mut tx, &signers)?;
|
||||
}
|
||||
|
||||
if sign_only {
|
||||
return_signers(&tx)
|
||||
} else {
|
||||
check_account_for_fee(
|
||||
rpc_client,
|
||||
&config.keypair.pubkey(),
|
||||
&fee_calculator,
|
||||
&tx.message,
|
||||
)?;
|
||||
let result = rpc_client.send_and_confirm_transaction(&mut tx, &[&config.keypair]);
|
||||
log_instruction_custom_error::<SystemError>(result)
|
||||
}
|
||||
} else if *witnesses == None {
|
||||
let dt = timestamp.unwrap();
|
||||
let dt_pubkey = match timestamp_pubkey {
|
||||
@ -745,19 +857,29 @@ fn process_pay(
|
||||
ixs,
|
||||
blockhash,
|
||||
);
|
||||
check_account_for_fee(rpc_client, config, &fee_calculator, &tx.message)?;
|
||||
let result =
|
||||
rpc_client.send_and_confirm_transaction(&mut tx, &[&config.keypair, &contract_state]);
|
||||
let signature_str = log_instruction_custom_error::<BudgetError>(result)?;
|
||||
if let Some(signers) = signers {
|
||||
replace_signatures(&mut tx, &signers)?;
|
||||
}
|
||||
if sign_only {
|
||||
return_signers(&tx)
|
||||
} else {
|
||||
check_account_for_fee(
|
||||
rpc_client,
|
||||
&config.keypair.pubkey(),
|
||||
&fee_calculator,
|
||||
&tx.message,
|
||||
)?;
|
||||
let result = rpc_client
|
||||
.send_and_confirm_transaction(&mut tx, &[&config.keypair, &contract_state]);
|
||||
let signature_str = log_instruction_custom_error::<BudgetError>(result)?;
|
||||
|
||||
Ok(json!({
|
||||
"signature": signature_str,
|
||||
"processId": format!("{}", contract_state.pubkey()),
|
||||
})
|
||||
.to_string())
|
||||
Ok(json!({
|
||||
"signature": signature_str,
|
||||
"processId": format!("{}", contract_state.pubkey()),
|
||||
})
|
||||
.to_string())
|
||||
}
|
||||
} else if timestamp == None {
|
||||
let (blockhash, _fee_calculator) = rpc_client.get_recent_blockhash()?;
|
||||
|
||||
let witness = if let Some(ref witness_vec) = *witnesses {
|
||||
witness_vec[0]
|
||||
} else {
|
||||
@ -783,16 +905,28 @@ fn process_pay(
|
||||
ixs,
|
||||
blockhash,
|
||||
);
|
||||
let result =
|
||||
rpc_client.send_and_confirm_transaction(&mut tx, &[&config.keypair, &contract_state]);
|
||||
check_account_for_fee(rpc_client, config, &fee_calculator, &tx.message)?;
|
||||
let signature_str = log_instruction_custom_error::<BudgetError>(result)?;
|
||||
if let Some(signers) = signers {
|
||||
replace_signatures(&mut tx, &signers)?;
|
||||
}
|
||||
if sign_only {
|
||||
return_signers(&tx)
|
||||
} else {
|
||||
let result = rpc_client
|
||||
.send_and_confirm_transaction(&mut tx, &[&config.keypair, &contract_state]);
|
||||
check_account_for_fee(
|
||||
rpc_client,
|
||||
&config.keypair.pubkey(),
|
||||
&fee_calculator,
|
||||
&tx.message,
|
||||
)?;
|
||||
let signature_str = log_instruction_custom_error::<BudgetError>(result)?;
|
||||
|
||||
Ok(json!({
|
||||
"signature": signature_str,
|
||||
"processId": format!("{}", contract_state.pubkey()),
|
||||
})
|
||||
.to_string())
|
||||
Ok(json!({
|
||||
"signature": signature_str,
|
||||
"processId": format!("{}", contract_state.pubkey()),
|
||||
})
|
||||
.to_string())
|
||||
}
|
||||
} else {
|
||||
Ok("Combo transactions not yet handled".to_string())
|
||||
}
|
||||
@ -806,7 +940,12 @@ fn process_cancel(rpc_client: &RpcClient, config: &CliConfig, pubkey: &Pubkey) -
|
||||
&config.keypair.pubkey(),
|
||||
);
|
||||
let mut tx = Transaction::new_signed_instructions(&[&config.keypair], vec![ix], blockhash);
|
||||
check_account_for_fee(rpc_client, config, &fee_calculator, &tx.message)?;
|
||||
check_account_for_fee(
|
||||
rpc_client,
|
||||
&config.keypair.pubkey(),
|
||||
&fee_calculator,
|
||||
&tx.message,
|
||||
)?;
|
||||
let result = rpc_client.send_and_confirm_transaction(&mut tx, &[&config.keypair]);
|
||||
log_instruction_custom_error::<BudgetError>(result)
|
||||
}
|
||||
@ -822,7 +961,12 @@ fn process_time_elapsed(
|
||||
|
||||
let ix = budget_instruction::apply_timestamp(&config.keypair.pubkey(), pubkey, to, dt);
|
||||
let mut tx = Transaction::new_signed_instructions(&[&config.keypair], vec![ix], blockhash);
|
||||
check_account_for_fee(rpc_client, config, &fee_calculator, &tx.message)?;
|
||||
check_account_for_fee(
|
||||
rpc_client,
|
||||
&config.keypair.pubkey(),
|
||||
&fee_calculator,
|
||||
&tx.message,
|
||||
)?;
|
||||
let result = rpc_client.send_and_confirm_transaction(&mut tx, &[&config.keypair]);
|
||||
log_instruction_custom_error::<BudgetError>(result)
|
||||
}
|
||||
@ -837,7 +981,12 @@ fn process_witness(
|
||||
|
||||
let ix = budget_instruction::apply_signature(&config.keypair.pubkey(), pubkey, to);
|
||||
let mut tx = Transaction::new_signed_instructions(&[&config.keypair], vec![ix], blockhash);
|
||||
check_account_for_fee(rpc_client, config, &fee_calculator, &tx.message)?;
|
||||
check_account_for_fee(
|
||||
rpc_client,
|
||||
&config.keypair.pubkey(),
|
||||
&fee_calculator,
|
||||
&tx.message,
|
||||
)?;
|
||||
let result = rpc_client.send_and_confirm_transaction(&mut tx, &[&config.keypair]);
|
||||
log_instruction_custom_error::<BudgetError>(result)
|
||||
}
|
||||
@ -870,6 +1019,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
|
||||
CliCommand::Catchup { node_pubkey } => process_catchup(&rpc_client, node_pubkey),
|
||||
CliCommand::ClusterVersion => process_cluster_version(&rpc_client),
|
||||
CliCommand::Fees => process_fees(&rpc_client),
|
||||
CliCommand::GetBlockTime { slot } => process_get_block_time(&rpc_client, *slot),
|
||||
CliCommand::GetGenesisHash => process_get_genesis_hash(&rpc_client),
|
||||
CliCommand::GetEpochInfo { commitment_config } => {
|
||||
process_get_epoch_info(&rpc_client, commitment_config)
|
||||
@ -895,6 +1045,9 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
|
||||
timeout,
|
||||
commitment_config,
|
||||
),
|
||||
CliCommand::ShowBlockProduction { epoch, slot_limit } => {
|
||||
process_show_block_production(&rpc_client, *epoch, *slot_limit)
|
||||
}
|
||||
CliCommand::ShowGossip => process_show_gossip(&rpc_client),
|
||||
CliCommand::ShowValidators { use_lamports_unit } => {
|
||||
process_show_validators(&rpc_client, *use_lamports_unit)
|
||||
@ -926,18 +1079,36 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
|
||||
*lamports,
|
||||
),
|
||||
// Deactivate stake account
|
||||
CliCommand::DeactivateStake(stake_account_pubkey) => {
|
||||
process_deactivate_stake_account(&rpc_client, config, &stake_account_pubkey)
|
||||
}
|
||||
CliCommand::DelegateStake(stake_account_pubkey, vote_account_pubkey, force) => {
|
||||
process_delegate_stake(
|
||||
&rpc_client,
|
||||
config,
|
||||
&stake_account_pubkey,
|
||||
&vote_account_pubkey,
|
||||
*force,
|
||||
)
|
||||
}
|
||||
CliCommand::DeactivateStake {
|
||||
stake_account_pubkey,
|
||||
sign_only,
|
||||
ref signers,
|
||||
blockhash,
|
||||
} => process_deactivate_stake_account(
|
||||
&rpc_client,
|
||||
config,
|
||||
&stake_account_pubkey,
|
||||
*sign_only,
|
||||
signers,
|
||||
*blockhash,
|
||||
),
|
||||
CliCommand::DelegateStake {
|
||||
stake_account_pubkey,
|
||||
vote_account_pubkey,
|
||||
force,
|
||||
sign_only,
|
||||
ref signers,
|
||||
blockhash,
|
||||
} => process_delegate_stake(
|
||||
&rpc_client,
|
||||
config,
|
||||
&stake_account_pubkey,
|
||||
&vote_account_pubkey,
|
||||
*force,
|
||||
*sign_only,
|
||||
signers,
|
||||
*blockhash,
|
||||
),
|
||||
CliCommand::RedeemVoteCredits(stake_account_pubkey, vote_account_pubkey) => {
|
||||
process_redeem_vote_credits(
|
||||
&rpc_client,
|
||||
@ -1053,15 +1224,28 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
|
||||
&vote_account_pubkey,
|
||||
*use_lamports_unit,
|
||||
),
|
||||
CliCommand::VoteAuthorize(vote_account_pubkey, new_authorized_pubkey, vote_authorize) => {
|
||||
process_vote_authorize(
|
||||
&rpc_client,
|
||||
config,
|
||||
&vote_account_pubkey,
|
||||
&new_authorized_pubkey,
|
||||
*vote_authorize,
|
||||
)
|
||||
}
|
||||
CliCommand::VoteAuthorize {
|
||||
vote_account_pubkey,
|
||||
new_authorized_pubkey,
|
||||
vote_authorize,
|
||||
} => process_vote_authorize(
|
||||
&rpc_client,
|
||||
config,
|
||||
&vote_account_pubkey,
|
||||
&new_authorized_pubkey,
|
||||
*vote_authorize,
|
||||
),
|
||||
CliCommand::VoteUpdateValidator {
|
||||
vote_account_pubkey,
|
||||
new_identity_pubkey,
|
||||
authorized_voter,
|
||||
} => process_vote_update_validator(
|
||||
&rpc_client,
|
||||
config,
|
||||
&vote_account_pubkey,
|
||||
&new_identity_pubkey,
|
||||
authorized_voter,
|
||||
),
|
||||
CliCommand::Uptime {
|
||||
pubkey: vote_account_pubkey,
|
||||
aggregate,
|
||||
@ -1118,6 +1302,9 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
|
||||
timestamp_pubkey,
|
||||
ref witnesses,
|
||||
cancelable,
|
||||
sign_only,
|
||||
ref signers,
|
||||
blockhash,
|
||||
} => process_pay(
|
||||
&rpc_client,
|
||||
config,
|
||||
@ -1127,6 +1314,9 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
|
||||
*timestamp_pubkey,
|
||||
witnesses,
|
||||
*cancelable,
|
||||
*sign_only,
|
||||
signers,
|
||||
*blockhash,
|
||||
),
|
||||
CliCommand::ShowAccount {
|
||||
pubkey,
|
||||
@ -1410,6 +1600,29 @@ pub fn app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> App<'ab, '
|
||||
Arg::with_name("cancelable")
|
||||
.long("cancelable")
|
||||
.takes_value(false),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("sign_only")
|
||||
.long("sign-only")
|
||||
.takes_value(false)
|
||||
.help("Sign the transaction offline"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("signer")
|
||||
.long("signer")
|
||||
.value_name("PUBKEY=BASE58_SIG")
|
||||
.takes_value(true)
|
||||
.validator(is_pubkey_sig)
|
||||
.multiple(true)
|
||||
.help("Provide a public-key/signature pair for the transaction"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("blockhash")
|
||||
.long("blockhash")
|
||||
.value_name("BLOCKHASH")
|
||||
.takes_value(true)
|
||||
.validator(is_hash)
|
||||
.help("Use the supplied blockhash"),
|
||||
),
|
||||
)
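
A hedged sketch of how the new `pay` flags above might be combined for offline signing; the invocation shape mirrors the argument definitions and tests in this change, while the placeholder values and the split across two machines are assumptions:

```bash
# Offline: sign against a known blockhash without broadcasting (assumed workflow)
solana pay <RECIPIENT_PUBKEY> 50 lamports --blockhash <RECENT_BLOCKHASH> --sign-only

# Online: submit the same transaction with the signature collected offline
solana pay <RECIPIENT_PUBKEY> 50 lamports --blockhash <RECENT_BLOCKHASH> \
    --signer <PUBKEY>=<BASE58_SIG>
```
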
|
||||
.subcommand(
|
||||
@ -1667,6 +1880,9 @@ mod tests {
|
||||
timestamp_pubkey: None,
|
||||
witnesses: None,
|
||||
cancelable: false,
|
||||
sign_only: false,
|
||||
signers: None,
|
||||
blockhash: None,
|
||||
},
|
||||
require_keypair: true
|
||||
}
|
||||
@ -1694,6 +1910,9 @@ mod tests {
|
||||
timestamp_pubkey: None,
|
||||
witnesses: Some(vec![witness0, witness1]),
|
||||
cancelable: false,
|
||||
sign_only: false,
|
||||
signers: None,
|
||||
blockhash: None,
|
||||
},
|
||||
require_keypair: true
|
||||
}
|
||||
@ -1717,6 +1936,9 @@ mod tests {
|
||||
timestamp_pubkey: None,
|
||||
witnesses: Some(vec![witness0]),
|
||||
cancelable: false,
|
||||
sign_only: false,
|
||||
signers: None,
|
||||
blockhash: None,
|
||||
},
|
||||
require_keypair: true
|
||||
}
|
||||
@ -1744,6 +1966,130 @@ mod tests {
|
||||
timestamp_pubkey: Some(witness0),
|
||||
witnesses: None,
|
||||
cancelable: false,
|
||||
sign_only: false,
|
||||
signers: None,
|
||||
blockhash: None,
|
||||
},
|
||||
require_keypair: true
|
||||
}
|
||||
);
|
||||
|
||||
// Test Pay Subcommand w/ sign-only
|
||||
let test_pay = test_commands.clone().get_matches_from(vec![
|
||||
"test",
|
||||
"pay",
|
||||
&pubkey_string,
|
||||
"50",
|
||||
"lamports",
|
||||
"--sign-only",
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_pay).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::Pay {
|
||||
lamports: 50,
|
||||
to: pubkey,
|
||||
timestamp: None,
|
||||
timestamp_pubkey: None,
|
||||
witnesses: None,
|
||||
cancelable: false,
|
||||
sign_only: true,
|
||||
signers: None,
|
||||
blockhash: None,
|
||||
},
|
||||
require_keypair: false
|
||||
}
|
||||
);
|
||||
|
||||
// Test Pay Subcommand w/ signer
|
||||
let key1 = Pubkey::new_rand();
|
||||
let sig1 = Keypair::new().sign_message(&[0u8]);
|
||||
let signer1 = format!("{}={}", key1, sig1);
|
||||
let test_pay = test_commands.clone().get_matches_from(vec![
|
||||
"test",
|
||||
"pay",
|
||||
&pubkey_string,
|
||||
"50",
|
||||
"lamports",
|
||||
"--signer",
|
||||
&signer1,
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_pay).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::Pay {
|
||||
lamports: 50,
|
||||
to: pubkey,
|
||||
timestamp: None,
|
||||
timestamp_pubkey: None,
|
||||
witnesses: None,
|
||||
cancelable: false,
|
||||
sign_only: false,
|
||||
signers: Some(vec![(key1, sig1)]),
|
||||
blockhash: None,
|
||||
},
|
||||
require_keypair: true
|
||||
}
|
||||
);
|
||||
|
||||
// Test Pay Subcommand w/ signers
|
||||
let key2 = Pubkey::new_rand();
|
||||
let sig2 = Keypair::new().sign_message(&[1u8]);
|
||||
let signer2 = format!("{}={}", key2, sig2);
|
||||
let test_pay = test_commands.clone().get_matches_from(vec![
|
||||
"test",
|
||||
"pay",
|
||||
&pubkey_string,
|
||||
"50",
|
||||
"lamports",
|
||||
"--signer",
|
||||
&signer1,
|
||||
"--signer",
|
||||
&signer2,
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_pay).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::Pay {
|
||||
lamports: 50,
|
||||
to: pubkey,
|
||||
timestamp: None,
|
||||
timestamp_pubkey: None,
|
||||
witnesses: None,
|
||||
cancelable: false,
|
||||
sign_only: false,
|
||||
signers: Some(vec![(key1, sig1), (key2, sig2)]),
|
||||
blockhash: None,
|
||||
},
|
||||
require_keypair: true
|
||||
}
|
||||
);
|
||||
|
||||
// Test Pay Subcommand w/ Blockhash
|
||||
let blockhash = Hash::default();
|
||||
let blockhash_string = format!("{}", blockhash);
|
||||
let test_pay = test_commands.clone().get_matches_from(vec![
|
||||
"test",
|
||||
"pay",
|
||||
&pubkey_string,
|
||||
"50",
|
||||
"lamports",
|
||||
"--blockhash",
|
||||
&blockhash_string,
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_pay).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::Pay {
|
||||
lamports: 50,
|
||||
to: pubkey,
|
||||
timestamp: None,
|
||||
timestamp_pubkey: None,
|
||||
witnesses: None,
|
||||
cancelable: false,
|
||||
sign_only: false,
|
||||
signers: None,
|
||||
blockhash: Some(blockhash),
|
||||
},
|
||||
require_keypair: true
|
||||
}
|
||||
@ -1788,6 +2134,9 @@ mod tests {
|
||||
timestamp_pubkey: Some(witness0),
|
||||
witnesses: Some(vec![witness0, witness1]),
|
||||
cancelable: false,
|
||||
sign_only: false,
|
||||
signers: None,
|
||||
blockhash: None,
|
||||
},
|
||||
require_keypair: true
|
||||
}
|
||||
@ -1866,8 +2215,20 @@ mod tests {
|
||||
assert_eq!(signature.unwrap(), SIGNATURE.to_string());
|
||||
|
||||
let new_authorized_pubkey = Pubkey::new_rand();
|
||||
config.command =
|
||||
CliCommand::VoteAuthorize(bob_pubkey, new_authorized_pubkey, VoteAuthorize::Voter);
|
||||
config.command = CliCommand::VoteAuthorize {
|
||||
vote_account_pubkey: bob_pubkey,
|
||||
new_authorized_pubkey,
|
||||
vote_authorize: VoteAuthorize::Voter,
|
||||
};
|
||||
let signature = process_command(&config);
|
||||
assert_eq!(signature.unwrap(), SIGNATURE.to_string());
|
||||
|
||||
let new_identity_pubkey = Pubkey::new_rand();
|
||||
config.command = CliCommand::VoteUpdateValidator {
|
||||
vote_account_pubkey: bob_pubkey,
|
||||
new_identity_pubkey,
|
||||
authorized_voter: Keypair::new().into(),
|
||||
};
|
||||
let signature = process_command(&config);
|
||||
assert_eq!(signature.unwrap(), SIGNATURE.to_string());
|
||||
|
||||
@ -1894,7 +2255,12 @@ mod tests {
|
||||
assert_eq!(signature.unwrap(), SIGNATURE.to_string());
|
||||
|
||||
let stake_pubkey = Pubkey::new_rand();
|
||||
config.command = CliCommand::DeactivateStake(stake_pubkey);
|
||||
config.command = CliCommand::DeactivateStake {
|
||||
stake_account_pubkey: stake_pubkey,
|
||||
sign_only: false,
|
||||
signers: None,
|
||||
blockhash: None,
|
||||
};
|
||||
let signature = process_command(&config);
|
||||
assert_eq!(signature.unwrap(), SIGNATURE.to_string());
|
||||
|
||||
@ -1915,6 +2281,9 @@ mod tests {
|
||||
timestamp_pubkey: None,
|
||||
witnesses: None,
|
||||
cancelable: false,
|
||||
sign_only: false,
|
||||
signers: None,
|
||||
blockhash: None,
|
||||
};
|
||||
let signature = process_command(&config);
|
||||
assert_eq!(signature.unwrap(), SIGNATURE.to_string());
|
||||
@ -1928,6 +2297,9 @@ mod tests {
|
||||
timestamp_pubkey: Some(config.keypair.pubkey()),
|
||||
witnesses: None,
|
||||
cancelable: false,
|
||||
sign_only: false,
|
||||
signers: None,
|
||||
blockhash: None,
|
||||
};
|
||||
let result = process_command(&config);
|
||||
let json: Value = serde_json::from_str(&result.unwrap()).unwrap();
|
||||
@ -1949,6 +2321,9 @@ mod tests {
|
||||
timestamp_pubkey: None,
|
||||
witnesses: Some(vec![witness]),
|
||||
cancelable: true,
|
||||
sign_only: false,
|
||||
signers: None,
|
||||
blockhash: None,
|
||||
};
|
||||
let result = process_command(&config);
|
||||
let json: Value = serde_json::from_str(&result.unwrap()).unwrap();
|
||||
@ -2036,7 +2411,18 @@ mod tests {
|
||||
};
|
||||
assert!(process_command(&config).is_err());
|
||||
|
||||
config.command = CliCommand::VoteAuthorize(bob_pubkey, bob_pubkey, VoteAuthorize::Voter);
|
||||
config.command = CliCommand::VoteAuthorize {
|
||||
vote_account_pubkey: bob_pubkey,
|
||||
new_authorized_pubkey: bob_pubkey,
|
||||
vote_authorize: VoteAuthorize::Voter,
|
||||
};
|
||||
assert!(process_command(&config).is_err());
|
||||
|
||||
config.command = CliCommand::VoteUpdateValidator {
|
||||
vote_account_pubkey: bob_pubkey,
|
||||
new_identity_pubkey: bob_pubkey,
|
||||
authorized_voter: Keypair::new().into(),
|
||||
};
|
||||
assert!(process_command(&config).is_err());
|
||||
|
||||
config.command = CliCommand::GetSlot {
|
||||
@ -2056,6 +2442,9 @@ mod tests {
|
||||
timestamp_pubkey: None,
|
||||
witnesses: None,
|
||||
cancelable: false,
|
||||
sign_only: false,
|
||||
signers: None,
|
||||
blockhash: None,
|
||||
};
|
||||
assert!(process_command(&config).is_err());
|
||||
|
||||
@ -2066,6 +2455,9 @@ mod tests {
|
||||
timestamp_pubkey: Some(config.keypair.pubkey()),
|
||||
witnesses: None,
|
||||
cancelable: false,
|
||||
sign_only: false,
|
||||
signers: None,
|
||||
blockhash: None,
|
||||
};
|
||||
assert!(process_command(&config).is_err());
|
||||
|
||||
@ -2076,6 +2468,9 @@ mod tests {
|
||||
timestamp_pubkey: None,
|
||||
witnesses: Some(vec![witness]),
|
||||
cancelable: true,
|
||||
sign_only: false,
|
||||
signers: None,
|
||||
blockhash: None,
|
||||
};
|
||||
assert!(process_command(&config).is_err());
|
||||
|
||||
|
@ -5,21 +5,22 @@ use crate::{
|
||||
},
|
||||
display::println_name_value,
|
||||
};
|
||||
use clap::{value_t_or_exit, App, Arg, ArgMatches, SubCommand};
|
||||
use clap::{value_t, value_t_or_exit, App, Arg, ArgMatches, SubCommand};
|
||||
use console::{style, Emoji};
|
||||
use indicatif::{ProgressBar, ProgressStyle};
|
||||
use solana_clap_utils::{input_parsers::*, input_validators::*};
|
||||
use solana_client::{rpc_client::RpcClient, rpc_request::RpcVoteAccountInfo};
|
||||
use solana_sdk::{
|
||||
clock,
|
||||
clock::{self, Slot},
|
||||
commitment_config::CommitmentConfig,
|
||||
epoch_schedule::{Epoch, EpochSchedule},
|
||||
hash::Hash,
|
||||
pubkey::Pubkey,
|
||||
signature::{Keypair, KeypairUtil},
|
||||
system_transaction,
|
||||
};
|
||||
use std::{
|
||||
collections::VecDeque,
|
||||
collections::{HashMap, VecDeque},
|
||||
net::SocketAddr,
|
||||
thread::sleep,
|
||||
time::{Duration, Instant},
|
||||
@ -53,6 +54,17 @@ impl ClusterQuerySubCommands for App<'_, '_> {
|
||||
.about("Get the version of the cluster entrypoint"),
|
||||
)
|
||||
.subcommand(SubCommand::with_name("fees").about("Display current cluster fees"))
|
||||
.subcommand(SubCommand::with_name("get-block-time")
|
||||
.about("Get estimated production time of a block")
|
||||
.arg(
|
||||
Arg::with_name("slot")
|
||||
.index(1)
|
||||
.takes_value(true)
|
||||
.value_name("SLOT")
|
||||
.required(true)
|
||||
.help("Slot number of the block to query")
|
||||
)
|
||||
)
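
A minimal usage sketch of the new subcommand (the slot number is arbitrary):

```bash
# Prints the estimated production time of slot 100, as reported by the RPC node
solana get-block-time 100
```
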
|
||||
.subcommand(
|
||||
SubCommand::with_name("get-epoch-info")
|
||||
.about("Get information about the current epoch")
|
||||
@ -136,6 +148,22 @@ impl ClusterQuerySubCommands for App<'_, '_> {
|
||||
),
|
||||
),
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("show-block-production")
|
||||
.about("Show information about block production")
|
||||
.arg(
|
||||
Arg::with_name("epoch")
|
||||
.long("epoch")
|
||||
.takes_value(true)
|
||||
.help("Epoch to show block production for [default: current epoch]"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("slot_limit")
|
||||
.long("slot-limit")
|
||||
.takes_value(true)
|
||||
.help("Limit results to this many slots from the end of the epoch [default: full epoch]"),
|
||||
),
|
||||
)
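
A hedged usage sketch of the new subcommand; the epoch and slot-limit values are illustrative:

```bash
# Block production for the current epoch
solana show-block-production

# Limit the report to the last 1000 slots of epoch 15
solana show-block-production --epoch 15 --slot-limit 1000
```
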
|
||||
.subcommand(
|
||||
SubCommand::with_name("show-gossip")
|
||||
.about("Show the current gossip network nodes"),
|
||||
@ -187,6 +215,14 @@ pub fn parse_cluster_ping(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, Cl
|
||||
})
|
||||
}
|
||||
|
||||
pub fn parse_get_block_time(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
|
||||
let slot = value_t_or_exit!(matches, "slot", u64);
|
||||
Ok(CliCommandInfo {
|
||||
command: CliCommand::GetBlockTime { slot },
|
||||
require_keypair: false,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn parse_get_epoch_info(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
|
||||
let commitment_config = if matches.is_present("confirmed") {
|
||||
CommitmentConfig::default()
|
||||
@ -241,6 +277,20 @@ fn new_spinner_progress_bar() -> ProgressBar {
|
||||
progress_bar
|
||||
}
|
||||
|
||||
/// Aggregate epoch credit stats and return (total credits, total slots, total epochs)
|
||||
pub fn aggregate_epoch_credits(
|
||||
epoch_credits: &[(Epoch, u64, u64)],
|
||||
epoch_schedule: &EpochSchedule,
|
||||
) -> (u64, u64, u64) {
|
||||
epoch_credits
|
||||
.iter()
|
||||
.fold((0, 0, 0), |acc, (epoch, credits, prev_credits)| {
|
||||
let credits_earned = credits - prev_credits;
|
||||
let slots_in_epoch = epoch_schedule.get_slots_in_epoch(*epoch);
|
||||
(acc.0 + credits_earned, acc.1 + slots_in_epoch, acc.2 + 1)
|
||||
})
|
||||
}
|
||||
|
||||
pub fn process_catchup(rpc_client: &RpcClient, node_pubkey: &Pubkey) -> ProcessResult {
|
||||
let cluster_nodes = rpc_client.get_cluster_nodes()?;
|
||||
|
||||
@ -313,6 +363,11 @@ pub fn process_fees(rpc_client: &RpcClient) -> ProcessResult {
|
||||
))
|
||||
}
|
||||
|
||||
pub fn process_get_block_time(rpc_client: &RpcClient, slot: Slot) -> ProcessResult {
|
||||
let timestamp = rpc_client.get_block_time(slot)?;
|
||||
Ok(timestamp.to_string())
|
||||
}
|
||||
|
||||
pub fn process_get_epoch_info(
|
||||
rpc_client: &RpcClient,
|
||||
commitment_config: &CommitmentConfig,
|
||||
@ -336,11 +391,7 @@ pub fn process_get_epoch_info(
|
||||
);
|
||||
println_name_value(
|
||||
"Time remaining in current epoch:",
|
||||
&format!(
|
||||
"{} minutes, {} seconds",
|
||||
remaining_time_in_epoch.as_secs() / 60,
|
||||
remaining_time_in_epoch.as_secs() % 60
|
||||
),
|
||||
&humantime::format_duration(remaining_time_in_epoch).to_string(),
|
||||
);
|
||||
Ok("".to_string())
|
||||
}
|
||||
@ -358,6 +409,149 @@ pub fn process_get_slot(
|
||||
Ok(slot.to_string())
|
||||
}
|
||||
|
||||
pub fn parse_show_block_production(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
|
||||
let epoch = value_t!(matches, "epoch", Epoch).ok();
|
||||
let slot_limit = value_t!(matches, "slot_limit", u64).ok();
|
||||
|
||||
Ok(CliCommandInfo {
|
||||
command: CliCommand::ShowBlockProduction { epoch, slot_limit },
|
||||
require_keypair: false,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn process_show_block_production(
|
||||
rpc_client: &RpcClient,
|
||||
epoch: Option<Epoch>,
|
||||
slot_limit: Option<u64>,
|
||||
) -> ProcessResult {
|
||||
let epoch_schedule = rpc_client.get_epoch_schedule()?;
|
||||
let epoch_info = rpc_client.get_epoch_info_with_commitment(CommitmentConfig::max())?;
|
||||
|
||||
let epoch = epoch.unwrap_or(epoch_info.epoch);
|
||||
|
||||
if epoch > epoch_info.epoch {
|
||||
return Err(format!("Epoch {} is in the future", epoch).into());
|
||||
}
|
||||
|
||||
let end_slot = std::cmp::min(
|
||||
epoch_info.absolute_slot,
|
||||
epoch_schedule.get_last_slot_in_epoch(epoch),
|
||||
);
|
||||
let start_slot = {
|
||||
let start_slot = epoch_schedule.get_first_slot_in_epoch(epoch);
|
||||
std::cmp::max(
|
||||
end_slot.saturating_sub(slot_limit.unwrap_or(start_slot)),
|
||||
start_slot,
|
||||
)
|
||||
};
|
||||
|
||||
let progress_bar = new_spinner_progress_bar();
|
||||
progress_bar.set_message("Connecting...");
|
||||
progress_bar.set_message(&format!("Fetching leader schedule for epoch {}...", epoch));
|
||||
|
||||
progress_bar.set_message(&format!(
|
||||
"Fetching confirmed blocks between slots {} and {}...",
|
||||
start_slot, end_slot
|
||||
));
|
||||
let confirmed_blocks = rpc_client.get_confirmed_blocks(start_slot, Some(end_slot))?;
|
||||
|
||||
let total_slots = (end_slot - start_slot + 1) as usize;
|
||||
let total_blocks = confirmed_blocks.len();
|
||||
assert!(total_blocks <= total_slots);
|
||||
let total_slots_missed = total_slots - total_blocks;
|
||||
let mut leader_slot_count = HashMap::new();
|
||||
let mut leader_missed_slots = HashMap::new();
|
||||
|
||||
let leader_schedule = rpc_client
|
||||
.get_leader_schedule_with_commitment(Some(start_slot), CommitmentConfig::max())?;
|
||||
if leader_schedule.is_none() {
|
||||
return Err(format!("Unable to fetch leader schedule for slot {}", start_slot).into());
|
||||
}
|
||||
let leader_schedule = leader_schedule.unwrap();
|
||||
|
||||
let mut leader_per_slot_index = Vec::new();
|
||||
leader_per_slot_index.resize(total_slots, "");
|
||||
for (pubkey, leader_slots) in leader_schedule.iter() {
|
||||
for slot_index in leader_slots.iter() {
|
||||
if *slot_index < total_slots {
|
||||
leader_per_slot_index[*slot_index] = pubkey;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
progress_bar.set_message(&format!(
|
||||
"Processing {} slots containing {} blocks and {} empty slots...",
|
||||
total_slots, total_blocks, total_slots_missed
|
||||
));
|
||||
|
||||
let mut confirmed_blocks_index = 0;
|
||||
for (slot_index, leader) in leader_per_slot_index.iter().enumerate().take(total_slots) {
|
||||
let slot = start_slot + slot_index as u64;
|
||||
let slot_count = leader_slot_count.entry(leader).or_insert(0);
|
||||
*slot_count += 1;
|
||||
let missed_slots = leader_missed_slots.entry(leader).or_insert(0);
|
||||
|
||||
loop {
|
||||
if !confirmed_blocks.is_empty() {
|
||||
let slot_of_next_confirmed_block = confirmed_blocks[confirmed_blocks_index];
|
||||
if slot_of_next_confirmed_block < slot {
|
||||
confirmed_blocks_index += 1;
|
||||
continue;
|
||||
}
|
||||
if slot_of_next_confirmed_block == slot {
|
||||
break;
|
||||
}
|
||||
}
|
||||
*missed_slots += 1;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
progress_bar.finish_and_clear();
|
||||
println!(
|
||||
"\n{}",
|
||||
style(format!(
|
||||
" {:<44} {:>15} {:>15} {:>15} {:>23}",
|
||||
"Identity Pubkey",
|
||||
"Leader Slots",
|
||||
"Blocks Produced",
|
||||
"Missed Slots",
|
||||
"Missed Block Percentage",
|
||||
))
|
||||
.bold()
|
||||
);
|
||||
|
||||
let mut table = vec![];
|
||||
for (leader, leader_slots) in leader_slot_count.iter() {
|
||||
let missed_slots = leader_missed_slots.get(leader).unwrap();
|
||||
let blocks_produced = leader_slots - missed_slots;
|
||||
table.push(format!(
|
||||
" {:<44} {:>15} {:>15} {:>15} {:>22.2}%",
|
||||
leader,
|
||||
leader_slots,
|
||||
blocks_produced,
|
||||
missed_slots,
|
||||
*missed_slots as f64 / *leader_slots as f64 * 100.
|
||||
));
|
||||
}
|
||||
table.sort();
|
||||
|
||||
println!(
|
||||
"{}\n\n {:<44} {:>15} {:>15} {:>15} {:>22.2}%",
|
||||
table.join("\n"),
|
||||
format!("Epoch {} total:", epoch),
|
||||
total_slots,
|
||||
total_blocks,
|
||||
total_slots_missed,
|
||||
total_slots_missed as f64 / total_slots as f64 * 100.
|
||||
);
|
||||
println!(
|
||||
" (using data from {} slots: {} to {})",
|
||||
total_slots, start_slot, end_slot
|
||||
);
|
||||
Ok("".to_string())
|
||||
}
|
||||
|
||||
pub fn process_get_transaction_count(
|
||||
rpc_client: &RpcClient,
|
||||
commitment_config: &CommitmentConfig,
|
||||
@ -399,7 +593,12 @@ pub fn process_ping(
|
||||
|
||||
let transaction =
|
||||
system_transaction::transfer(&config.keypair, &to, lamports, recent_blockhash);
|
||||
check_account_for_fee(rpc_client, config, &fee_calculator, &transaction.message)?;
|
||||
check_account_for_fee(
|
||||
rpc_client,
|
||||
&config.keypair.pubkey(),
|
||||
&fee_calculator,
|
||||
&transaction.message,
|
||||
)?;
|
||||
|
||||
match rpc_client.send_transaction(&transaction) {
|
||||
Ok(signature) => {
|
||||
@ -442,8 +641,7 @@ pub fn process_ping(
|
||||
// Sleep for half a slot
|
||||
if signal_receiver
|
||||
.recv_timeout(Duration::from_millis(
|
||||
500 * solana_sdk::clock::DEFAULT_TICKS_PER_SLOT
|
||||
/ solana_sdk::clock::DEFAULT_TICKS_PER_SECOND,
|
||||
500 * clock::DEFAULT_TICKS_PER_SLOT / clock::DEFAULT_TICKS_PER_SECOND,
|
||||
))
|
||||
.is_ok()
|
||||
{
|
||||
@ -526,6 +724,7 @@ pub fn process_show_gossip(rpc_client: &RpcClient) -> ProcessResult {
|
||||
}
|
||||
|
||||
pub fn process_show_validators(rpc_client: &RpcClient, use_lamports_unit: bool) -> ProcessResult {
|
||||
let epoch_schedule = rpc_client.get_epoch_schedule()?;
|
||||
let vote_accounts = rpc_client.get_vote_accounts()?;
|
||||
let total_active_stake = vote_accounts
|
||||
.current
|
||||
@ -568,19 +767,21 @@ pub fn process_show_validators(rpc_client: &RpcClient, use_lamports_unit: bool)
|
||||
println!(
|
||||
"{}",
|
||||
style(format!(
|
||||
" {:<44} {:<44} {:<11} {:>10} {:>11} {}",
|
||||
" {:<44} {:<44} {} {} {} {:>7} {}",
|
||||
"Identity Pubkey",
|
||||
"Vote Account Pubkey",
|
||||
"Commission",
|
||||
"Last Vote",
|
||||
"Root Block",
|
||||
"Uptime",
|
||||
"Active Stake",
|
||||
))
|
||||
.bold()
|
||||
);
|
||||
|
||||
fn print_vote_account(
|
||||
vote_account: &RpcVoteAccountInfo,
|
||||
vote_account: RpcVoteAccountInfo,
|
||||
epoch_schedule: &EpochSchedule,
|
||||
total_active_stake: f64,
|
||||
use_lamports_unit: bool,
|
||||
delinquent: bool,
|
||||
@ -592,8 +793,20 @@ pub fn process_show_validators(rpc_client: &RpcClient, use_lamports_unit: bool)
|
||||
format!("{}", v)
|
||||
}
|
||||
}
|
||||
|
||||
fn uptime(epoch_credits: Vec<(Epoch, u64, u64)>, epoch_schedule: &EpochSchedule) -> String {
|
||||
let (total_credits, total_slots, _) =
|
||||
aggregate_epoch_credits(&epoch_credits, &epoch_schedule);
|
||||
if total_slots > 0 {
|
||||
let total_uptime = 100_f64 * total_credits as f64 / total_slots as f64;
|
||||
format!("{:.2}%", total_uptime)
|
||||
} else {
|
||||
"-".into()
|
||||
}
|
||||
}
|
||||
|
||||
println!(
|
||||
"{} {:<44} {:<44} {:>3} ({:>4.1}%) {:>10} {:>11} {:>11}",
|
||||
"{} {:<44} {:<44} {:>9}% {:>8} {:>10} {:>7} {}",
|
||||
if delinquent {
|
||||
WARNING.to_string()
|
||||
} else {
|
||||
@ -602,9 +815,9 @@ pub fn process_show_validators(rpc_client: &RpcClient, use_lamports_unit: bool)
|
||||
vote_account.node_pubkey,
|
||||
vote_account.vote_pubkey,
|
||||
vote_account.commission,
|
||||
f64::from(vote_account.commission) * 100.0 / f64::from(std::u8::MAX),
|
||||
non_zero_or_dash(vote_account.last_vote),
|
||||
non_zero_or_dash(vote_account.root_slot),
|
||||
uptime(vote_account.epoch_credits, epoch_schedule),
|
||||
if vote_account.activated_stake > 0 {
|
||||
format!(
|
||||
"{} ({:.2}%)",
|
||||
@ -617,11 +830,23 @@ pub fn process_show_validators(rpc_client: &RpcClient, use_lamports_unit: bool)
|
||||
);
|
||||
}
|
||||
|
||||
for vote_account in vote_accounts.current.iter() {
|
||||
print_vote_account(vote_account, total_active_stake, use_lamports_unit, false);
|
||||
for vote_account in vote_accounts.current.into_iter() {
|
||||
print_vote_account(
|
||||
vote_account,
|
||||
&epoch_schedule,
|
||||
total_active_stake,
|
||||
use_lamports_unit,
|
||||
false,
|
||||
);
|
||||
}
|
||||
for vote_account in vote_accounts.delinquent.iter() {
|
||||
print_vote_account(vote_account, total_active_stake, use_lamports_unit, true);
|
||||
for vote_account in vote_accounts.delinquent.into_iter() {
|
||||
print_vote_account(
|
||||
vote_account,
|
||||
&epoch_schedule,
|
||||
total_active_stake,
|
||||
use_lamports_unit,
|
||||
true,
|
||||
);
|
||||
}
|
||||
|
||||
Ok("".to_string())
|
||||
@ -656,6 +881,20 @@ mod tests {
|
||||
}
|
||||
);
|
||||
|
||||
let slot = 100;
|
||||
let test_get_block_time = test_commands.clone().get_matches_from(vec![
|
||||
"test",
|
||||
"get-block-time",
|
||||
&slot.to_string(),
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_get_block_time).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::GetBlockTime { slot },
|
||||
require_keypair: false
|
||||
}
|
||||
);
|
||||
|
||||
let test_get_epoch_info = test_commands
|
||||
.clone()
|
||||
.get_matches_from(vec!["test", "get-epoch-info"]);
|
||||
|
@ -1,4 +1,5 @@
use console::style;
use solana_sdk::transaction::Transaction;

// Pretty print a "name value"
pub fn println_name_value(name: &str, value: &str) {
@ -22,3 +23,14 @@ pub fn println_name_value_or(name: &str, value: &str, default_value: &str) {
println!("{} {}", style(name).bold(), style(value));
};
}

pub fn println_signers(tx: &Transaction) {
println!();
println!("Blockhash: {}", tx.message.recent_blockhash);
println!("Signers (Pubkey=Signature):");
tx.signatures
.iter()
.zip(tx.message.account_keys.clone())
.for_each(|(signature, pubkey)| println!(" {:?}={:?}", pubkey, signature));
println!();
}

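Aside: the Pubkey=Signature pairs printed by println_signers have the same shape as the new --signer PUBKEY=BASE58_SIG argument. A small self-contained sketch of that round trip, modeled on the parsing done by the offline tests later in this diff; parse_signer_pair is an illustrative name and not part of this change:

use solana_sdk::{
    pubkey::Pubkey,
    signature::{Keypair, KeypairUtil, Signature},
};
use std::str::FromStr;

// Split a "PUBKEY=BASE58_SIG" pair back into its parts, the same way the
// offline tests in this change parse the sign-only output.
fn parse_signer_pair(pair: &str) -> Option<(Pubkey, Signature)> {
    let mut parts = pair.split('=');
    let key = Pubkey::from_str(parts.next()?).ok()?;
    let sig = Signature::from_str(parts.next()?).ok()?;
    Some((key, sig))
}

fn main() {
    let keypair = Keypair::new();
    let signature = keypair.sign_message(&[0u8]);
    // Format the pair the way the tests' format!("{}={}", key, sig) does.
    let pair = format!("{}={}", keypair.pubkey(), signature);
    let (key, sig) = parse_signer_pair(&pair).unwrap();
    assert_eq!(key, keypair.pubkey());
    assert_eq!(sig, signature);
}
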
397
cli/src/stake.rs
@ -1,14 +1,16 @@
|
||||
use crate::cli::{
|
||||
build_balance_message, check_account_for_fee, check_unique_pubkeys,
|
||||
log_instruction_custom_error, CliCommand, CliCommandInfo, CliConfig, CliError, ProcessResult,
|
||||
get_blockhash_fee_calculator, log_instruction_custom_error, replace_signatures, return_signers,
|
||||
CliCommand, CliCommandInfo, CliConfig, CliError, ProcessResult,
|
||||
};
|
||||
use clap::{App, Arg, ArgMatches, SubCommand};
|
||||
use console::style;
|
||||
use solana_clap_utils::{input_parsers::*, input_validators::*};
|
||||
use solana_client::rpc_client::RpcClient;
|
||||
use solana_sdk::signature::Keypair;
|
||||
use solana_sdk::signature::{Keypair, Signature};
|
||||
use solana_sdk::{
|
||||
account_utils::State,
|
||||
hash::Hash,
|
||||
pubkey::Pubkey,
|
||||
signature::KeypairUtil,
|
||||
system_instruction::SystemError,
|
||||
@ -120,6 +122,29 @@ impl StakeSubCommands for App<'_, '_> {
|
||||
.validator(is_pubkey_or_keypair)
|
||||
.help("The vote account to which the stake will be delegated")
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("sign_only")
|
||||
.long("sign-only")
|
||||
.takes_value(false)
|
||||
.help("Sign the transaction offline"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("signer")
|
||||
.long("signer")
|
||||
.value_name("PUBKEY=BASE58_SIG")
|
||||
.takes_value(true)
|
||||
.validator(is_pubkey_sig)
|
||||
.multiple(true)
|
||||
.help("Provide a public-key/signature pair for the transaction"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("blockhash")
|
||||
.long("blockhash")
|
||||
.value_name("BLOCKHASH")
|
||||
.takes_value(true)
|
||||
.validator(is_hash)
|
||||
.help("Use the supplied blockhash"),
|
||||
),
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("stake-authorize-staker")
|
||||
@ -176,6 +201,29 @@ impl StakeSubCommands for App<'_, '_> {
|
||||
.required(true)
|
||||
.help("Stake account to be deactivated.")
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("sign_only")
|
||||
.long("sign-only")
|
||||
.takes_value(false)
|
||||
.help("Sign the transaction offline"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("signer")
|
||||
.long("signer")
|
||||
.value_name("PUBKEY=BASE58_SIG")
|
||||
.takes_value(true)
|
||||
.validator(is_pubkey_sig)
|
||||
.multiple(true)
|
||||
.help("Provide a public-key/signature pair for the transaction"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("blockhash")
|
||||
.long("blockhash")
|
||||
.value_name("BLOCKHASH")
|
||||
.takes_value(true)
|
||||
.validator(is_hash)
|
||||
.help("Use the supplied blockhash"),
|
||||
),
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("withdraw-stake")
|
||||
@ -293,10 +341,21 @@ pub fn parse_stake_delegate_stake(matches: &ArgMatches<'_>) -> Result<CliCommand
|
||||
let stake_account_pubkey = pubkey_of(matches, "stake_account_pubkey").unwrap();
|
||||
let vote_account_pubkey = pubkey_of(matches, "vote_account_pubkey").unwrap();
|
||||
let force = matches.is_present("force");
|
||||
let sign_only = matches.is_present("sign_only");
|
||||
let signers = pubkeys_sigs_of(&matches, "signer");
|
||||
let blockhash = value_of(matches, "blockhash");
|
||||
let require_keypair = signers.is_none();
|
||||
|
||||
Ok(CliCommandInfo {
|
||||
command: CliCommand::DelegateStake(stake_account_pubkey, vote_account_pubkey, force),
|
||||
require_keypair: true,
|
||||
command: CliCommand::DelegateStake {
|
||||
stake_account_pubkey,
|
||||
vote_account_pubkey,
|
||||
force,
|
||||
sign_only,
|
||||
signers,
|
||||
blockhash,
|
||||
},
|
||||
require_keypair,
|
||||
})
|
||||
}
|
||||
|
||||
@ -320,6 +379,7 @@ pub fn parse_stake_authorize(
|
||||
pub fn parse_redeem_vote_credits(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
|
||||
let stake_account_pubkey = pubkey_of(matches, "stake_account_pubkey").unwrap();
|
||||
let vote_account_pubkey = pubkey_of(matches, "vote_account_pubkey").unwrap();
|
||||
|
||||
Ok(CliCommandInfo {
|
||||
command: CliCommand::RedeemVoteCredits(stake_account_pubkey, vote_account_pubkey),
|
||||
require_keypair: true,
|
||||
@ -328,9 +388,19 @@ pub fn parse_redeem_vote_credits(matches: &ArgMatches<'_>) -> Result<CliCommandI
|
||||
|
||||
pub fn parse_stake_deactivate_stake(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
|
||||
let stake_account_pubkey = pubkey_of(matches, "stake_account_pubkey").unwrap();
|
||||
let sign_only = matches.is_present("sign_only");
|
||||
let signers = pubkeys_sigs_of(&matches, "signer");
|
||||
let blockhash = value_of(matches, "blockhash");
|
||||
let require_keypair = signers.is_none();
|
||||
|
||||
Ok(CliCommandInfo {
|
||||
command: CliCommand::DeactivateStake(stake_account_pubkey),
|
||||
require_keypair: true,
|
||||
command: CliCommand::DeactivateStake {
|
||||
stake_account_pubkey,
|
||||
sign_only,
|
||||
signers,
|
||||
blockhash,
|
||||
},
|
||||
require_keypair,
|
||||
})
|
||||
}
|
||||
|
||||
@ -423,7 +493,12 @@ pub fn process_create_stake_account(
|
||||
&[&config.keypair, stake_account],
|
||||
recent_blockhash,
|
||||
);
|
||||
check_account_for_fee(rpc_client, config, &fee_calculator, &tx.message)?;
|
||||
check_account_for_fee(
|
||||
rpc_client,
|
||||
&config.keypair.pubkey(),
|
||||
&fee_calculator,
|
||||
&tx.message,
|
||||
)?;
|
||||
let result =
|
||||
rpc_client.send_and_confirm_transaction(&mut tx, &[&config.keypair, stake_account]);
|
||||
log_instruction_custom_error::<SystemError>(result)
|
||||
@ -454,7 +529,12 @@ pub fn process_stake_authorize(
|
||||
&[&config.keypair],
|
||||
recent_blockhash,
|
||||
);
|
||||
check_account_for_fee(rpc_client, config, &fee_calculator, &tx.message)?;
|
||||
check_account_for_fee(
|
||||
rpc_client,
|
||||
&config.keypair.pubkey(),
|
||||
&fee_calculator,
|
||||
&tx.message,
|
||||
)?;
|
||||
let result = rpc_client.send_and_confirm_transaction(&mut tx, &[&config.keypair]);
|
||||
log_instruction_custom_error::<StakeError>(result)
|
||||
}
|
||||
@ -463,8 +543,12 @@ pub fn process_deactivate_stake_account(
rpc_client: &RpcClient,
config: &CliConfig,
stake_account_pubkey: &Pubkey,
sign_only: bool,
signers: &Option<Vec<(Pubkey, Signature)>>,
blockhash: Option<Hash>,
) -> ProcessResult {
let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;
let (recent_blockhash, fee_calculator) =
get_blockhash_fee_calculator(rpc_client, sign_only, blockhash)?;
let ixs = vec![stake_instruction::deactivate_stake(
stake_account_pubkey,
&config.keypair.pubkey(),
@ -475,9 +559,21 @@ pub fn process_deactivate_stake_account(
&[&config.keypair],
recent_blockhash,
);
check_account_for_fee(rpc_client, config, &fee_calculator, &tx.message)?;
let result = rpc_client.send_and_confirm_transaction(&mut tx, &[&config.keypair]);
log_instruction_custom_error::<StakeError>(result)
if let Some(signers) = signers {
replace_signatures(&mut tx, &signers)?;
}
if sign_only {
return_signers(&tx)
} else {
check_account_for_fee(
rpc_client,
&tx.message.account_keys[0],
&fee_calculator,
&tx.message,
)?;
let result = rpc_client.send_and_confirm_transaction(&mut tx, &[&config.keypair]);
log_instruction_custom_error::<StakeError>(result)
}
}

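Aside: get_blockhash_fee_calculator and replace_signatures are defined in cli.rs and do not appear in this excerpt. The sketch below is only an assumption about what the blockhash helper plausibly does (skip the RPC round trip when signing offline with a supplied blockhash); it is not the code from this change:

use solana_client::rpc_client::RpcClient;
use solana_sdk::{fee_calculator::FeeCalculator, hash::Hash};

// Assumed behavior only; the real helper lives in cli.rs, outside this excerpt.
fn get_blockhash_fee_calculator(
    rpc_client: &RpcClient,
    sign_only: bool,
    blockhash: Option<Hash>,
) -> Result<(Hash, FeeCalculator), Box<dyn std::error::Error>> {
    Ok(match (blockhash, sign_only) {
        // Offline: trust the caller-supplied blockhash and skip the RPC round trip.
        (Some(blockhash), true) => (blockhash, FeeCalculator::default()),
        // Online with a pinned blockhash: still fetch the current fee schedule.
        (Some(blockhash), false) => (blockhash, rpc_client.get_recent_blockhash()?.1),
        // No blockhash supplied: query the cluster as before.
        (None, _) => rpc_client.get_recent_blockhash()?,
    })
}
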
pub fn process_withdraw_stake(
|
||||
@ -502,7 +598,12 @@ pub fn process_withdraw_stake(
|
||||
&[&config.keypair],
|
||||
recent_blockhash,
|
||||
);
|
||||
check_account_for_fee(rpc_client, config, &fee_calculator, &tx.message)?;
|
||||
check_account_for_fee(
|
||||
rpc_client,
|
||||
&config.keypair.pubkey(),
|
||||
&fee_calculator,
|
||||
&tx.message,
|
||||
)?;
|
||||
let result = rpc_client.send_and_confirm_transaction(&mut tx, &[&config.keypair]);
|
||||
log_instruction_custom_error::<StakeError>(result)
|
||||
}
|
||||
@ -524,7 +625,12 @@ pub fn process_redeem_vote_credits(
|
||||
&[&config.keypair],
|
||||
recent_blockhash,
|
||||
);
|
||||
check_account_for_fee(rpc_client, config, &fee_calculator, &tx.message)?;
|
||||
check_account_for_fee(
|
||||
rpc_client,
|
||||
&config.keypair.pubkey(),
|
||||
&fee_calculator,
|
||||
&tx.message,
|
||||
)?;
|
||||
let result = rpc_client.send_and_confirm_transaction(&mut tx, &[&config.keypair]);
|
||||
log_instruction_custom_error::<StakeError>(result)
|
||||
}
|
||||
@ -619,7 +725,7 @@ pub fn process_show_stake_history(
|
||||
println!(
|
||||
"{}",
|
||||
style(format!(
|
||||
" {:<5} {:>15} {:>16} {:>18}",
|
||||
" {:<5} {:>20} {:>20} {:>20}",
|
||||
"Epoch", "Effective Stake", "Activating Stake", "Deactivating Stake",
|
||||
))
|
||||
.bold()
|
||||
@ -627,7 +733,7 @@ pub fn process_show_stake_history(
|
||||
|
||||
for (epoch, entry) in stake_history.deref() {
|
||||
println!(
|
||||
" {:>5} {:>15} {:>16} {:>18} {}",
|
||||
" {:>5} {:>20} {:>20} {:>20} {}",
|
||||
epoch,
|
||||
build_balance_message(entry.effective, use_lamports_unit, false),
|
||||
build_balance_message(entry.activating, use_lamports_unit, false),
|
||||
@ -644,6 +750,9 @@ pub fn process_delegate_stake(
|
||||
stake_account_pubkey: &Pubkey,
|
||||
vote_account_pubkey: &Pubkey,
|
||||
force: bool,
|
||||
sign_only: bool,
|
||||
signers: &Option<Vec<(Pubkey, Signature)>>,
|
||||
blockhash: Option<Hash>,
|
||||
) -> ProcessResult {
|
||||
check_unique_pubkeys(
|
||||
(&config.keypair.pubkey(), "cli keypair".to_string()),
|
||||
@ -690,7 +799,8 @@ pub fn process_delegate_stake(
|
||||
}
|
||||
}
|
||||
|
||||
let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;
|
||||
let (recent_blockhash, fee_calculator) =
|
||||
get_blockhash_fee_calculator(rpc_client, sign_only, blockhash)?;
|
||||
|
||||
let ixs = vec![stake_instruction::delegate_stake(
|
||||
stake_account_pubkey,
|
||||
@ -704,9 +814,21 @@ pub fn process_delegate_stake(
|
||||
&[&config.keypair],
|
||||
recent_blockhash,
|
||||
);
|
||||
check_account_for_fee(rpc_client, config, &fee_calculator, &tx.message)?;
|
||||
let result = rpc_client.send_and_confirm_transaction(&mut tx, &[&config.keypair]);
|
||||
log_instruction_custom_error::<StakeError>(result)
|
||||
if let Some(signers) = signers {
|
||||
replace_signatures(&mut tx, &signers)?;
|
||||
}
|
||||
if sign_only {
|
||||
return_signers(&tx)
|
||||
} else {
|
||||
check_account_for_fee(
|
||||
rpc_client,
|
||||
&tx.message.account_keys[0],
|
||||
&fee_calculator,
|
||||
&tx.message,
|
||||
)?;
|
||||
let result = rpc_client.send_and_confirm_transaction(&mut tx, &[&config.keypair]);
|
||||
log_instruction_custom_error::<StakeError>(result)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
@ -831,18 +953,25 @@ mod tests {
|
||||
);
|
||||
|
||||
// Test DelegateStake Subcommand
|
||||
let stake_pubkey = Pubkey::new_rand();
|
||||
let stake_pubkey_string = stake_pubkey.to_string();
|
||||
let vote_account_pubkey = Pubkey::new_rand();
|
||||
let vote_account_string = vote_account_pubkey.to_string();
|
||||
let test_delegate_stake = test_commands.clone().get_matches_from(vec![
|
||||
"test",
|
||||
"delegate-stake",
|
||||
&stake_pubkey_string,
|
||||
&stake_account_string,
|
||||
&vote_account_string,
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_delegate_stake).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::DelegateStake(stake_pubkey, stake_account_pubkey, false),
|
||||
command: CliCommand::DelegateStake {
|
||||
stake_account_pubkey,
|
||||
vote_account_pubkey,
|
||||
force: false,
|
||||
sign_only: false,
|
||||
signers: None,
|
||||
blockhash: None
|
||||
},
|
||||
require_keypair: true
|
||||
}
|
||||
);
|
||||
@ -851,22 +980,133 @@ mod tests {
|
||||
"test",
|
||||
"delegate-stake",
|
||||
"--force",
|
||||
&stake_pubkey_string,
|
||||
&stake_account_string,
|
||||
&vote_account_string,
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_delegate_stake).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::DelegateStake(stake_pubkey, stake_account_pubkey, true),
|
||||
command: CliCommand::DelegateStake {
|
||||
stake_account_pubkey,
|
||||
vote_account_pubkey,
|
||||
force: true,
|
||||
sign_only: false,
|
||||
signers: None,
|
||||
blockhash: None
|
||||
},
|
||||
require_keypair: true
|
||||
}
|
||||
);
|
||||
|
||||
// Test Delegate Subcommand w/ Blockhash
|
||||
let blockhash = Hash::default();
|
||||
let blockhash_string = format!("{}", blockhash);
|
||||
let test_delegate_stake = test_commands.clone().get_matches_from(vec![
|
||||
"test",
|
||||
"delegate-stake",
|
||||
&stake_account_string,
|
||||
&vote_account_string,
|
||||
"--blockhash",
|
||||
&blockhash_string,
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_delegate_stake).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::DelegateStake {
|
||||
stake_account_pubkey,
|
||||
vote_account_pubkey,
|
||||
force: false,
|
||||
sign_only: false,
|
||||
signers: None,
|
||||
blockhash: Some(blockhash)
|
||||
},
|
||||
require_keypair: true
|
||||
}
|
||||
);
|
||||
|
||||
let test_delegate_stake = test_commands.clone().get_matches_from(vec![
|
||||
"test",
|
||||
"delegate-stake",
|
||||
&stake_account_string,
|
||||
&vote_account_string,
|
||||
"--sign-only",
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_delegate_stake).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::DelegateStake {
|
||||
stake_account_pubkey,
|
||||
vote_account_pubkey,
|
||||
force: false,
|
||||
sign_only: true,
|
||||
signers: None,
|
||||
blockhash: None
|
||||
},
|
||||
require_keypair: true
|
||||
}
|
||||
);
|
||||
|
||||
// Test Delegate Subcommand w/ signer
|
||||
let key1 = Pubkey::new_rand();
|
||||
let sig1 = Keypair::new().sign_message(&[0u8]);
|
||||
let signer1 = format!("{}={}", key1, sig1);
|
||||
let test_delegate_stake = test_commands.clone().get_matches_from(vec![
|
||||
"test",
|
||||
"delegate-stake",
|
||||
&stake_account_string,
|
||||
&vote_account_string,
|
||||
"--signer",
|
||||
&signer1,
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_delegate_stake).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::DelegateStake {
|
||||
stake_account_pubkey,
|
||||
vote_account_pubkey,
|
||||
force: false,
|
||||
sign_only: false,
|
||||
signers: Some(vec![(key1, sig1)]),
|
||||
blockhash: None
|
||||
},
|
||||
require_keypair: false
|
||||
}
|
||||
);
|
||||
|
||||
// Test Delegate Subcommand w/ signers
|
||||
let key2 = Pubkey::new_rand();
|
||||
let sig2 = Keypair::new().sign_message(&[0u8]);
|
||||
let signer2 = format!("{}={}", key2, sig2);
|
||||
let test_delegate_stake = test_commands.clone().get_matches_from(vec![
|
||||
"test",
|
||||
"delegate-stake",
|
||||
&stake_account_string,
|
||||
&vote_account_string,
|
||||
"--signer",
|
||||
&signer1,
|
||||
"--signer",
|
||||
&signer2,
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_delegate_stake).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::DelegateStake {
|
||||
stake_account_pubkey,
|
||||
vote_account_pubkey,
|
||||
force: false,
|
||||
sign_only: false,
|
||||
signers: Some(vec![(key1, sig1), (key2, sig2)]),
|
||||
blockhash: None
|
||||
},
|
||||
require_keypair: false
|
||||
}
|
||||
);
|
||||
|
||||
// Test WithdrawStake Subcommand
|
||||
let test_withdraw_stake = test_commands.clone().get_matches_from(vec![
|
||||
"test",
|
||||
"withdraw-stake",
|
||||
&stake_pubkey_string,
|
||||
&stake_account_string,
|
||||
&stake_account_string,
|
||||
"42",
|
||||
"lamports",
|
||||
@ -875,7 +1115,7 @@ mod tests {
|
||||
assert_eq!(
|
||||
parse_command(&test_withdraw_stake).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::WithdrawStake(stake_pubkey, stake_account_pubkey, 42),
|
||||
command: CliCommand::WithdrawStake(stake_account_pubkey, stake_account_pubkey, 42),
|
||||
require_keypair: true
|
||||
}
|
||||
);
|
||||
@ -884,14 +1124,111 @@ mod tests {
|
||||
let test_deactivate_stake = test_commands.clone().get_matches_from(vec![
|
||||
"test",
|
||||
"deactivate-stake",
|
||||
&stake_pubkey_string,
|
||||
&stake_account_string,
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_deactivate_stake).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::DeactivateStake(stake_pubkey),
|
||||
command: CliCommand::DeactivateStake {
|
||||
stake_account_pubkey,
|
||||
sign_only: false,
|
||||
signers: None,
|
||||
blockhash: None
|
||||
},
|
||||
require_keypair: true
|
||||
}
|
||||
);
|
||||
|
||||
// Test Deactivate Subcommand w/ Blockhash
|
||||
let blockhash = Hash::default();
|
||||
let blockhash_string = format!("{}", blockhash);
|
||||
let test_deactivate_stake = test_commands.clone().get_matches_from(vec![
|
||||
"test",
|
||||
"deactivate-stake",
|
||||
&stake_account_string,
|
||||
"--blockhash",
|
||||
&blockhash_string,
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_deactivate_stake).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::DeactivateStake {
|
||||
stake_account_pubkey,
|
||||
sign_only: false,
|
||||
signers: None,
|
||||
blockhash: Some(blockhash)
|
||||
},
|
||||
require_keypair: true
|
||||
}
|
||||
);
|
||||
|
||||
let test_deactivate_stake = test_commands.clone().get_matches_from(vec![
|
||||
"test",
|
||||
"deactivate-stake",
|
||||
&stake_account_string,
|
||||
"--sign-only",
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_deactivate_stake).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::DeactivateStake {
|
||||
stake_account_pubkey,
|
||||
sign_only: true,
|
||||
signers: None,
|
||||
blockhash: None
|
||||
},
|
||||
require_keypair: true
|
||||
}
|
||||
);
|
||||
|
||||
// Test Deactivate Subcommand w/ signers
|
||||
let key1 = Pubkey::new_rand();
|
||||
let sig1 = Keypair::new().sign_message(&[0u8]);
|
||||
let signer1 = format!("{}={}", key1, sig1);
|
||||
let test_deactivate_stake = test_commands.clone().get_matches_from(vec![
|
||||
"test",
|
||||
"deactivate-stake",
|
||||
&stake_account_string,
|
||||
"--signer",
|
||||
&signer1,
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_deactivate_stake).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::DeactivateStake {
|
||||
stake_account_pubkey,
|
||||
sign_only: false,
|
||||
signers: Some(vec![(key1, sig1)]),
|
||||
blockhash: None
|
||||
},
|
||||
require_keypair: false
|
||||
}
|
||||
);
|
||||
|
||||
// Test Deactivate Subcommand w/ signers
|
||||
let key2 = Pubkey::new_rand();
|
||||
let sig2 = Keypair::new().sign_message(&[0u8]);
|
||||
let signer2 = format!("{}={}", key2, sig2);
|
||||
let test_deactivate_stake = test_commands.clone().get_matches_from(vec![
|
||||
"test",
|
||||
"deactivate-stake",
|
||||
&stake_account_string,
|
||||
"--signer",
|
||||
&signer1,
|
||||
"--signer",
|
||||
&signer2,
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_deactivate_stake).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::DeactivateStake {
|
||||
stake_account_pubkey,
|
||||
sign_only: false,
|
||||
signers: Some(vec![(key1, sig1), (key2, sig2)]),
|
||||
blockhash: None
|
||||
},
|
||||
require_keypair: false
|
||||
}
|
||||
);
|
||||
}
|
||||
}
|
||||
|
@ -176,7 +176,12 @@ pub fn process_create_storage_account(
|
||||
ixs,
|
||||
recent_blockhash,
|
||||
);
|
||||
check_account_for_fee(rpc_client, config, &fee_calculator, &tx.message)?;
|
||||
check_account_for_fee(
|
||||
rpc_client,
|
||||
&config.keypair.pubkey(),
|
||||
&fee_calculator,
|
||||
&tx.message,
|
||||
)?;
|
||||
let result =
|
||||
rpc_client.send_and_confirm_transaction(&mut tx, &[&config.keypair, &storage_account]);
|
||||
log_instruction_custom_error::<SystemError>(result)
|
||||
@ -196,7 +201,12 @@ pub fn process_claim_storage_reward(
|
||||
let message = Message::new_with_payer(vec![instruction], Some(&signers[0].pubkey()));
|
||||
|
||||
let mut tx = Transaction::new(&signers, message, recent_blockhash);
|
||||
check_account_for_fee(rpc_client, config, &fee_calculator, &tx.message)?;
|
||||
check_account_for_fee(
|
||||
rpc_client,
|
||||
&config.keypair.pubkey(),
|
||||
&fee_calculator,
|
||||
&tx.message,
|
||||
)?;
|
||||
let signature_str = rpc_client.send_and_confirm_transaction(&mut tx, &signers)?;
|
||||
Ok(signature_str.to_string())
|
||||
}
|
||||
|
@ -347,7 +347,12 @@ pub fn process_set_validator_info(
|
||||
// Submit transaction
|
||||
let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;
|
||||
let mut tx = Transaction::new(&signers, message, recent_blockhash);
|
||||
check_account_for_fee(rpc_client, config, &fee_calculator, &tx.message)?;
|
||||
check_account_for_fee(
|
||||
rpc_client,
|
||||
&config.keypair.pubkey(),
|
||||
&fee_calculator,
|
||||
&tx.message,
|
||||
)?;
|
||||
let signature_str = rpc_client.send_and_confirm_transaction(&mut tx, &signers)?;
|
||||
|
||||
println!("Success! Validator info published at: {:?}", info_pubkey);
|
||||
|
203
cli/src/vote.rs
@ -1,6 +1,10 @@
use crate::cli::{
build_balance_message, check_account_for_fee, check_unique_pubkeys,
log_instruction_custom_error, CliCommand, CliCommandInfo, CliConfig, CliError, ProcessResult,
use crate::{
cli::{
build_balance_message, check_account_for_fee, check_unique_pubkeys,
log_instruction_custom_error, CliCommand, CliCommandInfo, CliConfig, CliError,
ProcessResult,
},
cluster_query::aggregate_epoch_credits,
};
use clap::{value_t_or_exit, App, Arg, ArgMatches, SubCommand};
use solana_clap_utils::{input_parsers::*, input_validators::*};
@ -34,9 +38,9 @@ impl VoteSubCommands for App<'_, '_> {
|
||||
.help("Vote account keypair to fund"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("node_pubkey")
|
||||
Arg::with_name("identity_pubkey")
|
||||
.index(2)
|
||||
.value_name("VALIDATOR PUBKEY")
|
||||
.value_name("VALIDATOR IDENTITY PUBKEY")
|
||||
.takes_value(true)
|
||||
.required(true)
|
||||
.validator(is_pubkey_or_keypair)
|
||||
@ -47,7 +51,7 @@ impl VoteSubCommands for App<'_, '_> {
|
||||
.long("commission")
|
||||
.value_name("NUM")
|
||||
.takes_value(true)
|
||||
.help("The commission taken on reward redemption (0-255), default: 0"),
|
||||
.help("The commission taken on reward redemption (0-100), default: 0"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("authorized_voter")
|
||||
@ -66,6 +70,37 @@ impl VoteSubCommands for App<'_, '_> {
|
||||
.help("Public key of the authorized withdrawer (defaults to cli config pubkey)"),
|
||||
),
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("vote-update-validator")
|
||||
.about("Update the vote account's validator identity")
|
||||
.arg(
|
||||
Arg::with_name("vote_account_pubkey")
|
||||
.index(1)
|
||||
.value_name("VOTE ACCOUNT PUBKEY")
|
||||
.takes_value(true)
|
||||
.required(true)
|
||||
.validator(is_pubkey_or_keypair)
|
||||
.help("Vote account to update"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("new_identity_pubkey")
|
||||
.index(2)
|
||||
.value_name("NEW VALIDATOR IDENTITY PUBKEY")
|
||||
.takes_value(true)
|
||||
.required(true)
|
||||
.validator(is_pubkey_or_keypair)
|
||||
.help("New validator that will vote with this account"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("authorized_voter")
|
||||
.index(3)
|
||||
.value_name("AUTHORIZED VOTER KEYPAIR")
|
||||
.takes_value(true)
|
||||
.required(true)
|
||||
.validator(is_keypair)
|
||||
.help("Authorized voter keypair"),
|
||||
)
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("vote-authorize-voter")
|
||||
.about("Authorize a new vote signing keypair for the given vote account")
|
||||
@ -159,7 +194,7 @@ impl VoteSubCommands for App<'_, '_> {
|
||||
|
||||
pub fn parse_vote_create_account(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
|
||||
let vote_account = keypair_of(matches, "vote_account").unwrap();
|
||||
let node_pubkey = pubkey_of(matches, "node_pubkey").unwrap();
|
||||
let identity_pubkey = pubkey_of(matches, "identity_pubkey").unwrap();
|
||||
let commission = value_of(&matches, "commission").unwrap_or(0);
|
||||
let authorized_voter = pubkey_of(matches, "authorized_voter");
|
||||
let authorized_withdrawer = pubkey_of(matches, "authorized_withdrawer");
|
||||
@ -167,7 +202,7 @@ pub fn parse_vote_create_account(matches: &ArgMatches<'_>) -> Result<CliCommandI
|
||||
Ok(CliCommandInfo {
|
||||
command: CliCommand::CreateVoteAccount {
|
||||
vote_account: vote_account.into(),
|
||||
node_pubkey,
|
||||
node_pubkey: identity_pubkey,
|
||||
authorized_voter,
|
||||
authorized_withdrawer,
|
||||
commission,
|
||||
@ -184,11 +219,26 @@ pub fn parse_vote_authorize(
|
||||
let new_authorized_pubkey = pubkey_of(matches, "new_authorized_pubkey").unwrap();
|
||||
|
||||
Ok(CliCommandInfo {
|
||||
command: CliCommand::VoteAuthorize(
|
||||
command: CliCommand::VoteAuthorize {
|
||||
vote_account_pubkey,
|
||||
new_authorized_pubkey,
|
||||
vote_authorize,
|
||||
),
|
||||
},
|
||||
require_keypair: true,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn parse_vote_update_validator(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
|
||||
let vote_account_pubkey = pubkey_of(matches, "vote_account_pubkey").unwrap();
|
||||
let new_identity_pubkey = pubkey_of(matches, "new_identity_pubkey").unwrap();
|
||||
let authorized_voter = keypair_of(matches, "authorized_voter").unwrap();
|
||||
|
||||
Ok(CliCommandInfo {
|
||||
command: CliCommand::VoteUpdateValidator {
|
||||
vote_account_pubkey,
|
||||
new_identity_pubkey,
|
||||
authorized_voter: authorized_voter.into(),
|
||||
},
|
||||
require_keypair: true,
|
||||
})
|
||||
}
|
||||
@ -229,7 +279,7 @@ pub fn process_create_vote_account(
|
||||
rpc_client: &RpcClient,
|
||||
config: &CliConfig,
|
||||
vote_account: &Keypair,
|
||||
node_pubkey: &Pubkey,
|
||||
identity_pubkey: &Pubkey,
|
||||
authorized_voter: &Option<Pubkey>,
|
||||
authorized_withdrawer: &Option<Pubkey>,
|
||||
commission: u8,
|
||||
@ -237,7 +287,7 @@ pub fn process_create_vote_account(
|
||||
let vote_account_pubkey = vote_account.pubkey();
|
||||
check_unique_pubkeys(
|
||||
(&vote_account_pubkey, "vote_account_pubkey".to_string()),
|
||||
(&node_pubkey, "node_pubkey".to_string()),
|
||||
(&identity_pubkey, "identity_pubkey".to_string()),
|
||||
)?;
|
||||
check_unique_pubkeys(
|
||||
(&config.keypair.pubkey(), "cli keypair".to_string()),
|
||||
@ -251,7 +301,7 @@ pub fn process_create_vote_account(
|
||||
1
|
||||
};
|
||||
let vote_init = VoteInit {
|
||||
node_pubkey: *node_pubkey,
|
||||
node_pubkey: *identity_pubkey,
|
||||
authorized_voter: authorized_voter.unwrap_or(vote_account_pubkey),
|
||||
authorized_withdrawer: authorized_withdrawer.unwrap_or(config.keypair.pubkey()),
|
||||
commission,
|
||||
@ -268,7 +318,12 @@ pub fn process_create_vote_account(
|
||||
ixs,
|
||||
recent_blockhash,
|
||||
);
|
||||
check_account_for_fee(rpc_client, config, &fee_calculator, &tx.message)?;
|
||||
check_account_for_fee(
|
||||
rpc_client,
|
||||
&config.keypair.pubkey(),
|
||||
&fee_calculator,
|
||||
&tx.message,
|
||||
)?;
|
||||
let result = rpc_client.send_and_confirm_transaction(&mut tx, &[&config.keypair, vote_account]);
|
||||
log_instruction_custom_error::<SystemError>(result)
|
||||
}
|
||||
@ -298,7 +353,46 @@ pub fn process_vote_authorize(
|
||||
&[&config.keypair],
|
||||
recent_blockhash,
|
||||
);
|
||||
check_account_for_fee(rpc_client, config, &fee_calculator, &tx.message)?;
|
||||
check_account_for_fee(
|
||||
rpc_client,
|
||||
&config.keypair.pubkey(),
|
||||
&fee_calculator,
|
||||
&tx.message,
|
||||
)?;
|
||||
let result = rpc_client.send_and_confirm_transaction(&mut tx, &[&config.keypair]);
|
||||
log_instruction_custom_error::<VoteError>(result)
|
||||
}
|
||||
|
||||
pub fn process_vote_update_validator(
|
||||
rpc_client: &RpcClient,
|
||||
config: &CliConfig,
|
||||
vote_account_pubkey: &Pubkey,
|
||||
new_identity_pubkey: &Pubkey,
|
||||
authorized_voter: &Keypair,
|
||||
) -> ProcessResult {
|
||||
check_unique_pubkeys(
|
||||
(vote_account_pubkey, "vote_account_pubkey".to_string()),
|
||||
(new_identity_pubkey, "new_identity_pubkey".to_string()),
|
||||
)?;
|
||||
let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;
|
||||
let ixs = vec![vote_instruction::update_node(
|
||||
vote_account_pubkey,
|
||||
&authorized_voter.pubkey(),
|
||||
new_identity_pubkey,
|
||||
)];
|
||||
|
||||
let mut tx = Transaction::new_signed_with_payer(
|
||||
ixs,
|
||||
Some(&config.keypair.pubkey()),
|
||||
&[&config.keypair, authorized_voter],
|
||||
recent_blockhash,
|
||||
);
|
||||
check_account_for_fee(
|
||||
rpc_client,
|
||||
&config.keypair.pubkey(),
|
||||
&fee_calculator,
|
||||
&tx.message,
|
||||
)?;
|
||||
let result = rpc_client.send_and_confirm_transaction(&mut tx, &[&config.keypair]);
|
||||
log_instruction_custom_error::<VoteError>(result)
|
||||
}
|
||||
@ -338,17 +432,14 @@ pub fn process_show_vote_account(
|
||||
"account balance: {}",
|
||||
build_balance_message(vote_account.lamports, use_lamports_unit, true)
|
||||
);
|
||||
println!("node id: {}", vote_state.node_pubkey);
|
||||
println!("validator identity: {}", vote_state.node_pubkey);
|
||||
println!("authorized voter: {}", vote_state.authorized_voter);
|
||||
println!(
|
||||
"authorized withdrawer: {}",
|
||||
vote_state.authorized_withdrawer
|
||||
);
|
||||
println!("credits: {}", vote_state.credits());
|
||||
println!(
|
||||
"commission: {}%",
|
||||
f64::from(vote_state.commission) / f64::from(std::u32::MAX)
|
||||
);
|
||||
println!("commission: {}%", vote_state.commission);
|
||||
println!(
|
||||
"root slot: {}",
|
||||
match vote_state.root_slot {
|
||||
@ -389,38 +480,42 @@ pub fn process_uptime(
|
||||
|
||||
let epoch_schedule = rpc_client.get_epoch_schedule()?;
|
||||
|
||||
println!("Node id: {}", vote_state.node_pubkey);
|
||||
println!("Authorized voter: {}", vote_state.authorized_voter);
|
||||
println!("validator identity: {}", vote_state.node_pubkey);
|
||||
println!("authorized voter: {}", vote_state.authorized_voter);
|
||||
if !vote_state.votes.is_empty() {
|
||||
println!("Uptime:");
|
||||
println!("uptime:");
|
||||
|
||||
let epoch_credits_vec: Vec<(u64, u64, u64)> = vote_state.epoch_credits().copied().collect();
|
||||
|
||||
let epoch_credits = if let Some(x) = span {
|
||||
epoch_credits_vec.iter().rev().take(x as usize)
|
||||
let epoch_credits: Vec<(u64, u64, u64)> = if let Some(x) = span {
|
||||
vote_state
|
||||
.epoch_credits()
|
||||
.iter()
|
||||
.rev()
|
||||
.take(x as usize)
|
||||
.cloned()
|
||||
.collect()
|
||||
} else {
|
||||
epoch_credits_vec.iter().rev().take(epoch_credits_vec.len())
|
||||
vote_state.epoch_credits().iter().rev().cloned().collect()
|
||||
};
|
||||
|
||||
if aggregate {
|
||||
let (credits_earned, slots_in_epoch, epochs): (u64, u64, u64) =
|
||||
epoch_credits.fold((0, 0, 0), |acc, (epoch, credits, prev_credits)| {
|
||||
let credits_earned = credits - prev_credits;
|
||||
let slots_in_epoch = epoch_schedule.get_slots_in_epoch(*epoch);
|
||||
(acc.0 + credits_earned, acc.1 + slots_in_epoch, acc.2 + 1)
|
||||
});
|
||||
let total_uptime = credits_earned as f64 / slots_in_epoch as f64;
|
||||
println!("{:.2}% over {} epochs", total_uptime * 100_f64, epochs,);
|
||||
let (total_credits, total_slots, epochs) =
|
||||
aggregate_epoch_credits(&epoch_credits, &epoch_schedule);
|
||||
if total_slots > 0 {
|
||||
let total_uptime = 100_f64 * total_credits as f64 / total_slots as f64;
|
||||
println!("{:.2}% over {} epochs", total_uptime, epochs);
|
||||
} else {
|
||||
println!("Insufficient voting history available");
|
||||
}
|
||||
} else {
|
||||
for (epoch, credits, prev_credits) in epoch_credits {
|
||||
let credits_earned = credits - prev_credits;
|
||||
let slots_in_epoch = epoch_schedule.get_slots_in_epoch(*epoch);
|
||||
let slots_in_epoch = epoch_schedule.get_slots_in_epoch(epoch);
|
||||
let uptime = credits_earned as f64 / slots_in_epoch as f64;
|
||||
println!("- epoch: {} {:.2}% uptime", epoch, uptime * 100_f64,);
|
||||
}
|
||||
}
|
||||
if let Some(x) = span {
|
||||
if x > epoch_credits_vec.len() as u64 {
|
||||
if x > vote_state.epoch_credits().len() as u64 {
|
||||
println!("(span longer than available epochs)");
|
||||
}
|
||||
}
|
||||
@ -446,17 +541,24 @@ mod tests {
|
||||
let keypair = Keypair::new();
|
||||
let pubkey = keypair.pubkey();
|
||||
let pubkey_string = pubkey.to_string();
|
||||
let keypair2 = Keypair::new();
|
||||
let pubkey2 = keypair2.pubkey();
|
||||
let pubkey2_string = pubkey2.to_string();
|
||||
|
||||
let test_authorize_voter = test_commands.clone().get_matches_from(vec![
|
||||
"test",
|
||||
"vote-authorize-voter",
|
||||
&pubkey_string,
|
||||
&pubkey_string,
|
||||
&pubkey2_string,
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_authorize_voter).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::VoteAuthorize(pubkey, pubkey, VoteAuthorize::Voter),
|
||||
command: CliCommand::VoteAuthorize {
|
||||
vote_account_pubkey: pubkey,
|
||||
new_authorized_pubkey: pubkey2,
|
||||
vote_authorize: VoteAuthorize::Voter
|
||||
},
|
||||
require_keypair: true
|
||||
}
|
||||
);
|
||||
@ -567,6 +669,27 @@ mod tests {
|
||||
}
|
||||
);
|
||||
|
||||
let test_update_validator = test_commands.clone().get_matches_from(vec![
|
||||
"test",
|
||||
"vote-update-validator",
|
||||
&pubkey_string,
|
||||
&pubkey2_string,
|
||||
&keypair_file,
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_update_validator).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::VoteUpdateValidator {
|
||||
vote_account_pubkey: pubkey,
|
||||
new_identity_pubkey: pubkey2,
|
||||
authorized_voter: solana_sdk::signature::read_keypair_file(&keypair_file)
|
||||
.unwrap()
|
||||
.into(),
|
||||
},
|
||||
require_keypair: true
|
||||
}
|
||||
);
|
||||
|
||||
// Test Uptime Subcommand
|
||||
let pubkey = Pubkey::new_rand();
|
||||
let matches = test_commands.clone().get_matches_from(vec![
|
||||
|
104
cli/tests/pay.rs
@ -3,9 +3,9 @@ use serde_json::Value;
use solana_cli::cli::{process_command, request_and_confirm_airdrop, CliCommand, CliConfig};
use solana_client::rpc_client::RpcClient;
use solana_drone::drone::run_local_drone;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::KeypairUtil;
use solana_sdk::{hash::Hash, pubkey::Pubkey, signature::KeypairUtil, signature::Signature};
use std::fs::remove_dir_all;
use std::str::FromStr;
use std::sync::mpsc::channel;

#[cfg(test)]
@ -71,6 +71,9 @@ fn test_cli_timestamp_tx() {
|
||||
timestamp_pubkey: Some(config_witness.keypair.pubkey()),
|
||||
witnesses: None,
|
||||
cancelable: false,
|
||||
sign_only: false,
|
||||
signers: None,
|
||||
blockhash: None,
|
||||
};
|
||||
let sig_response = process_command(&config_payer);
|
||||
|
||||
@ -138,6 +141,9 @@ fn test_cli_witness_tx() {
|
||||
timestamp_pubkey: None,
|
||||
witnesses: Some(vec![config_witness.keypair.pubkey()]),
|
||||
cancelable: false,
|
||||
sign_only: false,
|
||||
signers: None,
|
||||
blockhash: None,
|
||||
};
|
||||
let sig_response = process_command(&config_payer);
|
||||
|
||||
@ -198,6 +204,9 @@ fn test_cli_cancel_tx() {
|
||||
timestamp_pubkey: None,
|
||||
witnesses: Some(vec![config_witness.keypair.pubkey()]),
|
||||
cancelable: true,
|
||||
sign_only: false,
|
||||
signers: None,
|
||||
blockhash: None,
|
||||
};
|
||||
let sig_response = process_command(&config_payer).unwrap();
|
||||
|
||||
@ -223,3 +232,94 @@ fn test_cli_cancel_tx() {
|
||||
server.close().unwrap();
|
||||
remove_dir_all(ledger_path).unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_offline_pay_tx() {
|
||||
let (server, leader_data, alice, ledger_path) = new_validator_for_tests();
|
||||
let bob_pubkey = Pubkey::new_rand();
|
||||
|
||||
let (sender, receiver) = channel();
|
||||
run_local_drone(alice, sender, None);
|
||||
let drone_addr = receiver.recv().unwrap();
|
||||
|
||||
let rpc_client = RpcClient::new_socket(leader_data.rpc);
|
||||
|
||||
let mut config_offline = CliConfig::default();
|
||||
config_offline.json_rpc_url =
|
||||
format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());
|
||||
let mut config_online = CliConfig::default();
|
||||
config_online.json_rpc_url =
|
||||
format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());
|
||||
assert_ne!(
|
||||
config_offline.keypair.pubkey(),
|
||||
config_online.keypair.pubkey()
|
||||
);
|
||||
|
||||
request_and_confirm_airdrop(
|
||||
&rpc_client,
|
||||
&drone_addr,
|
||||
&config_offline.keypair.pubkey(),
|
||||
50,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
request_and_confirm_airdrop(
|
||||
&rpc_client,
|
||||
&drone_addr,
|
||||
&config_online.keypair.pubkey(),
|
||||
50,
|
||||
)
|
||||
.unwrap();
|
||||
check_balance(50, &rpc_client, &config_offline.keypair.pubkey());
|
||||
check_balance(50, &rpc_client, &config_online.keypair.pubkey());
|
||||
|
||||
config_offline.command = CliCommand::Pay {
|
||||
lamports: 10,
|
||||
to: bob_pubkey,
|
||||
timestamp: None,
|
||||
timestamp_pubkey: None,
|
||||
witnesses: None,
|
||||
cancelable: false,
|
||||
sign_only: true,
|
||||
signers: None,
|
||||
blockhash: None,
|
||||
};
|
||||
let sig_response = process_command(&config_offline).unwrap();
|
||||
|
||||
check_balance(50, &rpc_client, &config_offline.keypair.pubkey());
|
||||
check_balance(50, &rpc_client, &config_online.keypair.pubkey());
|
||||
check_balance(0, &rpc_client, &bob_pubkey);
|
||||
|
||||
let object: Value = serde_json::from_str(&sig_response).unwrap();
|
||||
let blockhash_str = object.get("blockhash").unwrap().as_str().unwrap();
|
||||
let signer_strings = object.get("signers").unwrap().as_array().unwrap();
|
||||
let signers: Vec<_> = signer_strings
|
||||
.iter()
|
||||
.map(|signer_string| {
|
||||
let mut signer = signer_string.as_str().unwrap().split('=');
|
||||
let key = Pubkey::from_str(signer.next().unwrap()).unwrap();
|
||||
let sig = Signature::from_str(signer.next().unwrap()).unwrap();
|
||||
(key, sig)
|
||||
})
|
||||
.collect();
|
||||
|
||||
config_online.command = CliCommand::Pay {
|
||||
lamports: 10,
|
||||
to: bob_pubkey,
|
||||
timestamp: None,
|
||||
timestamp_pubkey: None,
|
||||
witnesses: None,
|
||||
cancelable: false,
|
||||
sign_only: false,
|
||||
signers: Some(signers),
|
||||
blockhash: Some(blockhash_str.parse::<Hash>().unwrap()),
|
||||
};
|
||||
process_command(&config_online).unwrap();
|
||||
|
||||
check_balance(40, &rpc_client, &config_offline.keypair.pubkey());
|
||||
check_balance(50, &rpc_client, &config_online.keypair.pubkey());
|
||||
check_balance(10, &rpc_client, &bob_pubkey);
|
||||
|
||||
server.close().unwrap();
|
||||
remove_dir_all(ledger_path).unwrap();
|
||||
}
|
||||
|
252
cli/tests/stake.rs
Normal file
@ -0,0 +1,252 @@
use serde_json::Value;
use solana_cli::cli::{process_command, request_and_confirm_airdrop, CliCommand, CliConfig};
use solana_client::rpc_client::RpcClient;
use solana_drone::drone::run_local_drone;
use solana_sdk::{
hash::Hash,
pubkey::Pubkey,
signature::{read_keypair_file, write_keypair, KeypairUtil, Signature},
};
use solana_stake_program::stake_state::Lockup;
use std::fs::remove_dir_all;
use std::str::FromStr;
use std::sync::mpsc::channel;

#[cfg(test)]
|
||||
use solana_core::validator::new_validator_for_tests;
|
||||
use std::thread::sleep;
|
||||
use std::time::Duration;
|
||||
|
||||
use tempfile::NamedTempFile;
|
||||
|
||||
fn make_tmp_file() -> (String, NamedTempFile) {
|
||||
let tmp_file = NamedTempFile::new().unwrap();
|
||||
(String::from(tmp_file.path().to_str().unwrap()), tmp_file)
|
||||
}
|
||||
|
||||
fn check_balance(expected_balance: u64, client: &RpcClient, pubkey: &Pubkey) {
|
||||
(0..5).for_each(|tries| {
|
||||
let balance = client.retry_get_balance(pubkey, 1).unwrap().unwrap();
|
||||
if balance == expected_balance {
|
||||
return;
|
||||
}
|
||||
if tries == 4 {
|
||||
assert_eq!(balance, expected_balance);
|
||||
}
|
||||
sleep(Duration::from_millis(500));
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_stake_delegation_and_deactivation() {
|
||||
solana_logger::setup();
|
||||
|
||||
let (server, leader_data, alice, ledger_path) = new_validator_for_tests();
|
||||
let (sender, receiver) = channel();
|
||||
run_local_drone(alice, sender, None);
|
||||
let drone_addr = receiver.recv().unwrap();
|
||||
|
||||
let rpc_client = RpcClient::new_socket(leader_data.rpc);
|
||||
|
||||
let mut config_validator = CliConfig::default();
|
||||
config_validator.json_rpc_url =
|
||||
format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());
|
||||
|
||||
let mut config_vote = CliConfig::default();
|
||||
config_vote.json_rpc_url =
|
||||
format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());
|
||||
let (vote_keypair_file, mut tmp_file) = make_tmp_file();
|
||||
write_keypair(&config_vote.keypair, tmp_file.as_file_mut()).unwrap();
|
||||
|
||||
let mut config_stake = CliConfig::default();
|
||||
config_stake.json_rpc_url =
|
||||
format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());
|
||||
let (stake_keypair_file, mut tmp_file) = make_tmp_file();
|
||||
write_keypair(&config_stake.keypair, tmp_file.as_file_mut()).unwrap();
|
||||
|
||||
request_and_confirm_airdrop(
|
||||
&rpc_client,
|
||||
&drone_addr,
|
||||
&config_validator.keypair.pubkey(),
|
||||
100_000,
|
||||
)
|
||||
.unwrap();
|
||||
check_balance(100_000, &rpc_client, &config_validator.keypair.pubkey());
|
||||
|
||||
// Create vote account
|
||||
config_validator.command = CliCommand::CreateVoteAccount {
|
||||
vote_account: read_keypair_file(&vote_keypair_file).unwrap().into(),
|
||||
node_pubkey: config_validator.keypair.pubkey(),
|
||||
authorized_voter: None,
|
||||
authorized_withdrawer: None,
|
||||
commission: 0,
|
||||
};
|
||||
process_command(&config_validator).unwrap();
|
||||
|
||||
// Create stake account
|
||||
config_validator.command = CliCommand::CreateStakeAccount {
|
||||
stake_account: read_keypair_file(&stake_keypair_file).unwrap().into(),
|
||||
staker: None,
|
||||
withdrawer: None,
|
||||
lockup: Lockup {
|
||||
custodian: Pubkey::default(),
|
||||
epoch: 0,
|
||||
},
|
||||
lamports: 50_000,
|
||||
};
|
||||
process_command(&config_validator).unwrap();
|
||||
|
||||
// Delegate stake
|
||||
config_validator.command = CliCommand::DelegateStake {
|
||||
stake_account_pubkey: config_stake.keypair.pubkey(),
|
||||
vote_account_pubkey: config_vote.keypair.pubkey(),
|
||||
force: true,
|
||||
sign_only: false,
|
||||
signers: None,
|
||||
blockhash: None,
|
||||
};
|
||||
process_command(&config_validator).unwrap();
|
||||
|
||||
// Deactivate stake
|
||||
config_validator.command = CliCommand::DeactivateStake {
|
||||
stake_account_pubkey: config_stake.keypair.pubkey(),
|
||||
sign_only: false,
|
||||
signers: None,
|
||||
blockhash: None,
|
||||
};
|
||||
process_command(&config_validator).unwrap();
|
||||
|
||||
server.close().unwrap();
|
||||
remove_dir_all(ledger_path).unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_stake_delegation_and_deactivation_offline() {
|
||||
solana_logger::setup();
|
||||
|
||||
let (server, leader_data, alice, ledger_path) = new_validator_for_tests();
|
||||
let (sender, receiver) = channel();
|
||||
run_local_drone(alice, sender, None);
|
||||
let drone_addr = receiver.recv().unwrap();
|
||||
|
||||
let rpc_client = RpcClient::new_socket(leader_data.rpc);
|
||||
|
||||
let mut config_validator = CliConfig::default();
|
||||
config_validator.json_rpc_url =
|
||||
format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());
|
||||
|
||||
let mut config_payer = CliConfig::default();
|
||||
config_payer.json_rpc_url =
|
||||
format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());
|
||||
|
||||
let mut config_vote = CliConfig::default();
|
||||
config_vote.json_rpc_url =
|
||||
format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());
|
||||
let (vote_keypair_file, mut tmp_file) = make_tmp_file();
|
||||
write_keypair(&config_vote.keypair, tmp_file.as_file_mut()).unwrap();
|
||||
|
||||
let mut config_stake = CliConfig::default();
|
||||
config_stake.json_rpc_url =
|
||||
format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());
|
||||
let (stake_keypair_file, mut tmp_file) = make_tmp_file();
|
||||
write_keypair(&config_stake.keypair, tmp_file.as_file_mut()).unwrap();
|
||||
|
||||
request_and_confirm_airdrop(
|
||||
&rpc_client,
|
||||
&drone_addr,
|
||||
&config_validator.keypair.pubkey(),
|
||||
100_000,
|
||||
)
|
||||
.unwrap();
|
||||
check_balance(100_000, &rpc_client, &config_validator.keypair.pubkey());
|
||||
|
||||
// Create vote account
|
||||
config_validator.command = CliCommand::CreateVoteAccount {
|
||||
vote_account: read_keypair_file(&vote_keypair_file).unwrap().into(),
|
||||
node_pubkey: config_validator.keypair.pubkey(),
|
||||
authorized_voter: None,
|
||||
authorized_withdrawer: None,
|
||||
commission: 0,
|
||||
};
|
||||
process_command(&config_validator).unwrap();
|
||||
|
||||
// Create stake account
|
||||
config_validator.command = CliCommand::CreateStakeAccount {
|
||||
stake_account: read_keypair_file(&stake_keypair_file).unwrap().into(),
|
||||
staker: None,
|
||||
withdrawer: None,
|
||||
lockup: Lockup {
|
||||
custodian: Pubkey::default(),
|
||||
epoch: 0,
|
||||
},
|
||||
lamports: 50_000,
|
||||
};
|
||||
process_command(&config_validator).unwrap();
|
||||
|
||||
// Delegate stake offline
|
||||
config_validator.command = CliCommand::DelegateStake {
|
||||
stake_account_pubkey: config_stake.keypair.pubkey(),
|
||||
vote_account_pubkey: config_vote.keypair.pubkey(),
|
||||
force: true,
|
||||
sign_only: true,
|
||||
signers: None,
|
||||
blockhash: None,
|
||||
};
|
||||
let sig_response = process_command(&config_validator).unwrap();
|
||||
let object: Value = serde_json::from_str(&sig_response).unwrap();
|
||||
let blockhash_str = object.get("blockhash").unwrap().as_str().unwrap();
|
||||
let signer_strings = object.get("signers").unwrap().as_array().unwrap();
|
||||
let signers: Vec<_> = signer_strings
|
||||
.iter()
|
||||
.map(|signer_string| {
|
||||
let mut signer = signer_string.as_str().unwrap().split('=');
|
||||
let key = Pubkey::from_str(signer.next().unwrap()).unwrap();
|
||||
let sig = Signature::from_str(signer.next().unwrap()).unwrap();
|
||||
(key, sig)
|
||||
})
|
||||
.collect();
|
||||
|
||||
// Delegate stake online
|
||||
config_payer.command = CliCommand::DelegateStake {
|
||||
stake_account_pubkey: config_stake.keypair.pubkey(),
|
||||
vote_account_pubkey: config_vote.keypair.pubkey(),
|
||||
force: true,
|
||||
sign_only: false,
|
||||
signers: Some(signers),
|
||||
blockhash: Some(blockhash_str.parse::<Hash>().unwrap()),
|
||||
};
|
||||
process_command(&config_payer).unwrap();
|
||||
|
||||
// Deactivate stake offline
|
||||
config_validator.command = CliCommand::DeactivateStake {
|
||||
stake_account_pubkey: config_stake.keypair.pubkey(),
|
||||
sign_only: true,
|
||||
signers: None,
|
||||
blockhash: None,
|
||||
};
|
||||
let sig_response = process_command(&config_validator).unwrap();
|
||||
let object: Value = serde_json::from_str(&sig_response).unwrap();
|
||||
let blockhash_str = object.get("blockhash").unwrap().as_str().unwrap();
|
||||
let signer_strings = object.get("signers").unwrap().as_array().unwrap();
|
||||
let signers: Vec<_> = signer_strings
|
||||
.iter()
|
||||
.map(|signer_string| {
|
||||
let mut signer = signer_string.as_str().unwrap().split('=');
|
||||
let key = Pubkey::from_str(signer.next().unwrap()).unwrap();
|
||||
let sig = Signature::from_str(signer.next().unwrap()).unwrap();
|
||||
(key, sig)
|
||||
})
|
||||
.collect();
|
||||
|
||||
// Deactivate stake online
|
||||
config_payer.command = CliCommand::DeactivateStake {
|
||||
stake_account_pubkey: config_stake.keypair.pubkey(),
|
||||
sign_only: false,
|
||||
signers: Some(signers),
|
||||
blockhash: Some(blockhash_str.parse::<Hash>().unwrap()),
|
||||
};
|
||||
process_command(&config_payer).unwrap();
|
||||
|
||||
server.close().unwrap();
|
||||
remove_dir_all(ledger_path).unwrap();
|
||||
}
|
@ -1,6 +1,6 @@
[package]
name = "solana-client"
version = "0.21.0"
version = "0.21.6"
description = "Solana Client"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -19,10 +19,10 @@ reqwest = { version = "0.9.22", default-features = false, features = ["rustls-tl
serde = "1.0.102"
serde_derive = "1.0.102"
serde_json = "1.0.41"
solana-net-utils = { path = "../net-utils", version = "0.21.0" }
solana-sdk = { path = "../sdk", version = "0.21.0" }
solana-net-utils = { path = "../net-utils", version = "0.21.6" }
solana-sdk = { path = "../sdk", version = "0.21.6" }

[dev-dependencies]
jsonrpc-core = "14.0.3"
jsonrpc-http-server = "14.0.3"
solana-logger = { path = "../logger", version = "0.21.0" }
solana-logger = { path = "../logger", version = "0.21.6" }

@ -1,12 +1,10 @@
use crate::{client_error::ClientError, rpc_request::RpcRequest};
use solana_sdk::commitment_config::CommitmentConfig;

pub(crate) trait GenericRpcClientRequest {
fn send(
&self,
request: &RpcRequest,
params: Option<serde_json::Value>,
params: serde_json::Value,
retries: usize,
commitment_config: Option<CommitmentConfig>,
) -> Result<serde_json::Value, ClientError>;
}

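Aside: after this change every implementor of GenericRpcClientRequest receives its parameters as a single serde_json::Value (a json!([...]) array or Value::Null), with any commitment level travelling inside that value rather than as a separate argument. A hypothetical in-crate test double, shown only to illustrate the revised shape and not part of this change:

use crate::{client_error::ClientError, rpc_request::RpcRequest};

// Would live alongside the trait above, since the trait is pub(crate).
struct EchoClientRequest;

impl GenericRpcClientRequest for EchoClientRequest {
    fn send(
        &self,
        _request: &RpcRequest,
        params: serde_json::Value,
        _retries: usize,
    ) -> Result<serde_json::Value, ClientError> {
        // Hand the parameters straight back so a test can assert on what was sent,
        // e.g. that the commitment config rides inside the json!([...]) array.
        Ok(params)
    }
}
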
@ -5,7 +5,6 @@ use crate::{
|
||||
};
|
||||
use serde_json::{Number, Value};
|
||||
use solana_sdk::{
|
||||
commitment_config::CommitmentConfig,
|
||||
fee_calculator::FeeCalculator,
|
||||
transaction::{self, TransactionError},
|
||||
};
|
||||
@ -28,17 +27,16 @@ impl GenericRpcClientRequest for MockRpcClientRequest {
|
||||
fn send(
|
||||
&self,
|
||||
request: &RpcRequest,
|
||||
params: Option<serde_json::Value>,
|
||||
params: serde_json::Value,
|
||||
_retries: usize,
|
||||
_commitment_config: Option<CommitmentConfig>,
|
||||
) -> Result<serde_json::Value, ClientError> {
|
||||
if self.url == "fails" {
|
||||
return Ok(Value::Null);
|
||||
}
|
||||
let val = match request {
|
||||
RpcRequest::ConfirmTransaction => {
|
||||
if let Some(Value::Array(param_array)) = params {
|
||||
if let Value::String(param_string) = ¶m_array[0] {
|
||||
if let Some(params_array) = params.as_array() {
|
||||
if let Value::String(param_string) = ¶ms_array[0] {
|
||||
Value::Bool(param_string == SIGNATURE)
|
||||
} else {
|
||||
Value::Null
|
||||
|
@ -4,14 +4,17 @@ use crate::{
|
||||
generic_rpc_client_request::GenericRpcClientRequest,
|
||||
mock_rpc_client_request::MockRpcClientRequest,
|
||||
rpc_client_request::RpcClientRequest,
|
||||
rpc_request::{RpcContactInfo, RpcEpochInfo, RpcRequest, RpcVersionInfo, RpcVoteAccountStatus},
|
||||
rpc_request::{
|
||||
RpcConfirmedBlock, RpcContactInfo, RpcEpochInfo, RpcLeaderSchedule, RpcRequest,
|
||||
RpcVersionInfo, RpcVoteAccountStatus,
|
||||
},
|
||||
};
|
||||
use bincode::serialize;
|
||||
use log::*;
|
||||
use serde_json::{json, Value};
|
||||
use solana_sdk::{
|
||||
account::Account,
|
||||
clock::{Slot, DEFAULT_TICKS_PER_SECOND, DEFAULT_TICKS_PER_SLOT},
|
||||
clock::{Slot, UnixTimestamp, DEFAULT_TICKS_PER_SECOND, DEFAULT_TICKS_PER_SLOT},
|
||||
commitment_config::CommitmentConfig,
|
||||
epoch_schedule::EpochSchedule,
|
||||
fee_calculator::FeeCalculator,
|
||||
@ -67,14 +70,12 @@ impl RpcClient {
|
||||
signature: &str,
|
||||
commitment_config: CommitmentConfig,
|
||||
) -> RpcResponse<bool> {
|
||||
let params = json!(signature);
|
||||
let response = self
|
||||
.client
|
||||
.send(
|
||||
&RpcRequest::ConfirmTransaction,
|
||||
Some(params),
|
||||
json!([signature, commitment_config]),
|
||||
0,
|
||||
Some(commitment_config),
|
||||
)
|
||||
.map_err(|err| {
|
||||
io::Error::new(
|
||||
@ -93,10 +94,9 @@ impl RpcClient {
|
||||
|
||||
pub fn send_transaction(&self, transaction: &Transaction) -> Result<String, ClientError> {
|
||||
let serialized = serialize(transaction).unwrap();
|
||||
let params = json!(serialized);
|
||||
let signature = self
|
||||
.client
|
||||
.send(&RpcRequest::SendTransaction, Some(params), 5, None)?;
.send(&RpcRequest::SendTransaction, json!([serialized]), 5)?;
if signature.as_str().is_none() {
Err(io::Error::new(
io::ErrorKind::Other,
@@ -120,12 +120,10 @@ impl RpcClient {
signature: &str,
commitment_config: CommitmentConfig,
) -> Result<Option<transaction::Result<()>>, ClientError> {
let params = json!(signature.to_string());
let signature_status = self.client.send(
&RpcRequest::GetSignatureStatus,
Some(params),
json!([signature.to_string(), commitment_config]),
5,
commitment_config.ok(),
)?;
let result: Option<transaction::Result<()>> =
serde_json::from_value(signature_status).unwrap();
@@ -142,7 +140,7 @@ impl RpcClient {
) -> io::Result<Slot> {
let response = self
.client
.send(&RpcRequest::GetSlot, None, 0, commitment_config.ok())
.send(&RpcRequest::GetSlot, json!([commitment_config]), 0)
.map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
@@ -161,7 +159,7 @@ impl RpcClient {
pub fn get_vote_accounts(&self) -> io::Result<RpcVoteAccountStatus> {
let response = self
.client
.send(&RpcRequest::GetVoteAccounts, None, 0, None)
.send(&RpcRequest::GetVoteAccounts, Value::Null, 0)
.map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
@@ -180,7 +178,7 @@ impl RpcClient {
pub fn get_cluster_nodes(&self) -> io::Result<Vec<RpcContactInfo>> {
let response = self
.client
.send(&RpcRequest::GetClusterNodes, None, 0, None)
.send(&RpcRequest::GetClusterNodes, Value::Null, 0)
.map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
@@ -196,6 +194,77 @@ impl RpcClient {
})
}
pub fn get_confirmed_block(&self, slot: Slot) -> io::Result<RpcConfirmedBlock> {
let response = self
.client
.send(&RpcRequest::GetConfirmedBlock, json!([slot]), 0)
.map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
format!("GetConfirmedBlock request failure: {:?}", err),
)
})?;
serde_json::from_value(response).map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
format!("GetConfirmedBlock parse failure: {}", err),
)
})
}
pub fn get_confirmed_blocks(
&self,
start_slot: Slot,
end_slot: Option<Slot>,
) -> io::Result<Vec<Slot>> {
let response = self
.client
.send(
&RpcRequest::GetConfirmedBlocks,
json!([start_slot, end_slot]),
0,
)
.map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
format!("GetConfirmedBlocks request failure: {:?}", err),
)
})?;
serde_json::from_value(response).map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
format!("GetConfirmedBlocks parse failure: {}", err),
)
})
}
pub fn get_block_time(&self, slot: Slot) -> io::Result<UnixTimestamp> {
let response = self
.client
.send(&RpcRequest::GetBlockTime, json!([slot]), 0);
response
.map(|result_json| {
if result_json.is_null() {
return Err(io::Error::new(
io::ErrorKind::Other,
format!("Block Not Found: slot={}", slot),
));
}
let result = serde_json::from_value(result_json)?;
trace!("Response block timestamp {:?} {:?}", slot, result);
Ok(result)
})
.map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
format!("GetBlockTime request failure: {:?}", err),
)
})?
}
pub fn get_epoch_info(&self) -> io::Result<RpcEpochInfo> {
self.get_epoch_info_with_commitment(CommitmentConfig::default())
}
@@ -206,7 +275,7 @@ impl RpcClient {
) -> io::Result<RpcEpochInfo> {
let response = self
.client
.send(&RpcRequest::GetEpochInfo, None, 0, commitment_config.ok())
.send(&RpcRequest::GetEpochInfo, json!([commitment_config]), 0)
.map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
@@ -222,10 +291,41 @@ impl RpcClient {
})
}
pub fn get_leader_schedule(&self, slot: Option<Slot>) -> io::Result<Option<RpcLeaderSchedule>> {
self.get_leader_schedule_with_commitment(slot, CommitmentConfig::default())
}
pub fn get_leader_schedule_with_commitment(
&self,
slot: Option<Slot>,
commitment_config: CommitmentConfig,
) -> io::Result<Option<RpcLeaderSchedule>> {
let response = self
.client
.send(
&RpcRequest::GetLeaderSchedule,
json!([slot, commitment_config]),
0,
)
.map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
format!("GetLeaderSchedule request failure: {:?}", err),
)
})?;
serde_json::from_value(response).map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
format!("GetLeaderSchedule failure: {}", err),
)
})
}
pub fn get_epoch_schedule(&self) -> io::Result<EpochSchedule> {
let response = self
.client
.send(&RpcRequest::GetEpochSchedule, None, 0, None)
.send(&RpcRequest::GetEpochSchedule, Value::Null, 0)
.map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
@@ -244,7 +344,7 @@ impl RpcClient {
pub fn get_inflation(&self) -> io::Result<Inflation> {
let response = self
.client
.send(&RpcRequest::GetInflation, None, 0, None)
.send(&RpcRequest::GetInflation, Value::Null, 0)
.map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
@@ -263,7 +363,7 @@ impl RpcClient {
pub fn get_version(&self) -> io::Result<RpcVersionInfo> {
let response = self
.client
.send(&RpcRequest::GetVersion, None, 0, None)
.send(&RpcRequest::GetVersion, Value::Null, 0)
.map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
@@ -412,10 +512,13 @@ impl RpcClient {
pubkey: &Pubkey,
retries: usize,
) -> Result<Option<u64>, Box<dyn error::Error>> {
let params = json!(format!("{}", pubkey));
let balance_json = self
.client
.send(&RpcRequest::GetBalance, Some(params), retries, None)
.send(
&RpcRequest::GetBalance,
json!([pubkey.to_string()]),
retries,
)
.map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
@@ -451,12 +554,10 @@ impl RpcClient {
pubkey: &Pubkey,
commitment_config: CommitmentConfig,
) -> RpcResponse<Option<Account>> {
let params = json!(format!("{}", pubkey));
let response = self.client.send(
&RpcRequest::GetAccountInfo,
Some(params),
json!([pubkey.to_string(), commitment_config]),
0,
Some(commitment_config),
);
response
@@ -484,14 +585,12 @@ impl RpcClient {
}
pub fn get_minimum_balance_for_rent_exemption(&self, data_len: usize) -> io::Result<u64> {
let params = json!(data_len);
let minimum_balance_json = self
.client
.send(
&RpcRequest::GetMinimumBalanceForRentExemption,
Some(params),
json!([data_len]),
0,
None,
)
.map_err(|err| {
io::Error::new(
@@ -529,14 +628,12 @@ impl RpcClient {
pubkey: &Pubkey,
commitment_config: CommitmentConfig,
) -> RpcResponse<u64> {
let params = json!(pubkey.to_string());
let balance_json = self
.client
.send(
&RpcRequest::GetBalance,
Some(params),
json!([pubkey.to_string(), commitment_config]),
0,
Some(commitment_config),
)
.map_err(|err| {
io::Error::new(
@@ -554,10 +651,13 @@ impl RpcClient {
}
pub fn get_program_accounts(&self, pubkey: &Pubkey) -> io::Result<Vec<(Pubkey, Account)>> {
let params = json!(format!("{}", pubkey));
let response = self
.client
.send(&RpcRequest::GetProgramAccounts, Some(params), 0, None)
.send(
&RpcRequest::GetProgramAccounts,
json!([pubkey.to_string()]),
0,
)
.map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
@@ -599,9 +699,8 @@ impl RpcClient {
.client
.send(
&RpcRequest::GetTransactionCount,
None,
json!([commitment_config]),
0,
commitment_config.ok(),
)
.map_err(|err| {
io::Error::new(
@@ -632,9 +731,8 @@ impl RpcClient {
.client
.send(
&RpcRequest::GetRecentBlockhash,
None,
json!([commitment_config]),
0,
commitment_config.ok(),
)
.map_err(|err| {
io::Error::new(
@@ -697,7 +795,7 @@ impl RpcClient {
pub fn get_genesis_hash(&self) -> io::Result<Hash> {
let response = self
.client
.send(&RpcRequest::GetGenesisHash, None, 0, None)
.send(&RpcRequest::GetGenesisHash, Value::Null, 0)
.map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
@@ -822,14 +920,12 @@ impl RpcClient {
/// Check a signature in the bank.
pub fn check_signature(&self, signature: &Signature) -> bool {
trace!("check_signature: {:?}", signature);
let params = json!(format!("{}", signature));
for _ in 0..30 {
let response = self.client.send(
&RpcRequest::ConfirmTransaction,
Some(params.clone()),
json!([signature.to_string(), CommitmentConfig::recent()]),
0,
Some(CommitmentConfig::recent()),
);
match response {
@@ -916,16 +1012,14 @@ impl RpcClient {
pub fn get_num_blocks_since_signature_confirmation(
&self,
sig: &Signature,
signature: &Signature,
) -> io::Result<usize> {
let params = json!(format!("{}", sig));
let response = self
.client
.send(
&RpcRequest::GetNumBlocksSinceSignatureConfirmation,
Some(params.clone()),
json!([signature.to_string(), CommitmentConfig::recent().ok()]),
1,
CommitmentConfig::recent().ok(),
)
.map_err(|err| {
io::Error::new(
@@ -950,7 +1044,7 @@ impl RpcClient {
pub fn validator_exit(&self) -> io::Result<bool> {
let response = self
.client
.send(&RpcRequest::ValidatorExit, None, 0, None)
.send(&RpcRequest::ValidatorExit, Value::Null, 0)
.map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
@@ -968,11 +1062,11 @@ impl RpcClient {
pub fn send(
&self,
request: &RpcRequest,
params: Option<Value>,
params: Value,
retries: usize,
commitment: Option<CommitmentConfig>,
) -> Result<Value, ClientError> {
self.client.send(request, params, retries, commitment)
assert!(params.is_array() || params.is_null());
self.client.send(request, params, retries)
}
}
@@ -1036,25 +1130,19 @@ mod tests {
let balance = rpc_client.send(
&RpcRequest::GetBalance,
Some(json!(["deadbeefXjn8o3yroDHxUtKsZZgoy4GPkPPXfouKNHhx"])),
json!(["deadbeefXjn8o3yroDHxUtKsZZgoy4GPkPPXfouKNHhx"]),
0,
None,
);
assert_eq!(balance.unwrap().as_u64().unwrap(), 50);
let blockhash = rpc_client.send(&RpcRequest::GetRecentBlockhash, None, 0, None);
let blockhash = rpc_client.send(&RpcRequest::GetRecentBlockhash, Value::Null, 0);
assert_eq!(
blockhash.unwrap().as_str().unwrap(),
"deadbeefXjn8o3yroDHxUtKsZZgoy4GPkPPXfouKNHhx"
);
// Send erroneous parameter
let blockhash = rpc_client.send(
&RpcRequest::GetRecentBlockhash,
Some(json!("parameter")),
0,
None,
);
let blockhash = rpc_client.send(&RpcRequest::GetRecentBlockhash, json!(["parameter"]), 0);
assert_eq!(blockhash.is_err(), true);
}
@@ -1090,9 +1178,8 @@ mod tests {
let balance = rpc_client.send(
&RpcRequest::GetBalance,
Some(json!(["deadbeefXjn8o3yroDHxUtKsZZgoy4GPkPPXfouKNHhw"])),
json!(["deadbeefXjn8o3yroDHxUtKsZZgoy4GPkPPXfouKNHhw"]),
10,
None,
);
assert_eq!(balance.unwrap().as_u64().unwrap(), 5);
}
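All of the call sites above follow the same new convention: `params` is passed as a ready-made JSON array (or `Value::Null`), and any `CommitmentConfig` rides along as the final array element instead of being a separate argument. A minimal sketch of a caller written against that convention, assuming the `RpcClient`, `RpcRequest`, and `CommitmentConfig` types shown in this diff (the pubkey string is the same placeholder used in the tests above):

use serde_json::json;

// Sketch only: `client` is an RpcClient as in the diff above.
fn print_balance_example(client: &RpcClient) {
    // Query parameters travel as a single JSON array; the commitment level is
    // simply appended as the last element of that array.
    let response = client.send(
        &RpcRequest::GetBalance,
        json!([
            "deadbeefXjn8o3yroDHxUtKsZZgoy4GPkPPXfouKNHhx",
            CommitmentConfig::recent()
        ]),
        0,
    );
    if let Ok(value) = response {
        println!("getBalance returned {}", value);
    }
}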
@@ -5,10 +5,7 @@ use crate::{
};
use log::*;
use reqwest::{self, header::CONTENT_TYPE};
use solana_sdk::{
clock::{DEFAULT_TICKS_PER_SECOND, DEFAULT_TICKS_PER_SLOT},
commitment_config::CommitmentConfig,
};
use solana_sdk::clock::{DEFAULT_TICKS_PER_SECOND, DEFAULT_TICKS_PER_SLOT};
use std::{thread::sleep, time::Duration};
pub struct RpcClientRequest {
@@ -38,14 +35,13 @@ impl GenericRpcClientRequest for RpcClientRequest {
fn send(
&self,
request: &RpcRequest,
params: Option<serde_json::Value>,
params: serde_json::Value,
mut retries: usize,
commitment_config: Option<CommitmentConfig>,
) -> Result<serde_json::Value, ClientError> {
// Concurrent requests are not supported so reuse the same request id for all requests
let request_id = 1;
let request_json = request.build_request_json(request_id, params, commitment_config);
let request_json = request.build_request_json(request_id, params);
loop {
match self
@@ -56,6 +52,10 @@ impl GenericRpcClientRequest for RpcClientRequest {
.send()
{
Ok(mut response) => {
if !response.status().is_success() {
return Err(response.error_for_status().unwrap_err().into());
}
let json: serde_json::Value = serde_json::from_str(&response.text()?)?;
if json["error"].is_object() {
return Err(RpcError::RpcRequestError(format!(
@@ -2,11 +2,10 @@ use jsonrpc_core::Result as JsonResult;
use serde_json::{json, Value};
use solana_sdk::{
clock::{Epoch, Slot},
commitment_config::CommitmentConfig,
hash::Hash,
transaction::{Result, Transaction},
};
use std::{error, fmt, io, net::SocketAddr};
use std::{collections::HashMap, error, fmt, io, net::SocketAddr};
pub type RpcResponseIn<T> = JsonResult<Response<T>>;
pub type RpcResponse<T> = io::Result<Response<T>>;
@@ -31,7 +30,7 @@ pub struct RpcConfirmedBlock {
pub transactions: Vec<(Transaction, Option<RpcTransactionStatus>)>,
}
#[derive(Debug, PartialEq, Serialize, Deserialize)]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RpcTransactionStatus {
pub status: Result<()>,
pub fee: u64,
@@ -49,6 +48,9 @@ pub struct RpcContactInfo {
pub rpc: Option<SocketAddr>,
}
/// Map of leader base58 identity pubkeys to the slot indices relative to the first epoch slot
pub type RpcLeaderSchedule = HashMap<String, Vec<usize>>;
#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct RpcEpochInfo {
@@ -97,6 +99,10 @@ pub struct RpcVoteAccountInfo {
/// Whether this account is staked for the current epoch
pub epoch_vote_account: bool,
/// History of how many credits earned by the end of each epoch
/// each tuple is (Epoch, credits, prev_credits)
pub epoch_credits: Vec<(Epoch, u64, u64)>,
/// Most recent slot voted on by this vote account (0 if no votes exist)
pub last_vote: u64,
@@ -111,11 +117,15 @@ pub enum RpcRequest {
ValidatorExit,
GetAccountInfo,
GetBalance,
GetBlockTime,
GetClusterNodes,
GetConfirmedBlock,
GetConfirmedBlocks,
GetEpochInfo,
GetEpochSchedule,
GetGenesisHash,
GetInflation,
GetLeaderSchedule,
GetNumBlocksSinceSignatureConfirmation,
GetProgramAccounts,
GetRecentBlockhash,
@@ -137,12 +147,7 @@ pub enum RpcRequest {
}
impl RpcRequest {
pub(crate) fn build_request_json(
&self,
id: u64,
params: Option<Value>,
commitment_config: Option<CommitmentConfig>,
) -> Value {
pub(crate) fn build_request_json(&self, id: u64, params: Value) -> Value {
let jsonrpc = "2.0";
let method = match self {
RpcRequest::ConfirmTransaction => "confirmTransaction",
@@ -150,11 +155,15 @@ impl RpcRequest {
RpcRequest::ValidatorExit => "validatorExit",
RpcRequest::GetAccountInfo => "getAccountInfo",
RpcRequest::GetBalance => "getBalance",
RpcRequest::GetBlockTime => "getBlockTime",
RpcRequest::GetClusterNodes => "getClusterNodes",
RpcRequest::GetConfirmedBlock => "getConfirmedBlock",
RpcRequest::GetConfirmedBlocks => "getConfirmedBlocks",
RpcRequest::GetEpochInfo => "getEpochInfo",
RpcRequest::GetEpochSchedule => "getEpochSchedule",
RpcRequest::GetGenesisHash => "getGenesisHash",
RpcRequest::GetInflation => "getInflation",
RpcRequest::GetLeaderSchedule => "getLeaderSchedule",
RpcRequest::GetNumBlocksSinceSignatureConfirmation => {
"getNumBlocksSinceSignatureConfirmation"
}
@@ -176,21 +185,12 @@ impl RpcRequest {
RpcRequest::SignVote => "signVote",
RpcRequest::GetMinimumBalanceForRentExemption => "getMinimumBalanceForRentExemption",
};
let mut request = json!({
json!({
"jsonrpc": jsonrpc,
"id": id,
"method": method,
});
if let Some(param_string) = params {
if let Some(config) = commitment_config {
request["params"] = json!([param_string, config]);
} else {
request["params"] = json!([param_string]);
}
} else if let Some(config) = commitment_config {
request["params"] = json!([config]);
}
request
"params": params,
})
}
}
@@ -219,46 +219,46 @@ impl error::Error for RpcError {
#[cfg(test)]
mod tests {
use super::*;
use solana_sdk::commitment_config::CommitmentLevel;
use solana_sdk::commitment_config::{CommitmentConfig, CommitmentLevel};
#[test]
fn test_build_request_json() {
let test_request = RpcRequest::GetAccountInfo;
let addr = json!("deadbeefXjn8o3yroDHxUtKsZZgoy4GPkPPXfouKNHhx");
let request = test_request.build_request_json(1, Some(addr.clone()), None);
let request = test_request.build_request_json(1, json!([addr.clone()]));
assert_eq!(request["method"], "getAccountInfo");
assert_eq!(request["params"], json!([addr]));
let test_request = RpcRequest::GetBalance;
let request = test_request.build_request_json(1, Some(addr), None);
let request = test_request.build_request_json(1, json!([addr]));
assert_eq!(request["method"], "getBalance");
let test_request = RpcRequest::GetEpochInfo;
let request = test_request.build_request_json(1, None, None);
let request = test_request.build_request_json(1, Value::Null);
assert_eq!(request["method"], "getEpochInfo");
let test_request = RpcRequest::GetInflation;
let request = test_request.build_request_json(1, None, None);
let request = test_request.build_request_json(1, Value::Null);
assert_eq!(request["method"], "getInflation");
let test_request = RpcRequest::GetRecentBlockhash;
let request = test_request.build_request_json(1, None, None);
let request = test_request.build_request_json(1, Value::Null);
assert_eq!(request["method"], "getRecentBlockhash");
let test_request = RpcRequest::GetSlot;
let request = test_request.build_request_json(1, None, None);
let request = test_request.build_request_json(1, Value::Null);
assert_eq!(request["method"], "getSlot");
let test_request = RpcRequest::GetTransactionCount;
let request = test_request.build_request_json(1, None, None);
let request = test_request.build_request_json(1, Value::Null);
assert_eq!(request["method"], "getTransactionCount");
let test_request = RpcRequest::RequestAirdrop;
let request = test_request.build_request_json(1, None, None);
let request = test_request.build_request_json(1, Value::Null);
assert_eq!(request["method"], "requestAirdrop");
let test_request = RpcRequest::SendTransaction;
let request = test_request.build_request_json(1, None, None);
let request = test_request.build_request_json(1, Value::Null);
assert_eq!(request["method"], "sendTransaction");
}
@@ -271,13 +271,13 @@ mod tests {
// Test request with CommitmentConfig and no params
let test_request = RpcRequest::GetRecentBlockhash;
let request = test_request.build_request_json(1, None, Some(commitment_config.clone()));
let request = test_request.build_request_json(1, json!([commitment_config.clone()]));
assert_eq!(request["params"], json!([commitment_config.clone()]));
// Test request with CommitmentConfig and params
let test_request = RpcRequest::GetBalance;
let request =
test_request.build_request_json(1, Some(addr.clone()), Some(commitment_config.clone()));
test_request.build_request_json(1, json!([addr.clone(), commitment_config.clone()]));
assert_eq!(request["params"], json!([addr, commitment_config]));
}
}
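With the simplified `build_request_json`, every request body is the same JSON-RPC 2.0 envelope and `params` is passed through verbatim. The shape can be reproduced with nothing but `serde_json`; this standalone sketch mirrors that behavior (the method name and pubkey placeholder below are illustrative only):

use serde_json::{json, Value};

// Build a JSON-RPC 2.0 request body; `params` must already be a JSON array or Value::Null.
fn build_request_json(id: u64, method: &str, params: Value) -> Value {
    json!({
        "jsonrpc": "2.0",
        "id": id,
        "method": method,
        "params": params,
    })
}

// Example:
// build_request_json(1, "getBalance", json!(["<base58 pubkey>", {"commitment": "recent"}]))
// => {"jsonrpc":"2.0","id":1,"method":"getBalance","params":["<base58 pubkey>",{"commitment":"recent"}]}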
@@ -1,7 +1,7 @@
[package]
name = "solana-core"
description = "Blockchain, Rebuilt for Scale"
version = "0.21.0"
version = "0.21.6"
documentation = "https://docs.rs/solana"
homepage = "https://solana.com/"
readme = "../README.md"
@@ -41,25 +41,25 @@ rayon = "1.2.0"
serde = "1.0.102"
serde_derive = "1.0.102"
serde_json = "1.0.41"
solana-budget-program = { path = "../programs/budget", version = "0.21.0" }
solana-clap-utils = { path = "../clap-utils", version = "0.21.0" }
solana-chacha-sys = { path = "../chacha-sys", version = "0.21.0" }
solana-client = { path = "../client", version = "0.21.0" }
solana-drone = { path = "../drone", version = "0.21.0" }
solana-budget-program = { path = "../programs/budget", version = "0.21.6" }
solana-clap-utils = { path = "../clap-utils", version = "0.21.6" }
solana-chacha-sys = { path = "../chacha-sys", version = "0.21.6" }
solana-client = { path = "../client", version = "0.21.6" }
solana-drone = { path = "../drone", version = "0.21.6" }
ed25519-dalek = "1.0.0-pre.1"
solana-ledger = { path = "../ledger", version = "0.21.0" }
solana-logger = { path = "../logger", version = "0.21.0" }
solana-merkle-tree = { path = "../merkle-tree", version = "0.21.0" }
solana-metrics = { path = "../metrics", version = "0.21.0" }
solana-measure = { path = "../measure", version = "0.21.0" }
solana-net-utils = { path = "../net-utils", version = "0.21.0" }
solana-perf = { path = "../perf", version = "0.21.0" }
solana-runtime = { path = "../runtime", version = "0.21.0" }
solana-sdk = { path = "../sdk", version = "0.21.0" }
solana-stake-program = { path = "../programs/stake", version = "0.21.0" }
solana-storage-program = { path = "../programs/storage", version = "0.21.0" }
solana-vote-program = { path = "../programs/vote", version = "0.21.0" }
solana-vote-signer = { path = "../vote-signer", version = "0.21.0" }
solana-ledger = { path = "../ledger", version = "0.21.6" }
solana-logger = { path = "../logger", version = "0.21.6" }
solana-merkle-tree = { path = "../merkle-tree", version = "0.21.6" }
solana-metrics = { path = "../metrics", version = "0.21.6" }
solana-measure = { path = "../measure", version = "0.21.6" }
solana-net-utils = { path = "../net-utils", version = "0.21.6" }
solana-perf = { path = "../perf", version = "0.21.6" }
solana-runtime = { path = "../runtime", version = "0.21.6" }
solana-sdk = { path = "../sdk", version = "0.21.6" }
solana-stake-program = { path = "../programs/stake", version = "0.21.6" }
solana-storage-program = { path = "../programs/storage", version = "0.21.6" }
solana-vote-program = { path = "../programs/vote", version = "0.21.6" }
solana-vote-signer = { path = "../vote-signer", version = "0.21.6" }
symlink = "0.1.0"
sys-info = "0.5.8"
tempfile = "3.1.0"
@@ -68,7 +68,7 @@ tokio-codec = "0.1"
tokio-fs = "0.1"
tokio-io = "0.1"
untrusted = "0.7.0"
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "0.21.0" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "0.21.6" }
reed-solomon-erasure = { package = "solana-reed-solomon-erasure", version = "4.0.1-3", features = ["simd-accel"] }
[target."cfg(unix)".dependencies]
@@ -748,9 +748,8 @@ impl Archiver {
Ok(rpc_client
.send(
&RpcRequest::GetSlotsPerSegment,
None,
serde_json::json!([client_commitment]),
0,
Some(client_commitment),
)
.map_err(|err| {
warn!("Error while making rpc request {:?}", err);
@@ -803,7 +802,11 @@ impl Archiver {
RpcClient::new_socket(rpc_peers[node_index].rpc)
};
let response = rpc_client
.send(&RpcRequest::GetStorageTurn, None, 0, None)
.send(
&RpcRequest::GetStorageTurn,
serde_json::value::Value::Null,
0,
)
.map_err(|err| {
warn!("Error while making rpc request {:?}", err);
Error::IO(io::Error::new(ErrorKind::Other, "rpc error"))
@@ -1025,7 +1025,7 @@ impl ReplayStage {
);
datapoint_error!(
"replay-stage-entry_verification_failure",
"replay-stage-block-error",
("slot", bank.slot(), i64),
("last_entry", last_entry.to_string(), String),
);
@@ -295,7 +295,7 @@ mod tests {
..ProcessOptions::default()
};
let (bank_forks, _, cached_leader_schedule) =
process_blocktree(&genesis_config, &blocktree, None, opts).unwrap();
process_blocktree(&genesis_config, &blocktree, Vec::new(), opts).unwrap();
let leader_schedule_cache = Arc::new(cached_leader_schedule);
let bank_forks = Arc::new(RwLock::new(bank_forks));
516 core/src/rpc.rs
@@ -12,15 +12,17 @@ use bincode::serialize;
use jsonrpc_core::{Error, Metadata, Result};
use jsonrpc_derive::rpc;
use solana_client::rpc_request::{
Response, RpcConfirmedBlock, RpcContactInfo, RpcEpochInfo, RpcResponseContext, RpcVersionInfo,
RpcVoteAccountInfo, RpcVoteAccountStatus,
Response, RpcConfirmedBlock, RpcContactInfo, RpcEpochInfo, RpcLeaderSchedule,
RpcResponseContext, RpcVersionInfo, RpcVoteAccountInfo, RpcVoteAccountStatus,
};
use solana_drone::drone::request_airdrop_transaction;
use solana_ledger::{bank_forks::BankForks, blocktree::Blocktree};
use solana_ledger::{
bank_forks::BankForks, blocktree::Blocktree, rooted_slot_iterator::RootedSlotIterator,
};
use solana_runtime::bank::Bank;
use solana_sdk::{
account::Account,
clock::Slot,
clock::{Slot, UnixTimestamp},
commitment_config::{CommitmentConfig, CommitmentLevel},
epoch_schedule::EpochSchedule,
fee_calculator::FeeCalculator,
@@ -28,10 +30,12 @@ use solana_sdk::{
inflation::Inflation,
pubkey::Pubkey,
signature::Signature,
timing::slot_duration_from_slots_per_year,
transaction::{self, Transaction},
};
use solana_vote_program::vote_state::{VoteState, MAX_LOCKOUT_HISTORY};
use std::{
collections::HashMap,
net::{SocketAddr, UdpSocket},
sync::{Arc, RwLock},
thread::sleep,
@@ -250,6 +254,7 @@ impl JsonRpcRequestProcessor {
activated_stake: *activated_stake,
commission: vote_state.commission,
root_slot: vote_state.root_slot.unwrap_or(0),
epoch_credits: vote_state.epoch_credits().clone(),
epoch_vote_account,
last_vote,
}
@@ -261,9 +266,15 @@ impl JsonRpcRequestProcessor {
vote_account_info.last_vote > 0
}
});
let delinquent_staked_vote_accounts = delinquent_vote_accounts
.into_iter()
.filter(|vote_account_info| vote_account_info.activated_stake > 0)
.collect::<Vec<_>>();
Ok(RpcVoteAccountStatus {
current: current_vote_accounts,
delinquent: delinquent_vote_accounts,
delinquent: delinquent_staked_vote_accounts,
})
}
@@ -304,6 +315,44 @@ impl JsonRpcRequestProcessor {
pub fn get_confirmed_block(&self, slot: Slot) -> Result<Option<RpcConfirmedBlock>> {
Ok(self.blocktree.get_confirmed_block(slot).ok())
}
pub fn get_confirmed_blocks(
&self,
start_slot: Slot,
end_slot: Option<Slot>,
) -> Result<Vec<Slot>> {
let end_slot = end_slot.unwrap_or_else(|| self.bank(None).slot());
if end_slot < start_slot {
return Ok(vec![]);
}
let start_slot = (start_slot..end_slot).find(|&slot| self.blocktree.is_root(slot));
if let Some(start_slot) = start_slot {
let mut slots: Vec<Slot> = RootedSlotIterator::new(start_slot, &self.blocktree)
.unwrap()
.map(|(slot, _)| slot)
.collect();
slots.retain(|&x| x <= end_slot);
Ok(slots)
} else {
Ok(vec![])
}
}
// The `get_block_time` method is not fully implemented. It currently returns `slot` *
// DEFAULT_MS_PER_SLOT offset from 0 for all requests, and null for any values that would
// overflow.
pub fn get_block_time(&self, slot: Slot) -> Result<Option<UnixTimestamp>> {
// This calculation currently assumes that bank.ticks_per_slot and bank.slots_per_year will
// remain unchanged after genesis. If these values will be variable in the future, those
// timing parameters will need to be stored persistently, and this calculation will likely
// need to be moved upstream into blocktree. Also, an explicit commitment level will need
// to be set.
let bank = self.bank(None);
let slot_duration = slot_duration_from_slots_per_year(bank.slots_per_year());
Ok(self.blocktree.get_block_time(slot, slot_duration))
}
}
fn get_tpu_addr(cluster_info: &Arc<RwLock<ClusterInfo>>) -> Result<SocketAddr> {
@@ -395,7 +444,7 @@ pub trait RpcSol {
fn get_block_commitment(
&self,
meta: Self::Metadata,
block: u64,
block: Slot,
) -> Result<(Option<BlockCommitment>, u64)>;
#[rpc(meta, name = "getGenesisHash")]
@@ -405,8 +454,9 @@ pub trait RpcSol {
fn get_leader_schedule(
&self,
meta: Self::Metadata,
slot: Option<Slot>,
commitment: Option<CommitmentConfig>,
) -> Result<Option<Vec<String>>>;
) -> Result<Option<RpcLeaderSchedule>>;
#[rpc(meta, name = "getRecentBlockhash")]
fn get_recent_blockhash(
@@ -513,6 +563,17 @@ pub trait RpcSol {
meta: Self::Metadata,
slot: Slot,
) -> Result<Option<RpcConfirmedBlock>>;
#[rpc(meta, name = "getBlockTime")]
fn get_block_time(&self, meta: Self::Metadata, slot: Slot) -> Result<Option<UnixTimestamp>>;
#[rpc(meta, name = "getConfirmedBlocks")]
fn get_confirmed_blocks(
&self,
meta: Self::Metadata,
start_slot: Slot,
end_slot: Option<Slot>,
) -> Result<Vec<Slot>>;
}
pub struct RpcSolImpl;
@@ -652,8 +713,9 @@ impl RpcSol for RpcSolImpl {
) -> Result<RpcEpochInfo> {
let bank = meta.request_processor.read().unwrap().bank(commitment);
let epoch_schedule = bank.epoch_schedule();
let (epoch, slot_index) = epoch_schedule.get_epoch_and_slot_index(bank.slot());
let slot = bank.slot();
let (epoch, slot_index) = epoch_schedule.get_epoch_and_slot_index(slot);
Ok(RpcEpochInfo {
epoch,
slot_index,
@@ -682,17 +744,25 @@ impl RpcSol for RpcSolImpl {
fn get_leader_schedule(
&self,
meta: Self::Metadata,
slot: Option<Slot>,
commitment: Option<CommitmentConfig>,
) -> Result<Option<Vec<String>>> {
) -> Result<Option<RpcLeaderSchedule>> {
let bank = meta.request_processor.read().unwrap().bank(commitment);
let slot = slot.unwrap_or_else(|| bank.slot());
let epoch = bank.epoch_schedule().get_epoch(slot);
Ok(
solana_ledger::leader_schedule_utils::leader_schedule(bank.epoch(), &bank).map(
solana_ledger::leader_schedule_utils::leader_schedule(epoch, &bank).map(
|leader_schedule| {
leader_schedule
.get_slot_leaders()
.iter()
.map(|pubkey| pubkey.to_string())
.collect()
let mut map = HashMap::new();
for (slot_index, pubkey) in
leader_schedule.get_slot_leaders().iter().enumerate()
{
let pubkey = pubkey.to_string();
map.entry(pubkey).or_insert_with(|| vec![]).push(slot_index);
}
map
},
),
)
@@ -967,6 +1037,22 @@ impl RpcSol for RpcSolImpl {
.unwrap()
.get_confirmed_block(slot)
}
fn get_confirmed_blocks(
&self,
meta: Self::Metadata,
start_slot: Slot,
end_slot: Option<Slot>,
) -> Result<Vec<Slot>> {
meta.request_processor
.read()
.unwrap()
.get_confirmed_blocks(start_slot, end_slot)
}
fn get_block_time(&self, meta: Self::Metadata, slot: Slot) -> Result<Option<UnixTimestamp>> {
meta.request_processor.read().unwrap().get_block_time(slot)
}
}
#[cfg(test)]
@@ -978,7 +1064,10 @@ pub mod tests {
replay_stage::tests::create_test_transactions_and_populate_blocktree,
};
use jsonrpc_core::{MetaIoHandler, Output, Response, Value};
use solana_ledger::get_tmp_ledger_path;
use solana_ledger::{
blocktree::entries_to_test_shreds, blocktree_processor::fill_blocktree_slot_with_ticks,
entry::next_entry_mut, get_tmp_ledger_path,
};
use solana_sdk::{
fee_calculator::DEFAULT_BURN_PERCENT,
hash::{hash, Hash},
@@ -987,27 +1076,41 @@ pub mod tests {
system_transaction,
transaction::TransactionError,
};
use solana_vote_program::{
vote_instruction,
vote_state::{Vote, VoteInit, MAX_LOCKOUT_HISTORY},
};
use std::{
collections::HashMap,
sync::atomic::{AtomicBool, Ordering},
thread,
};
const TEST_MINT_LAMPORTS: u64 = 10_000;
const TEST_MINT_LAMPORTS: u64 = 1_000_000;
const TEST_SLOTS_PER_EPOCH: u64 = 50;
struct RpcHandler {
io: MetaIoHandler<Meta>,
meta: Meta,
bank: Arc<Bank>,
bank_forks: Arc<RwLock<BankForks>>,
blockhash: Hash,
alice: Keypair,
leader_pubkey: Pubkey,
leader_vote_keypair: Keypair,
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
confirmed_block_signatures: Vec<Signature>,
}
fn start_rpc_handler_with_tx(pubkey: &Pubkey) -> RpcHandler {
let (bank_forks, alice) = new_bank_forks();
start_rpc_handler_with_tx_and_blocktree(pubkey, vec![])
}
fn start_rpc_handler_with_tx_and_blocktree(
pubkey: &Pubkey,
blocktree_roots: Vec<Slot>,
) -> RpcHandler {
let (bank_forks, alice, leader_vote_keypair) = new_bank_forks();
let bank = bank_forks.read().unwrap().working_bank();
let commitment_slot0 = BlockCommitment::new([8; MAX_LOCKOUT_HISTORY]);
@@ -1036,6 +1139,55 @@ pub mod tests {
blocktree.clone(),
);
// Add timestamp vote to blocktree
let vote = Vote {
slots: vec![1],
hash: Hash::default(),
};
let vote_ix = vote_instruction::vote(
&leader_vote_keypair.pubkey(),
&leader_vote_keypair.pubkey(),
vote,
);
let vote_tx = Transaction::new_signed_instructions(
&[&leader_vote_keypair],
vec![vote_ix],
Hash::default(),
);
let shreds = entries_to_test_shreds(
vec![next_entry_mut(&mut Hash::default(), 0, vec![vote_tx])],
1,
0,
true,
0,
);
blocktree.insert_shreds(shreds, None, false).unwrap();
blocktree.set_roots(&[1]).unwrap();
let mut roots = blocktree_roots.clone();
if !roots.is_empty() {
roots.retain(|&x| x > 1);
let mut parent_bank = bank;
for (i, root) in roots.iter().enumerate() {
let new_bank =
Bank::new_from_parent(&parent_bank, parent_bank.collector_id(), *root);
parent_bank = bank_forks.write().unwrap().insert(new_bank);
parent_bank.squash();
bank_forks.write().unwrap().set_root(*root, &None);
let parent = if i > 0 { roots[i - 1] } else { 1 };
fill_blocktree_slot_with_ticks(&blocktree, 5, *root, parent, Hash::default());
}
blocktree.set_roots(&roots).unwrap();
let new_bank = Bank::new_from_parent(
&parent_bank,
parent_bank.collector_id(),
roots.iter().max().unwrap() + 1,
);
bank_forks.write().unwrap().insert(new_bank);
}
let bank = bank_forks.read().unwrap().working_bank();
let leader_pubkey = *bank.collector_id();
let exit = Arc::new(AtomicBool::new(false));
let validator_exit = create_validator_exit(&exit);
@@ -1049,7 +1201,7 @@ pub mod tests {
let request_processor = Arc::new(RwLock::new(JsonRpcRequestProcessor::new(
JsonRpcConfig::default(),
bank_forks,
bank_forks.clone(),
block_commitment_cache.clone(),
blocktree,
StorageState::default(),
@@ -1079,9 +1231,11 @@ pub mod tests {
io,
meta,
bank,
bank_forks,
blockhash,
alice,
leader_pubkey,
leader_vote_keypair,
block_commitment_cache,
confirmed_block_signatures,
}
@@ -1092,7 +1246,7 @@ pub mod tests {
let bob_pubkey = Pubkey::new_rand();
let exit = Arc::new(AtomicBool::new(false));
let validator_exit = create_validator_exit(&exit);
let (bank_forks, alice) = new_bank_forks();
let (bank_forks, alice, _) = new_bank_forks();
let bank = bank_forks.read().unwrap().working_bank();
let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default()));
let ledger_path = get_tmp_ledger_path!();
@@ -1298,6 +1452,62 @@ pub mod tests {
assert_eq!(epoch_schedule, *bank.epoch_schedule());
}
#[test]
fn test_rpc_get_leader_schedule() {
let bob_pubkey = Pubkey::new_rand();
let RpcHandler { io, meta, bank, .. } = start_rpc_handler_with_tx(&bob_pubkey);
for req in [
r#"{"jsonrpc":"2.0","id":1,"method":"getLeaderSchedule", "params": [0]}"#,
r#"{"jsonrpc":"2.0","id":1,"method":"getLeaderSchedule"}"#,
]
.iter()
{
let rep = io.handle_request_sync(&req, meta.clone());
let res: Response = serde_json::from_str(&rep.expect("actual response"))
.expect("actual response deserialization");
let schedule: Option<RpcLeaderSchedule> = if let Response::Single(res) = res {
if let Output::Success(res) = res {
serde_json::from_value(res.result).unwrap()
} else {
panic!("Expected success for {}", req);
}
} else {
panic!("Expected single response");
};
let schedule = schedule.expect("leader schedule");
let bob_schedule = schedule
.get(&bank.collector_id().to_string())
.expect("leader not in the leader schedule");
assert_eq!(
bob_schedule.len(),
solana_ledger::leader_schedule_utils::leader_schedule(bank.epoch(), &bank)
.unwrap()
.get_slot_leaders()
.len()
);
}
let req = r#"{"jsonrpc":"2.0","id":1,"method":"getLeaderSchedule", "params": [42424242]}"#;
let rep = io.handle_request_sync(&req, meta);
let res: Response = serde_json::from_str(&rep.expect("actual response"))
.expect("actual response deserialization");
let schedule: Option<RpcLeaderSchedule> = if let Response::Single(res) = res {
if let Output::Success(res) = res {
serde_json::from_value(res.result).unwrap()
} else {
panic!("Expected success");
}
} else {
panic!("Expected single response");
};
assert_eq!(schedule, None);
}
#[test]
fn test_rpc_get_account_info() {
let bob_pubkey = Pubkey::new_rand();
@@ -1602,20 +1812,23 @@ pub mod tests {
);
}
fn new_bank_forks() -> (Arc<RwLock<BankForks>>, Keypair) {
fn new_bank_forks() -> (Arc<RwLock<BankForks>>, Keypair, Keypair) {
let GenesisConfigInfo {
mut genesis_config,
mint_keypair,
..
voting_keypair,
} = create_genesis_config(TEST_MINT_LAMPORTS);
genesis_config.rent.lamports_per_byte_year = 50;
genesis_config.rent.exemption_threshold = 2.0;
genesis_config.epoch_schedule =
EpochSchedule::custom(TEST_SLOTS_PER_EPOCH, TEST_SLOTS_PER_EPOCH, false);
let bank = Bank::new(&genesis_config);
(
Arc::new(RwLock::new(BankForks::new(bank.slot(), bank))),
mint_keypair,
voting_keypair,
)
}
@@ -1818,4 +2031,263 @@ pub mod tests {
}
}
}
#[test]
fn test_get_confirmed_blocks() {
let bob_pubkey = Pubkey::new_rand();
let roots = vec![0, 1, 3, 4, 8];
let RpcHandler { io, meta, .. } =
start_rpc_handler_with_tx_and_blocktree(&bob_pubkey, roots.clone());
let req =
format!(r#"{{"jsonrpc":"2.0","id":1,"method":"getConfirmedBlocks","params":[0]}}"#);
let res = io.handle_request_sync(&req, meta.clone());
let result: Value = serde_json::from_str(&res.expect("actual response"))
.expect("actual response deserialization");
let confirmed_blocks: Vec<Slot> = serde_json::from_value(result["result"].clone()).unwrap();
assert_eq!(confirmed_blocks, roots);
let req =
format!(r#"{{"jsonrpc":"2.0","id":1,"method":"getConfirmedBlocks","params":[2]}}"#);
let res = io.handle_request_sync(&req, meta.clone());
let result: Value = serde_json::from_str(&res.expect("actual response"))
.expect("actual response deserialization");
let confirmed_blocks: Vec<Slot> = serde_json::from_value(result["result"].clone()).unwrap();
assert_eq!(confirmed_blocks, vec![3, 4, 8]);
let req =
format!(r#"{{"jsonrpc":"2.0","id":1,"method":"getConfirmedBlocks","params":[0, 4]}}"#);
let res = io.handle_request_sync(&req, meta.clone());
let result: Value = serde_json::from_str(&res.expect("actual response"))
.expect("actual response deserialization");
let confirmed_blocks: Vec<Slot> = serde_json::from_value(result["result"].clone()).unwrap();
assert_eq!(confirmed_blocks, vec![0, 1, 3, 4]);
let req =
format!(r#"{{"jsonrpc":"2.0","id":1,"method":"getConfirmedBlocks","params":[0, 7]}}"#);
let res = io.handle_request_sync(&req, meta.clone());
let result: Value = serde_json::from_str(&res.expect("actual response"))
.expect("actual response deserialization");
let confirmed_blocks: Vec<Slot> = serde_json::from_value(result["result"].clone()).unwrap();
assert_eq!(confirmed_blocks, vec![0, 1, 3, 4]);
let req =
format!(r#"{{"jsonrpc":"2.0","id":1,"method":"getConfirmedBlocks","params":[9, 11]}}"#);
let res = io.handle_request_sync(&req, meta);
let result: Value = serde_json::from_str(&res.expect("actual response"))
.expect("actual response deserialization");
let confirmed_blocks: Vec<Slot> = serde_json::from_value(result["result"].clone()).unwrap();
assert_eq!(confirmed_blocks, Vec::<Slot>::new());
}
#[test]
fn test_get_block_time() {
let bob_pubkey = Pubkey::new_rand();
let RpcHandler { io, meta, bank, .. } = start_rpc_handler_with_tx(&bob_pubkey);
let slot_duration = slot_duration_from_slots_per_year(bank.slots_per_year());
let slot = 100;
let req = format!(
r#"{{"jsonrpc":"2.0","id":1,"method":"getBlockTime","params":[{}]}}"#,
slot
);
let res = io.handle_request_sync(&req, meta.clone());
let expected = format!(
r#"{{"jsonrpc":"2.0","result":{},"id":1}}"#,
(slot * slot_duration).as_secs()
);
let expected: Response =
serde_json::from_str(&expected).expect("expected response deserialization");
let result: Response = serde_json::from_str(&res.expect("actual response"))
.expect("actual response deserialization");
assert_eq!(expected, result);
let slot = 12345;
let req = format!(
r#"{{"jsonrpc":"2.0","id":1,"method":"getBlockTime","params":[{}]}}"#,
slot
);
let res = io.handle_request_sync(&req, meta.clone());
let expected = format!(
r#"{{"jsonrpc":"2.0","result":{},"id":1}}"#,
(slot * slot_duration).as_secs()
);
let expected: Response =
serde_json::from_str(&expected).expect("expected response deserialization");
let result: Response = serde_json::from_str(&res.expect("actual response"))
.expect("actual response deserialization");
assert_eq!(expected, result);
let slot = 123450000000000000u64;
let req = format!(
r#"{{"jsonrpc":"2.0","id":1,"method":"getBlockTime","params":[{}]}}"#,
slot
);
let res = io.handle_request_sync(&req, meta);
let expected = format!(r#"{{"jsonrpc":"2.0","result":null,"id":1}}"#);
let expected: Response =
serde_json::from_str(&expected).expect("expected response deserialization");
let result: Response = serde_json::from_str(&res.expect("actual response"))
.expect("actual response deserialization");
assert_eq!(expected, result);
}
#[test]
fn test_get_vote_accounts() {
let RpcHandler {
io,
meta,
mut bank,
bank_forks,
alice,
leader_vote_keypair,
..
} = start_rpc_handler_with_tx(&Pubkey::new_rand());
assert_eq!(bank.vote_accounts().len(), 1);
// Create a vote account with no stake.
let alice_vote_keypair = Keypair::new();
let instructions = vote_instruction::create_account(
&alice.pubkey(),
&alice_vote_keypair.pubkey(),
&VoteInit {
node_pubkey: alice.pubkey(),
authorized_voter: alice_vote_keypair.pubkey(),
authorized_withdrawer: alice_vote_keypair.pubkey(),
commission: 0,
},
bank.get_minimum_balance_for_rent_exemption(VoteState::size_of()),
);
let transaction = Transaction::new_signed_instructions(
&[&alice, &alice_vote_keypair],
instructions,
bank.last_blockhash(),
);
bank.process_transaction(&transaction)
.expect("process transaction");
assert_eq!(bank.vote_accounts().len(), 2);
// Check getVoteAccounts: the bootstrap leader vote account will be delinquent as it has
// stake but has never voted, and the vote account with no stake should not be present.
{
let req = format!(r#"{{"jsonrpc":"2.0","id":1,"method":"getVoteAccounts"}}"#);
let res = io.handle_request_sync(&req, meta.clone());
let result: Value = serde_json::from_str(&res.expect("actual response"))
.expect("actual response deserialization");
let vote_account_status: RpcVoteAccountStatus =
serde_json::from_value(result["result"].clone()).unwrap();
assert!(vote_account_status.current.is_empty());
assert_eq!(vote_account_status.delinquent.len(), 1);
for vote_account_info in vote_account_status.delinquent {
assert_ne!(vote_account_info.activated_stake, 0);
}
}
// Advance bank to the next epoch
for _ in 0..TEST_SLOTS_PER_EPOCH {
bank.freeze();
// Votes
let instructions = vec![
vote_instruction::vote(
&leader_vote_keypair.pubkey(),
&leader_vote_keypair.pubkey(),
Vote {
slots: vec![bank.slot()],
hash: bank.hash(),
},
),
vote_instruction::vote(
&alice_vote_keypair.pubkey(),
&alice_vote_keypair.pubkey(),
Vote {
slots: vec![bank.slot()],
hash: bank.hash(),
},
),
];
bank = bank_forks.write().unwrap().insert(Bank::new_from_parent(
&bank,
&Pubkey::default(),
bank.slot() + 1,
));
let transaction = Transaction::new_signed_with_payer(
instructions,
Some(&alice.pubkey()),
&[&alice, &leader_vote_keypair, &alice_vote_keypair],
bank.last_blockhash(),
);
bank.process_transaction(&transaction)
.expect("process transaction");
}
let req = format!(
r#"{{"jsonrpc":"2.0","id":1,"method":"getVoteAccounts","params":{}}}"#,
json!([CommitmentConfig::recent()])
);
let res = io.handle_request_sync(&req, meta.clone());
let result: Value = serde_json::from_str(&res.expect("actual response"))
.expect("actual response deserialization");
let vote_account_status: RpcVoteAccountStatus =
serde_json::from_value(result["result"].clone()).unwrap();
// The vote account with no stake should not be present.
assert!(vote_account_status.delinquent.is_empty());
// Both accounts should be active and have voting history.
assert_eq!(vote_account_status.current.len(), 2);
//let leader_info = &vote_account_status.current[0];
let leader_info = vote_account_status
.current
.iter()
.find(|x| x.vote_pubkey == leader_vote_keypair.pubkey().to_string())
.unwrap();
assert_ne!(leader_info.activated_stake, 0);
// Subtract one because the last vote always carries over to the next epoch
let expected_credits = TEST_SLOTS_PER_EPOCH - MAX_LOCKOUT_HISTORY as u64 - 1;
assert_eq!(leader_info.epoch_credits, vec![(0, expected_credits, 0)]);
// Advance bank with no voting
bank.freeze();
bank_forks.write().unwrap().insert(Bank::new_from_parent(
&bank,
&Pubkey::default(),
bank.slot() + TEST_SLOTS_PER_EPOCH,
));
// The leader vote account should now be delinquent, and the other vote account disappears
// because it's inactive with no stake
{
let req = format!(
r#"{{"jsonrpc":"2.0","id":1,"method":"getVoteAccounts","params":{}}}"#,
json!([CommitmentConfig::recent()])
);
let res = io.handle_request_sync(&req, meta.clone());
let result: Value = serde_json::from_str(&res.expect("actual response"))
.expect("actual response deserialization");
let vote_account_status: RpcVoteAccountStatus =
serde_json::from_value(result["result"].clone()).unwrap();
assert!(vote_account_status.current.is_empty());
assert_eq!(vote_account_status.delinquent.len(), 1);
for vote_account_info in vote_account_status.delinquent {
assert_eq!(
vote_account_info.vote_pubkey,
leader_vote_keypair.pubkey().to_string()
);
}
}
}
}
|
||||
use crate::streamer::{self, PacketReceiver};
|
||||
use crossbeam_channel::Sender as CrossbeamSender;
|
||||
use solana_measure::measure::Measure;
|
||||
use solana_metrics::{datapoint_debug, inc_new_counter_info};
|
||||
use solana_metrics::datapoint_debug;
|
||||
use solana_perf::perf_libs;
|
||||
use solana_sdk::timing;
|
||||
use std::sync::mpsc::{Receiver, RecvTimeoutError};
|
||||
@ -65,7 +65,6 @@ impl SigVerifyStage {
|
||||
RECV_BATCH_MAX_CPU
|
||||
},
|
||||
)?;
|
||||
inc_new_counter_info!("sigverify_stage-packets_received", len);
|
||||
|
||||
let mut verify_batch_time = Measure::start("sigverify_batch_time");
|
||||
let batch_len = batch.len();
|
||||
@ -77,7 +76,6 @@ impl SigVerifyStage {
|
||||
);
|
||||
|
||||
let verified_batch = verifier.verify_batch(batch);
|
||||
inc_new_counter_info!("sigverify_stage-verified_packets_send", len);
|
||||
|
||||
for v in verified_batch {
|
||||
if sendr.send(vec![v]).is_err() {
|
||||
@ -87,10 +85,6 @@ impl SigVerifyStage {
|
||||
|
||||
verify_batch_time.stop();
|
||||
|
||||
inc_new_counter_info!(
|
||||
"sigverify_stage-time_ms",
|
||||
(verify_batch_time.as_ms() + recv_time) as usize
|
||||
);
|
||||
debug!(
|
||||
"@{:?} verifier: done. batches: {} total verify time: {:?} id: {} verified: {} v/s {}",
|
||||
timing::timestamp(),
|
||||
@ -103,9 +97,10 @@ impl SigVerifyStage {
|
||||
|
||||
datapoint_debug!(
|
||||
"sigverify_stage-total_verify_time",
|
||||
("batch_len", batch_len, i64),
|
||||
("len", len, i64),
|
||||
("total_time_ms", verify_batch_time.as_ms(), i64)
|
||||
("num_batches", batch_len, i64),
|
||||
("num_packets", len, i64),
|
||||
("verify_time_ms", verify_batch_time.as_ms(), i64),
|
||||
("recv_time", recv_time, i64),
|
||||
);
|
||||
|
||||
Ok(())
|
||||
|
@@ -87,7 +87,14 @@ impl SnapshotPackagerService {
// `storage_path` - The file path where the AppendVec itself is located
// `output_path` - The directory where the AppendVec will be placed in the staging directory.
symlink::symlink_dir(storage_path, output_path)?;
let storage_path =
fs::canonicalize(storage_path).expect("Could not get absolute path for accounts");
symlink::symlink_dir(storage_path, &output_path)?;
if !output_path.is_file() {
return Err(Self::get_io_error(
"Error trying to generate snapshot archive: storage path symlink is invalid",
));
}
}
// Tar the staging directory into the archive at `archive_path`
@@ -185,17 +192,42 @@ mod tests {
use super::*;
use solana_ledger::snapshot_utils;
use solana_runtime::accounts_db::AccountStorageEntry;
use std::fs::OpenOptions;
use std::io::Write;
use std::{
fs::{remove_dir_all, OpenOptions},
io::Write,
path::{Path, PathBuf},
};
use tempfile::TempDir;
// Create temporary placeholder directory for all test files
fn make_tmp_dir_path() -> PathBuf {
let out_dir = std::env::var("FARF_DIR").unwrap_or_else(|_| "farf".to_string());
let path = PathBuf::from(format!("{}/tmp/test_package_snapshots", out_dir));
// whack any possible collision
let _ignored = std::fs::remove_dir_all(&path);
// whack any possible collision
let _ignored = std::fs::remove_file(&path);
path
}
#[test]
fn test_package_snapshots_relative_ledger_path() {
let temp_dir = make_tmp_dir_path();
create_and_verify_snapshot(&temp_dir);
remove_dir_all(temp_dir).expect("should remove tmp dir");
}
#[test]
fn test_package_snapshots() {
// Create temporary placeholder directory for all test files
let temp_dir = TempDir::new().unwrap();
let accounts_dir = temp_dir.path().join("accounts");
let snapshots_dir = temp_dir.path().join("snapshots");
let snapshot_package_output_path = temp_dir.path().join("snapshots_output");
create_and_verify_snapshot(TempDir::new().unwrap().path())
}
fn create_and_verify_snapshot(temp_dir: &Path) {
let accounts_dir = temp_dir.join("accounts");
let snapshots_dir = temp_dir.join("snapshots");
let snapshot_package_output_path = temp_dir.join("snapshots_output");
fs::create_dir_all(&snapshot_package_output_path).unwrap();
// Create some storage entries
@@ -221,7 +253,7 @@ mod tests {
.collect();
// Create directory of hard links for snapshots
let link_snapshots_dir = tempfile::tempdir_in(temp_dir.path()).unwrap();
let link_snapshots_dir = tempfile::tempdir_in(&temp_dir).unwrap();
for snapshots_path in snapshots_paths {
let snapshot_file_name = snapshots_path.file_name().unwrap();
let link_path = link_snapshots_dir.path().join(snapshot_file_name);
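The symlink fix above resolves the accounts storage path to an absolute path before linking, so a relative ledger path no longer produces a dangling link inside the staging directory. A hedged, std-only sketch of the same idea (Unix-only, since it uses `std::os::unix::fs::symlink`; the function name is illustrative):

use std::{fs, io, path::Path};

#[cfg(unix)]
fn link_storage(storage_path: &Path, output_path: &Path) -> io::Result<()> {
    // A relative storage path would yield a dangling symlink inside the
    // staging directory, so resolve it to an absolute path first.
    let absolute = fs::canonicalize(storage_path)?;
    std::os::unix::fs::symlink(&absolute, output_path)?;
    // fs::metadata follows the link, so this fails if the target is missing.
    fs::metadata(output_path)?;
    Ok(())
}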
@@ -41,7 +41,9 @@ fn recv_loop(
}
recv_count += len;
call_count += 1;
channel.send(msgs)?;
if len > 0 {
channel.send(msgs)?;
}
break;
}
}
@@ -55,9 +57,9 @@ fn recv_loop(
);
recv_count = 0;
call_count = 0;
now = Instant::now();
num_max_received = 0;
}
now = Instant::now();
}
}
@@ -100,7 +102,6 @@ pub fn recv_batch(recvr: &PacketReceiver, max_batch: usize) -> Result<(Vec<Packe
trace!("got more msgs");
len += more.packets.len();
batch.push(more);
if len > max_batch {
break;
}
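The receive loop above now forwards a batch only when it actually read packets, so downstream stages are not woken up by empty sends. The same guard in a self-contained sketch over a std mpsc channel:

use std::sync::mpsc::{SendError, Sender};

// Forward a batch only if it is non-empty; empty reads are silently dropped.
fn forward_if_nonempty<T>(channel: &Sender<Vec<T>>, msgs: Vec<T>) -> Result<(), SendError<Vec<T>>> {
    if !msgs.is_empty() {
        channel.send(msgs)?;
    }
    Ok(())
}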
@ -26,6 +26,7 @@ use solana_ledger::{
|
||||
blocktree::{Blocktree, CompletedSlotsReceiver},
|
||||
blocktree_processor::{self, BankForksInfo},
|
||||
create_new_tmp_ledger,
|
||||
leader_schedule::FixedSchedule,
|
||||
leader_schedule_cache::LeaderScheduleCache,
|
||||
};
|
||||
use solana_metrics::datapoint_info;
|
||||
@ -59,12 +60,13 @@ pub struct ValidatorConfig {
|
||||
pub transaction_status_service_disabled: bool,
|
||||
pub blockstream_unix_socket: Option<PathBuf>,
|
||||
pub storage_slots_per_turn: u64,
|
||||
pub account_paths: Option<String>,
|
||||
pub account_paths: Vec<PathBuf>,
|
||||
pub rpc_config: JsonRpcConfig,
|
||||
pub snapshot_config: Option<SnapshotConfig>,
|
||||
pub max_ledger_slots: Option<u64>,
|
||||
pub broadcast_stage_type: BroadcastStageType,
|
||||
pub partition_cfg: Option<PartitionCfg>,
|
||||
pub fixed_leader_schedule: Option<FixedSchedule>,
|
||||
}
|
||||
|
||||
impl Default for ValidatorConfig {
|
||||
@ -78,11 +80,12 @@ impl Default for ValidatorConfig {
|
||||
blockstream_unix_socket: None,
|
||||
storage_slots_per_turn: DEFAULT_SLOTS_PER_TURN,
|
||||
max_ledger_slots: None,
|
||||
account_paths: None,
|
||||
account_paths: Vec::new(),
|
||||
rpc_config: JsonRpcConfig::default(),
|
||||
snapshot_config: None,
|
||||
broadcast_stage_type: BroadcastStageType::Standard,
|
||||
partition_cfg: None,
|
||||
fixed_leader_schedule: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -182,6 +185,7 @@ impl Validator {
|
||||
config.snapshot_config.clone(),
|
||||
poh_verify,
|
||||
config.dev_halt_at_slot,
|
||||
config.fixed_leader_schedule.clone(),
|
||||
);
|
||||
|
||||
let leader_schedule_cache = Arc::new(leader_schedule_cache);
|
||||
@ -465,10 +469,11 @@ impl Validator {
|
||||
pub fn new_banks_from_blocktree(
|
||||
expected_genesis_hash: Option<Hash>,
|
||||
blocktree_path: &Path,
|
||||
account_paths: Option<String>,
|
||||
account_paths: Vec<PathBuf>,
|
||||
snapshot_config: Option<SnapshotConfig>,
|
||||
poh_verify: bool,
|
||||
dev_halt_at_slot: Option<Slot>,
|
||||
fixed_leader_schedule: Option<FixedSchedule>,
|
||||
) -> (
|
||||
Hash,
|
||||
BankForks,
|
||||
@ -506,7 +511,7 @@ pub fn new_banks_from_blocktree(
|
||||
..blocktree_processor::ProcessOptions::default()
|
||||
};
|
||||
|
||||
let (mut bank_forks, bank_forks_info, leader_schedule_cache) = bank_forks_utils::load(
|
||||
let (mut bank_forks, bank_forks_info, mut leader_schedule_cache) = bank_forks_utils::load(
|
||||
&genesis_config,
|
||||
&blocktree,
|
||||
account_paths,
|
||||
@ -518,6 +523,8 @@ pub fn new_banks_from_blocktree(
|
||||
std::process::exit(1);
|
||||
});
|
||||
|
||||
leader_schedule_cache.set_fixed_leader_schedule(fixed_leader_schedule);
|
||||
|
||||
bank_forks.set_snapshot_config(snapshot_config);
|
||||
|
||||
(
|
||||
|
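The validator.rs hunks above change account_paths from Option<String> to Vec<PathBuf>; the ledger-tool hunk later in this diff splits a comma-separated CLI value into that Vec. A small sketch of the conversion under the same assumption (the helper name here is made up for illustration):

use std::path::{Path, PathBuf};

// Hypothetical helper: turn an optional comma-separated --accounts value into
// the Vec<PathBuf> now expected by ValidatorConfig and new_banks_from_blocktree,
// defaulting to <ledger>/accounts as ledger-tool does.
fn parse_account_paths(arg: Option<&str>, ledger_path: &Path) -> Vec<PathBuf> {
    match arg {
        Some(paths) => paths.split(',').map(PathBuf::from).collect(),
        None => vec![ledger_path.join("accounts")],
    }
}

fn main() {
    let ledger = Path::new("/tmp/ledger");
    assert_eq!(
        parse_account_paths(Some("/a,/b"), ledger),
        vec![PathBuf::from("/a"), PathBuf::from("/b")]
    );
    assert_eq!(parse_account_paths(None, ledger), vec![ledger.join("accounts")]);
}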
@ -52,7 +52,7 @@ mod tests {
|
||||
let genesis_config_info = create_genesis_config(10_000);
|
||||
let bank0 = Bank::new_with_paths(
|
||||
&genesis_config_info.genesis_config,
|
||||
Some(accounts_dir.path().to_str().unwrap().to_string()),
|
||||
vec![accounts_dir.path().to_path_buf()],
|
||||
);
|
||||
bank0.freeze();
|
||||
let mut bank_forks = BankForks::new(0, bank0);
|
||||
@ -73,7 +73,7 @@ mod tests {
|
||||
}
|
||||
}
|
||||
|
||||
fn restore_from_snapshot(old_bank_forks: &BankForks, account_paths: String) {
|
||||
fn restore_from_snapshot(old_bank_forks: &BankForks, account_paths: Vec<PathBuf>) {
|
||||
let (snapshot_path, snapshot_package_output_path) = old_bank_forks
|
||||
.snapshot_config
|
||||
.as_ref()
|
||||
@ -81,7 +81,7 @@ mod tests {
|
||||
.unwrap();
|
||||
|
||||
let deserialized_bank = snapshot_utils::bank_from_archive(
|
||||
account_paths,
|
||||
&account_paths,
|
||||
&old_bank_forks
|
||||
.snapshot_config
|
||||
.as_ref()
|
||||
@ -151,10 +151,7 @@ mod tests {
|
||||
.unwrap();
|
||||
SnapshotPackagerService::package_snapshots(&snapshot_package).unwrap();
|
||||
|
||||
restore_from_snapshot(
|
||||
bank_forks,
|
||||
accounts_dir.path().to_str().unwrap().to_string(),
|
||||
);
|
||||
restore_from_snapshot(bank_forks, vec![accounts_dir.path().to_path_buf()]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@ -237,7 +234,7 @@ mod tests {
|
||||
let key1 = Keypair::new().pubkey();
|
||||
let tx = system_transaction::transfer(&mint_keypair, &key1, 1, genesis_config.hash());
|
||||
assert_eq!(bank.process_transaction(&tx), Ok(()));
|
||||
bank.freeze();
|
||||
bank.squash();
|
||||
bank_forks.insert(bank);
|
||||
|
||||
let package_sender = {
|
||||
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-crate-features"
|
||||
version = "0.21.0"
|
||||
version = "0.21.6"
|
||||
description = "Solana Crate Features"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-drone"
|
||||
version = "0.21.0"
|
||||
version = "0.21.6"
|
||||
description = "Solana Drone"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -19,10 +19,10 @@ clap = "2.33"
|
||||
log = "0.4.8"
|
||||
serde = "1.0.102"
|
||||
serde_derive = "1.0.102"
|
||||
solana-clap-utils = { path = "../clap-utils", version = "0.21.0" }
|
||||
solana-logger = { path = "../logger", version = "0.21.0" }
|
||||
solana-metrics = { path = "../metrics", version = "0.21.0" }
|
||||
solana-sdk = { path = "../sdk", version = "0.21.0" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "0.21.6" }
|
||||
solana-logger = { path = "../logger", version = "0.21.6" }
|
||||
solana-metrics = { path = "../metrics", version = "0.21.6" }
|
||||
solana-sdk = { path = "../sdk", version = "0.21.6" }
|
||||
tokio = "0.1"
|
||||
tokio-codec = "0.1"
|
||||
|
||||
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-fixed-buf"
|
||||
version = "0.21.0"
|
||||
version = "0.21.6"
|
||||
description = "A fixed-size byte array that supports bincode serde"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-genesis-programs"
|
||||
version = "0.21.0"
|
||||
version = "0.21.6"
|
||||
description = "Solana genesis programs"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -10,16 +10,16 @@ edition = "2018"
|
||||
|
||||
[dependencies]
|
||||
log = { version = "0.4.8" }
|
||||
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "0.21.0" }
|
||||
solana-budget-program = { path = "../programs/budget", version = "0.21.0" }
|
||||
solana-config-program = { path = "../programs/config", version = "0.21.0" }
|
||||
solana-exchange-program = { path = "../programs/exchange", version = "0.21.0" }
|
||||
solana-runtime = { path = "../runtime", version = "0.21.0" }
|
||||
solana-sdk = { path = "../sdk", version = "0.21.0" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "0.21.0" }
|
||||
solana-storage-program = { path = "../programs/storage", version = "0.21.0" }
|
||||
solana-vest-program = { path = "../programs/vest", version = "0.21.0" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "0.21.0" }
|
||||
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "0.21.6" }
|
||||
solana-budget-program = { path = "../programs/budget", version = "0.21.6" }
|
||||
solana-config-program = { path = "../programs/config", version = "0.21.6" }
|
||||
solana-exchange-program = { path = "../programs/exchange", version = "0.21.6" }
|
||||
solana-runtime = { path = "../runtime", version = "0.21.6" }
|
||||
solana-sdk = { path = "../sdk", version = "0.21.6" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "0.21.6" }
|
||||
solana-storage-program = { path = "../programs/storage", version = "0.21.6" }
|
||||
solana-vest-program = { path = "../programs/vest", version = "0.21.6" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "0.21.6" }
|
||||
|
||||
[lib]
|
||||
crate-type = ["lib"]
|
||||
|
@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
edition = "2018"
|
||||
name = "solana-genesis"
|
||||
description = "Blockchain, Rebuilt for Scale"
|
||||
version = "0.21.0"
|
||||
version = "0.21.6"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@ -16,11 +16,11 @@ serde = "1.0.102"
|
||||
serde_derive = "1.0.102"
|
||||
serde_json = "1.0.41"
|
||||
serde_yaml = "0.8.11"
|
||||
solana-clap-utils = { path = "../clap-utils", version = "0.21.0" }
|
||||
solana-genesis-programs = { path = "../genesis-programs", version = "0.21.0" }
|
||||
solana-ledger = { path = "../ledger", version = "0.21.0" }
|
||||
solana-sdk = { path = "../sdk", version = "0.21.0" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "0.21.0" }
|
||||
solana-storage-program = { path = "../programs/storage", version = "0.21.0" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "0.21.0" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "0.21.6" }
|
||||
solana-genesis-programs = { path = "../genesis-programs", version = "0.21.6" }
|
||||
solana-ledger = { path = "../ledger", version = "0.21.6" }
|
||||
solana-sdk = { path = "../sdk", version = "0.21.6" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "0.21.6" }
|
||||
solana-storage-program = { path = "../programs/storage", version = "0.21.6" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "0.21.6" }
|
||||
tempfile = "3.1.0"
|
||||
|
@ -24,6 +24,7 @@ impl AddressGenerator {
|
||||
.as_ref(),
|
||||
)
|
||||
}
|
||||
#[allow(clippy::should_implement_trait)]
|
||||
pub fn next(&mut self) -> Pubkey {
|
||||
let nth = self.nth;
|
||||
self.nth += 1;
|
||||
|
File diff suppressed because it is too large
@ -1,3 +1,9 @@
|
||||
pub mod address_generator;
|
||||
pub mod genesis_accounts;
|
||||
pub mod stakes;
|
||||
pub mod unlocks;
|
||||
pub mod validators;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
/// An account where the data is encoded as a Base64 string.
|
||||
|
@ -1,14 +1,9 @@
|
||||
//! A command-line executable for generating the chain's genesis config.
|
||||
|
||||
mod address_generator;
|
||||
mod genesis_accounts;
|
||||
mod stakes;
|
||||
mod unlocks;
|
||||
|
||||
use crate::genesis_accounts::add_genesis_accounts;
|
||||
use clap::{crate_description, crate_name, value_t, value_t_or_exit, App, Arg, ArgMatches};
|
||||
use solana_clap_utils::input_parsers::pubkey_of;
|
||||
use solana_genesis::Base64Account;
|
||||
use solana_clap_utils::input_validators::is_valid_percentage;
|
||||
use solana_genesis::{genesis_accounts::add_genesis_accounts, Base64Account};
|
||||
use solana_ledger::{blocktree::create_new_ledger, poh::compute_hashes_per_tick};
|
||||
use solana_sdk::{
|
||||
account::Account,
|
||||
@ -27,7 +22,15 @@ use solana_sdk::{
|
||||
use solana_stake_program::stake_state;
|
||||
use solana_storage_program::storage_contract;
|
||||
use solana_vote_program::vote_state;
|
||||
use std::{collections::HashMap, error, fs::File, io, path::PathBuf, str::FromStr, time::Duration};
|
||||
use std::{
|
||||
collections::{BTreeMap, HashMap},
|
||||
error,
|
||||
fs::File,
|
||||
io,
|
||||
path::PathBuf,
|
||||
str::FromStr,
|
||||
time::Duration,
|
||||
};
|
||||
|
||||
pub enum AccountFileFormat {
|
||||
Pubkey,
|
||||
@ -116,7 +119,7 @@ fn main() -> Result<(), Box<dyn error::Error>> {
|
||||
)
|
||||
};
|
||||
let default_target_tick_duration =
|
||||
&timing::duration_as_ms(&PohConfig::default().target_tick_duration).to_string();
|
||||
timing::duration_as_us(&PohConfig::default().target_tick_duration);
|
||||
let default_ticks_per_slot = &clock::DEFAULT_TICKS_PER_SLOT.to_string();
|
||||
let default_operating_mode = "softlaunch";
|
||||
|
||||
@ -161,7 +164,6 @@ fn main() -> Result<(), Box<dyn error::Error>> {
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("bootstrap_vote_pubkey_file")
|
||||
.short("s")
|
||||
.long("bootstrap-vote-pubkey")
|
||||
.value_name("BOOTSTRAP VOTE PUBKEY")
|
||||
.takes_value(true)
|
||||
@ -170,13 +172,19 @@ fn main() -> Result<(), Box<dyn error::Error>> {
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("bootstrap_stake_pubkey_file")
|
||||
.short("k")
|
||||
.long("bootstrap-stake-pubkey")
|
||||
.value_name("BOOTSTRAP STAKE PUBKEY")
|
||||
.takes_value(true)
|
||||
.required(true)
|
||||
.help("Path to file containing the bootstrap leader's staking pubkey"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("bootstrap_stake_authorized_pubkey_file")
|
||||
.long("bootstrap-stake-authorized-pubkey")
|
||||
.value_name("BOOTSTRAP STAKE AUTHORIZED PUBKEY")
|
||||
.takes_value(true)
|
||||
.help("Path to file containing the pubkey authorized to manage the bootstrap leader's stake [default: --bootstrap-leader-pubkey]"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("bootstrap_storage_pubkey_file")
|
||||
.long("bootstrap-storage-pubkey")
|
||||
@ -241,7 +249,8 @@ fn main() -> Result<(), Box<dyn error::Error>> {
|
||||
.value_name("NUMBER")
|
||||
.takes_value(true)
|
||||
.default_value(default_rent_burn_percentage)
|
||||
.help("amount of rent to burn, as a fraction of std::u8::MAX."),
|
||||
.help("percentage of collected rent to burn")
|
||||
.validator(is_valid_percentage),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("target_signatures_per_slot")
|
||||
@ -261,7 +270,6 @@ fn main() -> Result<(), Box<dyn error::Error>> {
|
||||
.long("target-tick-duration")
|
||||
.value_name("MILLIS")
|
||||
.takes_value(true)
|
||||
.default_value(default_target_tick_duration)
|
||||
.help("The target tick rate of the cluster in milliseconds"),
|
||||
)
|
||||
.arg(
|
||||
@ -314,7 +322,7 @@ fn main() -> Result<(), Box<dyn error::Error>> {
|
||||
)
|
||||
.get_matches();
|
||||
|
||||
let faucet_lamports = value_t!(matches, "faucet_lamports", u64);
|
||||
let faucet_lamports = value_t!(matches, "faucet_lamports", u64).unwrap_or(0);
|
||||
let ledger_path = PathBuf::from(matches.value_of("ledger_path").unwrap());
|
||||
let bootstrap_leader_lamports = value_t_or_exit!(matches, "bootstrap_leader_lamports", u64);
|
||||
let bootstrap_leader_stake_lamports =
|
||||
@ -323,6 +331,8 @@ fn main() -> Result<(), Box<dyn error::Error>> {
|
||||
let bootstrap_leader_pubkey = required_pubkey(&matches, "bootstrap_leader_pubkey_file")?;
|
||||
let bootstrap_vote_pubkey = required_pubkey(&matches, "bootstrap_vote_pubkey_file")?;
|
||||
let bootstrap_stake_pubkey = required_pubkey(&matches, "bootstrap_stake_pubkey_file")?;
|
||||
let bootstrap_stake_authorized_pubkey =
|
||||
pubkey_of(&matches, "bootstrap_stake_authorized_pubkey_file");
|
||||
let bootstrap_storage_pubkey = pubkey_of(&matches, "bootstrap_storage_pubkey_file");
|
||||
let faucet_pubkey = pubkey_of(&matches, "faucet_pubkey_file");
|
||||
|
||||
@ -336,14 +346,16 @@ fn main() -> Result<(), Box<dyn error::Error>> {
|
||||
vote_state::create_account(&bootstrap_vote_pubkey, &bootstrap_leader_pubkey, 0, 1);
|
||||
|
||||
let bootstrap_leader_stake_account = stake_state::create_account(
|
||||
&bootstrap_leader_pubkey,
|
||||
bootstrap_stake_authorized_pubkey
|
||||
.as_ref()
|
||||
.unwrap_or(&bootstrap_leader_pubkey),
|
||||
&bootstrap_vote_pubkey,
|
||||
&bootstrap_leader_vote_account,
|
||||
&rent,
|
||||
bootstrap_leader_stake_lamports,
|
||||
);
|
||||
|
||||
let mut accounts = vec![
|
||||
let mut accounts: BTreeMap<Pubkey, Account> = [
|
||||
// node needs an account to issue votes from
|
||||
(
|
||||
bootstrap_leader_pubkey,
|
||||
@ -353,13 +365,16 @@ fn main() -> Result<(), Box<dyn error::Error>> {
|
||||
(bootstrap_vote_pubkey, bootstrap_leader_vote_account),
|
||||
// bootstrap leader stake
|
||||
(bootstrap_stake_pubkey, bootstrap_leader_stake_account),
|
||||
];
|
||||
]
|
||||
.iter()
|
||||
.cloned()
|
||||
.collect();
|
||||
|
||||
if let Some(bootstrap_storage_pubkey) = bootstrap_storage_pubkey {
|
||||
accounts.push((
|
||||
accounts.insert(
|
||||
bootstrap_storage_pubkey,
|
||||
storage_contract::create_validator_storage_account(bootstrap_leader_pubkey, 1),
|
||||
));
|
||||
);
|
||||
}
|
||||
|
||||
let ticks_per_slot = value_t_or_exit!(matches, "ticks_per_slot", u64);
|
||||
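The hunk above replaces the Vec of (Pubkey, Account) pairs with a BTreeMap keyed by pubkey, so the storage account is added with a keyed insert and a repeated pubkey cannot produce a duplicate entry. A minimal std-only illustration, with strings and lamport counts standing in for Pubkey and Account:

use std::collections::BTreeMap;

fn main() {
    // Built the same way as the `[ ... ].iter().cloned().collect()` above.
    let mut accounts: BTreeMap<&str, u64> = [("leader", 500), ("vote", 1), ("stake", 1)]
        .iter()
        .cloned()
        .collect();

    // `insert` replaces any existing entry instead of appending a second one,
    // and iteration order over a BTreeMap is deterministic.
    accounts.insert("storage", 1);
    accounts.insert("vote", 2);

    assert_eq!(accounts.len(), 4);
    assert_eq!(accounts["vote"], 2);
}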
@ -370,8 +385,11 @@ fn main() -> Result<(), Box<dyn error::Error>> {
|
||||
);
|
||||
|
||||
let mut poh_config = PohConfig::default();
|
||||
poh_config.target_tick_duration =
|
||||
Duration::from_millis(value_t_or_exit!(matches, "target_tick_duration", u64));
|
||||
poh_config.target_tick_duration = if matches.is_present("target_tick_duration") {
|
||||
Duration::from_micros(value_t_or_exit!(matches, "target_tick_duration", u64))
|
||||
} else {
|
||||
Duration::from_micros(default_target_tick_duration)
|
||||
};
|
||||
|
||||
let operating_mode = if matches.value_of("operating_mode").unwrap() == "development" {
|
||||
OperatingMode::Development
|
||||
@ -429,7 +447,7 @@ fn main() -> Result<(), Box<dyn error::Error>> {
|
||||
if let Some(faucet_pubkey) = faucet_pubkey {
|
||||
genesis_config.add_account(
|
||||
faucet_pubkey,
|
||||
Account::new(faucet_lamports.unwrap(), 0, &system_program::id()),
|
||||
Account::new(faucet_lamports, 0, &system_program::id()),
|
||||
);
|
||||
}
|
||||
|
||||
@ -443,12 +461,19 @@ fn main() -> Result<(), Box<dyn error::Error>> {
|
||||
}
|
||||
}
|
||||
|
||||
add_genesis_accounts(&mut genesis_config);
|
||||
let issued_lamports = genesis_config
|
||||
.accounts
|
||||
.iter()
|
||||
.map(|(_key, account)| account.lamports)
|
||||
.sum::<u64>();
|
||||
|
||||
add_genesis_accounts(&mut genesis_config, issued_lamports - faucet_lamports);
|
||||
|
||||
create_new_ledger(&ledger_path, &genesis_config)?;
|
||||
|
||||
println!(
|
||||
"Genesis mode: {:?} hashes per tick: {:?} slots_per_epoch: {} capitalization: {}SOL in {} accounts",
|
||||
"Genesis hash: {}\nOperating mode: {:?}\nHashes per tick: {:?}\nSlots per epoch: {}\nCapitalization: {} SOL in {} accounts",
|
||||
genesis_config.hash(),
|
||||
operating_mode,
|
||||
genesis_config.poh_config.hashes_per_tick,
|
||||
slots_per_epoch,
|
||||
@ -535,27 +560,28 @@ mod tests {
|
||||
assert_eq!(genesis_config.accounts.len(), genesis_accounts.len());
|
||||
|
||||
// Test account data matches
|
||||
(0..genesis_accounts.len()).for_each(|i| {
|
||||
for (pubkey_str, b64_account) in genesis_accounts.iter() {
|
||||
let pubkey = pubkey_str.parse().unwrap();
|
||||
assert_eq!(
|
||||
genesis_accounts[&genesis_config.accounts[i].0.to_string()].owner,
|
||||
genesis_config.accounts[i].1.owner.to_string()
|
||||
b64_account.owner,
|
||||
genesis_config.accounts[&pubkey].owner.to_string()
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
genesis_accounts[&genesis_config.accounts[i].0.to_string()].balance,
|
||||
genesis_config.accounts[i].1.lamports
|
||||
b64_account.balance,
|
||||
genesis_config.accounts[&pubkey].lamports
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
genesis_accounts[&genesis_config.accounts[i].0.to_string()].executable,
|
||||
genesis_config.accounts[i].1.executable
|
||||
b64_account.executable,
|
||||
genesis_config.accounts[&pubkey].executable
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
genesis_accounts[&genesis_config.accounts[i].0.to_string()].data,
|
||||
base64::encode(&genesis_config.accounts[i].1.data)
|
||||
b64_account.data,
|
||||
base64::encode(&genesis_config.accounts[&pubkey].data)
|
||||
);
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
// Test more accounts can be appended
|
||||
@ -608,54 +634,37 @@ mod tests {
|
||||
);
|
||||
|
||||
// Test old accounts are still there
|
||||
(0..genesis_accounts.len()).for_each(|i| {
|
||||
for (pubkey_str, b64_account) in genesis_accounts.iter() {
|
||||
let pubkey = &pubkey_str.parse().unwrap();
|
||||
assert_eq!(
|
||||
genesis_accounts[&genesis_config.accounts[i].0.to_string()].balance,
|
||||
genesis_config.accounts[i].1.lamports,
|
||||
b64_account.balance,
|
||||
genesis_config.accounts[&pubkey].lamports,
|
||||
);
|
||||
});
|
||||
}
|
||||
|
||||
// Test new account data matches
|
||||
(0..genesis_accounts1.len()).for_each(|i| {
|
||||
for (pubkey_str, b64_account) in genesis_accounts1.iter() {
|
||||
let pubkey = pubkey_str.parse().unwrap();
|
||||
assert_eq!(
|
||||
genesis_accounts1[&genesis_config.accounts[genesis_accounts.len() + i]
|
||||
.0
|
||||
.to_string()]
|
||||
.owner,
|
||||
genesis_config.accounts[genesis_accounts.len() + i]
|
||||
.1
|
||||
.owner
|
||||
.to_string(),
|
||||
b64_account.owner,
|
||||
genesis_config.accounts[&pubkey].owner.to_string()
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
genesis_accounts1[&genesis_config.accounts[genesis_accounts.len() + i]
|
||||
.0
|
||||
.to_string()]
|
||||
.balance,
|
||||
genesis_config.accounts[genesis_accounts.len() + i]
|
||||
.1
|
||||
.lamports,
|
||||
b64_account.balance,
|
||||
genesis_config.accounts[&pubkey].lamports,
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
genesis_accounts1[&genesis_config.accounts[genesis_accounts.len() + i]
|
||||
.0
|
||||
.to_string()]
|
||||
.executable,
|
||||
genesis_config.accounts[genesis_accounts.len() + i]
|
||||
.1
|
||||
.executable,
|
||||
b64_account.executable,
|
||||
genesis_config.accounts[&pubkey].executable,
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
genesis_accounts1[&genesis_config.accounts[genesis_accounts.len() + i]
|
||||
.0
|
||||
.to_string()]
|
||||
.data,
|
||||
base64::encode(&genesis_config.accounts[genesis_accounts.len() + i].1.data),
|
||||
b64_account.data,
|
||||
base64::encode(&genesis_config.accounts[&pubkey].data),
|
||||
);
|
||||
});
|
||||
}
|
||||
|
||||
// Test accounts from keypairs can be appended
|
||||
let account_keypairs: Vec<_> = (0..3).map(|_| Keypair::new()).collect();
|
||||
@ -710,89 +719,60 @@ mod tests {
|
||||
);
|
||||
|
||||
// Test old accounts are still there
|
||||
(0..genesis_accounts.len()).for_each(|i| {
|
||||
for (pubkey_str, b64_account) in genesis_accounts {
|
||||
let pubkey = pubkey_str.parse().unwrap();
|
||||
assert_eq!(
|
||||
genesis_accounts[&genesis_config.accounts[i].0.to_string()].balance,
|
||||
genesis_config.accounts[i].1.lamports,
|
||||
b64_account.balance,
|
||||
genesis_config.accounts[&pubkey].lamports,
|
||||
);
|
||||
});
|
||||
}
|
||||
|
||||
// Test new account data matches
|
||||
(0..genesis_accounts1.len()).for_each(|i| {
|
||||
for (pubkey_str, b64_account) in genesis_accounts1 {
|
||||
let pubkey = pubkey_str.parse().unwrap();
|
||||
assert_eq!(
|
||||
genesis_accounts1[&genesis_config.accounts[genesis_accounts.len() + i]
|
||||
.0
|
||||
.to_string()]
|
||||
.owner,
|
||||
genesis_config.accounts[genesis_accounts.len() + i]
|
||||
.1
|
||||
.owner
|
||||
.to_string(),
|
||||
b64_account.owner,
|
||||
genesis_config.accounts[&pubkey].owner.to_string(),
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
genesis_accounts1[&genesis_config.accounts[genesis_accounts.len() + i]
|
||||
.0
|
||||
.to_string()]
|
||||
.balance,
|
||||
genesis_config.accounts[genesis_accounts.len() + i]
|
||||
.1
|
||||
.lamports,
|
||||
b64_account.balance,
|
||||
genesis_config.accounts[&pubkey].lamports,
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
genesis_accounts1[&genesis_config.accounts[genesis_accounts.len() + i]
|
||||
.0
|
||||
.to_string()]
|
||||
.executable,
|
||||
genesis_config.accounts[genesis_accounts.len() + i]
|
||||
.1
|
||||
.executable,
|
||||
b64_account.executable,
|
||||
genesis_config.accounts[&pubkey].executable,
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
genesis_accounts1[&genesis_config.accounts[genesis_accounts.len() + i]
|
||||
.0
|
||||
.to_string()]
|
||||
.data,
|
||||
base64::encode(&genesis_config.accounts[genesis_accounts.len() + i].1.data),
|
||||
b64_account.data,
|
||||
base64::encode(&genesis_config.accounts[&pubkey].data),
|
||||
);
|
||||
});
|
||||
}
|
||||
|
||||
let offset = genesis_accounts.len() + genesis_accounts1.len();
|
||||
// Test account data for keypairs matches
|
||||
account_keypairs.iter().for_each(|keypair| {
|
||||
let mut i = 0;
|
||||
(offset..(offset + account_keypairs.len())).for_each(|n| {
|
||||
if keypair.pubkey() == genesis_config.accounts[n].0 {
|
||||
i = n;
|
||||
}
|
||||
});
|
||||
|
||||
assert_ne!(i, 0);
|
||||
|
||||
let keypair_str = serde_json::to_string(&keypair.to_bytes().to_vec()).unwrap();
|
||||
let pubkey = keypair.pubkey();
|
||||
assert_eq!(
|
||||
genesis_accounts2[&serde_json::to_string(&keypair.to_bytes().to_vec()).unwrap()]
|
||||
.owner,
|
||||
genesis_config.accounts[i].1.owner.to_string(),
|
||||
genesis_accounts2[&keypair_str].owner,
|
||||
genesis_config.accounts[&pubkey].owner.to_string(),
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
genesis_accounts2[&serde_json::to_string(&keypair.to_bytes().to_vec()).unwrap()]
|
||||
.balance,
|
||||
genesis_config.accounts[i].1.lamports,
|
||||
genesis_accounts2[&keypair_str].balance,
|
||||
genesis_config.accounts[&pubkey].lamports,
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
genesis_accounts2[&serde_json::to_string(&keypair.to_bytes().to_vec()).unwrap()]
|
||||
.executable,
|
||||
genesis_config.accounts[i].1.executable,
|
||||
genesis_accounts2[&keypair_str].executable,
|
||||
genesis_config.accounts[&pubkey].executable,
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
genesis_accounts2[&serde_json::to_string(&keypair.to_bytes().to_vec()).unwrap()]
|
||||
.data,
|
||||
base64::encode(&genesis_config.accounts[i].1.data),
|
||||
genesis_accounts2[&keypair_str].data,
|
||||
base64::encode(&genesis_config.accounts[&pubkey].data),
|
||||
);
|
||||
});
|
||||
}
|
||||
|
@ -8,30 +8,27 @@ use solana_sdk::{
|
||||
pubkey::Pubkey, system_program, timing::years_as_slots,
|
||||
};
|
||||
use solana_stake_program::stake_state::{
|
||||
create_lockup_stake_account, get_stake_rent_exempt_reserve, Authorized, Lockup,
|
||||
create_lockup_stake_account, Authorized, Lockup, StakeState,
|
||||
};
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct StakerInfo {
|
||||
pub name: &'static str,
|
||||
pub staker: &'static str,
|
||||
pub withdrawer: &'static str,
|
||||
pub sol: f64,
|
||||
pub custodian: &'static str,
|
||||
}
|
||||
|
||||
// lamports required to run staking operations for one year
|
||||
// the staker account needs to be rent exempt *and* carry enough
|
||||
// the staker account needs to carry enough
|
||||
// lamports to cover TX fees (delegation) for one year,
|
||||
// and we support one delegation per epoch
|
||||
fn calculate_staker_lamports(genesis_config: &GenesisConfig) -> u64 {
|
||||
genesis_config.rent.minimum_balance(0).max(1)
|
||||
+ genesis_config.fee_calculator.max_lamports_per_signature
|
||||
* genesis_config.epoch_schedule.get_epoch(years_as_slots(
|
||||
1.0,
|
||||
&genesis_config.poh_config.target_tick_duration,
|
||||
genesis_config.ticks_per_slot,
|
||||
) as Slot)
|
||||
fn calculate_staker_fees(genesis_config: &GenesisConfig, years: f64) -> u64 {
|
||||
genesis_config.fee_calculator.max_lamports_per_signature
|
||||
* genesis_config.epoch_schedule.get_epoch(years_as_slots(
|
||||
years,
|
||||
&genesis_config.poh_config.target_tick_duration,
|
||||
genesis_config.ticks_per_slot,
|
||||
) as Slot)
|
||||
}
|
||||
|
||||
/// create stake accounts for lamports with at most stake_granularity in each
|
||||
@ -45,27 +42,38 @@ pub fn create_and_add_stakes(
|
||||
// the largest each stake account should be, in lamports
|
||||
granularity: u64,
|
||||
) -> u64 {
|
||||
let authorized = Authorized {
|
||||
staker: Pubkey::new(&hex::decode(staker_info.staker).expect("hex")),
|
||||
withdrawer: Pubkey::new(&hex::decode(staker_info.withdrawer).expect("hex")),
|
||||
};
|
||||
let custodian = Pubkey::new(&hex::decode(staker_info.custodian).expect("hex"));
|
||||
let authorized = Authorized::auto(
|
||||
&staker_info
|
||||
.staker
|
||||
.parse::<Pubkey>()
|
||||
.expect("invalid staker"),
|
||||
);
|
||||
let custodian = unlock_info
|
||||
.custodian
|
||||
.parse::<Pubkey>()
|
||||
.expect("invalid custodian");
|
||||
|
||||
let total_lamports = sol_to_lamports(staker_info.sol);
|
||||
|
||||
let staker_lamports = calculate_staker_lamports(genesis_config);
|
||||
let staker_account = (
|
||||
authorized.staker,
|
||||
Account::new(staker_lamports, 0, &system_program::id()),
|
||||
);
|
||||
// staker is a system account
|
||||
let staker_rent_reserve = genesis_config.rent.minimum_balance(0).max(1);
|
||||
let staker_fees = calculate_staker_fees(genesis_config, 1.0);
|
||||
|
||||
let stakes_lamports = if !genesis_config.accounts.contains(&staker_account) {
|
||||
genesis_config.accounts.push(staker_account);
|
||||
let mut stakes_lamports = total_lamports - staker_fees;
|
||||
|
||||
total_lamports - staker_lamports
|
||||
} else {
|
||||
total_lamports
|
||||
};
|
||||
// lamports required to run staking operations for one year
|
||||
// the staker account needs to be rent exempt *and* carry enough
|
||||
// lamports to cover TX fees (delegation) for one year,
|
||||
// and we support one delegation per epoch
|
||||
// a single staker may administer any number of accounts
|
||||
genesis_config
|
||||
.accounts
|
||||
.entry(authorized.staker)
|
||||
.or_insert_with(|| {
|
||||
stakes_lamports -= staker_rent_reserve;
|
||||
Account::new(staker_rent_reserve, 0, &system_program::id())
|
||||
})
|
||||
.lamports += staker_fees;
|
||||
|
||||
// the staker account needs to be rent exempt *and* carry enough
|
||||
// lamports to cover TX fees (delegation) for one year
|
||||
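The entry()/or_insert_with pattern above creates the staker's system account once with its rent reserve and then tops it up with each grant's fee budget. A std-only sketch of that accumulation (values are made up; plain lamport counts stand in for Account):

use std::collections::BTreeMap;

fn main() {
    let mut accounts: BTreeMap<&str, u64> = BTreeMap::new();
    let staker_rent_reserve: u64 = 1_000;

    // Three grants administered by the same staker: the account is created once
    // with the rent reserve, then each grant adds its fee budget on top.
    for &staker_fees in [10u64, 20, 30].iter() {
        *accounts.entry("staker").or_insert(staker_rent_reserve) += staker_fees;
    }

    assert_eq!(accounts["staker"], 1_000 + 10 + 20 + 30);
}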
@ -82,7 +90,7 @@ pub fn create_and_add_stakes(
|
||||
|
||||
let mut address_generator = AddressGenerator::new(&authorized.staker, staker_info.name);
|
||||
|
||||
let stake_rent_exempt_reserve = get_stake_rent_exempt_reserve(&genesis_config.rent);
|
||||
let stake_rent_reserve = StakeState::get_rent_exempt_reserve(&genesis_config.rent);
|
||||
|
||||
for unlock in unlocks {
|
||||
let lamports = unlock.amount(stakes_lamports);
|
||||
@ -108,7 +116,7 @@ pub fn create_and_add_stakes(
|
||||
),
|
||||
);
|
||||
}
|
||||
if remainder <= stake_rent_exempt_reserve {
|
||||
if remainder <= stake_rent_reserve {
|
||||
genesis_config.add_account(
|
||||
address_generator.next(),
|
||||
create_lockup_stake_account(
|
||||
@ -150,11 +158,10 @@ mod tests {
|
||||
granularity: u64,
|
||||
len: usize,
|
||||
) {
|
||||
assert!(
|
||||
total_lamports
|
||||
== create_and_add_stakes(genesis_config, staker_info, unlock_info, granularity)
|
||||
assert_eq!(
|
||||
total_lamports,
|
||||
create_and_add_stakes(genesis_config, staker_info, unlock_info, granularity)
|
||||
);
|
||||
|
||||
assert_eq!(genesis_config.accounts.len(), len);
|
||||
assert_eq!(
|
||||
genesis_config
|
||||
@ -169,9 +176,41 @@ mod tests {
|
||||
.iter()
|
||||
.all(|(_pubkey, account)| account.lamports <= granularity
|
||||
|| account.lamports - granularity
|
||||
< get_stake_rent_exempt_reserve(&genesis_config.rent)));
|
||||
<= StakeState::get_rent_exempt_reserve(&genesis_config.rent)));
|
||||
}
|
||||
|
||||
// #[ignore]
|
||||
// #[test]
|
||||
// fn hex_test_keys_to_bs58() {
|
||||
// vec![
|
||||
// "ab22196afde08a090a3721eb20e3e1ea84d36e14d1a3f0815b236b300d9d33ef", // CX2sgoat51bnDgCN2YeesrTcscgVhnhWnwxtWEEEqBs4
|
||||
// "a2a7ae9098f862f4b3ba7d102d174de5e84a560444c39c035f3eeecce442eadc", // BwwM47pLHwUgjJXKQKVNiRfGhtPNWfNLH27na2HJQHhd
|
||||
// "6a56514c29f6b1de4d46164621d6bd25b337a711f569f9283c1143c7e8fb546e", // 8A6ZEEW2odkqXNjTWHNG6tUk7uj6zCzHueTyEr9pM1tH
|
||||
// "b420af728f58d9f269d6e07fbbaecf6ed6535e5348538e3f39f2710351f2b940", // D89HyaBmr2WmrTehsfkQrY23wCXcDfsFnN9gMfUXHaDd
|
||||
// "ddf2e4c81eafae2d68ac99171b066c87bddb168d6b7c07333cd951f36640163d", // FwPvDpvUmnco1CSfwXQDTbUbuhG5eP7h2vgCKYKVL7at
|
||||
// "312fa06ccf1b671b26404a34136161ed2aba9e66f248441b4fddb5c592fde560", // 4K16iBoC9kAQRT8pUEKeD2h9WEx1zsRgEmJFssXcXmqq
|
||||
// "0cbf98cd35ceff84ca72b752c32cc3eeee4f765ca1bef1140927ebf5c6e74339", // rmLpENW4V6QNeEhdJJVxo9Xt99oKgNUFZS4Y4375amW
|
||||
// "467e06fa25a9e06824eedc926ce431947ed99c728bed36be54561354c1330959", // 5kAztE3XtrpeyGZZxckSUt3ZWojNTmph1QSC9S2682z4
|
||||
// "ef1562bf9edfd0f5e62530cce4244e8de544a3a30075a2cd5c9074edfbcbe78a", // H6HMVuDR8XCw3EuhLvFG4EciVvGo76Agq1kSBL2ozoDs
|
||||
// "2ab26abb9d8131a30a4a63446125cf961ece4b926c31cce0eb84da4eac3f836e", // 3sfv8tk5ZSDBWbTkFkvFxCvJUyW5yDJUu6VMJcUARQWq
|
||||
// ]
|
||||
// .iter()
|
||||
// .for_each(|_hex| {
|
||||
// print(
|
||||
// "\n\"{}\", // {:?}",
|
||||
// hex,
|
||||
// Pubkey::new(&hex::decode(hex).unwrap())
|
||||
// );
|
||||
// });
|
||||
// println();
|
||||
// println(
|
||||
// "{:?}",
|
||||
// "P1aceHo1derPubkey11111111111111111111111111"
|
||||
// .parse::<Pubkey>()
|
||||
// .unwrap()
|
||||
// );
|
||||
//}
|
||||
|
||||
#[test]
|
||||
fn test_create_stakes() {
|
||||
// 2 unlocks
|
||||
@ -182,11 +221,12 @@ mod tests {
|
||||
..Rent::default()
|
||||
};
|
||||
|
||||
let reserve = get_stake_rent_exempt_reserve(&rent);
|
||||
let reserve = StakeState::get_rent_exempt_reserve(&rent);
|
||||
let staker_reserve = rent.minimum_balance(0);
|
||||
|
||||
// verify that a small remainder ends up in the last stake
|
||||
let granularity = reserve;
|
||||
let total_lamports = reserve + reserve * 2 + 1;
|
||||
let total_lamports = staker_reserve + reserve * 2 + 1;
|
||||
create_and_check_stakes(
|
||||
&mut GenesisConfig {
|
||||
rent,
|
||||
@ -194,16 +234,15 @@ mod tests {
|
||||
},
|
||||
&StakerInfo {
|
||||
name: "fun",
|
||||
staker: "cafebabedeadbeef000000000000000000000000000000000000000000000000",
|
||||
withdrawer: "cafebabedeadbeef000000000000000000000000000000000000000000000000",
|
||||
staker: "P1aceHo1derPubkey11111111111111111111111111",
|
||||
sol: lamports_to_sol(total_lamports),
|
||||
custodian: "0000000000000000000000000000000000000000000000000000000000000000",
|
||||
},
|
||||
&UnlockInfo {
|
||||
cliff_fraction: 0.5,
|
||||
cliff_years: 0.5,
|
||||
unlocks: 1,
|
||||
unlock_years: 0.5,
|
||||
custodian: "11111111111111111111111111111111",
|
||||
},
|
||||
total_lamports,
|
||||
granularity,
|
||||
@ -212,7 +251,7 @@ mod tests {
|
||||
|
||||
// huge granularity doesn't blow up
|
||||
let granularity = std::u64::MAX;
|
||||
let total_lamports = reserve + reserve * 2 + 1;
|
||||
let total_lamports = staker_reserve + reserve * 2 + 1;
|
||||
create_and_check_stakes(
|
||||
&mut GenesisConfig {
|
||||
rent,
|
||||
@ -220,25 +259,24 @@ mod tests {
|
||||
},
|
||||
&StakerInfo {
|
||||
name: "fun",
|
||||
staker: "cafebabedeadbeef000000000000000000000000000000000000000000000000",
|
||||
withdrawer: "cafebabedeadbeef000000000000000000000000000000000000000000000000",
|
||||
staker: "P1aceHo1derPubkey11111111111111111111111111",
|
||||
sol: lamports_to_sol(total_lamports),
|
||||
custodian: "0000000000000000000000000000000000000000000000000000000000000000",
|
||||
},
|
||||
&UnlockInfo {
|
||||
cliff_fraction: 0.5,
|
||||
cliff_years: 0.5,
|
||||
unlocks: 1,
|
||||
unlock_years: 0.5,
|
||||
custodian: "11111111111111111111111111111111",
|
||||
},
|
||||
total_lamports,
|
||||
granularity,
|
||||
2 + 1,
|
||||
);
|
||||
|
||||
// exactly reserve as a remainder
|
||||
// exactly reserve as a remainder, reserve gets folded in
|
||||
let granularity = reserve * 3;
|
||||
let total_lamports = reserve + (granularity + reserve) * 2;
|
||||
let total_lamports = staker_reserve + (granularity + reserve) * 2;
|
||||
create_and_check_stakes(
|
||||
&mut GenesisConfig {
|
||||
rent,
|
||||
@ -246,16 +284,39 @@ mod tests {
|
||||
},
|
||||
&StakerInfo {
|
||||
name: "fun",
|
||||
staker: "cafebabedeadbeef000000000000000000000000000000000000000000000000",
|
||||
withdrawer: "cafebabedeadbeef000000000000000000000000000000000000000000000000",
|
||||
staker: "P1aceHo1derPubkey11111111111111111111111111",
|
||||
sol: lamports_to_sol(total_lamports),
|
||||
custodian: "0000000000000000000000000000000000000000000000000000000000000000",
|
||||
},
|
||||
&UnlockInfo {
|
||||
cliff_fraction: 0.5,
|
||||
cliff_years: 0.5,
|
||||
unlocks: 1,
|
||||
unlock_years: 0.5,
|
||||
custodian: "11111111111111111111111111111111",
|
||||
},
|
||||
total_lamports,
|
||||
granularity,
|
||||
2 + 1,
|
||||
);
|
||||
// exactly reserve + 1 as a remainder, reserve + 1 gets its own stake
|
||||
let granularity = reserve * 3;
|
||||
let total_lamports = staker_reserve + (granularity + reserve + 1) * 2;
|
||||
create_and_check_stakes(
|
||||
&mut GenesisConfig {
|
||||
rent,
|
||||
..GenesisConfig::default()
|
||||
},
|
||||
&StakerInfo {
|
||||
name: "fun",
|
||||
staker: "P1aceHo1derPubkey11111111111111111111111111",
|
||||
sol: lamports_to_sol(total_lamports),
|
||||
},
|
||||
&UnlockInfo {
|
||||
cliff_fraction: 0.5,
|
||||
cliff_years: 0.5,
|
||||
unlocks: 1,
|
||||
unlock_years: 0.5,
|
||||
custodian: "11111111111111111111111111111111",
|
||||
},
|
||||
total_lamports,
|
||||
granularity,
|
||||
|
@ -8,6 +8,7 @@ pub struct UnlockInfo {
|
||||
pub cliff_years: f64,
|
||||
pub unlocks: usize,
|
||||
pub unlock_years: f64,
|
||||
pub custodian: &'static str,
|
||||
}
|
||||
|
||||
#[derive(Debug, Default, Clone)]
|
||||
|
209
genesis/src/validators.rs
Normal file
@ -0,0 +1,209 @@
|
||||
//! validators generator
|
||||
use solana_sdk::{
|
||||
account::Account, genesis_config::GenesisConfig, native_token::sol_to_lamports, pubkey::Pubkey,
|
||||
system_program, timing::years_as_slots,
|
||||
};
|
||||
use solana_vote_program::vote_state::{self, VoteState};
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct ValidatorInfo {
|
||||
pub name: &'static str,
|
||||
pub node: &'static str,
|
||||
pub node_sol: f64,
|
||||
pub vote: &'static str,
|
||||
pub commission: u8,
|
||||
}
|
||||
|
||||
// the node's account needs to carry enough
|
||||
// lamports to cover TX fees for voting for one year,
|
||||
// validators can vote once per slot
|
||||
fn calculate_voting_fees(genesis_config: &GenesisConfig, years: f64) -> u64 {
|
||||
genesis_config.fee_calculator.max_lamports_per_signature
|
||||
* years_as_slots(
|
||||
years,
|
||||
&genesis_config.poh_config.target_tick_duration,
|
||||
genesis_config.ticks_per_slot,
|
||||
) as u64
|
||||
}
|
||||
|
||||
/// create and add vote and node id accounts for a validator
|
||||
pub fn create_and_add_validator(
|
||||
genesis_config: &mut GenesisConfig,
|
||||
// information about this validator
|
||||
validator_info: &ValidatorInfo,
|
||||
) -> u64 {
|
||||
let node: Pubkey = validator_info.node.parse().expect("invalid node");
|
||||
let vote: Pubkey = validator_info.vote.parse().expect("invalid vote");
|
||||
let node_lamports = sol_to_lamports(validator_info.node_sol);
|
||||
|
||||
// node is the system account from which votes will be issued
|
||||
let node_rent_reserve = genesis_config.rent.minimum_balance(0).max(1);
|
||||
let node_voting_fees = calculate_voting_fees(genesis_config, 1.0);
|
||||
|
||||
let vote_rent_reserve = VoteState::get_rent_exempt_reserve(&genesis_config.rent).max(1);
|
||||
|
||||
let mut total_lamports = node_voting_fees + vote_rent_reserve + node_lamports;
|
||||
|
||||
genesis_config
|
||||
.accounts
|
||||
.entry(node)
|
||||
.or_insert_with(|| {
|
||||
total_lamports += node_rent_reserve;
|
||||
Account::new(node_rent_reserve, 0, &system_program::id())
|
||||
})
|
||||
.lamports += node_voting_fees + node_lamports;
|
||||
|
||||
assert!(
|
||||
genesis_config.accounts.get(&vote).is_none(),
|
||||
"{} is already in genesis",
|
||||
vote
|
||||
);
|
||||
|
||||
genesis_config.add_account(
|
||||
vote,
|
||||
vote_state::create_account(&vote, &node, validator_info.commission, vote_rent_reserve),
|
||||
);
|
||||
|
||||
total_lamports
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use solana_sdk::rent::Rent;
|
||||
|
||||
fn create_and_check_validators(
|
||||
genesis_config: &mut GenesisConfig,
|
||||
validator_infos: &[ValidatorInfo],
|
||||
total_lamports: u64,
|
||||
len: usize,
|
||||
) {
|
||||
assert_eq!(
|
||||
validator_infos
|
||||
.iter()
|
||||
.map(|validator_info| create_and_add_validator(genesis_config, validator_info))
|
||||
.sum::<u64>(),
|
||||
total_lamports
|
||||
);
|
||||
assert_eq!(genesis_config.accounts.len(), len);
|
||||
assert_eq!(
|
||||
genesis_config
|
||||
.accounts
|
||||
.iter()
|
||||
.map(|(_pubkey, account)| account.lamports)
|
||||
.sum::<u64>(),
|
||||
total_lamports,
|
||||
);
|
||||
assert!(genesis_config
|
||||
.accounts
|
||||
.iter()
|
||||
.all(|(_pubkey, account)| account.lamports
|
||||
>= genesis_config.rent.minimum_balance(0).max(1)));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_create_one_validator() {
|
||||
let rent = Rent {
|
||||
lamports_per_byte_year: 1,
|
||||
exemption_threshold: 1.0,
|
||||
..Rent::default()
|
||||
};
|
||||
let mut genesis_config = GenesisConfig {
|
||||
rent,
|
||||
..GenesisConfig::default()
|
||||
};
|
||||
|
||||
let total_lamports = VoteState::get_rent_exempt_reserve(&rent)
|
||||
+ calculate_voting_fees(&genesis_config, 1.0)
|
||||
+ rent.minimum_balance(0);
|
||||
|
||||
create_and_check_validators(
|
||||
&mut genesis_config,
|
||||
&[ValidatorInfo {
|
||||
name: "fun",
|
||||
node: "AiTDdNHW2vNtHt7PqWMHx3B8cMPRDNgc7kMiLPJM25QC", // random pubkeys
|
||||
node_sol: 0.0,
|
||||
vote: "77TQYZTHodhnxJcSuVjUvx8GYRCkykPyHtmFTFLjj1Rc",
|
||||
commission: 50,
|
||||
}],
|
||||
total_lamports,
|
||||
2,
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_create_one_validator_two_votes() {
|
||||
let rent = Rent {
|
||||
lamports_per_byte_year: 1,
|
||||
exemption_threshold: 1.0,
|
||||
..Rent::default()
|
||||
};
|
||||
let mut genesis_config = GenesisConfig {
|
||||
rent,
|
||||
..GenesisConfig::default()
|
||||
};
|
||||
let total_lamports = VoteState::get_rent_exempt_reserve(&rent) * 2
|
||||
+ calculate_voting_fees(&genesis_config, 1.0) * 2 // two vote accounts
|
||||
+ rent.minimum_balance(0) // one node account
|
||||
+ sol_to_lamports(1.0); // 2nd vote account's ask has SOL
|
||||
|
||||
// weird case, just wanted to verify that the duplicated node account gets double fees
|
||||
create_and_check_validators(
|
||||
&mut genesis_config,
|
||||
&[
|
||||
ValidatorInfo {
|
||||
name: "fun",
|
||||
node: "3VTm54dw8w6jTTsPH4BfoV5vo6mF985JAMtNDRYcaGFc", // random pubkeys
|
||||
node_sol: 0.0,
|
||||
vote: "GTKWbUoLw3Bv7Ld92crhyXcEk9zUu3VEKfzeuWJZdnfW",
|
||||
commission: 50,
|
||||
},
|
||||
ValidatorInfo {
|
||||
name: "unfun",
|
||||
node: "3VTm54dw8w6jTTsPH4BfoV5vo6mF985JAMtNDRYcaGFc", // random pubkeys, same node
|
||||
node_sol: 1.0,
|
||||
vote: "8XrFPRULg98kSm535kFaLV4GMnK5JQSuAymyrCHXsUcy",
|
||||
commission: 50,
|
||||
},
|
||||
],
|
||||
total_lamports,
|
||||
3,
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic]
|
||||
fn test_vote_collision() {
|
||||
let rent = Rent {
|
||||
lamports_per_byte_year: 1,
|
||||
exemption_threshold: 1.0,
|
||||
..Rent::default()
|
||||
};
|
||||
let mut genesis_config = GenesisConfig {
|
||||
rent,
|
||||
..GenesisConfig::default()
|
||||
};
|
||||
|
||||
create_and_check_validators(
|
||||
&mut genesis_config,
|
||||
&[
|
||||
ValidatorInfo {
|
||||
name: "fun",
|
||||
node: "3VTm54dw8w6jTTsPH4BfoV5vo6mF985JAMtNDRYcaGFc", // random pubkeys
|
||||
node_sol: 0.0,
|
||||
vote: "GTKWbUoLw3Bv7Ld92crhyXcEk9zUu3VEKfzeuWJZdnfW",
|
||||
commission: 50,
|
||||
},
|
||||
ValidatorInfo {
|
||||
name: "unfun",
|
||||
node: "3VTm54dw8w6jTTsPH4BfoV5vo6mF985JAMtNDRYcaGFc", // random pubkeys, same node
|
||||
node_sol: 0.0,
|
||||
vote: "GTKWbUoLw3Bv7Ld92crhyXcEk9zUu3VEKfzeuWJZdnfW", // duplicate vote, bad juju
|
||||
commission: 50,
|
||||
},
|
||||
],
|
||||
0,
|
||||
0,
|
||||
);
|
||||
}
|
||||
}
|
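calculate_voting_fees() in the new validators.rs budgets one vote per slot for the requested number of years, each vote costing at most the signature fee. Rough back-of-the-envelope arithmetic with assumed parameters (not the real genesis values):

fn main() {
    // Assumed example values only:
    let max_lamports_per_signature: u64 = 5_000;
    let slots_per_year: u64 = 31_536_000 * 1_000 / 400; // 400 ms slots over a 365-day year
    let voting_fees = max_lamports_per_signature * slots_per_year;
    println!("one year of voting costs at most {} lamports", voting_fees);
}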
@ -3,19 +3,19 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
edition = "2018"
|
||||
name = "solana-gossip"
|
||||
description = "Blockchain, Rebuilt for Scale"
|
||||
version = "0.21.0"
|
||||
version = "0.21.6"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
|
||||
[dependencies]
|
||||
clap = "2.33.0"
|
||||
solana-clap-utils = { path = "../clap-utils", version = "0.21.0" }
|
||||
solana-core = { path = "../core", version = "0.21.0" }
|
||||
solana-client = { path = "../client", version = "0.21.0" }
|
||||
solana-logger = { path = "../logger", version = "0.21.0" }
|
||||
solana-net-utils = { path = "../net-utils", version = "0.21.0" }
|
||||
solana-sdk = { path = "../sdk", version = "0.21.0" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "0.21.6" }
|
||||
solana-core = { path = "../core", version = "0.21.6" }
|
||||
solana-client = { path = "../client", version = "0.21.6" }
|
||||
solana-logger = { path = "../logger", version = "0.21.6" }
|
||||
solana-net-utils = { path = "../net-utils", version = "0.21.6" }
|
||||
solana-sdk = { path = "../sdk", version = "0.21.6" }
|
||||
|
||||
|
||||
|
||||
|
@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
edition = "2018"
|
||||
name = "solana-install"
|
||||
description = "The solana cluster software installer"
|
||||
version = "0.21.0"
|
||||
version = "0.21.6"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@ -26,11 +26,11 @@ reqwest = { version = "0.9.22", default-features = false, features = ["rustls-tl
|
||||
serde = "1.0.102"
|
||||
serde_derive = "1.0.102"
|
||||
serde_yaml = "0.8.11"
|
||||
solana-clap-utils = { path = "../clap-utils", version = "0.21.0" }
|
||||
solana-client = { path = "../client", version = "0.21.0" }
|
||||
solana-config-program = { path = "../programs/config", version = "0.21.0" }
|
||||
solana-logger = { path = "../logger", version = "0.21.0" }
|
||||
solana-sdk = { path = "../sdk", version = "0.21.0" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "0.21.6" }
|
||||
solana-client = { path = "../client", version = "0.21.6" }
|
||||
solana-config-program = { path = "../programs/config", version = "0.21.6" }
|
||||
solana-logger = { path = "../logger", version = "0.21.6" }
|
||||
solana-sdk = { path = "../sdk", version = "0.21.6" }
|
||||
tar = "0.4.26"
|
||||
tempdir = "0.3.7"
|
||||
url = "2.1.0"
|
||||
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-keygen"
|
||||
version = "0.21.0"
|
||||
version = "0.21.6"
|
||||
description = "Solana key generation utility"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -14,8 +14,8 @@ clap = "2.33"
|
||||
dirs = "2.0.2"
|
||||
num_cpus = "1.11.1"
|
||||
rpassword = "4.0"
|
||||
solana-clap-utils = { path = "../clap-utils", version = "0.21.0" }
|
||||
solana-sdk = { path = "../sdk", version = "0.21.0" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "0.21.6" }
|
||||
solana-sdk = { path = "../sdk", version = "0.21.6" }
|
||||
tiny-bip39 = "0.6.2"
|
||||
|
||||
[[bin]]
|
||||
|
@ -1,15 +1,18 @@
|
||||
use bip39::{Language, Mnemonic, MnemonicType, Seed};
|
||||
use bs58;
|
||||
use clap::{
|
||||
crate_description, crate_name, values_t_or_exit, App, AppSettings, Arg, ArgMatches, SubCommand,
|
||||
crate_description, crate_name, value_t, values_t_or_exit, App, AppSettings, Arg, ArgMatches,
|
||||
SubCommand,
|
||||
};
|
||||
use num_cpus;
|
||||
use solana_clap_utils::keypair::{keypair_from_seed_phrase, SKIP_SEED_PHRASE_VALIDATION_ARG};
|
||||
use solana_clap_utils::keypair::{
|
||||
keypair_from_seed_phrase, prompt_passphrase, ASK_KEYWORD, SKIP_SEED_PHRASE_VALIDATION_ARG,
|
||||
};
|
||||
use solana_sdk::{
|
||||
pubkey::write_pubkey_file,
|
||||
signature::{
|
||||
keypair_from_seed, read_keypair, read_keypair_file, write_keypair, write_keypair_file,
|
||||
Keypair, KeypairUtil,
|
||||
Keypair, KeypairUtil, Signature,
|
||||
},
|
||||
};
|
||||
use std::{
|
||||
@ -35,6 +38,26 @@ fn check_for_overwrite(outfile: &str, matches: &ArgMatches) {
|
||||
}
|
||||
}
|
||||
|
||||
fn get_keypair_from_matches(matches: &ArgMatches) -> Result<Keypair, Box<dyn error::Error>> {
|
||||
let mut path = dirs::home_dir().expect("home directory");
|
||||
let infile = if matches.is_present("infile") {
|
||||
matches.value_of("infile").unwrap()
|
||||
} else {
|
||||
path.extend(&[".config", "solana", "id.json"]);
|
||||
path.to_str().unwrap()
|
||||
};
|
||||
|
||||
if infile == "-" {
|
||||
let mut stdin = std::io::stdin();
|
||||
read_keypair(&mut stdin)
|
||||
} else if infile == ASK_KEYWORD {
|
||||
let skip_validation = matches.is_present(SKIP_SEED_PHRASE_VALIDATION_ARG.name);
|
||||
keypair_from_seed_phrase("pubkey recovery", skip_validation, false)
|
||||
} else {
|
||||
read_keypair_file(infile)
|
||||
}
|
||||
}
|
||||
|
||||
fn output_keypair(
|
||||
keypair: &Keypair,
|
||||
outfile: &str,
|
||||
@ -55,6 +78,24 @@ fn main() -> Result<(), Box<dyn error::Error>> {
|
||||
.about(crate_description!())
|
||||
.version(solana_clap_utils::version!())
|
||||
.setting(AppSettings::SubcommandRequiredElseHelp)
|
||||
.subcommand(
|
||||
SubCommand::with_name("verify")
|
||||
.about("Verify a keypair can sign and verify a message.")
|
||||
.arg(
|
||||
Arg::with_name("infile")
|
||||
.index(1)
|
||||
.value_name("PATH")
|
||||
.takes_value(true)
|
||||
.help("Path to keypair file"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("pubkey")
|
||||
.index(2)
|
||||
.value_name("BASE58_PUBKEY")
|
||||
.takes_value(true)
|
||||
.help("Public key"),
|
||||
)
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("new")
|
||||
.about("Generate new keypair file from a passphrase and random seed phrase")
|
||||
@ -73,11 +114,26 @@ fn main() -> Result<(), Box<dyn error::Error>> {
|
||||
.long("force")
|
||||
.help("Overwrite the output file if it exists"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("word_count")
|
||||
.long("word-count")
|
||||
.possible_values(&["12", "15", "18", "21", "24"])
|
||||
.default_value("12")
|
||||
.value_name("NUM")
|
||||
.takes_value(true)
|
||||
.help("Specify the number of words that will be present in the generated seed phrase"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("no_passphrase")
|
||||
.long("no-passphrase")
|
||||
.help("Do not prompt for a passphrase"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("no_outfile")
|
||||
.long("no-outfile")
|
||||
.conflicts_with_all(&["outfile", "silent"])
|
||||
.help("Only print a seed phrase and pubkey. Do not output a keypair file"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("silent")
|
||||
.short("s")
|
||||
@ -132,6 +188,11 @@ fn main() -> Result<(), Box<dyn error::Error>> {
|
||||
.takes_value(true)
|
||||
.help("Path to keypair file"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name(SKIP_SEED_PHRASE_VALIDATION_ARG.name)
|
||||
.long(SKIP_SEED_PHRASE_VALIDATION_ARG.long)
|
||||
.help(SKIP_SEED_PHRASE_VALIDATION_ARG.help),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("outfile")
|
||||
.short("o")
|
||||
@ -176,19 +237,7 @@ fn main() -> Result<(), Box<dyn error::Error>> {
|
||||
|
||||
match matches.subcommand() {
|
||||
("pubkey", Some(matches)) => {
|
||||
let mut path = dirs::home_dir().expect("home directory");
|
||||
let infile = if matches.is_present("infile") {
|
||||
matches.value_of("infile").unwrap()
|
||||
} else {
|
||||
path.extend(&[".config", "solana", "id.json"]);
|
||||
path.to_str().unwrap()
|
||||
};
|
||||
let keypair = if infile == "-" {
|
||||
let mut stdin = std::io::stdin();
|
||||
read_keypair(&mut stdin)?
|
||||
} else {
|
||||
read_keypair_file(infile)?
|
||||
};
|
||||
let keypair = get_keypair_from_matches(matches)?;
|
||||
|
||||
if matches.is_present("outfile") {
|
||||
let outfile = matches.value_of("outfile").unwrap();
|
||||
@ -201,29 +250,37 @@ fn main() -> Result<(), Box<dyn error::Error>> {
|
||||
("new", Some(matches)) => {
|
||||
let mut path = dirs::home_dir().expect("home directory");
|
||||
let outfile = if matches.is_present("outfile") {
|
||||
matches.value_of("outfile").unwrap()
|
||||
matches.value_of("outfile")
|
||||
} else if matches.is_present("no_outfile") {
|
||||
None
|
||||
} else {
|
||||
path.extend(&[".config", "solana", "id.json"]);
|
||||
path.to_str().unwrap()
|
||||
Some(path.to_str().unwrap())
|
||||
};
|
||||
|
||||
if outfile != "-" {
|
||||
check_for_overwrite(&outfile, &matches);
|
||||
match outfile {
|
||||
Some("-") => (),
|
||||
Some(outfile) => check_for_overwrite(&outfile, &matches),
|
||||
None => (),
|
||||
}
|
||||
|
||||
let mnemonic = Mnemonic::new(MnemonicType::Words12, Language::English);
|
||||
let word_count = value_t!(matches.value_of("word_count"), usize).unwrap();
|
||||
let mnemonic_type = MnemonicType::for_word_count(word_count)?;
|
||||
let mnemonic = Mnemonic::new(mnemonic_type, Language::English);
|
||||
let passphrase = if matches.is_present("no_passphrase") {
|
||||
NO_PASSPHRASE.to_string()
|
||||
} else {
|
||||
eprintln!("Generating a new keypair");
|
||||
rpassword::prompt_password_stderr(
|
||||
"For added security, enter a passphrase (empty for no passphrase):",
|
||||
prompt_passphrase(
|
||||
"For added security, enter a passphrase (empty for no passphrase): ",
|
||||
)?
|
||||
};
|
||||
let seed = Seed::new(&mnemonic, &passphrase);
|
||||
let keypair = keypair_from_seed(seed.as_bytes())?;
|
||||
|
||||
output_keypair(&keypair, &outfile, "new")?;
|
||||
if let Some(outfile) = outfile {
|
||||
output_keypair(&keypair, &outfile, "new")?;
|
||||
}
|
||||
|
||||
let silent = matches.is_present("silent");
|
||||
if !silent {
|
||||
@ -249,7 +306,7 @@ fn main() -> Result<(), Box<dyn error::Error>> {
|
||||
}
|
||||
|
||||
let skip_validation = matches.is_present(SKIP_SEED_PHRASE_VALIDATION_ARG.name);
|
||||
let keypair = keypair_from_seed_phrase("recover", skip_validation)?;
|
||||
let keypair = keypair_from_seed_phrase("recover", skip_validation, true)?;
|
||||
output_keypair(&keypair, &outfile, "recovered")?;
|
||||
}
|
||||
("grind", Some(matches)) => {
|
||||
@ -331,6 +388,19 @@ fn main() -> Result<(), Box<dyn error::Error>> {
|
||||
.collect::<Vec<_>>();
|
||||
thread::park();
|
||||
}
|
||||
("verify", Some(matches)) => {
|
||||
let keypair = get_keypair_from_matches(matches)?;
|
||||
let test_data = b"test";
|
||||
let signature = Signature::new(&keypair.sign(test_data).to_bytes());
|
||||
let pubkey_bs58 = matches.value_of("pubkey").unwrap();
|
||||
let pubkey = bs58::decode(pubkey_bs58).into_vec().unwrap();
|
||||
if signature.verify(&pubkey, test_data) {
|
||||
println!("Verification for public key: {}: Success", pubkey_bs58);
|
||||
} else {
|
||||
println!("Verification for public key: {}: Failed", pubkey_bs58);
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
|
||||
|
@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
edition = "2018"
|
||||
name = "solana-ledger-tool"
|
||||
description = "Blockchain, Rebuilt for Scale"
|
||||
version = "0.21.0"
|
||||
version = "0.21.6"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@ -15,12 +15,12 @@ serde = "1.0.102"
|
||||
serde_derive = "1.0.102"
|
||||
serde_json = "1.0.41"
|
||||
serde_yaml = "0.8.11"
|
||||
solana-clap-utils = { path = "../clap-utils", version = "0.21.0" }
|
||||
solana-ledger = { path = "../ledger", version = "0.21.0" }
|
||||
solana-logger = { path = "../logger", version = "0.21.0" }
|
||||
solana-runtime = { path = "../runtime", version = "0.21.0" }
|
||||
solana-sdk = { path = "../sdk", version = "0.21.0" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "0.21.0" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "0.21.6" }
|
||||
solana-ledger = { path = "../ledger", version = "0.21.6" }
|
||||
solana-logger = { path = "../logger", version = "0.21.6" }
|
||||
solana-runtime = { path = "../runtime", version = "0.21.6" }
|
||||
solana-sdk = { path = "../sdk", version = "0.21.6" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "0.21.6" }
|
||||
|
||||
[dev-dependencies]
|
||||
assert_cmd = "0.11"
|
||||
|
@ -30,6 +30,7 @@ enum LedgerOutputMethod {
|
||||
}
|
||||
|
||||
fn output_slot(blocktree: &Blocktree, slot: Slot, method: &LedgerOutputMethod) {
|
||||
println!("Slot Meta {:?}", blocktree.meta(slot));
|
||||
let entries = blocktree
|
||||
.get_slot_entries(slot, 0, None)
|
||||
.unwrap_or_else(|err| {
|
||||
@ -436,7 +437,14 @@ fn main() {
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("bounds")
|
||||
.about("Print lowest and highest non-empty slots. Note: This ignores gaps in slots")
|
||||
.about("Print lowest and highest non-empty slots. Note that there may be empty slots within the bounds")
|
||||
.arg(
|
||||
Arg::with_name("all")
|
||||
.long("all")
|
||||
.takes_value(false)
|
||||
.required(false)
|
||||
.help("Additionally print all the non-empty slots within the bounds"),
|
||||
)
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("json")
|
||||
@ -581,9 +589,9 @@ fn main() {
|
||||
})
|
||||
};
|
||||
let account_paths = if let Some(account_paths) = matches.value_of("account_paths") {
|
||||
Some(account_paths.to_string())
|
||||
account_paths.split(',').map(PathBuf::from).collect()
|
||||
} else {
|
||||
Some(ledger_path.join("accounts").to_str().unwrap().to_string())
|
||||
vec![ledger_path.join("accounts")]
|
||||
};
|
||||
|
||||
let process_options = blocktree_processor::ProcessOptions {
|
||||
@ -719,8 +727,10 @@ fn main() {
|
||||
}
|
||||
});
|
||||
}
|
||||
("bounds", _) => match blocktree.slot_meta_iterator(0) {
|
||||
("bounds", Some(args_matches)) => match blocktree.slot_meta_iterator(0) {
|
||||
Ok(metas) => {
|
||||
let all = args_matches.is_present("all");
|
||||
|
||||
println!("Collecting Ledger information...");
|
||||
let slots: Vec<_> = metas.map(|(slot, _)| slot).collect();
|
||||
if slots.is_empty() {
|
||||
@ -729,10 +739,10 @@ fn main() {
|
||||
let first = slots.first().unwrap();
|
||||
let last = slots.last().unwrap_or_else(|| first);
|
||||
if first != last {
|
||||
println!(
|
||||
"Ledger contains some data for slots {:?} to {:?}",
|
||||
first, last
|
||||
);
|
||||
println!("Ledger contains data from slots {:?} to {:?}", first, last);
|
||||
if all {
|
||||
println!("Non-empty slots: {:?}", slots);
|
||||
}
|
||||
} else {
|
||||
println!("Ledger only contains some data for slot {:?}", first);
|
||||
}
|
||||
|
@ -31,6 +31,7 @@ fn bad_arguments() {
fn nominal() {
let genesis_config = create_genesis_config(100).genesis_config;
let ticks_per_slot = genesis_config.ticks_per_slot;
let meta_lines = 1;

let (ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_config);
let ticks = ticks_per_slot as usize;
@ -44,5 +45,5 @@ fn nominal() {
// Print everything
let output = run_ledger_tool(&["-l", &ledger_path, "print"]);
assert!(output.status.success());
assert_eq!(count_newlines(&output.stdout), ticks + 1);
assert_eq!(count_newlines(&output.stdout), ticks + meta_lines + 1);
}
@ -1,6 +1,6 @@
[package]
name = "solana-ledger"
version = "0.21.0"
version = "0.21.6"
description = "Solana ledger"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -29,19 +29,19 @@ rayon = "1.2.0"
reed-solomon-erasure = { package = "solana-reed-solomon-erasure", version = "4.0.1-3", features = ["simd-accel"] }
serde = "1.0.102"
serde_derive = "1.0.102"
solana-client = { path = "../client", version = "0.21.0" }
solana-genesis-programs = { path = "../genesis-programs", version = "0.21.0" }
solana-logger = { path = "../logger", version = "0.21.0" }
solana-measure = { path = "../measure", version = "0.21.0" }
solana-merkle-tree = { path = "../merkle-tree", version = "0.21.0" }
solana-metrics = { path = "../metrics", version = "0.21.0" }
solana-perf = { path = "../perf", version = "0.21.0" }
solana-client = { path = "../client", version = "0.21.6" }
solana-genesis-programs = { path = "../genesis-programs", version = "0.21.6" }
solana-logger = { path = "../logger", version = "0.21.6" }
solana-measure = { path = "../measure", version = "0.21.6" }
solana-merkle-tree = { path = "../merkle-tree", version = "0.21.6" }
solana-metrics = { path = "../metrics", version = "0.21.6" }
solana-perf = { path = "../perf", version = "0.21.6" }
ed25519-dalek = "1.0.0-pre.1"
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "0.21.0" }
solana-runtime = { path = "../runtime", version = "0.21.0" }
solana-sdk = { path = "../sdk", version = "0.21.0" }
solana-stake-program = { path = "../programs/stake", version = "0.21.0" }
solana-vote-program = { path = "../programs/vote", version = "0.21.0" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "0.21.6" }
solana-runtime = { path = "../runtime", version = "0.21.6" }
solana-sdk = { path = "../sdk", version = "0.21.6" }
solana-stake-program = { path = "../programs/stake", version = "0.21.6" }
solana-vote-program = { path = "../programs/vote", version = "0.21.6" }
sys-info = "0.5.8"
tar = "0.4.26"
tempfile = "3.1.0"
@ -54,8 +54,9 @@ default-features = false
features = ["lz4"]

[dev-dependencies]
assert_matches = "1.3.0"
matches = "0.1.6"
solana-budget-program = { path = "../programs/budget", version = "0.21.0" }
solana-budget-program = { path = "../programs/budget", version = "0.21.6" }

[lib]
crate-type = ["lib"]

@ -7,12 +7,12 @@ use crate::{
};
use log::*;
use solana_sdk::genesis_config::GenesisConfig;
use std::{fs, sync::Arc};
use std::{fs, path::PathBuf, sync::Arc};

pub fn load(
genesis_config: &GenesisConfig,
blocktree: &Blocktree,
account_paths: Option<String>,
account_paths: Vec<PathBuf>,
snapshot_config: Option<&SnapshotConfig>,
process_options: ProcessOptions,
) -> Result<(BankForks, Vec<BankForksInfo>, LeaderScheduleCache), BlocktreeProcessorError> {
@ -30,10 +30,13 @@ pub fn load(
if tar.exists() {
info!("Loading snapshot package: {:?}", tar);
// Fail hard here if snapshot fails to load, don't silently continue

if account_paths.is_empty() {
panic!("Account paths not present when booting from snapshot")
}

let deserialized_bank = snapshot_utils::bank_from_archive(
account_paths
.clone()
.expect("Account paths not present when booting from snapshot"),
&account_paths,
&snapshot_config.snapshot_path,
&tar,
)

@ -16,6 +16,7 @@ pub use crate::{
blocktree_meta::SlotMeta,
};
use bincode::deserialize;
use chrono::{offset::TimeZone, Duration as ChronoDuration, Utc};
use log::*;
use rayon::{
iter::{IntoParallelRefIterator, ParallelIterator},
@ -27,17 +28,18 @@ use solana_measure::measure::Measure;
use solana_metrics::{datapoint_debug, datapoint_error};
use solana_rayon_threadlimit::get_thread_count;
use solana_sdk::{
clock::{Slot, DEFAULT_TICKS_PER_SECOND},
clock::{Slot, UnixTimestamp, DEFAULT_TICKS_PER_SECOND},
genesis_config::GenesisConfig,
hash::Hash,
signature::{Keypair, KeypairUtil, Signature},
timing::timestamp,
timing::{duration_as_ms, timestamp},
transaction::Transaction,
};
use std::{
cell::RefCell,
cmp,
collections::HashMap,
convert::TryFrom,
fs,
path::{Path, PathBuf},
rc::Rc,
@ -45,6 +47,7 @@ use std::{
mpsc::{sync_channel, Receiver, SyncSender, TrySendError},
Arc, Mutex, RwLock,
},
time::Duration,
};

pub const BLOCKTREE_DIRECTORY: &str = "rocksdb";

@ -154,7 +157,7 @@ impl Blocktree {
|
||||
adjust_ulimit_nofile();
|
||||
|
||||
// Open the database
|
||||
let measure = Measure::start("open");
|
||||
let mut measure = Measure::start("open");
|
||||
let db = Database::open(&blocktree_path)?;
|
||||
|
||||
// Create the metadata column family
|
||||
@ -185,6 +188,7 @@ impl Blocktree {
|
||||
.unwrap_or(0);
|
||||
let last_root = Arc::new(RwLock::new(max_root));
|
||||
|
||||
measure.stop();
|
||||
info!("{:?} {}", blocktree_path, measure);
|
||||
Ok(Blocktree {
|
||||
db,
|
||||
@ -353,10 +357,20 @@ impl Blocktree {
|
||||
Ok(slot_iterator.take_while(move |((shred_slot, _), _)| *shred_slot == slot))
|
||||
}
|
||||
|
||||
pub fn slot_coding_iterator<'a>(
|
||||
&'a self,
|
||||
slot: Slot,
|
||||
) -> Result<impl Iterator<Item = ((u64, u64), Box<[u8]>)> + 'a> {
|
||||
let slot_iterator = self
|
||||
.db
|
||||
.iter::<cf::ShredCode>(IteratorMode::From((slot, 0), IteratorDirection::Forward))?;
|
||||
Ok(slot_iterator.take_while(move |((shred_slot, _), _)| *shred_slot == slot))
|
||||
}
|
||||
|
||||
fn try_shred_recovery(
|
||||
db: &Database,
|
||||
erasure_metas: &HashMap<(u64, u64), ErasureMeta>,
|
||||
index_working_set: &HashMap<u64, IndexMetaWorkingSetEntry>,
|
||||
index_working_set: &mut HashMap<u64, IndexMetaWorkingSetEntry>,
|
||||
prev_inserted_datas: &mut HashMap<(u64, u64), Shred>,
|
||||
prev_inserted_codes: &mut HashMap<(u64, u64), Shred>,
|
||||
) -> Vec<Shred> {
|
||||
@ -381,8 +395,8 @@ impl Blocktree {
|
||||
);
|
||||
};
|
||||
|
||||
let index_meta_entry = index_working_set.get(&slot).expect("Index");
|
||||
let index = &index_meta_entry.index;
|
||||
let index_meta_entry = index_working_set.get_mut(&slot).expect("Index");
|
||||
let index = &mut index_meta_entry.index;
|
||||
match erasure_meta.status(&index) {
|
||||
ErasureMetaStatus::CanRecover => {
|
||||
// Find shreds for this erasure set and try recovery
|
||||
@ -409,8 +423,17 @@ impl Blocktree {
|
||||
});
|
||||
(set_index..set_index + erasure_meta.config.num_coding() as u64).for_each(
|
||||
|i| {
|
||||
if let Some(shred) =
|
||||
prev_inserted_codes.remove(&(slot, i)).or_else(|| {
|
||||
if let Some(shred) = prev_inserted_codes
|
||||
.remove(&(slot, i))
|
||||
.map(|s| {
|
||||
// Remove from the index so it doesn't get committed. We know
|
||||
// this is safe to do because everything in
|
||||
// `prev_inserted_codes` does not yet exist in blocktree
|
||||
// (guaranteed by `check_cache_coding_shred`)
|
||||
index.coding_mut().set_present(i, false);
|
||||
s
|
||||
})
|
||||
.or_else(|| {
|
||||
if index.coding().is_present(i) {
|
||||
let some_code = code_cf
|
||||
.get_bytes((slot, i))
|
||||
@ -446,8 +469,14 @@ impl Blocktree {
|
||||
ErasureMetaStatus::DataFull => {
|
||||
(set_index..set_index + erasure_meta.config.num_coding() as u64).for_each(
|
||||
|i| {
|
||||
// Remove saved coding shreds. We don't need these for future recovery
|
||||
let _ = prev_inserted_codes.remove(&(slot, i));
|
||||
// Remove saved coding shreds. We don't need these for future recovery.
|
||||
if prev_inserted_codes.remove(&(slot, i)).is_some() {
|
||||
// Remove from the index so it doesn't get committed. We know
|
||||
// this is safe to do because everything in
|
||||
// `prev_inserted_codes` does not yet exist in blocktree
|
||||
// (guaranteed by `check_cache_coding_shred`)
|
||||
index.coding_mut().set_present(i, false);
|
||||
}
|
||||
},
|
||||
);
|
||||
submit_metrics(false, "complete".into(), 0);
|
||||
@ -520,7 +549,7 @@ impl Blocktree {
|
||||
let recovered_data = Self::try_shred_recovery(
|
||||
&db,
|
||||
&erasure_metas,
|
||||
&index_working_set,
|
||||
&mut index_working_set,
|
||||
&mut just_inserted_data_shreds,
|
||||
&mut just_inserted_coding_shreds,
|
||||
);
|
||||
@ -677,6 +706,13 @@ impl Blocktree {
|
||||
);
|
||||
}
|
||||
|
||||
// Should be safe to modify index_meta here. Two cases
// 1) Recovery happens: Then all inserted erasure metas are removed
// from just_received_coding_shreds, and nothing will be committed by
// `check_insert_coding_shred`, so the coding index meta will not be
// committed
index_meta.coding_mut().set_present(shred_index, true);
|
||||
just_received_coding_shreds
|
||||
.entry((slot, shred_index))
|
||||
.or_insert_with(|| shred);
|
||||
@ -858,19 +894,10 @@ impl Blocktree {
|
||||
// Parent for slot meta should have been set by this point
|
||||
assert!(!is_orphan(slot_meta));
|
||||
|
||||
let data_cf = self.db.column::<cf::ShredData>();
|
||||
|
||||
let check_data_cf = |slot, index| {
|
||||
data_cf
|
||||
.get_bytes((slot, index))
|
||||
.map(|opt| opt.is_some())
|
||||
.unwrap_or(false)
|
||||
};
|
||||
|
||||
let new_consumed = if slot_meta.consumed == index {
|
||||
let mut current_index = index + 1;
|
||||
|
||||
while data_index.is_present(current_index) || check_data_cf(slot, current_index) {
|
||||
while data_index.is_present(current_index) {
|
||||
current_index += 1;
|
||||
}
|
||||
current_index
|
||||
@ -1129,6 +1156,25 @@ impl Blocktree {
|
||||
}
|
||||
}
|
||||
|
||||
// The `get_block_time` method is not fully implemented (depends on validator timestamp
// transactions). It currently returns Some(`slot` * DEFAULT_MS_PER_SLOT) offset from 0 for all
// transactions, and None for any values that would overflow any step.
pub fn get_block_time(&self, slot: Slot, slot_duration: Duration) -> Option<UnixTimestamp> {
let ms_per_slot = duration_as_ms(&slot_duration);
let (offset_millis, overflow) = slot.overflowing_mul(ms_per_slot);
if !overflow {
i64::try_from(offset_millis)
.ok()
.and_then(|millis| {
let median_datetime = Utc.timestamp(0, 0);
median_datetime.checked_add_signed(ChronoDuration::milliseconds(millis))
})
.map(|dt| dt.timestamp())
} else {
None
}
}

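A minimal, self-contained sketch of the arithmetic the new `get_block_time` performs, useful for sanity-checking the overflow handling; the function name, the 400 ms slot duration, and the epoch-0 origin below are assumptions for illustration, not part of the change:

use chrono::{offset::TimeZone, Duration as ChronoDuration, Utc};
use std::convert::TryFrom;

// Estimate a Unix timestamp for `slot`, assuming a fixed slot duration in milliseconds
// and an origin at the Unix epoch, mirroring the logic of the method above.
fn estimated_block_time(slot: u64, ms_per_slot: u64) -> Option<i64> {
    let (offset_millis, overflow) = slot.overflowing_mul(ms_per_slot);
    if overflow {
        return None;
    }
    let millis = i64::try_from(offset_millis).ok()?;
    Utc.timestamp(0, 0)
        .checked_add_signed(ChronoDuration::milliseconds(millis))
        .map(|dt| dt.timestamp())
}

fn main() {
    // Ten slots at 400 ms per slot land 4 seconds after the epoch.
    assert_eq!(estimated_block_time(10, 400), Some(4));
}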
pub fn get_confirmed_block(&self, slot: Slot) -> Result<RpcConfirmedBlock> {
|
||||
if self.is_root(slot) {
|
||||
let slot_meta_cf = self.db.column::<cf::SlotMeta>();
|
||||
@ -1137,24 +1183,30 @@ impl Blocktree {
|
||||
.expect("Rooted slot must exist in SlotMeta");
|
||||
|
||||
let slot_entries = self.get_slot_entries(slot, 0, None)?;
|
||||
let slot_transaction_iterator = slot_entries
|
||||
.iter()
|
||||
.cloned()
|
||||
.flat_map(|entry| entry.transactions);
|
||||
let parent_slot_entries = self.get_slot_entries(slot_meta.parent_slot, 0, None)?;
|
||||
if !slot_entries.is_empty() {
|
||||
let slot_transaction_iterator = slot_entries
|
||||
.iter()
|
||||
.cloned()
|
||||
.flat_map(|entry| entry.transactions);
|
||||
let parent_slot_entries = self.get_slot_entries(slot_meta.parent_slot, 0, None)?;
|
||||
let previous_blockhash = if !parent_slot_entries.is_empty() {
|
||||
get_last_hash(parent_slot_entries.iter()).unwrap()
|
||||
} else {
|
||||
Hash::default()
|
||||
};
|
||||
|
||||
let block = RpcConfirmedBlock {
|
||||
previous_blockhash: get_last_hash(parent_slot_entries.iter())
|
||||
.expect("Rooted parent slot must have blockhash"),
|
||||
blockhash: get_last_hash(slot_entries.iter())
|
||||
.expect("Rooted slot must have blockhash"),
|
||||
parent_slot: slot_meta.parent_slot,
|
||||
transactions: self.map_transactions_to_statuses(slot, slot_transaction_iterator),
|
||||
};
|
||||
Ok(block)
|
||||
} else {
|
||||
Err(BlocktreeError::SlotNotRooted)
|
||||
let block = RpcConfirmedBlock {
|
||||
previous_blockhash,
|
||||
blockhash: get_last_hash(slot_entries.iter())
|
||||
.unwrap_or_else(|| panic!("Rooted slot {:?} must have blockhash", slot)),
|
||||
parent_slot: slot_meta.parent_slot,
|
||||
transactions: self
|
||||
.map_transactions_to_statuses(slot, slot_transaction_iterator),
|
||||
};
|
||||
return Ok(block);
|
||||
}
|
||||
}
|
||||
Err(BlocktreeError::SlotNotRooted)
|
||||
}
|
||||
|
||||
fn map_transactions_to_statuses<'a>(
|
||||
@ -2069,12 +2121,16 @@ pub mod tests {
|
||||
use crate::{
|
||||
entry::{next_entry, next_entry_mut},
|
||||
genesis_utils::{create_genesis_config, GenesisConfigInfo},
|
||||
leader_schedule::{FixedSchedule, LeaderSchedule},
|
||||
shred::{max_ticks_per_n_shreds, DataShredHeader},
|
||||
};
|
||||
use assert_matches::assert_matches;
|
||||
use bincode::serialize;
|
||||
use itertools::Itertools;
|
||||
use rand::{seq::SliceRandom, thread_rng};
|
||||
use solana_runtime::bank::Bank;
|
||||
use solana_sdk::{
|
||||
hash::{self, Hash},
|
||||
hash::{self, hash, Hash},
|
||||
instruction::CompiledInstruction,
|
||||
packet::PACKET_DATA_SIZE,
|
||||
pubkey::Pubkey,
|
||||
@ -2084,13 +2140,9 @@ pub mod tests {
|
||||
use std::{iter::FromIterator, time::Duration};
|
||||
|
||||
// used for tests only
|
||||
fn make_slot_entries_with_transactions(
|
||||
slot: Slot,
|
||||
parent_slot: Slot,
|
||||
num_entries: u64,
|
||||
) -> (Vec<Shred>, Vec<Entry>) {
|
||||
fn make_slot_entries_with_transactions(num_entries: u64) -> Vec<Entry> {
|
||||
let mut entries: Vec<Entry> = Vec::new();
|
||||
for _ in 0..num_entries {
|
||||
for x in 0..num_entries {
|
||||
let transaction = Transaction::new_with_compiled_instructions(
|
||||
&[&Keypair::new()],
|
||||
&[Pubkey::new_rand()],
|
||||
@ -2099,11 +2151,10 @@ pub mod tests {
|
||||
vec![CompiledInstruction::new(1, &(), vec![0])],
|
||||
);
|
||||
entries.push(next_entry_mut(&mut Hash::default(), 0, vec![transaction]));
|
||||
let mut tick = create_ticks(1, 0, Hash::default());
|
||||
let mut tick = create_ticks(1, 0, hash(&serialize(&x).unwrap()));
|
||||
entries.append(&mut tick);
|
||||
}
|
||||
let shreds = entries_to_test_shreds(entries.clone(), slot, parent_slot, true, 0);
|
||||
(shreds, entries)
|
||||
entries
|
||||
}
|
||||
|
||||
#[test]
|
||||
@ -4146,13 +4197,22 @@ pub mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_get_confirmed_block() {
|
||||
let slot = 0;
|
||||
let (shreds, entries) = make_slot_entries_with_transactions(slot, 0, 100);
|
||||
|
||||
let slot = 10;
|
||||
let entries = make_slot_entries_with_transactions(100);
|
||||
let blockhash = get_last_hash(entries.iter()).unwrap();
|
||||
let shreds = entries_to_test_shreds(entries.clone(), slot, slot - 1, true, 0);
|
||||
let more_shreds = entries_to_test_shreds(entries.clone(), slot + 1, slot, true, 0);
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
let ledger = Blocktree::open(&ledger_path).unwrap();
|
||||
ledger.insert_shreds(shreds, None, false).unwrap();
|
||||
ledger.set_roots(&[0]).unwrap();
|
||||
ledger.insert_shreds(more_shreds, None, false).unwrap();
|
||||
ledger.set_roots(&[slot - 1, slot, slot + 1]).unwrap();
|
||||
|
||||
let mut parent_meta = SlotMeta::default();
|
||||
parent_meta.parent_slot = std::u64::MAX;
|
||||
ledger
|
||||
.put_meta_bytes(slot - 1, &serialize(&parent_meta).unwrap())
|
||||
.unwrap();
|
||||
|
||||
let expected_transactions: Vec<(Transaction, Option<RpcTransactionStatus>)> = entries
|
||||
.iter()
|
||||
@ -4171,6 +4231,16 @@ pub mod tests {
|
||||
},
|
||||
)
|
||||
.unwrap();
|
||||
ledger
|
||||
.transaction_status_cf
|
||||
.put(
|
||||
(slot + 1, signature),
|
||||
&RpcTransactionStatus {
|
||||
status: Ok(()),
|
||||
fee: 42,
|
||||
},
|
||||
)
|
||||
.unwrap();
|
||||
(
|
||||
transaction,
|
||||
Some(RpcTransactionStatus {
|
||||
@ -4181,17 +4251,33 @@ pub mod tests {
|
||||
})
|
||||
.collect();
|
||||
|
||||
let confirmed_block = ledger.get_confirmed_block(0).unwrap();
|
||||
// Even if marked as root, a slot that is empty of entries should return an error
|
||||
let confirmed_block_err = ledger.get_confirmed_block(slot - 1).unwrap_err();
|
||||
assert_matches!(confirmed_block_err, BlocktreeError::SlotNotRooted);
|
||||
|
||||
let confirmed_block = ledger.get_confirmed_block(slot).unwrap();
|
||||
assert_eq!(confirmed_block.transactions.len(), 100);
|
||||
|
||||
let mut expected_block = RpcConfirmedBlock::default();
|
||||
expected_block.transactions = expected_transactions.clone();
|
||||
expected_block.parent_slot = slot - 1;
|
||||
expected_block.blockhash = blockhash;
|
||||
// The previous_blockhash of `expected_block` is default because its parent slot is a
|
||||
// root, but empty of entries. This is special handling for snapshot root slots.
|
||||
assert_eq!(confirmed_block, expected_block);
|
||||
|
||||
let confirmed_block = ledger.get_confirmed_block(slot + 1).unwrap();
|
||||
assert_eq!(confirmed_block.transactions.len(), 100);
|
||||
|
||||
let mut expected_block = RpcConfirmedBlock::default();
|
||||
expected_block.transactions = expected_transactions;
|
||||
// The blockhash and previous_blockhash of `expected_block` are default only because
|
||||
// `make_slot_entries_with_transactions` sets all entry hashes to default
|
||||
expected_block.parent_slot = slot;
|
||||
expected_block.previous_blockhash = blockhash;
|
||||
expected_block.blockhash = blockhash;
|
||||
assert_eq!(confirmed_block, expected_block);
|
||||
|
||||
let not_root = ledger.get_confirmed_block(1);
|
||||
assert!(not_root.is_err());
|
||||
let not_root = ledger.get_confirmed_block(slot + 2).unwrap_err();
|
||||
assert_matches!(not_root, BlocktreeError::SlotNotRooted);
|
||||
|
||||
drop(ledger);
|
||||
Blocktree::destroy(&ledger_path).expect("Expected successful database destruction");
|
||||
@ -4319,4 +4405,236 @@ pub mod tests {
|
||||
}
|
||||
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_recovery() {
|
||||
let slot = 1;
|
||||
let (data_shreds, coding_shreds, leader_schedule_cache) =
|
||||
setup_erasure_shreds(slot, 0, 100, 1.0);
|
||||
let blocktree_path = get_tmp_ledger_path!();
|
||||
{
|
||||
let blocktree = Blocktree::open(&blocktree_path).unwrap();
|
||||
blocktree
|
||||
.insert_shreds(coding_shreds, Some(&leader_schedule_cache), false)
|
||||
.unwrap();
|
||||
let shred_bufs: Vec<_> = data_shreds
|
||||
.iter()
|
||||
.map(|shred| shred.payload.clone())
|
||||
.collect();
|
||||
|
||||
// Check all the data shreds were recovered
|
||||
for (s, buf) in data_shreds.iter().zip(shred_bufs) {
|
||||
assert_eq!(
|
||||
blocktree
|
||||
.get_data_shred(s.slot(), s.index() as u64)
|
||||
.unwrap()
|
||||
.unwrap(),
|
||||
buf
|
||||
);
|
||||
}
|
||||
|
||||
verify_index_integrity(&blocktree, slot);
|
||||
}
|
||||
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_index_integrity() {
|
||||
let slot = 1;
|
||||
let num_entries = 100;
|
||||
let (data_shreds, coding_shreds, leader_schedule_cache) =
|
||||
setup_erasure_shreds(slot, 0, num_entries, 1.0);
|
||||
assert!(data_shreds.len() > 3);
|
||||
assert!(coding_shreds.len() > 3);
|
||||
let blocktree_path = get_tmp_ledger_path!();
|
||||
{
|
||||
let blocktree = Blocktree::open(&blocktree_path).unwrap();
|
||||
// Test inserting all the shreds
|
||||
let all_shreds: Vec<_> = data_shreds
|
||||
.iter()
|
||||
.cloned()
|
||||
.chain(coding_shreds.iter().cloned())
|
||||
.collect();
|
||||
blocktree
|
||||
.insert_shreds(all_shreds, Some(&leader_schedule_cache), false)
|
||||
.unwrap();
|
||||
verify_index_integrity(&blocktree, slot);
|
||||
blocktree.purge_slots(0, Some(slot));
|
||||
|
||||
// Test inserting just the codes, enough for recovery
|
||||
blocktree
|
||||
.insert_shreds(coding_shreds.clone(), Some(&leader_schedule_cache), false)
|
||||
.unwrap();
|
||||
verify_index_integrity(&blocktree, slot);
|
||||
blocktree.purge_slots(0, Some(slot));
|
||||
|
||||
// Test inserting some codes, but not enough for recovery
|
||||
blocktree
|
||||
.insert_shreds(
|
||||
coding_shreds[..coding_shreds.len() - 1].to_vec(),
|
||||
Some(&leader_schedule_cache),
|
||||
false,
|
||||
)
|
||||
.unwrap();
|
||||
verify_index_integrity(&blocktree, slot);
|
||||
blocktree.purge_slots(0, Some(slot));
|
||||
|
||||
// Test inserting just the codes, and some data, enough for recovery
|
||||
let shreds: Vec<_> = data_shreds[..data_shreds.len() - 1]
|
||||
.iter()
|
||||
.cloned()
|
||||
.chain(coding_shreds[..coding_shreds.len() - 1].iter().cloned())
|
||||
.collect();
|
||||
blocktree
|
||||
.insert_shreds(shreds, Some(&leader_schedule_cache), false)
|
||||
.unwrap();
|
||||
verify_index_integrity(&blocktree, slot);
|
||||
blocktree.purge_slots(0, Some(slot));
|
||||
|
||||
// Test inserting some codes, and some data, but enough for recovery
|
||||
let shreds: Vec<_> = data_shreds[..data_shreds.len() / 2 - 1]
|
||||
.iter()
|
||||
.cloned()
|
||||
.chain(coding_shreds[..coding_shreds.len() / 2 - 1].iter().cloned())
|
||||
.collect();
|
||||
blocktree
|
||||
.insert_shreds(shreds, Some(&leader_schedule_cache), false)
|
||||
.unwrap();
|
||||
verify_index_integrity(&blocktree, slot);
|
||||
blocktree.purge_slots(0, Some(slot));
|
||||
|
||||
// Test inserting all shreds in 2 rounds, make sure nothing is lost
|
||||
let shreds1: Vec<_> = data_shreds[..data_shreds.len() / 2 - 1]
|
||||
.iter()
|
||||
.cloned()
|
||||
.chain(coding_shreds[..coding_shreds.len() / 2 - 1].iter().cloned())
|
||||
.collect();
|
||||
let shreds2: Vec<_> = data_shreds[data_shreds.len() / 2 - 1..]
|
||||
.iter()
|
||||
.cloned()
|
||||
.chain(coding_shreds[coding_shreds.len() / 2 - 1..].iter().cloned())
|
||||
.collect();
|
||||
blocktree
|
||||
.insert_shreds(shreds1, Some(&leader_schedule_cache), false)
|
||||
.unwrap();
|
||||
blocktree
|
||||
.insert_shreds(shreds2, Some(&leader_schedule_cache), false)
|
||||
.unwrap();
|
||||
verify_index_integrity(&blocktree, slot);
|
||||
blocktree.purge_slots(0, Some(slot));
|
||||
|
||||
// Test not all, but enough data and coding shreds in 2 rounds to trigger recovery,
|
||||
// make sure nothing is lost
|
||||
let shreds1: Vec<_> = data_shreds[..data_shreds.len() / 2 - 1]
|
||||
.iter()
|
||||
.cloned()
|
||||
.chain(coding_shreds[..coding_shreds.len() / 2 - 1].iter().cloned())
|
||||
.collect();
|
||||
let shreds2: Vec<_> = data_shreds[data_shreds.len() / 2 - 1..data_shreds.len() / 2]
|
||||
.iter()
|
||||
.cloned()
|
||||
.chain(
|
||||
coding_shreds[coding_shreds.len() / 2 - 1..data_shreds.len() / 2]
|
||||
.iter()
|
||||
.cloned(),
|
||||
)
|
||||
.collect();
|
||||
blocktree
|
||||
.insert_shreds(shreds1, Some(&leader_schedule_cache), false)
|
||||
.unwrap();
|
||||
blocktree
|
||||
.insert_shreds(shreds2, Some(&leader_schedule_cache), false)
|
||||
.unwrap();
|
||||
verify_index_integrity(&blocktree, slot);
|
||||
blocktree.purge_slots(0, Some(slot));
|
||||
|
||||
// Test insert shreds in 2 rounds, but not enough to trigger
|
||||
// recovery, make sure nothing is lost
|
||||
let shreds1: Vec<_> = data_shreds[..data_shreds.len() / 2 - 2]
|
||||
.iter()
|
||||
.cloned()
|
||||
.chain(coding_shreds[..coding_shreds.len() / 2 - 2].iter().cloned())
|
||||
.collect();
|
||||
let shreds2: Vec<_> = data_shreds[data_shreds.len() / 2 - 2..data_shreds.len() / 2 - 1]
|
||||
.iter()
|
||||
.cloned()
|
||||
.chain(
|
||||
coding_shreds[coding_shreds.len() / 2 - 2..coding_shreds.len() / 2 - 1]
|
||||
.iter()
|
||||
.cloned(),
|
||||
)
|
||||
.collect();
|
||||
blocktree
|
||||
.insert_shreds(shreds1, Some(&leader_schedule_cache), false)
|
||||
.unwrap();
|
||||
blocktree
|
||||
.insert_shreds(shreds2, Some(&leader_schedule_cache), false)
|
||||
.unwrap();
|
||||
verify_index_integrity(&blocktree, slot);
|
||||
blocktree.purge_slots(0, Some(slot));
|
||||
}
|
||||
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
|
||||
}
|
||||
|
||||
fn setup_erasure_shreds(
|
||||
slot: u64,
|
||||
parent_slot: u64,
|
||||
num_entries: u64,
|
||||
erasure_rate: f32,
|
||||
) -> (Vec<Shred>, Vec<Shred>, Arc<LeaderScheduleCache>) {
|
||||
let entries = make_slot_entries_with_transactions(num_entries);
|
||||
let leader_keypair = Arc::new(Keypair::new());
|
||||
let shredder = Shredder::new(
|
||||
slot,
|
||||
parent_slot,
|
||||
erasure_rate,
|
||||
leader_keypair.clone(),
|
||||
0,
|
||||
0,
|
||||
)
|
||||
.expect("Failed in creating shredder");
|
||||
let (data_shreds, coding_shreds, _) = shredder.entries_to_shreds(&entries, true, 0);
|
||||
|
||||
let genesis_config = create_genesis_config(2).genesis_config;
|
||||
let bank = Arc::new(Bank::new(&genesis_config));
|
||||
let mut leader_schedule_cache = LeaderScheduleCache::new_from_bank(&bank);
|
||||
let fixed_schedule = FixedSchedule {
|
||||
leader_schedule: Arc::new(LeaderSchedule::new_from_schedule(vec![
|
||||
leader_keypair.pubkey()
|
||||
])),
|
||||
start_epoch: 0,
|
||||
};
|
||||
leader_schedule_cache.set_fixed_leader_schedule(Some(fixed_schedule));
|
||||
|
||||
(data_shreds, coding_shreds, Arc::new(leader_schedule_cache))
|
||||
}
|
||||
|
||||
fn verify_index_integrity(blocktree: &Blocktree, slot: u64) {
|
||||
let index = blocktree.get_index(slot).unwrap().unwrap();
|
||||
// Test the set of data shreds in the index and in the data column
|
||||
// family are the same
|
||||
let data_iter = blocktree.slot_data_iterator(slot).unwrap();
|
||||
let mut num_data = 0;
|
||||
for ((slot, index), _) in data_iter {
|
||||
num_data += 1;
|
||||
assert!(blocktree.get_data_shred(slot, index).unwrap().is_some());
|
||||
}
|
||||
|
||||
// Test the data index doesn't have anything extra
|
||||
let num_data_in_index = index.data().num_data();
|
||||
assert_eq!(num_data_in_index, num_data);
|
||||
|
||||
// Test the set of coding shreds in the index and in the coding column
|
||||
// family are the same
|
||||
let coding_iter = blocktree.slot_coding_iterator(slot).unwrap();
|
||||
let mut num_coding = 0;
|
||||
for ((slot, index), _) in coding_iter {
|
||||
num_coding += 1;
|
||||
assert!(blocktree.get_coding_shred(slot, index).unwrap().is_some());
|
||||
}
|
||||
|
||||
// Test the data index doesn't have anything extra
|
||||
let num_coding_in_index = index.coding().num_coding();
|
||||
assert_eq!(num_coding_in_index, num_coding);
|
||||
}
|
||||
}
|
||||
|
@ -13,8 +13,6 @@ use solana_client::rpc_request::RpcTransactionStatus;
use solana_sdk::{clock::Slot, signature::Signature};
use std::{collections::HashMap, fs, marker::PhantomData, path::Path, sync::Arc};

// A good value for this is the number of cores on the machine
const TOTAL_THREADS: i32 = 8;
const MAX_WRITE_BUFFER_SIZE: u64 = 256 * 1024 * 1024; // 256MB

// Column family for metadata about a leader slot
@ -718,8 +716,15 @@ fn get_cf_options() -> Options {
// 256 * 8 = 2GB. 6 of these columns should take at most 12GB of RAM
options.set_max_write_buffer_number(8);
options.set_write_buffer_size(MAX_WRITE_BUFFER_SIZE as usize);
options.set_target_file_size_base(MAX_WRITE_BUFFER_SIZE / 10);
options.set_max_bytes_for_level_base(MAX_WRITE_BUFFER_SIZE);
let file_num_compaction_trigger = 4;
// Recommend that this be around the size of level 0. Level 0 estimated size in stable state is
// write_buffer_size * min_write_buffer_number_to_merge * level0_file_num_compaction_trigger
// Source: https://docs.rs/rocksdb/0.6.0/rocksdb/struct.Options.html#method.set_level_zero_file_num_compaction_trigger
let total_size_base = MAX_WRITE_BUFFER_SIZE * file_num_compaction_trigger;
let file_size_base = total_size_base / 10;
options.set_level_zero_file_num_compaction_trigger(file_num_compaction_trigger as i32);
options.set_max_bytes_for_level_base(total_size_base);
options.set_target_file_size_base(file_size_base);
options
}

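Worked arithmetic for the compaction settings above, with the constants from this diff (the standalone snippet is illustrative only): a 256 MB write buffer and a level-0 file trigger of 4 give a level base of 1 GiB and a target SST file size of roughly 100 MB.

fn main() {
    const MAX_WRITE_BUFFER_SIZE: u64 = 256 * 1024 * 1024; // 256 MB, as in the diff
    let file_num_compaction_trigger: u64 = 4;
    let total_size_base = MAX_WRITE_BUFFER_SIZE * file_num_compaction_trigger; // 1_073_741_824 bytes
    let file_size_base = total_size_base / 10; // ~107 MB
    println!("level base: {} bytes, file base: {} bytes", total_size_base, file_size_base);
}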
@ -727,8 +732,7 @@ fn get_db_options() -> Options {
let mut options = Options::default();
options.create_if_missing(true);
options.create_missing_column_families(true);
options.increase_parallelism(TOTAL_THREADS);
options.set_max_background_flushes(4);
options.set_max_background_compactions(4);
// A good value for this is the number of cores on the machine
options.increase_parallelism(sys_info::cpu_num().unwrap() as i32);
options
}

@ -97,6 +97,10 @@ impl Index {
}

impl CodingIndex {
pub fn num_coding(&self) -> usize {
self.index.len()
}

pub fn present_in_bounds(&self, bounds: impl RangeBounds<u64>) -> usize {
self.index.range(bounds).count()
}
@ -121,6 +125,10 @@ impl CodingIndex {
}

impl DataIndex {
pub fn num_data(&self) -> usize {
self.index.len()
}

pub fn present_in_bounds(&self, bounds: impl RangeBounds<u64>) -> usize {
self.index.range(bounds).count()
}

@ -27,6 +27,7 @@ use solana_sdk::{
|
||||
};
|
||||
use std::{
|
||||
cell::RefCell,
|
||||
path::PathBuf,
|
||||
result,
|
||||
sync::Arc,
|
||||
time::{Duration, Instant},
|
||||
@ -230,6 +231,7 @@ pub enum BlocktreeProcessorError {
|
||||
FailedToLoadMeta,
|
||||
InvalidBlock(BlockError),
|
||||
InvalidTransaction,
|
||||
NoValidForksFound,
|
||||
}
|
||||
|
||||
impl From<BlockError> for BlocktreeProcessorError {
|
||||
@ -253,7 +255,7 @@ pub struct ProcessOptions {
|
||||
pub fn process_blocktree(
|
||||
genesis_config: &GenesisConfig,
|
||||
blocktree: &Blocktree,
|
||||
account_paths: Option<String>,
|
||||
account_paths: Vec<PathBuf>,
|
||||
opts: ProcessOptions,
|
||||
) -> result::Result<(BankForks, Vec<BankForksInfo>, LeaderScheduleCache), BlocktreeProcessorError> {
|
||||
if let Some(num_threads) = opts.override_num_threads {
|
||||
@ -267,7 +269,7 @@ pub fn process_blocktree(
|
||||
|
||||
// Setup bank for slot 0
|
||||
let bank0 = Arc::new(Bank::new_with_paths(&genesis_config, account_paths));
|
||||
info!("processing ledger for bank 0...");
|
||||
info!("processing ledger for slot 0...");
|
||||
process_bank_0(&bank0, blocktree, &opts)?;
|
||||
process_blocktree_from_root(genesis_config, blocktree, bank0, &opts)
|
||||
}
|
||||
@ -279,7 +281,7 @@ pub fn process_blocktree_from_root(
|
||||
bank: Arc<Bank>,
|
||||
opts: &ProcessOptions,
|
||||
) -> result::Result<(BankForks, Vec<BankForksInfo>, LeaderScheduleCache), BlocktreeProcessorError> {
|
||||
info!("processing ledger from root: {}...", bank.slot());
|
||||
info!("processing ledger from root slot {}...", bank.slot());
|
||||
// Starting slot must be a root, and thus has no parents
|
||||
assert!(bank.parent().is_none());
|
||||
let start_slot = bank.slot();
|
||||
@ -292,7 +294,7 @@ pub fn process_blocktree_from_root(
|
||||
|
||||
blocktree
|
||||
.set_roots(&[start_slot])
|
||||
.expect("Couldn't set root on startup");
|
||||
.expect("Couldn't set root slot on startup");
|
||||
|
||||
let meta = blocktree.meta(start_slot).unwrap();
|
||||
|
||||
@ -313,6 +315,9 @@ pub fn process_blocktree_from_root(
|
||||
opts,
|
||||
)?;
|
||||
let (banks, bank_forks_info): (Vec<_>, Vec<_>) = fork_info.into_iter().unzip();
|
||||
if banks.is_empty() {
|
||||
return Err(BlocktreeProcessorError::NoValidForksFound);
|
||||
}
|
||||
let bank_forks = BankForks::new_from_banks(&banks, rooted_path);
|
||||
(bank_forks, bank_forks_info, leader_schedule_cache)
|
||||
} else {
|
||||
@ -495,7 +500,7 @@ fn process_pending_slots(
|
||||
let (slot, meta, bank, last_entry_hash) = pending_slots.pop().unwrap();
|
||||
|
||||
if last_status_report.elapsed() > Duration::from_secs(2) {
|
||||
info!("processing ledger...block {}", slot);
|
||||
info!("processing ledger...slot {}", slot);
|
||||
last_status_report = Instant::now();
|
||||
}
|
||||
|
||||
@ -510,7 +515,10 @@ fn process_pending_slots(
|
||||
BlocktreeProcessorError::FailedToLoadEntries
|
||||
})?;
|
||||
|
||||
verify_and_process_slot_entries(&bank, &entries, last_entry_hash, opts)?;
|
||||
if let Err(err) = verify_and_process_slot_entries(&bank, &entries, last_entry_hash, opts) {
|
||||
warn!("slot {} failed to verify: {:?}", slot, err);
|
||||
continue;
|
||||
}
|
||||
|
||||
bank.freeze(); // all banks handled by this routine are created from complete slots
|
||||
|
||||
@ -643,8 +651,8 @@ pub mod tests {
|
||||
let parent_slot = 0;
|
||||
let slot = 1;
|
||||
let entries = create_ticks(ticks_per_slot, hashes_per_tick - 1, blockhash);
|
||||
blocktree
|
||||
.write_entries(
|
||||
assert_matches!(
|
||||
blocktree.write_entries(
|
||||
slot,
|
||||
0,
|
||||
0,
|
||||
@ -654,18 +662,22 @@ pub mod tests {
|
||||
&Arc::new(Keypair::new()),
|
||||
entries,
|
||||
0,
|
||||
)
|
||||
.expect("Expected to write shredded entries to blocktree");
|
||||
),
|
||||
Ok(_)
|
||||
);
|
||||
|
||||
let opts = ProcessOptions {
|
||||
poh_verify: true,
|
||||
..ProcessOptions::default()
|
||||
};
|
||||
assert_eq!(
|
||||
process_blocktree(&genesis_config, &blocktree, None, opts).err(),
|
||||
Some(BlocktreeProcessorError::InvalidBlock(
|
||||
BlockError::InvalidTickHashCount
|
||||
)),
|
||||
process_blocktree(
|
||||
&genesis_config,
|
||||
&blocktree,
|
||||
Vec::new(),
|
||||
ProcessOptions {
|
||||
poh_verify: true,
|
||||
..ProcessOptions::default()
|
||||
}
|
||||
)
|
||||
.err(),
|
||||
Some(BlocktreeProcessorError::NoValidForksFound)
|
||||
);
|
||||
}
|
||||
|
||||
@ -684,8 +696,8 @@ pub mod tests {
|
||||
let parent_slot = 0;
|
||||
let slot = 1;
|
||||
let entries = create_ticks(ticks_per_slot - 1, 0, blockhash);
|
||||
blocktree
|
||||
.write_entries(
|
||||
assert_matches!(
|
||||
blocktree.write_entries(
|
||||
slot,
|
||||
0,
|
||||
0,
|
||||
@ -695,19 +707,42 @@ pub mod tests {
|
||||
&Arc::new(Keypair::new()),
|
||||
entries,
|
||||
0,
|
||||
)
|
||||
.expect("Expected to write shredded entries to blocktree");
|
||||
|
||||
let opts = ProcessOptions {
|
||||
poh_verify: true,
|
||||
..ProcessOptions::default()
|
||||
};
|
||||
assert_eq!(
|
||||
process_blocktree(&genesis_config, &blocktree, None, opts).err(),
|
||||
Some(BlocktreeProcessorError::InvalidBlock(
|
||||
BlockError::InvalidTickCount
|
||||
)),
|
||||
),
|
||||
Ok(_)
|
||||
);
|
||||
|
||||
// No valid forks in blocktree, expect a failure
|
||||
assert_eq!(
|
||||
process_blocktree(
|
||||
&genesis_config,
|
||||
&blocktree,
|
||||
Vec::new(),
|
||||
ProcessOptions {
|
||||
poh_verify: true,
|
||||
..ProcessOptions::default()
|
||||
}
|
||||
)
|
||||
.err(),
|
||||
Some(BlocktreeProcessorError::NoValidForksFound)
|
||||
);
|
||||
|
||||
// Write slot 2 fully
|
||||
let _last_slot2_entry_hash =
|
||||
fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 2, 0, blockhash);
|
||||
|
||||
let (_bank_forks, bank_forks_info, _) = process_blocktree(
|
||||
&genesis_config,
|
||||
&blocktree,
|
||||
Vec::new(),
|
||||
ProcessOptions {
|
||||
poh_verify: true,
|
||||
..ProcessOptions::default()
|
||||
},
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
// One valid fork, one bad fork. process_blocktree() should only return the valid fork
|
||||
assert_eq!(bank_forks_info, vec![BankForksInfo { bank_slot: 2 }]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@ -736,8 +771,8 @@ pub mod tests {
|
||||
// per slot.
|
||||
let parent_slot = 0;
|
||||
let slot = 1;
|
||||
blocktree
|
||||
.write_entries(
|
||||
assert_matches!(
|
||||
blocktree.write_entries(
|
||||
slot,
|
||||
0,
|
||||
0,
|
||||
@ -747,18 +782,17 @@ pub mod tests {
|
||||
&Arc::new(Keypair::new()),
|
||||
entries,
|
||||
0,
|
||||
)
|
||||
.expect("Expected to write shredded entries to blocktree");
|
||||
),
|
||||
Ok(_)
|
||||
);
|
||||
|
||||
let opts = ProcessOptions {
|
||||
poh_verify: true,
|
||||
..ProcessOptions::default()
|
||||
};
|
||||
assert_eq!(
|
||||
process_blocktree(&genesis_config, &blocktree, None, opts).err(),
|
||||
Some(BlocktreeProcessorError::InvalidBlock(
|
||||
BlockError::TrailingEntry
|
||||
)),
|
||||
process_blocktree(&genesis_config, &blocktree, Vec::new(), opts).err(),
|
||||
Some(BlocktreeProcessorError::NoValidForksFound)
|
||||
);
|
||||
}
|
||||
|
||||
@ -799,8 +833,8 @@ pub mod tests {
|
||||
// throw away last one
|
||||
entries.pop();
|
||||
|
||||
blocktree
|
||||
.write_entries(
|
||||
assert_matches!(
|
||||
blocktree.write_entries(
|
||||
slot,
|
||||
0,
|
||||
0,
|
||||
@ -810,8 +844,9 @@ pub mod tests {
|
||||
&Arc::new(Keypair::new()),
|
||||
entries,
|
||||
0,
|
||||
)
|
||||
.expect("Expected to write shredded entries to blocktree");
|
||||
),
|
||||
Ok(_)
|
||||
);
|
||||
}
|
||||
|
||||
// slot 2, points at slot 1
|
||||
@ -822,7 +857,7 @@ pub mod tests {
|
||||
..ProcessOptions::default()
|
||||
};
|
||||
let (mut _bank_forks, bank_forks_info, _) =
|
||||
process_blocktree(&genesis_config, &blocktree, None, opts).unwrap();
|
||||
process_blocktree(&genesis_config, &blocktree, Vec::new(), opts).unwrap();
|
||||
|
||||
assert_eq!(bank_forks_info.len(), 1);
|
||||
assert_eq!(
|
||||
@ -884,7 +919,7 @@ pub mod tests {
|
||||
..ProcessOptions::default()
|
||||
};
|
||||
let (bank_forks, bank_forks_info, _) =
|
||||
process_blocktree(&genesis_config, &blocktree, None, opts).unwrap();
|
||||
process_blocktree(&genesis_config, &blocktree, Vec::new(), opts).unwrap();
|
||||
|
||||
assert_eq!(bank_forks_info.len(), 1); // One fork, other one is ignored b/c not a descendant of the root
|
||||
|
||||
@ -958,7 +993,7 @@ pub mod tests {
|
||||
..ProcessOptions::default()
|
||||
};
|
||||
let (bank_forks, bank_forks_info, _) =
|
||||
process_blocktree(&genesis_config, &blocktree, None, opts).unwrap();
|
||||
process_blocktree(&genesis_config, &blocktree, Vec::new(), opts).unwrap();
|
||||
|
||||
assert_eq!(bank_forks_info.len(), 2); // There are two forks
|
||||
assert_eq!(
|
||||
@ -1022,9 +1057,13 @@ pub mod tests {
|
||||
blocktree.set_dead_slot(2).unwrap();
|
||||
fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 3, 1, slot1_blockhash);
|
||||
|
||||
let (bank_forks, bank_forks_info, _) =
|
||||
process_blocktree(&genesis_config, &blocktree, None, ProcessOptions::default())
|
||||
.unwrap();
|
||||
let (bank_forks, bank_forks_info, _) = process_blocktree(
|
||||
&genesis_config,
|
||||
&blocktree,
|
||||
Vec::new(),
|
||||
ProcessOptions::default(),
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(bank_forks_info.len(), 1);
|
||||
assert_eq!(bank_forks_info[0], BankForksInfo { bank_slot: 3 });
|
||||
@ -1054,7 +1093,7 @@ pub mod tests {
|
||||
Blocktree::open(&ledger_path).expect("Expected to successfully open database ledger");
|
||||
|
||||
// Let last_slot be the number of slots in the first two epochs
|
||||
let epoch_schedule = get_epoch_schedule(&genesis_config, None);
|
||||
let epoch_schedule = get_epoch_schedule(&genesis_config, Vec::new());
|
||||
let last_slot = epoch_schedule.get_last_slot_in_epoch(1);
|
||||
|
||||
// Create a single chain of slots with all indexes in the range [0, last_slot + 1]
|
||||
@ -1081,7 +1120,7 @@ pub mod tests {
|
||||
..ProcessOptions::default()
|
||||
};
|
||||
let (bank_forks, bank_forks_info, _) =
|
||||
process_blocktree(&genesis_config, &blocktree, None, opts).unwrap();
|
||||
process_blocktree(&genesis_config, &blocktree, Vec::new(), opts).unwrap();
|
||||
|
||||
assert_eq!(bank_forks_info.len(), 1); // There is one fork
|
||||
assert_eq!(
|
||||
@ -1229,7 +1268,7 @@ pub mod tests {
|
||||
..ProcessOptions::default()
|
||||
};
|
||||
let (bank_forks, bank_forks_info, _) =
|
||||
process_blocktree(&genesis_config, &blocktree, None, opts).unwrap();
|
||||
process_blocktree(&genesis_config, &blocktree, Vec::new(), opts).unwrap();
|
||||
|
||||
assert_eq!(bank_forks_info.len(), 1);
|
||||
assert_eq!(bank_forks.root(), 0);
|
||||
@ -1258,7 +1297,7 @@ pub mod tests {
|
||||
..ProcessOptions::default()
|
||||
};
|
||||
let (bank_forks, bank_forks_info, _) =
|
||||
process_blocktree(&genesis_config, &blocktree, None, opts).unwrap();
|
||||
process_blocktree(&genesis_config, &blocktree, Vec::new(), opts).unwrap();
|
||||
|
||||
assert_eq!(bank_forks_info.len(), 1);
|
||||
assert_eq!(bank_forks_info[0], BankForksInfo { bank_slot: 0 });
|
||||
@ -1276,7 +1315,7 @@ pub mod tests {
|
||||
override_num_threads: Some(1),
|
||||
..ProcessOptions::default()
|
||||
};
|
||||
process_blocktree(&genesis_config, &blocktree, None, opts).unwrap();
|
||||
process_blocktree(&genesis_config, &blocktree, Vec::new(), opts).unwrap();
|
||||
PAR_THREAD_POOL.with(|pool| {
|
||||
assert_eq!(pool.borrow().current_num_threads(), 1);
|
||||
});
|
||||
@ -1293,7 +1332,7 @@ pub mod tests {
|
||||
..ProcessOptions::default()
|
||||
};
|
||||
let (_bank_forks, _bank_forks_info, cached_leader_schedule) =
|
||||
process_blocktree(&genesis_config, &blocktree, None, opts).unwrap();
|
||||
process_blocktree(&genesis_config, &blocktree, Vec::new(), opts).unwrap();
|
||||
assert_eq!(cached_leader_schedule.max_schedules(), std::usize::MAX);
|
||||
}
|
||||
|
||||
@ -1353,7 +1392,7 @@ pub mod tests {
|
||||
entry_callback: Some(entry_callback),
|
||||
..ProcessOptions::default()
|
||||
};
|
||||
process_blocktree(&genesis_config, &blocktree, None, opts).unwrap();
|
||||
process_blocktree(&genesis_config, &blocktree, Vec::new(), opts).unwrap();
|
||||
assert_eq!(*callback_counter.write().unwrap(), 2);
|
||||
}
|
||||
|
||||
@ -2188,7 +2227,7 @@ pub mod tests {
|
||||
|
||||
fn get_epoch_schedule(
|
||||
genesis_config: &GenesisConfig,
|
||||
account_paths: Option<String>,
|
||||
account_paths: Vec<PathBuf>,
|
||||
) -> EpochSchedule {
|
||||
let bank = Bank::new_with_paths(&genesis_config, account_paths);
|
||||
bank.epoch_schedule().clone()
|
||||
|
@ -3,6 +3,14 @@ use rand::SeedableRng;
use rand_chacha::ChaChaRng;
use solana_sdk::pubkey::Pubkey;
use std::ops::Index;
use std::sync::Arc;

// Used for testing
#[derive(Clone, Debug)]
pub struct FixedSchedule {
pub leader_schedule: Arc<LeaderSchedule>,
pub start_epoch: u64,
}

/// Stake-weighted leader schedule for one epoch.
#[derive(Debug, Default, PartialEq)]
@ -30,9 +38,17 @@ impl LeaderSchedule {
Self { slot_leaders }
}

pub fn new_from_schedule(slot_leaders: Vec<Pubkey>) -> Self {
Self { slot_leaders }
}

pub fn get_slot_leaders(&self) -> &[Pubkey] {
&self.slot_leaders
}

pub fn num_slots(&self) -> usize {
self.slot_leaders.len()
}
}

impl Index<u64> for LeaderSchedule {

@ -1,4 +1,8 @@
use crate::{blocktree::Blocktree, leader_schedule::LeaderSchedule, leader_schedule_utils};
use crate::{
blocktree::Blocktree,
leader_schedule::{FixedSchedule, LeaderSchedule},
leader_schedule_utils,
};
use log::*;
use solana_runtime::bank::Bank;
use solana_sdk::{
@ -28,6 +32,7 @@ pub struct LeaderScheduleCache {
epoch_schedule: EpochSchedule,
max_epoch: RwLock<Epoch>,
max_schedules: CacheCapacity,
fixed_schedule: Option<Arc<FixedSchedule>>,
}

impl LeaderScheduleCache {
@ -41,6 +46,7 @@ impl LeaderScheduleCache {
epoch_schedule,
max_epoch: RwLock::new(0),
max_schedules: CacheCapacity::default(),
fixed_schedule: None,
};

// This sets the root and calculates the schedule at leader_schedule_epoch(root)
@ -153,8 +159,17 @@ impl LeaderScheduleCache {
first_slot.map(|slot| (slot, last_slot))
}

pub fn set_fixed_leader_schedule(&mut self, fixed_schedule: Option<FixedSchedule>) {
self.fixed_schedule = fixed_schedule.map(Arc::new);
}

fn slot_leader_at_no_compute(&self, slot: Slot) -> Option<Pubkey> {
let (epoch, slot_index) = self.epoch_schedule.get_epoch_and_slot_index(slot);
if let Some(ref fixed_schedule) = self.fixed_schedule {
if epoch >= fixed_schedule.start_epoch {
return Some(fixed_schedule.leader_schedule[slot_index]);
}
}
self.cached_schedules
.read()
.unwrap()
@ -191,6 +206,11 @@ impl LeaderScheduleCache {
epoch: Epoch,
bank: &Bank,
) -> Option<Arc<LeaderSchedule>> {
if let Some(ref fixed_schedule) = self.fixed_schedule {
if epoch >= fixed_schedule.start_epoch {
return Some(fixed_schedule.leader_schedule.clone());
}
}
let epoch_schedule = self.cached_schedules.read().unwrap().0.get(&epoch).cloned();

if epoch_schedule.is_some() {

@ -86,7 +86,7 @@ pub fn package_snapshot<P: AsRef<Path>, Q: AsRef<Path>>(
// Get a reference to all the relevant AccountStorageEntries
let account_storage_entries: Vec<_> = bank
.rc
.get_storage_entries()
.get_rooted_storage_entries()
.into_iter()
.filter(|x| x.slot_id() <= bank.slot())
.collect();
@ -207,7 +207,7 @@ pub fn bank_slot_from_archive<P: AsRef<Path>>(snapshot_tar: P) -> Result<u64> {
}

pub fn bank_from_archive<P: AsRef<Path>>(
account_paths: String,
account_paths: &[PathBuf],
snapshot_path: &PathBuf,
snapshot_tar: P,
) -> Result<Bank> {
@ -266,7 +266,7 @@ pub fn untar_snapshot_in<P: AsRef<Path>, Q: AsRef<Path>>(
}

fn rebuild_bank_from_snapshots<P>(
local_account_paths: String,
local_account_paths: &[PathBuf],
unpacked_snapshots_dir: &PathBuf,
append_vecs_path: P,
) -> Result<Bank>

@ -3,31 +3,32 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-local-cluster"
description = "Blockchain, Rebuilt for Scale"
version = "0.21.0"
version = "0.21.6"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"

[dependencies]
itertools = "0.8.1"
log = "0.4.8"
rand = "0.6.5"
solana-config-program = { path = "../programs/config", version = "0.21.0" }
solana-core = { path = "../core", version = "0.21.0" }
solana-client = { path = "../client", version = "0.21.0" }
solana-drone = { path = "../drone", version = "0.21.0" }
solana-exchange-program = { path = "../programs/exchange", version = "0.21.0" }
solana-genesis-programs = { path = "../genesis-programs", version = "0.21.0" }
solana-ledger = { path = "../ledger", version = "0.21.0" }
solana-logger = { path = "../logger", version = "0.21.0" }
solana-runtime = { path = "../runtime", version = "0.21.0" }
solana-sdk = { path = "../sdk", version = "0.21.0" }
solana-stake-program = { path = "../programs/stake", version = "0.21.0" }
solana-storage-program = { path = "../programs/storage", version = "0.21.0" }
solana-vest-program = { path = "../programs/vest", version = "0.21.0" }
solana-vote-program = { path = "../programs/vote", version = "0.21.0" }
solana-config-program = { path = "../programs/config", version = "0.21.6" }
solana-core = { path = "../core", version = "0.21.6" }
solana-client = { path = "../client", version = "0.21.6" }
solana-drone = { path = "../drone", version = "0.21.6" }
solana-exchange-program = { path = "../programs/exchange", version = "0.21.6" }
solana-genesis-programs = { path = "../genesis-programs", version = "0.21.6" }
solana-ledger = { path = "../ledger", version = "0.21.6" }
solana-logger = { path = "../logger", version = "0.21.6" }
solana-runtime = { path = "../runtime", version = "0.21.6" }
solana-sdk = { path = "../sdk", version = "0.21.6" }
solana-stake-program = { path = "../programs/stake", version = "0.21.6" }
solana-storage-program = { path = "../programs/storage", version = "0.21.6" }
solana-vest-program = { path = "../programs/vest", version = "0.21.6" }
solana-vote-program = { path = "../programs/vote", version = "0.21.6" }
symlink = "0.1.0"
tempfile = "3.1.0"
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "0.21.0" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "0.21.6" }

[dev-dependencies]
serial_test = "0.2.0"

@ -1,5 +1,6 @@
use solana_client::thin_client::ThinClient;
use solana_core::contact_info::ContactInfo;
use solana_core::validator::Validator;
use solana_core::validator::ValidatorConfig;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::Keypair;
@ -17,13 +18,19 @@ pub struct ValidatorInfo {
pub struct ClusterValidatorInfo {
pub info: ValidatorInfo,
pub config: ValidatorConfig,
pub validator: Option<Validator>,
}

impl ClusterValidatorInfo {
pub fn new(validator_info: ValidatorInfo, config: ValidatorConfig) -> Self {
pub fn new(
validator_info: ValidatorInfo,
config: ValidatorConfig,
validator: Validator,
) -> Self {
Self {
info: validator_info,
config,
validator: Some(validator),
}
}
}

@ -15,9 +15,12 @@ use solana_ledger::{
};
use solana_sdk::{
client::SyncClient,
clock::{Slot, DEFAULT_TICKS_PER_SECOND, DEFAULT_TICKS_PER_SLOT, NUM_CONSECUTIVE_LEADER_SLOTS},
clock::{
Slot, DEFAULT_MS_PER_SLOT, DEFAULT_TICKS_PER_SECOND, DEFAULT_TICKS_PER_SLOT,
NUM_CONSECUTIVE_LEADER_SLOTS,
},
commitment_config::CommitmentConfig,
epoch_schedule::MINIMUM_SLOTS_PER_EPOCH,
epoch_schedule::{EpochSchedule, MINIMUM_SLOTS_PER_EPOCH},
hash::Hash,
poh_config::PohConfig,
pubkey::Pubkey,
@ -169,6 +172,11 @@ pub fn verify_ledger_ticks(ledger_path: &Path, ticks_per_slot: usize) {
}
}

pub fn time_until_nth_epoch(epoch: u64, slots_per_epoch: u64, stakers_slot_offset: u64) -> u64 {
let epoch_schedule = EpochSchedule::custom(slots_per_epoch, stakers_slot_offset, true);
epoch_schedule.get_last_slot_in_epoch(epoch) * DEFAULT_MS_PER_SLOT
}

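A hypothetical call site for the helper added above, e.g. a test that wants to sleep past the first two epochs; the concrete arguments are assumptions, not taken from this diff:

// Wait until epoch 2 has completed, at the default slot time.
let wait_ms = time_until_nth_epoch(2, MINIMUM_SLOTS_PER_EPOCH, MINIMUM_SLOTS_PER_EPOCH);
std::thread::sleep(std::time::Duration::from_millis(wait_ms));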
pub fn sleep_n_epochs(
num_epochs: f64,
config: &PohConfig,

@ -1,4 +1,5 @@
|
||||
use crate::cluster::{Cluster, ClusterValidatorInfo, ValidatorInfo};
|
||||
use itertools::izip;
|
||||
use log::*;
|
||||
use solana_client::thin_client::{create_client, ThinClient};
|
||||
use solana_core::{
|
||||
@ -39,6 +40,7 @@ use std::{
|
||||
collections::HashMap,
|
||||
fs::remove_dir_all,
|
||||
io::{Error, ErrorKind, Result},
|
||||
iter,
|
||||
path::PathBuf,
|
||||
sync::Arc,
|
||||
};
|
||||
@ -66,6 +68,8 @@ pub struct ClusterConfig {
|
||||
pub num_archivers: usize,
|
||||
/// Number of nodes that are unstaked and not voting (a.k.a listening)
|
||||
pub num_listeners: u64,
|
||||
/// The specific pubkeys of each node if specified
|
||||
pub validator_keys: Option<Vec<Arc<Keypair>>>,
|
||||
/// The stakes of each node
|
||||
pub node_stakes: Vec<u64>,
|
||||
/// The total lamports available to the cluster
|
||||
@ -85,6 +89,7 @@ impl Default for ClusterConfig {
|
||||
validator_configs: vec![],
|
||||
num_archivers: 0,
|
||||
num_listeners: 0,
|
||||
validator_keys: None,
|
||||
node_stakes: vec![],
|
||||
cluster_lamports: 0,
|
||||
ticks_per_slot: DEFAULT_TICKS_PER_SLOT,
|
||||
@ -103,9 +108,7 @@ pub struct LocalCluster {
|
||||
pub funding_keypair: Keypair,
|
||||
/// Entry point from which the rest of the network can be discovered
|
||||
pub entry_point_info: ContactInfo,
|
||||
pub validator_infos: HashMap<Pubkey, ClusterValidatorInfo>,
|
||||
pub listener_infos: HashMap<Pubkey, ClusterValidatorInfo>,
|
||||
validators: HashMap<Pubkey, Validator>,
|
||||
pub validators: HashMap<Pubkey, ClusterValidatorInfo>,
|
||||
pub genesis_config: GenesisConfig,
|
||||
archivers: Vec<Archiver>,
|
||||
pub archiver_infos: HashMap<Pubkey, ArchiverInfo>,
|
||||
@ -129,9 +132,20 @@ impl LocalCluster {
|
||||
|
||||
pub fn new(config: &ClusterConfig) -> Self {
|
||||
assert_eq!(config.validator_configs.len(), config.node_stakes.len());
|
||||
let leader_keypair = Arc::new(Keypair::new());
|
||||
let validator_keys = {
|
||||
if let Some(ref keys) = config.validator_keys {
|
||||
assert_eq!(config.validator_configs.len(), keys.len());
|
||||
keys.clone()
|
||||
} else {
|
||||
iter::repeat_with(|| Arc::new(Keypair::new()))
|
||||
.take(config.validator_configs.len())
|
||||
.collect()
|
||||
}
|
||||
};
|
||||
|
||||
let leader_keypair = &validator_keys[0];
|
||||
let leader_pubkey = leader_keypair.pubkey();
|
||||
let leader_node = Node::new_localhost_with_pubkey(&leader_keypair.pubkey());
|
||||
let leader_node = Node::new_localhost_with_pubkey(&leader_pubkey);
|
||||
let GenesisConfigInfo {
|
||||
mut genesis_config,
|
||||
mint_keypair,
|
||||
@ -151,7 +165,10 @@ impl LocalCluster {
|
||||
match genesis_config.operating_mode {
|
||||
OperatingMode::SoftLaunch => {
|
||||
genesis_config.native_instruction_processors =
|
||||
solana_genesis_programs::get_programs(genesis_config.operating_mode, 0).unwrap()
|
||||
solana_genesis_programs::get_programs(genesis_config.operating_mode, 0)
|
||||
.unwrap()
|
||||
.into_iter()
|
||||
.collect()
|
||||
}
|
||||
// create_genesis_config_with_leader() assumes OperatingMode::Development so do
|
||||
// nothing...
|
||||
@ -169,18 +186,13 @@ impl LocalCluster {
|
||||
.push(solana_storage_program!());
|
||||
|
||||
let storage_keypair = Keypair::new();
|
||||
genesis_config.accounts.push((
|
||||
genesis_config.add_account(
|
||||
storage_keypair.pubkey(),
|
||||
storage_contract::create_validator_storage_account(leader_pubkey, 1),
|
||||
));
|
||||
);
|
||||
|
||||
// Replace staking config
|
||||
genesis_config.accounts = genesis_config
|
||||
.accounts
|
||||
.into_iter()
|
||||
.filter(|(pubkey, _)| *pubkey != stake_config::id())
|
||||
.collect();
|
||||
genesis_config.accounts.push((
|
||||
genesis_config.add_account(
|
||||
stake_config::id(),
|
||||
stake_config::create_account(
|
||||
1,
|
||||
@ -189,7 +201,7 @@ impl LocalCluster {
|
||||
slash_penalty: std::u8::MAX,
|
||||
},
|
||||
),
|
||||
));
|
||||
);
|
||||
|
||||
let (leader_ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_config);
|
||||
let leader_contact_info = leader_node.info.clone();
|
||||
@ -210,20 +222,22 @@ impl LocalCluster {
|
||||
);
|
||||
|
||||
let mut validators = HashMap::new();
|
||||
let mut validator_infos = HashMap::new();
|
||||
validators.insert(leader_pubkey, leader_server);
|
||||
error!("leader_pubkey: {}", leader_pubkey);
|
||||
let leader_info = ValidatorInfo {
|
||||
keypair: leader_keypair,
|
||||
keypair: leader_keypair.clone(),
|
||||
voting_keypair: leader_voting_keypair,
|
||||
storage_keypair: leader_storage_keypair,
|
||||
ledger_path: leader_ledger_path,
|
||||
contact_info: leader_contact_info.clone(),
|
||||
};
|
||||
|
||||
let cluster_leader =
|
||||
ClusterValidatorInfo::new(leader_info, config.validator_configs[0].clone());
|
||||
let cluster_leader = ClusterValidatorInfo::new(
|
||||
leader_info,
|
||||
config.validator_configs[0].clone(),
|
||||
leader_server,
|
||||
);
|
||||
|
||||
validator_infos.insert(leader_pubkey, cluster_leader);
|
||||
validators.insert(leader_pubkey, cluster_leader);
|
||||
|
||||
let mut cluster = Self {
|
||||
funding_keypair: mint_keypair,
|
||||
@ -231,23 +245,24 @@ impl LocalCluster {
|
||||
validators,
|
||||
archivers: vec![],
|
||||
genesis_config,
|
||||
validator_infos,
|
||||
archiver_infos: HashMap::new(),
|
||||
listener_infos: HashMap::new(),
|
||||
};
|
||||
|
||||
for (stake, validator_config) in (&config.node_stakes[1..])
|
||||
.iter()
|
||||
.zip((&config.validator_configs[1..]).iter())
|
||||
{
|
||||
cluster.add_validator(validator_config, *stake);
|
||||
for (stake, validator_config, key) in izip!(
|
||||
(&config.node_stakes[1..]).iter(),
|
||||
config.validator_configs[1..].iter(),
|
||||
validator_keys[1..].iter(),
|
||||
) {
|
||||
cluster.add_validator(validator_config, *stake, key.clone());
|
||||
}
|
||||
|
||||
let listener_config = ValidatorConfig {
|
||||
voting_disabled: true,
|
||||
..config.validator_configs[0].clone()
|
||||
};
|
||||
(0..config.num_listeners).for_each(|_| cluster.add_validator(&listener_config, 0));
|
||||
(0..config.num_listeners).for_each(|_| {
|
||||
cluster.add_validator(&listener_config, 0, Arc::new(Keypair::new()));
|
||||
});
|
||||
|
||||
discover_cluster(
|
||||
&cluster.entry_point_info.gossip,
|
||||
@ -270,14 +285,18 @@ impl LocalCluster {
|
||||
|
||||
pub fn exit(&mut self) {
|
||||
for node in self.validators.values_mut() {
|
||||
node.exit();
|
||||
if let Some(ref mut v) = node.validator {
|
||||
v.exit();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn close_preserve_ledgers(&mut self) {
|
||||
self.exit();
|
||||
for (_, node) in self.validators.drain() {
|
||||
node.join().unwrap();
|
||||
for (_, node) in self.validators.iter_mut() {
|
||||
if let Some(v) = node.validator.take() {
|
||||
v.join().unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
while let Some(archiver) = self.archivers.pop() {
|
||||
@ -285,14 +304,18 @@ impl LocalCluster {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn add_validator(&mut self, validator_config: &ValidatorConfig, stake: u64) {
|
||||
pub fn add_validator(
|
||||
&mut self,
|
||||
validator_config: &ValidatorConfig,
|
||||
stake: u64,
|
||||
validator_keypair: Arc<Keypair>,
|
||||
) -> Pubkey {
|
||||
let client = create_client(
|
||||
self.entry_point_info.client_facing_addr(),
|
||||
VALIDATOR_PORT_RANGE,
|
||||
);
|
||||
|
||||
// Must have enough tokens to fund vote account and set delegate
|
||||
let validator_keypair = Arc::new(Keypair::new());
|
||||
let voting_keypair = Keypair::new();
|
||||
let storage_keypair = Arc::new(Keypair::new());
|
||||
let validator_pubkey = validator_keypair.pubkey();
|
||||
@ -343,8 +366,6 @@ impl LocalCluster {
|
||||
&config,
|
||||
);
|
||||
|
||||
self.validators
|
||||
.insert(validator_keypair.pubkey(), validator_server);
|
||||
let validator_pubkey = validator_keypair.pubkey();
|
||||
let validator_info = ClusterValidatorInfo::new(
|
||||
ValidatorInfo {
|
||||
@ -355,14 +376,11 @@ impl LocalCluster {
|
||||
contact_info,
|
||||
},
|
||||
validator_config.clone(),
|
||||
validator_server,
|
||||
);
|
||||
|
||||
if validator_config.voting_disabled {
|
||||
self.listener_infos.insert(validator_pubkey, validator_info);
|
||||
} else {
|
||||
self.validator_infos
|
||||
.insert(validator_pubkey, validator_info);
|
||||
}
|
||||
self.validators.insert(validator_pubkey, validator_info);
|
||||
validator_pubkey
|
||||
}
|
||||
|
||||
fn add_archiver(&mut self) {
|
||||
@ -407,7 +425,7 @@ impl LocalCluster {
|
||||
fn close(&mut self) {
|
||||
self.close_preserve_ledgers();
|
||||
for ledger_path in self
|
||||
.validator_infos
|
||||
.validators
|
||||
.values()
|
||||
.map(|f| &f.info.ledger_path)
|
||||
.chain(self.archiver_infos.values().map(|info| &info.ledger_path))
|
||||
@ -618,7 +636,7 @@ impl Cluster for LocalCluster {
|
||||
}
|
||||
|
||||
fn get_validator_client(&self, pubkey: &Pubkey) -> Option<ThinClient> {
|
||||
self.validator_infos.get(pubkey).map(|f| {
|
||||
self.validators.get(pubkey).map(|f| {
|
||||
create_client(
|
||||
f.info.contact_info.client_facing_addr(),
|
||||
VALIDATOR_PORT_RANGE,
|
||||
@ -630,10 +648,10 @@ impl Cluster for LocalCluster {
|
||||
let mut node = self.validators.remove(&pubkey).unwrap();
|
||||
|
||||
// Shut down the validator
|
||||
node.exit();
|
||||
node.join().unwrap();
|
||||
|
||||
self.validator_infos.remove(&pubkey).unwrap()
|
||||
let mut validator = node.validator.take().expect("Validator must be running");
|
||||
validator.exit();
|
||||
validator.join().unwrap();
|
||||
node
|
||||
}
|
||||
|
||||
fn restart_node(&mut self, pubkey: &Pubkey, mut cluster_validator_info: ClusterValidatorInfo) {
|
||||
@ -668,8 +686,8 @@ impl Cluster for LocalCluster {
|
||||
&cluster_validator_info.config,
|
||||
);
|
||||
|
||||
self.validators.insert(*pubkey, restarted_node);
|
||||
self.validator_infos.insert(*pubkey, cluster_validator_info);
|
||||
cluster_validator_info.validator = Some(restarted_node);
|
||||
self.validators.insert(*pubkey, cluster_validator_info);
|
||||
}
|
||||
|
||||
fn exit_restart_node(&mut self, pubkey: &Pubkey, validator_config: ValidatorConfig) {
|
||||
|
@ -8,13 +8,15 @@ use solana_core::{
|
||||
partition_cfg::{Partition, PartitionCfg},
|
||||
validator::ValidatorConfig,
|
||||
};
|
||||
use solana_ledger::{bank_forks::SnapshotConfig, blocktree::Blocktree, snapshot_utils};
|
||||
use solana_ledger::{
|
||||
bank_forks::SnapshotConfig, blocktree::Blocktree, leader_schedule::FixedSchedule,
|
||||
leader_schedule::LeaderSchedule, snapshot_utils,
|
||||
};
|
||||
use solana_local_cluster::{
|
||||
cluster::Cluster,
|
||||
cluster_tests,
|
||||
local_cluster::{ClusterConfig, LocalCluster},
|
||||
};
|
||||
use solana_runtime::accounts_db::AccountsDB;
|
||||
use solana_sdk::timing::timestamp;
|
||||
use solana_sdk::{
|
||||
client::SyncClient,
|
||||
@ -23,13 +25,15 @@ use solana_sdk::{
|
||||
epoch_schedule::{EpochSchedule, MINIMUM_SLOTS_PER_EPOCH},
|
||||
genesis_config::OperatingMode,
|
||||
poh_config::PohConfig,
|
||||
signature::{Keypair, KeypairUtil},
|
||||
};
|
||||
use std::{
|
||||
collections::{HashMap, HashSet},
|
||||
fs,
|
||||
fs, iter,
|
||||
path::{Path, PathBuf},
|
||||
sync::Arc,
|
||||
thread::sleep,
|
||||
time::Duration,
|
||||
time::{Duration, Instant},
|
||||
};
|
||||
use tempfile::TempDir;
|
||||
|
||||
@ -60,7 +64,7 @@ fn test_ledger_cleanup_service() {
|
||||
);
|
||||
cluster.close_preserve_ledgers();
|
||||
//check everyone's ledgers and make sure only ~100 slots are stored
|
||||
for (_, info) in &cluster.validator_infos {
|
||||
for (_, info) in &cluster.validators {
|
||||
let mut slots = 0;
|
||||
let blocktree = Blocktree::open(&info.info.ledger_path).unwrap();
|
||||
blocktree
|
||||
@ -188,71 +192,166 @@ fn test_leader_failure_4() {
|
||||
);
|
||||
}
|
||||
|
||||
fn run_network_partition(partitions: &[usize]) {
|
||||
/// This function runs a network, initiates a partition based on a
/// configuration, resolves the partition, then checks that the network
/// continues to achieve consensus
/// # Arguments
/// * `partitions` - A slice of partition configurations, where each partition
/// configuration is a slice of (usize, bool), representing a node's stake and
/// whether or not it should be killed during the partition
/// * `leader_schedule` - An option that specifies whether the cluster should
/// run with a fixed, predetermined leader schedule
|
||||
fn run_cluster_partition(
|
||||
partitions: &[&[(usize, bool)]],
|
||||
leader_schedule: Option<(LeaderSchedule, Vec<Arc<Keypair>>)>,
|
||||
) {
|
||||
solana_logger::setup();
|
||||
info!("PARTITION_TEST!");
|
||||
let num_nodes = partitions.iter().sum();
|
||||
let validator_config = ValidatorConfig::default();
|
||||
let num_nodes = partitions.len();
|
||||
let node_stakes: Vec<_> = partitions
|
||||
.iter()
|
||||
.flat_map(|p| p.iter().map(|(stake_weight, _)| 100 * *stake_weight as u64))
|
||||
.collect();
|
||||
assert_eq!(node_stakes.len(), num_nodes);
|
||||
let cluster_lamports = node_stakes.iter().sum::<u64>() * 2;
|
||||
let partition_start_epoch = 2;
|
||||
let mut validator_config = ValidatorConfig::default();
|
||||
|
||||
// Returns:
// 1) The keys for the validators
// 2) The amount of time it takes to run through one full iteration of the given
// leader schedule
|
||||
let (validator_keys, leader_schedule_time): (Vec<_>, u64) = {
|
||||
if let Some((leader_schedule, validator_keys)) = leader_schedule {
|
||||
assert_eq!(validator_keys.len(), num_nodes);
|
||||
let num_slots_per_rotation = leader_schedule.num_slots() as u64;
|
||||
let fixed_schedule = FixedSchedule {
|
||||
start_epoch: partition_start_epoch,
|
||||
leader_schedule: Arc::new(leader_schedule),
|
||||
};
|
||||
validator_config.fixed_leader_schedule = Some(fixed_schedule);
|
||||
(
|
||||
validator_keys,
|
||||
num_slots_per_rotation * clock::DEFAULT_MS_PER_SLOT,
|
||||
)
|
||||
} else {
|
||||
(
|
||||
iter::repeat_with(|| Arc::new(Keypair::new()))
|
||||
.take(partitions.len())
|
||||
.collect(),
|
||||
10_000,
|
||||
)
|
||||
}
|
||||
};
|
||||
|
||||
let validator_pubkeys: Vec<_> = validator_keys.iter().map(|v| v.pubkey()).collect();
|
||||
let mut config = ClusterConfig {
|
||||
cluster_lamports: 10_000,
|
||||
node_stakes: vec![100; num_nodes],
|
||||
cluster_lamports,
|
||||
node_stakes,
|
||||
validator_configs: vec![validator_config.clone(); num_nodes],
|
||||
validator_keys: Some(validator_keys),
|
||||
..ClusterConfig::default()
|
||||
};
|
||||
|
||||
let now = timestamp();
|
||||
let partition_start = now + 60_000;
|
||||
let partition_end = partition_start + 10_000;
|
||||
let mut total = 0;
|
||||
for (j, pn) in partitions.iter().enumerate() {
|
||||
info!(
|
||||
"PARTITION_TEST configuring partition {} for nodes {} - {}",
|
||||
j,
|
||||
total,
|
||||
total + *pn
|
||||
);
|
||||
for i in total..(total + *pn) {
|
||||
// The partition needs to start after the first few shorter warmup epochs, otherwise
// no root will be set before the partition is resolved, the leader schedule will
// not be computable, and the cluster will halt.
|
||||
let partition_epoch_start_offset = cluster_tests::time_until_nth_epoch(
|
||||
partition_start_epoch,
|
||||
config.slots_per_epoch,
|
||||
config.stakers_slot_offset,
|
||||
);
|
||||
// Assume it takes <= 10 seconds for `LocalCluster::new` to boot up.
|
||||
let local_cluster_boot_time = 10_000;
|
||||
let partition_start = now + partition_epoch_start_offset + local_cluster_boot_time;
|
||||
let partition_end = partition_start + leader_schedule_time as u64;
|
||||
let mut validator_index = 0;
|
||||
for (i, partition) in partitions.iter().enumerate() {
|
||||
for _ in partition.iter() {
|
||||
let mut p1 = Partition::default();
|
||||
p1.num_partitions = partitions.len();
|
||||
p1.my_partition = j;
|
||||
p1.my_partition = i;
|
||||
p1.start_ts = partition_start;
|
||||
p1.end_ts = partition_end;
|
||||
config.validator_configs[i].partition_cfg = Some(PartitionCfg::new(vec![p1]));
|
||||
config.validator_configs[validator_index].partition_cfg =
|
||||
Some(PartitionCfg::new(vec![p1]));
|
||||
validator_index += 1;
|
||||
}
|
||||
total += *pn;
|
||||
}
|
||||
info!(
|
||||
"PARTITION_TEST starting cluster with {:?} partitions",
|
||||
partitions
|
||||
);
|
||||
let cluster = LocalCluster::new(&config);
|
||||
let now = Instant::now();
|
||||
let mut cluster = LocalCluster::new(&config);
|
||||
let elapsed = now.elapsed();
|
||||
assert!(elapsed.as_millis() < local_cluster_boot_time as u128);
|
||||
|
||||
let now = timestamp();
|
||||
let timeout = partition_start as i64 - now as i64;
|
||||
let timeout = partition_start as u64 - now as u64;
|
||||
info!(
|
||||
"PARTITION_TEST sleeping until partition start timeout {}",
|
||||
timeout
|
||||
);
|
||||
let mut dead_nodes = HashSet::new();
|
||||
if timeout > 0 {
|
||||
sleep(Duration::from_millis(timeout as u64));
|
||||
}
|
||||
info!("PARTITION_TEST done sleeping until partition start timeout");
|
||||
let now = timestamp();
|
||||
let timeout = partition_end as i64 - now as i64;
|
||||
let timeout = partition_end as u64 - now as u64;
|
||||
info!(
|
||||
"PARTITION_TEST sleeping until partition end timeout {}",
|
||||
timeout
|
||||
);
|
||||
let mut alive_node_contact_infos = vec![];
|
||||
let should_exits: Vec<_> = partitions
|
||||
.iter()
|
||||
.flat_map(|p| p.iter().map(|(_, should_exit)| should_exit))
|
||||
.collect();
|
||||
assert_eq!(should_exits.len(), validator_pubkeys.len());
|
||||
if timeout > 0 {
|
||||
sleep(Duration::from_millis(timeout as u64));
|
||||
// Give partitions time to propagate the blocks they produced during the partition
// after the partition resolves
|
||||
let propagation_time = leader_schedule_time;
|
||||
info!("PARTITION_TEST resolving partition");
|
||||
sleep(Duration::from_millis(timeout));
|
||||
info!("PARTITION_TEST waiting for blocks to propagate after partition");
|
||||
sleep(Duration::from_millis(propagation_time));
|
||||
info!("PARTITION_TEST resuming normal operation");
|
||||
for (pubkey, should_exit) in validator_pubkeys.iter().zip(should_exits) {
|
||||
if *should_exit {
|
||||
info!("Killing validator with id: {}", pubkey);
|
||||
cluster.exit_node(pubkey);
|
||||
dead_nodes.insert(*pubkey);
|
||||
} else {
|
||||
alive_node_contact_infos.push(
|
||||
cluster
|
||||
.validators
|
||||
.get(pubkey)
|
||||
.unwrap()
|
||||
.info
|
||||
.contact_info
|
||||
.clone(),
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
info!("PARTITION_TEST done sleeping until partition end timeout");
|
||||
|
||||
assert!(alive_node_contact_infos.len() > 0);
|
||||
info!("PARTITION_TEST discovering nodes");
|
||||
let (cluster_nodes, _) = discover_cluster(&cluster.entry_point_info.gossip, num_nodes).unwrap();
|
||||
let (cluster_nodes, _) = discover_cluster(
|
||||
&alive_node_contact_infos[0].gossip,
|
||||
alive_node_contact_infos.len(),
|
||||
)
|
||||
.unwrap();
|
||||
info!("PARTITION_TEST discovered {} nodes", cluster_nodes.len());
|
||||
info!("PARTITION_TEST looking for new roots on all nodes");
|
||||
let mut roots = vec![HashSet::new(); cluster_nodes.len()];
|
||||
let mut roots = vec![HashSet::new(); alive_node_contact_infos.len()];
|
||||
let mut done = false;
|
||||
while !done {
|
||||
for (i, ingress_node) in cluster_nodes.iter().enumerate() {
|
||||
for (i, ingress_node) in alive_node_contact_infos.iter().enumerate() {
|
||||
let client = create_client(
|
||||
ingress_node.client_facing_addr(),
|
||||
solana_core::cluster_info::VALIDATOR_PORT_RANGE,
|
||||
@ -272,22 +371,64 @@ fn run_network_partition(partitions: &[usize]) {
|
||||
#[ignore]
|
||||
#[test]
|
||||
#[serial]
|
||||
fn test_network_partition_1_2() {
|
||||
run_network_partition(&[1, 2])
|
||||
fn test_cluster_partition_1_2() {
|
||||
run_cluster_partition(&[&[(1, false)], &[(1, false), (1, false)]], None)
|
||||
}
|
||||
|
||||
#[allow(unused_attributes)]
|
||||
#[ignore]
|
||||
#[test]
|
||||
#[serial]
|
||||
fn test_network_partition_1_1() {
|
||||
run_network_partition(&[1, 1])
|
||||
fn test_cluster_partition_1_1() {
|
||||
run_cluster_partition(&[&[(1, false)], &[(1, false)]], None)
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn test_network_partition_1_1_1() {
|
||||
run_network_partition(&[1, 1, 1])
|
||||
fn test_cluster_partition_1_1_1() {
|
||||
run_cluster_partition(&[&[(1, false)], &[(1, false)], &[(1, false)]], None)
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn test_kill_partition() {
|
||||
// This test:
// 1) Spins up three partitions
// 2) Forces more slots in the leader schedule for the first partition so
// that this partition will be the heaviest
// 3) Schedules the other validators for sufficient slots in the schedule
// so that they will still be locked out of voting for the major partition
// when the partition resolves
// 4) Kills the major partition. Validators are locked out, but should be
// able to reset to the major partition
// 5) Checks for recovery
|
||||
let mut leader_schedule = vec![];
|
||||
let num_slots_per_validator = 8;
|
||||
let partitions: [&[(usize, bool)]; 3] = [&[(9, true)], &[(10, false)], &[(10, false)]];
|
||||
let validator_keys: Vec<_> = iter::repeat_with(|| Arc::new(Keypair::new()))
|
||||
.take(partitions.len())
|
||||
.collect();
|
||||
for (i, k) in validator_keys.iter().enumerate() {
|
||||
let num_slots = {
|
||||
if i == 0 {
|
||||
// Set up the leader to have 50% of the slots
|
||||
num_slots_per_validator * (partitions.len() - 1)
|
||||
} else {
|
||||
num_slots_per_validator
|
||||
}
|
||||
};
|
||||
for _ in 0..num_slots {
|
||||
leader_schedule.push(k.pubkey())
|
||||
}
|
||||
}
|
||||
|
||||
run_cluster_partition(
|
||||
&partitions,
|
||||
Some((
|
||||
LeaderSchedule::new_from_schedule(leader_schedule),
|
||||
validator_keys,
|
||||
)),
|
||||
)
|
||||
}
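To make the partition spec above concrete, here is a minimal, illustrative Rust sketch of how a `&[&[(usize, bool)]]` layout like the one in `test_kill_partition` maps to node counts and stakes, mirroring the `100 * stake_weight` weighting that `run_cluster_partition` applies:

fn main() {
    // The same partition layout used by test_kill_partition: each inner slice is one
    // partition, and each (stake_weight, should_kill) entry describes one node.
    let partitions: &[&[(usize, bool)]] = &[&[(9, true)], &[(10, false)], &[(10, false)]];

    // One node per entry, across all partitions.
    let num_nodes: usize = partitions.iter().map(|p| p.len()).sum();

    // run_cluster_partition weights each node's stake as 100 * stake_weight.
    let node_stakes: Vec<u64> = partitions
        .iter()
        .flat_map(|p| p.iter().map(|(stake_weight, _)| 100 * *stake_weight as u64))
        .collect();

    assert_eq!(num_nodes, 3);
    assert_eq!(node_stakes, vec![900, 1000, 1000]);
}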
|
||||
|
||||
#[test]
|
||||
@ -319,10 +460,7 @@ fn test_two_unbalanced_stakes() {
|
||||
);
|
||||
cluster.close_preserve_ledgers();
|
||||
let leader_pubkey = cluster.entry_point_info.id;
|
||||
let leader_ledger = cluster.validator_infos[&leader_pubkey]
|
||||
.info
|
||||
.ledger_path
|
||||
.clone();
|
||||
let leader_ledger = cluster.validators[&leader_pubkey].info.ledger_path.clone();
|
||||
cluster_tests::verify_ledger_ticks(&leader_ledger, num_ticks_per_slot as usize);
|
||||
}
|
||||
|
||||
@ -560,6 +698,7 @@ fn test_snapshots_blocktree_floor() {
|
||||
cluster.add_validator(
|
||||
&validator_snapshot_test_config.validator_config,
|
||||
validator_stake,
|
||||
Arc::new(Keypair::new()),
|
||||
);
|
||||
let all_pubkeys = cluster.get_node_pubkeys();
|
||||
let validator_id = all_pubkeys
|
||||
@ -583,7 +722,7 @@ fn test_snapshots_blocktree_floor() {
|
||||
|
||||
// Check the validator ledger doesn't contain any slots < slot_floor
|
||||
cluster.close_preserve_ledgers();
|
||||
let validator_ledger_path = &cluster.validator_infos[&validator_id];
|
||||
let validator_ledger_path = &cluster.validators[&validator_id];
|
||||
let blocktree = Blocktree::open(&validator_ledger_path.info.ledger_path).unwrap();
|
||||
|
||||
// Skip the zeroth slot in blocktree that the ledger is initialized with
|
||||
@ -648,7 +787,7 @@ fn test_snapshots_restart_validity() {
|
||||
let (new_account_storage_dirs, new_account_storage_paths) =
|
||||
generate_account_paths(num_account_paths);
|
||||
all_account_storage_dirs.push(new_account_storage_dirs);
|
||||
snapshot_test_config.validator_config.account_paths = Some(new_account_storage_paths);
|
||||
snapshot_test_config.validator_config.account_paths = new_account_storage_paths;
|
||||
|
||||
// Restart node
|
||||
trace!("Restarting cluster from snapshot");
|
||||
@ -721,7 +860,7 @@ fn test_faulty_node(faulty_node_type: BroadcastStageType) {
|
||||
);
|
||||
|
||||
let corrupt_node = cluster
|
||||
.validator_infos
|
||||
.validators
|
||||
.iter()
|
||||
.find(|(_, v)| v.config.broadcast_stage_type == faulty_node_type)
|
||||
.unwrap()
|
||||
@ -768,10 +907,7 @@ fn test_no_voting() {
|
||||
|
||||
cluster.close_preserve_ledgers();
|
||||
let leader_pubkey = cluster.entry_point_info.id;
|
||||
let ledger_path = cluster.validator_infos[&leader_pubkey]
|
||||
.info
|
||||
.ledger_path
|
||||
.clone();
|
||||
let ledger_path = cluster.validators[&leader_pubkey].info.ledger_path.clone();
|
||||
let ledger = Blocktree::open(&ledger_path).unwrap();
|
||||
for i in 0..2 * VOTE_THRESHOLD_DEPTH {
|
||||
let meta = ledger.meta(i as u64).unwrap().unwrap();
|
||||
@ -850,7 +986,7 @@ fn run_repairman_catchup(num_repairmen: u64) {
|
||||
// Start up a new node, wait for catchup. Backwards repair won't be sufficient because the
|
||||
// leader is sending shreds past this validator's first two confirmed epochs. Thus, the repairman
|
||||
// protocol will have to kick in for this validator to repair.
|
||||
cluster.add_validator(&validator_config, repairee_stake);
|
||||
cluster.add_validator(&validator_config, repairee_stake, Arc::new(Keypair::new()));
|
||||
|
||||
let all_pubkeys = cluster.get_node_pubkeys();
|
||||
let repairee_id = all_pubkeys
|
||||
@ -903,15 +1039,14 @@ fn wait_for_next_snapshot<P: AsRef<Path>>(cluster: &LocalCluster, tar: P) {
|
||||
}
|
||||
}
|
||||
|
||||
fn generate_account_paths(num_account_paths: usize) -> (Vec<TempDir>, String) {
|
||||
fn generate_account_paths(num_account_paths: usize) -> (Vec<TempDir>, Vec<PathBuf>) {
|
||||
let account_storage_dirs: Vec<TempDir> = (0..num_account_paths)
|
||||
.map(|_| TempDir::new().unwrap())
|
||||
.collect();
|
||||
let account_storage_paths: Vec<_> = account_storage_dirs
|
||||
.iter()
|
||||
.map(|a| a.path().to_str().unwrap().to_string())
|
||||
.map(|a| a.path().to_path_buf())
|
||||
.collect();
|
||||
let account_storage_paths = AccountsDB::format_paths(account_storage_paths);
|
||||
(account_storage_dirs, account_storage_paths)
|
||||
}
|
||||
|
||||
@ -942,7 +1077,7 @@ fn setup_snapshot_validator_config(
|
||||
let mut validator_config = ValidatorConfig::default();
|
||||
validator_config.rpc_config.enable_validator_exit = true;
|
||||
validator_config.snapshot_config = Some(snapshot_config);
|
||||
validator_config.account_paths = Some(account_storage_paths);
|
||||
validator_config.account_paths = account_storage_paths;
|
||||
|
||||
SnapshotValidatorConfig {
|
||||
_snapshot_dir: snapshot_dir,
|
||||
|
@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
edition = "2018"
|
||||
name = "solana-log-analyzer"
|
||||
description = "The solana cluster network analysis tool"
|
||||
version = "0.1.0"
|
||||
version = "0.21.6"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@ -17,8 +17,8 @@ semver = "0.9.0"
|
||||
serde = "1.0.102"
|
||||
serde_derive = "1.0.102"
|
||||
serde_json = "1.0.41"
|
||||
solana-clap-utils = { path = "../clap-utils", version = "0.21.0" }
|
||||
solana-logger = { path = "../logger", version = "0.21.0" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "0.21.6" }
|
||||
solana-logger = { path = "../logger", version = "0.21.6" }
|
||||
|
||||
[[bin]]
|
||||
name = "solana-log-analyzer"
|
||||
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-logger"
|
||||
version = "0.21.0"
|
||||
version = "0.21.6"
|
||||
description = "Solana Logger"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
|
@ -1,7 +1,7 @@
|
||||
[package]
|
||||
name = "solana-measure"
|
||||
description = "Blockchain, Rebuilt for Scale"
|
||||
version = "0.21.0"
|
||||
version = "0.21.6"
|
||||
documentation = "https://docs.rs/solana"
|
||||
homepage = "https://solana.com/"
|
||||
readme = "../README.md"
|
||||
@ -11,4 +11,4 @@ license = "Apache-2.0"
|
||||
edition = "2018"
|
||||
|
||||
[dependencies]
|
||||
solana-sdk = { path = "../sdk", version = "0.21.0" }
|
||||
solana-sdk = { path = "../sdk", version = "0.21.6" }
|
||||
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-merkle-tree"
|
||||
version = "0.21.0"
|
||||
version = "0.21.6"
|
||||
description = "Solana Merkle Tree"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -9,7 +9,7 @@ homepage = "https://solana.com/"
|
||||
edition = "2018"
|
||||
|
||||
[dependencies]
|
||||
solana-sdk = { path = "../sdk", version = "0.21.0" }
|
||||
solana-sdk = { path = "../sdk", version = "0.21.6" }
|
||||
|
||||
[dev-dependencies]
|
||||
hex = "0.4.0"
|
||||
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-metrics"
|
||||
version = "0.21.0"
|
||||
version = "0.21.6"
|
||||
description = "Solana Metrics"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -13,7 +13,7 @@ env_logger = "0.7.1"
|
||||
lazy_static = "1.4.0"
|
||||
log = "0.4.8"
|
||||
reqwest = { version = "0.9.22", default-features = false, features = ["rustls-tls"] }
|
||||
solana-sdk = { path = "../sdk", version = "0.21.0" }
|
||||
solana-sdk = { path = "../sdk", version = "0.21.6" }
|
||||
sys-info = "0.5.8"
|
||||
|
||||
[dev-dependencies]
|
||||
|
@ -95,7 +95,24 @@ elif channel == 'stable':
|
||||
'text': 'testnet-perf',
|
||||
'value': 'testnet-perf'}],
|
||||
'query': 'testnet,testnet-perf',
|
||||
'type': 'custom'}]
|
||||
'type': 'custom'},
|
||||
{'allValue': ".*",
|
||||
'datasource': '$datasource',
|
||||
'hide': 0,
|
||||
'includeAll': True,
|
||||
'label': 'HostID',
|
||||
'multi': False,
|
||||
'name': 'hostid',
|
||||
'options': [],
|
||||
'query': 'SELECT DISTINCT(\"id\") FROM \"$testnet\".\"autogen\".\"validator-new\" ',
|
||||
'refresh': 2,
|
||||
'regex': '',
|
||||
'sort': 1,
|
||||
'tagValuesQuery': '',
|
||||
'tags': [],
|
||||
'tagsQuery': '',
|
||||
'type': 'query',
|
||||
'useTags': False}]
|
||||
else:
|
||||
# Non-stable dashboard only allows the user to select between all testnet
|
||||
# databases
|
||||
|
@ -4538,7 +4538,7 @@
|
||||
"x": 0,
|
||||
"y": 50
|
||||
},
|
||||
"id": 68,
|
||||
"id": 38,
|
||||
"links": [],
|
||||
"pageSize": null,
|
||||
"scroll": true,
|
||||
@ -4621,7 +4621,7 @@
|
||||
"x": 0,
|
||||
"y": 55
|
||||
},
|
||||
"id": 38,
|
||||
"id": 39,
|
||||
"panels": [],
|
||||
"title": "Bench Exchange",
|
||||
"type": "row"
|
||||
@ -4639,7 +4639,7 @@
|
||||
"x": 0,
|
||||
"y": 56
|
||||
},
|
||||
"id": 39,
|
||||
"id": 40,
|
||||
"legend": {
|
||||
"avg": false,
|
||||
"current": false,
|
||||
@ -4790,7 +4790,7 @@
|
||||
"x": 12,
|
||||
"y": 56
|
||||
},
|
||||
"id": 40,
|
||||
"id": 41,
|
||||
"legend": {
|
||||
"avg": false,
|
||||
"current": false,
|
||||
@ -4973,7 +4973,7 @@
|
||||
"x": 0,
|
||||
"y": 61
|
||||
},
|
||||
"id": 41,
|
||||
"id": 42,
|
||||
"panels": [],
|
||||
"title": "Validator Streamer",
|
||||
"type": "row"
|
||||
@ -4991,7 +4991,7 @@
|
||||
"x": 0,
|
||||
"y": 62
|
||||
},
|
||||
"id": 42,
|
||||
"id": 43,
|
||||
"legend": {
|
||||
"alignAsTable": false,
|
||||
"avg": false,
|
||||
@ -5093,45 +5093,6 @@
|
||||
],
|
||||
"tags": []
|
||||
},
|
||||
{
|
||||
"groupBy": [
|
||||
{
|
||||
"params": [
|
||||
"$__interval"
|
||||
],
|
||||
"type": "time"
|
||||
},
|
||||
{
|
||||
"params": [
|
||||
"null"
|
||||
],
|
||||
"type": "fill"
|
||||
}
|
||||
],
|
||||
"hide": false,
|
||||
"measurement": "cluster_info-vote-count",
|
||||
"orderByTime": "ASC",
|
||||
"policy": "autogen",
|
||||
"query": "SELECT mean(\"clone_and_seed\") AS \"clone_and_seed\" FROM \"$testnet\".\"autogen\".\"broadcast-bank-stats\" WHERE host_id::tag =~ /$hostid/ AND $timeFilter GROUP BY time($__interval)",
|
||||
"rawQuery": true,
|
||||
"refId": "C",
|
||||
"resultFormat": "time_series",
|
||||
"select": [
|
||||
[
|
||||
{
|
||||
"params": [
|
||||
"count"
|
||||
],
|
||||
"type": "field"
|
||||
},
|
||||
{
|
||||
"params": [],
|
||||
"type": "sum"
|
||||
}
|
||||
]
|
||||
],
|
||||
"tags": []
|
||||
},
|
||||
{
|
||||
"groupBy": [
|
||||
{
|
||||
@ -5248,6 +5209,45 @@
|
||||
]
|
||||
],
|
||||
"tags": []
|
||||
},
|
||||
{
|
||||
"groupBy": [
|
||||
{
|
||||
"params": [
|
||||
"$__interval"
|
||||
],
|
||||
"type": "time"
|
||||
},
|
||||
{
|
||||
"params": [
|
||||
"null"
|
||||
],
|
||||
"type": "fill"
|
||||
}
|
||||
],
|
||||
"hide": false,
|
||||
"measurement": "cluster_info-vote-count",
|
||||
"orderByTime": "ASC",
|
||||
"policy": "autogen",
|
||||
"query": "SELECT mean(\"insertion_time\") AS \"insertion_time\" FROM \"$testnet\".\"autogen\".\"broadcast-bank-stats\" WHERE host_id::tag =~ /$hostid/ AND $timeFilter GROUP BY time($__interval)",
|
||||
"rawQuery": true,
|
||||
"refId": "D",
|
||||
"resultFormat": "time_series",
|
||||
"select": [
|
||||
[
|
||||
{
|
||||
"params": [
|
||||
"count"
|
||||
],
|
||||
"type": "field"
|
||||
},
|
||||
{
|
||||
"params": [],
|
||||
"type": "sum"
|
||||
}
|
||||
]
|
||||
],
|
||||
"tags": []
|
||||
}
|
||||
],
|
||||
"thresholds": [],
|
||||
@ -5310,7 +5310,7 @@
|
||||
"x": 8,
|
||||
"y": 62
|
||||
},
|
||||
"id": 43,
|
||||
"id": 44,
|
||||
"legend": {
|
||||
"alignAsTable": false,
|
||||
"avg": false,
|
||||
@ -5668,7 +5668,7 @@
|
||||
"x": 16,
|
||||
"y": 62
|
||||
},
|
||||
"id": 44,
|
||||
"id": 45,
|
||||
"legend": {
|
||||
"alignAsTable": false,
|
||||
"avg": false,
|
||||
@ -5980,7 +5980,7 @@
|
||||
"x": 0,
|
||||
"y": 68
|
||||
},
|
||||
"id": 45,
|
||||
"id": 46,
|
||||
"legend": {
|
||||
"alignAsTable": false,
|
||||
"avg": false,
|
||||
@ -6636,7 +6636,7 @@
|
||||
"x": 16,
|
||||
"y": 68
|
||||
},
|
||||
"id": 47,
|
||||
"id": 48,
|
||||
"legend": {
|
||||
"alignAsTable": false,
|
||||
"avg": false,
|
||||
@ -6757,7 +6757,7 @@
|
||||
"x": 16,
|
||||
"y": 73
|
||||
},
|
||||
"id": 48,
|
||||
"id": 49,
|
||||
"legend": {
|
||||
"alignAsTable": false,
|
||||
"avg": false,
|
||||
@ -6913,7 +6913,7 @@
|
||||
"x": 0,
|
||||
"y": 74
|
||||
},
|
||||
"id": 49,
|
||||
"id": 50,
|
||||
"legend": {
|
||||
"alignAsTable": false,
|
||||
"avg": false,
|
||||
@ -7069,7 +7069,7 @@
|
||||
"x": 8,
|
||||
"y": 74
|
||||
},
|
||||
"id": 50,
|
||||
"id": 51,
|
||||
"legend": {
|
||||
"alignAsTable": false,
|
||||
"avg": false,
|
||||
@ -7184,7 +7184,7 @@
|
||||
"x": 16,
|
||||
"y": 78
|
||||
},
|
||||
"id": 51,
|
||||
"id": 52,
|
||||
"legend": {
|
||||
"alignAsTable": false,
|
||||
"avg": false,
|
||||
@ -7388,7 +7388,7 @@
|
||||
"x": 0,
|
||||
"y": 79
|
||||
},
|
||||
"id": 52,
|
||||
"id": 53,
|
||||
"legend": {
|
||||
"alignAsTable": false,
|
||||
"avg": false,
|
||||
@ -7690,7 +7690,7 @@
|
||||
"x": 8,
|
||||
"y": 79
|
||||
},
|
||||
"id": 53,
|
||||
"id": 54,
|
||||
"legend": {
|
||||
"alignAsTable": false,
|
||||
"avg": false,
|
||||
@ -7838,7 +7838,7 @@
|
||||
"x": 0,
|
||||
"y": 84
|
||||
},
|
||||
"id": 54,
|
||||
"id": 55,
|
||||
"panels": [],
|
||||
"title": "Tower Consensus",
|
||||
"type": "row"
|
||||
@ -7861,7 +7861,7 @@
|
||||
"x": 0,
|
||||
"y": 85
|
||||
},
|
||||
"id": 55,
|
||||
"id": 56,
|
||||
"legend": {
|
||||
"alignAsTable": false,
|
||||
"avg": false,
|
||||
@ -8021,7 +8021,7 @@
|
||||
"x": 8,
|
||||
"y": 85
|
||||
},
|
||||
"id": 56,
|
||||
"id": 57,
|
||||
"legend": {
|
||||
"alignAsTable": false,
|
||||
"avg": false,
|
||||
@ -8181,7 +8181,7 @@
|
||||
"x": 16,
|
||||
"y": 85
|
||||
},
|
||||
"id": 57,
|
||||
"id": 58,
|
||||
"legend": {
|
||||
"alignAsTable": false,
|
||||
"avg": false,
|
||||
@ -8366,7 +8366,7 @@
|
||||
"x": 0,
|
||||
"y": 90
|
||||
},
|
||||
"id": 58,
|
||||
"id": 59,
|
||||
"panels": [],
|
||||
"repeat": null,
|
||||
"title": "IP Network",
|
||||
@ -8385,7 +8385,7 @@
|
||||
"x": 0,
|
||||
"y": 91
|
||||
},
|
||||
"id": 59,
|
||||
"id": 60,
|
||||
"legend": {
|
||||
"alignAsTable": false,
|
||||
"avg": false,
|
||||
@ -8618,7 +8618,7 @@
|
||||
"x": 12,
|
||||
"y": 91
|
||||
},
|
||||
"id": 60,
|
||||
"id": 61,
|
||||
"legend": {
|
||||
"alignAsTable": false,
|
||||
"avg": false,
|
||||
@ -8771,7 +8771,7 @@
|
||||
"x": 0,
|
||||
"y": 96
|
||||
},
|
||||
"id": 61,
|
||||
"id": 62,
|
||||
"panels": [],
|
||||
"title": "Signature Verification",
|
||||
"type": "row"
|
||||
@ -8789,7 +8789,7 @@
|
||||
"x": 0,
|
||||
"y": 97
|
||||
},
|
||||
"id": 62,
|
||||
"id": 63,
|
||||
"legend": {
|
||||
"avg": false,
|
||||
"current": false,
|
||||
@ -8830,7 +8830,83 @@
|
||||
"measurement": "cluster_info-vote-count",
|
||||
"orderByTime": "ASC",
|
||||
"policy": "autogen",
|
||||
"query": "SELECT max(\"total_time_ms\") AS \"max\" FROM \"$testnet\".\"autogen\".\"sigverify_stage-total_verify_time\" WHERE $timeFilter GROUP BY time($__interval) FILL(0)\n\n",
|
||||
"query": "SELECT mean(\"num_packets\") AS \"num_packets\" FROM \"$testnet\".\"autogen\".\"sigverify_stage-total_verify_time\" WHERE $timeFilter GROUP BY time(500ms) FILL(0)\n\n",
|
||||
"rawQuery": true,
|
||||
"refId": "B",
|
||||
"resultFormat": "time_series",
|
||||
"select": [
|
||||
[
|
||||
{
|
||||
"params": [
|
||||
"count"
|
||||
],
|
||||
"type": "field"
|
||||
},
|
||||
{
|
||||
"params": [],
|
||||
"type": "sum"
|
||||
}
|
||||
]
|
||||
],
|
||||
"tags": []
|
||||
},
|
||||
{
|
||||
"groupBy": [
|
||||
{
|
||||
"params": [
|
||||
"$__interval"
|
||||
],
|
||||
"type": "time"
|
||||
},
|
||||
{
|
||||
"params": [
|
||||
"null"
|
||||
],
|
||||
"type": "fill"
|
||||
}
|
||||
],
|
||||
"measurement": "cluster_info-vote-count",
|
||||
"orderByTime": "ASC",
|
||||
"policy": "autogen",
|
||||
"query": "SELECT mean(\"verify_time_ms\") AS \"verify_time\" FROM \"$testnet\".\"autogen\".\"sigverify_stage-total_verify_time\" WHERE $timeFilter GROUP BY time(500ms) FILL(0)\n\n",
|
||||
"rawQuery": true,
|
||||
"refId": "C",
|
||||
"resultFormat": "time_series",
|
||||
"select": [
|
||||
[
|
||||
{
|
||||
"params": [
|
||||
"count"
|
||||
],
|
||||
"type": "field"
|
||||
},
|
||||
{
|
||||
"params": [],
|
||||
"type": "sum"
|
||||
}
|
||||
]
|
||||
],
|
||||
"tags": []
|
||||
},
|
||||
{
|
||||
"groupBy": [
|
||||
{
|
||||
"params": [
|
||||
"$__interval"
|
||||
],
|
||||
"type": "time"
|
||||
},
|
||||
{
|
||||
"params": [
|
||||
"null"
|
||||
],
|
||||
"type": "fill"
|
||||
}
|
||||
],
|
||||
"measurement": "cluster_info-vote-count",
|
||||
"orderByTime": "ASC",
|
||||
"policy": "autogen",
|
||||
"query": "SELECT mean(\"recv_time\") AS \"recv_time\" FROM \"$testnet\".\"autogen\".\"sigverify_stage-total_verify_time\" WHERE $timeFilter GROUP BY time(500ms) FILL(0)\n\n",
|
||||
"rawQuery": true,
|
||||
"refId": "A",
|
||||
"resultFormat": "time_series",
|
||||
@ -8910,7 +8986,7 @@
|
||||
"x": 12,
|
||||
"y": 97
|
||||
},
|
||||
"id": 63,
|
||||
"id": 64,
|
||||
"legend": {
|
||||
"alignAsTable": false,
|
||||
"avg": false,
|
||||
@ -9059,7 +9135,7 @@
|
||||
"x": 0,
|
||||
"y": 102
|
||||
},
|
||||
"id": 64,
|
||||
"id": 65,
|
||||
"panels": [],
|
||||
"title": "Snapshots",
|
||||
"type": "row"
|
||||
@ -9077,7 +9153,7 @@
|
||||
"x": 0,
|
||||
"y": 103
|
||||
},
|
||||
"id": 65,
|
||||
"id": 66,
|
||||
"legend": {
|
||||
"avg": false,
|
||||
"current": false,
|
||||
@ -9269,7 +9345,7 @@
|
||||
"x": 8,
|
||||
"y": 103
|
||||
},
|
||||
"id": 66,
|
||||
"id": 67,
|
||||
"legend": {
|
||||
"avg": false,
|
||||
"current": false,
|
||||
@ -9461,7 +9537,7 @@
|
||||
"x": 16,
|
||||
"y": 103
|
||||
},
|
||||
"id": 67,
|
||||
"id": 68,
|
||||
"legend": {
|
||||
"avg": false,
|
||||
"current": false,
|
||||
@ -9650,7 +9726,7 @@
|
||||
"x": 0,
|
||||
"y": 109
|
||||
},
|
||||
"id": 74,
|
||||
"id": 69,
|
||||
"panels": [],
|
||||
"title": "Resources",
|
||||
"type": "row"
|
||||
@ -9905,6 +9981,7 @@
|
||||
{
|
||||
"allValue": ".*",
|
||||
"current": {
|
||||
"selected": false,
|
||||
"text": "testnet",
|
||||
"value": "testnet"
|
||||
},
|
||||
@ -9979,4 +10056,4 @@
|
||||
"title": "Testnet Monitor (edge)",
|
||||
"uid": "testnet-edge",
|
||||
"version": 2
|
||||
}
|
||||
}
|
@ -262,7 +262,7 @@ setup_validator_accounts() {
|
||||
fi
|
||||
|
||||
echo "Creating validator vote account"
|
||||
wallet create-vote-account "$voting_keypair_path" "$identity_keypair_path" --commission 127 || return $?
|
||||
wallet create-vote-account "$voting_keypair_path" "$identity_keypair_path" --commission 50 || return $?
|
||||
fi
|
||||
echo "Validator vote account configured"
|
||||
|
||||
|
@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
edition = "2018"
|
||||
name = "solana-net-shaper"
|
||||
description = "The solana cluster network shaping tool"
|
||||
version = "0.1.0"
|
||||
version = "0.21.6"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
@ -16,8 +16,8 @@ semver = "0.9.0"
|
||||
serde = "1.0.102"
|
||||
serde_derive = "1.0.102"
|
||||
serde_json = "1.0.41"
|
||||
solana-clap-utils = { path = "../clap-utils", version = "0.21.0" }
|
||||
solana-logger = { path = "../logger", version = "0.21.0" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "0.21.6" }
|
||||
solana-logger = { path = "../logger", version = "0.21.6" }
|
||||
rand = "0.6.5"
|
||||
|
||||
[[bin]]
|
||||
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-net-utils"
|
||||
version = "0.21.0"
|
||||
version = "0.21.6"
|
||||
description = "Solana Network Utilities"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -18,8 +18,8 @@ rand = "0.6.1"
|
||||
serde = "1.0.102"
|
||||
serde_derive = "1.0.102"
|
||||
socket2 = "0.3.11"
|
||||
solana-clap-utils = { path = "../clap-utils", version = "0.21.0" }
|
||||
solana-logger = { path = "../logger", version = "0.21.0" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "0.21.6" }
|
||||
solana-logger = { path = "../logger", version = "0.21.6" }
|
||||
tokio = "0.1"
|
||||
tokio-codec = "0.1"
|
||||
|
||||
|
@ -42,83 +42,145 @@ pub fn ip_echo_server(tcp: std::net::TcpListener) -> IpEchoServer {
|
||||
.incoming()
|
||||
.map_err(|err| warn!("accept failed: {:?}", err))
|
||||
.for_each(move |socket| {
|
||||
let ip = socket.peer_addr().expect("Expect peer_addr()").ip();
|
||||
info!("connection from {:?}", ip);
|
||||
let peer_addr = socket.peer_addr().expect("Expect peer_addr()");
|
||||
info!("connection from {:?}", peer_addr);
|
||||
|
||||
let framed = BytesCodec::new().framed(socket);
|
||||
let (writer, reader) = framed.split();
|
||||
|
||||
let processor = reader
|
||||
.and_then(move |bytes| {
|
||||
bincode::deserialize::<IpEchoServerMessage>(&bytes).or_else(|err| {
|
||||
Err(io::Error::new(
|
||||
.and_then(move |data| {
|
||||
if data.len() < 4 {
|
||||
return Err(io::Error::new(
|
||||
io::ErrorKind::Other,
|
||||
format!("Failed to deserialize IpEchoServerMessage: {:?}", err),
|
||||
))
|
||||
})
|
||||
format!("Request too short, received {} bytes", data.len()),
|
||||
));
|
||||
}
|
||||
let request_header: String = data[0..4].iter().map(|b| *b as char).collect();
|
||||
if request_header != "\0\0\0\0" {
|
||||
// Explicitly check for HTTP GET/POST requests to more gracefully handle
|
||||
// the case where a user accidentally tried to use a gossip entrypoint in
|
||||
// place of a JSON RPC URL:
|
||||
if request_header == "GET " || request_header == "POST" {
|
||||
return Ok(None); // None -> Send HTTP error response
|
||||
}
|
||||
return Err(io::Error::new(
|
||||
io::ErrorKind::Other,
|
||||
format!("Bad request header: {}", request_header),
|
||||
));
|
||||
}
|
||||
|
||||
let expected_len =
|
||||
bincode::serialized_size(&IpEchoServerMessage::default()).unwrap() as usize;
|
||||
let actual_len = data[4..].len();
|
||||
if actual_len < expected_len {
|
||||
return Err(io::Error::new(
|
||||
io::ErrorKind::Other,
|
||||
format!(
|
||||
"Request too short, actual {} < expected {}",
|
||||
actual_len, expected_len
|
||||
),
|
||||
));
|
||||
}
|
||||
|
||||
bincode::deserialize::<IpEchoServerMessage>(&data[4..])
|
||||
.map(Some)
|
||||
.or_else(|err| {
|
||||
Err(io::Error::new(
|
||||
io::ErrorKind::Other,
|
||||
format!("Failed to deserialize IpEchoServerMessage: {:?}", err),
|
||||
))
|
||||
})
|
||||
})
|
||||
.and_then(move |msg| {
|
||||
// Fire a datagram at each non-zero UDP port
|
||||
if !msg.udp_ports.is_empty() {
|
||||
match std::net::UdpSocket::bind("0.0.0.0:0") {
|
||||
Ok(udp_socket) => {
|
||||
for udp_port in &msg.udp_ports {
|
||||
if *udp_port != 0 {
|
||||
match udp_socket
|
||||
.send_to(&[0], SocketAddr::from((ip, *udp_port)))
|
||||
{
|
||||
Ok(_) => debug!("Successful send_to udp/{}", udp_port),
|
||||
Err(err) => {
|
||||
info!("Failed to send_to udp/{}: {}", udp_port, err)
|
||||
.and_then(move |maybe_msg| {
|
||||
match maybe_msg {
|
||||
None => None, // Send HTTP error response
|
||||
Some(msg) => {
|
||||
// Fire a datagram at each non-zero UDP port
|
||||
if !msg.udp_ports.is_empty() {
|
||||
match std::net::UdpSocket::bind("0.0.0.0:0") {
|
||||
Ok(udp_socket) => {
|
||||
for udp_port in &msg.udp_ports {
|
||||
if *udp_port != 0 {
|
||||
match udp_socket.send_to(
|
||||
&[0],
|
||||
SocketAddr::from((peer_addr.ip(), *udp_port)),
|
||||
) {
|
||||
Ok(_) => debug!(
|
||||
"Successful send_to udp/{}",
|
||||
udp_port
|
||||
),
|
||||
Err(err) => info!(
|
||||
"Failed to send_to udp/{}: {}",
|
||||
udp_port, err
|
||||
),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(err) => {
|
||||
warn!("Failed to bind local udp socket: {}", err);
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(err) => {
|
||||
warn!("Failed to bind local udp socket: {}", err);
|
||||
}
|
||||
|
||||
// Try to connect to each non-zero TCP port
|
||||
let tcp_futures: Vec<_> =
|
||||
msg.tcp_ports
|
||||
.iter()
|
||||
.filter_map(|tcp_port| {
|
||||
let tcp_port = *tcp_port;
|
||||
if tcp_port == 0 {
|
||||
None
|
||||
} else {
|
||||
Some(
|
||||
tokio::net::TcpStream::connect(&SocketAddr::new(
|
||||
peer_addr.ip(),
|
||||
tcp_port,
|
||||
))
|
||||
.and_then(move |tcp_stream| {
|
||||
debug!(
|
||||
"Connection established to tcp/{}",
|
||||
tcp_port
|
||||
);
|
||||
let _ = tcp_stream
|
||||
.shutdown(std::net::Shutdown::Both);
|
||||
Ok(())
|
||||
})
|
||||
.timeout(Duration::from_secs(5))
|
||||
.or_else(move |err| {
|
||||
Err(io::Error::new(
|
||||
io::ErrorKind::Other,
|
||||
format!(
|
||||
"Connection timeout to {}: {:?}",
|
||||
tcp_port, err
|
||||
),
|
||||
))
|
||||
}),
|
||||
)
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
Some(future::join_all(tcp_futures))
|
||||
}
|
||||
}
|
||||
|
||||
// Try to connect to each non-zero TCP port
|
||||
let tcp_futures: Vec<_> = msg
|
||||
.tcp_ports
|
||||
.iter()
|
||||
.filter_map(|tcp_port| {
|
||||
let tcp_port = *tcp_port;
|
||||
if tcp_port == 0 {
|
||||
None
|
||||
} else {
|
||||
Some(
|
||||
tokio::net::TcpStream::connect(&SocketAddr::new(ip, tcp_port))
|
||||
.and_then(move |tcp_stream| {
|
||||
debug!("Connection established to tcp/{}", tcp_port);
|
||||
let _ = tcp_stream.shutdown(std::net::Shutdown::Both);
|
||||
Ok(())
|
||||
})
|
||||
.timeout(Duration::from_secs(5))
|
||||
.or_else(move |err| {
|
||||
Err(io::Error::new(
|
||||
io::ErrorKind::Other,
|
||||
format!(
|
||||
"Connection timeout to {}: {:?}",
|
||||
tcp_port, err
|
||||
),
|
||||
))
|
||||
}),
|
||||
)
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
future::join_all(tcp_futures)
|
||||
})
|
||||
.and_then(move |_| {
|
||||
let ip = bincode::serialize(&ip).unwrap_or_else(|err| {
|
||||
warn!("Failed to serialize: {:?}", err);
|
||||
vec![]
|
||||
});
|
||||
Ok(Bytes::from(ip))
|
||||
.and_then(move |valid_request| {
|
||||
if valid_request.is_none() {
|
||||
Ok(Bytes::from(
|
||||
"HTTP/1.1 400 Bad Request\nContent-length: 0\n\n",
|
||||
))
|
||||
} else {
|
||||
// "\0\0\0\0" header is added to ensure a valid response will never
|
||||
// conflict with the first four bytes of a valid HTTP response.
|
||||
let mut bytes = vec![
|
||||
0;
|
||||
4 + bincode::serialized_size(&peer_addr.ip()).unwrap()
|
||||
as usize
|
||||
];
|
||||
bincode::serialize_into(&mut bytes[4..], &peer_addr.ip()).unwrap();
|
||||
Ok(Bytes::from(bytes))
|
||||
}
|
||||
});
|
||||
|
||||
let connection = writer
|
||||
|
@ -29,8 +29,15 @@ fn ip_echo_server_request(
|
||||
let timeout = Duration::new(5, 0);
|
||||
TcpStream::connect_timeout(ip_echo_server_addr, timeout)
|
||||
.and_then(|mut stream| {
|
||||
let msg = bincode::serialize(&msg).expect("serialize IpEchoServerMessage");
|
||||
stream.write_all(&msg)?;
|
||||
let mut bytes = vec![0; 4]; // Start with 4 null bytes to avoid looking like an HTTP GET/POST request
|
||||
|
||||
bytes.append(&mut bincode::serialize(&msg).expect("serialize IpEchoServerMessage"));
|
||||
|
||||
// End with '\n' to make this request look HTTP-ish and tickle an error response back
|
||||
// from an HTTP server
|
||||
bytes.push(b'\n');
|
||||
|
||||
stream.write_all(&bytes)?;
|
||||
stream.shutdown(std::net::Shutdown::Write)?;
|
||||
stream
|
||||
.set_read_timeout(Some(Duration::new(10, 0)))
|
||||
@ -38,7 +45,38 @@ fn ip_echo_server_request(
|
||||
stream.read_to_end(&mut data)
|
||||
})
|
||||
.and_then(|_| {
|
||||
bincode::deserialize(&data).map_err(|err| {
|
||||
// It's common for users to accidentally confuse the validator's gossip port and JSON
|
||||
// RPC port. Attempt to detect when this occurs by looking for the standard HTTP
|
||||
// response header and provide the user with a helpful error message
|
||||
if data.len() < 4 {
|
||||
return Err(io::Error::new(
|
||||
io::ErrorKind::Other,
|
||||
format!("Response too short, received {} bytes", data.len()),
|
||||
));
|
||||
}
|
||||
|
||||
let response_header: String = data[0..4].iter().map(|b| *b as char).collect();
|
||||
if response_header != "\0\0\0\0" {
|
||||
if response_header == "HTTP" {
|
||||
let http_response = data.iter().map(|b| *b as char).collect::<String>();
|
||||
return Err(io::Error::new(
|
||||
io::ErrorKind::Other,
|
||||
format!(
|
||||
"Invalid gossip entrypoint. {} looks to be an HTTP port: {}",
|
||||
ip_echo_server_addr, http_response
|
||||
),
|
||||
));
|
||||
}
|
||||
return Err(io::Error::new(
|
||||
io::ErrorKind::Other,
|
||||
format!(
|
||||
"Invalid gossip entrypoint. {} provided an invalid response header: '{}'",
|
||||
ip_echo_server_addr, response_header
|
||||
),
|
||||
));
|
||||
}
|
||||
|
||||
bincode::deserialize(&data[4..]).map_err(|err| {
|
||||
io::Error::new(
|
||||
io::ErrorKind::Other,
|
||||
format!("Failed to deserialize: {:?}", err),
|
||||
@ -102,7 +140,7 @@ pub fn verify_reachable_ports(
|
||||
);
|
||||
std::process::exit(1);
|
||||
});
|
||||
info!("tdp/{} is reachable", port);
|
||||
info!("tcp/{} is reachable", port);
|
||||
}
|
||||
|
||||
// Wait for a datagram to arrive at each UDP port
|
||||
@ -435,6 +473,7 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_get_public_ip_addr() {
|
||||
solana_logger::setup();
|
||||
let (_server_port, (server_udp_socket, server_tcp_listener)) =
|
||||
bind_common_in_range((3200, 3250)).unwrap();
|
||||
let (client_port, (client_udp_socket, client_tcp_listener)) =
|
||||
@ -443,7 +482,10 @@ mod tests {
|
||||
let _runtime = ip_echo_server(server_tcp_listener);
|
||||
|
||||
let ip_echo_server_addr = server_udp_socket.local_addr().unwrap();
|
||||
get_public_ip_addr(&ip_echo_server_addr).unwrap();
|
||||
assert_eq!(
|
||||
get_public_ip_addr(&ip_echo_server_addr),
|
||||
parse_host("127.0.0.1"),
|
||||
);
|
||||
|
||||
verify_reachable_ports(
|
||||
&ip_echo_server_addr,
|
||||
|
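To summarize the ip-echo framing introduced above, here is a minimal, self-contained Rust sketch of what client and server now agree on. The `EchoMessage` struct is a hypothetical stand-in for the crate's `IpEchoServerMessage` (its real field layout is not shown in this diff), and the sketch assumes only the `serde` (with derive) and `bincode` crates:

use serde::{Deserialize, Serialize};
use std::net::IpAddr;

// Hypothetical stand-in for IpEchoServerMessage; the real fields live in solana-net-utils.
#[derive(Serialize, Deserialize, Default)]
struct EchoMessage {
    tcp_ports: Vec<u16>,
    udp_ports: Vec<u16>,
}

// Frame a request the way the updated client does: four zero bytes (so the request can
// never start with "GET " or "POST"), the bincode-encoded message, and a trailing '\n'
// so an HTTP server contacted by mistake replies with an error the client can surface.
fn frame_request(msg: &EchoMessage) -> Vec<u8> {
    let mut bytes = vec![0u8; 4];
    bytes.extend(bincode::serialize(msg).expect("serialize EchoMessage"));
    bytes.push(b'\n');
    bytes
}

// Parse a response the way the updated client does: reject short replies, detect an HTTP
// answer (gossip port confused with the JSON RPC port), require the four-zero-byte header,
// then bincode-decode the echoed public IP.
fn parse_response(data: &[u8]) -> Result<IpAddr, String> {
    if data.len() < 4 {
        return Err(format!("Response too short, received {} bytes", data.len()));
    }
    if &data[0..4] == b"HTTP" {
        return Err("Peer looks like an HTTP (JSON RPC) port, not a gossip entrypoint".to_string());
    }
    if data[0..4] != [0, 0, 0, 0] {
        return Err("Invalid response header".to_string());
    }
    bincode::deserialize(&data[4..]).map_err(|err| format!("Failed to deserialize: {:?}", err))
}

fn main() {
    let request = frame_request(&EchoMessage::default());
    assert_eq!(&request[0..4], &[0, 0, 0, 0]);

    // Simulate a well-formed server reply: "\0\0\0\0" followed by bincode(IpAddr).
    let ip: IpAddr = "127.0.0.1".parse().unwrap();
    let mut reply = vec![0u8; 4];
    reply.extend(bincode::serialize(&ip).unwrap());
    assert_eq!(parse_response(&reply).unwrap(), ip);
}

The four-zero-byte prefix is what lets both sides cheaply distinguish a genuine ip-echo peer from an HTTP endpoint, which is the mix-up the new error messages in this diff are aimed at.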
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-perf"
|
||||
version = "0.21.0"
|
||||
version = "0.21.6"
|
||||
description = "Solana Performance APIs"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -18,11 +18,11 @@ serde_derive = "1.0.102"
|
||||
dlopen_derive = "0.1.4"
|
||||
lazy_static = "1.4.0"
|
||||
log = "0.4.8"
|
||||
solana-sdk = { path = "../sdk", version = "0.21.0" }
|
||||
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "0.21.0" }
|
||||
solana-budget-program = { path = "../programs/budget", version = "0.21.0" }
|
||||
solana-logger = { path = "../logger", version = "0.21.0" }
|
||||
solana-metrics = { path = "../metrics", version = "0.21.0" }
|
||||
solana-sdk = { path = "../sdk", version = "0.21.6" }
|
||||
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "0.21.6" }
|
||||
solana-budget-program = { path = "../programs/budget", version = "0.21.6" }
|
||||
solana-logger = { path = "../logger", version = "0.21.6" }
|
||||
solana-metrics = { path = "../metrics", version = "0.21.6" }
|
||||
|
||||
[lib]
|
||||
name = "solana_perf"
|
||||
|
Some files were not shown because too many files have changed in this diff