Compare commits


66 Commits

Author SHA1 Message Date
mergify[bot]
535ee281e8 Filter old CrdsValues received via Pull Responses in Gossip (#8150) (#8277)
automerge
2020-02-14 10:21:26 -08:00
mergify[bot]
da843a7ace Fix larger than necessary allocations in streamer (#8187) (#8191)
automerge
2020-02-10 12:52:16 -08:00
Michael Vines
772cf8288c Bump version to 0.22.8 2020-02-03 21:08:54 -07:00
Michael Vines
e81a40ba55 Lock snapshot version to 0.22.6 2020-02-03 17:06:29 -07:00
Michael Vines
a52359a6be Cargo.lock 2020-02-03 17:05:35 -07:00
sakridge
2fe0853fba Fix consensus threshold when new root is created (#8093)
When a new root is created, the oldest slot is popped off
but when the logic checks for identical slots, it assumes
that any difference means a slot was popped off the front.
2020-02-03 16:47:02 -07:00
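
The reasoning in this commit message is easy to illustrate. Below is a toy model (not the repository's actual consensus code) of the distinction it draws: a genuine root creation pops exactly the oldest slot and leaves the rest intact, so a bare length difference between two slot stacks is not evidence of a front pop on its own.

```rust
/// Toy model, not the repository's consensus code: a root creation pops
/// exactly the oldest slot; everything after it must still match.
fn front_was_popped(before: &[u64], after: &[u64]) -> bool {
    before.len() == after.len() + 1 && before[1..] == *after
}

fn main() {
    let before = [10, 20, 30];
    assert!(front_was_popped(&before, &[20, 30])); // genuine root pop
    assert!(!front_was_popped(&before, &[20, 31])); // diverged: not just a pop
    assert!(!front_was_popped(&before, &[10, 20, 30])); // identical: nothing popped
    println!("ok");
}
```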
Sagar Dhawan
de3630f76c Filter repairman peers based on shred_version (#8069)
(cherry picked from commit b9988b62e4)
2020-02-01 08:58:26 -07:00
carllin
ff9e388843 Fix stale gossip entrypoint (#8053)
(cherry picked from commit fd207b6907)
2020-01-31 00:34:29 -07:00
Michael Vines
67a7995f04 Bump version to 0.22.7 2020-01-30 15:47:22 -07:00
Michael Vines
f9d793023c Only error if --expected-shred-version was not provided 2020-01-30 13:25:25 -07:00
Michael Vines
99b2504b38 Rename rpc_peers() to all_rpc_peers() for consistency 2020-01-30 13:21:04 -07:00
Michael Vines
3f3aec29d1 Add different shred test to test_tvu_peers_and_stakes
(cherry picked from commit 0c55b37976)
2020-01-30 11:28:18 -07:00
Justin Starry
7be8124b9e Ignore slow archiver tests (#8032)
automerge

(cherry picked from commit 400412d76c)
2020-01-30 09:38:53 -07:00
Sagar Dhawan
81259daa3f Add shred version filters to Crds Accessors (#8027)
* Add shred version filters to Crds Accessors

* Adopt entrypoint shred_version if one isn't provided

(cherry picked from commit 64c42e28dc)
2020-01-30 08:59:00 -07:00
Michael Vines
136fa5b561 Add leader-schedule subcommand 2020-01-29 20:08:32 -07:00
Michael Vines
63ca6118fa Add --expected-shred-version option 2020-01-29 20:08:32 -07:00
Michael Vines
850d729739 Wait for supermajority by default, add --no-wait-for-supermajority flag to override 2020-01-29 20:08:32 -07:00
Michael Vines
62f9183d17 getClusterNodes now excludes validators with a different shred version 2020-01-29 20:08:32 -07:00
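
Several commits in this range gate peers on shred_version (the Crds accessor filters, repairman filtering, and the getClusterNodes change above). A minimal sketch of that kind of filter, using illustrative stand-in types rather than the actual ContactInfo/Crds structures:

```rust
/// Illustrative stand-in for gossip contact info; not the actual
/// ContactInfo/Crds types.
#[derive(Debug)]
struct Peer {
    id: &'static str,
    shred_version: u16,
}

/// Keep only peers advertising our shred_version. The real accessors may
/// special-case a shred_version of 0 ("unset"); this toy just requires an
/// exact match.
fn filter_by_shred_version(peers: Vec<Peer>, ours: u16) -> Vec<Peer> {
    peers.into_iter().filter(|p| p.shred_version == ours).collect()
}

fn main() {
    let peers = vec![
        Peer { id: "a", shred_version: 42 },
        Peer { id: "b", shred_version: 7 },
        Peer { id: "c", shred_version: 42 },
    ];
    assert_eq!(filter_by_shred_version(peers, 42).len(), 2);
}
```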
Justin Starry
cfe3481ba4 Log solana-validator args on startup to aid debugging
(cherry picked from commit effe6e3ff3)
2020-01-29 09:40:18 -07:00
Michael Vines
788e9f321c Bump version to v0.22.6 2020-01-28 08:44:44 -07:00
Michael Vines
265e88e734 Fix compute_shred_version() 2020-01-27 19:05:17 -07:00
Michael Vines
e80c74d955 Drop v prefix 2020-01-27 19:05:17 -07:00
Michael Vines
d3efe2317b Remove stray key 2020-01-26 14:36:00 -07:00
Michael Vines
05a661dd88 Bump version to v0.22.5 2020-01-24 21:52:01 -07:00
Michael Vines
84090df770 Bump perf libs to v0.18.0 for CUDA 10.2 support 2020-01-24 21:38:51 -07:00
Stephen Akridge
3f7fe04124 Consensus fix, don't consider threshold check if
lockouts are not increased
2020-01-24 21:34:16 -07:00
mergify[bot]
ac4e3c2426 Add ability to hard fork at any slot (#7801) (#7970)
automerge
2020-01-24 18:57:08 -08:00
Jack May
13af049988 Install move-loader binaries (#7768)
(cherry picked from commit 5cb23c814d)
2020-01-24 18:13:03 -07:00
Michael Vines
bd07f9bdcb Move testnet.solana.com and TdS to their own GCP projects 2020-01-24 16:28:04 -07:00
mergify[bot]
82927fee20 Increase --wait-for-supermajority to wait for 75% online stake (#7957)
automerge
2020-01-23 23:03:13 -08:00
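
The 75% threshold above amounts to a plain integer stake-percentage check. A hedged toy version (names and numbers illustrative, not the validator's actual code):

```rust
/// Toy version of the 75% online-stake gate; names are illustrative.
fn supermajority_online(online_stake: u64, total_stake: u64) -> bool {
    // Multiply before dividing to stay in integer arithmetic.
    online_stake.saturating_mul(100) / total_stake.max(1) >= 75
}

fn main() {
    assert!(!supermajority_online(74, 100));
    assert!(supermajority_online(75, 100));
    assert!(supermajority_online(3, 4)); // 75% exactly
}
```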
Michael Vines
57d5534bab Add create-snapshot command 2020-01-23 22:21:36 -07:00
Michael Vines
d2c15b596f Add BlockstoreProcessorResult 2020-01-23 21:03:57 -07:00
Michael Vines
5d8dc78718 Move snapshot archive generation out of the SnapshotPackagerService 2020-01-23 15:58:59 -07:00
Michael Vines
c945e80618 Type grooming 2020-01-23 15:58:59 -07:00
Michael Vines
0802793d37 Unify ledger_path arg handling with validator/ 2020-01-23 15:58:59 -07:00
Michael Vines
a5c3750a58 Pass bank_forks by reference 2020-01-23 15:58:59 -07:00
Michael Vines
dc1c5f8b1e --halt-at-slot 1 now halts at slot 1 2020-01-23 15:58:59 -07:00
Michael Vines
653bec01f0 Set BankRc slot correctly when restoring a bank snapshot 2020-01-23 15:58:59 -07:00
Michael Vines
49c94fad60 add_snapshot now returns SlotSnapshotPaths 2020-01-23 15:58:59 -07:00
Michael Vines
98fd1b3fcb Remove superfluous accounts arg 2020-01-23 15:58:59 -07:00
mergify[bot]
93301d1c81 Make run.sh not overwrite genesis if existing (#7837) (#7939)
automerge
2020-01-22 23:38:41 -08:00
mergify[bot]
5aa8ee8ede Uninteresting cleanup (#7938)
automerge
2020-01-22 21:16:25 -08:00
mergify[bot]
28f81bd0a3 Avoid unsorted recent_blockhashes for determinism (#7918) (#7936)
automerge
2020-01-22 18:52:39 -08:00
Michael Vines
1f4ae4318b Reject CI on failed mergify.io backports (#7927)
automerge

(cherry picked from commit 9bd6be779f)
2020-01-22 16:11:07 -07:00
mergify[bot]
bec1cf3145 CLI: Cleanup authority arg usage inconsistencies (#7922) (#7924)
automerge
2020-01-22 14:09:26 -08:00
Michael Vines
5b4b086ebf Add mechanism to load v0.22.3 snapshots on newer Solana versions 2020-01-22 13:19:07 -07:00
Michael Vines
0ef33b6462 don't put accounts in a weird location, use the defaults (#7921)
automerge

(cherry picked from commit f9323c5273)
2020-01-22 12:58:06 -07:00
mergify[bot]
e401bc6997 CLI: Support offline authorities (#7905) (#7920)
automerge
2020-01-22 10:57:16 -08:00
mergify[bot]
8ffd2c12a3 Add and use minimumLedgerSlot RPC API in block-production command (bp #7901) (#7903)
automerge
2020-01-21 14:07:32 -08:00
mergify[bot]
ec4134f26d Revert "Generate MAX_DATA_SHREDS_PER_FEC_BLOCK coding shreds for each FEC block (#7474)" (#7898) (#7899)
automerge
2020-01-21 12:40:42 -08:00
mergify[bot]
35e7b2f975 Remove redundant threadpools in sigverify (bp #7888) (#7890)
automerge
2020-01-20 21:31:56 -08:00
Michael Vines
3509f1158f Assume 1 or more validators 2020-01-20 19:19:29 -07:00
mergify[bot]
1ca33d1967 --limit-ledger-size now accepts an optional slot count value (#7885)
automerge
2020-01-20 14:22:37 -08:00
mergify[bot]
19474ecaae Create ledger directory if it doesn't already exist (#7878)
automerge
2020-01-20 10:41:40 -08:00
Michael Vines
e317940ebc Try running testnet.solana.com with only two validators 2020-01-20 10:23:43 -07:00
mergify[bot]
fbbfa93524 Spy just for RPC to avoid premature supermajority (#7856) (#7875)
automerge
2020-01-19 18:51:13 -08:00
mergify[bot]
c759a04fbc If a bad RPC node is selected try another one instead of aborting (#7871)
automerge
2020-01-18 10:52:15 -08:00
Michael Vines
d1d37db717 Abort if a snapshot download fails for any reason other than 404
(cherry picked from commit e28508ad56)
2020-01-18 09:35:43 -07:00
mergify[bot]
4904b6a532 CLI: Support offline and nonced stake subcommands (#7831) (#7861)
automerge
2020-01-17 13:10:38 -08:00
mergify[bot]
f80a657764 Nonce: Rename instructions with VerbNoun scheme (#7775) (#7778)
automerge
2020-01-17 10:48:33 -08:00
mergify[bot]
344c528b63 Reduce grace ticks, and ignore grace ticks for missing leaders (#7764) (#7779)
automerge
2020-01-16 19:57:41 -08:00
mergify[bot]
ee1300a671 Improve bench-tps keypair generation (#7723) (#7853)
automerge
2020-01-16 19:30:00 -08:00
mergify[bot]
6c2534a8be Add logging surrounding failure in get_slot_entries_with_shred_info() (#7846) (#7851)
automerge
2020-01-16 17:27:52 -08:00
Michael Vines
28a979c7d3 Cargo.lock 2020-01-16 16:34:33 -07:00
mergify[bot]
d071674b03 ignore prost is part of move (#7848) (#7850)
automerge
2020-01-16 15:24:05 -08:00
Michael Vines
8c5f676df0 Bump version to 0.22.4 2020-01-15 18:55:50 -07:00
140 changed files with 3688 additions and 1895 deletions

586 changes in Cargo.lock (generated)

File diff suppressed because it is too large.


@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-archiver"
version = "0.22.3"
version = "0.22.8"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -10,10 +10,10 @@ homepage = "https://solana.com/"
[dependencies]
clap = "2.33.0"
console = "0.9.1"
solana-clap-utils = { path = "../clap-utils", version = "0.22.3" }
solana-core = { path = "../core", version = "0.22.3" }
solana-logger = { path = "../logger", version = "0.22.3" }
solana-metrics = { path = "../metrics", version = "0.22.3" }
solana-net-utils = { path = "../net-utils", version = "0.22.3" }
solana-sdk = { path = "../sdk", version = "0.22.3" }
solana-clap-utils = { path = "../clap-utils", version = "0.22.8" }
solana-core = { path = "../core", version = "0.22.8" }
solana-logger = { path = "../logger", version = "0.22.8" }
solana-metrics = { path = "../metrics", version = "0.22.8" }
solana-net-utils = { path = "../net-utils", version = "0.22.8" }
solana-sdk = { path = "../sdk", version = "0.22.8" }


@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-banking-bench"
version = "0.22.3"
version = "0.22.8"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -10,11 +10,11 @@ homepage = "https://solana.com/"
[dependencies]
log = "0.4.6"
rayon = "1.2.0"
solana-core = { path = "../core", version = "0.22.3" }
solana-ledger = { path = "../ledger", version = "0.22.3" }
solana-logger = { path = "../logger", version = "0.22.3" }
solana-runtime = { path = "../runtime", version = "0.22.3" }
solana-measure = { path = "../measure", version = "0.22.3" }
solana-sdk = { path = "../sdk", version = "0.22.3" }
solana-core = { path = "../core", version = "0.22.8" }
solana-ledger = { path = "../ledger", version = "0.22.8" }
solana-logger = { path = "../logger", version = "0.22.8" }
solana-runtime = { path = "../runtime", version = "0.22.8" }
solana-measure = { path = "../measure", version = "0.22.8" }
solana-sdk = { path = "../sdk", version = "0.22.8" }
rand = "0.6.5"
crossbeam-channel = "0.3"


@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-exchange"
version = "0.22.3"
version = "0.22.8"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -23,19 +23,19 @@ serde = "1.0.104"
serde_derive = "1.0.103"
serde_json = "1.0.44"
serde_yaml = "0.8.11"
solana-clap-utils = { path = "../clap-utils", version = "0.22.3" }
solana-core = { path = "../core", version = "0.22.3" }
solana-genesis = { path = "../genesis", version = "0.22.3" }
solana-client = { path = "../client", version = "0.22.3" }
solana-faucet = { path = "../faucet", version = "0.22.3" }
solana-exchange-program = { path = "../programs/exchange", version = "0.22.3" }
solana-logger = { path = "../logger", version = "0.22.3" }
solana-metrics = { path = "../metrics", version = "0.22.3" }
solana-net-utils = { path = "../net-utils", version = "0.22.3" }
solana-runtime = { path = "../runtime", version = "0.22.3" }
solana-sdk = { path = "../sdk", version = "0.22.3" }
solana-clap-utils = { path = "../clap-utils", version = "0.22.8" }
solana-core = { path = "../core", version = "0.22.8" }
solana-genesis = { path = "../genesis", version = "0.22.8" }
solana-client = { path = "../client", version = "0.22.8" }
solana-faucet = { path = "../faucet", version = "0.22.8" }
solana-exchange-program = { path = "../programs/exchange", version = "0.22.8" }
solana-logger = { path = "../logger", version = "0.22.8" }
solana-metrics = { path = "../metrics", version = "0.22.8" }
solana-net-utils = { path = "../net-utils", version = "0.22.8" }
solana-runtime = { path = "../runtime", version = "0.22.8" }
solana-sdk = { path = "../sdk", version = "0.22.8" }
untrusted = "0.7.0"
ws = "0.9.1"
[dev-dependencies]
solana-local-cluster = { path = "../local-cluster", version = "0.22.3" }
solana-local-cluster = { path = "../local-cluster", version = "0.22.8" }


@@ -2,14 +2,14 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-streamer"
version = "0.22.3"
version = "0.22.8"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
[dependencies]
clap = "2.33.0"
solana-clap-utils = { path = "../clap-utils", version = "0.22.3" }
solana-core = { path = "../core", version = "0.22.3" }
solana-logger = { path = "../logger", version = "0.22.3" }
solana-net-utils = { path = "../net-utils", version = "0.22.3" }
solana-clap-utils = { path = "../clap-utils", version = "0.22.8" }
solana-core = { path = "../core", version = "0.22.8" }
solana-logger = { path = "../logger", version = "0.22.8" }
solana-net-utils = { path = "../net-utils", version = "0.22.8" }


@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-tps"
version = "0.22.3"
version = "0.22.8"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -16,24 +16,24 @@ serde = "1.0.104"
serde_derive = "1.0.103"
serde_json = "1.0.44"
serde_yaml = "0.8.11"
solana-clap-utils = { path = "../clap-utils", version = "0.22.3" }
solana-core = { path = "../core", version = "0.22.3" }
solana-genesis = { path = "../genesis", version = "0.22.3" }
solana-client = { path = "../client", version = "0.22.3" }
solana-faucet = { path = "../faucet", version = "0.22.3" }
solana-librapay = { path = "../programs/librapay", version = "0.22.3", optional = true }
solana-logger = { path = "../logger", version = "0.22.3" }
solana-metrics = { path = "../metrics", version = "0.22.3" }
solana-measure = { path = "../measure", version = "0.22.3" }
solana-net-utils = { path = "../net-utils", version = "0.22.3" }
solana-runtime = { path = "../runtime", version = "0.22.3" }
solana-sdk = { path = "../sdk", version = "0.22.3" }
solana-move-loader-program = { path = "../programs/move_loader", version = "0.22.3", optional = true }
solana-clap-utils = { path = "../clap-utils", version = "0.22.8" }
solana-core = { path = "../core", version = "0.22.8" }
solana-genesis = { path = "../genesis", version = "0.22.8" }
solana-client = { path = "../client", version = "0.22.8" }
solana-faucet = { path = "../faucet", version = "0.22.8" }
solana-librapay = { path = "../programs/librapay", version = "0.22.8", optional = true }
solana-logger = { path = "../logger", version = "0.22.8" }
solana-metrics = { path = "../metrics", version = "0.22.8" }
solana-measure = { path = "../measure", version = "0.22.8" }
solana-net-utils = { path = "../net-utils", version = "0.22.8" }
solana-runtime = { path = "../runtime", version = "0.22.8" }
solana-sdk = { path = "../sdk", version = "0.22.8" }
solana-move-loader-program = { path = "../programs/move_loader", version = "0.22.8", optional = true }
[dev-dependencies]
serial_test = "0.3.2"
serial_test_derive = "0.3.1"
solana-local-cluster = { path = "../local-cluster", version = "0.22.3" }
solana-local-cluster = { path = "../local-cluster", version = "0.22.8" }
[features]
move = ["solana-librapay", "solana-move-loader-program"]


@@ -21,8 +21,7 @@ use solana_sdk::{
transaction::Transaction,
};
use std::{
cmp,
collections::VecDeque,
collections::{HashSet, VecDeque},
net::SocketAddr,
process::exit,
sync::{
@@ -66,10 +65,9 @@ fn get_recent_blockhash<T: Client>(client: &T) -> (Hash, FeeCalculator) {
}
pub fn do_bench_tps<T>(
clients: Vec<T>,
client: Arc<T>,
config: Config,
gen_keypairs: Vec<Keypair>,
keypair0_balance: u64,
libra_args: Option<LibraKeys>,
) -> u64
where
@@ -82,13 +80,9 @@ where
duration,
tx_count,
sustained,
num_lamports_per_account,
..
} = config;
let clients: Vec<_> = clients.into_iter().map(Arc::new).collect();
let client = &clients[0];
let mut source_keypair_chunks: Vec<Vec<&Keypair>> = Vec::new();
let mut dest_keypair_chunks: Vec<VecDeque<&Keypair>> = Vec::new();
assert!(gen_keypairs.len() >= 2 * tx_count);
@@ -115,20 +109,17 @@ where
let maxes = Arc::new(RwLock::new(Vec::new()));
let sample_period = 1; // in seconds
info!("Sampling TPS every {} second...", sample_period);
let v_threads: Vec<_> = clients
.iter()
.map(|client| {
let exit_signal = exit_signal.clone();
let maxes = maxes.clone();
let client = client.clone();
Builder::new()
.name("solana-client-sample".to_string())
.spawn(move || {
sample_txs(&exit_signal, &maxes, sample_period, &client);
})
.unwrap()
})
.collect();
let sample_thread = {
let exit_signal = exit_signal.clone();
let maxes = maxes.clone();
let client = client.clone();
Builder::new()
.name("solana-client-sample".to_string())
.spawn(move || {
sample_txs(&exit_signal, &maxes, sample_period, &client);
})
.unwrap()
};
let shared_txs: SharedTransactions = Arc::new(RwLock::new(VecDeque::new()));
@@ -174,11 +165,10 @@ where
// generate and send transactions for the specified duration
let start = Instant::now();
let keypair_chunks = source_keypair_chunks.len() as u64;
let keypair_chunks = source_keypair_chunks.len();
let mut reclaim_lamports_back_to_source_account = false;
let mut i = keypair0_balance;
let mut chunk_index = 0;
while start.elapsed() < duration {
let chunk_index = (i % keypair_chunks) as usize;
generate_txs(
&shared_txs,
&recent_blockhash,
@@ -206,8 +196,11 @@ where
// transaction signatures even when blockhash is reused.
dest_keypair_chunks[chunk_index].rotate_left(1);
i += 1;
if should_switch_directions(num_lamports_per_account, keypair_chunks, i) {
// Move on to next chunk
chunk_index = (chunk_index + 1) % keypair_chunks;
// Switch directions after transfering for each "chunk"
if chunk_index == 0 {
reclaim_lamports_back_to_source_account = !reclaim_lamports_back_to_source_account;
}
}
@@ -215,11 +208,9 @@ where
// Stop the sampling threads so it will collect the stats
exit_signal.store(true, Ordering::Relaxed);
info!("Waiting for validator threads...");
for t in v_threads {
if let Err(err) = t.join() {
info!(" join() failed with: {:?}", err);
}
info!("Waiting for sampler threads...");
if let Err(err) = sample_thread.join() {
info!(" join() failed with: {:?}", err);
}
// join the tx send threads
@@ -500,177 +491,218 @@ fn do_tx_transfers<T: Client>(
}
}
fn verify_funding_transfer<T: Client>(client: &T, tx: &Transaction, amount: u64) -> bool {
fn verify_funding_transfer<T: Client>(client: &Arc<T>, tx: &Transaction, amount: u64) -> bool {
for a in &tx.message().account_keys[1..] {
if client
.get_balance_with_commitment(a, CommitmentConfig::recent())
.unwrap_or(0)
>= amount
{
return true;
match client.get_balance_with_commitment(a, CommitmentConfig::recent()) {
Ok(balance) => return balance >= amount,
Err(err) => error!("failed to get balance {:?}", err),
}
}
false
}
trait FundingTransactions<'a> {
fn fund<T: 'static + Client + Send + Sync>(
&mut self,
client: &Arc<T>,
to_fund: &[(&'a Keypair, Vec<(Pubkey, u64)>)],
to_lamports: u64,
);
fn make(&mut self, to_fund: &[(&'a Keypair, Vec<(Pubkey, u64)>)]);
fn sign(&mut self, blockhash: Hash);
fn send<T: Client>(&self, client: &Arc<T>);
fn verify<T: 'static + Client + Send + Sync>(&mut self, client: &Arc<T>, to_lamports: u64);
}
impl<'a> FundingTransactions<'a> for Vec<(&'a Keypair, Transaction)> {
fn fund<T: 'static + Client + Send + Sync>(
&mut self,
client: &Arc<T>,
to_fund: &[(&'a Keypair, Vec<(Pubkey, u64)>)],
to_lamports: u64,
) {
self.make(to_fund);
let mut tries = 0;
while !self.is_empty() {
info!(
"{} {} each to {} accounts in {} txs",
if tries == 0 {
"transferring"
} else {
" retrying"
},
to_lamports,
self.len() * MAX_SPENDS_PER_TX as usize,
self.len(),
);
let (blockhash, _fee_calculator) = get_recent_blockhash(client.as_ref());
// re-sign retained to_fund_txes with updated blockhash
self.sign(blockhash);
self.send(&client);
// Sleep a few slots to allow transactions to process
sleep(Duration::from_secs(1));
self.verify(&client, to_lamports);
// retry anything that seems to have dropped through cracks
// again since these txs are all or nothing, they're fine to
// retry
tries += 1;
}
info!("transferred");
}
fn make(&mut self, to_fund: &[(&'a Keypair, Vec<(Pubkey, u64)>)]) {
let mut make_txs = Measure::start("make_txs");
let to_fund_txs: Vec<(&Keypair, Transaction)> = to_fund
.par_iter()
.map(|(k, t)| {
let tx = Transaction::new_unsigned_instructions(system_instruction::transfer_many(
&k.pubkey(),
&t,
));
(*k, tx)
})
.collect();
make_txs.stop();
debug!(
"make {} unsigned txs: {}us",
to_fund_txs.len(),
make_txs.as_us()
);
self.extend(to_fund_txs);
}
fn sign(&mut self, blockhash: Hash) {
let mut sign_txs = Measure::start("sign_txs");
self.par_iter_mut().for_each(|(k, tx)| {
tx.sign(&[*k], blockhash);
});
sign_txs.stop();
debug!("sign {} txs: {}us", self.len(), sign_txs.as_us());
}
fn send<T: Client>(&self, client: &Arc<T>) {
let mut send_txs = Measure::start("send_txs");
self.iter().for_each(|(_, tx)| {
client.async_send_transaction(tx.clone()).expect("transfer");
});
send_txs.stop();
debug!("send {} txs: {}us", self.len(), send_txs.as_us());
}
fn verify<T: 'static + Client + Send + Sync>(&mut self, client: &Arc<T>, to_lamports: u64) {
let starting_txs = self.len();
let verified_txs = Arc::new(AtomicUsize::new(0));
let too_many_failures = Arc::new(AtomicBool::new(false));
let loops = if starting_txs < 1000 { 3 } else { 1 };
// Only loop multiple times for small (quick) transaction batches
for _ in 0..loops {
let failed_verify = Arc::new(AtomicUsize::new(0));
let client = client.clone();
let verified_txs = &verified_txs;
let failed_verify = &failed_verify;
let too_many_failures = &too_many_failures;
let verified_set: HashSet<Pubkey> = self
.par_iter()
.filter_map(move |(k, tx)| {
if too_many_failures.load(Ordering::Relaxed) {
return None;
}
let verified = if verify_funding_transfer(&client, &tx, to_lamports) {
verified_txs.fetch_add(1, Ordering::Relaxed);
Some(k.pubkey())
} else {
failed_verify.fetch_add(1, Ordering::Relaxed);
None
};
let verified_txs = verified_txs.load(Ordering::Relaxed);
let failed_verify = failed_verify.load(Ordering::Relaxed);
let remaining_count = starting_txs.saturating_sub(verified_txs + failed_verify);
if failed_verify > 100 && failed_verify > verified_txs {
too_many_failures.store(true, Ordering::Relaxed);
warn!(
"Too many failed transfers... {} remaining, {} verified, {} failures",
remaining_count, verified_txs, failed_verify
);
}
if remaining_count % 100 == 0 {
info!(
"Verifying transfers... {} remaining, {} verified, {} failures",
remaining_count, verified_txs, failed_verify
);
}
verified
})
.collect();
self.retain(|(k, _)| !verified_set.contains(&k.pubkey()));
if self.is_empty() {
break;
}
info!("Looping verifications");
let verified_txs = verified_txs.load(Ordering::Relaxed);
let failed_verify = failed_verify.load(Ordering::Relaxed);
let remaining_count = starting_txs.saturating_sub(verified_txs + failed_verify);
info!(
"Verifying transfers... {} remaining, {} verified, {} failures",
remaining_count, verified_txs, failed_verify
);
sleep(Duration::from_millis(100));
}
}
}
/// fund the dests keys by spending all of the source keys into MAX_SPENDS_PER_TX
/// on every iteration. This allows us to replay the transfers because the source is either empty,
/// or full
pub fn fund_keys<T: Client>(
client: &T,
pub fn fund_keys<T: 'static + Client + Send + Sync>(
client: Arc<T>,
source: &Keypair,
dests: &[Keypair],
total: u64,
max_fee: u64,
mut extra: u64,
lamports_per_account: u64,
) {
let mut funded: Vec<(&Keypair, u64)> = vec![(source, total)];
let mut notfunded: Vec<&Keypair> = dests.iter().collect();
let lamports_per_account = (total - (extra * max_fee)) / (notfunded.len() as u64 + 1);
info!(
"funding keys {} with lamports: {:?} total: {}",
dests.len(),
client.get_balance(&source.pubkey()),
total
);
while !notfunded.is_empty() {
let mut new_funded: Vec<(&Keypair, u64)> = vec![];
let mut to_fund = vec![];
info!("creating from... {}", funded.len());
let mut build_to_fund = Measure::start("build_to_fund");
for f in &mut funded {
let max_units = cmp::min(notfunded.len() as u64, MAX_SPENDS_PER_TX);
if max_units == 0 {
break;
}
let start = notfunded.len() - max_units as usize;
let fees = if extra > 0 { max_fee } else { 0 };
let per_unit = (f.1 - lamports_per_account - fees) / max_units;
let moves: Vec<_> = notfunded[start..]
.iter()
.map(|k| (k.pubkey(), per_unit))
.collect();
notfunded[start..]
.iter()
.for_each(|k| new_funded.push((k, per_unit)));
notfunded.truncate(start);
if !moves.is_empty() {
to_fund.push((f.0, moves));
}
extra -= 1;
let mut funded: Vec<&Keypair> = vec![source];
let mut funded_funds = total;
let mut not_funded: Vec<&Keypair> = dests.iter().collect();
while !not_funded.is_empty() {
// Build to fund list and prepare funding sources for next iteration
let mut new_funded: Vec<&Keypair> = vec![];
let mut to_fund: Vec<(&Keypair, Vec<(Pubkey, u64)>)> = vec![];
let to_lamports = (funded_funds - lamports_per_account - max_fee) / MAX_SPENDS_PER_TX;
for f in funded {
let start = not_funded.len() - MAX_SPENDS_PER_TX as usize;
let dests: Vec<_> = not_funded.drain(start..).collect();
let spends: Vec<_> = dests.iter().map(|k| (k.pubkey(), to_lamports)).collect();
to_fund.push((f, spends));
new_funded.extend(dests.into_iter());
}
build_to_fund.stop();
debug!("build to_fund vec: {}us", build_to_fund.as_us());
// try to transfer a "few" at a time with recent blockhash
// assume 4MB network buffers, and 512 byte packets
const FUND_CHUNK_LEN: usize = 4 * 1024 * 1024 / 512;
to_fund.chunks(FUND_CHUNK_LEN).for_each(|chunk| {
let mut tries = 0;
let mut make_txs = Measure::start("make_txs");
// this set of transactions just initializes us for bookkeeping
#[allow(clippy::clone_double_ref)] // sigh
let mut to_fund_txs: Vec<_> = chunk
.par_iter()
.map(|(k, m)| {
let tx = Transaction::new_unsigned_instructions(
system_instruction::transfer_many(&k.pubkey(), &m),
);
(k.clone(), tx)
})
.collect();
make_txs.stop();
debug!(
"make {} unsigned txs: {}us",
to_fund_txs.len(),
make_txs.as_us()
Vec::<(&Keypair, Transaction)>::with_capacity(chunk.len()).fund(
&client,
chunk,
to_lamports,
);
let amount = chunk[0].1[0].1;
while !to_fund_txs.is_empty() {
let receivers = to_fund_txs
.iter()
.fold(0, |len, (_, tx)| len + tx.message().instructions.len());
info!(
"{} {} to {} in {} txs",
if tries == 0 {
"transferring"
} else {
" retrying"
},
amount,
receivers,
to_fund_txs.len(),
);
let (blockhash, _fee_calculator) = get_recent_blockhash(client);
// re-sign retained to_fund_txes with updated blockhash
let mut sign_txs = Measure::start("sign_txs");
to_fund_txs.par_iter_mut().for_each(|(k, tx)| {
tx.sign(&[*k], blockhash);
});
sign_txs.stop();
debug!("sign {} txs: {}us", to_fund_txs.len(), sign_txs.as_us());
let mut send_txs = Measure::start("send_txs");
to_fund_txs.iter().for_each(|(_, tx)| {
client.async_send_transaction(tx.clone()).expect("transfer");
});
send_txs.stop();
debug!("send {} txs: {}us", to_fund_txs.len(), send_txs.as_us());
let mut verify_txs = Measure::start("verify_txs");
let mut starting_txs = to_fund_txs.len();
let mut verified_txs = 0;
let mut failed_verify = 0;
// Only loop multiple times for small (quick) transaction batches
for _ in 0..(if starting_txs < 1000 { 3 } else { 1 }) {
let mut timer = Instant::now();
to_fund_txs.retain(|(_, tx)| {
if timer.elapsed() >= Duration::from_secs(5) {
if failed_verify > 0 {
debug!("total txs failed verify: {}", failed_verify);
}
info!(
"Verifying transfers... {} remaining",
starting_txs - verified_txs
);
timer = Instant::now();
}
let verified = verify_funding_transfer(client, &tx, amount);
if verified {
verified_txs += 1;
} else {
failed_verify += 1;
}
!verified
});
if to_fund_txs.is_empty() {
break;
}
debug!("Looping verifications");
info!("Verifying transfers... {} remaining", to_fund_txs.len());
sleep(Duration::from_millis(100));
}
starting_txs -= to_fund_txs.len();
verify_txs.stop();
debug!("verified {} txs: {}us", starting_txs, verify_txs.as_us());
// retry anything that seems to have dropped through cracks
// again since these txs are all or nothing, they're fine to
// retry
tries += 1;
}
info!("transferred");
});
info!("funded: {} left: {}", new_funded.len(), notfunded.len());
info!("funded: {} left: {}", new_funded.len(), not_funded.len());
funded = new_funded;
funded_funds = to_lamports;
}
}
@@ -678,14 +710,14 @@ pub fn airdrop_lamports<T: Client>(
client: &T,
faucet_addr: &SocketAddr,
id: &Keypair,
tx_count: u64,
desired_balance: u64,
) -> Result<()> {
let starting_balance = client.get_balance(&id.pubkey()).unwrap_or(0);
metrics_submit_lamport_balance(starting_balance);
info!("starting balance {}", starting_balance);
if starting_balance < tx_count {
let airdrop_amount = tx_count - starting_balance;
if starting_balance < desired_balance {
let airdrop_amount = desired_balance - starting_balance;
info!(
"Airdropping {:?} lamports from {} for {}",
airdrop_amount,
@@ -810,17 +842,6 @@ fn compute_and_report_stats(
);
}
// First transfer 2/3 of the lamports to the dest accounts
// then ping-pong 1/3 of the lamports back to the other account
// this leaves 1/3 lamport buffer in each account
fn should_switch_directions(num_lamports_per_account: u64, keypair_chunks: u64, i: u64) -> bool {
if i < keypair_chunks * (2 * num_lamports_per_account) / 3 {
return false;
}
i % (keypair_chunks * num_lamports_per_account / 3) == 0
}
pub fn generate_keypairs(seed_keypair: &Keypair, count: u64) -> (Vec<Keypair>, u64) {
let mut seed = [0u8; 32];
seed.copy_from_slice(&seed_keypair.to_bytes()[..32]);
@@ -1004,23 +1025,25 @@ fn fund_move_keys<T: Client>(
info!("done funding keys, took {} ms", funding_time.as_ms());
}
pub fn generate_and_fund_keypairs<T: Client>(
client: &T,
pub fn generate_and_fund_keypairs<T: 'static + Client + Send + Sync>(
client: Arc<T>,
faucet_addr: Option<SocketAddr>,
funding_key: &Keypair,
keypair_count: usize,
lamports_per_account: u64,
use_move: bool,
) -> Result<(Vec<Keypair>, Option<LibraKeys>, u64)> {
) -> Result<(Vec<Keypair>, Option<LibraKeys>)> {
info!("Creating {} keypairs...", keypair_count);
let (mut keypairs, extra) = generate_keypairs(funding_key, keypair_count as u64);
info!("Get lamports...");
// Sample the first keypair, see if it has lamports, if so then resume.
// This logic is to prevent lamport loss on repeated solana-bench-tps executions
let last_keypair_balance = client
.get_balance(&keypairs[keypair_count - 1].pubkey())
.unwrap_or(0);
// Sample the first keypair, to prevent lamport loss on repeated solana-bench-tps executions
let first_key = keypairs[0].pubkey();
let first_keypair_balance = client.get_balance(&first_key).unwrap_or(0);
// Sample the last keypair, to check if funding was already completed
let last_key = keypairs[keypair_count - 1].pubkey();
let last_keypair_balance = client.get_balance(&last_key).unwrap_or(0);
#[cfg(feature = "move")]
let mut move_keypairs_ret = None;
@@ -1028,31 +1051,38 @@ pub fn generate_and_fund_keypairs<T: Client>(
#[cfg(not(feature = "move"))]
let move_keypairs_ret = None;
if lamports_per_account > last_keypair_balance {
let (_blockhash, fee_calculator) = get_recent_blockhash(client);
let account_desired_balance =
lamports_per_account - last_keypair_balance + fee_calculator.max_lamports_per_signature;
let extra_fees = extra * fee_calculator.max_lamports_per_signature;
let mut total = account_desired_balance * (1 + keypairs.len() as u64) + extra_fees;
// Repeated runs will eat up keypair balances from transaction fees. In order to quickly
// start another bench-tps run without re-funding all of the keypairs, check if the
// keypairs still have at least 80% of the expected funds. That should be enough to
// pay for the transaction fees in a new run.
let enough_lamports = 8 * lamports_per_account / 10;
if first_keypair_balance < enough_lamports || last_keypair_balance < enough_lamports {
let (_blockhash, fee_calculator) = get_recent_blockhash(client.as_ref());
let max_fee = fee_calculator.max_lamports_per_signature;
let extra_fees = extra * max_fee;
let total_keypairs = keypairs.len() as u64 + 1; // Add one for funding keypair
let mut total = lamports_per_account * total_keypairs + extra_fees;
if use_move {
total *= 3;
}
info!("Previous key balance: {} max_fee: {} lamports_per_account: {} extra: {} desired_balance: {} total: {}",
last_keypair_balance, fee_calculator.max_lamports_per_signature, lamports_per_account, extra,
account_desired_balance, total
);
let funding_key_balance = client.get_balance(&funding_key.pubkey()).unwrap_or(0);
info!(
"Funding keypair balance: {} max_fee: {} lamports_per_account: {} extra: {} total: {}",
funding_key_balance, max_fee, lamports_per_account, extra, total
);
if client.get_balance(&funding_key.pubkey()).unwrap_or(0) < total {
airdrop_lamports(client, &faucet_addr.unwrap(), funding_key, total)?;
airdrop_lamports(client.as_ref(), &faucet_addr.unwrap(), funding_key, total)?;
}
#[cfg(feature = "move")]
{
if use_move {
let libra_genesis_keypair = create_genesis(&funding_key, client, 10_000_000);
let libra_mint_program_id = upload_mint_script(&funding_key, client);
let libra_pay_program_id = upload_payment_script(&funding_key, client);
let libra_genesis_keypair =
create_genesis(&funding_key, client.as_ref(), 10_000_000);
let libra_mint_program_id = upload_mint_script(&funding_key, client.as_ref());
let libra_pay_program_id = upload_payment_script(&funding_key, client.as_ref());
// Generate another set of keypairs for move accounts.
// Still fund the solana ones which will be used for fees.
@@ -1060,7 +1090,7 @@ pub fn generate_and_fund_keypairs<T: Client>(
let mut rnd = GenKeys::new(seed);
let move_keypairs = rnd.gen_n_keypairs(keypair_count as u64);
fund_move_keys(
client,
client.as_ref(),
funding_key,
&move_keypairs,
total / 3,
@@ -1085,15 +1115,15 @@ pub fn generate_and_fund_keypairs<T: Client>(
funding_key,
&keypairs,
total,
fee_calculator.max_lamports_per_signature,
extra,
max_fee,
lamports_per_account,
);
}
// 'generate_keypairs' generates extra keys to be able to have size-aligned funding batches for fund_keys.
keypairs.truncate(keypair_count);
Ok((keypairs, move_keypairs_ret, last_keypair_balance))
Ok((keypairs, move_keypairs_ret))
}
#[cfg(test)]
@@ -1105,30 +1135,11 @@ mod tests {
use solana_sdk::fee_calculator::FeeCalculator;
use solana_sdk::genesis_config::create_genesis_config;
#[test]
fn test_switch_directions() {
assert_eq!(should_switch_directions(30, 1, 0), false);
assert_eq!(should_switch_directions(30, 1, 1), false);
assert_eq!(should_switch_directions(30, 1, 20), true);
assert_eq!(should_switch_directions(30, 1, 21), false);
assert_eq!(should_switch_directions(30, 1, 30), true);
assert_eq!(should_switch_directions(30, 1, 90), true);
assert_eq!(should_switch_directions(30, 1, 91), false);
assert_eq!(should_switch_directions(30, 2, 0), false);
assert_eq!(should_switch_directions(30, 2, 1), false);
assert_eq!(should_switch_directions(30, 2, 20), false);
assert_eq!(should_switch_directions(30, 2, 40), true);
assert_eq!(should_switch_directions(30, 2, 90), false);
assert_eq!(should_switch_directions(30, 2, 100), true);
assert_eq!(should_switch_directions(30, 2, 101), false);
}
#[test]
fn test_bench_tps_bank_client() {
let (genesis_config, id) = create_genesis_config(10_000);
let bank = Bank::new(&genesis_config);
let clients = vec![BankClient::new(bank)];
let client = Arc::new(BankClient::new(bank));
let mut config = Config::default();
config.id = id;
@@ -1136,23 +1147,24 @@ mod tests {
config.duration = Duration::from_secs(5);
let keypair_count = config.tx_count * config.keypair_multiplier;
let (keypairs, _move_keypairs, _keypair_balance) =
generate_and_fund_keypairs(&clients[0], None, &config.id, keypair_count, 20, false)
let (keypairs, _move_keypairs) =
generate_and_fund_keypairs(client.clone(), None, &config.id, keypair_count, 20, false)
.unwrap();
do_bench_tps(clients, config, keypairs, 0, None);
do_bench_tps(client, config, keypairs, None);
}
#[test]
fn test_bench_tps_fund_keys() {
let (genesis_config, id) = create_genesis_config(10_000);
let bank = Bank::new(&genesis_config);
let client = BankClient::new(bank);
let client = Arc::new(BankClient::new(bank));
let keypair_count = 20;
let lamports = 20;
let (keypairs, _move_keypairs, _keypair_balance) =
generate_and_fund_keypairs(&client, None, &id, keypair_count, lamports, false).unwrap();
let (keypairs, _move_keypairs) =
generate_and_fund_keypairs(client.clone(), None, &id, keypair_count, lamports, false)
.unwrap();
for kp in &keypairs {
assert_eq!(
@@ -1170,23 +1182,16 @@ mod tests {
let fee_calculator = FeeCalculator::new(11, 0);
genesis_config.fee_calculator = fee_calculator;
let bank = Bank::new(&genesis_config);
let client = BankClient::new(bank);
let client = Arc::new(BankClient::new(bank));
let keypair_count = 20;
let lamports = 20;
let (keypairs, _move_keypairs, _keypair_balance) =
generate_and_fund_keypairs(&client, None, &id, keypair_count, lamports, false).unwrap();
let (keypairs, _move_keypairs) =
generate_and_fund_keypairs(client.clone(), None, &id, keypair_count, lamports, false)
.unwrap();
let max_fee = client
.get_recent_blockhash_with_commitment(CommitmentConfig::recent())
.unwrap()
.1
.max_lamports_per_signature;
for kp in &keypairs {
assert_eq!(
client.get_balance(&kp.pubkey()).unwrap(),
lamports + max_fee
);
assert_eq!(client.get_balance(&kp.pubkey()).unwrap(), lamports);
}
}
}
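
The hunks above replace the lamport-counting should_switch_directions heuristic with a simple round-robin over keypair chunks: the destination ring is rotated after every send so repeated blockhashes still yield unique signatures, and the transfer direction flips each time chunk_index wraps to zero. A minimal standalone sketch of that control flow (illustrative names, no real clients or keypairs):

```rust
use std::collections::VecDeque;

fn main() {
    let keypair_chunks = 3usize;
    // One rotating destination ring per source chunk; u32s stand in for keypairs.
    let mut dest_chunks: Vec<VecDeque<u32>> =
        (0..keypair_chunks).map(|_| (0u32..4).collect()).collect();
    let mut chunk_index = 0;
    let mut reclaim = false; // false = fund dests, true = reclaim lamports

    for step in 0..8 {
        println!(
            "step {}: chunk {} reclaim {} next dest {:?}",
            step, chunk_index, reclaim, dest_chunks[chunk_index].front()
        );
        // Rotating the ring keeps transaction signatures unique even when
        // the same blockhash is reused across iterations.
        dest_chunks[chunk_index].rotate_left(1);
        chunk_index = (chunk_index + 1) % keypair_chunks;
        if chunk_index == 0 {
            // Every chunk has had a turn: switch transfer direction.
            reclaim = !reclaim;
        }
    }
}
```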


@@ -6,7 +6,7 @@ use solana_genesis::Base64Account;
use solana_sdk::fee_calculator::FeeCalculator;
use solana_sdk::signature::{Keypair, KeypairUtil};
use solana_sdk::system_program;
use std::{collections::HashMap, fs::File, io::prelude::*, path::Path, process::exit};
use std::{collections::HashMap, fs::File, io::prelude::*, path::Path, process::exit, sync::Arc};
/// Number of signatures for all transactions in ~1 week at ~100K TPS
pub const NUM_SIGNATURES_FOR_TXS: u64 = 100_000 * 60 * 60 * 24 * 7;
@@ -82,12 +82,12 @@ fn main() {
);
exit(1);
}
client
Arc::new(client)
} else {
get_client(&nodes)
Arc::new(get_client(&nodes))
};
let (keypairs, move_keypairs, keypair_balance) = if *read_from_client_file && !use_move {
let (keypairs, move_keypairs) = if *read_from_client_file && !use_move {
let path = Path::new(&client_ids_and_stake_file);
let file = File::open(path).unwrap();
@@ -117,10 +117,10 @@ fn main() {
// This prevents the amount of storage needed for bench-tps accounts from creeping up
// across multiple runs.
keypairs.sort_by(|x, y| x.pubkey().to_string().cmp(&y.pubkey().to_string()));
(keypairs, None, last_balance)
(keypairs, None)
} else {
generate_and_fund_keypairs(
&client,
client.clone(),
Some(*faucet_addr),
&id,
keypair_count,
@@ -133,11 +133,5 @@ fn main() {
})
};
do_bench_tps(
vec![client],
cli_config,
keypairs,
keypair_balance,
move_keypairs,
);
do_bench_tps(client, cli_config, keypairs, move_keypairs);
}
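
As a quick sanity check on the NUM_SIGNATURES_FOR_TXS constant in the hunks above: one week at 100K TPS is 100_000 * 60 * 60 * 24 * 7 = 60_480_000_000 signatures, roughly 6.05e10.

```rust
fn main() {
    const NUM_SIGNATURES_FOR_TXS: u64 = 100_000 * 60 * 60 * 24 * 7;
    assert_eq!(NUM_SIGNATURES_FOR_TXS, 60_480_000_000);
}
```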


@@ -9,7 +9,7 @@ use solana_local_cluster::local_cluster::{ClusterConfig, LocalCluster};
#[cfg(feature = "move")]
use solana_sdk::move_loader::solana_move_loader_program;
use solana_sdk::signature::{Keypair, KeypairUtil};
use std::sync::mpsc::channel;
use std::sync::{mpsc::channel, Arc};
use std::time::Duration;
fn test_bench_tps_local_cluster(config: Config) {
@@ -36,10 +36,10 @@ fn test_bench_tps_local_cluster(config: Config) {
100_000_000,
);
let client = create_client(
let client = Arc::new(create_client(
(cluster.entry_point_info.rpc, cluster.entry_point_info.tpu),
VALIDATOR_PORT_RANGE,
);
));
let (addr_sender, addr_receiver) = channel();
run_local_faucet(faucet_keypair, addr_sender, None);
@@ -48,8 +48,8 @@ fn test_bench_tps_local_cluster(config: Config) {
let lamports_per_account = 100;
let keypair_count = config.tx_count * config.keypair_multiplier;
let (keypairs, move_keypairs, _keypair_balance) = generate_and_fund_keypairs(
&client,
let (keypairs, move_keypairs) = generate_and_fund_keypairs(
client.clone(),
Some(faucet_addr),
&config.id,
keypair_count,
@@ -58,7 +58,7 @@ fn test_bench_tps_local_cluster(config: Config) {
)
.unwrap();
let _total = do_bench_tps(vec![client], config, keypairs, 0, move_keypairs);
let _total = do_bench_tps(client, config, keypairs, move_keypairs);
#[cfg(not(debug_assertions))]
assert!(_total > 100);


@@ -177,7 +177,7 @@ $ solana send-timestamp <PUBKEY> <PROCESS_ID> --date 2018-12-24T23:59:00
## Usage
### solana-cli
```text
solana-cli 0.22.3 [channel=unknown commit=unknown]
solana-cli 0.22.8 [channel=unknown commit=unknown]
Blockchain, Rebuilt for Scale
USAGE:
@@ -322,13 +322,16 @@ FLAGS:
-v, --verbose Show extra information header
OPTIONS:
--ask-seed-phrase <KEYPAIR NAME> Securely recover a keypair using a seed phrase and optional passphrase
[possible values: keypair]
-C, --config <PATH> Configuration file to use [default:
~/.config/solana/cli/config.yml]
-u, --url <URL> JSON RPC URL for the solana cluster
-k, --keypair <PATH> /path/to/id.json
--nonce-authority <KEYPAIR> Specify nonce authority if different from account
--ask-seed-phrase <KEYPAIR NAME>
Recover a keypair using a seed phrase and optional passphrase [possible values: keypair]
-C, --config <PATH>
Configuration file to use [default: ~/.config/solana/cli/config.yml]
-u, --url <URL> JSON RPC URL for the solana cluster
-k, --keypair <PATH> /path/to/id.json
--nonce-authority <KEYPAIR or PUBKEY>
Provide the nonce authority keypair to use when signing a nonced transaction
ARGS:
<NONCE_ACCOUNT> Address of the nonce account
@@ -694,20 +697,25 @@ FLAGS:
-v, --verbose Show extra information header
OPTIONS:
--ask-seed-phrase <KEYPAIR NAME> Securely recover a keypair using a seed phrase and optional passphrase
[possible values: keypair]
--blockhash <BLOCKHASH> Use the supplied blockhash
-C, --config <PATH> Configuration file to use [default:
~/.config/solana/cli/config.yml]
-u, --url <URL> JSON RPC URL for the solana cluster
-k, --keypair <PATH> /path/to/id.json
--nonce <PUBKEY> Provide the nonce account to use when creating a nonced
transaction. Nonced transactions are useful when a transaction
requires a lengthy signing process. Learn more about nonced
transactions at https://docs.solana.com/offline-signing/durable-nonce
--nonce-authority <nonce_authority> Provide the nonce authority keypair to use when signing a nonced
transaction
--signer <PUBKEY=BASE58_SIG>... Provide a public-key/signature pair for the transaction
--ask-seed-phrase <KEYPAIR NAME>
Recover a keypair using a seed phrase and optional passphrase [possible values: keypair]
--blockhash <BLOCKHASH> Use the supplied blockhash
-C, --config <PATH>
Configuration file to use [default: ~/.config/solana/cli/config.yml]
-u, --url <URL> JSON RPC URL for the solana cluster
-k, --keypair <PATH> /path/to/id.json
--nonce <PUBKEY>
Provide the nonce account to use when creating a nonced
transaction. Nonced transactions are useful when a transaction
requires a lengthy signing process. Learn more about nonced
transactions at https://docs.solana.com/offline-signing/durable-nonce
--nonce-authority <KEYPAIR or PUBKEY>
Provide the nonce authority keypair to use when signing a nonced transaction
--signer <PUBKEY=BASE58_SIG>... Provide a public-key/signature pair for the transaction
--stake-authority <KEYPAIR of PUBKEY> Public key of authorized staker (defaults to cli config pubkey)
ARGS:
<STAKE ACCOUNT> Stake account to be deactivated.
@@ -730,20 +738,25 @@ FLAGS:
-v, --verbose Show extra information header
OPTIONS:
--ask-seed-phrase <KEYPAIR NAME> Securely recover a keypair using a seed phrase and optional passphrase
[possible values: keypair]
--blockhash <BLOCKHASH> Use the supplied blockhash
-C, --config <PATH> Configuration file to use [default:
~/.config/solana/cli/config.yml]
-u, --url <URL> JSON RPC URL for the solana cluster
-k, --keypair <PATH> /path/to/id.json
--nonce <PUBKEY> Provide the nonce account to use when creating a nonced
transaction. Nonced transactions are useful when a transaction
requires a lengthy signing process. Learn more about nonced
transactions at https://docs.solana.com/offline-signing/durable-nonce
--nonce-authority <nonce_authority> Provide the nonce authority keypair to use when signing a nonced
transaction
--signer <PUBKEY=BASE58_SIG>... Provide a public-key/signature pair for the transaction
--ask-seed-phrase <KEYPAIR NAME>
Recover a keypair using a seed phrase and optional passphrase [possible values: keypair]
--blockhash <BLOCKHASH> Use the supplied blockhash
-C, --config <PATH>
Configuration file to use [default: ~/.config/solana/cli/config.yml]
-u, --url <URL> JSON RPC URL for the solana cluster
-k, --keypair <PATH> /path/to/id.json
--nonce <PUBKEY>
Provide the nonce account to use when creating a nonced
transaction. Nonced transactions are useful when a transaction
requires a lengthy signing process. Learn more about nonced
transactions at https://docs.solana.com/offline-signing/durable-nonce
--nonce-authority <KEYPAIR or PUBKEY>
Provide the nonce authority keypair to use when signing a nonced transaction
--signer <PUBKEY=BASE58_SIG>... Provide a public-key/signature pair for the transaction
--stake-authority <KEYPAIR of PUBKEY> Public key of authorized staker (defaults to cli config pubkey)
ARGS:
<STAKE ACCOUNT> Stake account to delegate
@@ -1009,13 +1022,17 @@ FLAGS:
-v, --verbose Show extra information header
OPTIONS:
--ask-seed-phrase <KEYPAIR NAME> Securely recover a keypair using a seed phrase and optional passphrase
[possible values: keypair]
-C, --config <PATH> Configuration file to use [default:
~/.config/solana/cli/config.yml]
-u, --url <URL> JSON RPC URL for the solana cluster
-k, --keypair <PATH> /path/to/id.json
--nonce-authority <KEYPAIR> Specify nonce authority if different from account
--ask-seed-phrase <KEYPAIR NAME>
Recover a keypair using a seed phrase and optional passphrase [possible values: keypair]
-C, --config <PATH>
Configuration file to use [default: ~/.config/solana/cli/config.yml]
-u, --url <URL> JSON RPC URL for the solana cluster
-k, --keypair <PATH> /path/to/id.json
--nonce-authority <KEYPAIR or PUBKEY>
Provide the nonce authority keypair to use when signing a nonced transaction
ARGS:
<NONCE ACCOUNT> Address of the nonce account
@@ -1039,23 +1056,27 @@ FLAGS:
-v, --verbose Show extra information header
OPTIONS:
--ask-seed-phrase <KEYPAIR NAME> Securely recover a keypair using a seed phrase and optional passphrase
[possible values: keypair]
--blockhash <BLOCKHASH> Use the supplied blockhash
-C, --config <PATH> Configuration file to use [default:
~/.config/solana/cli/config.yml]
-u, --url <URL> JSON RPC URL for the solana cluster
-k, --keypair <PATH> /path/to/id.json
--nonce <PUBKEY> Provide the nonce account to use when creating a nonced
transaction. Nonced transactions are useful when a transaction
requires a lengthy signing process. Learn more about nonced
transactions at https://docs.solana.com/offline-signing/durable-nonce
--nonce-authority <nonce_authority> Provide the nonce authority keypair to use when signing a nonced
transaction
--signer <PUBKEY=BASE58_SIG>... Provide a public-key/signature pair for the transaction
--after <DATETIME> A timestamp after which transaction will execute
--require-timestamp-from <PUBKEY> Require timestamp from this third party
--require-signature-from <PUBKEY>... Any third party signatures required to unlock the lamports
--ask-seed-phrase <KEYPAIR NAME>
Recover a keypair using a seed phrase and optional passphrase [possible values: keypair]
--blockhash <BLOCKHASH> Use the supplied blockhash
-C, --config <PATH>
Configuration file to use [default: ~/.config/solana/cli/config.yml]
-u, --url <URL> JSON RPC URL for the solana cluster
-k, --keypair <PATH> /path/to/id.json
--nonce <PUBKEY>
Provide the nonce account to use when creating a nonced
transaction. Nonced transactions are useful when a transaction
requires a lengthy signing process. Learn more about nonced
transactions at https://docs.solana.com/offline-signing/durable-nonce
--nonce-authority <KEYPAIR or PUBKEY>
Provide the nonce authority keypair to use when signing a nonced transaction
--signer <PUBKEY=BASE58_SIG>... Provide a public-key/signature pair for the transaction
--after <DATETIME> A timestamp after which transaction will execute
--require-timestamp-from <PUBKEY> Require timestamp from this third party
--require-signature-from <PUBKEY>... Any third party signatures required to unlock the lamports
ARGS:
<TO PUBKEY> The pubkey of recipient
@@ -1431,12 +1452,25 @@ FLAGS:
-v, --verbose Show extra information header
OPTIONS:
--ask-seed-phrase <KEYPAIR NAME> Securely recover a keypair using a seed phrase and optional passphrase
[possible values: keypair]
-C, --config <PATH> Configuration file to use [default:
~/.config/solana/cli/config.yml]
-u, --url <URL> JSON RPC URL for the solana cluster
-k, --keypair <PATH> /path/to/id.json
--ask-seed-phrase <KEYPAIR NAME>
Recover a keypair using a seed phrase and optional passphrase [possible values: keypair]
--blockhash <BLOCKHASH> Use the supplied blockhash
-C, --config <PATH>
Configuration file to use [default: ~/.config/solana/cli/config.yml]
-u, --url <URL> JSON RPC URL for the solana cluster
-k, --keypair <PATH> /path/to/id.json
--nonce <PUBKEY>
Provide the nonce account to use when creating a nonced
transaction. Nonced transactions are useful when a transaction
requires a lengthy signing process. Learn more about nonced
transactions at https://docs.solana.com/offline-signing/durable-nonce
--nonce-authority <KEYPAIR or PUBKEY>
Provide the nonce authority keypair to use when signing a nonced transaction
--signer <PUBKEY=BASE58_SIG>... Provide a public-key/signature pair for the transaction
--stake-authority <KEYPAIR of PUBKEY> Public key of authorized staker (defaults to cli config pubkey)
ARGS:
<STAKE ACCOUNT> Stake account in which to set the authorized staker
@@ -1459,12 +1493,25 @@ FLAGS:
-v, --verbose Show extra information header
OPTIONS:
--ask-seed-phrase <KEYPAIR NAME> Securely recover a keypair using a seed phrase and optional passphrase
[possible values: keypair]
-C, --config <PATH> Configuration file to use [default:
~/.config/solana/cli/config.yml]
-u, --url <URL> JSON RPC URL for the solana cluster
-k, --keypair <PATH> /path/to/id.json
--ask-seed-phrase <KEYPAIR NAME>
Recover a keypair using a seed phrase and optional passphrase [possible values: keypair]
--blockhash <BLOCKHASH> Use the supplied blockhash
-C, --config <PATH>
Configuration file to use [default: ~/.config/solana/cli/config.yml]
-u, --url <URL> JSON RPC URL for the solana cluster
-k, --keypair <PATH> /path/to/id.json
--nonce <PUBKEY>
Provide the nonce account to use when creating a nonced
transaction. Nonced transactions are useful when a transaction
requires a lengthy signing process. Learn more about nonced
transactions at https://docs.solana.com/offline-signing/durable-nonce
--nonce-authority <KEYPAIR or PUBKEY>
Provide the nonce authority keypair to use when signing a nonced transaction
--signer <PUBKEY=BASE58_SIG>... Provide a public-key/signature pair for the transaction
--withdraw-authority <KEYPAIR or PUBKEY> Public key of authorized withdrawer (defaults to cli config pubkey)
ARGS:
<STAKE ACCOUNT> Stake account in which to set the authorized withdrawer
@@ -1630,13 +1677,17 @@ FLAGS:
-v, --verbose Show extra information header
OPTIONS:
--ask-seed-phrase <KEYPAIR NAME> Securely recover a keypair using a seed phrase and optional passphrase
[possible values: keypair]
-C, --config <PATH> Configuration file to use [default:
~/.config/solana/cli/config.yml]
-u, --url <URL> JSON RPC URL for the solana cluster
-k, --keypair <PATH> /path/to/id.json
--nonce-authority <KEYPAIR> Specify nonce authority if different from account
--ask-seed-phrase <KEYPAIR NAME>
Recover a keypair using a seed phrase and optional passphrase [possible values: keypair]
-C, --config <PATH>
Configuration file to use [default: ~/.config/solana/cli/config.yml]
-u, --url <URL> JSON RPC URL for the solana cluster
-k, --keypair <PATH> /path/to/id.json
--nonce-authority <KEYPAIR or PUBKEY>
Provide the nonce authority keypair to use when signing a nonced transaction
ARGS:
<NONCE ACCOUNT> Nonce account from to withdraw from
@@ -1661,12 +1712,15 @@ FLAGS:
-v, --verbose Show extra information header
OPTIONS:
--ask-seed-phrase <KEYPAIR NAME> Securely recover a keypair using a seed phrase and optional passphrase
[possible values: keypair]
-C, --config <PATH> Configuration file to use [default:
~/.config/solana/cli/config.yml]
-u, --url <URL> JSON RPC URL for the solana cluster
-k, --keypair <PATH> /path/to/id.json
--ask-seed-phrase <KEYPAIR NAME>
Recover a keypair using a seed phrase and optional passphrase [possible values: keypair]
-C, --config <PATH>
Configuration file to use [default: ~/.config/solana/cli/config.yml]
-u, --url <URL> JSON RPC URL for the solana cluster
-k, --keypair <PATH> /path/to/id.json
--withdraw-authority <KEYPAIR or PUBKEY> Public key of authorized withdrawer (defaults to cli config pubkey)
ARGS:
<STAKE ACCOUNT> Stake account from which to withdraw


@@ -40,6 +40,7 @@ To interact with a Solana node inside a JavaScript application, use the [solana-
* [getTotalSupply](jsonrpc-api.md#gettotalsupply)
* [getVersion](jsonrpc-api.md#getversion)
* [getVoteAccounts](jsonrpc-api.md#getvoteaccounts)
* [minimumLedgerSlot](jsonrpc-api.md#minimumledgerslot)
* [requestAirdrop](jsonrpc-api.md#requestairdrop)
* [sendTransaction](jsonrpc-api.md#sendtransaction)
* [startSubscriptionChannel](jsonrpc-api.md#startsubscriptionchannel)
@@ -585,7 +586,7 @@ Returns the current slot the node is processing
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getSlot"}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":"1234","id":1}
{"jsonrpc":"2.0","result":1234,"id":1}
```
### getSlotLeader
@@ -628,7 +629,7 @@ Returns the current storage segment size in terms of slots
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getSlotsPerSegment"}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":"1024","id":1}
{"jsonrpc":"2.0","result":1024,"id":1}
```
### getStorageTurn
@@ -772,6 +773,29 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "m
{"jsonrpc":"2.0","result":{"current":[{"commission":0,"epochVoteAccount":true,"nodePubkey":"B97CCUW3AEZFGy6uUg6zUdnNYvnVq5VG8PUtb2HayTDD","lastVote":147,"activatedStake":42,"votePubkey":"3ZT31jkAGhUaw8jsy4bTknwBMP8i4Eueh52By4zXcsVw"}],"delinquent":[{"commission":127,"epochVoteAccount":false,"nodePubkey":"6ZPxeQaDo4bkZLRsdNrCzchNQr5LN9QMc9sipXv9Kw8f","lastVote":0,"activatedStake":0,"votePubkey":"CmgCk4aMS7KW1SHX3s9K5tBJ6Yng2LBaC8MFov4wx9sm"}]},"id":1}
```
### minimumLedgerSlot
Returns the lowest slot that the node has information about in its ledger. This
value may increase over time if the node is configured to purge older ledger data
#### Parameters:
None
#### Results:
* `u64` - Minimum ledger slot
#### Example:
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"minimumLedgerSlot"}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":1234,"id":1}
```
### requestAirdrop
Requests an airdrop of lamports to a Pubkey
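
The minimumLedgerSlot method added above takes no parameters, so beyond the curl example it can be exercised with nothing but a raw HTTP POST. A hedged sketch using only the Rust standard library (a node must actually be listening on localhost:8899 for this to return anything):

```rust
use std::io::{Read, Write};
use std::net::TcpStream;

fn main() -> std::io::Result<()> {
    // Same request body as the curl example in the docs above.
    let body = r#"{"jsonrpc":"2.0","id":1,"method":"minimumLedgerSlot"}"#;
    let mut stream = TcpStream::connect("127.0.0.1:8899")?;
    write!(
        stream,
        "POST / HTTP/1.1\r\nHost: localhost\r\nContent-Type: application/json\r\nContent-Length: {}\r\nConnection: close\r\n\r\n{}",
        body.len(),
        body
    )?;
    let mut response = String::new();
    stream.read_to_string(&mut response)?;
    // Expect a payload like {"jsonrpc":"2.0","result":1234,"id":1}.
    println!("{}", response);
    Ok(())
}
```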


@@ -26,7 +26,7 @@ account data. A transaction is now constructed in the normal way, but with the
following additional requirements:
1) The durable nonce value is used in the `recent_blockhash` field
2) A `NonceAdvance` instruction is the first issued in the transaction
2) An `AdvanceNonceAccount` instruction is the first issued in the transaction
### Contract Mechanics
@@ -67,7 +67,7 @@ A client wishing to use this feature starts by creating a nonce account under
the system program. This account will be in the `Uninitialized` state with no
stored hash, and thus unusable.
To initialize a newly created account, a `NonceInitialize` instruction must be
To initialize a newly created account, an `InitializeNonceAccount` instruction must be
issued. This instruction takes one parameter, the `Pubkey` of the account's
[authority](../offline-signing/durable-nonce.md#nonce-authority). Nonce accounts
must be [rent-exempt](rent.md#two-tiered-rent-regime) to meet the data-persistence
@@ -76,27 +76,27 @@ deposited before they can be initialized. Upon successful initialization, the
cluster's most recent blockhash is stored along with specified nonce authority
`Pubkey`.
The `NonceAdvance` instruction is used to manage the account's stored nonce
The `AdvanceNonceAccount` instruction is used to manage the account's stored nonce
value. It stores the cluster's most recent blockhash in the account's state data,
failing if that matches the value already stored there. This check prevents
replaying transactions within the same block.
Due to nonce accounts' [rent-exempt](rent.md#two-tiered-rent-regime) requirement,
a custom withdraw instruction is used to move funds out of the account.
The `NonceWithdraw` instruction takes a single argument, lamports to withdraw,
The `WithdrawNonceAccount` instruction takes a single argument, lamports to withdraw,
and enforces rent-exemption by preventing the account's balance from falling
below the rent-exempt minimum. An exception to this check is if the final balance
would be zero lamports, which makes the account eligible for deletion. This
account closure detail has an additional requirement that the stored nonce value
must not match the cluster's most recent blockhash, as per `NonceAdvance`.
must not match the cluster's most recent blockhash, as per `AdvanceNonceAccount`.
The account's [nonce authority](../offline-signing/durable-nonce.md#nonce-authority)
can be changed using the `NonceAuthorize` instruction. It takes one parameter,
can be changed using the `AuthorizeNonceAccount` instruction. It takes one parameter,
the `Pubkey` of the new authority. Executing this instruction grants full
control over the account and its balance to the new authority.
{% hint style="info" %}
`NonceAdvance`, `NonceWithdraw` and `NonceAuthorize` all require the current
`AdvanceNonceAccount`, `WithdrawNonceAccount` and `AuthorizeNonceAccount` all require the current
[nonce authority](../offline-signing/durable-nonce.md#nonce-authority) for the
account to sign the transaction.
{% endhint %}
@@ -108,7 +108,7 @@ an extant `recent_blockhash` on the transaction and prevent fee theft via
failed transaction replay, runtime modifications are necessary.
Any transaction failing the usual `check_hash_age` validation will be tested
for a Durable Transaction Nonce. This is signaled by including a `NonceAdvance`
for a Durable Transaction Nonce. This is signaled by including an `AdvanceNonceAccount`
instruction as the first instruction in the transaction.
If the runtime determines that a Durable Transaction Nonce is in use, it will
@@ -124,10 +124,10 @@ If all three of the above checks succeed, the transaction is allowed to continue
validation.
Since transactions that fail with an `InstructionError` are charged a fee and
changes to their state rolled back, there is an opportunity for fee theft if a
`NonceAdvance` instruction is reverted. A malicious validator could replay the
changes to their state rolled back, there is an opportunity for fee theft if an
`AdvanceNonceAccount` instruction is reverted. A malicious validator could replay the
failed transaction until the stored nonce is successfully advanced. Runtime
changes prevent this behavior. When a durable nonce transaction fails with an
`InstructionError` aside from the `NonceAdvance` instruction, the nonce account
`InstructionError` aside from the `AdvanceNonceAccount` instruction, the nonce account
is rolled back to its pre-execution state as usual. Then the runtime advances
its nonce value and stores the advanced nonce account as if the transaction had succeeded.
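As a concrete client-side illustration, this changeset's CLI builds nonced transfers with a `system_transaction::nonced_transfer` helper; a minimal sketch under those assumptions (only the first arguments of the helper are visible in the diff, so the full signature is inferred, and the nonce hash must be read from the nonce account beforehand):
```rust
use solana_sdk::{
    hash::Hash, pubkey::Pubkey, signature::Keypair, system_transaction,
    transaction::Transaction,
};

// Sketch: build a transfer that rides on a durable nonce instead of a recent
// blockhash. `nonce_hash` is the value currently stored in `nonce_account`;
// the helper places the AdvanceNonceAccount instruction first.
fn nonced_transfer_tx(
    payer: &Keypair,
    to: &Pubkey,
    lamports: u64,
    nonce_account: &Pubkey,
    nonce_authority: &Keypair,
    nonce_hash: Hash,
) -> Transaction {
    system_transaction::nonced_transfer(
        payer,
        to,
        lamports,
        nonce_account,
        nonce_authority,
        nonce_hash,
    )
}
```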

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-chacha-sys"
version = "0.22.3"
version = "0.22.8"
description = "Solana chacha-sys"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"

View File

@@ -10,6 +10,9 @@ source ci/rust-version.sh nightly
export RUST_BACKTRACE=1
export RUSTFLAGS="-D warnings"
# Look for failed mergify.io backports
_ git show HEAD --check --oneline
_ cargo +"$rust_stable" fmt --all -- --check
# Clippy gets stuck for unknown reasons if sdk-c is included in the build, so check it separately.
@@ -19,7 +22,7 @@ _ cargo +"$rust_stable" clippy --all --exclude solana-sdk-c -- --deny=warnings
_ cargo +"$rust_stable" clippy --manifest-path sdk-c/Cargo.toml -- --deny=warnings
_ cargo +"$rust_stable" audit --version
_ cargo +"$rust_stable" audit --ignore RUSTSEC-2019-0013 --ignore RUSTSEC-2018-0015 --ignore RUSTSEC-2019-0031
_ cargo +"$rust_stable" audit --ignore RUSTSEC-2019-0013 --ignore RUSTSEC-2018-0015 --ignore RUSTSEC-2019-0031 --ignore RUSTSEC-2020-0002
_ ci/nits.sh
_ ci/order-crates-for-publishing.py
_ book/build.sh

View File

@@ -142,6 +142,7 @@ testnet-beta|testnet-beta-perf)
testnet)
CHANNEL_OR_TAG=$STABLE_CHANNEL_LATEST_TAG
CHANNEL_BRANCH=$STABLE_CHANNEL
export CLOUDSDK_CORE_PROJECT=testnet-solana-com
;;
testnet-perf)
CHANNEL_OR_TAG=$STABLE_CHANNEL_LATEST_TAG
@@ -157,6 +158,7 @@ tds)
: "${TDS_CHANNEL_OR_TAG:=edge}"
CHANNEL_OR_TAG="$TDS_CHANNEL_OR_TAG"
CHANNEL_BRANCH="$CI_BRANCH"
export CLOUDSDK_CORE_PROJECT=tour-de-sol
;;
*)
echo "Error: Invalid TESTNET=$TESTNET"
@@ -375,7 +377,7 @@ deploy() {
(
set -x
ci/testnet-deploy.sh -p testnet-solana-com -C gce -z us-west1-b \
-t "$CHANNEL_OR_TAG" -n 3 -c 0 -u -P \
-t "$CHANNEL_OR_TAG" -n 1 -c 0 -u -P \
-a testnet-solana-com --letsencrypt testnet.solana.com \
--limit-ledger-size \
${skipCreate:+-e} \

View File

@@ -60,7 +60,7 @@ trap shutdown EXIT INT
set -x
for zone in "$@"; do
echo "--- $cloudProvider config [$zone]"
timeout 5m net/"$cloudProvider".sh config $maybePublicNetwork -p "$netName" -z "$zone"
timeout 5m net/"$cloudProvider".sh config $maybePublicNetwork -n 1 -p "$netName" -z "$zone"
net/init-metrics.sh -e
echo "+++ $cloudProvider.sh info"
net/"$cloudProvider".sh info

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-clap-utils"
version = "0.22.3"
version = "0.22.8"
description = "Solana utilities for the clap"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -12,7 +12,7 @@ edition = "2018"
clap = "2.33.0"
rpassword = "4.0"
semver = "0.9.0"
solana-sdk = { path = "../sdk", version = "0.22.3" }
solana-sdk = { path = "../sdk", version = "0.22.8" }
tiny-bip39 = "0.6.2"
url = "2.1.0"
chrono = "0.4"

View File

@@ -43,6 +43,11 @@ pub fn is_pubkey_or_keypair(string: String) -> Result<(), String> {
is_pubkey(string.clone()).or_else(|_| is_keypair(string))
}
// Return an error if string cannot be parsed as a pubkey, a keypair file, or the keypair ask keyword
pub fn is_pubkey_or_keypair_or_ask_keyword(string: String) -> Result<(), String> {
is_pubkey(string.clone()).or_else(|_| is_keypair_or_ask_keyword(string))
}
// Return an error if string cannot be parsed as a pubkey=signature string
pub fn is_pubkey_sig(string: String) -> Result<(), String> {
let mut signer = string.split('=');

View File

@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-cli"
description = "Blockchain, Rebuilt for Scale"
version = "0.22.3"
version = "0.22.8"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -28,24 +28,24 @@ serde = "1.0.104"
serde_derive = "1.0.103"
serde_json = "1.0.44"
serde_yaml = "0.8.11"
solana-budget-program = { path = "../programs/budget", version = "0.22.3" }
solana-clap-utils = { path = "../clap-utils", version = "0.22.3" }
solana-client = { path = "../client", version = "0.22.3" }
solana-config-program = { path = "../programs/config", version = "0.22.3" }
solana-faucet = { path = "../faucet", version = "0.22.3" }
solana-logger = { path = "../logger", version = "0.22.3" }
solana-net-utils = { path = "../net-utils", version = "0.22.3" }
solana-runtime = { path = "../runtime", version = "0.22.3" }
solana-sdk = { path = "../sdk", version = "0.22.3" }
solana-stake-program = { path = "../programs/stake", version = "0.22.3" }
solana-storage-program = { path = "../programs/storage", version = "0.22.3" }
solana-vote-program = { path = "../programs/vote", version = "0.22.3" }
solana-vote-signer = { path = "../vote-signer", version = "0.22.3" }
solana-budget-program = { path = "../programs/budget", version = "0.22.8" }
solana-clap-utils = { path = "../clap-utils", version = "0.22.8" }
solana-client = { path = "../client", version = "0.22.8" }
solana-config-program = { path = "../programs/config", version = "0.22.8" }
solana-faucet = { path = "../faucet", version = "0.22.8" }
solana-logger = { path = "../logger", version = "0.22.8" }
solana-net-utils = { path = "../net-utils", version = "0.22.8" }
solana-runtime = { path = "../runtime", version = "0.22.8" }
solana-sdk = { path = "../sdk", version = "0.22.8" }
solana-stake-program = { path = "../programs/stake", version = "0.22.8" }
solana-storage-program = { path = "../programs/storage", version = "0.22.8" }
solana-vote-program = { path = "../programs/vote", version = "0.22.8" }
solana-vote-signer = { path = "../vote-signer", version = "0.22.8" }
url = "2.1.0"
[dev-dependencies]
solana-core = { path = "../core", version = "0.22.3" }
solana-budget-program = { path = "../programs/budget", version = "0.22.3" }
solana-core = { path = "../core", version = "0.22.8" }
solana-budget-program = { path = "../programs/budget", version = "0.22.8" }
tempfile = "3.1.0"
[[bin]]

View File

@@ -1,7 +1,7 @@
use crate::{
cluster_query::*,
display::{println_name_value, println_signers},
nonce::*,
nonce::{self, *},
stake::*,
storage::*,
validator_info::*,
@@ -72,6 +72,84 @@ impl std::ops::Deref for KeypairEq {
}
}
#[derive(Debug)]
pub enum SigningAuthority {
Online(Keypair),
// We hold a random keypair alongside our legit pubkey in order
// to generate a placeholder signature in the transaction
Offline(Pubkey, Keypair),
}
impl SigningAuthority {
pub fn new_from_matches(
matches: &ArgMatches<'_>,
name: &str,
signers: Option<&[(Pubkey, Signature)]>,
) -> Result<Self, CliError> {
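// Prefer an online signer: use the keypair if one can be read from `name`.
// Otherwise accept a bare pubkey, but only if it appears in the supplied
// presigner list, making it a usable offline authority.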
keypair_of(matches, name)
.map(|keypair| keypair.into())
.or_else(|| {
pubkey_of(matches, name)
.filter(|pubkey| {
signers
.and_then(|signers| {
signers.iter().find(|(signer, _sig)| *signer == *pubkey)
})
.is_some()
})
.map(|pubkey| pubkey.into())
})
.ok_or_else(|| CliError::BadParameter("Invalid authority".to_string()))
}
pub fn keypair(&self) -> &Keypair {
match self {
SigningAuthority::Online(keypair) => keypair,
SigningAuthority::Offline(_pubkey, keypair) => keypair,
}
}
pub fn pubkey(&self) -> Pubkey {
match self {
SigningAuthority::Online(keypair) => keypair.pubkey(),
SigningAuthority::Offline(pubkey, _keypair) => *pubkey,
}
}
}
impl From<Keypair> for SigningAuthority {
fn from(keypair: Keypair) -> Self {
SigningAuthority::Online(keypair)
}
}
impl From<Pubkey> for SigningAuthority {
fn from(pubkey: Pubkey) -> Self {
SigningAuthority::Offline(pubkey, Keypair::new())
}
}
impl PartialEq for SigningAuthority {
fn eq(&self, other: &Self) -> bool {
match (self, other) {
(SigningAuthority::Online(keypair1), SigningAuthority::Online(keypair2)) => {
keypair1.pubkey() == keypair2.pubkey()
}
(SigningAuthority::Online(keypair), SigningAuthority::Offline(pubkey, _))
| (SigningAuthority::Offline(pubkey, _), SigningAuthority::Online(keypair)) => {
keypair.pubkey() == *pubkey
}
(SigningAuthority::Offline(pubkey1, _), SigningAuthority::Offline(pubkey2, _)) => {
pubkey1 == pubkey2
}
}
}
}
pub fn nonce_authority_arg<'a, 'b>() -> Arg<'a, 'b> {
nonce::nonce_authority_arg().requires(NONCE_ARG.name)
}
#[derive(Default, Debug, PartialEq)]
pub struct PayCommand {
pub lamports: u64,
@@ -84,7 +162,7 @@ pub struct PayCommand {
pub signers: Option<Vec<(Pubkey, Signature)>>,
pub blockhash: Option<Hash>,
pub nonce_account: Option<Pubkey>,
pub nonce_authority: Option<KeypairEq>,
pub nonce_authority: Option<SigningAuthority>,
}
#[derive(Debug, PartialEq)]
@@ -114,6 +192,7 @@ pub enum CliCommand {
GetTransactionCount {
commitment_config: CommitmentConfig,
},
LeaderSchedule,
Ping {
lamports: u64,
interval: Duration,
@@ -132,7 +211,7 @@ pub enum CliCommand {
// Nonce commands
AuthorizeNonceAccount {
nonce_account: Pubkey,
nonce_authority: Option<KeypairEq>,
nonce_authority: Option<SigningAuthority>,
new_authority: Pubkey,
},
CreateNonceAccount {
@@ -143,7 +222,7 @@ pub enum CliCommand {
GetNonce(Pubkey),
NewNonce {
nonce_account: Pubkey,
nonce_authority: Option<KeypairEq>,
nonce_authority: Option<SigningAuthority>,
},
ShowNonceAccount {
nonce_account_pubkey: Pubkey,
@@ -151,7 +230,7 @@ pub enum CliCommand {
},
WithdrawFromNonceAccount {
nonce_account: Pubkey,
nonce_authority: Option<KeypairEq>,
nonce_authority: Option<SigningAuthority>,
destination_account_pubkey: Pubkey,
lamports: u64,
},
@@ -167,23 +246,23 @@ pub enum CliCommand {
},
DeactivateStake {
stake_account_pubkey: Pubkey,
stake_authority: Option<KeypairEq>,
stake_authority: Option<SigningAuthority>,
sign_only: bool,
signers: Option<Vec<(Pubkey, Signature)>>,
blockhash: Option<Hash>,
nonce_account: Option<Pubkey>,
nonce_authority: Option<KeypairEq>,
nonce_authority: Option<SigningAuthority>,
},
DelegateStake {
stake_account_pubkey: Pubkey,
vote_account_pubkey: Pubkey,
stake_authority: Option<KeypairEq>,
stake_authority: Option<SigningAuthority>,
force: bool,
sign_only: bool,
signers: Option<Vec<(Pubkey, Signature)>>,
blockhash: Option<Hash>,
nonce_account: Option<Pubkey>,
nonce_authority: Option<KeypairEq>,
nonce_authority: Option<SigningAuthority>,
},
RedeemVoteCredits(Pubkey, Pubkey),
ShowStakeHistory {
@@ -197,13 +276,18 @@ pub enum CliCommand {
stake_account_pubkey: Pubkey,
new_authorized_pubkey: Pubkey,
stake_authorize: StakeAuthorize,
authority: Option<KeypairEq>,
authority: Option<SigningAuthority>,
sign_only: bool,
signers: Option<Vec<(Pubkey, Signature)>>,
blockhash: Option<Hash>,
nonce_account: Option<Pubkey>,
nonce_authority: Option<SigningAuthority>,
},
WithdrawStake {
stake_account_pubkey: Pubkey,
destination_account_pubkey: Pubkey,
lamports: u64,
withdraw_authority: Option<KeypairEq>,
withdraw_authority: Option<SigningAuthority>,
},
// Storage Commands
CreateStorageAccount {
@@ -366,6 +450,10 @@ pub fn parse_command(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, Box<dyn
}),
("get-slot", Some(matches)) => parse_get_slot(matches),
("get-transaction-count", Some(matches)) => parse_get_transaction_count(matches),
("leader-schedule", Some(_matches)) => Ok(CliCommandInfo {
command: CliCommand::LeaderSchedule,
require_keypair: false,
}),
("ping", Some(matches)) => parse_cluster_ping(matches),
("show-block-production", Some(matches)) => parse_show_block_production(matches),
("show-gossip", Some(_matches)) => Ok(CliCommandInfo {
@@ -521,11 +609,11 @@ pub fn parse_command(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, Box<dyn
let blockhash = value_of(&matches, "blockhash");
let nonce_account = pubkey_of(&matches, NONCE_ARG.name);
let nonce_authority = if matches.is_present(NONCE_AUTHORITY_ARG.name) {
let authority =
keypair_of(&matches, NONCE_AUTHORITY_ARG.name).ok_or_else(|| {
CliError::BadParameter("Invalid keypair for nonce-authority".into())
})?;
Some(authority.into())
Some(SigningAuthority::new_from_matches(
&matches,
NONCE_AUTHORITY_ARG.name,
signers.as_deref(),
)?)
} else {
None
};
@@ -921,7 +1009,7 @@ fn process_pay(
signers: &Option<Vec<(Pubkey, Signature)>>,
blockhash: Option<Hash>,
nonce_account: Option<Pubkey>,
nonce_authority: Option<&Keypair>,
nonce_authority: Option<&SigningAuthority>,
) -> ProcessResult {
check_unique_pubkeys(
(&config.keypair.pubkey(), "cli keypair".to_string()),
@@ -939,7 +1027,9 @@ fn process_pay(
if timestamp == None && *witnesses == None {
let mut tx = if let Some(nonce_account) = &nonce_account {
let nonce_authority: &Keypair = nonce_authority.unwrap_or(&config.keypair);
let nonce_authority: &Keypair = nonce_authority
.map(|authority| authority.keypair())
.unwrap_or(&config.keypair);
system_transaction::nonced_transfer(
&config.keypair,
to,
@@ -960,9 +1050,11 @@ fn process_pay(
return_signers(&tx)
} else {
if let Some(nonce_account) = &nonce_account {
let nonce_authority: &Keypair = nonce_authority.unwrap_or(&config.keypair);
let nonce_authority: Pubkey = nonce_authority
.map(|authority| authority.pubkey())
.unwrap_or_else(|| config.keypair.pubkey());
let nonce_account = rpc_client.get_account(nonce_account)?;
check_nonce_account(&nonce_account, &nonce_authority.pubkey(), &blockhash)?;
check_nonce_account(&nonce_account, &nonce_authority, &blockhash)?;
}
check_account_for_fee(
rpc_client,
@@ -1173,6 +1265,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
CliCommand::GetTransactionCount { commitment_config } => {
process_get_transaction_count(&rpc_client, commitment_config)
}
CliCommand::LeaderSchedule => process_leader_schedule(&rpc_client),
CliCommand::Ping {
lamports,
interval,
@@ -1207,7 +1300,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
&rpc_client,
config,
nonce_account,
nonce_authority.as_deref(),
nonce_authority.as_ref(),
new_authority,
),
// Create nonce account
@@ -1230,12 +1323,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
CliCommand::NewNonce {
nonce_account,
ref nonce_authority,
} => process_new_nonce(
&rpc_client,
config,
nonce_account,
nonce_authority.as_deref(),
),
} => process_new_nonce(&rpc_client, config, nonce_account, nonce_authority.as_ref()),
// Show the contents of a nonce account
CliCommand::ShowNonceAccount {
nonce_account_pubkey,
@@ -1251,7 +1339,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
&rpc_client,
config,
&nonce_account,
nonce_authority.as_deref(),
nonce_authority.as_ref(),
&destination_account_pubkey,
*lamports,
),
@@ -1294,12 +1382,12 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
&rpc_client,
config,
&stake_account_pubkey,
stake_authority.as_deref(),
stake_authority.as_ref(),
*sign_only,
signers,
*blockhash,
*nonce_account,
nonce_authority.as_deref(),
nonce_authority.as_ref(),
),
CliCommand::DelegateStake {
stake_account_pubkey,
@@ -1316,13 +1404,13 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
config,
&stake_account_pubkey,
&vote_account_pubkey,
stake_authority.as_deref(),
stake_authority.as_ref(),
*force,
*sign_only,
signers,
*blockhash,
*nonce_account,
nonce_authority.as_deref(),
nonce_authority.as_ref(),
),
CliCommand::RedeemVoteCredits(stake_account_pubkey, vote_account_pubkey) => {
process_redeem_vote_credits(
@@ -1349,13 +1437,23 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
new_authorized_pubkey,
stake_authorize,
ref authority,
sign_only,
ref signers,
blockhash,
nonce_account,
ref nonce_authority,
} => process_stake_authorize(
&rpc_client,
config,
&stake_account_pubkey,
&new_authorized_pubkey,
*stake_authorize,
authority.as_deref(),
authority.as_ref(),
*sign_only,
signers,
*blockhash,
*nonce_account,
nonce_authority.as_ref(),
),
CliCommand::WithdrawStake {
@@ -1369,7 +1467,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
&stake_account_pubkey,
&destination_account_pubkey,
*lamports,
withdraw_authority.as_deref(),
withdraw_authority.as_ref(),
),
// Storage Commands
@@ -1539,7 +1637,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
signers,
*blockhash,
*nonce_account,
nonce_authority.as_deref(),
nonce_authority.as_ref(),
),
CliCommand::ShowAccount {
pubkey,
@@ -1881,23 +1979,8 @@ pub fn app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> App<'ab, '
.takes_value(false)
.help("Sign the transaction offline"),
)
.arg(
Arg::with_name(NONCE_ARG.name)
.long(NONCE_ARG.long)
.takes_value(true)
.value_name("PUBKEY")
.requires("blockhash")
.validator(is_pubkey_or_keypair)
.help(NONCE_ARG.help),
)
.arg(
Arg::with_name(NONCE_AUTHORITY_ARG.name)
.long(NONCE_AUTHORITY_ARG.long)
.takes_value(true)
.requires(NONCE_ARG.name)
.validator(is_keypair_or_ask_keyword)
.help(NONCE_AUTHORITY_ARG.help),
)
.arg(nonce_arg())
.arg(nonce_authority_arg())
.arg(
Arg::with_name("signer")
.long("signer")
@@ -2451,6 +2534,67 @@ mod tests {
}
);
// Test Pay Subcommand w/ Nonce and Offline Nonce Authority
let keypair = read_keypair_file(&keypair_file).unwrap();
let authority_pubkey = keypair.pubkey();
let authority_pubkey_string = format!("{}", authority_pubkey);
let sig = keypair.sign_message(&[0u8]);
let signer_arg = format!("{}={}", authority_pubkey, sig);
let test_pay = test_commands.clone().get_matches_from(vec![
"test",
"pay",
&pubkey_string,
"50",
"lamports",
"--blockhash",
&blockhash_string,
"--nonce",
&pubkey_string,
"--nonce-authority",
&authority_pubkey_string,
"--signer",
&signer_arg,
]);
assert_eq!(
parse_command(&test_pay).unwrap(),
CliCommandInfo {
command: CliCommand::Pay(PayCommand {
lamports: 50,
to: pubkey,
blockhash: Some(blockhash),
nonce_account: Some(pubkey),
nonce_authority: Some(authority_pubkey.into()),
signers: Some(vec![(authority_pubkey, sig)]),
..PayCommand::default()
}),
require_keypair: true
}
);
// Test Pay Subcommand w/ Nonce and Offline Nonce Authority
// authority pubkey not in signers fails
let keypair = read_keypair_file(&keypair_file).unwrap();
let authority_pubkey = keypair.pubkey();
let authority_pubkey_string = format!("{}", authority_pubkey);
let sig = keypair.sign_message(&[0u8]);
let signer_arg = format!("{}={}", Pubkey::new_rand(), sig);
let test_pay = test_commands.clone().get_matches_from(vec![
"test",
"pay",
&pubkey_string,
"50",
"lamports",
"--blockhash",
&blockhash_string,
"--nonce",
&pubkey_string,
"--nonce-authority",
&authority_pubkey_string,
"--signer",
&signer_arg,
]);
assert!(parse_command(&test_pay).is_err());
// Test Send-Signature Subcommand
let test_send_signature = test_commands.clone().get_matches_from(vec![
"test",

View File

@@ -54,6 +54,7 @@ impl ClusterQuerySubCommands for App<'_, '_> {
.about("Get the version of the cluster entrypoint"),
)
.subcommand(SubCommand::with_name("fees").about("Display current cluster fees"))
.subcommand(SubCommand::with_name("leader-schedule").about("Display leader schedule"))
.subcommand(SubCommand::with_name("get-block-time")
.about("Get estimated production time of a block")
.arg(
@@ -364,6 +365,41 @@ pub fn process_fees(rpc_client: &RpcClient) -> ProcessResult {
))
}
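// Handler for the new `leader-schedule` subcommand: maps each slot in the
// current epoch to its scheduled leader and prints one "<slot> <pubkey>" line per slot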
pub fn process_leader_schedule(rpc_client: &RpcClient) -> ProcessResult {
let epoch_info = rpc_client.get_epoch_info()?;
let first_slot_in_epoch = epoch_info.absolute_slot - epoch_info.slot_index;
let leader_schedule = rpc_client.get_leader_schedule(Some(first_slot_in_epoch))?;
if leader_schedule.is_none() {
return Err(format!(
"Unable to fetch leader schedule for slot {}",
first_slot_in_epoch
)
.into());
}
let leader_schedule = leader_schedule.unwrap();
let mut leader_per_slot_index = Vec::new();
for (pubkey, leader_slots) in leader_schedule.iter() {
for slot_index in leader_slots.iter() {
if *slot_index >= leader_per_slot_index.len() {
leader_per_slot_index.resize(*slot_index + 1, "?");
}
leader_per_slot_index[*slot_index] = pubkey;
}
}
for (slot_index, leader) in leader_per_slot_index.iter().enumerate() {
println!(
" {:<15} {:<44}",
first_slot_in_epoch + slot_index as u64,
leader
);
}
Ok("".to_string())
}
pub fn process_get_block_time(rpc_client: &RpcClient, slot: Slot) -> ProcessResult {
let timestamp = rpc_client.get_block_time(slot)?;
Ok(timestamp.to_string())
@@ -434,19 +470,39 @@ pub fn process_show_block_production(
return Err(format!("Epoch {} is in the future", epoch).into());
}
let minimum_ledger_slot = rpc_client.minimum_ledger_slot()?;
let first_slot_in_epoch = epoch_schedule.get_first_slot_in_epoch(epoch);
let end_slot = std::cmp::min(
epoch_info.absolute_slot,
epoch_schedule.get_last_slot_in_epoch(epoch),
);
let start_slot = if let Some(slot_limit) = slot_limit {
let mut start_slot = if let Some(slot_limit) = slot_limit {
std::cmp::max(end_slot.saturating_sub(slot_limit), first_slot_in_epoch)
} else {
first_slot_in_epoch
};
let start_slot_index = (start_slot - first_slot_in_epoch) as usize;
let end_slot_index = (end_slot - first_slot_in_epoch) as usize;
if minimum_ledger_slot > end_slot {
return Err(format!(
"Ledger data not available for slots {} to {} (minimum ledger slot is {})",
start_slot, end_slot, minimum_ledger_slot
)
.into());
}
if minimum_ledger_slot > start_slot {
println!(
"\n{}",
style(format!(
"Note: Requested start slot was {} but minimum ledger slot is {}",
start_slot, minimum_ledger_slot
))
.italic(),
);
start_slot = minimum_ledger_slot;
}
let progress_bar = new_spinner_progress_bar();
progress_bar.set_message(&format!(
@@ -455,6 +511,8 @@ pub fn process_show_block_production(
));
let confirmed_blocks = rpc_client.get_confirmed_blocks(start_slot, Some(end_slot))?;
let start_slot_index = (start_slot - first_slot_in_epoch) as usize;
let end_slot_index = (end_slot - first_slot_in_epoch) as usize;
let total_slots = end_slot_index - start_slot_index + 1;
let total_blocks = confirmed_blocks.len();
assert!(total_blocks <= total_slots);

View File

@@ -1,7 +1,7 @@
use crate::cli::{
build_balance_message, check_account_for_fee, check_unique_pubkeys,
log_instruction_custom_error, required_lamports_from, CliCommand, CliCommandInfo, CliConfig,
CliError, ProcessResult,
CliError, ProcessResult, SigningAuthority,
};
use clap::{App, Arg, ArgMatches, SubCommand};
use solana_clap_utils::{input_parsers::*, input_validators::*, ArgConstant};
@@ -49,13 +49,23 @@ pub trait NonceSubCommands {
fn nonce_subcommands(self) -> Self;
}
fn nonce_authority_arg<'a, 'b>() -> Arg<'a, 'b> {
Arg::with_name("nonce_authority")
.long("nonce-authority")
pub fn nonce_arg<'a, 'b>() -> Arg<'a, 'b> {
Arg::with_name(NONCE_ARG.name)
.long(NONCE_ARG.long)
.takes_value(true)
.value_name("KEYPAIR")
.validator(is_keypair_or_ask_keyword)
.help("Specify nonce authority if different from account")
.value_name("PUBKEY")
.requires("blockhash")
.validator(is_pubkey)
.help(NONCE_ARG.help)
}
pub fn nonce_authority_arg<'a, 'b>() -> Arg<'a, 'b> {
Arg::with_name(NONCE_AUTHORITY_ARG.name)
.long(NONCE_AUTHORITY_ARG.long)
.takes_value(true)
.value_name("KEYPAIR or PUBKEY")
.validator(is_pubkey_or_keypair_or_ask_keyword)
.help(NONCE_AUTHORITY_ARG.help)
}
impl NonceSubCommands for App<'_, '_> {
@@ -113,8 +123,8 @@ impl NonceSubCommands for App<'_, '_> {
.help("Specify unit to use for request"),
)
.arg(
Arg::with_name("nonce_authority")
.long("nonce-authority")
Arg::with_name(NONCE_AUTHORITY_ARG.name)
.long(NONCE_AUTHORITY_ARG.long)
.takes_value(true)
.value_name("BASE58_PUBKEY")
.validator(is_pubkey_or_keypair)
@@ -213,7 +223,15 @@ impl NonceSubCommands for App<'_, '_> {
pub fn parse_authorize_nonce_account(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
let nonce_account = pubkey_of(matches, "nonce_account_keypair").unwrap();
let new_authority = pubkey_of(matches, "new_authority").unwrap();
let nonce_authority = keypair_of(matches, "nonce_authority").map(|kp| kp.into());
let nonce_authority = if matches.is_present(NONCE_AUTHORITY_ARG.name) {
Some(SigningAuthority::new_from_matches(
&matches,
NONCE_AUTHORITY_ARG.name,
None,
)?)
} else {
None
};
Ok(CliCommandInfo {
command: CliCommand::AuthorizeNonceAccount {
@@ -228,7 +246,7 @@ pub fn parse_authorize_nonce_account(matches: &ArgMatches<'_>) -> Result<CliComm
pub fn parse_nonce_create_account(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
let nonce_account = keypair_of(matches, "nonce_account_keypair").unwrap();
let lamports = required_lamports_from(matches, "amount", "unit")?;
let nonce_authority = pubkey_of(matches, "nonce_authority");
let nonce_authority = pubkey_of(matches, NONCE_AUTHORITY_ARG.name);
Ok(CliCommandInfo {
command: CliCommand::CreateNonceAccount {
@@ -251,7 +269,15 @@ pub fn parse_get_nonce(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliEr
pub fn parse_new_nonce(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
let nonce_account = pubkey_of(matches, "nonce_account_keypair").unwrap();
let nonce_authority = keypair_of(matches, "nonce_authority").map(|kp| kp.into());
let nonce_authority = if matches.is_present(NONCE_AUTHORITY_ARG.name) {
Some(SigningAuthority::new_from_matches(
&matches,
NONCE_AUTHORITY_ARG.name,
None,
)?)
} else {
None
};
Ok(CliCommandInfo {
command: CliCommand::NewNonce {
@@ -281,7 +307,15 @@ pub fn parse_withdraw_from_nonce_account(
let nonce_account = pubkey_of(matches, "nonce_account_keypair").unwrap();
let destination_account_pubkey = pubkey_of(matches, "destination_account_pubkey").unwrap();
let lamports = required_lamports_from(matches, "amount", "unit")?;
let nonce_authority = keypair_of(matches, "nonce_authority").map(|kp| kp.into());
let nonce_authority = if matches.is_present(NONCE_AUTHORITY_ARG.name) {
Some(SigningAuthority::new_from_matches(
&matches,
NONCE_AUTHORITY_ARG.name,
None,
)?)
} else {
None
};
Ok(CliCommandInfo {
command: CliCommand::WithdrawFromNonceAccount {
@@ -326,12 +360,14 @@ pub fn process_authorize_nonce_account(
rpc_client: &RpcClient,
config: &CliConfig,
nonce_account: &Pubkey,
nonce_authority: Option<&Keypair>,
nonce_authority: Option<&SigningAuthority>,
new_authority: &Pubkey,
) -> ProcessResult {
let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;
let nonce_authority = nonce_authority.unwrap_or(&config.keypair);
let nonce_authority = nonce_authority
.map(|a| a.keypair())
.unwrap_or(&config.keypair);
let ix = nonce_authorize(nonce_account, &nonce_authority.pubkey(), new_authority);
let mut tx = Transaction::new_signed_with_payer(
vec![ix],
@@ -435,7 +471,7 @@ pub fn process_new_nonce(
rpc_client: &RpcClient,
config: &CliConfig,
nonce_account: &Pubkey,
nonce_authority: Option<&Keypair>,
nonce_authority: Option<&SigningAuthority>,
) -> ProcessResult {
check_unique_pubkeys(
(&config.keypair.pubkey(), "cli keypair".to_string()),
@@ -449,7 +485,9 @@ pub fn process_new_nonce(
.into());
}
let nonce_authority = nonce_authority.unwrap_or(&config.keypair);
let nonce_authority = nonce_authority
.map(|a| a.keypair())
.unwrap_or(&config.keypair);
let ix = nonce_advance(&nonce_account, &nonce_authority.pubkey());
let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;
let mut tx = Transaction::new_signed_with_payer(
@@ -516,13 +554,15 @@ pub fn process_withdraw_from_nonce_account(
rpc_client: &RpcClient,
config: &CliConfig,
nonce_account: &Pubkey,
nonce_authority: Option<&Keypair>,
nonce_authority: Option<&SigningAuthority>,
destination_account_pubkey: &Pubkey,
lamports: u64,
) -> ProcessResult {
let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;
let nonce_authority = nonce_authority.unwrap_or(&config.keypair);
let nonce_authority = nonce_authority
.map(|a| a.keypair())
.unwrap_or(&config.keypair);
let ix = nonce_withdraw(
nonce_account,
&nonce_authority.pubkey(),

View File

@@ -1,11 +1,11 @@
use crate::{
cli::{
build_balance_message, check_account_for_fee, check_unique_pubkeys,
get_blockhash_fee_calculator, log_instruction_custom_error, replace_signatures,
required_lamports_from, return_signers, CliCommand, CliCommandInfo, CliConfig, CliError,
ProcessResult,
get_blockhash_fee_calculator, log_instruction_custom_error, nonce_authority_arg,
replace_signatures, required_lamports_from, return_signers, CliCommand, CliCommandInfo,
CliConfig, CliError, ProcessResult, SigningAuthority,
},
nonce::{check_nonce_account, NONCE_ARG, NONCE_AUTHORITY_ARG},
nonce::{check_nonce_account, nonce_arg, NONCE_ARG, NONCE_AUTHORITY_ARG},
};
use clap::{App, Arg, ArgMatches, SubCommand};
use console::style;
@@ -48,8 +48,8 @@ fn stake_authority_arg<'a, 'b>() -> Arg<'a, 'b> {
Arg::with_name(STAKE_AUTHORITY_ARG.name)
.long(STAKE_AUTHORITY_ARG.long)
.takes_value(true)
.value_name("KEYPAIR")
.validator(is_keypair_or_ask_keyword)
.value_name("KEYPAIR of PUBKEY")
.validator(is_pubkey_or_keypair_or_ask_keyword)
.help(STAKE_AUTHORITY_ARG.help)
}
@@ -57,8 +57,8 @@ fn withdraw_authority_arg<'a, 'b>() -> Arg<'a, 'b> {
Arg::with_name(WITHDRAW_AUTHORITY_ARG.name)
.long(WITHDRAW_AUTHORITY_ARG.long)
.takes_value(true)
.value_name("KEYPAIR")
.validator(is_keypair_or_ask_keyword)
.value_name("KEYPAIR or PUBKEY")
.validator(is_pubkey_or_keypair_or_ask_keyword)
.help(WITHDRAW_AUTHORITY_ARG.help)
}
@@ -189,23 +189,8 @@ impl StakeSubCommands for App<'_, '_> {
.validator(is_hash)
.help("Use the supplied blockhash"),
)
.arg(
Arg::with_name(NONCE_ARG.name)
.long(NONCE_ARG.long)
.takes_value(true)
.value_name("PUBKEY")
.requires("blockhash")
.validator(is_pubkey)
.help(NONCE_ARG.help)
)
.arg(
Arg::with_name(NONCE_AUTHORITY_ARG.name)
.long(NONCE_AUTHORITY_ARG.long)
.takes_value(true)
.requires(NONCE_ARG.name)
.validator(is_keypair_or_ask_keyword)
.help(NONCE_AUTHORITY_ARG.help)
),
.arg(nonce_arg())
.arg(nonce_authority_arg())
)
.subcommand(
SubCommand::with_name("stake-authorize-staker")
@@ -229,6 +214,31 @@ impl StakeSubCommands for App<'_, '_> {
.help("New authorized staker")
)
.arg(stake_authority_arg())
.arg(
Arg::with_name("sign_only")
.long("sign-only")
.takes_value(false)
.help("Sign the transaction offline"),
)
.arg(
Arg::with_name("signer")
.long("signer")
.value_name("PUBKEY=BASE58_SIG")
.takes_value(true)
.validator(is_pubkey_sig)
.multiple(true)
.help("Provide a public-key/signature pair for the transaction"),
)
.arg(
Arg::with_name("blockhash")
.long("blockhash")
.value_name("BLOCKHASH")
.takes_value(true)
.validator(is_hash)
.help("Use the supplied blockhash"),
)
.arg(nonce_arg())
.arg(nonce_authority_arg())
)
.subcommand(
SubCommand::with_name("stake-authorize-withdrawer")
@@ -252,6 +262,31 @@ impl StakeSubCommands for App<'_, '_> {
.help("New authorized withdrawer")
)
.arg(withdraw_authority_arg())
.arg(
Arg::with_name("sign_only")
.long("sign-only")
.takes_value(false)
.help("Sign the transaction offline"),
)
.arg(
Arg::with_name("signer")
.long("signer")
.value_name("PUBKEY=BASE58_SIG")
.takes_value(true)
.validator(is_pubkey_sig)
.multiple(true)
.help("Provide a public-key/signature pair for the transaction"),
)
.arg(
Arg::with_name("blockhash")
.long("blockhash")
.value_name("BLOCKHASH")
.takes_value(true)
.validator(is_hash)
.help("Use the supplied blockhash"),
)
.arg(nonce_arg())
.arg(nonce_authority_arg())
)
.subcommand(
SubCommand::with_name("deactivate-stake")
@@ -288,23 +323,8 @@ impl StakeSubCommands for App<'_, '_> {
.validator(is_hash)
.help("Use the supplied blockhash"),
)
.arg(
Arg::with_name(NONCE_ARG.name)
.long(NONCE_ARG.long)
.takes_value(true)
.value_name("PUBKEY")
.requires("blockhash")
.validator(is_pubkey)
.help(NONCE_ARG.help)
)
.arg(
Arg::with_name(NONCE_AUTHORITY_ARG.name)
.long(NONCE_AUTHORITY_ARG.long)
.takes_value(true)
.requires(NONCE_ARG.name)
.validator(is_keypair_or_ask_keyword)
.help(NONCE_AUTHORITY_ARG.help)
),
.arg(nonce_arg())
.arg(nonce_authority_arg())
)
.subcommand(
SubCommand::with_name("withdraw-stake")
@@ -428,23 +448,27 @@ pub fn parse_stake_create_account(matches: &ArgMatches<'_>) -> Result<CliCommand
pub fn parse_stake_delegate_stake(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
let stake_account_pubkey = pubkey_of(matches, "stake_account_pubkey").unwrap();
let vote_account_pubkey = pubkey_of(matches, "vote_account_pubkey").unwrap();
let stake_authority = if matches.is_present(STAKE_AUTHORITY_ARG.name) {
let authority = keypair_of(&matches, STAKE_AUTHORITY_ARG.name)
.ok_or_else(|| CliError::BadParameter("Invalid keypair for stake-authority".into()))?;
Some(authority.into())
} else {
None
};
let force = matches.is_present("force");
let sign_only = matches.is_present("sign_only");
let signers = pubkeys_sigs_of(&matches, "signer");
let blockhash = value_of(matches, "blockhash");
let require_keypair = signers.is_none();
let nonce_account = pubkey_of(&matches, NONCE_ARG.name);
let stake_authority = if matches.is_present(STAKE_AUTHORITY_ARG.name) {
Some(SigningAuthority::new_from_matches(
&matches,
STAKE_AUTHORITY_ARG.name,
signers.as_deref(),
)?)
} else {
None
};
let nonce_authority = if matches.is_present(NONCE_AUTHORITY_ARG.name) {
let authority = keypair_of(&matches, NONCE_AUTHORITY_ARG.name)
.ok_or_else(|| CliError::BadParameter("Invalid keypair for nonce-authority".into()))?;
Some(authority.into())
Some(SigningAuthority::new_from_matches(
&matches,
NONCE_AUTHORITY_ARG.name,
signers.as_deref(),
)?)
} else {
None
};
@@ -475,11 +499,25 @@ pub fn parse_stake_authorize(
StakeAuthorize::Staker => STAKE_AUTHORITY_ARG.name,
StakeAuthorize::Withdrawer => WITHDRAW_AUTHORITY_ARG.name,
};
let sign_only = matches.is_present("sign_only");
let signers = pubkeys_sigs_of(&matches, "signer");
let authority = if matches.is_present(authority_flag) {
let authority = keypair_of(&matches, authority_flag).ok_or_else(|| {
CliError::BadParameter(format!("Invalid keypair for {}", authority_flag))
})?;
Some(authority.into())
Some(SigningAuthority::new_from_matches(
&matches,
authority_flag,
signers.as_deref(),
)?)
} else {
None
};
let blockhash = value_of(matches, "blockhash");
let nonce_account = pubkey_of(&matches, NONCE_ARG.name);
let nonce_authority = if matches.is_present(NONCE_AUTHORITY_ARG.name) {
Some(SigningAuthority::new_from_matches(
&matches,
NONCE_AUTHORITY_ARG.name,
signers.as_deref(),
)?)
} else {
None
};
@@ -490,6 +528,11 @@ pub fn parse_stake_authorize(
new_authorized_pubkey,
stake_authorize,
authority,
sign_only,
signers,
blockhash,
nonce_account,
nonce_authority,
},
require_keypair: true,
})
@@ -507,22 +550,26 @@ pub fn parse_redeem_vote_credits(matches: &ArgMatches<'_>) -> Result<CliCommandI
pub fn parse_stake_deactivate_stake(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
let stake_account_pubkey = pubkey_of(matches, "stake_account_pubkey").unwrap();
let stake_authority = if matches.is_present(STAKE_AUTHORITY_ARG.name) {
let authority = keypair_of(&matches, STAKE_AUTHORITY_ARG.name)
.ok_or_else(|| CliError::BadParameter("Invalid keypair for stake-authority".into()))?;
Some(authority.into())
} else {
None
};
let sign_only = matches.is_present("sign_only");
let signers = pubkeys_sigs_of(&matches, "signer");
let blockhash = value_of(matches, "blockhash");
let require_keypair = signers.is_none();
let nonce_account = pubkey_of(&matches, NONCE_ARG.name);
let stake_authority = if matches.is_present(STAKE_AUTHORITY_ARG.name) {
Some(SigningAuthority::new_from_matches(
&matches,
STAKE_AUTHORITY_ARG.name,
signers.as_deref(),
)?)
} else {
None
};
let nonce_authority = if matches.is_present(NONCE_AUTHORITY_ARG.name) {
let authority = keypair_of(&matches, NONCE_AUTHORITY_ARG.name)
.ok_or_else(|| CliError::BadParameter("Invalid keypair for nonce-authority".into()))?;
Some(authority.into())
Some(SigningAuthority::new_from_matches(
&matches,
NONCE_AUTHORITY_ARG.name,
signers.as_deref(),
)?)
} else {
None
};
@@ -546,10 +593,11 @@ pub fn parse_stake_withdraw_stake(matches: &ArgMatches<'_>) -> Result<CliCommand
let destination_account_pubkey = pubkey_of(matches, "destination_account_pubkey").unwrap();
let lamports = required_lamports_from(matches, "amount", "unit")?;
let withdraw_authority = if matches.is_present(WITHDRAW_AUTHORITY_ARG.name) {
let authority = keypair_of(&matches, WITHDRAW_AUTHORITY_ARG.name).ok_or_else(|| {
CliError::BadParameter("Invalid keypair for withdraw-authority".into())
})?;
Some(authority.into())
Some(SigningAuthority::new_from_matches(
&matches,
WITHDRAW_AUTHORITY_ARG.name,
None,
)?)
} else {
None
};
@@ -653,20 +701,27 @@ pub fn process_create_stake_account(
log_instruction_custom_error::<SystemError>(result)
}
#[allow(clippy::too_many_arguments)]
pub fn process_stake_authorize(
rpc_client: &RpcClient,
config: &CliConfig,
stake_account_pubkey: &Pubkey,
authorized_pubkey: &Pubkey,
stake_authorize: StakeAuthorize,
authority: Option<&Keypair>,
authority: Option<&SigningAuthority>,
sign_only: bool,
signers: &Option<Vec<(Pubkey, Signature)>>,
blockhash: Option<Hash>,
nonce_account: Option<Pubkey>,
nonce_authority: Option<&SigningAuthority>,
) -> ProcessResult {
check_unique_pubkeys(
(stake_account_pubkey, "stake_account_pubkey".to_string()),
(authorized_pubkey, "new_authorized_pubkey".to_string()),
)?;
let authority = authority.unwrap_or(&config.keypair);
let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;
let authority = authority.map(|a| a.keypair()).unwrap_or(&config.keypair);
let (recent_blockhash, fee_calculator) =
get_blockhash_fee_calculator(rpc_client, sign_only, blockhash)?;
let ixs = vec![stake_instruction::authorize(
stake_account_pubkey, // stake account to update
&authority.pubkey(), // currently authorized
@@ -674,42 +729,71 @@ pub fn process_stake_authorize(
stake_authorize, // stake or withdraw
)];
let mut tx = Transaction::new_signed_with_payer(
ixs,
Some(&config.keypair.pubkey()),
&[&config.keypair, authority],
recent_blockhash,
);
check_account_for_fee(
rpc_client,
&config.keypair.pubkey(),
&fee_calculator,
&tx.message,
)?;
let result = rpc_client.send_and_confirm_transaction(&mut tx, &[&config.keypair]);
log_instruction_custom_error::<StakeError>(result)
let (nonce_authority, nonce_authority_pubkey) = nonce_authority
.map(|a| (a.keypair(), a.pubkey()))
.unwrap_or((&config.keypair, config.keypair.pubkey()));
let mut tx = if let Some(nonce_account) = &nonce_account {
Transaction::new_signed_with_nonce(
ixs,
Some(&config.keypair.pubkey()),
&[&config.keypair, nonce_authority, authority],
nonce_account,
&nonce_authority.pubkey(),
recent_blockhash,
)
} else {
Transaction::new_signed_with_payer(
ixs,
Some(&config.keypair.pubkey()),
&[&config.keypair, authority],
recent_blockhash,
)
};
if let Some(signers) = signers {
replace_signatures(&mut tx, &signers)?;
}
if sign_only {
return_signers(&tx)
} else {
if let Some(nonce_account) = &nonce_account {
let nonce_account = rpc_client.get_account(nonce_account)?;
check_nonce_account(&nonce_account, &nonce_authority_pubkey, &recent_blockhash)?;
}
check_account_for_fee(
rpc_client,
&tx.message.account_keys[0],
&fee_calculator,
&tx.message,
)?;
let result = rpc_client.send_and_confirm_transaction(&mut tx, &[&config.keypair]);
log_instruction_custom_error::<StakeError>(result)
}
}
pub fn process_deactivate_stake_account(
rpc_client: &RpcClient,
config: &CliConfig,
stake_account_pubkey: &Pubkey,
stake_authority: Option<&Keypair>,
stake_authority: Option<&SigningAuthority>,
sign_only: bool,
signers: &Option<Vec<(Pubkey, Signature)>>,
blockhash: Option<Hash>,
nonce_account: Option<Pubkey>,
nonce_authority: Option<&Keypair>,
nonce_authority: Option<&SigningAuthority>,
) -> ProcessResult {
let (recent_blockhash, fee_calculator) =
get_blockhash_fee_calculator(rpc_client, sign_only, blockhash)?;
let stake_authority = stake_authority.unwrap_or(&config.keypair);
let stake_authority = stake_authority
.map(|a| a.keypair())
.unwrap_or(&config.keypair);
let ixs = vec![stake_instruction::deactivate_stake(
stake_account_pubkey,
&stake_authority.pubkey(),
)];
let (nonce_authority, nonce_authority_pubkey) = nonce_authority
.map(|a| (a.keypair(), a.pubkey()))
.unwrap_or((&config.keypair, config.keypair.pubkey()));
let mut tx = if let Some(nonce_account) = &nonce_account {
let nonce_authority: &Keypair = nonce_authority.unwrap_or(&config.keypair);
Transaction::new_signed_with_nonce(
ixs,
Some(&config.keypair.pubkey()),
@@ -733,9 +817,8 @@ pub fn process_deactivate_stake_account(
return_signers(&tx)
} else {
if let Some(nonce_account) = &nonce_account {
let nonce_authority: &Keypair = nonce_authority.unwrap_or(&config.keypair);
let nonce_account = rpc_client.get_account(nonce_account)?;
check_nonce_account(&nonce_account, &nonce_authority.pubkey(), &recent_blockhash)?;
check_nonce_account(&nonce_account, &nonce_authority_pubkey, &recent_blockhash)?;
}
check_account_for_fee(
rpc_client,
@@ -754,10 +837,12 @@ pub fn process_withdraw_stake(
stake_account_pubkey: &Pubkey,
destination_account_pubkey: &Pubkey,
lamports: u64,
withdraw_authority: Option<&Keypair>,
withdraw_authority: Option<&SigningAuthority>,
) -> ProcessResult {
let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;
let withdraw_authority = withdraw_authority.unwrap_or(&config.keypair);
let withdraw_authority = withdraw_authority
.map(|a| a.keypair())
.unwrap_or(&config.keypair);
let ixs = vec![stake_instruction::withdraw(
stake_account_pubkey,
@@ -925,19 +1010,21 @@ pub fn process_delegate_stake(
config: &CliConfig,
stake_account_pubkey: &Pubkey,
vote_account_pubkey: &Pubkey,
stake_authority: Option<&Keypair>,
stake_authority: Option<&SigningAuthority>,
force: bool,
sign_only: bool,
signers: &Option<Vec<(Pubkey, Signature)>>,
blockhash: Option<Hash>,
nonce_account: Option<Pubkey>,
nonce_authority: Option<&Keypair>,
nonce_authority: Option<&SigningAuthority>,
) -> ProcessResult {
check_unique_pubkeys(
(&config.keypair.pubkey(), "cli keypair".to_string()),
(stake_account_pubkey, "stake_account_pubkey".to_string()),
)?;
let stake_authority = stake_authority.unwrap_or(&config.keypair);
let stake_authority = stake_authority
.map(|a| a.keypair())
.unwrap_or(&config.keypair);
// Sanity check the vote account to ensure it is attached to a validator that has recently
// voted at the tip of the ledger
@@ -987,8 +1074,10 @@ pub fn process_delegate_stake(
&stake_authority.pubkey(),
vote_account_pubkey,
)];
let (nonce_authority, nonce_authority_pubkey) = nonce_authority
.map(|a| (a.keypair(), a.pubkey()))
.unwrap_or((&config.keypair, config.keypair.pubkey()));
let mut tx = if let Some(nonce_account) = &nonce_account {
let nonce_authority: &Keypair = nonce_authority.unwrap_or(&config.keypair);
Transaction::new_signed_with_nonce(
ixs,
Some(&config.keypair.pubkey()),
@@ -1012,9 +1101,8 @@ pub fn process_delegate_stake(
return_signers(&tx)
} else {
if let Some(nonce_account) = &nonce_account {
let nonce_authority: &Keypair = nonce_authority.unwrap_or(&config.keypair);
let nonce_account = rpc_client.get_account(nonce_account)?;
check_nonce_account(&nonce_account, &nonce_authority.pubkey(), &recent_blockhash)?;
check_nonce_account(&nonce_account, &nonce_authority_pubkey, &recent_blockhash)?;
}
check_account_for_fee(
rpc_client,
@@ -1067,6 +1155,11 @@ mod tests {
new_authorized_pubkey: stake_account_pubkey,
stake_authorize,
authority: None,
sign_only: false,
signers: None,
blockhash: None,
nonce_account: None,
nonce_authority: None,
},
require_keypair: true
}
@@ -1088,6 +1181,159 @@ mod tests {
new_authorized_pubkey: stake_account_pubkey,
stake_authorize,
authority: Some(read_keypair_file(&authority_keypair_file).unwrap().into()),
sign_only: false,
signers: None,
blockhash: None,
nonce_account: None,
nonce_authority: None,
},
require_keypair: true
}
);
// Test Authorize Subcommand w/ sign-only
let test_authorize = test_commands.clone().get_matches_from(vec![
"test",
&subcommand,
&stake_account_string,
&stake_account_string,
"--sign-only",
]);
assert_eq!(
parse_command(&test_authorize).unwrap(),
CliCommandInfo {
command: CliCommand::StakeAuthorize {
stake_account_pubkey,
new_authorized_pubkey: stake_account_pubkey,
stake_authorize,
authority: None,
sign_only: true,
signers: None,
blockhash: None,
nonce_account: None,
nonce_authority: None,
},
require_keypair: true
}
);
// Test Authorize Subcommand w/ signer
let keypair = Keypair::new();
let sig = keypair.sign_message(&[0u8]);
let signer = format!("{}={}", keypair.pubkey(), sig);
let test_authorize = test_commands.clone().get_matches_from(vec![
"test",
&subcommand,
&stake_account_string,
&stake_account_string,
"--signer",
&signer,
]);
assert_eq!(
parse_command(&test_authorize).unwrap(),
CliCommandInfo {
command: CliCommand::StakeAuthorize {
stake_account_pubkey,
new_authorized_pubkey: stake_account_pubkey,
stake_authorize,
authority: None,
sign_only: false,
signers: Some(vec![(keypair.pubkey(), sig)]),
blockhash: None,
nonce_account: None,
nonce_authority: None,
},
require_keypair: true
}
);
// Test Authorize Subcommand w/ signers
let keypair2 = Keypair::new();
let sig2 = keypair2.sign_message(&[0u8]);
let signer2 = format!("{}={}", keypair2.pubkey(), sig2);
let test_authorize = test_commands.clone().get_matches_from(vec![
"test",
&subcommand,
&stake_account_string,
&stake_account_string,
"--signer",
&signer,
"--signer",
&signer2,
]);
assert_eq!(
parse_command(&test_authorize).unwrap(),
CliCommandInfo {
command: CliCommand::StakeAuthorize {
stake_account_pubkey,
new_authorized_pubkey: stake_account_pubkey,
stake_authorize,
authority: None,
sign_only: false,
signers: Some(vec![(keypair.pubkey(), sig), (keypair2.pubkey(), sig2),]),
blockhash: None,
nonce_account: None,
nonce_authority: None,
},
require_keypair: true
}
);
// Test Authorize Subcommand w/ blockhash
let blockhash = Hash::default();
let blockhash_string = format!("{}", blockhash);
let test_authorize = test_commands.clone().get_matches_from(vec![
"test",
&subcommand,
&stake_account_string,
&stake_account_string,
"--blockhash",
&blockhash_string,
]);
assert_eq!(
parse_command(&test_authorize).unwrap(),
CliCommandInfo {
command: CliCommand::StakeAuthorize {
stake_account_pubkey,
new_authorized_pubkey: stake_account_pubkey,
stake_authorize,
authority: None,
sign_only: false,
signers: None,
blockhash: Some(blockhash),
nonce_account: None,
nonce_authority: None,
},
require_keypair: true
}
);
// Test Authorize Subcommand w/ nonce
let (nonce_keypair_file, mut nonce_tmp_file) = make_tmp_file();
let nonce_authority_keypair = Keypair::new();
write_keypair(&nonce_authority_keypair, nonce_tmp_file.as_file_mut()).unwrap();
let nonce_account_pubkey = nonce_authority_keypair.pubkey();
let nonce_account_string = nonce_account_pubkey.to_string();
let test_authorize = test_commands.clone().get_matches_from(vec![
"test",
&subcommand,
&stake_account_string,
&stake_account_string,
"--blockhash",
&blockhash_string,
"--nonce",
&nonce_account_string,
"--nonce-authority",
&nonce_keypair_file,
]);
assert_eq!(
parse_command(&test_authorize).unwrap(),
CliCommandInfo {
command: CliCommand::StakeAuthorize {
stake_account_pubkey,
new_authorized_pubkey: stake_account_pubkey,
stake_authorize,
authority: None,
sign_only: false,
signers: None,
blockhash: Some(blockhash),
nonce_account: Some(nonce_account_pubkey),
nonce_authority: Some(nonce_authority_keypair.into()),
},
require_keypair: true
}

View File

@@ -1,5 +1,5 @@
use solana_cli::cli::{
process_command, request_and_confirm_airdrop, CliCommand, CliConfig, KeypairEq,
process_command, request_and_confirm_airdrop, CliCommand, CliConfig, SigningAuthority,
};
use solana_client::rpc_client::RpcClient;
use solana_faucet::faucet::run_local_faucet;
@@ -104,7 +104,7 @@ fn test_nonce_with_authority() {
remove_dir_all(ledger_path).unwrap();
}
fn read_keypair_from_option(keypair_file: &Option<&str>) -> Option<KeypairEq> {
fn read_keypair_from_option(keypair_file: &Option<&str>) -> Option<SigningAuthority> {
keypair_file.map(|akf| read_keypair_file(&akf).unwrap().into())
}
@@ -129,7 +129,7 @@ fn full_battery_tests(
config_payer.command = CliCommand::CreateNonceAccount {
nonce_account: read_keypair_file(&nonce_keypair_file).unwrap().into(),
nonce_authority: read_keypair_from_option(&authority_keypair_file)
.map(|na: KeypairEq| na.pubkey()),
.map(|na: SigningAuthority| na.pubkey()),
lamports: 1000,
};
process_command(&config_payer).unwrap();

View File

@@ -39,6 +39,23 @@ fn check_balance(expected_balance: u64, client: &RpcClient, pubkey: &Pubkey) {
});
}
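// Parse the JSON reply produced by sign-only commands: a "blockhash" field
// plus a "signers" array of "<PUBKEY>=<BASE58_SIG>" strings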
fn parse_sign_only_reply_string(reply: &str) -> (Hash, Vec<(Pubkey, Signature)>) {
let object: Value = serde_json::from_str(&reply).unwrap();
let blockhash_str = object.get("blockhash").unwrap().as_str().unwrap();
let blockhash = blockhash_str.parse::<Hash>().unwrap();
let signer_strings = object.get("signers").unwrap().as_array().unwrap();
let signers = signer_strings
.iter()
.map(|signer_string| {
let mut signer = signer_string.as_str().unwrap().split('=');
let key = Pubkey::from_str(signer.next().unwrap()).unwrap();
let sig = Signature::from_str(signer.next().unwrap()).unwrap();
(key, sig)
})
.collect();
(blockhash, signers)
}
#[test]
fn test_stake_delegation_and_deactivation() {
solana_logger::setup();
@@ -198,18 +215,7 @@ fn test_stake_delegation_and_deactivation_offline() {
nonce_authority: None,
};
let sig_response = process_command(&config_validator).unwrap();
let object: Value = serde_json::from_str(&sig_response).unwrap();
let blockhash_str = object.get("blockhash").unwrap().as_str().unwrap();
let signer_strings = object.get("signers").unwrap().as_array().unwrap();
let signers: Vec<_> = signer_strings
.iter()
.map(|signer_string| {
let mut signer = signer_string.as_str().unwrap().split('=');
let key = Pubkey::from_str(signer.next().unwrap()).unwrap();
let sig = Signature::from_str(signer.next().unwrap()).unwrap();
(key, sig)
})
.collect();
let (blockhash, signers) = parse_sign_only_reply_string(&sig_response);
// Delegate stake online
config_payer.command = CliCommand::DelegateStake {
@@ -219,7 +225,7 @@ fn test_stake_delegation_and_deactivation_offline() {
force: true,
sign_only: false,
signers: Some(signers),
blockhash: Some(blockhash_str.parse::<Hash>().unwrap()),
blockhash: Some(blockhash),
nonce_account: None,
nonce_authority: None,
};
@@ -236,18 +242,7 @@ fn test_stake_delegation_and_deactivation_offline() {
nonce_authority: None,
};
let sig_response = process_command(&config_validator).unwrap();
let object: Value = serde_json::from_str(&sig_response).unwrap();
let blockhash_str = object.get("blockhash").unwrap().as_str().unwrap();
let signer_strings = object.get("signers").unwrap().as_array().unwrap();
let signers: Vec<_> = signer_strings
.iter()
.map(|signer_string| {
let mut signer = signer_string.as_str().unwrap().split('=');
let key = Pubkey::from_str(signer.next().unwrap()).unwrap();
let sig = Signature::from_str(signer.next().unwrap()).unwrap();
(key, sig)
})
.collect();
let (blockhash, signers) = parse_sign_only_reply_string(&sig_response);
// Deactivate stake online
config_payer.command = CliCommand::DeactivateStake {
@@ -255,7 +250,7 @@ fn test_stake_delegation_and_deactivation_offline() {
stake_authority: None,
sign_only: false,
signers: Some(signers),
blockhash: Some(blockhash_str.parse::<Hash>().unwrap()),
blockhash: Some(blockhash),
nonce_account: None,
nonce_authority: None,
};
@@ -411,6 +406,11 @@ fn test_stake_authorize() {
new_authorized_pubkey: online_authority_pubkey,
stake_authorize: StakeAuthorize::Staker,
authority: None,
sign_only: false,
signers: None,
blockhash: None,
nonce_account: None,
nonce_authority: None,
};
process_command(&config).unwrap();
let stake_account = rpc_client.get_account(&stake_account_pubkey).unwrap();
@@ -424,13 +424,18 @@ fn test_stake_authorize() {
// Assign new offline stake authority
let offline_authority = Keypair::new();
let offline_authority_pubkey = offline_authority.pubkey();
let (_offline_authority_file, mut tmp_file) = make_tmp_file();
let (offline_authority_file, mut tmp_file) = make_tmp_file();
write_keypair(&offline_authority, tmp_file.as_file_mut()).unwrap();
config.command = CliCommand::StakeAuthorize {
stake_account_pubkey,
new_authorized_pubkey: offline_authority_pubkey,
stake_authorize: StakeAuthorize::Staker,
authority: Some(read_keypair_file(&online_authority_file).unwrap().into()),
sign_only: false,
signers: None,
blockhash: None,
nonce_account: None,
nonce_authority: None,
};
process_command(&config).unwrap();
let stake_account = rpc_client.get_account(&stake_account_pubkey).unwrap();
@@ -441,6 +446,111 @@ fn test_stake_authorize() {
};
assert_eq!(current_authority, offline_authority_pubkey);
// Offline assignment of new nonced stake authority
let nonced_authority = Keypair::new();
let nonced_authority_pubkey = nonced_authority.pubkey();
let (nonced_authority_file, mut tmp_file) = make_tmp_file();
write_keypair(&nonced_authority, tmp_file.as_file_mut()).unwrap();
config.command = CliCommand::StakeAuthorize {
stake_account_pubkey,
new_authorized_pubkey: nonced_authority_pubkey,
stake_authorize: StakeAuthorize::Staker,
authority: Some(read_keypair_file(&offline_authority_file).unwrap().into()),
sign_only: true,
signers: None,
blockhash: None,
nonce_account: None,
nonce_authority: None,
};
let sign_reply = process_command(&config).unwrap();
let (blockhash, signers) = parse_sign_only_reply_string(&sign_reply);
config.command = CliCommand::StakeAuthorize {
stake_account_pubkey,
new_authorized_pubkey: nonced_authority_pubkey,
stake_authorize: StakeAuthorize::Staker,
authority: Some(offline_authority_pubkey.into()),
sign_only: false,
signers: Some(signers),
blockhash: Some(blockhash),
nonce_account: None,
nonce_authority: None,
};
process_command(&config).unwrap();
let stake_account = rpc_client.get_account(&stake_account_pubkey).unwrap();
let stake_state: StakeState = stake_account.state().unwrap();
let current_authority = match stake_state {
StakeState::Initialized(meta) => meta.authorized.staker,
_ => panic!("Unexpected stake state!"),
};
assert_eq!(current_authority, nonced_authority_pubkey);
// Create nonce account
let minimum_nonce_balance = rpc_client
.get_minimum_balance_for_rent_exemption(NonceState::size())
.unwrap();
let nonce_account = Keypair::new();
let (nonce_keypair_file, mut tmp_file) = make_tmp_file();
write_keypair(&nonce_account, tmp_file.as_file_mut()).unwrap();
config.command = CliCommand::CreateNonceAccount {
nonce_account: read_keypair_file(&nonce_keypair_file).unwrap().into(),
nonce_authority: Some(config.keypair.pubkey()),
lamports: minimum_nonce_balance,
};
process_command(&config).unwrap();
// Fetch nonce hash
let account = rpc_client.get_account(&nonce_account.pubkey()).unwrap();
let nonce_state: NonceState = account.state().unwrap();
let nonce_hash = match nonce_state {
NonceState::Initialized(_meta, hash) => hash,
_ => panic!("Nonce is not initialized"),
};
// Nonced assignment of new online stake authority
let online_authority = Keypair::new();
let online_authority_pubkey = online_authority.pubkey();
let (_online_authority_file, mut tmp_file) = make_tmp_file();
write_keypair(&online_authority, tmp_file.as_file_mut()).unwrap();
config.command = CliCommand::StakeAuthorize {
stake_account_pubkey,
new_authorized_pubkey: online_authority_pubkey,
stake_authorize: StakeAuthorize::Staker,
authority: Some(read_keypair_file(&nonced_authority_file).unwrap().into()),
sign_only: true,
signers: None,
blockhash: Some(nonce_hash),
nonce_account: Some(nonce_account.pubkey()),
nonce_authority: None,
};
let sign_reply = process_command(&config).unwrap();
let (blockhash, signers) = parse_sign_only_reply_string(&sign_reply);
assert_eq!(blockhash, nonce_hash);
config.command = CliCommand::StakeAuthorize {
stake_account_pubkey,
new_authorized_pubkey: online_authority_pubkey,
stake_authorize: StakeAuthorize::Staker,
authority: Some(nonced_authority_pubkey.into()),
sign_only: false,
signers: Some(signers),
blockhash: Some(blockhash),
nonce_account: Some(nonce_account.pubkey()),
nonce_authority: None,
};
process_command(&config).unwrap();
let stake_account = rpc_client.get_account(&stake_account_pubkey).unwrap();
let stake_state: StakeState = stake_account.state().unwrap();
let current_authority = match stake_state {
StakeState::Initialized(meta) => meta.authorized.staker,
_ => panic!("Unexpected stake state!"),
};
assert_eq!(current_authority, online_authority_pubkey);
let account = rpc_client.get_account(&nonce_account.pubkey()).unwrap();
let nonce_state: NonceState = account.state().unwrap();
let new_nonce_hash = match nonce_state {
NonceState::Initialized(_meta, hash) => hash,
_ => panic!("Nonce is not initialized"),
};
assert_ne!(nonce_hash, new_nonce_hash);
server.close().unwrap();
remove_dir_all(ledger_path).unwrap();
}
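The test above exercises the full offline/nonced flow: a sign-only pass produces a blockhash and signer list, and a second, online pass replays them; the assert_eq!(blockhash, nonce_hash) check pins both passes to the same durable nonce. A minimal sketch of that two-phase hand-off, using hypothetical stand-in types rather than this repo's CLI API:

// Illustrative sketch (hypothetical types, not this repo's API) of the
// two-phase offline-signing flow exercised by the test above.
#[derive(Clone, Debug, PartialEq)]
struct NonceHash([u8; 32]);

struct OfflineReply {
    blockhash: NonceHash,
    signers: Vec<(String, String)>, // (pubkey, signature) pairs
}

fn sign_offline(nonce_hash: NonceHash) -> OfflineReply {
    // The offline host needs no cluster connectivity; it signs against the
    // nonce hash handed to it out of band.
    OfflineReply {
        blockhash: nonce_hash,
        signers: vec![("authority-pubkey".into(), "authority-signature".into())],
    }
}

fn submit_online(reply: OfflineReply, expected_nonce: &NonceHash) {
    // Mirror the test's assert_eq!(blockhash, nonce_hash): the online host
    // must build against the same nonce the offline signer used.
    assert_eq!(&reply.blockhash, expected_nonce);
    assert!(!reply.signers.is_empty());
    // ...attach reply.signers to the transaction and send it...
}

fn main() {
    let nonce = NonceHash([7u8; 32]);
    let reply = sign_offline(nonce.clone());
    submit_online(reply, &nonce);
}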


@@ -1,6 +1,6 @@
[package]
name = "solana-client"
version = "0.22.3"
version = "0.22.8"
description = "Solana Client"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -19,11 +19,11 @@ reqwest = { version = "0.9.24", default-features = false, features = ["rustls-tl
serde = "1.0.104"
serde_derive = "1.0.103"
serde_json = "1.0.44"
solana-net-utils = { path = "../net-utils", version = "0.22.3" }
solana-sdk = { path = "../sdk", version = "0.22.3" }
solana-net-utils = { path = "../net-utils", version = "0.22.8" }
solana-sdk = { path = "../sdk", version = "0.22.8" }
[dev-dependencies]
assert_matches = "1.3.0"
jsonrpc-core = "14.0.5"
jsonrpc-http-server = "14.0.5"
solana-logger = { path = "../logger", version = "0.22.3" }
solana-logger = { path = "../logger", version = "0.22.8" }


@@ -386,6 +386,25 @@ impl RpcClient {
})
}
pub fn minimum_ledger_slot(&self) -> io::Result<Slot> {
let response = self
.client
.send(&RpcRequest::MinimumLedgerSlot, Value::Null, 0)
.map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
format!("MinimumLedgerSlot request failure: {:?}", err),
)
})?;
serde_json::from_value(response).map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
format!("MinimumLedgerSlot parse failure: {}", err),
)
})
}
pub fn send_and_confirm_transaction<T: KeypairUtil>(
&self,
transaction: &mut Transaction,

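A minimal usage sketch for the new client method, assuming a locally running validator with RPC enabled on the default port 8899:

use solana_client::rpc_client::RpcClient;

fn main() -> std::io::Result<()> {
    // Assumption: a validator is serving RPC at this address.
    let rpc = RpcClient::new("http://127.0.0.1:8899".to_string());
    // Returns the lowest slot the node still holds in its local ledger,
    // surfaced over the wire as the minimumLedgerSlot RPC method below.
    let slot = rpc.minimum_ledger_slot()?;
    println!("minimum ledger slot: {}", slot);
    Ok(())
}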

@@ -35,6 +35,7 @@ pub enum RpcRequest {
SendTransaction,
SignVote,
GetMinimumBalanceForRentExemption,
MinimumLedgerSlot,
}
impl RpcRequest {
@@ -75,6 +76,7 @@ impl RpcRequest {
RpcRequest::SendTransaction => "sendTransaction",
RpcRequest::SignVote => "signVote",
RpcRequest::GetMinimumBalanceForRentExemption => "getMinimumBalanceForRentExemption",
RpcRequest::MinimumLedgerSlot => "minimumLedgerSlot",
};
json!({
"jsonrpc": jsonrpc,


@@ -1,7 +1,7 @@
[package]
name = "solana-core"
description = "Blockchain, Rebuilt for Scale"
version = "0.22.3"
version = "0.22.8"
documentation = "https://docs.rs/solana"
homepage = "https://solana.com/"
readme = "../README.md"
@@ -41,26 +41,26 @@ rayon = "1.2.0"
serde = "1.0.104"
serde_derive = "1.0.103"
serde_json = "1.0.44"
solana-budget-program = { path = "../programs/budget", version = "0.22.3" }
solana-clap-utils = { path = "../clap-utils", version = "0.22.3" }
solana-chacha-sys = { path = "../chacha-sys", version = "0.22.3" }
solana-client = { path = "../client", version = "0.22.3" }
solana-faucet = { path = "../faucet", version = "0.22.3" }
solana-budget-program = { path = "../programs/budget", version = "0.22.8" }
solana-clap-utils = { path = "../clap-utils", version = "0.22.8" }
solana-chacha-sys = { path = "../chacha-sys", version = "0.22.8" }
solana-client = { path = "../client", version = "0.22.8" }
solana-faucet = { path = "../faucet", version = "0.22.8" }
ed25519-dalek = "=1.0.0-pre.1"
solana-ledger = { path = "../ledger", version = "0.22.3" }
solana-logger = { path = "../logger", version = "0.22.3" }
solana-merkle-tree = { path = "../merkle-tree", version = "0.22.3" }
solana-metrics = { path = "../metrics", version = "0.22.3" }
solana-measure = { path = "../measure", version = "0.22.3" }
solana-net-utils = { path = "../net-utils", version = "0.22.3" }
solana-perf = { path = "../perf", version = "0.22.3" }
solana-runtime = { path = "../runtime", version = "0.22.3" }
solana-sdk = { path = "../sdk", version = "0.22.3" }
solana-stake-program = { path = "../programs/stake", version = "0.22.3" }
solana-storage-program = { path = "../programs/storage", version = "0.22.3" }
solana-vote-program = { path = "../programs/vote", version = "0.22.3" }
solana-vote-signer = { path = "../vote-signer", version = "0.22.3" }
solana-sys-tuner = { path = "../sys-tuner", version = "0.22.3" }
solana-ledger = { path = "../ledger", version = "0.22.8" }
solana-logger = { path = "../logger", version = "0.22.8" }
solana-merkle-tree = { path = "../merkle-tree", version = "0.22.8" }
solana-metrics = { path = "../metrics", version = "0.22.8" }
solana-measure = { path = "../measure", version = "0.22.8" }
solana-net-utils = { path = "../net-utils", version = "0.22.8" }
solana-perf = { path = "../perf", version = "0.22.8" }
solana-runtime = { path = "../runtime", version = "0.22.8" }
solana-sdk = { path = "../sdk", version = "0.22.8" }
solana-stake-program = { path = "../programs/stake", version = "0.22.8" }
solana-storage-program = { path = "../programs/storage", version = "0.22.8" }
solana-vote-program = { path = "../programs/vote", version = "0.22.8" }
solana-vote-signer = { path = "../vote-signer", version = "0.22.8" }
solana-sys-tuner = { path = "../sys-tuner", version = "0.22.8" }
symlink = "0.1.0"
sys-info = "0.5.8"
tempfile = "3.1.0"
@@ -69,7 +69,7 @@ tokio-codec = "0.1"
tokio-fs = "0.1"
tokio-io = "0.1"
untrusted = "0.7.0"
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "0.22.3" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "0.22.8" }
reed-solomon-erasure = { package = "solana-reed-solomon-erasure", version = "4.0.1-3", features = ["simd-accel"] }
[dev-dependencies]


@@ -556,6 +556,8 @@ impl Archiver {
let mut contact_info = node_info.clone();
contact_info.tvu = "0.0.0.0:0".parse().unwrap();
contact_info.wallclock = timestamp();
// copy over the adopted shred_version from the entrypoint
contact_info.shred_version = cluster_info.read().unwrap().my_data().shred_version;
{
let mut cluster_info_w = cluster_info.write().unwrap();
cluster_info_w.insert_self(contact_info);
@@ -740,7 +742,7 @@ impl Archiver {
) -> result::Result<u64, Error> {
let rpc_peers = {
let cluster_info = cluster_info.read().unwrap();
cluster_info.rpc_peers()
cluster_info.all_rpc_peers()
};
debug!("rpc peers: {:?}", rpc_peers);
if !rpc_peers.is_empty() {
@@ -796,7 +798,7 @@ impl Archiver {
loop {
let rpc_peers = {
let cluster_info = cluster_info.read().unwrap();
cluster_info.rpc_peers()
cluster_info.all_rpc_peers()
};
debug!("rpc peers: {:?}", rpc_peers);
if !rpc_peers.is_empty() {


@@ -272,7 +272,7 @@ impl ClusterInfo {
let ip_addr = node.gossip.ip();
format!(
"{:15} {:2}| {:5} | {:44} | {:5}| {:5}| {:5} | {:5}| {:5} | {:5}| {:5} | {:5}| {:5}| v{}\n",
"{:15} {:2}| {:5} | {:44} | {:5}| {:5}| {:5} | {:5}| {:5} | {:5}| {:5} | {:5}| {:5}| {}\n",
if ContactInfo::is_valid_address(&node.gossip) {
ip_addr.to_string()
} else {
@@ -405,7 +405,8 @@ impl ClusterInfo {
.map(|x| x.value.contact_info().unwrap())
}
pub fn rpc_peers(&self) -> Vec<ContactInfo> {
/// all validators that have a valid rpc port regardless of `shred_version`.
pub fn all_rpc_peers(&self) -> Vec<ContactInfo> {
let me = self.my_data();
self.gossip
.crds
@@ -440,13 +441,15 @@ impl ClusterInfo {
.values()
.filter_map(|x| x.value.contact_info())
.filter(|x| x.id != me)
/* shred_version not considered for gossip peers (i.e., spy nodes do not set
shred_version) */
.filter(|x| ContactInfo::is_valid_address(&x.gossip))
.cloned()
.collect()
}
/// all validators that have a valid tvu port.
pub fn tvu_peers(&self) -> Vec<ContactInfo> {
/// all validators that have a valid tvu port regardless of `shred_version`.
pub fn all_tvu_peers(&self) -> Vec<ContactInfo> {
let me = self.my_data();
self.gossip
.crds
@@ -460,7 +463,37 @@ impl ClusterInfo {
.collect()
}
/// all peers that have a valid storage addr
/// all validators that have a valid tvu port and are on the same `shred_version`.
pub fn tvu_peers(&self) -> Vec<ContactInfo> {
let me = self.my_data();
self.gossip
.crds
.table
.values()
.filter_map(|x| x.value.contact_info())
.filter(|x| ContactInfo::is_valid_address(&x.tvu))
.filter(|x| !ClusterInfo::is_archiver(x))
.filter(|x| x.id != me.id)
.filter(|x| x.shred_version == me.shred_version)
.cloned()
.collect()
}
/// all peers that have a valid storage addr regardless of `shred_version`.
pub fn all_storage_peers(&self) -> Vec<ContactInfo> {
let me = self.my_data();
self.gossip
.crds
.table
.values()
.filter_map(|x| x.value.contact_info())
.filter(|x| ContactInfo::is_valid_address(&x.storage_addr))
.filter(|x| x.id != me.id)
.cloned()
.collect()
}
/// all peers that have a valid storage addr and are on the same `shred_version`.
pub fn storage_peers(&self) -> Vec<ContactInfo> {
let me = self.my_data();
self.gossip
@@ -470,6 +503,7 @@ impl ClusterInfo {
.filter_map(|x| x.value.contact_info())
.filter(|x| ContactInfo::is_valid_address(&x.storage_addr))
.filter(|x| x.id != me.id)
.filter(|x| x.shred_version == me.shred_version)
.cloned()
.collect()
}
@@ -483,6 +517,7 @@ impl ClusterInfo {
.values()
.filter_map(|x| x.value.contact_info())
.filter(|x| x.id != me.id)
.filter(|x| x.shred_version == me.shred_version)
.filter(|x| ContactInfo::is_valid_address(&x.tvu))
.filter(|x| ContactInfo::is_valid_address(&x.tvu_forwards))
.cloned()
@@ -495,6 +530,7 @@ impl ClusterInfo {
ClusterInfo::tvu_peers(self)
.into_iter()
.filter(|x| x.id != me.id)
.filter(|x| x.shred_version == me.shred_version)
.filter(|x| ContactInfo::is_valid_address(&x.gossip))
.filter(|x| {
self.get_epoch_state_for_node(&x.id, None)
@@ -1057,6 +1093,7 @@ impl ClusterInfo {
.spawn(move || {
let mut last_push = timestamp();
let mut last_contact_info_trace = timestamp();
let mut adopt_shred_version = obj.read().unwrap().my_data().shred_version == 0;
let recycler = PacketsRecycler::default();
loop {
let start = timestamp();
@@ -1094,9 +1131,32 @@ impl ClusterInfo {
let table_size = obj.read().unwrap().gossip.crds.table.len();
datapoint_debug!(
"cluster_info-purge",
("tabel_size", table_size as i64, i64),
("table_size", table_size as i64, i64),
("purge_stake_timeout", timeout as i64, i64)
);
// Adopt the entrypoint's `shred_version` if ours is unset
if adopt_shred_version {
// If gossip was given an entrypoint, look up its id
let entrypoint_id = obj.read().unwrap().entrypoint.as_ref().map(|e| e.id);
if let Some(entrypoint_id) = entrypoint_id {
// If a pull from the entrypoint was successful, it should exist in the crds table
let entrypoint = obj.read().unwrap().lookup(&entrypoint_id).cloned();
if let Some(entrypoint) = entrypoint {
let mut self_info = obj.read().unwrap().my_data();
if entrypoint.shred_version == 0 {
info!("Unable to adopt entrypoint's shred version");
} else {
info!(
"Setting shred version to {:?} from entrypoint {:?}",
entrypoint.shred_version, entrypoint.id
);
self_info.shred_version = entrypoint.shred_version;
obj.write().unwrap().insert_self(self_info);
adopt_shred_version = false;
}
}
}
}
//TODO: possibly tune this parameter
//we saw a deadlock passing an obj.read().unwrap().timeout into sleep
if start - last_push > CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS / 2 {
@@ -1227,10 +1287,12 @@ impl ClusterInfo {
stakes: &HashMap<Pubkey, u64>,
packets: Packets,
response_sender: &PacketSender,
epoch_ms: u64,
) {
// iter over the packets, collect pulls separately and process everything else
let allocated = thread_mem_usage::Allocatedp::default();
let mut gossip_pull_data: Vec<PullData> = vec![];
let timeouts = me.read().unwrap().gossip.make_timeouts(&stakes, epoch_ms);
packets.packets.iter().for_each(|packet| {
let from_addr = packet.meta.addr();
limited_deserialize(&packet.data[..packet.meta.size])
@@ -1272,7 +1334,7 @@ impl ClusterInfo {
}
ret
});
Self::handle_pull_response(me, &from, data);
Self::handle_pull_response(me, &from, data, &timeouts);
datapoint_debug!(
"solana-gossip-listen-memory",
("pull_response", (allocated.get() - start) as i64, i64),
@@ -1391,7 +1453,12 @@ impl ClusterInfo {
Some(packets)
}
fn handle_pull_response(me: &Arc<RwLock<Self>>, from: &Pubkey, data: Vec<CrdsValue>) {
fn handle_pull_response(
me: &Arc<RwLock<Self>>,
from: &Pubkey,
data: Vec<CrdsValue>,
timeouts: &HashMap<Pubkey, u64>,
) {
let len = data.len();
let now = Instant::now();
let self_id = me.read().unwrap().gossip.id;
@@ -1399,7 +1466,7 @@ impl ClusterInfo {
me.write()
.unwrap()
.gossip
.process_pull_response(from, data, timestamp());
.process_pull_response(from, timeouts, data, timestamp());
inc_new_counter_debug!("cluster_info-pull_request_response", 1);
inc_new_counter_debug!("cluster_info-pull_request_response-size", len);
@@ -1569,14 +1636,31 @@ impl ClusterInfo {
//TODO cache connections
let timeout = Duration::new(1, 0);
let reqs = requests_receiver.recv_timeout(timeout)?;
let epoch_ms;
let stakes: HashMap<_, _> = match bank_forks {
Some(ref bank_forks) => {
staking_utils::staked_nodes(&bank_forks.read().unwrap().working_bank())
let bank = bank_forks.read().unwrap().working_bank();
let epoch = bank.epoch();
let epoch_schedule = bank.epoch_schedule();
epoch_ms = epoch_schedule.get_slots_in_epoch(epoch) * DEFAULT_MS_PER_SLOT;
staking_utils::staked_nodes(&bank)
}
None => {
inc_new_counter_info!("cluster_info-purge-no_working_bank", 1);
epoch_ms = CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS;
HashMap::new()
}
None => HashMap::new(),
};
Self::handle_packets(obj, &recycler, blockstore, &stakes, reqs, response_sender);
Self::handle_packets(
obj,
&recycler,
blockstore,
&stakes,
reqs,
response_sender,
epoch_ms,
);
Ok(())
}
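For scale, a worked example of the epoch_ms value computed above, assuming the SDK's default 400 ms slot time (the real code imports DEFAULT_MS_PER_SLOT rather than hard-coding it):

fn main() {
    const DEFAULT_MS_PER_SLOT: u64 = 400; // assumption: mirrors solana-sdk's default
    let slots_in_epoch: u64 = 8_192;
    // An 8,192-slot epoch yields a ~55 minute CRDS timeout horizon for staked nodes.
    assert_eq!(slots_in_epoch * DEFAULT_MS_PER_SLOT, 3_276_800);
}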
pub fn listen(
@@ -1616,6 +1700,30 @@ impl ClusterInfo {
.unwrap()
}
fn gossip_contact_info(id: &Pubkey, gossip_addr: SocketAddr) -> ContactInfo {
let dummy_addr = socketaddr_any!();
ContactInfo::new(
id,
gossip_addr,
dummy_addr,
dummy_addr,
dummy_addr,
dummy_addr,
dummy_addr,
dummy_addr,
dummy_addr,
dummy_addr,
timestamp(),
)
}
pub fn spy_contact_info(id: &Pubkey) -> ContactInfo {
let dummy_addr = socketaddr_any!();
Self::gossip_contact_info(id, dummy_addr)
}
/// An alternative to Spy Node that has a valid gossip address and fully participates in Gossip.
pub fn gossip_node(
id: &Pubkey,
@@ -1623,43 +1731,17 @@ impl ClusterInfo {
) -> (ContactInfo, UdpSocket, Option<TcpListener>) {
let (port, (gossip_socket, ip_echo)) =
Node::get_gossip_port(gossip_addr, VALIDATOR_PORT_RANGE);
let daddr = socketaddr_any!();
let contact_info = Self::gossip_contact_info(id, SocketAddr::new(gossip_addr.ip(), port));
let node = ContactInfo::new(
id,
SocketAddr::new(gossip_addr.ip(), port),
daddr,
daddr,
daddr,
daddr,
daddr,
daddr,
daddr,
daddr,
timestamp(),
);
(node, gossip_socket, Some(ip_echo))
(contact_info, gossip_socket, Some(ip_echo))
}
/// A Node with invalid ports to spy on gossip via pull requests
/// A Node with dummy ports to spy on gossip via pull requests
pub fn spy_node(id: &Pubkey) -> (ContactInfo, UdpSocket, Option<TcpListener>) {
let (_, gossip_socket) = bind_in_range(VALIDATOR_PORT_RANGE).unwrap();
let daddr = socketaddr_any!();
let contact_info = Self::spy_contact_info(id);
let node = ContactInfo::new(
id,
daddr,
daddr,
daddr,
daddr,
daddr,
daddr,
daddr,
daddr,
daddr,
timestamp(),
);
(node, gossip_socket, None)
(contact_info, gossip_socket, None)
}
}
@@ -2539,10 +2621,12 @@ mod tests {
let entrypoint_crdsvalue =
CrdsValue::new_unsigned(CrdsData::ContactInfo(entrypoint.clone()));
let cluster_info = Arc::new(RwLock::new(cluster_info));
let timeouts = cluster_info.read().unwrap().gossip.make_timeouts_test();
ClusterInfo::handle_pull_response(
&cluster_info,
&entrypoint_pubkey,
vec![entrypoint_crdsvalue],
&timeouts,
);
let pulls = cluster_info
.write()
@@ -2665,6 +2749,14 @@ mod tests {
cluster_info.insert_info(contact_info);
stakes.insert(id3, 10);
// normal but with different shred version
let id4 = Pubkey::new(&[4u8; 32]);
let mut contact_info = ContactInfo::new_localhost(&id4, timestamp());
contact_info.shred_version = 1;
assert_ne!(contact_info.shred_version, d.shred_version);
cluster_info.insert_info(contact_info.clone());
stakes.insert(id4, 10);
let stakes = Arc::new(stakes);
let (peers, peers_and_stakes) = cluster_info.sorted_tvu_peers_and_stakes(Some(stakes));
assert_eq!(peers.len(), 2);
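The expected count stays at 2 even after inserting id4, because its mismatched shred_version now excludes it from tvu_peers(). A compact sketch of the gate the same-cluster accessors share, with a hypothetical Peer type standing in for ContactInfo:

// Hypothetical stand-in for ContactInfo.
#[derive(Clone)]
struct Peer {
    id: u64,
    shred_version: u16,
}

// The predicate pair the same-cluster accessors now apply; the all_*
// variants keep only the id check and drop the shred_version test.
fn same_cluster<'a>(me: &'a Peer, peers: &'a [Peer]) -> impl Iterator<Item = &'a Peer> {
    peers
        .iter()
        .filter(move |p| p.id != me.id && p.shred_version == me.shred_version)
}

fn main() {
    let me = Peer { id: 0, shred_version: 2 };
    let peers = vec![
        Peer { id: 1, shred_version: 2 },
        Peer { id: 2, shred_version: 1 }, // excluded: different shred_version
    ];
    assert_eq!(same_cluster(&me, &peers).count(), 1);
}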


@@ -135,7 +135,7 @@ impl ClusterInfoRepairListener {
}
let lowest_slot = blockstore.lowest_slot();
let peers = cluster_info.read().unwrap().gossip_peers();
let peers = cluster_info.read().unwrap().tvu_peers();
let mut peers_needing_repairs: HashMap<Pubkey, EpochSlots> = HashMap::new();
// Iterate through all the known nodes in the network, looking for ones that


@@ -321,12 +321,21 @@ impl Tower {
if let Some(fork_stake) = stake_lockouts.get(&vote.slot) {
let lockout = fork_stake.stake as f64 / total_staked as f64;
trace!(
"fork_stake {} {} {} {}",
"fork_stake slot: {} lockout: {} fork_stake: {} total_stake: {}",
slot,
lockout,
fork_stake.stake,
total_staked
);
if vote.confirmation_count as usize > self.threshold_depth {
for old_vote in &self.lockouts.votes {
if old_vote.slot == vote.slot
&& old_vote.confirmation_count == vote.confirmation_count
{
return true;
}
}
}
lockout > self.threshold_size
} else {
false
@@ -542,6 +551,24 @@ mod test {
assert!(tower.check_vote_stake_threshold(0, &stakes, 2));
}
#[test]
fn test_check_vote_threshold_no_skip_lockout_with_new_root() {
solana_logger::setup();
let mut tower = Tower::new_for_tests(4, 0.67);
let mut stakes = HashMap::new();
for i in 0..(MAX_LOCKOUT_HISTORY as u64 + 1) {
stakes.insert(
i,
StakeLockout {
stake: 1,
lockout: 8,
},
);
tower.record_vote(i, Hash::default());
}
assert!(!tower.check_vote_stake_threshold(MAX_LOCKOUT_HISTORY as u64 + 1, &stakes, 2));
}
#[test]
fn test_is_slot_confirmed_not_enough_stake_failure() {
let tower = Tower::new_for_tests(1, 0.67);
@@ -742,6 +769,34 @@ mod test {
assert!(!tower.check_vote_stake_threshold(1, &stakes, 2));
}
#[test]
fn test_check_vote_threshold_lockouts_not_updated() {
solana_logger::setup();
let mut tower = Tower::new_for_tests(1, 0.67);
let stakes = vec![
(
0,
StakeLockout {
stake: 1,
lockout: 8,
},
),
(
1,
StakeLockout {
stake: 2,
lockout: 8,
},
),
]
.into_iter()
.collect();
tower.record_vote(0, Hash::default());
tower.record_vote(1, Hash::default());
tower.record_vote(2, Hash::default());
assert!(tower.check_vote_stake_threshold(6, &stakes, 2));
}
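A sketch of the rule the tests above pin down, with a hypothetical Vote type: when the simulated vote leaves some existing lockout's (slot, confirmation_count) pair unchanged, the threshold check is treated as already satisfied rather than re-weighed against stake.

#[derive(PartialEq, Clone, Copy)]
struct Vote {
    slot: u64,
    confirmation_count: u32,
}

// True when applying the new vote did not deepen any lockout, i.e. an
// identical (slot, confirmation_count) entry already exists.
fn lockouts_unchanged(old_votes: &[Vote], simulated: Vote) -> bool {
    old_votes.iter().any(|v| *v == simulated)
}

fn main() {
    let lockouts = [Vote { slot: 0, confirmation_count: 3 }];
    assert!(lockouts_unchanged(&lockouts, Vote { slot: 0, confirmation_count: 3 }));
    assert!(!lockouts_unchanged(&lockouts, Vote { slot: 0, confirmation_count: 4 }));
}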
#[test]
fn test_lockout_is_updated_for_entire_branch() {
let mut stake_lockouts = HashMap::new();


@@ -156,11 +156,12 @@ impl CrdsGossip {
pub fn process_pull_response(
&mut self,
from: &Pubkey,
timeouts: &HashMap<Pubkey, u64>,
response: Vec<CrdsValue>,
now: u64,
) -> usize {
self.pull
.process_pull_response(&mut self.crds, from, response, now)
.process_pull_response(&mut self.crds, from, timeouts, response, now)
}
pub fn make_timeouts_test(&self) -> HashMap<Pubkey, u64> {


@@ -25,6 +25,8 @@ use std::collections::HashMap;
use std::collections::VecDeque;
pub const CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS: u64 = 15000;
// The maximum age of a value received over pull responses
pub const CRDS_GOSSIP_PULL_MSG_TIMEOUT_MS: u64 = 60000;
pub const FALSE_RATE: f64 = 0.1f64;
pub const KEYS: f64 = 8f64;
@@ -117,6 +119,7 @@ pub struct CrdsGossipPull {
/// hash and insert time
purged_values: VecDeque<(Hash, u64)>,
pub crds_timeout: u64,
pub msg_timeout: u64,
}
impl Default for CrdsGossipPull {
@@ -125,6 +128,7 @@ impl Default for CrdsGossipPull {
purged_values: VecDeque::new(),
pull_request_time: HashMap::new(),
crds_timeout: CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS,
msg_timeout: CRDS_GOSSIP_PULL_MSG_TIMEOUT_MS,
}
}
}
@@ -210,12 +214,56 @@ impl CrdsGossipPull {
&mut self,
crds: &mut Crds,
from: &Pubkey,
timeouts: &HashMap<Pubkey, u64>,
response: Vec<CrdsValue>,
now: u64,
) -> usize {
let mut failed = 0;
for r in response {
let owner = r.label().pubkey();
// Check if the crds value is older than the msg_timeout
if now
> r.wallclock()
.checked_add(self.msg_timeout)
.unwrap_or_else(|| 0)
|| now + self.msg_timeout < r.wallclock()
{
match &r.label() {
CrdsValueLabel::ContactInfo(_) => {
// Check whether this ContactInfo is actually too old; it's possible that it has
// stake and so has a longer effective timeout
let timeout = *timeouts
.get(&owner)
.unwrap_or_else(|| timeouts.get(&Pubkey::default()).unwrap());
if now > r.wallclock().checked_add(timeout).unwrap_or_else(|| 0)
|| now + timeout < r.wallclock()
{
inc_new_counter_warn!(
"cluster_info-gossip_pull_response_value_timeout",
1
);
failed += 1;
continue;
}
}
_ => {
// Before discarding this value, check whether a ContactInfo for the owner
// exists in the table. If it doesn't, this value can be discarded
if crds.lookup(&CrdsValueLabel::ContactInfo(owner)).is_none() {
inc_new_counter_warn!(
"cluster_info-gossip_pull_response_value_timeout",
1
);
failed += 1;
continue;
} else {
// Silently insert this old value without bumping record timestamps
failed += crds.insert(r, now).is_err() as usize;
continue;
}
}
}
}
let old = crds.insert(r, now);
failed += old.is_err() as usize;
old.ok().map(|opt| {
@@ -322,8 +370,9 @@ impl CrdsGossipPull {
mod test {
use super::*;
use crate::contact_info::ContactInfo;
use crate::crds_value::CrdsData;
use crate::crds_value::{CrdsData, Vote};
use itertools::Itertools;
use solana_perf::test_tx::test_tx;
use solana_sdk::hash::hash;
use solana_sdk::packet::PACKET_DATA_SIZE;
@@ -534,8 +583,13 @@ mod test {
continue;
}
assert_eq!(rsp.len(), 1);
let failed =
node.process_pull_response(&mut node_crds, &node_pubkey, rsp.pop().unwrap(), 1);
let failed = node.process_pull_response(
&mut node_crds,
&node_pubkey,
&node.make_timeouts_def(&node_pubkey, &HashMap::new(), 0, 1),
rsp.pop().unwrap(),
1,
);
assert_eq!(failed, 0);
assert_eq!(
node_crds
@@ -675,4 +729,87 @@ mod test {
.collect();
assert_eq!(masks.len(), 2u64.pow(mask_bits) as usize)
}
#[test]
fn test_process_pull_response() {
let mut node_crds = Crds::default();
let mut node = CrdsGossipPull::default();
let peer_pubkey = Pubkey::new_rand();
let peer_entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(
ContactInfo::new_localhost(&peer_pubkey, 0),
));
let mut timeouts = HashMap::new();
timeouts.insert(Pubkey::default(), node.crds_timeout);
timeouts.insert(peer_pubkey, node.msg_timeout + 1);
// inserting a fresh value should be fine.
assert_eq!(
node.process_pull_response(
&mut node_crds,
&peer_pubkey,
&timeouts,
vec![peer_entry.clone()],
1,
),
0
);
let mut node_crds = Crds::default();
let unstaked_peer_entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(
ContactInfo::new_localhost(&peer_pubkey, 0),
));
// check that contact infos are rejected once they are older than even their "timeouts" entry
assert_eq!(
node.process_pull_response(
&mut node_crds,
&peer_pubkey,
&timeouts,
vec![peer_entry.clone(), unstaked_peer_entry],
node.msg_timeout + 100,
),
2
);
let mut node_crds = Crds::default();
// check that old contact infos can still land as long as they have a "timeouts" entry
assert_eq!(
node.process_pull_response(
&mut node_crds,
&peer_pubkey,
&timeouts,
vec![peer_entry.clone()],
node.msg_timeout + 1,
),
0
);
// construct something that's not a contact info
let peer_vote =
CrdsValue::new_unsigned(CrdsData::Vote(0, Vote::new(&peer_pubkey, test_tx(), 0)));
// check that older non-ContactInfo CrdsValues pass even if they are too old,
// as long as a recent ContactInfo for the owner (inserted above) exists
assert_eq!(
node.process_pull_response(
&mut node_crds,
&peer_pubkey,
&timeouts,
vec![peer_vote.clone()],
node.msg_timeout + 1,
),
0
);
let mut node_crds = Crds::default();
// without a contact info, inserting an old value should fail
assert_eq!(
node.process_pull_response(
&mut node_crds,
&peer_pubkey,
&timeouts,
vec![peer_vote.clone()],
node.msg_timeout + 1,
),
1
);
}
}
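A dependency-free sketch of the two-sided staleness window process_pull_response enforces above; this version uses checked arithmetic on both sides, while the original only guards the wallclock-plus-timeout side:

// A value is accepted only if its wallclock sits within `timeout` of the
// local `now`, in either direction; overflow is treated as rejection.
fn within_timeout(now: u64, wallclock: u64, timeout: u64) -> bool {
    now <= wallclock.checked_add(timeout).unwrap_or(0)
        && now.checked_add(timeout).map_or(false, |max| wallclock <= max)
}

fn main() {
    assert!(within_timeout(100, 90, 20)); // slightly old: accepted
    assert!(!within_timeout(200, 90, 20)); // too old: rejected
    assert!(!within_timeout(0, 100, 20)); // too far in the future: rejected
}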


@@ -30,7 +30,10 @@ use std::collections::{HashMap, HashSet};
pub const CRDS_GOSSIP_NUM_ACTIVE: usize = 30;
pub const CRDS_GOSSIP_PUSH_FANOUT: usize = 6;
pub const CRDS_GOSSIP_PUSH_MSG_TIMEOUT_MS: u64 = 5000;
// With a fanout of 6, a 1,000-node cluster should take only ~4 hops to converge.
// However, since pushes are stake-weighted, some trailing nodes
// might need more time to receive values. 30 seconds should be plenty.
pub const CRDS_GOSSIP_PUSH_MSG_TIMEOUT_MS: u64 = 30000;
pub const CRDS_GOSSIP_PRUNE_MSG_TIMEOUT_MS: u64 = 500;
pub const CRDS_GOSSIP_PRUNE_STAKE_THRESHOLD_PCT: f64 = 0.15;
@@ -135,7 +138,12 @@ impl CrdsGossipPush {
value: CrdsValue,
now: u64,
) -> Result<Option<VersionedCrdsValue>, CrdsGossipError> {
if now > value.wallclock() + self.msg_timeout {
if now
> value
.wallclock()
.checked_add(self.msg_timeout)
.unwrap_or_else(|| 0)
{
return Err(CrdsGossipError::PushMessageTimeout);
}
if now + self.msg_timeout < value.wallclock() {


@@ -197,10 +197,10 @@ fn spy(
tvu_peers = spy_ref
.read()
.unwrap()
.tvu_peers()
.all_tvu_peers()
.into_iter()
.collect::<Vec<_>>();
archivers = spy_ref.read().unwrap().storage_peers();
archivers = spy_ref.read().unwrap().all_storage_peers();
if let Some(num) = num_nodes {
if tvu_peers.len() + archivers.len() >= num {
if let Some(gossip_addr) = find_node_by_gossip_addr {


@@ -17,7 +17,7 @@ use std::time::Duration;
// - A validator to download a snapshot from a peer and boot from it
// - To make sure that if a validator needs to reboot from its own snapshot, it has enough slots locally
// to catch back up to where it was when it stopped
pub const MAX_LEDGER_SLOTS: u64 = 6400;
pub const DEFAULT_MAX_LEDGER_SLOTS: u64 = 6400;
// Remove a fixed number of slots at a time, it's more efficient than doing it one-by-one
pub const DEFAULT_PURGE_BATCH_SIZE: u64 = 256;


@@ -12,7 +12,7 @@ use solana_metrics::inc_new_counter_debug;
pub use solana_sdk::packet::{Meta, Packet, PACKET_DATA_SIZE};
use std::{net::UdpSocket, time::Instant};
pub fn recv_from(obj: &mut Packets, socket: &UdpSocket) -> Result<usize> {
pub fn recv_from(obj: &mut Packets, socket: &UdpSocket, max_wait_ms: usize) -> Result<usize> {
let mut i = 0;
//DOCUMENTED SIDE-EFFECT
//Performance out of the IO without poll
@@ -23,9 +23,11 @@ pub fn recv_from(obj: &mut Packets, socket: &UdpSocket) -> Result<usize> {
socket.set_nonblocking(false)?;
trace!("receiving on {}", socket.local_addr().unwrap());
let start = Instant::now();
let mut total_size = 0;
loop {
obj.packets.resize(i + NUM_RCVMMSGS, Packet::default());
obj.packets.resize(
std::cmp::min(i + NUM_RCVMMSGS, PACKETS_PER_BATCH),
Packet::default(),
);
match recv_mmsg(socket, &mut obj.packets[i..]) {
Err(_) if i > 0 => {
if start.elapsed().as_millis() > 1 {
@@ -36,16 +38,15 @@ pub fn recv_from(obj: &mut Packets, socket: &UdpSocket) -> Result<usize> {
trace!("recv_from err {:?}", e);
return Err(Error::IO(e));
}
Ok((size, npkts)) => {
Ok((_, npkts)) => {
if i == 0 {
socket.set_nonblocking(true)?;
}
trace!("got {} packets", npkts);
i += npkts;
total_size += size;
// Try to batch into big-enough buffers;
// this causes less re-shuffling later on.
if start.elapsed().as_millis() > 1 || total_size >= PACKETS_BATCH_SIZE {
if start.elapsed().as_millis() > max_wait_ms as u128 || i >= PACKETS_PER_BATCH {
break;
}
}
@@ -98,7 +99,7 @@ mod tests {
}
send_to(&p, &send_socket).unwrap();
let recvd = recv_from(&mut p, &recv_socket).unwrap();
let recvd = recv_from(&mut p, &recv_socket, 1).unwrap();
assert_eq!(recvd, p.packets.len());
@@ -130,4 +131,32 @@ mod tests {
p2.data[0] = 4;
assert!(p1 != p2);
}
#[test]
fn test_packet_resize() {
solana_logger::setup();
let recv_socket = UdpSocket::bind("127.0.0.1:0").expect("bind");
let addr = recv_socket.local_addr().unwrap();
let send_socket = UdpSocket::bind("127.0.0.1:0").expect("bind");
let mut p = Packets::default();
p.packets.resize(PACKETS_PER_BATCH, Packet::default());
// Should only get PACKETS_PER_BATCH packets per iteration even
// if a lot more were sent, and regardless of packet size
for _ in 0..2 * PACKETS_PER_BATCH {
let mut p = Packets::default();
p.packets.resize(1, Packet::default());
for m in p.packets.iter_mut() {
m.meta.set_addr(&addr);
m.meta.size = 1;
}
send_to(&p, &send_socket).unwrap();
}
let recvd = recv_from(&mut p, &recv_socket, 100).unwrap();
// Check we only got PACKETS_PER_BATCH packets
assert_eq!(recvd, PACKETS_PER_BATCH);
assert_eq!(p.packets.capacity(), PACKETS_PER_BATCH);
}
}
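A sketch of the batching policy the new recv_from signature expresses: keep reading until the batch fills or the caller's wait budget runs out. The PACKETS_PER_BATCH value below is an assumption for illustration; the real constant lives alongside Packets in solana-perf:

use std::time::{Duration, Instant};

const PACKETS_PER_BATCH: usize = 1024; // assumption: illustrative value only

// The read loop above stops when either condition trips.
fn batch_complete(start: Instant, max_wait_ms: u64, received: usize) -> bool {
    start.elapsed() > Duration::from_millis(max_wait_ms) || received >= PACKETS_PER_BATCH
}

fn main() {
    let start = Instant::now();
    // A full batch ends the read loop regardless of the wait budget.
    assert!(batch_complete(start, 1_000, PACKETS_PER_BATCH));
    // An empty batch keeps reading while the budget is unspent.
    assert!(!batch_complete(start, 1_000, 0));
}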


@@ -29,7 +29,7 @@ use std::sync::{Arc, Mutex};
use std::time::Instant;
const GRACE_TICKS_FACTOR: u64 = 2;
const MAX_GRACE_SLOTS: u64 = 3;
const MAX_GRACE_SLOTS: u64 = 2;
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum PohRecorderError {
@@ -126,6 +126,33 @@ impl PohRecorder {
self.ticks_per_slot
}
fn received_any_previous_leader_data(&self, slot: Slot) -> bool {
(slot.saturating_sub(NUM_CONSECUTIVE_LEADER_SLOTS)..slot).any(|i| {
// Check whether we have received any data in the previous leader's slots
if let Ok(slot_meta) = self.blockstore.meta(i as Slot) {
if let Some(slot_meta) = slot_meta {
slot_meta.received > 0
} else {
false
}
} else {
false
}
})
}
fn reached_leader_tick(&self, leader_first_tick_height: u64) -> bool {
let target_tick_height = leader_first_tick_height.saturating_sub(1);
let ideal_target_tick_height = target_tick_height.saturating_sub(self.grace_ticks);
let current_slot = self.tick_height / self.ticks_per_slot;
// we've approached target_tick_height, OR poh was reset to run immediately,
// or the previous leader didn't transmit in any of its leader slots, so ignore grace ticks
self.tick_height >= target_tick_height
|| self.start_tick_height + self.grace_ticks == leader_first_tick_height
|| (self.tick_height >= ideal_target_tick_height
&& !self.received_any_previous_leader_data(current_slot))
}
/// returns whether the leader slot has been reached, how many grace ticks were afforded,
/// imputed leader_slot and self.start_slot
/// reached_leader_slot() == true means "ready for a bank"
@@ -143,10 +170,7 @@ impl PohRecorder {
let next_leader_slot = (next_tick_height - 1) / self.ticks_per_slot;
if let Some(leader_first_tick_height) = self.leader_first_tick_height {
let target_tick_height = leader_first_tick_height.saturating_sub(1);
// we've approached target_tick_height OR poh was reset to run immediately
if self.tick_height >= target_tick_height
|| self.start_tick_height + self.grace_ticks == leader_first_tick_height
{
if self.reached_leader_tick(leader_first_tick_height) {
assert!(next_tick_height >= self.start_tick_height);
let ideal_target_tick_height = target_tick_height.saturating_sub(self.grace_ticks);
@@ -469,7 +493,8 @@ impl PohRecorder {
mod tests {
use super::*;
use crate::genesis_utils::{create_genesis_config, GenesisConfigInfo};
use solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path};
use bincode::serialize;
use solana_ledger::{blockstore::Blockstore, blockstore_meta::SlotMeta, get_tmp_ledger_path};
use solana_perf::test_tx::test_tx;
use solana_sdk::clock::DEFAULT_TICKS_PER_SLOT;
use solana_sdk::hash::hash;
@@ -1088,6 +1113,60 @@ mod tests {
Blockstore::destroy(&ledger_path).unwrap();
}
#[test]
fn test_reached_leader_tick() {
solana_logger::setup();
let ledger_path = get_tmp_ledger_path!();
{
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger");
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2);
let bank = Arc::new(Bank::new(&genesis_config));
let prev_hash = bank.last_blockhash();
let (mut poh_recorder, _entry_receiver) = PohRecorder::new(
0,
prev_hash,
0,
None,
bank.ticks_per_slot(),
&Pubkey::default(),
&Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()),
);
assert_eq!(poh_recorder.reached_leader_tick(0), true);
let grace_ticks = bank.ticks_per_slot() * MAX_GRACE_SLOTS;
let new_tick_height = NUM_CONSECUTIVE_LEADER_SLOTS * bank.ticks_per_slot();
for _ in 0..new_tick_height {
poh_recorder.tick();
}
poh_recorder.grace_ticks = grace_ticks;
// True, as previous leader did not transmit in its slots
assert_eq!(
poh_recorder.reached_leader_tick(new_tick_height + grace_ticks),
true
);
let mut parent_meta = SlotMeta::default();
parent_meta.received = 1;
poh_recorder
.blockstore
.put_meta_bytes(0, &serialize(&parent_meta).unwrap())
.unwrap();
// False, as previous leader transmitted in one of its recent slots
// and grace ticks have not expired
assert_eq!(
poh_recorder.reached_leader_tick(new_tick_height + grace_ticks),
false
);
}
}
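A reduced sketch of the grace-tick decision this test covers (the real reached_leader_tick also short-circuits when PoH was reset to run immediately):

fn reached_leader_tick(
    tick_height: u64,
    target_tick_height: u64,
    grace_ticks: u64,
    saw_previous_leader_data: bool,
) -> bool {
    let ideal_target = target_tick_height.saturating_sub(grace_ticks);
    // Start at the target, or start early if the previous leader looks dead.
    tick_height >= target_tick_height
        || (tick_height >= ideal_target && !saw_previous_leader_data)
}

fn main() {
    // At the target tick: always ready.
    assert!(reached_leader_tick(10, 10, 4, true));
    // Inside the grace window with a silent previous leader: start early.
    assert!(reached_leader_tick(7, 10, 4, false));
    // Inside the grace window but the previous leader transmitted: wait out the grace ticks.
    assert!(!reached_leader_tick(7, 10, 4, true));
}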
#[test]
fn test_reached_leader_slot() {
solana_logger::setup();
@@ -1134,6 +1213,13 @@ mod tests {
init_ticks + bank.ticks_per_slot()
);
let mut parent_meta = SlotMeta::default();
parent_meta.received = 1;
poh_recorder
.blockstore
.put_meta_bytes(0, &serialize(&parent_meta).unwrap())
.unwrap();
// Test that we don't reach the leader slot because of grace ticks
assert_eq!(poh_recorder.reached_leader_slot().0, false);


@@ -332,7 +332,7 @@ mod tests {
// it should send this over the sockets.
retransmit_sender.send(packets).unwrap();
let mut packets = Packets::new(vec![]);
packet::recv_from(&mut packets, &me_retransmit).unwrap();
packet::recv_from(&mut packets, &me_retransmit, 1).unwrap();
assert_eq!(packets.packets.len(), 1);
assert_eq!(packets.packets[0].meta.repair, false);
@@ -348,7 +348,7 @@ mod tests {
let packets = Packets::new(vec![repair, Packet::default()]);
retransmit_sender.send(packets).unwrap();
let mut packets = Packets::new(vec![]);
packet::recv_from(&mut packets, &me_retransmit).unwrap();
packet::recv_from(&mut packets, &me_retransmit, 1).unwrap();
assert_eq!(packets.packets.len(), 1);
assert_eq!(packets.packets[0].meta.repair, false);
}


@@ -229,6 +229,19 @@ impl JsonRpcRequestProcessor {
Ok(self.bank(commitment).collector_id().to_string())
}
fn minimum_ledger_slot(&self) -> Result<Slot> {
match self.blockstore.slot_meta_iterator(0) {
Ok(mut metas) => match metas.next() {
Some((slot, _meta)) => Ok(slot),
None => Err(Error::invalid_request()),
},
Err(err) => {
warn!("slot_meta_iterator failed: {:?}", err);
Err(Error::invalid_request())
}
}
}
fn get_transaction_count(&self, commitment: Option<CommitmentConfig>) -> Result<u64> {
Ok(self.bank(commitment).transaction_count() as u64)
}
@@ -530,6 +543,9 @@ pub trait RpcSol {
commitment: Option<CommitmentConfig>,
) -> Result<String>;
#[rpc(meta, name = "minimumLedgerSlot")]
fn minimum_ledger_slot(&self, meta: Self::Metadata) -> Result<Slot>;
#[rpc(meta, name = "getVoteAccounts")]
fn get_vote_accounts(
&self,
@@ -710,11 +726,14 @@ impl RpcSol for RpcSolImpl {
None
}
}
let shred_version = cluster_info.my_data().shred_version;
Ok(cluster_info
.all_peers()
.iter()
.filter_map(|(contact_info, _)| {
if ContactInfo::is_valid_address(&contact_info.gossip) {
if shred_version == contact_info.shred_version
&& ContactInfo::is_valid_address(&contact_info.gossip)
{
Some(RpcContactInfo {
pubkey: contact_info.id.to_string(),
gossip: Some(contact_info.gossip),
@@ -990,6 +1009,10 @@ impl RpcSol for RpcSolImpl {
.get_slot_leader(commitment)
}
fn minimum_ledger_slot(&self, meta: Self::Metadata) -> Result<Slot> {
meta.request_processor.read().unwrap().minimum_ledger_slot()
}
fn get_vote_accounts(
&self,
meta: Self::Metadata,
@@ -1379,6 +1402,21 @@ pub mod tests {
assert_eq!(expected, result);
}
#[test]
fn test_rpc_minimum_ledger_slot() {
let bob_pubkey = Pubkey::new_rand();
let RpcHandler { io, meta, .. } = start_rpc_handler_with_tx(&bob_pubkey);
let req = format!(r#"{{"jsonrpc":"2.0","id":1,"method":"minimumLedgerSlot"}}"#);
let res = io.handle_request_sync(&req, meta);
let expected = r#"{"jsonrpc":"2.0","result":0,"id":1}"#;
let expected: Response =
serde_json::from_str(&expected).expect("expected response deserialization");
let result: Response = serde_json::from_str(&res.expect("actual response"))
.expect("actual response deserialization");
assert_eq!(expected, result);
}
#[test]
fn test_rpc_get_total_supply() {
let bob_pubkey = Pubkey::new_rand();


@@ -1,21 +1,15 @@
use crate::result::{Error, Result};
use bincode::serialize_into;
use solana_ledger::snapshot_package::{SnapshotPackage, SnapshotPackageReceiver};
use solana_ledger::snapshot_utils::{self, TAR_ACCOUNTS_DIR, TAR_SNAPSHOTS_DIR};
use solana_measure::measure::Measure;
use solana_metrics::datapoint_info;
use solana_runtime::status_cache::SlotDelta;
use solana_sdk::transaction::Result as TransactionResult;
use std::fs;
use std::fs::File;
use std::io::{BufWriter, Error as IOError, ErrorKind};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::RecvTimeoutError;
use std::sync::Arc;
use std::thread::{self, Builder, JoinHandle};
use std::time::Duration;
use symlink;
use tempfile::TempDir;
use solana_ledger::{
snapshot_package::SnapshotPackageReceiver, snapshot_utils::archive_snapshot_package,
};
use std::{
sync::{
atomic::{AtomicBool, Ordering},
mpsc::RecvTimeoutError,
Arc,
},
thread::{self, Builder, JoinHandle},
time::Duration,
};
pub struct SnapshotPackagerService {
t_snapshot_packager: JoinHandle<()>,
@@ -30,12 +24,19 @@ impl SnapshotPackagerService {
if exit.load(Ordering::Relaxed) {
break;
}
if let Err(e) = Self::run(&snapshot_package_receiver) {
match e {
Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => break,
Error::RecvTimeoutError(RecvTimeoutError::Timeout) => (),
_ => info!("Error from package_snapshots: {:?}", e),
match snapshot_package_receiver.recv_timeout(Duration::from_secs(1)) {
Ok(mut snapshot_package) => {
// Only package the latest
while let Ok(new_snapshot_package) = snapshot_package_receiver.try_recv() {
snapshot_package = new_snapshot_package;
}
if let Err(err) = archive_snapshot_package(&snapshot_package) {
warn!("Failed to create snapshot archive: {}", err);
}
}
Err(RecvTimeoutError::Disconnected) => break,
Err(RecvTimeoutError::Timeout) => (),
}
})
.unwrap();
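The loop above blocks briefly for one package, then drains the channel down to the newest entry so stale snapshots are never archived. A self-contained sketch of that drain-to-latest pattern:

use std::sync::mpsc::{channel, RecvTimeoutError};
use std::time::Duration;

fn main() {
    let (sender, receiver) = channel::<u64>();
    for root in vec![5u64, 6, 7] {
        sender.send(root).unwrap();
    }
    match receiver.recv_timeout(Duration::from_secs(1)) {
        Ok(mut latest) => {
            // Discard all but the most recently queued package.
            while let Ok(newer) = receiver.try_recv() {
                latest = newer;
            }
            assert_eq!(latest, 7); // only the newest snapshot root is archived
        }
        Err(RecvTimeoutError::Disconnected) | Err(RecvTimeoutError::Timeout) => {}
    }
}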
@@ -44,144 +45,6 @@ impl SnapshotPackagerService {
}
}
pub fn package_snapshots(snapshot_package: &SnapshotPackage) -> Result<()> {
info!(
"Generating snapshot tarball for root {}",
snapshot_package.root
);
Self::serialize_status_cache(
&snapshot_package.slot_deltas,
&snapshot_package.snapshot_links,
)?;
let mut timer = Measure::start("snapshot_package-package_snapshots");
let tar_dir = snapshot_package
.tar_output_file
.parent()
.expect("Tar output path is invalid");
fs::create_dir_all(tar_dir)?;
// Create the staging directories
let staging_dir = TempDir::new()?;
let staging_accounts_dir = staging_dir.path().join(TAR_ACCOUNTS_DIR);
let staging_snapshots_dir = staging_dir.path().join(TAR_SNAPSHOTS_DIR);
fs::create_dir_all(&staging_accounts_dir)?;
// Add the snapshots to the staging directory
symlink::symlink_dir(
snapshot_package.snapshot_links.path(),
&staging_snapshots_dir,
)?;
// Add the AppendVecs into the compressible list
for storage in &snapshot_package.storage_entries {
storage.flush()?;
let storage_path = storage.get_path();
let output_path = staging_accounts_dir.join(
storage_path
.file_name()
.expect("Invalid AppendVec file path"),
);
// `storage_path` - The file path where the AppendVec itself is located
// `output_path` - The directory where the AppendVec will be placed in the staging directory.
let storage_path =
fs::canonicalize(storage_path).expect("Could not get absolute path for accounts");
symlink::symlink_dir(storage_path, &output_path)?;
if !output_path.is_file() {
return Err(Self::get_io_error(
"Error trying to generate snapshot archive: storage path symlink is invalid",
));
}
}
// Tar the staging directory into the archive at `archive_path`
let archive_path = tar_dir.join("new_state.tar.bz2");
let args = vec![
"jcfhS",
archive_path.to_str().unwrap(),
"-C",
staging_dir.path().to_str().unwrap(),
TAR_ACCOUNTS_DIR,
TAR_SNAPSHOTS_DIR,
];
let output = std::process::Command::new("tar").args(&args).output()?;
if !output.status.success() {
warn!("tar command failed with exit code: {}", output.status);
use std::str::from_utf8;
info!("tar stdout: {}", from_utf8(&output.stdout).unwrap_or("?"));
info!("tar stderr: {}", from_utf8(&output.stderr).unwrap_or("?"));
return Err(Self::get_io_error(&format!(
"Error trying to generate snapshot archive: {}",
output.status
)));
}
// Once everything is successful, overwrite the previous tarball so that other validators
// can fetch this newly packaged snapshot
let metadata = fs::metadata(&archive_path)?;
fs::rename(&archive_path, &snapshot_package.tar_output_file)?;
timer.stop();
info!(
"Successfully created tarball. slot: {}, elapsed ms: {}, size={}",
snapshot_package.root,
timer.as_ms(),
metadata.len()
);
datapoint_info!(
"snapshot-package",
("slot", snapshot_package.root, i64),
("duration_ms", timer.as_ms(), i64),
("size", metadata.len(), i64)
);
Ok(())
}
fn run(snapshot_receiver: &SnapshotPackageReceiver) -> Result<()> {
let mut snapshot_package = snapshot_receiver.recv_timeout(Duration::from_secs(1))?;
// Only package the latest
while let Ok(new_snapshot_package) = snapshot_receiver.try_recv() {
snapshot_package = new_snapshot_package;
}
Self::package_snapshots(&snapshot_package)?;
Ok(())
}
fn get_io_error(error: &str) -> Error {
warn!("Snapshot Packaging Error: {:?}", error);
Error::IO(IOError::new(ErrorKind::Other, error))
}
fn serialize_status_cache(
slot_deltas: &[SlotDelta<TransactionResult<()>>],
snapshot_links: &TempDir,
) -> Result<()> {
// the status cache is stored as snapshot_path/status_cache
let snapshot_status_cache_file_path = snapshot_links
.path()
.join(snapshot_utils::SNAPSHOT_STATUS_CACHE_FILE_NAME);
let status_cache = File::create(&snapshot_status_cache_file_path)?;
// status cache writer
let mut status_cache_stream = BufWriter::new(status_cache);
let mut status_cache_serialize = Measure::start("status_cache_serialize-ms");
// write the status cache
serialize_into(&mut status_cache_stream, slot_deltas)
.map_err(|_| Self::get_io_error("serialize status cache error"))?;
status_cache_serialize.stop();
inc_new_counter_info!(
"serialize-status-cache-ms",
status_cache_serialize.as_ms() as usize
);
Ok(())
}
pub fn join(self) -> thread::Result<()> {
self.t_snapshot_packager.join()
}
@@ -190,11 +53,13 @@ impl SnapshotPackagerService {
#[cfg(test)]
mod tests {
use super::*;
use solana_ledger::snapshot_utils;
use solana_runtime::accounts_db::AccountStorageEntry;
use bincode::serialize_into;
use solana_ledger::{snapshot_package::SnapshotPackage, snapshot_utils};
use solana_runtime::{accounts_db::AccountStorageEntry, status_cache::SlotDelta};
use solana_sdk::transaction;
use std::{
fs::{remove_dir_all, OpenOptions},
io::Write,
fs::{self, remove_dir_all, File, OpenOptions},
io::{BufWriter, Write},
path::{Path, PathBuf},
};
use tempfile::TempDir;
@@ -262,7 +127,8 @@ mod tests {
}
// Create a packageable snapshot
let output_tar_path = snapshot_utils::get_snapshot_tar_path(&snapshot_package_output_path);
let output_tar_path =
snapshot_utils::get_snapshot_archive_path(&snapshot_package_output_path);
let snapshot_package = SnapshotPackage::new(
5,
vec![],
@@ -272,18 +138,18 @@ mod tests {
);
// Make tarball from packageable snapshot
SnapshotPackagerService::package_snapshots(&snapshot_package).unwrap();
snapshot_utils::archive_snapshot_package(&snapshot_package).unwrap();
// before we compare, stick an empty status_cache in this dir so that the package comparison works
// This is needed since the status_cache is added by the packager and is not collected from
// the source dir for snapshots
let slot_deltas: Vec<SlotDelta<TransactionResult<()>>> = vec![];
let slot_deltas: Vec<SlotDelta<transaction::Result<()>>> = vec![];
let dummy_status_cache = File::create(snapshots_dir.join("status_cache")).unwrap();
let mut status_cache_stream = BufWriter::new(dummy_status_cache);
serialize_into(&mut status_cache_stream, &slot_deltas).unwrap();
status_cache_stream.flush().unwrap();
// Check tarball is correct
snapshot_utils::verify_snapshot_tar(output_tar_path, snapshots_dir, accounts_dir);
// Check archive is correct
snapshot_utils::verify_snapshot_archive(output_tar_path, snapshots_dir, accounts_dir);
}
}


@@ -35,7 +35,7 @@ fn recv_loop(
if exit.load(Ordering::Relaxed) {
return Ok(());
}
if let Ok(len) = packet::recv_from(&mut msgs, sock) {
if let Ok(len) = packet::recv_from(&mut msgs, sock, 1) {
if len == NUM_RCVMMSGS {
num_max_received += 1;
}


@@ -20,6 +20,7 @@ use crate::{
tvu::{Sockets, Tvu},
};
use crossbeam_channel::unbounded;
use solana_ledger::shred::Shred;
use solana_ledger::{
bank_forks::{BankForks, SnapshotConfig},
bank_forks_utils,
@@ -30,17 +31,16 @@ use solana_ledger::{
leader_schedule_cache::LeaderScheduleCache,
};
use solana_metrics::datapoint_info;
use solana_runtime::{bank::Bank, hard_forks::HardForks};
use solana_sdk::{
clock::{Slot, DEFAULT_SLOTS_PER_TURN},
genesis_config::GenesisConfig,
hash::Hash,
hash::{extend_and_hash, Hash},
poh_config::PohConfig,
pubkey::Pubkey,
signature::{Keypair, KeypairUtil},
timing::timestamp,
};
use solana_ledger::shred::Shred;
use std::{
net::{IpAddr, Ipv4Addr, SocketAddr},
path::{Path, PathBuf},
@@ -57,6 +57,7 @@ pub struct ValidatorConfig {
pub dev_sigverify_disabled: bool,
pub dev_halt_at_slot: Option<Slot>,
pub expected_genesis_hash: Option<Hash>,
pub expected_shred_version: Option<u16>,
pub voting_disabled: bool,
pub transaction_status_service_disabled: bool,
pub blockstream_unix_socket: Option<PathBuf>,
@@ -69,6 +70,7 @@ pub struct ValidatorConfig {
pub partition_cfg: Option<PartitionCfg>,
pub fixed_leader_schedule: Option<FixedSchedule>,
pub wait_for_supermajority: bool,
pub new_hard_forks: Option<Vec<Slot>>,
}
impl Default for ValidatorConfig {
@@ -77,6 +79,7 @@ impl Default for ValidatorConfig {
dev_sigverify_disabled: false,
dev_halt_at_slot: None,
expected_genesis_hash: None,
expected_shred_version: None,
voting_disabled: false,
transaction_status_service_disabled: false,
blockstream_unix_socket: None,
@@ -89,6 +92,7 @@ impl Default for ValidatorConfig {
partition_cfg: None,
fixed_leader_schedule: None,
wait_for_supermajority: false,
new_hard_forks: None,
}
}
}
@@ -125,6 +129,7 @@ pub struct Validator {
}
impl Validator {
#[allow(clippy::cognitive_complexity)]
pub fn new(
mut node: Node,
keypair: &Arc<Keypair>,
@@ -162,17 +167,23 @@ impl Validator {
) = new_banks_from_blockstore(
config.expected_genesis_hash,
ledger_path,
config.account_paths.clone(),
config.snapshot_config.clone(),
poh_verify,
config.dev_halt_at_slot,
config.fixed_leader_schedule.clone(),
config,
);
let leader_schedule_cache = Arc::new(leader_schedule_cache);
let exit = Arc::new(AtomicBool::new(false));
let bank_info = &bank_forks_info[0];
let bank = bank_forks[bank_info.bank_slot].clone();
info!("Starting validator from slot {}", bank.slot());
{
let hard_forks: Vec<_> = bank.hard_forks().read().unwrap().iter().copied().collect();
if !hard_forks.is_empty() {
info!("Hard forks: {:?}", hard_forks);
}
}
let bank_forks = Arc::new(RwLock::new(bank_forks));
let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default()));
@@ -182,9 +193,20 @@ impl Validator {
let validator_exit = Arc::new(RwLock::new(Some(validator_exit)));
node.info.wallclock = timestamp();
node.info.shred_version = Shred::version_from_hash(&genesis_hash);
node.info.shred_version =
compute_shred_version(&genesis_hash, &bank.hard_forks().read().unwrap());
Self::print_node_info(&node);
if let Some(expected_shred_version) = config.expected_shred_version {
if expected_shred_version != node.info.shred_version {
error!(
"shred version mismatch: expected {}",
expected_shred_version
);
process::exit(1);
}
}
let cluster_info = Arc::new(RwLock::new(ClusterInfo::new(
node.info.clone(),
keypair.clone(),
@@ -298,14 +320,14 @@ impl Validator {
if config.wait_for_supermajority {
info!(
"Waiting for more than 66% of activated stake at slot {} to be in gossip...",
"Waiting for more than 75% of activated stake at slot {} to be in gossip...",
bank.slot()
);
loop {
let gossip_stake_percent = get_stake_percent_in_gossip(&bank, &cluster_info);
info!("{}% of activated stake in gossip", gossip_stake_percent,);
if gossip_stake_percent > 66 {
if gossip_stake_percent > 75 {
break;
}
sleep(Duration::new(1, 0));
@@ -469,14 +491,25 @@ impl Validator {
}
}
pub fn new_banks_from_blockstore(
fn compute_shred_version(genesis_hash: &Hash, hard_forks: &HardForks) -> u16 {
use byteorder::{ByteOrder, LittleEndian};
let mut hash = *genesis_hash;
for (slot, count) in hard_forks.iter() {
let mut buf = [0u8; 16];
LittleEndian::write_u64(&mut buf[..8], *slot);
LittleEndian::write_u64(&mut buf[8..], *count as u64);
hash = extend_and_hash(&hash, &buf);
}
Shred::version_from_hash(&hash)
}
fn new_banks_from_blockstore(
expected_genesis_hash: Option<Hash>,
blockstore_path: &Path,
account_paths: Vec<PathBuf>,
snapshot_config: Option<SnapshotConfig>,
poh_verify: bool,
dev_halt_at_slot: Option<Slot>,
fixed_leader_schedule: Option<FixedSchedule>,
config: &ValidatorConfig,
) -> (
Hash,
BankForks,
@@ -510,15 +543,16 @@ pub fn new_banks_from_blockstore(
let process_options = blockstore_processor::ProcessOptions {
poh_verify,
dev_halt_at_slot,
dev_halt_at_slot: config.dev_halt_at_slot,
new_hard_forks: config.new_hard_forks.clone(),
..blockstore_processor::ProcessOptions::default()
};
let (mut bank_forks, bank_forks_info, mut leader_schedule_cache) = bank_forks_utils::load(
&genesis_config,
&blockstore,
account_paths,
snapshot_config.as_ref(),
config.account_paths.clone(),
config.snapshot_config.as_ref(),
process_options,
)
.unwrap_or_else(|err| {
@@ -526,9 +560,9 @@ pub fn new_banks_from_blockstore(
std::process::exit(1);
});
leader_schedule_cache.set_fixed_leader_schedule(fixed_leader_schedule);
leader_schedule_cache.set_fixed_leader_schedule(config.fixed_leader_schedule.clone());
bank_forks.set_snapshot_config(snapshot_config);
bank_forks.set_snapshot_config(config.snapshot_config.clone());
(
genesis_hash,
@@ -607,10 +641,7 @@ fn report_target_features() {
}
// Get the activated stake percentage (based on the provided bank) that is visible in gossip
fn get_stake_percent_in_gossip(
bank: &Arc<solana_runtime::bank::Bank>,
cluster_info: &Arc<RwLock<ClusterInfo>>,
) -> u64 {
fn get_stake_percent_in_gossip(bank: &Arc<Bank>, cluster_info: &Arc<RwLock<ClusterInfo>>) -> u64 {
let mut gossip_stake = 0;
let mut total_activated_stake = 0;
let tvu_peers = cluster_info.read().unwrap().tvu_peers();
@@ -645,6 +676,16 @@ mod tests {
use crate::genesis_utils::create_genesis_config_with_leader;
use std::fs::remove_dir_all;
#[test]
fn test_compute_shred_version() {
let mut hard_forks = HardForks::default();
assert_eq!(compute_shred_version(&Hash::default(), &hard_forks), 0);
hard_forks.register(1);
assert_eq!(compute_shred_version(&Hash::default(), &hard_forks), 55550);
hard_forks.register(1);
assert_eq!(compute_shred_version(&Hash::default(), &hard_forks), 46352);
}
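A dependency-free sketch of the hard-fork serialization step inside compute_shred_version: each (slot, count) pair becomes two little-endian u64s folded into the running hash. The 55550 and 46352 expectations above come from Shred::version_from_hash and are not re-derived here.

// Equivalent to the byteorder::LittleEndian writes in compute_shred_version.
fn hard_fork_buf(slot: u64, count: u64) -> [u8; 16] {
    let mut buf = [0u8; 16];
    buf[..8].copy_from_slice(&slot.to_le_bytes());
    buf[8..].copy_from_slice(&count.to_le_bytes());
    buf
}

fn main() {
    // Registering slot 1 once: the first fold input after the genesis hash.
    assert_eq!(
        hard_fork_buf(1, 1),
        [1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0]
    );
}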
#[test]
fn validator_exit() {
solana_logger::setup();


@@ -22,8 +22,7 @@ mod tests {
hash::hashv,
pubkey::Pubkey,
signature::{Keypair, KeypairUtil},
system_transaction,
transaction::Result as TransactionResult,
system_transaction, transaction,
};
use std::{
fs,
@@ -87,7 +86,7 @@ mod tests {
.as_ref()
.unwrap()
.snapshot_path,
snapshot_utils::get_snapshot_tar_path(snapshot_package_output_path),
snapshot_utils::get_snapshot_archive_path(snapshot_package_output_path),
)
.unwrap();
@@ -144,12 +143,15 @@ mod tests {
slot_snapshot_paths
.last()
.expect("no snapshots found in path"),
snapshot_utils::get_snapshot_tar_path(&snapshot_config.snapshot_package_output_path),
snapshot_utils::get_snapshot_archive_path(
&snapshot_config.snapshot_package_output_path,
),
&snapshot_config.snapshot_path,
&last_bank.src.roots(),
)
.unwrap();
SnapshotPackagerService::package_snapshots(&snapshot_package).unwrap();
snapshot_utils::archive_snapshot_package(&snapshot_package).unwrap();
restore_from_snapshot(bank_forks, vec![accounts_dir.path().to_path_buf()]);
}
@@ -318,14 +320,14 @@ mod tests {
// before we compare, stick an empty status_cache in this dir so that the package comparison works
// This is needed since the status_cache is added by the packager and is not collected from
// the source dir for snapshots
let slot_deltas: Vec<SlotDelta<TransactionResult<()>>> = vec![];
let slot_deltas: Vec<SlotDelta<transaction::Result<()>>> = vec![];
let dummy_status_cache =
File::create(saved_snapshots_dir.path().join("status_cache")).unwrap();
let mut status_cache_stream = BufWriter::new(dummy_status_cache);
serialize_into(&mut status_cache_stream, &slot_deltas).unwrap();
status_cache_stream.flush().unwrap();
snapshot_utils::verify_snapshot_tar(
snapshot_utils::verify_snapshot_archive(
saved_tar,
saved_snapshots_dir.path(),
saved_accounts_dir

View File

@@ -5,6 +5,7 @@ use solana_core::cluster_info;
use solana_core::contact_info::ContactInfo;
use solana_core::crds_gossip::*;
use solana_core::crds_gossip_error::CrdsGossipError;
use solana_core::crds_gossip_pull::CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS;
use solana_core::crds_gossip_push::CRDS_GOSSIP_PUSH_MSG_TIMEOUT_MS;
use solana_core::crds_value::CrdsValueLabel;
use solana_core::crds_value::{CrdsData, CrdsValue};
@@ -396,6 +397,9 @@ fn network_run_pull(
let mut convergance = 0f64;
let num = network.len();
let network_values: Vec<Node> = network.values().cloned().collect();
let mut timeouts = HashMap::new();
timeouts.insert(Pubkey::default(), CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS);
for t in start..end {
let now = t as u64 * 100;
let requests: Vec<_> = {
@@ -448,7 +452,10 @@ fn network_run_pull(
node.lock()
.unwrap()
.mark_pull_request_creation_time(&from, now);
overhead += node.lock().unwrap().process_pull_response(&from, rsp, now);
overhead += node
.lock()
.unwrap()
.process_pull_response(&from, &timeouts, rsp, now);
});
(bytes, msgs, overhead)
})


@@ -1,6 +1,6 @@
[package]
name = "solana-crate-features"
version = "0.22.3"
version = "0.22.8"
description = "Solana Crate Features"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"


@@ -1,6 +1,6 @@
[package]
name = "solana-faucet"
version = "0.22.3"
version = "0.22.8"
description = "Solana Faucet"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -19,10 +19,10 @@ clap = "2.33"
log = "0.4.8"
serde = "1.0.104"
serde_derive = "1.0.103"
solana-clap-utils = { path = "../clap-utils", version = "0.22.3" }
solana-logger = { path = "../logger", version = "0.22.3" }
solana-metrics = { path = "../metrics", version = "0.22.3" }
solana-sdk = { path = "../sdk", version = "0.22.3" }
solana-clap-utils = { path = "../clap-utils", version = "0.22.8" }
solana-logger = { path = "../logger", version = "0.22.8" }
solana-metrics = { path = "../metrics", version = "0.22.8" }
solana-sdk = { path = "../sdk", version = "0.22.8" }
tokio = "0.1"
tokio-codec = "0.1"


@@ -1,6 +1,6 @@
#!/usr/bin/env bash
PERF_LIBS_VERSION=v0.16.2
PERF_LIBS_VERSION=v0.18.0
VERSION=$PERF_LIBS_VERSION-1
set -e


@@ -1,6 +1,6 @@
[package]
name = "solana-fixed-buf"
version = "0.22.3"
version = "0.22.8"
description = "A fixed-size byte array that supports bincode serde"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"


@@ -1,6 +1,6 @@
[package]
name = "solana-genesis-programs"
version = "0.22.3"
version = "0.22.8"
description = "Solana genesis programs"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -10,16 +10,16 @@ edition = "2018"
[dependencies]
log = { version = "0.4.8" }
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "0.22.3" }
solana-budget-program = { path = "../programs/budget", version = "0.22.3" }
solana-config-program = { path = "../programs/config", version = "0.22.3" }
solana-exchange-program = { path = "../programs/exchange", version = "0.22.3" }
solana-runtime = { path = "../runtime", version = "0.22.3" }
solana-sdk = { path = "../sdk", version = "0.22.3" }
solana-stake-program = { path = "../programs/stake", version = "0.22.3" }
solana-storage-program = { path = "../programs/storage", version = "0.22.3" }
solana-vest-program = { path = "../programs/vest", version = "0.22.3" }
solana-vote-program = { path = "../programs/vote", version = "0.22.3" }
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "0.22.8" }
solana-budget-program = { path = "../programs/budget", version = "0.22.8" }
solana-config-program = { path = "../programs/config", version = "0.22.8" }
solana-exchange-program = { path = "../programs/exchange", version = "0.22.8" }
solana-runtime = { path = "../runtime", version = "0.22.8" }
solana-sdk = { path = "../sdk", version = "0.22.8" }
solana-stake-program = { path = "../programs/stake", version = "0.22.8" }
solana-storage-program = { path = "../programs/storage", version = "0.22.8" }
solana-vest-program = { path = "../programs/vest", version = "0.22.8" }
solana-vote-program = { path = "../programs/vote", version = "0.22.8" }
[lib]
crate-type = ["lib"]

View File

@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-genesis"
description = "Blockchain, Rebuilt for Scale"
version = "0.22.3"
version = "0.22.8"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -17,13 +17,13 @@ serde = "1.0.104"
serde_derive = "1.0.103"
serde_json = "1.0.44"
serde_yaml = "0.8.11"
solana-clap-utils = { path = "../clap-utils", version = "0.22.3" }
solana-genesis-programs = { path = "../genesis-programs", version = "0.22.3" }
solana-ledger = { path = "../ledger", version = "0.22.3" }
solana-sdk = { path = "../sdk", version = "0.22.3" }
solana-stake-program = { path = "../programs/stake", version = "0.22.3" }
solana-storage-program = { path = "../programs/storage", version = "0.22.3" }
solana-vote-program = { path = "../programs/vote", version = "0.22.3" }
solana-clap-utils = { path = "../clap-utils", version = "0.22.8" }
solana-genesis-programs = { path = "../genesis-programs", version = "0.22.8" }
solana-ledger = { path = "../ledger", version = "0.22.8" }
solana-sdk = { path = "../sdk", version = "0.22.8" }
solana-stake-program = { path = "../programs/stake", version = "0.22.8" }
solana-storage-program = { path = "../programs/storage", version = "0.22.8" }
solana-vote-program = { path = "../programs/vote", version = "0.22.8" }
tempfile = "3.1.0"
[[bin]]

View File

@@ -220,7 +220,6 @@ pub const VALIDATOR_PUBKEYS: &[&str] = &[
"7v5DXDvYzkgTdFYXYB12ZLKD6z8QfzR53N9hg6XgEQJE", // Cryptium Labs GmbH
"8LSwP5qYbmuUfKLGwi8XaKJnai9HyZAJTnBovyWebRfd", //
"8UPb8LMWyoJJC9Aeq9QmTzKZKV2ssov739bTJ14M4ws1", //
"8oRw7qpj6XgLGXYCDuNoTMCqoJnDd6A8LTpNyqApSfkA", //
"8wFK4fCAuDoAH1fsgou9yKZPqDMFtJUVoDdkZAAMuhyA", // LunaNova Technologies Ltd
"94eWgQm2k8BXKEWbJP2eScHZeKopXpqkuoVrCofQWBhW", // Node A-Team
"9J8WcnXxo3ArgEwktfk9tsrf4Rp8h5uPUgnQbQHLvtkd", // moonli.me

View File

@@ -3,19 +3,19 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-gossip"
description = "Blockchain, Rebuilt for Scale"
version = "0.22.3"
version = "0.22.8"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
[dependencies]
clap = "2.33.0"
solana-clap-utils = { path = "../clap-utils", version = "0.22.3" }
solana-core = { path = "../core", version = "0.22.3" }
solana-client = { path = "../client", version = "0.22.3" }
solana-logger = { path = "../logger", version = "0.22.3" }
solana-net-utils = { path = "../net-utils", version = "0.22.3" }
solana-sdk = { path = "../sdk", version = "0.22.3" }
solana-clap-utils = { path = "../clap-utils", version = "0.22.8" }
solana-core = { path = "../core", version = "0.22.8" }
solana-client = { path = "../client", version = "0.22.8" }
solana-logger = { path = "../logger", version = "0.22.8" }
solana-net-utils = { path = "../net-utils", version = "0.22.8" }
solana-sdk = { path = "../sdk", version = "0.22.8" }

View File

@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-install"
description = "The solana cluster software installer"
version = "0.22.3"
version = "0.22.8"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -26,11 +26,11 @@ reqwest = { version = "0.9.24", default-features = false, features = ["rustls-tl
serde = "1.0.104"
serde_derive = "1.0.103"
serde_yaml = "0.8.11"
solana-clap-utils = { path = "../clap-utils", version = "0.22.3" }
solana-client = { path = "../client", version = "0.22.3" }
solana-config-program = { path = "../programs/config", version = "0.22.3" }
solana-logger = { path = "../logger", version = "0.22.3" }
solana-sdk = { path = "../sdk", version = "0.22.3" }
solana-clap-utils = { path = "../clap-utils", version = "0.22.8" }
solana-client = { path = "../client", version = "0.22.8" }
solana-config-program = { path = "../programs/config", version = "0.22.8" }
solana-logger = { path = "../logger", version = "0.22.8" }
solana-sdk = { path = "../sdk", version = "0.22.8" }
tar = "0.4.26"
tempdir = "0.3.7"
url = "2.1.0"

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-keygen"
version = "0.22.3"
version = "0.22.8"
description = "Solana key generation utility"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -14,8 +14,8 @@ clap = "2.33"
dirs = "2.0.2"
num_cpus = "1.11.1"
rpassword = "4.0"
solana-clap-utils = { path = "../clap-utils", version = "0.22.3" }
solana-sdk = { path = "../sdk", version = "0.22.3" }
solana-clap-utils = { path = "../clap-utils", version = "0.22.8" }
solana-sdk = { path = "../sdk", version = "0.22.8" }
tiny-bip39 = "0.6.2"
[[bin]]

View File

@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-ledger-tool"
description = "Blockchain, Rebuilt for Scale"
version = "0.22.3"
version = "0.22.8"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -16,12 +16,13 @@ serde = "1.0.104"
serde_derive = "1.0.103"
serde_json = "1.0.44"
serde_yaml = "0.8.11"
solana-clap-utils = { path = "../clap-utils", version = "0.22.3" }
solana-ledger = { path = "../ledger", version = "0.22.3" }
solana-logger = { path = "../logger", version = "0.22.3" }
solana-runtime = { path = "../runtime", version = "0.22.3" }
solana-sdk = { path = "../sdk", version = "0.22.3" }
solana-vote-program = { path = "../programs/vote", version = "0.22.3" }
solana-clap-utils = { path = "../clap-utils", version = "0.22.8" }
solana-ledger = { path = "../ledger", version = "0.22.8" }
solana-logger = { path = "../logger", version = "0.22.8" }
solana-runtime = { path = "../runtime", version = "0.22.8" }
solana-sdk = { path = "../sdk", version = "0.22.8" }
solana-vote-program = { path = "../programs/vote", version = "0.22.8" }
tempfile = "3.1.0"
[dev-dependencies]
assert_cmd = "0.12"

View File

@@ -1,17 +1,17 @@
use clap::{
crate_description, crate_name, value_t, value_t_or_exit, values_t_or_exit, App, Arg, SubCommand,
crate_description, crate_name, value_t, value_t_or_exit, values_t_or_exit, App, Arg,
ArgMatches, SubCommand,
};
use histogram;
use serde_json::json;
use solana_ledger::blockstore_db::Database;
use solana_ledger::{
bank_forks::{BankForks, SnapshotConfig},
bank_forks_utils,
blockstore::Blockstore,
blockstore_db,
blockstore_db::Column,
blockstore_processor,
blockstore_db::{self, Column, Database},
blockstore_processor::{BankForksInfo, BlockstoreProcessorResult, ProcessOptions},
rooted_slot_iterator::RootedSlotIterator,
snapshot_utils,
};
use solana_sdk::{
clock::Slot, genesis_config::GenesisConfig, instruction_processor_utils::limited_deserialize,
@@ -21,7 +21,7 @@ use solana_vote_program::vote_state::VoteState;
use std::{
collections::{BTreeMap, HashMap, HashSet},
ffi::OsStr,
fs::File,
fs::{self, File},
io::{self, stdout, Write},
path::{Path, PathBuf},
process::{exit, Command, Stdio},
@@ -173,13 +173,13 @@ fn render_dot(dot: String, output_file: &str, output_format: &str) -> io::Result
#[allow(clippy::cognitive_complexity)]
fn graph_forks(
bank_forks: BankForks,
bank_forks_info: Vec<blockstore_processor::BankForksInfo>,
bank_forks: &BankForks,
bank_forks_info: &[BankForksInfo],
include_all_votes: bool,
) -> String {
// Search all forks and collect the last vote made by each validator
let mut last_votes = HashMap::new();
for bfi in &bank_forks_info {
for bfi in bank_forks_info {
let bank = bank_forks.banks.get(&bfi.bank_slot).unwrap();
let total_stake = bank
@@ -221,7 +221,7 @@ fn graph_forks(
dot.push(" style=invis".to_string());
let mut styled_slots = HashSet::new();
let mut all_votes: HashMap<Pubkey, HashMap<Slot, VoteState>> = HashMap::new();
for bfi in &bank_forks_info {
for bfi in bank_forks_info {
let bank = bank_forks.banks.get(&bfi.bank_slot).unwrap();
let mut bank = bank.clone();
@@ -512,6 +512,44 @@ fn open_database(ledger_path: &Path) -> Database {
}
}
// This function is duplicated in validator/src/main.rs...
fn hardforks_of(matches: &ArgMatches<'_>, name: &str) -> Option<Vec<Slot>> {
if matches.is_present(name) {
Some(values_t_or_exit!(matches, name, Slot))
} else {
None
}
}
fn load_bank_forks(
arg_matches: &ArgMatches,
ledger_path: &PathBuf,
process_options: ProcessOptions,
) -> BlockstoreProcessorResult {
let snapshot_config = if arg_matches.is_present("no_snapshot") {
None
} else {
Some(SnapshotConfig {
snapshot_interval_slots: 0, // Value doesn't matter
snapshot_package_output_path: ledger_path.clone(),
snapshot_path: ledger_path.clone().join("snapshot"),
})
};
let account_paths = if let Some(account_paths) = arg_matches.value_of("account_paths") {
account_paths.split(',').map(PathBuf::from).collect()
} else {
vec![ledger_path.join("accounts")]
};
bank_forks_utils::load(
&open_genesis_config(&ledger_path),
&open_blockstore(&ledger_path),
account_paths,
snapshot_config.as_ref(),
process_options,
)
}
#[allow(clippy::cognitive_complexity)]
fn main() {
const DEFAULT_ROOT_COUNT: &str = "1";
@@ -523,18 +561,38 @@ fn main() {
.takes_value(true)
.default_value("0")
.help("Start at this slot");
let no_snapshot_arg = Arg::with_name("no_snapshot")
.long("no-snapshot")
.takes_value(false)
.help("Do not start from a local snapshot if present");
let account_paths_arg = Arg::with_name("account_paths")
.long("accounts")
.value_name("PATHS")
.takes_value(true)
.help("Comma separated persistent accounts location");
let halt_at_slot_arg = Arg::with_name("halt_at_slot")
.long("halt-at-slot")
.value_name("SLOT")
.takes_value(true)
.help("Halt processing at the given slot");
let hard_forks_arg = Arg::with_name("hard_forks")
.long("hard-fork")
.value_name("SLOT")
.multiple(true)
.takes_value(true)
.help("Add a hard fork at this slot");
let matches = App::new(crate_name!())
.about(crate_description!())
.version(solana_clap_utils::version!())
.arg(
Arg::with_name("ledger")
Arg::with_name("ledger_path")
.short("l")
.long("ledger")
.value_name("DIR")
.takes_value(true)
.global(true)
.help("Use directory for ledger location"),
.help("Use DIR for ledger location"),
)
.subcommand(
SubCommand::with_name("print")
@@ -568,8 +626,7 @@ fn main() {
.required(false)
.help("Additionally print all the non-empty slots within the bounds"),
)
)
.subcommand(
).subcommand(
SubCommand::with_name("json")
.about("Print the ledger in JSON format")
.arg(&starting_slot_arg)
@@ -577,44 +634,54 @@ fn main() {
.subcommand(
SubCommand::with_name("verify")
.about("Verify the ledger")
.arg(
Arg::with_name("no_snapshot")
.long("no-snapshot")
.takes_value(false)
.help("Do not start from a local snapshot if present"),
)
.arg(
Arg::with_name("account_paths")
.long("accounts")
.value_name("PATHS")
.takes_value(true)
.help("Comma separated persistent accounts location"),
)
.arg(
Arg::with_name("halt_at_slot")
.long("halt-at-slot")
.value_name("SLOT")
.takes_value(true)
.help("Halt processing at the given slot"),
)
.arg(&no_snapshot_arg)
.arg(&account_paths_arg)
.arg(&halt_at_slot_arg)
.arg(&hard_forks_arg)
.arg(
Arg::with_name("skip_poh_verify")
.long("skip-poh-verify")
.takes_value(false)
.help("Skip ledger PoH verification"),
)
).subcommand(
SubCommand::with_name("graph")
.about("Create a Graphviz rendering of the ledger")
.arg(&no_snapshot_arg)
.arg(&account_paths_arg)
.arg(&halt_at_slot_arg)
.arg(&hard_forks_arg)
.arg(
Arg::with_name("graph_forks")
.long("graph-forks")
.value_name("FILENAME")
.takes_value(true)
.help("Create a Graphviz DOT file representing the active forks once the ledger is verified"),
Arg::with_name("include_all_votes")
.long("include-all-votes")
.help("Include all votes in the graph"),
)
.arg(
Arg::with_name("graph_forks_include_all_votes")
.long("graph-forks-include-all-votes")
.requires("graph_forks")
.help("Include all votes in forks graph"),
Arg::with_name("graph_filename")
.index(1)
.value_name("FILENAME")
.takes_value(true)
.help("Output file"),
)
).subcommand(
SubCommand::with_name("create-snapshot")
.about("Create a new ledger snapshot")
.arg(&no_snapshot_arg)
.arg(&account_paths_arg)
.arg(&hard_forks_arg)
.arg(
Arg::with_name("snapshot_slot")
.index(1)
.value_name("SLOT")
.takes_value(true)
.help("Slot at which to create the snapshot"),
)
.arg(
Arg::with_name("output_directory")
.index(2)
.value_name("DIR")
.takes_value(true)
.help("Output directory for the snapshot"),
)
).subcommand(
SubCommand::with_name("prune")
@@ -663,7 +730,13 @@ fn main() {
)
.get_matches();
let ledger_path = PathBuf::from(value_t_or_exit!(matches, "ledger", String));
let ledger_path = PathBuf::from(value_t_or_exit!(matches, "ledger_path", String));
// Canonicalize ledger path to avoid issues with symlink creation
let ledger_path = fs::canonicalize(&ledger_path).unwrap_or_else(|err| {
eprintln!("Unable to access ledger path: {:?}", err);
exit(1);
});
match matches.subcommand() {
("print", Some(args_matches)) => {
@@ -697,67 +770,109 @@ fn main() {
);
}
("verify", Some(arg_matches)) => {
println!("Verifying ledger...");
let dev_halt_at_slot = value_t!(arg_matches, "halt_at_slot", Slot).ok();
let poh_verify = !arg_matches.is_present("skip_poh_verify");
let snapshot_config = if arg_matches.is_present("no_snapshot") {
None
} else {
Some(SnapshotConfig {
snapshot_interval_slots: 0, // Value doesn't matter
snapshot_package_output_path: ledger_path.clone(),
snapshot_path: ledger_path.clone().join("snapshot"),
})
};
let account_paths = if let Some(account_paths) = matches.value_of("account_paths") {
account_paths.split(',').map(PathBuf::from).collect()
} else {
vec![ledger_path.join("accounts")]
let process_options = ProcessOptions {
dev_halt_at_slot: value_t!(arg_matches, "halt_at_slot", Slot).ok(),
new_hard_forks: hardforks_of(arg_matches, "hard_forks"),
poh_verify: !arg_matches.is_present("skip_poh_verify"),
..ProcessOptions::default()
};
let process_options = blockstore_processor::ProcessOptions {
poh_verify,
dev_halt_at_slot,
..blockstore_processor::ProcessOptions::default()
load_bank_forks(arg_matches, &ledger_path, process_options).unwrap_or_else(|err| {
eprintln!("Ledger verification failed: {:?}", err);
exit(1);
});
println!("Ok");
}
("graph", Some(arg_matches)) => {
let output_file = value_t_or_exit!(arg_matches, "graph_filename", String);
let process_options = ProcessOptions {
dev_halt_at_slot: value_t!(arg_matches, "halt_at_slot", Slot).ok(),
new_hard_forks: hardforks_of(arg_matches, "hard_forks"),
poh_verify: false,
..ProcessOptions::default()
};
match bank_forks_utils::load(
&open_genesis_config(&ledger_path),
&open_blockstore(&ledger_path),
account_paths,
snapshot_config.as_ref(),
process_options,
) {
match load_bank_forks(arg_matches, &ledger_path, process_options) {
Ok((bank_forks, bank_forks_info, _leader_schedule_cache)) => {
println!("Ok");
let dot = graph_forks(
&bank_forks,
&bank_forks_info,
arg_matches.is_present("include_all_votes"),
);
if let Some(output_file) = arg_matches.value_of("graph_forks") {
let dot = graph_forks(
bank_forks,
bank_forks_info,
arg_matches.is_present("graph_forks_include_all_votes"),
);
let extension = Path::new(&output_file).extension();
let result = if extension == Some(OsStr::new("pdf")) {
render_dot(dot, &output_file, "pdf")
} else if extension == Some(OsStr::new("png")) {
render_dot(dot, &output_file, "png")
} else {
File::create(&output_file)
.and_then(|mut file| file.write_all(&dot.into_bytes()))
};
let extension = Path::new(output_file).extension();
let result = if extension == Some(OsStr::new("pdf")) {
render_dot(dot, output_file, "pdf")
} else if extension == Some(OsStr::new("png")) {
render_dot(dot, output_file, "png")
} else {
File::create(output_file)
.and_then(|mut file| file.write_all(&dot.into_bytes()))
};
match result {
Ok(_) => println!("Wrote {}", output_file),
Err(err) => eprintln!("Unable to write {}: {}", output_file, err),
}
match result {
Ok(_) => println!("Wrote {}", output_file),
Err(err) => eprintln!("Unable to write {}: {}", output_file, err),
}
}
Err(err) => {
eprintln!("Ledger verification failed: {:?}", err);
eprintln!("Failed to load ledger: {:?}", err);
exit(1);
}
}
}
("create-snapshot", Some(arg_matches)) => {
let snapshot_slot = value_t_or_exit!(arg_matches, "snapshot_slot", Slot);
let output_directory = value_t_or_exit!(arg_matches, "output_directory", String);
let process_options = ProcessOptions {
dev_halt_at_slot: Some(snapshot_slot),
new_hard_forks: hardforks_of(arg_matches, "hard_forks"),
poh_verify: false,
..ProcessOptions::default()
};
match load_bank_forks(arg_matches, &ledger_path, process_options) {
Ok((bank_forks, _bank_forks_info, _leader_schedule_cache)) => {
let bank = bank_forks.get(snapshot_slot).unwrap_or_else(|| {
eprintln!("Error: Slot {} is not available", snapshot_slot);
exit(1);
});
println!("Creating a snapshot of slot {}", bank.slot());
bank.squash();
let temp_dir = tempfile::TempDir::new().unwrap_or_else(|err| {
eprintln!("Unable to create temporary directory: {}", err);
exit(1);
});
snapshot_utils::add_snapshot(&temp_dir, &bank)
.and_then(|slot_snapshot_paths| {
snapshot_utils::package_snapshot(
&bank,
&slot_snapshot_paths,
snapshot_utils::get_snapshot_archive_path(output_directory),
&temp_dir,
&bank.src.roots(),
)
})
.and_then(|package| {
snapshot_utils::archive_snapshot_package(&package).map(|ok| {
println!(
"Successfully created snapshot for slot {}: {:?}",
snapshot_slot, package.tar_output_file
);
ok
})
})
.unwrap_or_else(|err| {
eprintln!("Unable to create snapshot archive: {}", err);
exit(1);
});
}
Err(err) => {
eprintln!("Failed to load ledger: {:?}", err);
exit(1);
}
}
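
The create-snapshot branch above strings three fallible steps together with `and_then`, converting the first failure into a single error report and exit. A minimal sketch of that control flow, with hypothetical stubs standing in for the `snapshot_utils` functions shown above:

```rust
use std::process::exit;

// Hypothetical stubs for the three snapshot steps chained in the diff above.
fn add_snapshot() -> Result<u64, String> {
    Ok(42) // pretend these are the slot snapshot paths
}
fn package_snapshot(slot: u64) -> Result<String, String> {
    Ok(format!("package-for-slot-{}", slot))
}
fn archive_snapshot_package(package: &str) -> Result<(), String> {
    println!("Successfully created snapshot: {}", package);
    Ok(())
}

fn main() {
    // Each step runs only if the previous one succeeded; the first error
    // short-circuits the chain and is reported once, at the end.
    add_snapshot()
        .and_then(package_snapshot)
        .and_then(|package| archive_snapshot_package(&package))
        .unwrap_or_else(|err| {
            eprintln!("Unable to create snapshot archive: {}", err);
            exit(1);
        });
}
```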

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-ledger"
version = "0.22.3"
version = "0.22.8"
description = "Solana ledger"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -29,20 +29,21 @@ rayon = "1.2.0"
reed-solomon-erasure = { package = "solana-reed-solomon-erasure", version = "4.0.1-3", features = ["simd-accel"] }
serde = "1.0.104"
serde_derive = "1.0.103"
solana-client = { path = "../client", version = "0.22.3" }
solana-genesis-programs = { path = "../genesis-programs", version = "0.22.3" }
solana-logger = { path = "../logger", version = "0.22.3" }
solana-measure = { path = "../measure", version = "0.22.3" }
solana-merkle-tree = { path = "../merkle-tree", version = "0.22.3" }
solana-metrics = { path = "../metrics", version = "0.22.3" }
solana-perf = { path = "../perf", version = "0.22.3" }
solana-client = { path = "../client", version = "0.22.8" }
solana-genesis-programs = { path = "../genesis-programs", version = "0.22.8" }
solana-logger = { path = "../logger", version = "0.22.8" }
solana-measure = { path = "../measure", version = "0.22.8" }
solana-merkle-tree = { path = "../merkle-tree", version = "0.22.8" }
solana-metrics = { path = "../metrics", version = "0.22.8" }
solana-perf = { path = "../perf", version = "0.22.8" }
ed25519-dalek = "1.0.0-pre.1"
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "0.22.3" }
solana-runtime = { path = "../runtime", version = "0.22.3" }
solana-sdk = { path = "../sdk", version = "0.22.3" }
solana-stake-program = { path = "../programs/stake", version = "0.22.3" }
solana-vote-program = { path = "../programs/vote", version = "0.22.3" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "0.22.8" }
solana-runtime = { path = "../runtime", version = "0.22.8" }
solana-sdk = { path = "../sdk", version = "0.22.8" }
solana-stake-program = { path = "../programs/stake", version = "0.22.8" }
solana-vote-program = { path = "../programs/vote", version = "0.22.8" }
sys-info = "0.5.8"
symlink = "0.1.0"
tar = "0.4.26"
thiserror = "1.0"
tempfile = "3.1.0"
@@ -57,7 +58,7 @@ features = ["lz4"]
[dev-dependencies]
assert_matches = "1.3.0"
matches = "0.1.6"
solana-budget-program = { path = "../programs/budget", version = "0.22.3" }
solana-budget-program = { path = "../programs/budget", version = "0.22.8" }
[lib]
crate-type = ["lib"]

View File

@@ -192,7 +192,7 @@ impl BankForks {
root,
&root_bank.src.roots(),
snapshot_package_sender.as_ref().unwrap(),
snapshot_utils::get_snapshot_tar_path(&config.snapshot_package_output_path),
snapshot_utils::get_snapshot_archive_path(&config.snapshot_package_output_path),
);
if r.is_err() {
warn!("Error generating snapshot for bank: {}, err: {:?}", root, r);

View File

@@ -1,8 +1,7 @@
use crate::{
bank_forks::{BankForks, SnapshotConfig},
bank_forks::SnapshotConfig,
blockstore::Blockstore,
blockstore_processor::{self, BankForksInfo, BlockstoreProcessorError, ProcessOptions},
leader_schedule_cache::LeaderScheduleCache,
blockstore_processor::{self, BlockstoreProcessorResult, ProcessOptions},
snapshot_utils,
};
use log::*;
@@ -15,7 +14,7 @@ pub fn load(
account_paths: Vec<PathBuf>,
snapshot_config: Option<&SnapshotConfig>,
process_options: ProcessOptions,
) -> Result<(BankForks, Vec<BankForksInfo>, LeaderScheduleCache), BlockstoreProcessorError> {
) -> BlockstoreProcessorResult {
if let Some(snapshot_config) = snapshot_config.as_ref() {
info!(
"Initializing snapshot path: {:?}",
@@ -25,8 +24,9 @@ pub fn load(
fs::create_dir_all(&snapshot_config.snapshot_path)
.expect("Couldn't create snapshot directory");
let tar =
snapshot_utils::get_snapshot_tar_path(&snapshot_config.snapshot_package_output_path);
let tar = snapshot_utils::get_snapshot_archive_path(
&snapshot_config.snapshot_package_output_path,
);
if tar.exists() {
info!("Loading snapshot package: {:?}", tar);
// Fail hard here if snapshot fails to load, don't silently continue
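
`load` now returns the shared `BlockstoreProcessorResult` alias and prefers an existing snapshot archive over replaying the ledger from genesis. A sketch of that decision, with hypothetical loader stubs in place of `snapshot_utils::bank_from_archive` and full blockstore processing:

```rust
use std::path::Path;

// Hypothetical stubs: the real code loads a bank from the snapshot archive
// or falls through to full blockstore processing.
fn load_from_snapshot(archive: &Path) -> Result<String, String> {
    Ok(format!("bank from {}", archive.display()))
}
fn process_from_genesis() -> Result<String, String> {
    Ok("bank from genesis replay".to_string())
}

fn load(snapshot_archive: Option<&Path>) -> Result<String, String> {
    match snapshot_archive {
        // Fail hard if the archive exists but cannot be loaded; silently
        // replaying from genesis instead could hide a corrupt snapshot.
        Some(archive) if archive.exists() => load_from_snapshot(archive),
        _ => process_from_genesis(),
    }
}

fn main() {
    // No snapshot configured: fall back to processing the whole ledger.
    println!("{}", load(None).unwrap());
}
```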

View File

@@ -58,6 +58,7 @@ pub const BLOCKSTORE_DIRECTORY: &str = "rocksdb";
thread_local!(static PAR_THREAD_POOL: RefCell<ThreadPool> = RefCell::new(rayon::ThreadPoolBuilder::new()
.num_threads(get_thread_count())
.thread_name(|ix| format!("blockstore_{}", ix))
.build()
.unwrap()));
@@ -1473,7 +1474,7 @@ impl Blockstore {
completed_ranges
.par_iter()
.map(|(start_index, end_index)| {
self.get_entries_in_data_block(slot, *start_index, *end_index)
self.get_entries_in_data_block(slot, *start_index, *end_index, &slot_meta)
})
.collect()
})
@@ -1514,6 +1515,7 @@ impl Blockstore {
slot: Slot,
start_index: u32,
end_index: u32,
slot_meta: &SlotMeta,
) -> Result<Vec<Entry>> {
let data_shred_cf = self.db.column::<cf::ShredData>();
@@ -1523,10 +1525,22 @@ impl Blockstore {
data_shred_cf
.get_bytes((slot, u64::from(i)))
.and_then(|serialized_shred| {
Shred::new_from_serialized_shred(
serialized_shred
.expect("Shred must exist if shred index was included in a range"),
)
Shred::new_from_serialized_shred(serialized_shred.unwrap_or_else(|| {
panic!(
"Shred with
slot: {},
index: {},
consumed: {},
completed_indexes: {:?}
must exist if shred index was included in a range: {} {}",
slot,
i,
slot_meta.consumed,
slot_meta.completed_data_indexes,
start_index,
end_index
)
}))
.map_err(|err| {
BlockstoreError::InvalidShredData(Box::new(bincode::ErrorKind::Custom(
format!(
@@ -5083,7 +5097,7 @@ pub mod tests {
.iter()
.cloned()
.chain(
coding_shreds[coding_shreds.len() / 2 - 1..coding_shreds.len() / 2]
coding_shreds[coding_shreds.len() / 2 - 1..data_shreds.len() / 2]
.iter()
.cloned(),
)
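
The blockstore change above trades `expect` for `unwrap_or_else`, because `expect` only accepts a fixed `&str` while the new panic message interpolates the slot, index, and `SlotMeta` fields. A minimal illustration of the idiom:

```rust
fn main() {
    let shred: Option<u8> = Some(7);
    let (slot, index) = (3u64, 9u32);

    // expect() takes a plain &str, so formatted diagnostics need the lazy
    // closure form; the closure (and the formatting) only runs on None.
    let value = shred.unwrap_or_else(|| {
        panic!(
            "Shred with slot: {}, index: {} must exist if its index was in a range",
            slot, index
        )
    });
    assert_eq!(value, 7);
}
```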

View File

@@ -221,17 +221,19 @@ impl ErasureMeta {
.data()
.present_in_bounds(self.set_index..self.set_index + self.config.num_data() as u64);
let (data_missing, num_needed) = (
self.config.num_data().saturating_sub(num_data),
self.config.num_data().saturating_sub(num_data + num_coding),
let (data_missing, coding_missing) = (
self.config.num_data() - num_data,
self.config.num_coding() - num_coding,
);
if data_missing == 0 {
DataFull
} else if num_needed == 0 {
let total_missing = data_missing + coding_missing;
if data_missing > 0 && total_missing <= self.config.num_coding() {
CanRecover
} else if data_missing == 0 {
DataFull
} else {
StillNeed(num_needed)
StillNeed(total_missing - self.config.num_coding())
}
}
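
The rewritten `status` changes the recovery criterion: data can be rebuilt whenever the total number of missing shreds does not exceed the number of coding shreds, rather than comparing against the data count alone. A self-contained sketch of the new decision, with plain counts standing in for `ErasureConfig` and the present-index bookkeeping:

```rust
#[derive(Debug, PartialEq)]
enum ErasureMetaStatus {
    CanRecover,
    DataFull,
    StillNeed(usize),
}

/// Mirrors the logic in the diff above: Reed-Solomon can rebuild at most
/// `num_coding` missing shreds across the combined data+coding set.
fn status(num_data: usize, num_coding: usize, present_data: usize, present_coding: usize) -> ErasureMetaStatus {
    let data_missing = num_data - present_data;
    let coding_missing = num_coding - present_coding;
    let total_missing = data_missing + coding_missing;
    if data_missing > 0 && total_missing <= num_coding {
        ErasureMetaStatus::CanRecover
    } else if data_missing == 0 {
        ErasureMetaStatus::DataFull
    } else {
        ErasureMetaStatus::StillNeed(total_missing - num_coding)
    }
}

fn main() {
    // 8 data + 8 coding: 4 missing data shreds, all coding present => recoverable.
    assert_eq!(status(8, 8, 4, 8), ErasureMetaStatus::CanRecover);
    // 9 shreds missing in total exceeds the 8 coding shreds available.
    assert_eq!(status(8, 8, 0, 7), ErasureMetaStatus::StillNeed(1));
}
```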

View File

@@ -36,8 +36,12 @@ use std::{
};
use thiserror::Error;
pub type BlockstoreProcessorResult =
result::Result<(BankForks, Vec<BankForksInfo>, LeaderScheduleCache), BlockstoreProcessorError>;
thread_local!(static PAR_THREAD_POOL: RefCell<ThreadPool> = RefCell::new(rayon::ThreadPoolBuilder::new()
.num_threads(get_thread_count())
.thread_name(|ix| format!("blockstore_processor_{}", ix))
.build()
.unwrap())
);
@@ -250,6 +254,9 @@ pub enum BlockstoreProcessorError {
#[error("no valid forks found")]
NoValidForksFound,
#[error("invalid hard fork")]
InvalidHardFork(Slot),
}
/// Callback for accessing bank state while processing the blockstore
@@ -262,6 +269,7 @@ pub struct ProcessOptions {
pub dev_halt_at_slot: Option<Slot>,
pub entry_callback: Option<ProcessCallback>,
pub override_num_threads: Option<usize>,
pub new_hard_forks: Option<Vec<Slot>>,
}
pub fn process_blockstore(
@@ -269,8 +277,7 @@ pub fn process_blockstore(
blockstore: &Blockstore,
account_paths: Vec<PathBuf>,
opts: ProcessOptions,
) -> result::Result<(BankForks, Vec<BankForksInfo>, LeaderScheduleCache), BlockstoreProcessorError>
{
) -> BlockstoreProcessorResult {
if let Some(num_threads) = opts.override_num_threads {
PAR_THREAD_POOL.with(|pool| {
*pool.borrow_mut() = rayon::ThreadPoolBuilder::new()
@@ -293,8 +300,7 @@ pub fn process_blockstore_from_root(
blockstore: &Blockstore,
bank: Arc<Bank>,
opts: &ProcessOptions,
) -> result::Result<(BankForks, Vec<BankForksInfo>, LeaderScheduleCache), BlockstoreProcessorError>
{
) -> BlockstoreProcessorResult {
info!("processing ledger from root slot {}...", bank.slot());
let allocated = thread_mem_usage::Allocatedp::default();
let initial_allocation = allocated.get();
@@ -309,6 +315,23 @@ pub fn process_blockstore_from_root(
genesis_config.operating_mode,
));
if let Some(ref new_hard_forks) = opts.new_hard_forks {
let hard_forks = bank.hard_forks();
for hard_fork_slot in new_hard_forks.iter() {
// Ensure the user isn't trying to add a new hard fork for a slot that's earlier than
// the current root slot. Doing so would have no effect, so emit an error instead.
if *hard_fork_slot <= start_slot {
error!(
"Unable to add new hard fork at {}, it must be greater than slot {}",
hard_fork_slot, start_slot
);
return Err(BlockstoreProcessorError::InvalidHardFork(*hard_fork_slot));
}
hard_forks.write().unwrap().register(*hard_fork_slot);
}
}
blockstore
.set_roots(&[start_slot])
.expect("Couldn't set root slot on startup");
@@ -352,7 +375,7 @@ pub fn process_blockstore_from_root(
};
info!(
"ledger processed in {}ms. {} MB allocated. {} fork{} at {}",
"ledger processed in {}ms. {} MB allocated. {} fork{} at {}, with {} frozen bank{}",
duration_as_ms(&now.elapsed()),
allocated.since(initial_allocation) / 1_000_000,
bank_forks_info.len(),
@@ -360,8 +383,15 @@ pub fn process_blockstore_from_root(
bank_forks_info
.iter()
.map(|bfi| bfi.bank_slot.to_string())
.join(", ")
.join(", "),
bank_forks.frozen_banks().len(),
if bank_forks.frozen_banks().len() > 1 {
"s"
} else {
""
},
);
assert!(bank_forks.active_banks().is_empty());
Ok((bank_forks, bank_forks_info, leader_schedule_cache))
}
@@ -560,10 +590,6 @@ fn process_pending_slots(
allocated.since(initial_allocation)
);
if slot >= dev_halt_at_slot {
break;
}
process_next_slots(
&bank,
&meta,
@@ -572,6 +598,10 @@ fn process_pending_slots(
&mut pending_slots,
&mut fork_info,
)?;
if slot >= dev_halt_at_slot {
break;
}
}
Ok(fork_info)
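
The hard-fork hunk above validates each requested slot before registering it. A sketch of that check, using a plain `Vec<Slot>` in place of the bank's shared `HardForks` register:

```rust
type Slot = u64;

#[derive(Debug)]
enum ProcessorError {
    InvalidHardFork(Slot),
}

/// Register each requested hard fork, refusing slots that are not strictly
/// greater than the current root: re-forking at or before the root would
/// have no effect, so it is reported as an error instead.
fn register_hard_forks(
    hard_forks: &mut Vec<Slot>,
    new_hard_forks: &[Slot],
    start_slot: Slot,
) -> Result<(), ProcessorError> {
    for &slot in new_hard_forks {
        if slot <= start_slot {
            eprintln!(
                "Unable to add new hard fork at {}, it must be greater than slot {}",
                slot, start_slot
            );
            return Err(ProcessorError::InvalidHardFork(slot));
        }
        hard_forks.push(slot);
    }
    Ok(())
}

fn main() {
    let mut forks = Vec::new();
    assert!(register_hard_forks(&mut forks, &[150, 200], 100).is_ok());
    assert!(register_hard_forks(&mut forks, &[50], 100).is_err());
}
```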

View File

@@ -27,6 +27,7 @@ use std::time::Instant;
thread_local!(static PAR_THREAD_POOL: RefCell<ThreadPool> = RefCell::new(rayon::ThreadPoolBuilder::new()
.num_threads(get_thread_count())
.thread_name(|ix| format!("entry_{}", ix))
.build()
.unwrap()));

View File

@@ -4,7 +4,7 @@ pub mod block_error;
#[macro_use]
pub mod blockstore;
pub mod blockstore_db;
mod blockstore_meta;
pub mod blockstore_meta;
pub mod blockstore_processor;
pub mod entry;
pub mod erasure;
@@ -25,3 +25,6 @@ extern crate solana_metrics;
#[macro_use]
extern crate log;
#[macro_use]
extern crate lazy_static;

View File

@@ -40,6 +40,7 @@ pub const SIZE_OF_DATA_SHRED_PAYLOAD: usize = PACKET_DATA_SIZE
thread_local!(static PAR_THREAD_POOL: RefCell<ThreadPool> = RefCell::new(rayon::ThreadPoolBuilder::new()
.num_threads(get_thread_count())
.thread_name(|ix| format!("shredder_{}", ix))
.build()
.unwrap()));
@@ -602,14 +603,7 @@ impl Shredder {
if fec_rate != 0.0 {
let num_data = data_shred_batch.len();
// always generate at least 1 coding shred even if the fec_rate doesn't allow it
let num_coding = Self::calculate_num_coding_shreds(num_data as u32, fec_rate);
if num_coding > num_data {
trace!(
"Generated more codes ({}) than data shreds ({})",
num_coding,
num_data
);
}
let num_coding = Self::calculate_num_coding_shreds(num_data as f32, fec_rate);
let session =
Session::new(num_data, num_coding).expect("Failed to create erasure session");
let start_index = data_shred_batch[0].common_header.index;
@@ -677,8 +671,8 @@ impl Shredder {
}
}
fn calculate_num_coding_shreds(num_data_shreds: u32, fec_rate: f32) -> usize {
(MAX_DATA_SHREDS_PER_FEC_BLOCK.max(num_data_shreds) as f32 * fec_rate) as usize
fn calculate_num_coding_shreds(num_data_shreds: f32, fec_rate: f32) -> usize {
1.max((fec_rate * num_data_shreds) as usize)
}
fn fill_in_missing_shreds(
@@ -975,7 +969,7 @@ pub mod tests {
let no_header_size = SIZE_OF_DATA_SHRED_PAYLOAD as u64;
let num_expected_data_shreds = (size + no_header_size - 1) / no_header_size;
let num_expected_coding_shreds =
Shredder::calculate_num_coding_shreds(num_expected_data_shreds as u32, fec_rate);
Shredder::calculate_num_coding_shreds(num_expected_data_shreds as f32, fec_rate);
let start_index = 0;
let (data_shreds, coding_shreds, next_index) =
@@ -1147,6 +1141,9 @@ pub mod tests {
let (data_shreds, coding_shreds, _) = shredder.entries_to_shreds(&entries, true, 0);
// Must have created an equal number of coding and data shreds
assert_eq!(data_shreds.len(), coding_shreds.len());
for (i, s) in data_shreds.iter().enumerate() {
verify_test_data_shred(
s,
@@ -1191,10 +1188,10 @@ pub mod tests {
let serialized_entries = bincode::serialize(&entries).unwrap();
let (data_shreds, coding_shreds, _) = shredder.entries_to_shreds(&entries, true, 0);
let num_coding_shreds = coding_shreds.len();
// We should have 10 data shreds and an equal number of coding shreds
assert_eq!(data_shreds.len(), num_data_shreds);
assert_eq!(coding_shreds.len(), num_data_shreds);
let all_shreds = data_shreds
.iter()
@@ -1207,7 +1204,7 @@ pub mod tests {
Shredder::try_recovery(
data_shreds[..data_shreds.len() - 1].to_vec(),
num_data_shreds,
num_coding_shreds,
num_data_shreds,
0,
0,
slot
@@ -1219,7 +1216,7 @@ pub mod tests {
let recovered_data = Shredder::try_recovery(
data_shreds[..].to_vec(),
num_data_shreds,
num_coding_shreds,
num_data_shreds,
0,
0,
slot,
@@ -1237,7 +1234,7 @@ pub mod tests {
let mut recovered_data = Shredder::try_recovery(
shred_info.clone(),
num_data_shreds,
num_coding_shreds,
num_data_shreds,
0,
0,
slot,
@@ -1285,7 +1282,7 @@ pub mod tests {
let recovered_data = Shredder::try_recovery(
shred_info.clone(),
num_data_shreds,
num_coding_shreds,
num_data_shreds,
0,
0,
slot,
@@ -1338,9 +1335,10 @@ pub mod tests {
// and 2 missing coding shreds. Hint: should work
let serialized_entries = bincode::serialize(&entries).unwrap();
let (data_shreds, coding_shreds, _) = shredder.entries_to_shreds(&entries, true, 25);
let num_coding_shreds = coding_shreds.len();
// We should have 10 shreds now
// We should have 10 data shreds and an equal number of coding shreds
assert_eq!(data_shreds.len(), num_data_shreds);
assert_eq!(coding_shreds.len(), num_data_shreds);
let all_shreds = data_shreds
.iter()
@@ -1357,7 +1355,7 @@ pub mod tests {
let recovered_data = Shredder::try_recovery(
shred_info.clone(),
num_data_shreds,
num_coding_shreds,
num_data_shreds,
25,
25,
slot,
@@ -1389,7 +1387,7 @@ pub mod tests {
let recovered_data = Shredder::try_recovery(
shred_info.clone(),
num_data_shreds,
num_coding_shreds,
num_data_shreds,
25,
25,
slot + 1,
@@ -1402,7 +1400,7 @@ pub mod tests {
Shredder::try_recovery(
shred_info.clone(),
num_data_shreds,
num_coding_shreds,
num_data_shreds,
15,
15,
slot,
@@ -1412,7 +1410,7 @@ pub mod tests {
// Test8: Try recovery/reassembly with incorrect index. Hint: does not recover any shreds
assert_matches!(
Shredder::try_recovery(shred_info, num_data_shreds, num_coding_shreds, 35, 35, slot,),
Shredder::try_recovery(shred_info, num_data_shreds, num_data_shreds, 35, 35, slot,),
Err(reed_solomon_erasure::Error::TooFewShardsPresent)
);
}
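
The `calculate_num_coding_shreds` change above drops the `MAX_DATA_SHREDS_PER_FEC_BLOCK` floor in favor of scaling with the actual batch size while guaranteeing at least one coding shred. A side-by-side sketch of the two formulas (the constant's value here is an assumption based on this era of the code):

```rust
// Assumed value of MAX_DATA_SHREDS_PER_FEC_BLOCK in this era of the code.
const MAX_DATA_SHREDS_PER_FEC_BLOCK: u32 = 32;

// Old behavior: pad small batches up to a full FEC block before applying the rate.
fn old_num_coding(num_data: u32, fec_rate: f32) -> usize {
    (MAX_DATA_SHREDS_PER_FEC_BLOCK.max(num_data) as f32 * fec_rate) as usize
}

// New behavior: scale with the actual batch size, but never emit zero coding shreds.
fn new_num_coding(num_data: f32, fec_rate: f32) -> usize {
    1.max((fec_rate * num_data) as usize)
}

fn main() {
    // For a 4-shred batch at rate 1.0 the old formula produced a full block's
    // worth of coding shreds; the new one produces 4.
    assert_eq!(old_num_coding(4, 1.0), 32);
    assert_eq!(new_num_coding(4.0, 1.0), 4);
    // At a fractional rate the new formula still emits at least one shred.
    assert_eq!(new_num_coding(4.0, 0.1), 1);
}
```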

View File

@@ -20,15 +20,17 @@ use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::Signature;
use solana_sdk::signature::{Keypair, KeypairUtil};
use std::sync::Arc;
use std::{cell::RefCell, collections::HashMap, mem::size_of};
use std::{collections::HashMap, mem::size_of};
pub const SIGN_SHRED_GPU_MIN: usize = 256;
thread_local!(static PAR_THREAD_POOL: RefCell<ThreadPool> = RefCell::new(rayon::ThreadPoolBuilder::new()
.num_threads(get_thread_count())
.thread_name(|ix| format!("sigverify_shreds_{}", ix))
.build()
.unwrap()));
lazy_static! {
pub static ref SIGVERIFY_THREAD_POOL: ThreadPool = rayon::ThreadPoolBuilder::new()
.num_threads(get_thread_count())
.thread_name(|ix| format!("sigverify_shreds_{}", ix))
.build()
.unwrap();
}
/// Assuming layout is
/// signature: Signature
@@ -67,18 +69,16 @@ fn verify_shreds_cpu(batches: &[Packets], slot_leaders: &HashMap<u64, [u8; 32]>)
use rayon::prelude::*;
let count = batch_size(batches);
debug!("CPU SHRED ECDSA for {}", count);
let rv = PAR_THREAD_POOL.with(|thread_pool| {
thread_pool.borrow().install(|| {
batches
.into_par_iter()
.map(|p| {
p.packets
.par_iter()
.map(|p| verify_shred_cpu(p, slot_leaders).unwrap_or(0))
.collect()
})
.collect()
})
let rv = SIGVERIFY_THREAD_POOL.install(|| {
batches
.into_par_iter()
.map(|p| {
p.packets
.par_iter()
.map(|p| verify_shred_cpu(p, slot_leaders).unwrap_or(0))
.collect()
})
.collect()
});
inc_new_counter_debug!("ed25519_shred_verify_cpu", count);
rv
@@ -94,30 +94,28 @@ fn slot_key_data_for_gpu<
) -> (PinnedVec<u8>, TxOffset, usize) {
//TODO: mark Pubkey::default shreds as failed after the GPU returns
assert_eq!(slot_keys.get(&std::u64::MAX), Some(&T::default()));
let slots: Vec<Vec<u64>> = PAR_THREAD_POOL.with(|thread_pool| {
thread_pool.borrow().install(|| {
batches
.into_par_iter()
.map(|p| {
p.packets
.iter()
.map(|packet| {
let slot_start = size_of::<Signature>() + size_of::<ShredType>();
let slot_end = slot_start + size_of::<u64>();
if packet.meta.size < slot_end {
return std::u64::MAX;
}
let slot: Option<u64> =
limited_deserialize(&packet.data[slot_start..slot_end]).ok();
match slot {
Some(slot) if slot_keys.get(&slot).is_some() => slot,
_ => std::u64::MAX,
}
})
.collect()
})
.collect()
})
let slots: Vec<Vec<u64>> = SIGVERIFY_THREAD_POOL.install(|| {
batches
.into_par_iter()
.map(|p| {
p.packets
.iter()
.map(|packet| {
let slot_start = size_of::<Signature>() + size_of::<ShredType>();
let slot_end = slot_start + size_of::<u64>();
if packet.meta.size < slot_end {
return std::u64::MAX;
}
let slot: Option<u64> =
limited_deserialize(&packet.data[slot_start..slot_end]).ok();
match slot {
Some(slot) if slot_keys.get(&slot).is_some() => slot,
_ => std::u64::MAX,
}
})
.collect()
})
.collect()
});
let mut keys_to_slots: HashMap<T, Vec<u64>> = HashMap::new();
for batch in slots.iter() {
@@ -309,14 +307,12 @@ pub fn sign_shreds_cpu(keypair: &Keypair, batches: &mut [Packets]) {
use rayon::prelude::*;
let count = batch_size(batches);
debug!("CPU SHRED ECDSA for {}", count);
PAR_THREAD_POOL.with(|thread_pool| {
thread_pool.borrow().install(|| {
batches.par_iter_mut().for_each(|p| {
p.packets[..]
.par_iter_mut()
.for_each(|mut p| sign_shred_cpu(keypair, &mut p));
});
})
SIGVERIFY_THREAD_POOL.install(|| {
batches.par_iter_mut().for_each(|p| {
p.packets[..]
.par_iter_mut()
.for_each(|mut p| sign_shred_cpu(keypair, &mut p));
});
});
inc_new_counter_debug!("ed25519_shred_verify_cpu", count);
}
@@ -422,25 +418,23 @@ pub fn sign_shreds_gpu(
}
sizes[i] += sizes[i - 1];
}
PAR_THREAD_POOL.with(|thread_pool| {
thread_pool.borrow().install(|| {
batches
.par_iter_mut()
.enumerate()
.for_each(|(batch_ix, batch)| {
let num_packets = sizes[batch_ix];
batch.packets[..]
.par_iter_mut()
.enumerate()
.for_each(|(packet_ix, packet)| {
let sig_ix = packet_ix + num_packets;
let sig_start = sig_ix * sig_size;
let sig_end = sig_start + sig_size;
packet.data[0..sig_size]
.copy_from_slice(&signatures_out[sig_start..sig_end]);
});
});
});
SIGVERIFY_THREAD_POOL.install(|| {
batches
.par_iter_mut()
.enumerate()
.for_each(|(batch_ix, batch)| {
let num_packets = sizes[batch_ix];
batch.packets[..]
.par_iter_mut()
.enumerate()
.for_each(|(packet_ix, packet)| {
let sig_ix = packet_ix + num_packets;
let sig_start = sig_ix * sig_size;
let sig_end = sig_start + sig_size;
packet.data[0..sig_size]
.copy_from_slice(&signatures_out[sig_start..sig_end]);
});
});
});
inc_new_counter_debug!("ed25519_shred_sign_gpu", count);
}
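
This file replaces the `thread_local!` `RefCell<ThreadPool>` with a single `lazy_static!` pool, so every caller installs work onto one shared rayon pool instead of each OS thread lazily building its own. A minimal sketch of the pattern, assuming the `rayon` and `lazy_static` crates:

```rust
use lazy_static::lazy_static;
use rayon::prelude::*;
use rayon::ThreadPool;

lazy_static! {
    // One process-wide pool, built on first use; every call site shares it,
    // unlike thread_local!, which builds one pool per calling OS thread.
    static ref SIGVERIFY_THREAD_POOL: ThreadPool = rayon::ThreadPoolBuilder::new()
        .num_threads(4)
        .thread_name(|ix| format!("sigverify_shreds_{}", ix))
        .build()
        .unwrap();
}

fn main() {
    let batches: Vec<Vec<u32>> = vec![vec![1, 2, 3], vec![4, 5]];
    // install() runs the closure inside the shared pool, so the nested
    // par_iter work is scheduled on its worker threads.
    let sums: Vec<u32> = SIGVERIFY_THREAD_POOL.install(|| {
        batches.par_iter().map(|b| b.iter().sum()).collect()
    });
    assert_eq!(sums, vec![6, 9]);
}
```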

View File

@@ -1,6 +1,6 @@
use solana_runtime::accounts_db::AccountStorageEntry;
use solana_runtime::status_cache::SlotDelta;
use solana_sdk::transaction::Result as TransactionResult;
use solana_sdk::{clock::Slot, transaction};
use std::path::PathBuf;
use std::sync::mpsc::{Receiver, SendError, Sender};
use std::sync::Arc;
@@ -10,9 +10,10 @@ pub type SnapshotPackageSender = Sender<SnapshotPackage>;
pub type SnapshotPackageReceiver = Receiver<SnapshotPackage>;
pub type SnapshotPackageSendError = SendError<SnapshotPackage>;
#[derive(Debug)]
pub struct SnapshotPackage {
pub root: u64,
pub slot_deltas: Vec<SlotDelta<TransactionResult<()>>>,
pub root: Slot,
pub slot_deltas: Vec<SlotDelta<transaction::Result<()>>>,
pub snapshot_links: TempDir,
pub storage_entries: Vec<Arc<AccountStorageEntry>>,
pub tar_output_file: PathBuf,
@@ -20,8 +21,8 @@ pub struct SnapshotPackage {
impl SnapshotPackage {
pub fn new(
root: u64,
slot_deltas: Vec<SlotDelta<TransactionResult<()>>>,
root: Slot,
slot_deltas: Vec<SlotDelta<transaction::Result<()>>>,
snapshot_links: TempDir,
storage_entries: Vec<Arc<AccountStorageEntry>>,
tar_output_file: PathBuf,

View File

@@ -4,21 +4,28 @@ use bzip2::bufread::BzDecoder;
use fs_extra::dir::CopyOptions;
use log::*;
use solana_measure::measure::Measure;
use solana_runtime::{bank::Bank, status_cache::SlotDelta};
use solana_runtime::{
bank::{self, Bank},
status_cache::SlotDelta,
};
use solana_sdk::{clock::Slot, transaction};
use std::{
cmp::Ordering,
fs,
fs::File,
io::{BufReader, BufWriter, Error as IOError, ErrorKind},
fs::{self, File},
io::{BufReader, BufWriter, Error as IOError, ErrorKind, Read, Write},
path::{Path, PathBuf},
process::ExitStatus,
};
use tar::Archive;
use tempfile::TempDir;
use thiserror::Error;
pub const SNAPSHOT_STATUS_CACHE_FILE_NAME: &str = "status_cache";
pub const TAR_SNAPSHOTS_DIR: &str = "snapshots";
pub const TAR_ACCOUNTS_DIR: &str = "accounts";
pub const TAR_VERSION_FILE: &str = "version";
pub const SNAPSHOT_VERSION: &str = "0.22.6"; // format!("{}\n", env!("CARGO_PKG_VERSION"));
#[derive(PartialEq, Ord, Eq, Debug)]
pub struct SlotSnapshotPaths {
@@ -36,6 +43,12 @@ pub enum SnapshotError {
#[error("file system error")]
FsExtra(#[from] fs_extra::error::Error),
#[error("archive generation failure {0}")]
ArchiveGenerationFailure(ExitStatus),
#[error("storage path symlink is invalid")]
StoragePathSymlinkInvalid,
}
pub type Result<T> = std::result::Result<T, SnapshotError>;
@@ -101,6 +114,136 @@ pub fn package_snapshot<P: AsRef<Path>, Q: AsRef<Path>>(
Ok(package)
}
pub fn archive_snapshot_package(snapshot_package: &SnapshotPackage) -> Result<()> {
info!(
"Generating snapshot tarball for root {}",
snapshot_package.root
);
fn serialize_status_cache(
slot_deltas: &[SlotDelta<transaction::Result<()>>],
snapshot_links: &TempDir,
) -> Result<()> {
// the status cache is stored as snapshot_path/status_cache
let snapshot_status_cache_file_path =
snapshot_links.path().join(SNAPSHOT_STATUS_CACHE_FILE_NAME);
let status_cache = File::create(&snapshot_status_cache_file_path)?;
// status cache writer
let mut status_cache_stream = BufWriter::new(status_cache);
let mut status_cache_serialize = Measure::start("status_cache_serialize-ms");
// write the status cache
serialize_into(&mut status_cache_stream, slot_deltas)
.map_err(|_| get_io_error("serialize status cache error"))?;
status_cache_serialize.stop();
inc_new_counter_info!(
"serialize-status-cache-ms",
status_cache_serialize.as_ms() as usize
);
Ok(())
}
serialize_status_cache(
&snapshot_package.slot_deltas,
&snapshot_package.snapshot_links,
)?;
let mut timer = Measure::start("snapshot_package-package_snapshots");
let tar_dir = snapshot_package
.tar_output_file
.parent()
.expect("Tar output path is invalid");
fs::create_dir_all(tar_dir)?;
// Create the staging directories
let staging_dir = TempDir::new()?;
let staging_accounts_dir = staging_dir.path().join(TAR_ACCOUNTS_DIR);
let staging_snapshots_dir = staging_dir.path().join(TAR_SNAPSHOTS_DIR);
let staging_version_file = staging_dir.path().join(TAR_VERSION_FILE);
fs::create_dir_all(&staging_accounts_dir)?;
// Add the snapshots to the staging directory
symlink::symlink_dir(
snapshot_package.snapshot_links.path(),
&staging_snapshots_dir,
)?;
// Add the AppendVecs into the compressible list
for storage in &snapshot_package.storage_entries {
storage.flush()?;
let storage_path = storage.get_path();
let output_path = staging_accounts_dir.join(
storage_path
.file_name()
.expect("Invalid AppendVec file path"),
);
// `storage_path` - The file path where the AppendVec itself is located
// `output_path` - The directory where the AppendVec will be placed in the staging directory.
let storage_path =
fs::canonicalize(storage_path).expect("Could not get absolute path for accounts");
symlink::symlink_dir(storage_path, &output_path)?;
if !output_path.is_file() {
return Err(get_io_error(
"Error trying to generate snapshot archive: storage path symlink is invalid",
));
}
}
// Write version file
{
let mut f = std::fs::File::create(staging_version_file)?;
f.write_all(&SNAPSHOT_VERSION.to_string().into_bytes())?;
}
// Tar the staging directory into the archive at `archive_path`
let archive_path = tar_dir.join("new_state.tar.bz2");
let args = vec![
"jcfhS",
archive_path.to_str().unwrap(),
"-C",
staging_dir.path().to_str().unwrap(),
TAR_ACCOUNTS_DIR,
TAR_SNAPSHOTS_DIR,
TAR_VERSION_FILE,
];
let output = std::process::Command::new("tar").args(&args).output()?;
if !output.status.success() {
warn!("tar command failed with exit code: {}", output.status);
use std::str::from_utf8;
info!("tar stdout: {}", from_utf8(&output.stdout).unwrap_or("?"));
info!("tar stderr: {}", from_utf8(&output.stderr).unwrap_or("?"));
return Err(get_io_error(&format!(
"Error trying to generate snapshot archive: {}",
output.status
)));
}
// Once everything is successful, overwrite the previous tarball so that other validators
// can fetch this newly packaged snapshot
let metadata = fs::metadata(&archive_path)?;
fs::rename(&archive_path, &snapshot_package.tar_output_file)?;
timer.stop();
info!(
"Successfully created tarball. slot: {}, elapsed ms: {}, size={}",
snapshot_package.root,
timer.as_ms(),
metadata.len()
);
datapoint_info!(
"snapshot-package",
("slot", snapshot_package.root, i64),
("duration_ms", timer.as_ms(), i64),
("size", metadata.len(), i64)
);
Ok(())
}
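
A note on the archiving approach above: the staging directory contains only symlinks, and the `h` in tar's `jcfhS` flags (`--dereference`) makes tar archive the files the links point to rather than the links themselves, while `S` (`--sparse`) stores sparse AppendVec files compactly and `j` selects bzip2 compression. Writing to `new_state.tar.bz2` first and then `fs::rename`-ing it over the published path means other validators can never fetch a half-written archive, since a rename within a single filesystem is atomic.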
pub fn get_snapshot_paths<P: AsRef<Path>>(snapshot_path: P) -> Vec<SlotSnapshotPaths>
where
P: std::fmt::Debug,
@@ -138,7 +281,7 @@ where
}
}
pub fn add_snapshot<P: AsRef<Path>>(snapshot_path: P, bank: &Bank) -> Result<()> {
pub fn add_snapshot<P: AsRef<Path>>(snapshot_path: P, bank: &Bank) -> Result<SlotSnapshotPaths> {
bank.purge_zero_lamport_accounts();
let slot = bank.slot();
// snapshot_path/slot
@@ -147,11 +290,7 @@ pub fn add_snapshot<P: AsRef<Path>>(snapshot_path: P, bank: &Bank) -> Result<()>
// the snapshot is stored as snapshot_path/slot/slot
let snapshot_file_path = slot_snapshot_dir.join(get_snapshot_file_name(slot));
info!(
"creating snapshot {}, path: {:?}",
bank.slot(),
snapshot_file_path,
);
info!("Creating snapshot {}, path: {:?}", slot, snapshot_file_path);
let snapshot_file = File::create(&snapshot_file_path)?;
// snapshot writer
@@ -165,12 +304,13 @@ pub fn add_snapshot<P: AsRef<Path>>(snapshot_path: P, bank: &Bank) -> Result<()>
info!(
"{} for slot {} at {:?}",
bank_rc_serialize,
bank.slot(),
snapshot_file_path,
bank_rc_serialize, slot, snapshot_file_path,
);
Ok(())
Ok(SlotSnapshotPaths {
slot,
snapshot_file_path,
})
}
pub fn remove_snapshot<P: AsRef<Path>>(slot: Slot, snapshot_path: P) -> Result<()> {
@@ -206,7 +346,20 @@ pub fn bank_from_archive<P: AsRef<Path>>(
let mut measure = Measure::start("bank rebuild from snapshot");
let unpacked_accounts_dir = unpack_dir.as_ref().join(TAR_ACCOUNTS_DIR);
let unpacked_snapshots_dir = unpack_dir.as_ref().join(TAR_SNAPSHOTS_DIR);
let unpacked_version_file = unpack_dir.as_ref().join(TAR_VERSION_FILE);
let snapshot_version = if let Ok(mut f) = File::open(unpacked_version_file) {
let mut snapshot_version = String::new();
f.read_to_string(&mut snapshot_version)?;
snapshot_version
} else {
// Once 0.23.x is deployed, this default can be removed and snapshots without a version
// file can be rejected
String::from("0.22.3")
};
let bank = rebuild_bank_from_snapshots(
snapshot_version.trim(),
account_paths,
&unpacked_snapshots_dir,
unpacked_accounts_dir,
@@ -235,7 +388,7 @@ pub fn bank_from_archive<P: AsRef<Path>>(
Ok(bank)
}
pub fn get_snapshot_tar_path<P: AsRef<Path>>(snapshot_output_dir: P) -> PathBuf {
pub fn get_snapshot_archive_path<P: AsRef<Path>>(snapshot_output_dir: P) -> PathBuf {
snapshot_output_dir.as_ref().join("snapshot.tar.bz2")
}
@@ -254,13 +407,16 @@ pub fn untar_snapshot_in<P: AsRef<Path>, Q: AsRef<Path>>(
}
fn rebuild_bank_from_snapshots<P>(
local_account_paths: &[PathBuf],
snapshot_version: &str,
account_paths: &[PathBuf],
unpacked_snapshots_dir: &PathBuf,
append_vecs_path: P,
) -> Result<Bank>
where
P: AsRef<Path>,
{
info!("snapshot version: {}", snapshot_version);
let mut snapshot_paths = get_snapshot_paths(&unpacked_snapshots_dir);
if snapshot_paths.len() > 1 {
return Err(get_io_error("invalid snapshot format"));
@@ -270,14 +426,30 @@ where
.ok_or_else(|| get_io_error("No snapshots found in snapshots directory"))?;
// Rebuild the root bank
info!("Loading from {:?}", &root_paths.snapshot_file_path);
info!("Loading bank from {:?}", &root_paths.snapshot_file_path);
let file = File::open(&root_paths.snapshot_file_path)?;
let mut stream = BufReader::new(file);
let bank: Bank = deserialize_from(&mut stream)?;
let mut bank: Bank = match snapshot_version {
SNAPSHOT_VERSION => deserialize_from(&mut stream)?,
"0.22.3" => {
let bank0223: solana_runtime::bank::LegacyBank0223 = deserialize_from(&mut stream)?;
bank0223.into()
}
_ => {
return Err(get_io_error(&format!(
"unsupported snapshot version: {}",
snapshot_version
)));
}
};
// Rebuild accounts
bank.set_bank_rc(
bank::BankRc::new(account_paths.to_vec(), 0, bank.slot()),
bank::StatusCacheRc::default(),
);
bank.rc
.accounts_from_stream(&mut stream, local_account_paths, append_vecs_path)?;
.accounts_from_stream(&mut stream, account_paths, append_vecs_path)?;
// Rebuild status cache
let status_cache_path = unpacked_snapshots_dir.join(SNAPSHOT_STATUS_CACHE_FILE_NAME);
@@ -304,8 +476,11 @@ fn get_io_error(error: &str) -> SnapshotError {
SnapshotError::IO(IOError::new(ErrorKind::Other, error))
}
pub fn verify_snapshot_tar<P, Q, R>(snapshot_tar: P, snapshots_to_verify: Q, storages_to_verify: R)
where
pub fn verify_snapshot_archive<P, Q, R>(
snapshot_tar: P,
snapshots_to_verify: Q,
storages_to_verify: R,
) where
P: AsRef<Path>,
Q: AsRef<Path>,
R: AsRef<Path>,
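
`rebuild_bank_from_snapshots` now dispatches on the version string read from the archive's `version` file, treating versionless archives as 0.22.3. A sketch of the gate, with hypothetical `Bank` and legacy types standing in for the real deserialization targets:

```rust
const SNAPSHOT_VERSION: &str = "0.22.6";

#[derive(Debug)]
struct Bank { slot: u64 }
// Hypothetical legacy layout; the diff converts it with From/Into.
struct LegacyBank { slot: u64 }
impl From<LegacyBank> for Bank {
    fn from(old: LegacyBank) -> Self {
        Bank { slot: old.slot }
    }
}

fn rebuild(snapshot_version: &str) -> Result<Bank, String> {
    match snapshot_version {
        // Current format deserializes directly.
        SNAPSHOT_VERSION => Ok(Bank { slot: 0 }),
        // Versionless archives predate the version file and are assumed
        // to be 0.22.3; they deserialize into the legacy layout first.
        "0.22.3" => Ok(LegacyBank { slot: 0 }.into()),
        other => Err(format!("unsupported snapshot version: {}", other)),
    }
}

fn main() {
    assert!(rebuild("0.22.6").is_ok());
    assert!(rebuild("0.22.3").is_ok());
    assert!(rebuild("0.21.0").is_err());
}
```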

View File

@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-local-cluster"
description = "Blockchain, Rebuilt for Scale"
version = "0.22.3"
version = "0.22.8"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -12,23 +12,23 @@ homepage = "https://solana.com/"
itertools = "0.8.1"
log = "0.4.8"
rand = "0.6.5"
solana-config-program = { path = "../programs/config", version = "0.22.3" }
solana-core = { path = "../core", version = "0.22.3" }
solana-client = { path = "../client", version = "0.22.3" }
solana-faucet = { path = "../faucet", version = "0.22.3" }
solana-exchange-program = { path = "../programs/exchange", version = "0.22.3" }
solana-genesis-programs = { path = "../genesis-programs", version = "0.22.3" }
solana-ledger = { path = "../ledger", version = "0.22.3" }
solana-logger = { path = "../logger", version = "0.22.3" }
solana-runtime = { path = "../runtime", version = "0.22.3" }
solana-sdk = { path = "../sdk", version = "0.22.3" }
solana-stake-program = { path = "../programs/stake", version = "0.22.3" }
solana-storage-program = { path = "../programs/storage", version = "0.22.3" }
solana-vest-program = { path = "../programs/vest", version = "0.22.3" }
solana-vote-program = { path = "../programs/vote", version = "0.22.3" }
solana-config-program = { path = "../programs/config", version = "0.22.8" }
solana-core = { path = "../core", version = "0.22.8" }
solana-client = { path = "../client", version = "0.22.8" }
solana-faucet = { path = "../faucet", version = "0.22.8" }
solana-exchange-program = { path = "../programs/exchange", version = "0.22.8" }
solana-genesis-programs = { path = "../genesis-programs", version = "0.22.8" }
solana-ledger = { path = "../ledger", version = "0.22.8" }
solana-logger = { path = "../logger", version = "0.22.8" }
solana-runtime = { path = "../runtime", version = "0.22.8" }
solana-sdk = { path = "../sdk", version = "0.22.8" }
solana-stake-program = { path = "../programs/stake", version = "0.22.8" }
solana-storage-program = { path = "../programs/storage", version = "0.22.8" }
solana-vest-program = { path = "../programs/vest", version = "0.22.8" }
solana-vote-program = { path = "../programs/vote", version = "0.22.8" }
symlink = "0.1.0"
tempfile = "3.1.0"
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "0.22.3" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "0.22.8" }
[dev-dependencies]
assert_matches = "1.3.0"

View File

@@ -38,6 +38,7 @@ impl ClusterValidatorInfo {
pub trait Cluster {
fn get_node_pubkeys(&self) -> Vec<Pubkey>;
fn get_validator_client(&self, pubkey: &Pubkey) -> Option<ThinClient>;
fn get_contact_info(&self, pubkey: &Pubkey) -> Option<&ContactInfo>;
fn exit_node(&mut self, pubkey: &Pubkey) -> ClusterValidatorInfo;
fn restart_node(&mut self, pubkey: &Pubkey, cluster_validator_info: ClusterValidatorInfo);
fn exit_restart_node(&mut self, pubkey: &Pubkey, config: ValidatorConfig);

View File

@@ -695,6 +695,10 @@ impl Cluster for LocalCluster {
cluster_validator_info.config = validator_config;
self.restart_node(pubkey, cluster_validator_info);
}
fn get_contact_info(&self, pubkey: &Pubkey) -> Option<&ContactInfo> {
self.validators.get(pubkey).map(|v| &v.info.contact_info)
}
}
impl Drop for LocalCluster {

View File

@@ -73,12 +73,14 @@ fn run_archiver_startup_basic(num_nodes: usize, num_archivers: usize) {
}
#[test]
#[ignore]
#[serial]
fn test_archiver_startup_1_node() {
run_archiver_startup_basic(1, 1);
}
#[test]
#[ignore]
#[serial]
fn test_archiver_startup_2_nodes() {
run_archiver_startup_basic(2, 1);

View File

@@ -651,12 +651,13 @@ fn test_snapshot_restart_tower() {
.as_ref()
.unwrap()
.snapshot_package_output_path;
let tar = snapshot_utils::get_snapshot_tar_path(&snapshot_package_output_path);
let tar = snapshot_utils::get_snapshot_archive_path(&snapshot_package_output_path);
wait_for_next_snapshot(&cluster, &tar);
// Copy tar to validator's snapshot output directory
let validator_tar_path =
snapshot_utils::get_snapshot_tar_path(&validator_snapshot_test_config.snapshot_output_path);
let validator_tar_path = snapshot_utils::get_snapshot_archive_path(
&validator_snapshot_test_config.snapshot_output_path,
);
fs::hard_link(tar, &validator_tar_path).unwrap();
// Restart validator from snapshot, the validator's tower state in this snapshot
@@ -664,8 +665,11 @@ fn test_snapshot_restart_tower() {
cluster.restart_node(&validator_id, validator_info);
// Test cluster can still make progress and get confirmations in tower
// Use the restarted node as the discovery point so that we get updated
// validator's ContactInfo
let restarted_node_info = cluster.get_contact_info(&validator_id).unwrap();
cluster_tests::spend_and_verify_all_nodes(
&cluster.entry_point_info,
&restarted_node_info,
&cluster.funding_keypair,
1,
HashSet::new(),
@@ -702,7 +706,7 @@ fn test_snapshots_blockstore_floor() {
trace!("Waiting for snapshot tar to be generated with slot",);
let tar = snapshot_utils::get_snapshot_tar_path(&snapshot_package_output_path);
let tar = snapshot_utils::get_snapshot_archive_path(&snapshot_package_output_path);
loop {
if tar.exists() {
trace!("snapshot tar exists");
@@ -712,8 +716,9 @@ fn test_snapshots_blockstore_floor() {
}
// Copy tar to validator's snapshot output directory
let validator_tar_path =
snapshot_utils::get_snapshot_tar_path(&validator_snapshot_test_config.snapshot_output_path);
let validator_tar_path = snapshot_utils::get_snapshot_archive_path(
&validator_snapshot_test_config.snapshot_output_path,
);
fs::hard_link(tar, &validator_tar_path).unwrap();
let slot_floor = snapshot_utils::bank_slot_from_archive(&validator_tar_path).unwrap();
@@ -803,7 +808,7 @@ fn test_snapshots_restart_validity() {
expected_balances.extend(new_balances);
let tar = snapshot_utils::get_snapshot_tar_path(&snapshot_package_output_path);
let tar = snapshot_utils::get_snapshot_archive_path(&snapshot_package_output_path);
wait_for_next_snapshot(&cluster, &tar);
// Create new account paths since validator exit is not guaranteed to cleanup RPC threads,

View File

@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-log-analyzer"
description = "The solana cluster network analysis tool"
version = "0.22.3"
version = "0.22.8"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -17,8 +17,8 @@ semver = "0.9.0"
serde = "1.0.104"
serde_derive = "1.0.103"
serde_json = "1.0.44"
solana-clap-utils = { path = "../clap-utils", version = "0.22.3" }
solana-logger = { path = "../logger", version = "0.22.3" }
solana-clap-utils = { path = "../clap-utils", version = "0.22.8" }
solana-logger = { path = "../logger", version = "0.22.8" }
[[bin]]
name = "solana-log-analyzer"

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-logger"
version = "0.22.3"
version = "0.22.8"
description = "Solana Logger"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"

View File

@@ -1,7 +1,7 @@
[package]
name = "solana-measure"
description = "Blockchain, Rebuilt for Scale"
version = "0.22.3"
version = "0.22.8"
documentation = "https://docs.rs/solana"
homepage = "https://solana.com/"
readme = "../README.md"
@@ -12,8 +12,8 @@ edition = "2018"
[dependencies]
log = "0.4.8"
solana-sdk = { path = "../sdk", version = "0.22.3" }
solana-metrics = { path = "../metrics", version = "0.22.3" }
solana-sdk = { path = "../sdk", version = "0.22.8" }
solana-metrics = { path = "../metrics", version = "0.22.8" }
[target."cfg(unix)".dependencies]
jemallocator = "0.3.2"

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-merkle-tree"
version = "0.22.3"
version = "0.22.8"
description = "Solana Merkle Tree"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -9,7 +9,7 @@ homepage = "https://solana.com/"
edition = "2018"
[dependencies]
solana-sdk = { path = "../sdk", version = "0.22.3" }
solana-sdk = { path = "../sdk", version = "0.22.8" }
[dev-dependencies]
hex = "0.4.0"

View File

@@ -1,6 +1,6 @@
[package]
name = "solana-metrics"
version = "0.22.3"
version = "0.22.8"
description = "Solana Metrics"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -13,7 +13,7 @@ env_logger = "0.7.1"
lazy_static = "1.4.0"
log = "0.4.8"
reqwest = { version = "0.9.24", default-features = false, features = ["rustls-tls"] }
solana-sdk = { path = "../sdk", version = "0.22.3" }
solana-sdk = { path = "../sdk", version = "0.22.8" }
sys-info = "0.5.8"
[dev-dependencies]


@@ -74,7 +74,6 @@ ledger_dir="$SOLANA_CONFIG_DIR"/bootstrap-leader
}
args+=(
--accounts "$SOLANA_CONFIG_DIR"/bootstrap-leader/accounts
- --enable-rpc-exit
--ledger "$ledger_dir"
--rpc-port 8899


@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-net-shaper"
description = "The solana cluster network shaping tool"
version = "0.22.3"
version = "0.22.8"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -16,8 +16,8 @@ semver = "0.9.0"
serde = "1.0.104"
serde_derive = "1.0.103"
serde_json = "1.0.44"
- solana-clap-utils = { path = "../clap-utils", version = "0.22.3" }
- solana-logger = { path = "../logger", version = "0.22.3" }
+ solana-clap-utils = { path = "../clap-utils", version = "0.22.8" }
+ solana-logger = { path = "../logger", version = "0.22.8" }
rand = "0.6.5"
[[bin]]


@@ -1,6 +1,6 @@
[package]
name = "solana-net-utils"
version = "0.22.3"
version = "0.22.8"
description = "Solana Network Utilities"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -18,8 +18,8 @@ rand = "0.6.1"
serde = "1.0.104"
serde_derive = "1.0.103"
socket2 = "0.3.11"
- solana-clap-utils = { path = "../clap-utils", version = "0.22.3" }
- solana-logger = { path = "../logger", version = "0.22.3" }
+ solana-clap-utils = { path = "../clap-utils", version = "0.22.8" }
+ solana-logger = { path = "../logger", version = "0.22.8" }
tokio = "0.1"
tokio-codec = "0.1"


@@ -72,6 +72,7 @@ else
query() {
echo "$*"
+ set -x
curl -XPOST \
"$host/query?u=${username}&p=${password}" \
--data-urlencode "q=$*"


@@ -1,6 +1,6 @@
[package]
name = "solana-perf"
version = "0.22.3"
version = "0.22.8"
description = "Solana Performance APIs"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -18,11 +18,11 @@ serde_derive = "1.0.103"
dlopen_derive = "0.1.4"
lazy_static = "1.4.0"
log = "0.4.8"
solana-sdk = { path = "../sdk", version = "0.22.3" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "0.22.3" }
solana-budget-program = { path = "../programs/budget", version = "0.22.3" }
solana-logger = { path = "../logger", version = "0.22.3" }
solana-metrics = { path = "../metrics", version = "0.22.3" }
solana-sdk = { path = "../sdk", version = "0.22.8" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "0.22.8" }
solana-budget-program = { path = "../programs/budget", version = "0.22.8" }
solana-logger = { path = "../logger", version = "0.22.8" }
solana-metrics = { path = "../metrics", version = "0.22.8" }
[lib]
name = "solana_perf"


@@ -151,6 +151,10 @@ impl<T: Clone + Default + Sized> PinnedVec<T> {
pub fn iter_mut(&mut self) -> PinnedIterMut<T> {
PinnedIterMut(self.x.iter_mut())
}
+ pub fn capacity(&self) -> usize {
+     self.x.capacity()
+ }
}
impl<'a, T: Clone + Send + Sync + Default + Sized> IntoParallelIterator for &'a PinnedVec<T> {
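The capacity() accessor added above is plain delegation to the inner Vec, presumably so callers can observe allocation growth without reaching into the private field (PinnedVec backs buffers that may be pinned for GPU transfers, where reallocation is costly). A minimal sketch of the pattern, with PinnedBuf as a hypothetical stand-in for PinnedVec:

#[derive(Default)]
struct PinnedBuf<T> {
    x: Vec<T>, // private backing storage, as in PinnedVec
}

impl<T> PinnedBuf<T> {
    fn push(&mut self, item: T) {
        self.x.push(item);
    }
    // Mirrors the added accessor: forward the inner Vec's capacity.
    fn capacity(&self) -> usize {
        self.x.capacity()
    }
}

fn main() {
    let mut buf = PinnedBuf::<u32>::default();
    for i in 0..100 {
        buf.push(i);
    }
    // Capacity is at least the element count; growth policy is Vec's own.
    assert!(buf.capacity() >= 100);
    println!("len=100 capacity={}", buf.capacity());
}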


@@ -18,14 +18,15 @@ use solana_sdk::short_vec::decode_len;
use solana_sdk::signature::Signature;
#[cfg(test)]
use solana_sdk::transaction::Transaction;
- use std::cell::RefCell;
use std::mem::size_of;
- thread_local!(static PAR_THREAD_POOL: RefCell<ThreadPool> = RefCell::new(rayon::ThreadPoolBuilder::new()
-     .num_threads(get_thread_count())
-     .thread_name(|ix| format!("sigverify_{}", ix))
-     .build()
-     .unwrap()));
+ lazy_static! {
+     static ref PAR_THREAD_POOL: ThreadPool = rayon::ThreadPoolBuilder::new()
+         .num_threads(get_thread_count())
+         .thread_name(|ix| format!("sigverify_{}", ix))
+         .build()
+         .unwrap();
+ }
pub type TxOffset = PinnedVec<u32>;
@@ -247,13 +248,11 @@ pub fn ed25519_verify_cpu(batches: &[Packets]) -> Vec<Vec<u8>> {
use rayon::prelude::*;
let count = batch_size(batches);
debug!("CPU ECDSA for {}", batch_size(batches));
- let rv = PAR_THREAD_POOL.with(|thread_pool| {
-     thread_pool.borrow().install(|| {
-         batches
-             .into_par_iter()
-             .map(|p| p.packets.par_iter().map(verify_packet).collect())
-             .collect()
-     })
+ let rv = PAR_THREAD_POOL.install(|| {
+     batches
+         .into_par_iter()
+         .map(|p| p.packets.par_iter().map(verify_packet).collect())
+         .collect()
});
inc_new_counter_debug!("ed25519_verify_cpu", count);
rv
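The sigverify change above replaces a thread_local RefCell<ThreadPool> with a single pool initialized by lazy_static, so ed25519_verify_cpu can call install() on the shared pool directly instead of going through with()/borrow(). Here is a runnable sketch of the same pattern; VERIFY_POOL, verify_all, and the per-batch check are hypothetical stand-ins for PAR_THREAD_POOL, ed25519_verify_cpu, and verify_packet.

use lazy_static::lazy_static;
use rayon::prelude::*;
use rayon::{ThreadPool, ThreadPoolBuilder};

lazy_static! {
    // One process-wide pool; rayon's ThreadPool is Sync, so no RefCell
    // or thread_local indirection is required.
    static ref VERIFY_POOL: ThreadPool = ThreadPoolBuilder::new()
        .num_threads(4) // stand-in for get_thread_count()
        .thread_name(|ix| format!("sigverify_{}", ix))
        .build()
        .unwrap();
}

fn verify_all(batches: &[Vec<u8>]) -> Vec<bool> {
    // `install` runs the closure on the shared pool's worker threads.
    VERIFY_POOL.install(|| {
        batches
            .par_iter()
            .map(|b| !b.is_empty()) // stand-in for real signature verification
            .collect()
    })
}

fn main() {
    let batches = vec![vec![1u8], vec![], vec![2, 3]];
    println!("{:?}", verify_all(&batches));
}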


@@ -1,7 +1,7 @@
[package]
name = "solana-bpf-programs"
description = "Blockchain, Rebuilt for Scale"
version = "0.22.3"
version = "0.22.8"
documentation = "https://docs.rs/solana"
homepage = "https://solana.com/"
readme = "README.md"
@@ -22,10 +22,10 @@ walkdir = "2"
bincode = "1.1.4"
byteorder = "1.3.2"
elf = "0.0.10"
- solana-bpf-loader-program = { path = "../bpf_loader", version = "0.22.3" }
- solana-logger = { path = "../../logger", version = "0.22.3" }
- solana-runtime = { path = "../../runtime", version = "0.22.3" }
- solana-sdk = { path = "../../sdk", version = "0.22.3" }
+ solana-bpf-loader-program = { path = "../bpf_loader", version = "0.22.8" }
+ solana-logger = { path = "../../logger", version = "0.22.8" }
+ solana-runtime = { path = "../../runtime", version = "0.22.8" }
+ solana-sdk = { path = "../../sdk", version = "0.22.8" }
solana_rbpf = "=0.1.19"
[[bench]]


@@ -3,7 +3,7 @@
[package]
name = "solana-bpf-rust-128bit"
version = "0.22.3"
version = "0.22.8"
description = "Solana BPF test program written in Rust"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -12,11 +12,11 @@ homepage = "https://solana.com/"
edition = "2018"
[dependencies]
solana-sdk = { path = "../../../../sdk/", version = "0.22.3", default-features = false }
solana-bpf-rust-128bit-dep = { path = "../128bit_dep", version = "0.22.3" }
solana-sdk = { path = "../../../../sdk/", version = "0.22.8", default-features = false }
solana-bpf-rust-128bit-dep = { path = "../128bit_dep", version = "0.22.8" }
[dev_dependencies]
- solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.22.3" }
+ solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.22.8" }
[features]
program = ["solana-sdk/program"]


@@ -3,7 +3,7 @@
[package]
name = "solana-bpf-rust-128bit-dep"
version = "0.22.3"
version = "0.22.8"
description = "Solana BPF test program written in Rust"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -12,10 +12,10 @@ homepage = "https://solana.com/"
edition = "2018"
[dependencies]
solana-sdk = { path = "../../../../sdk/", version = "0.22.3", default-features = false }
solana-sdk = { path = "../../../../sdk/", version = "0.22.8", default-features = false }
[dev_dependencies]
- solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.22.3" }
+ solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.22.8" }
[features]
program = ["solana-sdk/program"]


@@ -3,7 +3,7 @@
[package]
name = "solana-bpf-rust-alloc"
version = "0.22.3"
version = "0.22.8"
description = "Solana BPF test program written in Rust"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -12,10 +12,10 @@ homepage = "https://solana.com/"
edition = "2018"
[dependencies]
solana-sdk = { path = "../../../../sdk/", version = "0.22.3", default-features = false }
solana-sdk = { path = "../../../../sdk/", version = "0.22.8", default-features = false }
[dev_dependencies]
- solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.22.3" }
+ solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.22.8" }
[features]
program = ["solana-sdk/program"]


@@ -3,7 +3,7 @@
[package]
name = "solana-bpf-rust-dep-crate"
version = "0.22.3"
version = "0.22.8"
description = "Solana BPF test program written in Rust"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -13,10 +13,10 @@ edition = "2018"
[dependencies]
byteorder = { version = "1", default-features = false }
solana-sdk = { path = "../../../../sdk/", version = "0.22.3", default-features = false }
solana-sdk = { path = "../../../../sdk/", version = "0.22.8", default-features = false }
[dev_dependencies]
- solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.22.3" }
+ solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.22.8" }
[features]
program = ["solana-sdk/program"]


@@ -3,7 +3,7 @@
[package]
name = "solana-bpf-rust-external-spend"
version = "0.22.3"
version = "0.22.8"
description = "Solana BPF test program written in Rust"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -12,10 +12,10 @@ homepage = "https://solana.com/"
edition = "2018"
[dependencies]
solana-sdk = { path = "../../../../sdk/", version = "0.22.3", default-features = false }
solana-sdk = { path = "../../../../sdk/", version = "0.22.8", default-features = false }
[dev_dependencies]
- solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.22.3" }
+ solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.22.8" }
[features]
program = ["solana-sdk/program"]


@@ -3,7 +3,7 @@
[package]
name = "solana-bpf-rust-iter"
version = "0.22.3"
version = "0.22.8"
description = "Solana BPF test program written in Rust"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -12,10 +12,10 @@ homepage = "https://solana.com/"
edition = "2018"
[dependencies]
solana-sdk = { path = "../../../../sdk/", version = "0.22.3", default-features = false }
solana-sdk = { path = "../../../../sdk/", version = "0.22.8", default-features = false }
[dev_dependencies]
- solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.22.3" }
+ solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.22.8" }
[features]
program = ["solana-sdk/program"]


@@ -3,7 +3,7 @@
[package]
name = "solana-bpf-rust-many-args"
version = "0.22.3"
version = "0.22.8"
description = "Solana BPF test program written in Rust"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -12,11 +12,11 @@ homepage = "https://solana.com/"
edition = "2018"
[dependencies]
solana-sdk = { path = "../../../../sdk/", version = "0.22.3", default-features = false }
solana-bpf-rust-many-args-dep = { path = "../many_args_dep", version = "0.22.3" }
solana-sdk = { path = "../../../../sdk/", version = "0.22.8", default-features = false }
solana-bpf-rust-many-args-dep = { path = "../many_args_dep", version = "0.22.8" }
[dev_dependencies]
- solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.22.3" }
+ solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.22.8" }
[features]
program = ["solana-sdk/program"]


@@ -3,7 +3,7 @@
[package]
name = "solana-bpf-rust-many-args-dep"
version = "0.22.3"
version = "0.22.8"
description = "Solana BPF test program written in Rust"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -12,10 +12,10 @@ homepage = "https://solana.com/"
edition = "2018"
[dependencies]
solana-sdk = { path = "../../../../sdk/", version = "0.22.3", default-features = false }
solana-sdk = { path = "../../../../sdk/", version = "0.22.8", default-features = false }
[dev_dependencies]
- solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.22.3" }
+ solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.22.8" }
[features]
program = ["solana-sdk/program"]


@@ -3,7 +3,7 @@
[package]
name = "solana-bpf-rust-noop"
version = "0.22.3"
version = "0.22.8"
description = "Solana BPF test program written in Rust"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -12,10 +12,10 @@ homepage = "https://solana.com/"
edition = "2018"
[dependencies]
solana-sdk = { path = "../../../../sdk/", version = "0.22.3", default-features = false }
solana-sdk = { path = "../../../../sdk/", version = "0.22.8", default-features = false }
[dev_dependencies]
- solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.22.3" }
+ solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.22.8" }
[features]
program = ["solana-sdk/program"]


@@ -3,7 +3,7 @@
[package]
name = "solana-bpf-rust-panic"
version = "0.22.3"
version = "0.22.8"
description = "Solana BPF test program written in Rust"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -12,10 +12,10 @@ homepage = "https://solana.com/"
edition = "2018"
[dependencies]
solana-sdk = { path = "../../../../sdk/", version = "0.22.3", default-features = false }
solana-sdk = { path = "../../../../sdk/", version = "0.22.8", default-features = false }
[dev_dependencies]
- solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.22.3" }
+ solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.22.8" }
[features]
program = ["solana-sdk/program"]


@@ -3,7 +3,7 @@
[package]
name = "solana-bpf-rust-param-passing"
version = "0.22.3"
version = "0.22.8"
description = "Solana BPF test program written in Rust"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -12,11 +12,11 @@ homepage = "https://solana.com/"
edition = "2018"
[dependencies]
solana-sdk = { path = "../../../../sdk/", version = "0.22.3", default-features = false }
solana-bpf-rust-param-passing-dep = { path = "../param_passing_dep", version = "0.22.3" }
solana-sdk = { path = "../../../../sdk/", version = "0.22.8", default-features = false }
solana-bpf-rust-param-passing-dep = { path = "../param_passing_dep", version = "0.22.8" }
[dev_dependencies]
- solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.22.3" }
+ solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.22.8" }
[features]
program = ["solana-sdk/program"]

Some files were not shown because too many files have changed in this diff.