Compare commits

...

66 Commits

Author SHA1 Message Date
dfdb2eea49 Include shred version in gossip (#7799)
automerge
2020-01-14 17:46:39 -08:00
2d3ead0c05 Unignore advisories as affected ver. is corrected (bp #7730) (#7782)
automerge
2020-01-13 19:56:17 -08:00
4499173ea3 Pick an RPC node at random to avoid getting stuck on a bad RPC node (#7762)
automerge
2020-01-12 20:18:55 -08:00
633e820970 Update http crate in bpf program workspace to fix security vulnerability (bp #7735) (#7742)
automerge
2020-01-10 07:57:06 -08:00
95f9862df7 Correctly integrate buildkite with codecov (#7718) (#7726)
automerge
2020-01-09 14:33:24 -08:00
c16356ecb3 Update http crate to fix security vulnerability (bp #7725) (#7728)
automerge
2020-01-09 13:33:44 -08:00
f0bbec7999 Bump version to 0.21.8 2020-01-07 23:13:53 -07:00
5226d2b1de Bump version to 0.21.7 2020-01-07 21:58:17 -07:00
57bc9c3ee7 Account for stake held by the current node while waiting for the supermajority to join gossip 2020-01-07 20:38:27 -07:00
d4b23a0cc9 validator: Add --wait-for-super-majority to facilitate asynchronous cluster restarts (bp #7701) (#7703)
automerge
2020-01-07 15:38:53 -08:00
d6c1cf2499 Measure heap usage while processing the ledger at validator startup (bp #7667) (#7669)
automerge
2020-01-03 15:42:27 -08:00
63db9d6933 net: Add a stand-alone gossip node on the blocksteamer instance (bp #7654) (#7658)
automerge
2020-01-02 17:10:04 -08:00
3db644320e Specify version for solana-sdk-macro to enable crate.io publishing (#7617) 2019-12-23 12:30:31 -08:00
e29fffbdb8 Groom log messages (#7610) (#7613)
automerge
2019-12-23 10:27:48 -08:00
f5e6259199 Expose stake delegations from bank for winner tool (#7354) (#7609)
automerge
2019-12-22 23:20:01 -08:00
0c2f002774 cargo fmt 2019-12-20 19:52:12 -08:00
9815dcc159 Cargo.lock 2019-12-20 19:48:17 -08:00
87a46ffc30 Print slot meta when printing a slot (#7133)
automerge
2019-12-20 15:41:54 -08:00
19215ddaa2 Add --all option to bounds, to display all non-empty slots 2019-12-20 15:41:40 -08:00
e35bd54d99 Bump version to 0.21.6 2019-12-19 15:37:58 -07:00
6672d640f5 Tune bench-tps blockhash poller and stop panicking (bp #7563) (#7567)
* Tune bench-tps blockhash poller and stop panicking

(cherry picked from commit 4b3176a9a1)

* Revert blockhash sleep

(cherry picked from commit e82db6fc2f)

* @sakridge feedback

(cherry picked from commit 848fe51f3d)

* reduce error logging

(cherry picked from commit a096ade345)

* clippy

(cherry picked from commit 54f16ca2bf)
2019-12-19 11:42:42 -07:00
b8b84a4c95 Speed up show-block-production command (#7565)
automerge
2019-12-19 10:31:47 -08:00
9677b602e7 Add show-block-production command (#7562)
automerge
2019-12-18 23:39:38 -08:00
771e1e3a71 Add get_confirmed_block()/get_confirmed_blocks()
(cherry picked from commit 05664d150b)
2019-12-19 00:01:04 -07:00
c12ea7a112 Add support for multiple params
(cherry picked from commit fcda972cec)
2019-12-19 00:01:04 -07:00
6047796c24 Improve bench-tps stability (#7537) (#7558)
automerge
2019-12-18 21:25:53 -08:00
11a0a9ac47 Add getConfirmedBlocks rpc method (bp #7550) (#7557)
automerge
2019-12-18 17:00:05 -08:00
5fbe5aa22d Speed up getLeaderSchedule RPC call by reducing pubkey duplication (#7556)
automerge
2019-12-18 15:45:20 -08:00
8a879a52ef GetLeaderSchedule can now return a schedule for arbitrary epochs (#7545)
automerge
2019-12-17 23:15:04 -08:00
c7669d4afe Only return accounts that have changed since the bank's parent (#7520) (#7523)
automerge
2019-12-17 08:42:39 -08:00
1f81206210 Add Telegram notification support 2019-12-16 15:19:03 -07:00
5a2a34b035 watchtower: Add Slack/Discord sanity failure notification (#7467)
automerge
2019-12-16 15:19:03 -07:00
2ef7579b6c Revert "Add Telegram notification support (#7511)"
This reverts commit fed7cfef58.
2019-12-16 14:43:17 -07:00
fed7cfef58 Add Telegram notification support (#7511)
automerge
2019-12-16 13:33:36 -08:00
8b2ad77699 Add validator-identity argument to support monitoring a specific validator only (#7501)
automerge
2019-12-16 11:25:34 -08:00
abcabc18ac getVoteAccounts now excludes listing inactive unstaked accounts as delinquent (#7483)
automerge
2019-12-14 09:41:04 -08:00
4e0a3862a6 Bump version to 0.21.5 2019-12-14 00:49:04 -07:00
2eaf47d563 Ship solana-watchtower program 2019-12-13 23:04:31 -07:00
31f7b3782e Fix Blocktree Config (#7399) (#7468)
automerge
2019-12-13 00:16:19 -08:00
d6169f92c1 Add vote-update-validator subcommand
(cherry picked from commit f7a87d5e52)
2019-12-13 00:19:10 -07:00
7df72d36c4 Publish solana-docker releases (#7460) (#7462)
automerge
2019-12-12 16:50:23 -08:00
5318cdac8f Add solana-watchtower program 2019-12-12 16:21:39 -07:00
00434d5e6e Clarify show-vote-account/uptime output: "node id" really means "validator identity" (#7458)
automerge
2019-12-12 14:50:44 -08:00
ebf644ddef Add uptime column to show-validators (#7441) (#7445)
automerge
2019-12-12 08:27:50 -08:00
5e4fe9c67b Bump version to 0.21.4 2019-12-11 21:35:45 -07:00
1b65f9189e Add special handling for snapshot root slot in get_confirmed_block (bp #7430) (#7434)
automerge
2019-12-11 14:44:42 -08:00
57d91c9da0 Fix sigverify metrics (#7393) (#7405)
automerge
2019-12-10 12:10:23 -08:00
a6e6ec63f1 Better space out show-stake-history columns (#7403)
automerge
2019-12-10 08:56:31 -08:00
b8b1e57df4 Fix offline stakes payer (#7385) (#7394)
automerge
2019-12-09 23:44:51 -08:00
969afe54c2 Improve get-epoch-info output for longer epoch durations (#7392)
automerge
2019-12-09 23:17:18 -08:00
5a8d4dcbd7 Fix stable metrics graph: "Bank Height / Slot Distance ($hostid)" 2019-12-09 22:57:48 -07:00
685ef72288 Continue processing the ledger on InvalidTickCount errors (#7387)
automerge
2019-12-09 16:24:26 -08:00
521fd755ac Fix Erasure Index (#7319) (#7371)
automerge
2019-12-09 14:22:26 -08:00
74fe6163c6 Support local cluster edge case testing (#7135) (#7382)
automerge
2019-12-09 13:42:16 -08:00
6e592dba17 Remove redundant check (#7369) (#7375)
automerge
2019-12-09 01:58:53 -08:00
625a9fd932 Properly set parallelism (#7370) (#7372)
automerge
2019-12-09 01:03:35 -08:00
5d37a0d108 Bump version to 0.21.3 2019-12-08 22:55:06 -07:00
68cb6aa1af no lockups for community (bp #7366) (#7367)
automerge
2019-12-08 20:57:00 -08:00
9d0cb47367 500M SOL (bp #7361) (#7365)
automerge
2019-12-08 15:37:25 -08:00
569d0ccb4d Add argument to configure the authorized pubkey for the bootstrap leader's stake (#7362) (#7363)
automerge
2019-12-08 13:40:12 -08:00
ffe17566f1 Adjust show-validators column alignment (#7359) (#7360)
automerge
2019-12-08 09:39:39 -08:00
293ad196f3 Account for all tokens at genesis (bp #7350) (#7358)
automerge
2019-12-08 09:07:39 -08:00
729e1159aa Add Forbole ValidatorInfo (#7355) (#7357)
automerge
2019-12-07 23:19:25 -08:00
f9d354f711 Add Stake Capital ValidatorInfo (#7346) (#7347)
automerge
2019-12-07 01:39:30 -08:00
f9849b515b getVoteAccounts RPC API no longer returns "idle" vote accounts, take II (bp #7344) (#7345)
automerge
2019-12-07 00:52:12 -08:00
fdc0276ed1 Update cargo version to 0.21.2 (#7342) 2019-12-06 21:32:39 -05:00
148 changed files with 4644 additions and 2241 deletions

886
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -44,12 +44,14 @@ members = [
"runtime",
"sdk",
"sdk-c",
"scripts",
"upload-perf",
"net-utils",
"fixed-buf",
"vote-signer",
"cli",
"rayon-threadlimit",
"watchtower",
]
exclude = [

View File

@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-archiver"
version = "0.21.1"
version = "0.21.8"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -10,10 +10,10 @@ homepage = "https://solana.com/"
[dependencies]
clap = "2.33.0"
console = "0.9.1"
solana-clap-utils = { path = "../clap-utils", version = "0.21.1" }
solana-core = { path = "../core", version = "0.21.1" }
solana-logger = { path = "../logger", version = "0.21.1" }
solana-metrics = { path = "../metrics", version = "0.21.1" }
solana-net-utils = { path = "../net-utils", version = "0.21.1" }
solana-sdk = { path = "../sdk", version = "0.21.1" }
solana-clap-utils = { path = "../clap-utils", version = "0.21.8" }
solana-core = { path = "../core", version = "0.21.8" }
solana-logger = { path = "../logger", version = "0.21.8" }
solana-metrics = { path = "../metrics", version = "0.21.8" }
solana-net-utils = { path = "../net-utils", version = "0.21.8" }
solana-sdk = { path = "../sdk", version = "0.21.8" }

View File

@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-banking-bench"
version = "0.21.1"
version = "0.21.8"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -10,11 +10,11 @@ homepage = "https://solana.com/"
[dependencies]
log = "0.4.6"
rayon = "1.2.0"
solana-core = { path = "../core", version = "0.21.1" }
solana-ledger = { path = "../ledger", version = "0.21.1" }
solana-logger = { path = "../logger", version = "0.21.1" }
solana-runtime = { path = "../runtime", version = "0.21.1" }
solana-measure = { path = "../measure", version = "0.21.1" }
solana-sdk = { path = "../sdk", version = "0.21.1" }
solana-core = { path = "../core", version = "0.21.8" }
solana-ledger = { path = "../ledger", version = "0.21.8" }
solana-logger = { path = "../logger", version = "0.21.8" }
solana-runtime = { path = "../runtime", version = "0.21.8" }
solana-measure = { path = "../measure", version = "0.21.8" }
solana-sdk = { path = "../sdk", version = "0.21.8" }
rand = "0.6.5"
crossbeam-channel = "0.3"

View File

@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-exchange"
version = "0.21.1"
version = "0.21.8"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -23,19 +23,19 @@ serde = "1.0.102"
serde_derive = "1.0.102"
serde_json = "1.0.41"
serde_yaml = "0.8.11"
solana-clap-utils = { path = "../clap-utils", version = "0.21.1" }
solana-core = { path = "../core", version = "0.21.1" }
solana-genesis = { path = "../genesis", version = "0.21.1" }
solana-client = { path = "../client", version = "0.21.1" }
solana-drone = { path = "../drone", version = "0.21.1" }
solana-exchange-program = { path = "../programs/exchange", version = "0.21.1" }
solana-logger = { path = "../logger", version = "0.21.1" }
solana-metrics = { path = "../metrics", version = "0.21.1" }
solana-net-utils = { path = "../net-utils", version = "0.21.1" }
solana-runtime = { path = "../runtime", version = "0.21.1" }
solana-sdk = { path = "../sdk", version = "0.21.1" }
solana-clap-utils = { path = "../clap-utils", version = "0.21.8" }
solana-core = { path = "../core", version = "0.21.8" }
solana-genesis = { path = "../genesis", version = "0.21.8" }
solana-client = { path = "../client", version = "0.21.8" }
solana-drone = { path = "../drone", version = "0.21.8" }
solana-exchange-program = { path = "../programs/exchange", version = "0.21.8" }
solana-logger = { path = "../logger", version = "0.21.8" }
solana-metrics = { path = "../metrics", version = "0.21.8" }
solana-net-utils = { path = "../net-utils", version = "0.21.8" }
solana-runtime = { path = "../runtime", version = "0.21.8" }
solana-sdk = { path = "../sdk", version = "0.21.8" }
untrusted = "0.7.0"
ws = "0.9.1"
[dev-dependencies]
solana-local-cluster = { path = "../local-cluster", version = "0.21.1" }
solana-local-cluster = { path = "../local-cluster", version = "0.21.8" }

View File

@@ -2,14 +2,14 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-streamer"
version = "0.21.1"
version = "0.21.8"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
[dependencies]
clap = "2.33.0"
solana-clap-utils = { path = "../clap-utils", version = "0.21.1" }
solana-core = { path = "../core", version = "0.21.1" }
solana-logger = { path = "../logger", version = "0.21.1" }
solana-net-utils = { path = "../net-utils", version = "0.21.1" }
solana-clap-utils = { path = "../clap-utils", version = "0.21.8" }
solana-core = { path = "../core", version = "0.21.8" }
solana-logger = { path = "../logger", version = "0.21.8" }
solana-net-utils = { path = "../net-utils", version = "0.21.8" }

View File

@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-tps"
version = "0.21.1"
version = "0.21.8"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -16,24 +16,24 @@ serde = "1.0.102"
serde_derive = "1.0.102"
serde_json = "1.0.41"
serde_yaml = "0.8.11"
solana-clap-utils = { path = "../clap-utils", version = "0.21.1" }
solana-core = { path = "../core", version = "0.21.1" }
solana-genesis = { path = "../genesis", version = "0.21.1" }
solana-client = { path = "../client", version = "0.21.1" }
solana-drone = { path = "../drone", version = "0.21.1" }
solana-librapay-api = { path = "../programs/librapay_api", version = "0.21.1", optional = true }
solana-logger = { path = "../logger", version = "0.21.1" }
solana-metrics = { path = "../metrics", version = "0.21.1" }
solana-measure = { path = "../measure", version = "0.21.1" }
solana-net-utils = { path = "../net-utils", version = "0.21.1" }
solana-runtime = { path = "../runtime", version = "0.21.1" }
solana-sdk = { path = "../sdk", version = "0.21.1" }
solana-move-loader-program = { path = "../programs/move_loader", version = "0.21.1", optional = true }
solana-clap-utils = { path = "../clap-utils", version = "0.21.8" }
solana-core = { path = "../core", version = "0.21.8" }
solana-genesis = { path = "../genesis", version = "0.21.8" }
solana-client = { path = "../client", version = "0.21.8" }
solana-drone = { path = "../drone", version = "0.21.8" }
solana-librapay-api = { path = "../programs/librapay_api", version = "0.21.8", optional = true }
solana-logger = { path = "../logger", version = "0.21.8" }
solana-metrics = { path = "../metrics", version = "0.21.8" }
solana-measure = { path = "../measure", version = "0.21.8" }
solana-net-utils = { path = "../net-utils", version = "0.21.8" }
solana-runtime = { path = "../runtime", version = "0.21.8" }
solana-sdk = { path = "../sdk", version = "0.21.8" }
solana-move-loader-program = { path = "../programs/move_loader", version = "0.21.8", optional = true }
[dev-dependencies]
serial_test = "0.2.0"
serial_test_derive = "0.2.0"
solana-local-cluster = { path = "../local-cluster", version = "0.21.1" }
solana-local-cluster = { path = "../local-cluster", version = "0.21.8" }
[features]
move = ["solana-librapay-api", "solana-move-loader-program"]

View File

@@ -24,6 +24,7 @@ use std::{
cmp,
collections::VecDeque,
net::SocketAddr,
process::exit,
sync::{
atomic::{AtomicBool, AtomicIsize, AtomicUsize, Ordering},
Arc, RwLock,
@@ -88,8 +89,13 @@ where
let clients: Vec<_> = clients.into_iter().map(Arc::new).collect();
let client = &clients[0];
let start = gen_keypairs.len() - (tx_count * 2) as usize;
let keypairs = &gen_keypairs[start..];
let mut source_keypair_chunks: Vec<Vec<&Keypair>> = Vec::new();
let mut dest_keypair_chunks: Vec<VecDeque<&Keypair>> = Vec::new();
assert!(gen_keypairs.len() >= 2 * tx_count);
for chunk in gen_keypairs.chunks_exact(2 * tx_count) {
source_keypair_chunks.push(chunk[..tx_count].iter().collect());
dest_keypair_chunks.push(chunk[tx_count..].iter().collect());
}
let first_tx_count = loop {
match client.get_transaction_count() {
@@ -126,9 +132,23 @@ where
let shared_txs: SharedTransactions = Arc::new(RwLock::new(VecDeque::new()));
let recent_blockhash = Arc::new(RwLock::new(get_recent_blockhash(client.as_ref()).0));
let shared_tx_active_thread_count = Arc::new(AtomicIsize::new(0));
let total_tx_sent_count = Arc::new(AtomicUsize::new(0));
let blockhash_thread = {
let exit_signal = exit_signal.clone();
let recent_blockhash = recent_blockhash.clone();
let client = client.clone();
let id = id.pubkey();
Builder::new()
.name("solana-blockhash-poller".to_string())
.spawn(move || {
poll_blockhash(&exit_signal, &recent_blockhash, &client, &id);
})
.unwrap()
};
let s_threads: Vec<_> = (0..threads)
.map(|_| {
let exit_signal = exit_signal.clone();
@@ -154,58 +174,40 @@ where
// generate and send transactions for the specified duration
let start = Instant::now();
let keypair_chunks = source_keypair_chunks.len() as u64;
let mut reclaim_lamports_back_to_source_account = false;
let mut i = keypair0_balance;
let mut blockhash = Hash::default();
let mut blockhash_time;
while start.elapsed() < duration {
// ping-pong between source and destination accounts for each loop iteration
// this seems to be faster than trying to determine the balance of individual
// accounts
let len = tx_count as usize;
blockhash_time = Instant::now();
if let Ok((new_blockhash, _fee_calculator)) = client.get_new_blockhash(&blockhash) {
blockhash = new_blockhash;
} else {
if blockhash_time.elapsed().as_secs() > 30 {
panic!("Blockhash is not updating");
}
sleep(Duration::from_millis(100));
continue;
}
datapoint_debug!(
"bench-tps-get_blockhash",
("duration", duration_as_us(&blockhash_time.elapsed()), i64)
);
blockhash_time = Instant::now();
let balance = client.get_balance(&id.pubkey()).unwrap_or(0);
metrics_submit_lamport_balance(balance);
datapoint_debug!(
"bench-tps-get_balance",
("duration", duration_as_us(&blockhash_time.elapsed()), i64)
);
let chunk_index = (i % keypair_chunks) as usize;
generate_txs(
&shared_txs,
&blockhash,
&keypairs[..len],
&keypairs[len..],
&recent_blockhash,
&source_keypair_chunks[chunk_index],
&dest_keypair_chunks[chunk_index],
threads,
reclaim_lamports_back_to_source_account,
&libra_args,
);
// In sustained mode overlap the transfers with generation
// this has higher average performance but lower peak performance
// in tested environments.
if !sustained {
// In sustained mode, overlap the transfers with generation. This has higher average
// performance but lower peak performance in tested environments.
if sustained {
// Ensure that we don't generate more transactions than we can handle.
while shared_txs.read().unwrap().len() > 2 * threads {
sleep(Duration::from_millis(1));
}
} else {
while shared_tx_active_thread_count.load(Ordering::Relaxed) > 0 {
sleep(Duration::from_millis(1));
}
}
// Rotate destination keypairs so that the next round of transactions will have different
// transaction signatures even when blockhash is reused.
dest_keypair_chunks[chunk_index].rotate_left(1);
i += 1;
if should_switch_directions(num_lamports_per_account, i) {
if should_switch_directions(num_lamports_per_account, keypair_chunks, i) {
reclaim_lamports_back_to_source_account = !reclaim_lamports_back_to_source_account;
}
}
@@ -228,6 +230,11 @@ where
}
}
info!("Waiting for blockhash thread...");
if let Err(err) = blockhash_thread.join() {
info!(" join() failed with: {:?}", err);
}
let balance = client.get_balance(&id.pubkey()).unwrap_or(0);
metrics_submit_lamport_balance(balance);
@@ -252,8 +259,8 @@ fn metrics_submit_lamport_balance(lamport_balance: u64) {
#[cfg(feature = "move")]
fn generate_move_txs(
source: &[Keypair],
dest: &[Keypair],
source: &[&Keypair],
dest: &VecDeque<&Keypair>,
reclaim: bool,
move_keypairs: &[Keypair],
libra_pay_program_id: &Pubkey,
@@ -297,8 +304,8 @@ fn generate_move_txs(
}
fn generate_system_txs(
source: &[Keypair],
dest: &[Keypair],
source: &[&Keypair],
dest: &VecDeque<&Keypair>,
reclaim: bool,
blockhash: &Hash,
) -> Vec<(Transaction, u64)> {
@@ -321,15 +328,19 @@ fn generate_system_txs(
fn generate_txs(
shared_txs: &SharedTransactions,
blockhash: &Hash,
source: &[Keypair],
dest: &[Keypair],
blockhash: &Arc<RwLock<Hash>>,
source: &[&Keypair],
dest: &VecDeque<&Keypair>,
threads: usize,
reclaim: bool,
libra_args: &Option<LibraKeys>,
) {
let blockhash = *blockhash.read().unwrap();
let tx_count = source.len();
info!("Signing transactions... {} (reclaim={})", tx_count, reclaim);
info!(
"Signing transactions... {} (reclaim={}, blockhash={})",
tx_count, reclaim, &blockhash
);
let signing_start = Instant::now();
let transactions = if let Some((
@@ -353,11 +364,11 @@ fn generate_txs(
&_libra_keys,
_libra_pay_program_id,
&_libra_genesis_keypair.pubkey(),
blockhash,
&blockhash,
)
}
} else {
generate_system_txs(source, dest, reclaim, blockhash)
generate_system_txs(source, dest, reclaim, &blockhash)
};
let duration = signing_start.elapsed();
@@ -386,6 +397,48 @@ fn generate_txs(
}
}
fn poll_blockhash<T: Client>(
exit_signal: &Arc<AtomicBool>,
blockhash: &Arc<RwLock<Hash>>,
client: &Arc<T>,
id: &Pubkey,
) {
let mut blockhash_last_updated = Instant::now();
let mut last_error_log = Instant::now();
loop {
let blockhash_updated = {
let old_blockhash = *blockhash.read().unwrap();
if let Ok((new_blockhash, _fee)) = client.get_new_blockhash(&old_blockhash) {
*blockhash.write().unwrap() = new_blockhash;
blockhash_last_updated = Instant::now();
true
} else {
if blockhash_last_updated.elapsed().as_secs() > 120 {
eprintln!("Blockhash is stuck");
exit(1)
} else if blockhash_last_updated.elapsed().as_secs() > 30
&& last_error_log.elapsed().as_secs() >= 1
{
last_error_log = Instant::now();
error!("Blockhash is not updating");
}
false
}
};
if blockhash_updated {
let balance = client.get_balance(id).unwrap_or(0);
metrics_submit_lamport_balance(balance);
}
if exit_signal.load(Ordering::Relaxed) {
break;
}
sleep(Duration::from_millis(50));
}
}
fn do_tx_transfers<T: Client>(
exit_signal: &Arc<AtomicBool>,
shared_txs: &SharedTransactions,
@@ -398,11 +451,10 @@ fn do_tx_transfers<T: Client>(
if thread_batch_sleep_ms > 0 {
sleep(Duration::from_millis(thread_batch_sleep_ms as u64));
}
let txs;
{
let txs = {
let mut shared_txs_wl = shared_txs.write().expect("write lock in do_tx_transfers");
txs = shared_txs_wl.pop_front();
}
shared_txs_wl.pop_front()
};
if let Some(txs0) = txs {
shared_tx_thread_count.fetch_add(1, Ordering::Relaxed);
info!(
@@ -758,11 +810,15 @@ fn compute_and_report_stats(
);
}
// First transfer 3/4 of the lamports to the dest accounts
// then ping-pong 1/4 of the lamports back to the other account
// this leaves 1/4 lamport buffer in each account
fn should_switch_directions(num_lamports_per_account: u64, i: u64) -> bool {
i % (num_lamports_per_account / 4) == 0 && (i >= (3 * num_lamports_per_account) / 4)
// First transfer 2/3 of the lamports to the dest accounts
// then ping-pong 1/3 of the lamports back to the other account
// this leaves 1/3 lamport buffer in each account
fn should_switch_directions(num_lamports_per_account: u64, keypair_chunks: u64, i: u64) -> bool {
if i < keypair_chunks * (2 * num_lamports_per_account) / 3 {
return false;
}
i % (keypair_chunks * num_lamports_per_account / 3) == 0
}
pub fn generate_keypairs(seed_keypair: &Keypair, count: u64) -> (Vec<Keypair>, u64) {
@@ -897,9 +953,12 @@ fn fund_move_keys<T: Client>(
info!("funded libra funding key {}", i);
}
let tx_count = keypairs.len();
let amount = total / (tx_count as u64);
for (i, keys) in keypairs[..tx_count].chunks(NUM_FUNDING_KEYS).enumerate() {
let keypair_count = keypairs.len();
let amount = total / (keypair_count as u64);
for (i, keys) in keypairs[..keypair_count]
.chunks(NUM_FUNDING_KEYS)
.enumerate()
{
for (j, key) in keys.iter().enumerate() {
let tx = librapay_transaction::transfer(
libra_pay_program_id,
@@ -949,18 +1008,18 @@ pub fn generate_and_fund_keypairs<T: Client>(
client: &T,
drone_addr: Option<SocketAddr>,
funding_key: &Keypair,
tx_count: usize,
keypair_count: usize,
lamports_per_account: u64,
use_move: bool,
) -> Result<(Vec<Keypair>, Option<LibraKeys>, u64)> {
info!("Creating {} keypairs...", tx_count * 2);
let (mut keypairs, extra) = generate_keypairs(funding_key, tx_count as u64 * 2);
info!("Creating {} keypairs...", keypair_count);
let (mut keypairs, extra) = generate_keypairs(funding_key, keypair_count as u64);
info!("Get lamports...");
// Sample the first keypair, see if it has lamports, if so then resume.
// This logic is to prevent lamport loss on repeated solana-bench-tps executions
let last_keypair_balance = client
.get_balance(&keypairs[tx_count * 2 - 1].pubkey())
.get_balance(&keypairs[keypair_count - 1].pubkey())
.unwrap_or(0);
#[cfg(feature = "move")]
@@ -999,7 +1058,7 @@ pub fn generate_and_fund_keypairs<T: Client>(
// Still fund the solana ones which will be used for fees.
let seed = [0u8; 32];
let mut rnd = GenKeys::new(seed);
let move_keypairs = rnd.gen_n_keypairs(tx_count as u64 * 2);
let move_keypairs = rnd.gen_n_keypairs(keypair_count as u64);
fund_move_keys(
client,
funding_key,
@@ -1032,7 +1091,7 @@ pub fn generate_and_fund_keypairs<T: Client>(
}
// 'generate_keypairs' generates extra keys to be able to have size-aligned funding batches for fund_keys.
keypairs.truncate(2 * tx_count);
keypairs.truncate(keypair_count);
Ok((keypairs, move_keypairs_ret, last_keypair_balance))
}
@@ -1048,17 +1107,21 @@ mod tests {
#[test]
fn test_switch_directions() {
assert_eq!(should_switch_directions(20, 0), false);
assert_eq!(should_switch_directions(20, 1), false);
assert_eq!(should_switch_directions(20, 14), false);
assert_eq!(should_switch_directions(20, 15), true);
assert_eq!(should_switch_directions(20, 16), false);
assert_eq!(should_switch_directions(20, 19), false);
assert_eq!(should_switch_directions(20, 20), true);
assert_eq!(should_switch_directions(20, 21), false);
assert_eq!(should_switch_directions(20, 99), false);
assert_eq!(should_switch_directions(20, 100), true);
assert_eq!(should_switch_directions(20, 101), false);
assert_eq!(should_switch_directions(30, 1, 0), false);
assert_eq!(should_switch_directions(30, 1, 1), false);
assert_eq!(should_switch_directions(30, 1, 20), true);
assert_eq!(should_switch_directions(30, 1, 21), false);
assert_eq!(should_switch_directions(30, 1, 30), true);
assert_eq!(should_switch_directions(30, 1, 90), true);
assert_eq!(should_switch_directions(30, 1, 91), false);
assert_eq!(should_switch_directions(30, 2, 0), false);
assert_eq!(should_switch_directions(30, 2, 1), false);
assert_eq!(should_switch_directions(30, 2, 20), false);
assert_eq!(should_switch_directions(30, 2, 40), true);
assert_eq!(should_switch_directions(30, 2, 90), false);
assert_eq!(should_switch_directions(30, 2, 100), true);
assert_eq!(should_switch_directions(30, 2, 101), false);
}
#[test]
@@ -1072,8 +1135,9 @@ mod tests {
config.tx_count = 10;
config.duration = Duration::from_secs(5);
let keypair_count = config.tx_count * config.keypair_multiplier;
let (keypairs, _move_keypairs, _keypair_balance) =
generate_and_fund_keypairs(&clients[0], None, &config.id, config.tx_count, 20, false)
generate_and_fund_keypairs(&clients[0], None, &config.id, keypair_count, 20, false)
.unwrap();
do_bench_tps(clients, config, keypairs, 0, None);
@@ -1084,11 +1148,11 @@ mod tests {
let (genesis_config, id) = create_genesis_config(10_000);
let bank = Bank::new(&genesis_config);
let client = BankClient::new(bank);
let tx_count = 10;
let keypair_count = 20;
let lamports = 20;
let (keypairs, _move_keypairs, _keypair_balance) =
generate_and_fund_keypairs(&client, None, &id, tx_count, lamports, false).unwrap();
generate_and_fund_keypairs(&client, None, &id, keypair_count, lamports, false).unwrap();
for kp in &keypairs {
assert_eq!(
@@ -1107,11 +1171,11 @@ mod tests {
genesis_config.fee_calculator = fee_calculator;
let bank = Bank::new(&genesis_config);
let client = BankClient::new(bank);
let tx_count = 10;
let keypair_count = 20;
let lamports = 20;
let (keypairs, _move_keypairs, _keypair_balance) =
generate_and_fund_keypairs(&client, None, &id, tx_count, lamports, false).unwrap();
generate_and_fund_keypairs(&client, None, &id, keypair_count, lamports, false).unwrap();
let max_fee = client
.get_recent_blockhash_with_commitment(CommitmentConfig::recent())

View File

@@ -15,6 +15,7 @@ pub struct Config {
pub num_nodes: usize,
pub duration: Duration,
pub tx_count: usize,
pub keypair_multiplier: usize,
pub thread_batch_sleep_ms: usize,
pub sustained: bool,
pub client_ids_and_stake_file: String,
@@ -36,6 +37,7 @@ impl Default for Config {
num_nodes: 1,
duration: Duration::new(std::u64::MAX, 0),
tx_count: 50_000,
keypair_multiplier: 8,
thread_batch_sleep_ms: 1000,
sustained: false,
client_ids_and_stake_file: String::new(),
@@ -122,6 +124,13 @@ pub fn build_args<'a, 'b>(version: &'b str) -> App<'a, 'b> {
.takes_value(true)
.help("Number of transactions to send per batch")
)
.arg(
Arg::with_name("keypair_multiplier")
.long("keypair-multiplier")
.value_name("NUM")
.takes_value(true)
.help("Multiply by transaction count to determine number of keypairs to create")
)
.arg(
Arg::with_name("thread-batch-sleep-ms")
.short("z")
@@ -208,7 +217,15 @@ pub fn extract_args<'a>(matches: &ArgMatches<'a>) -> Config {
}
if let Some(s) = matches.value_of("tx_count") {
args.tx_count = s.to_string().parse().expect("can't parse tx_account");
args.tx_count = s.to_string().parse().expect("can't parse tx_count");
}
if let Some(s) = matches.value_of("keypair_multiplier") {
args.keypair_multiplier = s
.to_string()
.parse()
.expect("can't parse keypair-multiplier");
assert!(args.keypair_multiplier >= 2);
}
if let Some(t) = matches.value_of("thread-batch-sleep-ms") {

View File

@@ -24,6 +24,7 @@ fn main() {
id,
num_nodes,
tx_count,
keypair_multiplier,
client_ids_and_stake_file,
write_to_client_file,
read_from_client_file,
@@ -34,9 +35,10 @@ fn main() {
..
} = &cli_config;
let keypair_count = *tx_count * keypair_multiplier;
if *write_to_client_file {
info!("Generating {} keypairs", *tx_count * 2);
let (keypairs, _) = generate_keypairs(&id, *tx_count as u64 * 2);
info!("Generating {} keypairs", keypair_count);
let (keypairs, _) = generate_keypairs(&id, keypair_count as u64);
let num_accounts = keypairs.len() as u64;
let max_fee =
FeeCalculator::new(*target_lamports_per_signature, 0).max_lamports_per_signature;
@@ -102,10 +104,10 @@ fn main() {
last_balance = primordial_account.balance;
});
if keypairs.len() < tx_count * 2 {
if keypairs.len() < keypair_count {
eprintln!(
"Expected {} accounts in {}, only received {} (--tx_count mismatch?)",
tx_count * 2,
keypair_count,
client_ids_and_stake_file,
keypairs.len(),
);
@@ -121,7 +123,7 @@ fn main() {
&client,
Some(*drone_addr),
&id,
*tx_count,
keypair_count,
*num_lamports_per_account,
*use_move,
)

View File

@@ -47,11 +47,12 @@ fn test_bench_tps_local_cluster(config: Config) {
let lamports_per_account = 100;
let keypair_count = config.tx_count * config.keypair_multiplier;
let (keypairs, move_keypairs, _keypair_balance) = generate_and_fund_keypairs(
&client,
Some(drone_addr),
&config.id,
config.tx_count,
keypair_count,
lamports_per_account,
config.use_move,
)

View File

@@ -177,7 +177,7 @@ $ solana send-timestamp <PUBKEY> <PROCESS_ID> --date 2018-12-24T23:59:00
## Usage
### solana-cli
```text
solana-cli 0.21.1
solana-cli 0.21.8
Blockchain, Rebuilt for Scale
USAGE:

View File

@@ -21,6 +21,7 @@ To interact with a Solana node inside a JavaScript application, use the [solana-
* [getBlockTime](jsonrpc-api.md#getblocktime)
* [getClusterNodes](jsonrpc-api.md#getclusternodes)
* [getConfirmedBlock](jsonrpc-api.md#getconfirmedblock)
* [getConfirmedBlocks](jsonrpc-api.md#getconfirmedblocks)
* [getEpochInfo](jsonrpc-api.md#getepochinfo)
* [getEpochSchedule](jsonrpc-api.md#getepochschedule)
* [getGenesisHash](jsonrpc-api.md#getgenesishash)
@@ -156,7 +157,7 @@ The result value will be an RpcResponse JSON object containing an AccountInfo JS
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getAccountInfo", "params":["2gVkYWexTHR5Hb2aLeQN3tnngvWzisFKXDUPrgMHpdST"]}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":{"context":{"slot":1},"value":{"executable":false,"owner":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lamports":1,"data":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0.21.1,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]}},"id":1}
{"jsonrpc":"2.0","result":{"context":{"slot":1},"value":{"executable":false,"owner":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lamports":1,"data":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0.21.8,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]}},"id":1}
```
### getBalance
@ -295,6 +296,31 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc": "2.0","id":1,"m
{"jsonrpc":"2.0","result":{"blockhash":[165,245,120,183,32,205,89,222,249,114,229,49,250,231,149,122,156,232,181,83,238,194,157,153,7,213,180,54,177,6,25,101],"parentSlot":429,"previousBlockhash":[21,108,181,90,139,241,212,203,45,78,232,29,161,31,159,188,110,82,81,11,250,74,47,140,188,28,23,96,251,164,208,166],"transactions":[[{"message":{"accountKeys":[[5],[219,181,202,40,52,148,34,136,186,59,137,160,250,225,234,17,244,160,88,116,24,176,30,227,68,11,199,38,141,68,131,228],[233,48,179,56,91,40,254,206,53,48,196,176,119,248,158,109,121,77,11,69,108,160,128,27,228,122,146,249,53,184,68,87],[6,167,213,23,25,47,10,175,198,242,101,227,251,119,204,122,218,130,197,41,208,190,59,19,110,45,0,85,32,0,0,0],[6,167,213,23,24,199,116,201,40,86,99,152,105,29,94,182,139,94,184,163,155,75,109,92,115,85,91,33,0,0,0,0],[7,97,72,29,53,116,116,187,124,77,118,36,235,211,189,179,216,53,94,115,209,16,67,252,13,163,83,128,0,0,0,0]],"header":{"numReadonlySignedAccounts":0,"numReadonlyUnsignedAccounts":3,"numRequiredSignatures":2},"instructions":[[1],{"accounts":[[3],1,2,3],"data":[[52],2,0,0,0,1,0,0,0,0,0,0,0,173,1,0,0,0,0,0,0,86,55,9,248,142,238,135,114,103,83,247,124,67,68,163,233,55,41,59,129,64,50,110,221,234,234,27,213,205,193,219,50],"program_id_index":4}],"recentBlockhash":[21,108,181,90,139,241,212,203,45,78,232,29,161,31,159,188,110,82,81,11,250,74,47,140,188,28,23,96,251,164,208,166]},"signatures":[[2],[119,9,95,108,35,95,7,1,69,101,65,45,5,204,61,114,172,88,123,238,32,201,135,229,57,50,13,21,106,216,129,183,238,43,37,101,148,81,56,232,88,136,80,65,46,189,39,106,94,13,238,54,186,48,118,186,0,62,121,122,172,171,66,5],[78,40,77,250,10,93,6,157,48,173,100,40,251,9,7,218,7,184,43,169,76,240,254,34,235,48,41,175,119,126,75,107,106,248,45,161,119,48,174,213,57,69,111,225,245,60,148,73,124,82,53,6,203,126,120,180,111,169,89,64,29,23,237,13]]},{"fee":100000,"status":{"Ok":null}}]]},"id":1}
```
### getConfirmedBlocks
Returns a list of confirmed blocks
#### Parameters:
* `integer` - start_slot, as u64 integer
* `integer` - (optional) end_slot, as u64 integer
#### Results:
The result field will be an array of u64 integers listing confirmed blocks
between start_slot and either end_slot, if provided, or latest confirmed block,
inclusive.
#### Example:
```bash
// Request
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc": "2.0","id":1,"method":"getConfirmedBlocks","params":[5, 10]}' localhost:8899
// Result
{"jsonrpc":"2.0","result":[5,6,7,8,9,10],"id":1}
```
### getEpochInfo
Returns information about the current epoch
@ -373,15 +399,18 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "m
### getLeaderSchedule
Returns the leader schedule for the current epoch
Returns the leader schedule for an epoch
#### Parameters:
* `slot` - (optional) Fetch the leader schedule for the epoch that corresponds to the provided slot. If unspecified, the leader schedule for the current epoch is fetched
* `object` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
#### Results:
The result field will be an array of leader public keys \(as base-58 encoded strings\) for each slot in the current epoch
The result field will be a dictionary of leader public keys \(as base-58 encoded
strings\) and their corresponding leader slot indices as values (indices are to
the first slot in the requested epoch)
#### Example:
@ -390,7 +419,7 @@ The result field will be an array of leader public keys \(as base-58 encoded str
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getLeaderSchedule"}' http://localhost:8899
// Result
{"jsonrpc":"2.0","result":[...],"id":1}
{"jsonrpc":"2.0","result":{"4Qkev8aNZcqFNSRhQzwyLMFSsi94jHqE8WNVTJzTP99F":[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63]},"id":1}
```
### getMinimumBalanceForRentExemption
@ -824,7 +853,7 @@ Subscribe to an account to receive notifications when the lamports or data for a
#### Notification Format:
```bash
{"jsonrpc": "2.0","method": "accountNotification", "params": {"result": {"executable":false,"owner":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lamports":1,"data":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0.21.1,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]},"subscription":0}}
{"jsonrpc": "2.0","method": "accountNotification", "params": {"result": {"executable":false,"owner":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lamports":1,"data":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0.21.8,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]},"subscription":0}}
```
### accountUnsubscribe
@ -882,7 +911,7 @@ Subscribe to a program to receive notifications when the lamports or data for a
* `object` - account info JSON object \(see [getAccountInfo](jsonrpc-api.md#getaccountinfo) for field details\)
```bash
{"jsonrpc":"2.0","method":"programNotification","params":{{"result":["8Rshv2oMkPu5E4opXTRyuyBeZBqQ4S477VG26wUTFxUM",{"executable":false,"lamports":1,"owner":[129,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"data":[1,1,1,0,0,0,0,0,0,0.21.1,0,0,0,0,0,0,50,48,49,56,45,49,50,45,50,52,84,50,51,58,53,57,58,48,48,90,235,233,39,152,15,44,117,176,41,89,100,86,45,61,2,44,251,46,212,37,35,118,163,189,247,84,27,235,178,62,55,89,0,0,0,0,50,0,0,0,0,0,0,0,235,233,39,152,15,44,117,176,41,89,100,86,45,61,2,44,251,46,212,37,35,118,163,189,247,84,27,235,178,62,45,4,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]}],"subscription":0}}
{"jsonrpc":"2.0","method":"programNotification","params":{{"result":["8Rshv2oMkPu5E4opXTRyuyBeZBqQ4S477VG26wUTFxUM",{"executable":false,"lamports":1,"owner":[129,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"data":[1,1,1,0,0,0,0,0,0,0.21.8,0,0,0,0,0,0,50,48,49,56,45,49,50,45,50,52,84,50,51,58,53,57,58,48,48,90,235,233,39,152,15,44,117,176,41,89,100,86,45,61,2,44,251,46,212,37,35,118,163,189,247,84,27,235,178,62,55,89,0,0,0,0,50,0,0,0,0,0,0,0,235,233,39,152,15,44,117,176,41,89,100,86,45,61,2,44,251,46,212,37,35,118,163,189,247,84,27,235,178,62,45,4,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]}],"subscription":0}}
```
### programUnsubscribe

View File

@ -81,8 +81,8 @@ With a FEC rate: `16:4`
With FEC rate of `16:16`
* `G = 12800`
* `S = SUM of i=0 -> 32 for binomial(prob_failure = 0.2775, trials = 64, failures = i) = 0.002132`
* `B = (1 - 0.002132) ^ (12800 / 32) = 0.42583`
* `S = SUM of i=0 -> 32 for binomial(prob_failure = 0.2775, trials = 64, failures = i) = 0.0.21.8`
* `B = (1 - 0.0.21.8) ^ (12800 / 32) = 0.42583`
With FEC rate of `32:32`
* `G = 12800`

View File

@ -2,7 +2,7 @@
This design describes Solana's _Tower BFT_ algorithm. It addresses the following problems:
* Some forks may not end up accepted by the super-majority of the cluster, and voters need to recover from voting on such forks.
* Some forks may not end up accepted by the supermajority of the cluster, and voters need to recover from voting on such forks.
* Many forks may be votable by different voters, and each voter may see a different set of votable forks. The selected forks should eventually converge for the cluster.
* Reward based votes have an associated risk. Voters should have the ability to configure how much risk they take on.
* The [cost of rollback](tower-bft.md#cost-of-rollback) needs to be computable. It is important to clients that rely on some measurable form of Consistency. The costs to break consistency need to be computable, and increase super-linearly for older votes.

View File

@ -2,7 +2,7 @@
Follow this guide to setup Solana's key generation tool called `solana-keygen`
{% hint style="warn" %}
After installation, ensure your version is `0.21.1` or higher by running `solana-keygen -V`
After installation, ensure your version is `0.21.8` or higher by running `solana-keygen -V`
{% endhint %}
## Download

View File

@ -91,5 +91,5 @@ This is an area currently under exploration
As discussed in the [Economic Design](../implemented-proposals/ed_overview/) section, annual validator interest rates are to be specified as a function of total percentage of circulating supply that has been staked. The cluster rewards validators who are online and actively participating in the validation process throughout the entirety of their _validation period_. For validators that go offline/fail to validate transactions during this period, their annual reward is effectively reduced.
Similarly, we may consider an algorithmic reduction in a validator's active amount staked amount in the case that they are offline. I.e. if a validator is inactive for some amount of time, either due to a partition or otherwise, the amount of their stake that is considered active \(eligible to earn rewards\) may be reduced. This design would be structured to help long-lived partitions to eventually reach finality on their respective chains as the % of non-voting total stake is reduced over time until a super-majority can be achieved by the active validators in each partition. Similarly, upon re-engaging, the active amount staked will come back online at some defined rate. Different rates of stake reduction may be considered depending on the size of the partition/active set.
Similarly, we may consider an algorithmic reduction in a validator's active amount staked amount in the case that they are offline. I.e. if a validator is inactive for some amount of time, either due to a partition or otherwise, the amount of their stake that is considered active \(eligible to earn rewards\) may be reduced. This design would be structured to help long-lived partitions to eventually reach finality on their respective chains as the % of non-voting total stake is reduced over time until a supermajority can be achieved by the active validators in each partition. Similarly, upon re-engaging, the active amount staked will come back online at some defined rate. Different rates of stake reduction may be considered depending on the size of the partition/active set.

View File

@ -1,6 +1,6 @@
[package]
name = "solana-chacha-sys"
version = "0.21.1"
version = "0.21.8"
description = "Solana chacha-sys"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"

View File

@ -72,10 +72,14 @@ ARGS+=(
--env CI_JOB_ID
--env CI_PULL_REQUEST
--env CI_REPO_SLUG
--env CODECOV_TOKEN
--env CRATES_IO_TOKEN
)
# Also propagate environment variables needed for codecov
# https://docs.codecov.io/docs/testing-with-docker#section-codecov-inside-docker
# We normalize CI to `1`; but codecov expects it to be `true` to detect Buildkite...
CODECOV_ENVS=$(CI=true bash <(curl -s https://codecov.io/env))
if $INTERACTIVE; then
if [[ -n $1 ]]; then
echo
@ -83,8 +87,10 @@ if $INTERACTIVE; then
echo
fi
set -x
exec docker run --interactive --tty "${ARGS[@]}" "$IMAGE" bash
# shellcheck disable=SC2086
exec docker run --interactive --tty "${ARGS[@]}" $CODECOV_ENVS "$IMAGE" bash
fi
set -x
exec docker run "${ARGS[@]}" "$IMAGE" "$@"
# shellcheck disable=SC2086
exec docker run "${ARGS[@]}" $CODECOV_ENVS "$IMAGE" "$@"

View File

@ -33,7 +33,7 @@ else
fi
if [[ -z $CHANNEL_OR_TAG ]]; then
echo +++ Unable to determine channel to publish into, exiting.
echo +++ Unable to determine channel or tag to publish into, exiting.
exit 0
fi

View File

@ -41,7 +41,8 @@ if [[ -z "$CODECOV_TOKEN" ]]; then
echo "^^^ +++"
echo CODECOV_TOKEN undefined, codecov.io upload skipped
else
bash <(curl -s https://codecov.io/bash) -X gcov -f target/cov/lcov.info
# We normalize CI to `1`; but codecov expects it to be `true` to detect Buildkite...
CI=true bash <(curl -s https://codecov.io/bash) -X gcov -f target/cov/lcov.info
annotate --style success --context codecov.io \
"CodeCov report: https://codecov.io/github/solana-labs/solana/commit/${CI_COMMIT:0:9}"

View File

@ -1,6 +1,6 @@
[package]
name = "solana-clap-utils"
version = "0.21.1"
version = "0.21.8"
description = "Solana utilities for the clap"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -12,7 +12,7 @@ edition = "2018"
clap = "2.33.0"
rpassword = "4.0"
semver = "0.9.0"
solana-sdk = { path = "../sdk", version = "0.21.1" }
solana-sdk = { path = "../sdk", version = "0.21.8" }
tiny-bip39 = "0.6.2"
url = "2.1.0"

View File

@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-cli"
description = "Blockchain, Rebuilt for Scale"
version = "0.21.1"
version = "0.21.8"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@ -20,6 +20,7 @@ dirs = "2.0.2"
lazy_static = "1.4.0"
log = "0.4.8"
indicatif = "0.13.0"
humantime = "1.3.0"
num-traits = "0.2"
pretty-hex = "0.1.1"
reqwest = { version = "0.9.22", default-features = false, features = ["rustls-tls"] }
@ -27,24 +28,24 @@ serde = "1.0.102"
serde_derive = "1.0.102"
serde_json = "1.0.41"
serde_yaml = "0.8.11"
solana-budget-program = { path = "../programs/budget", version = "0.21.1" }
solana-clap-utils = { path = "../clap-utils", version = "0.21.1" }
solana-client = { path = "../client", version = "0.21.1" }
solana-config-program = { path = "../programs/config", version = "0.21.1" }
solana-drone = { path = "../drone", version = "0.21.1" }
solana-logger = { path = "../logger", version = "0.21.1" }
solana-net-utils = { path = "../net-utils", version = "0.21.1" }
solana-runtime = { path = "../runtime", version = "0.21.1" }
solana-sdk = { path = "../sdk", version = "0.21.1" }
solana-stake-program = { path = "../programs/stake", version = "0.21.1" }
solana-storage-program = { path = "../programs/storage", version = "0.21.1" }
solana-vote-program = { path = "../programs/vote", version = "0.21.1" }
solana-vote-signer = { path = "../vote-signer", version = "0.21.1" }
solana-budget-program = { path = "../programs/budget", version = "0.21.8" }
solana-clap-utils = { path = "../clap-utils", version = "0.21.8" }
solana-client = { path = "../client", version = "0.21.8" }
solana-config-program = { path = "../programs/config", version = "0.21.8" }
solana-drone = { path = "../drone", version = "0.21.8" }
solana-logger = { path = "../logger", version = "0.21.8" }
solana-net-utils = { path = "../net-utils", version = "0.21.8" }
solana-runtime = { path = "../runtime", version = "0.21.8" }
solana-sdk = { path = "../sdk", version = "0.21.8" }
solana-stake-program = { path = "../programs/stake", version = "0.21.8" }
solana-storage-program = { path = "../programs/storage", version = "0.21.8" }
solana-vote-program = { path = "../programs/vote", version = "0.21.8" }
solana-vote-signer = { path = "../vote-signer", version = "0.21.8" }
url = "2.1.0"
[dev-dependencies]
solana-core = { path = "../core", version = "0.21.1" }
solana-budget-program = { path = "../programs/budget", version = "0.21.1" }
solana-core = { path = "../core", version = "0.21.8" }
solana-budget-program = { path = "../programs/budget", version = "0.21.8" }
tempfile = "3.1.0"
[[bin]]

View File

@ -20,7 +20,7 @@ use solana_drone::drone::request_airdrop_transaction;
use solana_drone::drone_mock::request_airdrop_transaction;
use solana_sdk::{
bpf_loader,
clock::Slot,
clock::{Epoch, Slot},
commitment_config::CommitmentConfig,
fee_calculator::FeeCalculator,
hash::Hash,
@ -100,6 +100,10 @@ pub enum CliCommand {
timeout: Duration,
commitment_config: CommitmentConfig,
},
ShowBlockProduction {
epoch: Option<Epoch>,
slot_limit: Option<u64>,
},
ShowGossip,
ShowValidators {
use_lamports_unit: bool,
@ -173,7 +177,16 @@ pub enum CliCommand {
aggregate: bool,
span: Option<u64>,
},
VoteAuthorize(Pubkey, Pubkey, VoteAuthorize),
VoteAuthorize {
vote_account_pubkey: Pubkey,
new_authorized_pubkey: Pubkey,
vote_authorize: VoteAuthorize,
},
VoteUpdateValidator {
vote_account_pubkey: Pubkey,
new_identity_pubkey: Pubkey,
authorized_voter: KeypairEq,
},
// Wallet Commands
Address,
Airdrop {
@ -299,6 +312,7 @@ pub fn parse_command(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, Box<dyn
("get-slot", Some(matches)) => parse_get_slot(matches),
("get-transaction-count", Some(matches)) => parse_get_transaction_count(matches),
("ping", Some(matches)) => parse_cluster_ping(matches),
("show-block-production", Some(matches)) => parse_show_block_production(matches),
("show-gossip", Some(_matches)) => Ok(CliCommandInfo {
command: CliCommand::ShowGossip,
require_keypair: false,
@ -346,6 +360,7 @@ pub fn parse_command(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, Box<dyn
},
// Vote Commands
("create-vote-account", Some(matches)) => parse_vote_create_account(matches),
("vote-update-validator", Some(matches)) => parse_vote_update_validator(matches),
("vote-authorize-voter", Some(matches)) => {
parse_vote_authorize(matches, VoteAuthorize::Voter)
}
@ -512,20 +527,20 @@ pub type ProcessResult = Result<String, Box<dyn std::error::Error>>;
pub fn check_account_for_fee(
rpc_client: &RpcClient,
config: &CliConfig,
account_pubkey: &Pubkey,
fee_calculator: &FeeCalculator,
message: &Message,
) -> Result<(), Box<dyn error::Error>> {
check_account_for_multiple_fees(rpc_client, config, fee_calculator, &[message])
check_account_for_multiple_fees(rpc_client, account_pubkey, fee_calculator, &[message])
}
fn check_account_for_multiple_fees(
rpc_client: &RpcClient,
config: &CliConfig,
account_pubkey: &Pubkey,
fee_calculator: &FeeCalculator,
messages: &[&Message],
) -> Result<(), Box<dyn error::Error>> {
let balance = rpc_client.retry_get_balance(&config.keypair.pubkey(), 5)?;
let balance = rpc_client.retry_get_balance(account_pubkey, 5)?;
if let Some(lamports) = balance {
if lamports
>= messages
@ -744,7 +759,12 @@ fn process_deploy(
let mut finalize_tx = Transaction::new(&signers, message, blockhash);
messages.push(&finalize_tx.message);
check_account_for_multiple_fees(rpc_client, config, &fee_calculator, &messages)?;
check_account_for_multiple_fees(
rpc_client,
&config.keypair.pubkey(),
&fee_calculator,
&messages,
)?;
trace!("Creating program account");
let result = rpc_client.send_and_confirm_transaction(&mut create_account_tx, &signers);
@ -804,7 +824,12 @@ fn process_pay(
if sign_only {
return_signers(&tx)
} else {
check_account_for_fee(rpc_client, config, &fee_calculator, &tx.message)?;
check_account_for_fee(
rpc_client,
&config.keypair.pubkey(),
&fee_calculator,
&tx.message,
)?;
let result = rpc_client.send_and_confirm_transaction(&mut tx, &[&config.keypair]);
log_instruction_custom_error::<SystemError>(result)
}
@ -838,7 +863,12 @@ fn process_pay(
if sign_only {
return_signers(&tx)
} else {
check_account_for_fee(rpc_client, config, &fee_calculator, &tx.message)?;
check_account_for_fee(
rpc_client,
&config.keypair.pubkey(),
&fee_calculator,
&tx.message,
)?;
let result = rpc_client
.send_and_confirm_transaction(&mut tx, &[&config.keypair, &contract_state]);
let signature_str = log_instruction_custom_error::<BudgetError>(result)?;
@ -883,7 +913,12 @@ fn process_pay(
} else {
let result = rpc_client
.send_and_confirm_transaction(&mut tx, &[&config.keypair, &contract_state]);
check_account_for_fee(rpc_client, config, &fee_calculator, &tx.message)?;
check_account_for_fee(
rpc_client,
&config.keypair.pubkey(),
&fee_calculator,
&tx.message,
)?;
let signature_str = log_instruction_custom_error::<BudgetError>(result)?;
Ok(json!({
@ -905,7 +940,12 @@ fn process_cancel(rpc_client: &RpcClient, config: &CliConfig, pubkey: &Pubkey) -
&config.keypair.pubkey(),
);
let mut tx = Transaction::new_signed_instructions(&[&config.keypair], vec![ix], blockhash);
check_account_for_fee(rpc_client, config, &fee_calculator, &tx.message)?;
check_account_for_fee(
rpc_client,
&config.keypair.pubkey(),
&fee_calculator,
&tx.message,
)?;
let result = rpc_client.send_and_confirm_transaction(&mut tx, &[&config.keypair]);
log_instruction_custom_error::<BudgetError>(result)
}
@ -921,7 +961,12 @@ fn process_time_elapsed(
let ix = budget_instruction::apply_timestamp(&config.keypair.pubkey(), pubkey, to, dt);
let mut tx = Transaction::new_signed_instructions(&[&config.keypair], vec![ix], blockhash);
check_account_for_fee(rpc_client, config, &fee_calculator, &tx.message)?;
check_account_for_fee(
rpc_client,
&config.keypair.pubkey(),
&fee_calculator,
&tx.message,
)?;
let result = rpc_client.send_and_confirm_transaction(&mut tx, &[&config.keypair]);
log_instruction_custom_error::<BudgetError>(result)
}
@ -936,7 +981,12 @@ fn process_witness(
let ix = budget_instruction::apply_signature(&config.keypair.pubkey(), pubkey, to);
let mut tx = Transaction::new_signed_instructions(&[&config.keypair], vec![ix], blockhash);
check_account_for_fee(rpc_client, config, &fee_calculator, &tx.message)?;
check_account_for_fee(
rpc_client,
&config.keypair.pubkey(),
&fee_calculator,
&tx.message,
)?;
let result = rpc_client.send_and_confirm_transaction(&mut tx, &[&config.keypair]);
log_instruction_custom_error::<BudgetError>(result)
}
@ -995,6 +1045,9 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
timeout,
commitment_config,
),
CliCommand::ShowBlockProduction { epoch, slot_limit } => {
process_show_block_production(&rpc_client, *epoch, *slot_limit)
}
CliCommand::ShowGossip => process_show_gossip(&rpc_client),
CliCommand::ShowValidators { use_lamports_unit } => {
process_show_validators(&rpc_client, *use_lamports_unit)
@ -1171,15 +1224,28 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
&vote_account_pubkey,
*use_lamports_unit,
),
CliCommand::VoteAuthorize(vote_account_pubkey, new_authorized_pubkey, vote_authorize) => {
process_vote_authorize(
&rpc_client,
config,
&vote_account_pubkey,
&new_authorized_pubkey,
*vote_authorize,
)
}
CliCommand::VoteAuthorize {
vote_account_pubkey,
new_authorized_pubkey,
vote_authorize,
} => process_vote_authorize(
&rpc_client,
config,
&vote_account_pubkey,
&new_authorized_pubkey,
*vote_authorize,
),
CliCommand::VoteUpdateValidator {
vote_account_pubkey,
new_identity_pubkey,
authorized_voter,
} => process_vote_update_validator(
&rpc_client,
config,
&vote_account_pubkey,
&new_identity_pubkey,
authorized_voter,
),
CliCommand::Uptime {
pubkey: vote_account_pubkey,
aggregate,
@ -2149,8 +2215,20 @@ mod tests {
assert_eq!(signature.unwrap(), SIGNATURE.to_string());
let new_authorized_pubkey = Pubkey::new_rand();
config.command =
CliCommand::VoteAuthorize(bob_pubkey, new_authorized_pubkey, VoteAuthorize::Voter);
config.command = CliCommand::VoteAuthorize {
vote_account_pubkey: bob_pubkey,
new_authorized_pubkey,
vote_authorize: VoteAuthorize::Voter,
};
let signature = process_command(&config);
assert_eq!(signature.unwrap(), SIGNATURE.to_string());
let new_identity_pubkey = Pubkey::new_rand();
config.command = CliCommand::VoteUpdateValidator {
vote_account_pubkey: bob_pubkey,
new_identity_pubkey,
authorized_voter: Keypair::new().into(),
};
let signature = process_command(&config);
assert_eq!(signature.unwrap(), SIGNATURE.to_string());
@ -2333,7 +2411,18 @@ mod tests {
};
assert!(process_command(&config).is_err());
config.command = CliCommand::VoteAuthorize(bob_pubkey, bob_pubkey, VoteAuthorize::Voter);
config.command = CliCommand::VoteAuthorize {
vote_account_pubkey: bob_pubkey,
new_authorized_pubkey: bob_pubkey,
vote_authorize: VoteAuthorize::Voter,
};
assert!(process_command(&config).is_err());
config.command = CliCommand::VoteUpdateValidator {
vote_account_pubkey: bob_pubkey,
new_identity_pubkey: bob_pubkey,
authorized_voter: Keypair::new().into(),
};
assert!(process_command(&config).is_err());
config.command = CliCommand::GetSlot {

View File

@ -5,7 +5,7 @@ use crate::{
},
display::println_name_value,
};
use clap::{value_t_or_exit, App, Arg, ArgMatches, SubCommand};
use clap::{value_t, value_t_or_exit, App, Arg, ArgMatches, SubCommand};
use console::{style, Emoji};
use indicatif::{ProgressBar, ProgressStyle};
use solana_clap_utils::{input_parsers::*, input_validators::*};
@ -13,13 +13,14 @@ use solana_client::{rpc_client::RpcClient, rpc_request::RpcVoteAccountInfo};
use solana_sdk::{
clock::{self, Slot},
commitment_config::CommitmentConfig,
epoch_schedule::{Epoch, EpochSchedule},
hash::Hash,
pubkey::Pubkey,
signature::{Keypair, KeypairUtil},
system_transaction,
};
use std::{
collections::VecDeque,
collections::{HashMap, VecDeque},
net::SocketAddr,
thread::sleep,
time::{Duration, Instant},
@ -147,6 +148,22 @@ impl ClusterQuerySubCommands for App<'_, '_> {
),
),
)
.subcommand(
SubCommand::with_name("show-block-production")
.about("Show information about block production")
.arg(
Arg::with_name("epoch")
.long("epoch")
.takes_value(true)
.help("Epoch to show block production for [default: current epoch]"),
)
.arg(
Arg::with_name("slot_limit")
.long("slot-limit")
.takes_value(true)
.help("Limit results to this many slots from the end of the epoch [default: full epoch]"),
),
)
.subcommand(
SubCommand::with_name("show-gossip")
.about("Show the current gossip network nodes"),
@ -260,6 +277,20 @@ fn new_spinner_progress_bar() -> ProgressBar {
progress_bar
}
/// Aggregate epoch credit stats and return (total credits, total slots, total epochs)
pub fn aggregate_epoch_credits(
epoch_credits: &[(Epoch, u64, u64)],
epoch_schedule: &EpochSchedule,
) -> (u64, u64, u64) {
epoch_credits
.iter()
.fold((0, 0, 0), |acc, (epoch, credits, prev_credits)| {
let credits_earned = credits - prev_credits;
let slots_in_epoch = epoch_schedule.get_slots_in_epoch(*epoch);
(acc.0 + credits_earned, acc.1 + slots_in_epoch, acc.2 + 1)
})
}
pub fn process_catchup(rpc_client: &RpcClient, node_pubkey: &Pubkey) -> ProcessResult {
let cluster_nodes = rpc_client.get_cluster_nodes()?;
@ -360,11 +391,7 @@ pub fn process_get_epoch_info(
);
println_name_value(
"Time remaining in current epoch:",
&format!(
"{} minutes, {} seconds",
remaining_time_in_epoch.as_secs() / 60,
remaining_time_in_epoch.as_secs() % 60
),
&humantime::format_duration(remaining_time_in_epoch).to_string(),
);
Ok("".to_string())
}
@ -382,6 +409,149 @@ pub fn process_get_slot(
Ok(slot.to_string())
}
pub fn parse_show_block_production(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
let epoch = value_t!(matches, "epoch", Epoch).ok();
let slot_limit = value_t!(matches, "slot_limit", u64).ok();
Ok(CliCommandInfo {
command: CliCommand::ShowBlockProduction { epoch, slot_limit },
require_keypair: false,
})
}
pub fn process_show_block_production(
rpc_client: &RpcClient,
epoch: Option<Epoch>,
slot_limit: Option<u64>,
) -> ProcessResult {
let epoch_schedule = rpc_client.get_epoch_schedule()?;
let epoch_info = rpc_client.get_epoch_info_with_commitment(CommitmentConfig::max())?;
let epoch = epoch.unwrap_or(epoch_info.epoch);
if epoch > epoch_info.epoch {
return Err(format!("Epoch {} is in the future", epoch).into());
}
let end_slot = std::cmp::min(
epoch_info.absolute_slot,
epoch_schedule.get_last_slot_in_epoch(epoch),
);
let start_slot = {
let start_slot = epoch_schedule.get_first_slot_in_epoch(epoch);
std::cmp::max(
end_slot.saturating_sub(slot_limit.unwrap_or(start_slot)),
start_slot,
)
};
let progress_bar = new_spinner_progress_bar();
progress_bar.set_message("Connecting...");
progress_bar.set_message(&format!("Fetching leader schedule for epoch {}...", epoch));
progress_bar.set_message(&format!(
"Fetching confirmed blocks between slots {} and {}...",
start_slot, end_slot
));
let confirmed_blocks = rpc_client.get_confirmed_blocks(start_slot, Some(end_slot))?;
let total_slots = (end_slot - start_slot + 1) as usize;
let total_blocks = confirmed_blocks.len();
assert!(total_blocks <= total_slots);
let total_slots_missed = total_slots - total_blocks;
let mut leader_slot_count = HashMap::new();
let mut leader_missed_slots = HashMap::new();
let leader_schedule = rpc_client
.get_leader_schedule_with_commitment(Some(start_slot), CommitmentConfig::max())?;
if leader_schedule.is_none() {
return Err(format!("Unable to fetch leader schedule for slot {}", start_slot).into());
}
let leader_schedule = leader_schedule.unwrap();
let mut leader_per_slot_index = Vec::new();
leader_per_slot_index.resize(total_slots, "");
for (pubkey, leader_slots) in leader_schedule.iter() {
for slot_index in leader_slots.iter() {
if *slot_index < total_slots {
leader_per_slot_index[*slot_index] = pubkey;
}
}
}
progress_bar.set_message(&format!(
"Processing {} slots containing {} blocks and {} empty slots...",
total_slots, total_blocks, total_slots_missed
));
let mut confirmed_blocks_index = 0;
for (slot_index, leader) in leader_per_slot_index.iter().enumerate().take(total_slots) {
let slot = start_slot + slot_index as u64;
let slot_count = leader_slot_count.entry(leader).or_insert(0);
*slot_count += 1;
let missed_slots = leader_missed_slots.entry(leader).or_insert(0);
loop {
if !confirmed_blocks.is_empty() {
let slot_of_next_confirmed_block = confirmed_blocks[confirmed_blocks_index];
if slot_of_next_confirmed_block < slot {
confirmed_blocks_index += 1;
continue;
}
if slot_of_next_confirmed_block == slot {
break;
}
}
*missed_slots += 1;
break;
}
}
progress_bar.finish_and_clear();
println!(
"\n{}",
style(format!(
" {:<44} {:>15} {:>15} {:>15} {:>23}",
"Identity Pubkey",
"Leader Slots",
"Blocks Produced",
"Missed Slots",
"Missed Block Percentage",
))
.bold()
);
let mut table = vec![];
for (leader, leader_slots) in leader_slot_count.iter() {
let missed_slots = leader_missed_slots.get(leader).unwrap();
let blocks_produced = leader_slots - missed_slots;
table.push(format!(
" {:<44} {:>15} {:>15} {:>15} {:>22.2}%",
leader,
leader_slots,
blocks_produced,
missed_slots,
*missed_slots as f64 / *leader_slots as f64 * 100.
));
}
table.sort();
println!(
"{}\n\n {:<44} {:>15} {:>15} {:>15} {:>22.2}%",
table.join("\n"),
format!("Epoch {} total:", epoch),
total_slots,
total_blocks,
total_slots_missed,
total_slots_missed as f64 / total_slots as f64 * 100.
);
println!(
" (using data from {} slots: {} to {})",
total_slots, start_slot, end_slot
);
Ok("".to_string())
}
pub fn process_get_transaction_count(
rpc_client: &RpcClient,
commitment_config: &CommitmentConfig,
@ -423,7 +593,12 @@ pub fn process_ping(
let transaction =
system_transaction::transfer(&config.keypair, &to, lamports, recent_blockhash);
check_account_for_fee(rpc_client, config, &fee_calculator, &transaction.message)?;
check_account_for_fee(
rpc_client,
&config.keypair.pubkey(),
&fee_calculator,
&transaction.message,
)?;
match rpc_client.send_transaction(&transaction) {
Ok(signature) => {
@ -549,6 +724,7 @@ pub fn process_show_gossip(rpc_client: &RpcClient) -> ProcessResult {
}
pub fn process_show_validators(rpc_client: &RpcClient, use_lamports_unit: bool) -> ProcessResult {
let epoch_schedule = rpc_client.get_epoch_schedule()?;
let vote_accounts = rpc_client.get_vote_accounts()?;
let total_active_stake = vote_accounts
.current
@ -591,19 +767,21 @@ pub fn process_show_validators(rpc_client: &RpcClient, use_lamports_unit: bool)
println!(
"{}",
style(format!(
" {:<44} {:<44} {:<11} {:>10} {:>11} {}",
" {:<44} {:<44} {} {} {} {:>7} {}",
"Identity Pubkey",
"Vote Account Pubkey",
"Commission",
"Last Vote",
"Root Block",
"Uptime",
"Active Stake",
))
.bold()
);
fn print_vote_account(
vote_account: &RpcVoteAccountInfo,
vote_account: RpcVoteAccountInfo,
epoch_schedule: &EpochSchedule,
total_active_stake: f64,
use_lamports_unit: bool,
delinquent: bool,
@ -615,8 +793,20 @@ pub fn process_show_validators(rpc_client: &RpcClient, use_lamports_unit: bool)
format!("{}", v)
}
}
fn uptime(epoch_credits: Vec<(Epoch, u64, u64)>, epoch_schedule: &EpochSchedule) -> String {
    // Sum credits earned and slots elapsed across every recorded epoch, then
    // render credits-per-slot as a percentage; "-" when there is no history.
    let (credits, slots, _epochs) = aggregate_epoch_credits(&epoch_credits, &epoch_schedule);
    if slots == 0 {
        return "-".into();
    }
    format!("{:.2}%", 100_f64 * credits as f64 / slots as f64)
}
println!(
"{} {:<44} {:<44} {:>3}% {:>10} {:>11} {:>11}",
"{} {:<44} {:<44} {:>9}% {:>8} {:>10} {:>7} {}",
if delinquent {
WARNING.to_string()
} else {
@ -627,6 +817,7 @@ pub fn process_show_validators(rpc_client: &RpcClient, use_lamports_unit: bool)
vote_account.commission,
non_zero_or_dash(vote_account.last_vote),
non_zero_or_dash(vote_account.root_slot),
uptime(vote_account.epoch_credits, epoch_schedule),
if vote_account.activated_stake > 0 {
format!(
"{} ({:.2}%)",
@ -639,11 +830,23 @@ pub fn process_show_validators(rpc_client: &RpcClient, use_lamports_unit: bool)
);
}
for vote_account in vote_accounts.current.iter() {
print_vote_account(vote_account, total_active_stake, use_lamports_unit, false);
for vote_account in vote_accounts.current.into_iter() {
print_vote_account(
vote_account,
&epoch_schedule,
total_active_stake,
use_lamports_unit,
false,
);
}
for vote_account in vote_accounts.delinquent.iter() {
print_vote_account(vote_account, total_active_stake, use_lamports_unit, true);
for vote_account in vote_accounts.delinquent.into_iter() {
print_vote_account(
vote_account,
&epoch_schedule,
total_active_stake,
use_lamports_unit,
true,
);
}
Ok("".to_string())

View File

@ -344,6 +344,7 @@ pub fn parse_stake_delegate_stake(matches: &ArgMatches<'_>) -> Result<CliCommand
let sign_only = matches.is_present("sign_only");
let signers = pubkeys_sigs_of(&matches, "signer");
let blockhash = value_of(matches, "blockhash");
let require_keypair = signers.is_none();
Ok(CliCommandInfo {
command: CliCommand::DelegateStake {
@ -354,7 +355,7 @@ pub fn parse_stake_delegate_stake(matches: &ArgMatches<'_>) -> Result<CliCommand
signers,
blockhash,
},
require_keypair: !sign_only,
require_keypair,
})
}
@ -378,6 +379,7 @@ pub fn parse_stake_authorize(
pub fn parse_redeem_vote_credits(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
let stake_account_pubkey = pubkey_of(matches, "stake_account_pubkey").unwrap();
let vote_account_pubkey = pubkey_of(matches, "vote_account_pubkey").unwrap();
Ok(CliCommandInfo {
command: CliCommand::RedeemVoteCredits(stake_account_pubkey, vote_account_pubkey),
require_keypair: true,
@ -389,6 +391,8 @@ pub fn parse_stake_deactivate_stake(matches: &ArgMatches<'_>) -> Result<CliComma
let sign_only = matches.is_present("sign_only");
let signers = pubkeys_sigs_of(&matches, "signer");
let blockhash = value_of(matches, "blockhash");
let require_keypair = signers.is_none();
Ok(CliCommandInfo {
command: CliCommand::DeactivateStake {
stake_account_pubkey,
@ -396,7 +400,7 @@ pub fn parse_stake_deactivate_stake(matches: &ArgMatches<'_>) -> Result<CliComma
signers,
blockhash,
},
require_keypair: !sign_only,
require_keypair,
})
}
@ -489,7 +493,12 @@ pub fn process_create_stake_account(
&[&config.keypair, stake_account],
recent_blockhash,
);
check_account_for_fee(rpc_client, config, &fee_calculator, &tx.message)?;
check_account_for_fee(
rpc_client,
&config.keypair.pubkey(),
&fee_calculator,
&tx.message,
)?;
let result =
rpc_client.send_and_confirm_transaction(&mut tx, &[&config.keypair, stake_account]);
log_instruction_custom_error::<SystemError>(result)
@ -520,7 +529,12 @@ pub fn process_stake_authorize(
&[&config.keypair],
recent_blockhash,
);
check_account_for_fee(rpc_client, config, &fee_calculator, &tx.message)?;
check_account_for_fee(
rpc_client,
&config.keypair.pubkey(),
&fee_calculator,
&tx.message,
)?;
let result = rpc_client.send_and_confirm_transaction(&mut tx, &[&config.keypair]);
log_instruction_custom_error::<StakeError>(result)
}
@ -551,7 +565,12 @@ pub fn process_deactivate_stake_account(
if sign_only {
return_signers(&tx)
} else {
check_account_for_fee(rpc_client, config, &fee_calculator, &tx.message)?;
check_account_for_fee(
rpc_client,
&tx.message.account_keys[0],
&fee_calculator,
&tx.message,
)?;
let result = rpc_client.send_and_confirm_transaction(&mut tx, &[&config.keypair]);
log_instruction_custom_error::<StakeError>(result)
}
@ -579,7 +598,12 @@ pub fn process_withdraw_stake(
&[&config.keypair],
recent_blockhash,
);
check_account_for_fee(rpc_client, config, &fee_calculator, &tx.message)?;
check_account_for_fee(
rpc_client,
&config.keypair.pubkey(),
&fee_calculator,
&tx.message,
)?;
let result = rpc_client.send_and_confirm_transaction(&mut tx, &[&config.keypair]);
log_instruction_custom_error::<StakeError>(result)
}
@ -601,7 +625,12 @@ pub fn process_redeem_vote_credits(
&[&config.keypair],
recent_blockhash,
);
check_account_for_fee(rpc_client, config, &fee_calculator, &tx.message)?;
check_account_for_fee(
rpc_client,
&config.keypair.pubkey(),
&fee_calculator,
&tx.message,
)?;
let result = rpc_client.send_and_confirm_transaction(&mut tx, &[&config.keypair]);
log_instruction_custom_error::<StakeError>(result)
}
@ -696,7 +725,7 @@ pub fn process_show_stake_history(
println!(
"{}",
style(format!(
" {:<5} {:>15} {:>16} {:>18}",
" {:<5} {:>20} {:>20} {:>20}",
"Epoch", "Effective Stake", "Activating Stake", "Deactivating Stake",
))
.bold()
@ -704,7 +733,7 @@ pub fn process_show_stake_history(
for (epoch, entry) in stake_history.deref() {
println!(
" {:>5} {:>15} {:>16} {:>18} {}",
" {:>5} {:>20} {:>20} {:>20} {}",
epoch,
build_balance_message(entry.effective, use_lamports_unit, false),
build_balance_message(entry.activating, use_lamports_unit, false),
@ -791,7 +820,12 @@ pub fn process_delegate_stake(
if sign_only {
return_signers(&tx)
} else {
check_account_for_fee(rpc_client, config, &fee_calculator, &tx.message)?;
check_account_for_fee(
rpc_client,
&tx.message.account_keys[0],
&fee_calculator,
&tx.message,
)?;
let result = rpc_client.send_and_confirm_transaction(&mut tx, &[&config.keypair]);
log_instruction_custom_error::<StakeError>(result)
}
@ -1008,7 +1042,7 @@ mod tests {
signers: None,
blockhash: None
},
require_keypair: false
require_keypair: true
}
);
@ -1035,7 +1069,7 @@ mod tests {
signers: Some(vec![(key1, sig1)]),
blockhash: None
},
require_keypair: true
require_keypair: false
}
);
@ -1064,7 +1098,7 @@ mod tests {
signers: Some(vec![(key1, sig1), (key2, sig2)]),
blockhash: None
},
require_keypair: true
require_keypair: false
}
);
@ -1143,7 +1177,7 @@ mod tests {
signers: None,
blockhash: None
},
require_keypair: false
require_keypair: true
}
);
@ -1167,7 +1201,7 @@ mod tests {
signers: Some(vec![(key1, sig1)]),
blockhash: None
},
require_keypair: true
require_keypair: false
}
);
@ -1193,7 +1227,7 @@ mod tests {
signers: Some(vec![(key1, sig1), (key2, sig2)]),
blockhash: None
},
require_keypair: true
require_keypair: false
}
);
}

View File

@ -176,7 +176,12 @@ pub fn process_create_storage_account(
ixs,
recent_blockhash,
);
check_account_for_fee(rpc_client, config, &fee_calculator, &tx.message)?;
check_account_for_fee(
rpc_client,
&config.keypair.pubkey(),
&fee_calculator,
&tx.message,
)?;
let result =
rpc_client.send_and_confirm_transaction(&mut tx, &[&config.keypair, &storage_account]);
log_instruction_custom_error::<SystemError>(result)
@ -196,7 +201,12 @@ pub fn process_claim_storage_reward(
let message = Message::new_with_payer(vec![instruction], Some(&signers[0].pubkey()));
let mut tx = Transaction::new(&signers, message, recent_blockhash);
check_account_for_fee(rpc_client, config, &fee_calculator, &tx.message)?;
check_account_for_fee(
rpc_client,
&config.keypair.pubkey(),
&fee_calculator,
&tx.message,
)?;
let signature_str = rpc_client.send_and_confirm_transaction(&mut tx, &signers)?;
Ok(signature_str.to_string())
}

View File

@ -347,7 +347,12 @@ pub fn process_set_validator_info(
// Submit transaction
let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;
let mut tx = Transaction::new(&signers, message, recent_blockhash);
check_account_for_fee(rpc_client, config, &fee_calculator, &tx.message)?;
check_account_for_fee(
rpc_client,
&config.keypair.pubkey(),
&fee_calculator,
&tx.message,
)?;
let signature_str = rpc_client.send_and_confirm_transaction(&mut tx, &signers)?;
println!("Success! Validator info published at: {:?}", info_pubkey);

View File

@ -1,6 +1,10 @@
use crate::cli::{
build_balance_message, check_account_for_fee, check_unique_pubkeys,
log_instruction_custom_error, CliCommand, CliCommandInfo, CliConfig, CliError, ProcessResult,
use crate::{
cli::{
build_balance_message, check_account_for_fee, check_unique_pubkeys,
log_instruction_custom_error, CliCommand, CliCommandInfo, CliConfig, CliError,
ProcessResult,
},
cluster_query::aggregate_epoch_credits,
};
use clap::{value_t_or_exit, App, Arg, ArgMatches, SubCommand};
use solana_clap_utils::{input_parsers::*, input_validators::*};
@ -34,9 +38,9 @@ impl VoteSubCommands for App<'_, '_> {
.help("Vote account keypair to fund"),
)
.arg(
Arg::with_name("node_pubkey")
Arg::with_name("identity_pubkey")
.index(2)
.value_name("VALIDATOR PUBKEY")
.value_name("VALIDATOR IDENTITY PUBKEY")
.takes_value(true)
.required(true)
.validator(is_pubkey_or_keypair)
@ -66,6 +70,37 @@ impl VoteSubCommands for App<'_, '_> {
.help("Public key of the authorized withdrawer (defaults to cli config pubkey)"),
),
)
.subcommand(
SubCommand::with_name("vote-update-validator")
.about("Update the vote account's validator identity")
.arg(
Arg::with_name("vote_account_pubkey")
.index(1)
.value_name("VOTE ACCOUNT PUBKEY")
.takes_value(true)
.required(true)
.validator(is_pubkey_or_keypair)
.help("Vote account to update"),
)
.arg(
Arg::with_name("new_identity_pubkey")
.index(2)
.value_name("NEW VALIDATOR IDENTITY PUBKEY")
.takes_value(true)
.required(true)
.validator(is_pubkey_or_keypair)
.help("New validator that will vote with this account"),
)
.arg(
Arg::with_name("authorized_voter")
.index(3)
.value_name("AUTHORIZED VOTER KEYPAIR")
.takes_value(true)
.required(true)
.validator(is_keypair)
.help("Authorized voter keypair"),
)
)
.subcommand(
SubCommand::with_name("vote-authorize-voter")
.about("Authorize a new vote signing keypair for the given vote account")
@ -159,7 +194,7 @@ impl VoteSubCommands for App<'_, '_> {
pub fn parse_vote_create_account(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
let vote_account = keypair_of(matches, "vote_account").unwrap();
let node_pubkey = pubkey_of(matches, "node_pubkey").unwrap();
let identity_pubkey = pubkey_of(matches, "identity_pubkey").unwrap();
let commission = value_of(&matches, "commission").unwrap_or(0);
let authorized_voter = pubkey_of(matches, "authorized_voter");
let authorized_withdrawer = pubkey_of(matches, "authorized_withdrawer");
@ -167,7 +202,7 @@ pub fn parse_vote_create_account(matches: &ArgMatches<'_>) -> Result<CliCommandI
Ok(CliCommandInfo {
command: CliCommand::CreateVoteAccount {
vote_account: vote_account.into(),
node_pubkey,
node_pubkey: identity_pubkey,
authorized_voter,
authorized_withdrawer,
commission,
@ -184,11 +219,26 @@ pub fn parse_vote_authorize(
let new_authorized_pubkey = pubkey_of(matches, "new_authorized_pubkey").unwrap();
Ok(CliCommandInfo {
command: CliCommand::VoteAuthorize(
command: CliCommand::VoteAuthorize {
vote_account_pubkey,
new_authorized_pubkey,
vote_authorize,
),
},
require_keypair: true,
})
}
/// Build the `VoteUpdateValidator` CLI command from parsed arguments.
///
/// All three positional arguments are declared `required` by clap, so the
/// `unwrap()` calls cannot fail once argument parsing has succeeded.
pub fn parse_vote_update_validator(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
    let voter_keypair = keypair_of(matches, "authorized_voter").unwrap();
    let command = CliCommand::VoteUpdateValidator {
        vote_account_pubkey: pubkey_of(matches, "vote_account_pubkey").unwrap(),
        new_identity_pubkey: pubkey_of(matches, "new_identity_pubkey").unwrap(),
        authorized_voter: voter_keypair.into(),
    };
    // Submitting the update always signs with the cli keypair as fee payer.
    Ok(CliCommandInfo {
        command,
        require_keypair: true,
    })
}
@ -229,7 +279,7 @@ pub fn process_create_vote_account(
rpc_client: &RpcClient,
config: &CliConfig,
vote_account: &Keypair,
node_pubkey: &Pubkey,
identity_pubkey: &Pubkey,
authorized_voter: &Option<Pubkey>,
authorized_withdrawer: &Option<Pubkey>,
commission: u8,
@ -237,7 +287,7 @@ pub fn process_create_vote_account(
let vote_account_pubkey = vote_account.pubkey();
check_unique_pubkeys(
(&vote_account_pubkey, "vote_account_pubkey".to_string()),
(&node_pubkey, "node_pubkey".to_string()),
(&identity_pubkey, "identity_pubkey".to_string()),
)?;
check_unique_pubkeys(
(&config.keypair.pubkey(), "cli keypair".to_string()),
@ -251,7 +301,7 @@ pub fn process_create_vote_account(
1
};
let vote_init = VoteInit {
node_pubkey: *node_pubkey,
node_pubkey: *identity_pubkey,
authorized_voter: authorized_voter.unwrap_or(vote_account_pubkey),
authorized_withdrawer: authorized_withdrawer.unwrap_or(config.keypair.pubkey()),
commission,
@ -268,7 +318,12 @@ pub fn process_create_vote_account(
ixs,
recent_blockhash,
);
check_account_for_fee(rpc_client, config, &fee_calculator, &tx.message)?;
check_account_for_fee(
rpc_client,
&config.keypair.pubkey(),
&fee_calculator,
&tx.message,
)?;
let result = rpc_client.send_and_confirm_transaction(&mut tx, &[&config.keypair, vote_account]);
log_instruction_custom_error::<SystemError>(result)
}
@ -298,7 +353,46 @@ pub fn process_vote_authorize(
&[&config.keypair],
recent_blockhash,
);
check_account_for_fee(rpc_client, config, &fee_calculator, &tx.message)?;
check_account_for_fee(
rpc_client,
&config.keypair.pubkey(),
&fee_calculator,
&tx.message,
)?;
let result = rpc_client.send_and_confirm_transaction(&mut tx, &[&config.keypair]);
log_instruction_custom_error::<VoteError>(result)
}
/// Submit a transaction that updates a vote account's validator identity
/// (the vote state's `node_pubkey`) to `new_identity_pubkey`.
///
/// The transaction is paid for by `config.keypair` and co-signed by
/// `authorized_voter`, whose authority the vote program requires for the
/// `update_node` instruction. Returns the processed result (or a decoded
/// `VoteError`) via `log_instruction_custom_error`.
pub fn process_vote_update_validator(
    rpc_client: &RpcClient,
    config: &CliConfig,
    vote_account_pubkey: &Pubkey,
    new_identity_pubkey: &Pubkey,
    authorized_voter: &Keypair,
) -> ProcessResult {
    // Reject the degenerate case where the vote account and the proposed new
    // identity are the same pubkey.
    check_unique_pubkeys(
        (vote_account_pubkey, "vote_account_pubkey".to_string()),
        (new_identity_pubkey, "new_identity_pubkey".to_string()),
    )?;
    // Fetch a recent blockhash plus the fee schedule used for the fee check.
    let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;
    // Single instruction: the vote program's update_node, authorized by the
    // vote account's authorized voter.
    let ixs = vec![vote_instruction::update_node(
        vote_account_pubkey,
        &authorized_voter.pubkey(),
        new_identity_pubkey,
    )];
    // Sign with both the fee payer (config.keypair) and the authorized voter.
    let mut tx = Transaction::new_signed_with_payer(
        ixs,
        Some(&config.keypair.pubkey()),
        &[&config.keypair, authorized_voter],
        recent_blockhash,
    );
    // Verify the fee payer's balance covers the transaction fee before sending.
    check_account_for_fee(
        rpc_client,
        &config.keypair.pubkey(),
        &fee_calculator,
        &tx.message,
    )?;
    // NOTE(review): the signer list passed here omits `authorized_voter`; if
    // send_and_confirm_transaction ever re-signs with a fresh blockhash the
    // voter's signature would be lost — confirm against its implementation.
    let result = rpc_client.send_and_confirm_transaction(&mut tx, &[&config.keypair]);
    log_instruction_custom_error::<VoteError>(result)
}
@ -338,7 +432,7 @@ pub fn process_show_vote_account(
"account balance: {}",
build_balance_message(vote_account.lamports, use_lamports_unit, true)
);
println!("node id: {}", vote_state.node_pubkey);
println!("validator identity: {}", vote_state.node_pubkey);
println!("authorized voter: {}", vote_state.authorized_voter);
println!(
"authorized withdrawer: {}",
@ -386,38 +480,42 @@ pub fn process_uptime(
let epoch_schedule = rpc_client.get_epoch_schedule()?;
println!("Node id: {}", vote_state.node_pubkey);
println!("Authorized voter: {}", vote_state.authorized_voter);
println!("validator identity: {}", vote_state.node_pubkey);
println!("authorized voter: {}", vote_state.authorized_voter);
if !vote_state.votes.is_empty() {
println!("Uptime:");
println!("uptime:");
let epoch_credits_vec: Vec<(u64, u64, u64)> = vote_state.epoch_credits().copied().collect();
let epoch_credits = if let Some(x) = span {
epoch_credits_vec.iter().rev().take(x as usize)
let epoch_credits: Vec<(u64, u64, u64)> = if let Some(x) = span {
vote_state
.epoch_credits()
.iter()
.rev()
.take(x as usize)
.cloned()
.collect()
} else {
epoch_credits_vec.iter().rev().take(epoch_credits_vec.len())
vote_state.epoch_credits().iter().rev().cloned().collect()
};
if aggregate {
let (credits_earned, slots_in_epoch, epochs): (u64, u64, u64) =
epoch_credits.fold((0, 0, 0), |acc, (epoch, credits, prev_credits)| {
let credits_earned = credits - prev_credits;
let slots_in_epoch = epoch_schedule.get_slots_in_epoch(*epoch);
(acc.0 + credits_earned, acc.1 + slots_in_epoch, acc.2 + 1)
});
let total_uptime = credits_earned as f64 / slots_in_epoch as f64;
println!("{:.2}% over {} epochs", total_uptime * 100_f64, epochs,);
let (total_credits, total_slots, epochs) =
aggregate_epoch_credits(&epoch_credits, &epoch_schedule);
if total_slots > 0 {
let total_uptime = 100_f64 * total_credits as f64 / total_slots as f64;
println!("{:.2}% over {} epochs", total_uptime, epochs);
} else {
println!("Insufficient voting history available");
}
} else {
for (epoch, credits, prev_credits) in epoch_credits {
let credits_earned = credits - prev_credits;
let slots_in_epoch = epoch_schedule.get_slots_in_epoch(*epoch);
let slots_in_epoch = epoch_schedule.get_slots_in_epoch(epoch);
let uptime = credits_earned as f64 / slots_in_epoch as f64;
println!("- epoch: {} {:.2}% uptime", epoch, uptime * 100_f64,);
}
}
if let Some(x) = span {
if x > epoch_credits_vec.len() as u64 {
if x > vote_state.epoch_credits().len() as u64 {
println!("(span longer than available epochs)");
}
}
@ -443,17 +541,24 @@ mod tests {
let keypair = Keypair::new();
let pubkey = keypair.pubkey();
let pubkey_string = pubkey.to_string();
let keypair2 = Keypair::new();
let pubkey2 = keypair2.pubkey();
let pubkey2_string = pubkey2.to_string();
let test_authorize_voter = test_commands.clone().get_matches_from(vec![
"test",
"vote-authorize-voter",
&pubkey_string,
&pubkey_string,
&pubkey2_string,
]);
assert_eq!(
parse_command(&test_authorize_voter).unwrap(),
CliCommandInfo {
command: CliCommand::VoteAuthorize(pubkey, pubkey, VoteAuthorize::Voter),
command: CliCommand::VoteAuthorize {
vote_account_pubkey: pubkey,
new_authorized_pubkey: pubkey2,
vote_authorize: VoteAuthorize::Voter
},
require_keypair: true
}
);
@ -564,6 +669,27 @@ mod tests {
}
);
let test_update_validator = test_commands.clone().get_matches_from(vec![
"test",
"vote-update-validator",
&pubkey_string,
&pubkey2_string,
&keypair_file,
]);
assert_eq!(
parse_command(&test_update_validator).unwrap(),
CliCommandInfo {
command: CliCommand::VoteUpdateValidator {
vote_account_pubkey: pubkey,
new_identity_pubkey: pubkey2,
authorized_voter: solana_sdk::signature::read_keypair_file(&keypair_file)
.unwrap()
.into(),
},
require_keypair: true
}
);
// Test Uptime Subcommand
let pubkey = Pubkey::new_rand();
let matches = test_commands.clone().get_matches_from(vec![

252
cli/tests/stake.rs Normal file
View File

@ -0,0 +1,252 @@
use serde_json::Value;
use solana_cli::cli::{process_command, request_and_confirm_airdrop, CliCommand, CliConfig};
use solana_client::rpc_client::RpcClient;
use solana_drone::drone::run_local_drone;
use solana_sdk::{
hash::Hash,
pubkey::Pubkey,
signature::{read_keypair_file, write_keypair, KeypairUtil, Signature},
};
use solana_stake_program::stake_state::Lockup;
use std::fs::remove_dir_all;
use std::str::FromStr;
use std::sync::mpsc::channel;
#[cfg(test)]
use solana_core::validator::new_validator_for_tests;
use std::thread::sleep;
use std::time::Duration;
use tempfile::NamedTempFile;
/// Create a named temporary file and return its path as an owned `String`
/// together with the handle; the handle must be kept alive or the file is
/// deleted on drop.
fn make_tmp_file() -> (String, NamedTempFile) {
    let file = NamedTempFile::new().unwrap();
    let path = file.path().to_str().unwrap().to_string();
    (path, file)
}
/// Poll `pubkey`'s balance until it equals `expected_balance`, retrying up to
/// 5 times with a 500 ms pause between attempts; panics via `assert_eq!` if
/// the final attempt still disagrees.
///
/// Bug fix: the original used `(0..5).for_each(|tries| { ... return; ... })`,
/// where `return` only exits the closure (behaving like `continue`), so the
/// loop kept sleeping and re-polling for all 5 tries even after the balance
/// matched. A plain `for` loop lets `return` exit the function immediately.
fn check_balance(expected_balance: u64, client: &RpcClient, pubkey: &Pubkey) {
    for tries in 0..5 {
        let balance = client.retry_get_balance(pubkey, 1).unwrap().unwrap();
        if balance == expected_balance {
            return;
        }
        if tries == 4 {
            // Last attempt: fail the test with a readable diff.
            assert_eq!(balance, expected_balance);
        }
        sleep(Duration::from_millis(500));
    }
}
/// End-to-end CLI test against a freshly started local validator:
/// airdrop -> create vote account -> create stake account -> delegate ->
/// deactivate, all driven through `process_command`.
#[test]
fn test_stake_delegation_and_deactivation() {
    solana_logger::setup();

    // Spin up a single-node test validator plus a local drone for airdrops.
    let (server, leader_data, alice, ledger_path) = new_validator_for_tests();
    let (sender, receiver) = channel();
    run_local_drone(alice, sender, None);
    let drone_addr = receiver.recv().unwrap();

    let rpc_client = RpcClient::new_socket(leader_data.rpc);

    // Three CLI configs, each with its own default keypair: the validator
    // identity (fee payer), the vote account, and the stake account. The
    // vote/stake keypairs are written to temp files because CreateVoteAccount
    // and CreateStakeAccount read keypairs from file paths.
    let mut config_validator = CliConfig::default();
    config_validator.json_rpc_url =
        format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());

    let mut config_vote = CliConfig::default();
    config_vote.json_rpc_url =
        format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());
    let (vote_keypair_file, mut tmp_file) = make_tmp_file();
    write_keypair(&config_vote.keypair, tmp_file.as_file_mut()).unwrap();

    let mut config_stake = CliConfig::default();
    config_stake.json_rpc_url =
        format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());
    let (stake_keypair_file, mut tmp_file) = make_tmp_file();
    write_keypair(&config_stake.keypair, tmp_file.as_file_mut()).unwrap();

    // Fund the validator keypair so it can pay fees and account lamports.
    request_and_confirm_airdrop(
        &rpc_client,
        &drone_addr,
        &config_validator.keypair.pubkey(),
        100_000,
    )
    .unwrap();
    check_balance(100_000, &rpc_client, &config_validator.keypair.pubkey());

    // Create vote account
    config_validator.command = CliCommand::CreateVoteAccount {
        vote_account: read_keypair_file(&vote_keypair_file).unwrap().into(),
        node_pubkey: config_validator.keypair.pubkey(),
        authorized_voter: None,
        authorized_withdrawer: None,
        commission: 0,
    };
    process_command(&config_validator).unwrap();

    // Create stake account
    config_validator.command = CliCommand::CreateStakeAccount {
        stake_account: read_keypair_file(&stake_keypair_file).unwrap().into(),
        staker: None,
        withdrawer: None,
        lockup: Lockup {
            custodian: Pubkey::default(),
            epoch: 0,
        },
        lamports: 50_000,
    };
    process_command(&config_validator).unwrap();

    // Delegate stake (`force: true` skips the vote-account sanity checks,
    // which would otherwise fail on a brand-new test cluster).
    config_validator.command = CliCommand::DelegateStake {
        stake_account_pubkey: config_stake.keypair.pubkey(),
        vote_account_pubkey: config_vote.keypair.pubkey(),
        force: true,
        sign_only: false,
        signers: None,
        blockhash: None,
    };
    process_command(&config_validator).unwrap();

    // Deactivate stake
    config_validator.command = CliCommand::DeactivateStake {
        stake_account_pubkey: config_stake.keypair.pubkey(),
        sign_only: false,
        signers: None,
        blockhash: None,
    };
    process_command(&config_validator).unwrap();

    // Tear down the validator and its ledger directory.
    server.close().unwrap();
    remove_dir_all(ledger_path).unwrap();
}
/// Same flow as `test_stake_delegation_and_deactivation`, but exercises the
/// offline-signing path: delegate/deactivate are first run with
/// `sign_only: true` (which returns a JSON payload of blockhash + signatures
/// instead of broadcasting), then a separate "payer" config submits the
/// transaction online using those pre-collected signatures.
#[test]
fn test_stake_delegation_and_deactivation_offline() {
    solana_logger::setup();

    // Spin up a single-node test validator plus a local drone for airdrops.
    let (server, leader_data, alice, ledger_path) = new_validator_for_tests();
    let (sender, receiver) = channel();
    run_local_drone(alice, sender, None);
    let drone_addr = receiver.recv().unwrap();

    let rpc_client = RpcClient::new_socket(leader_data.rpc);

    // config_validator signs offline; config_payer broadcasts online.
    let mut config_validator = CliConfig::default();
    config_validator.json_rpc_url =
        format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());

    let mut config_payer = CliConfig::default();
    config_payer.json_rpc_url =
        format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());

    let mut config_vote = CliConfig::default();
    config_vote.json_rpc_url =
        format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());
    let (vote_keypair_file, mut tmp_file) = make_tmp_file();
    write_keypair(&config_vote.keypair, tmp_file.as_file_mut()).unwrap();

    let mut config_stake = CliConfig::default();
    config_stake.json_rpc_url =
        format!("http://{}:{}", leader_data.rpc.ip(), leader_data.rpc.port());
    let (stake_keypair_file, mut tmp_file) = make_tmp_file();
    write_keypair(&config_stake.keypair, tmp_file.as_file_mut()).unwrap();

    // Fund the validator keypair so it can pay fees and account lamports.
    request_and_confirm_airdrop(
        &rpc_client,
        &drone_addr,
        &config_validator.keypair.pubkey(),
        100_000,
    )
    .unwrap();
    check_balance(100_000, &rpc_client, &config_validator.keypair.pubkey());

    // Create vote account
    config_validator.command = CliCommand::CreateVoteAccount {
        vote_account: read_keypair_file(&vote_keypair_file).unwrap().into(),
        node_pubkey: config_validator.keypair.pubkey(),
        authorized_voter: None,
        authorized_withdrawer: None,
        commission: 0,
    };
    process_command(&config_validator).unwrap();

    // Create stake account
    config_validator.command = CliCommand::CreateStakeAccount {
        stake_account: read_keypair_file(&stake_keypair_file).unwrap().into(),
        staker: None,
        withdrawer: None,
        lockup: Lockup {
            custodian: Pubkey::default(),
            epoch: 0,
        },
        lamports: 50_000,
    };
    process_command(&config_validator).unwrap();

    // Delegate stake offline
    config_validator.command = CliCommand::DelegateStake {
        stake_account_pubkey: config_stake.keypair.pubkey(),
        vote_account_pubkey: config_vote.keypair.pubkey(),
        force: true,
        sign_only: true,
        signers: None,
        blockhash: None,
    };
    // sign_only returns JSON: {"blockhash": "...", "signers": ["pubkey=sig", ...]}
    let sig_response = process_command(&config_validator).unwrap();
    let object: Value = serde_json::from_str(&sig_response).unwrap();
    let blockhash_str = object.get("blockhash").unwrap().as_str().unwrap();
    let signer_strings = object.get("signers").unwrap().as_array().unwrap();
    // Parse each "pubkey=signature" entry back into typed pairs.
    let signers: Vec<_> = signer_strings
        .iter()
        .map(|signer_string| {
            let mut signer = signer_string.as_str().unwrap().split('=');
            let key = Pubkey::from_str(signer.next().unwrap()).unwrap();
            let sig = Signature::from_str(signer.next().unwrap()).unwrap();
            (key, sig)
        })
        .collect();

    // Delegate stake online, replaying the offline blockhash and signatures.
    config_payer.command = CliCommand::DelegateStake {
        stake_account_pubkey: config_stake.keypair.pubkey(),
        vote_account_pubkey: config_vote.keypair.pubkey(),
        force: true,
        sign_only: false,
        signers: Some(signers),
        blockhash: Some(blockhash_str.parse::<Hash>().unwrap()),
    };
    process_command(&config_payer).unwrap();

    // Deactivate stake offline
    config_validator.command = CliCommand::DeactivateStake {
        stake_account_pubkey: config_stake.keypair.pubkey(),
        sign_only: true,
        signers: None,
        blockhash: None,
    };
    let sig_response = process_command(&config_validator).unwrap();
    let object: Value = serde_json::from_str(&sig_response).unwrap();
    let blockhash_str = object.get("blockhash").unwrap().as_str().unwrap();
    let signer_strings = object.get("signers").unwrap().as_array().unwrap();
    let signers: Vec<_> = signer_strings
        .iter()
        .map(|signer_string| {
            let mut signer = signer_string.as_str().unwrap().split('=');
            let key = Pubkey::from_str(signer.next().unwrap()).unwrap();
            let sig = Signature::from_str(signer.next().unwrap()).unwrap();
            (key, sig)
        })
        .collect();

    // Deactivate stake online with the offline-collected signatures.
    config_payer.command = CliCommand::DeactivateStake {
        stake_account_pubkey: config_stake.keypair.pubkey(),
        sign_only: false,
        signers: Some(signers),
        blockhash: Some(blockhash_str.parse::<Hash>().unwrap()),
    };
    process_command(&config_payer).unwrap();

    // Tear down the validator and its ledger directory.
    server.close().unwrap();
    remove_dir_all(ledger_path).unwrap();
}

View File

@ -1,6 +1,6 @@
[package]
name = "solana-client"
version = "0.21.1"
version = "0.21.8"
description = "Solana Client"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -19,10 +19,10 @@ reqwest = { version = "0.9.22", default-features = false, features = ["rustls-tl
serde = "1.0.102"
serde_derive = "1.0.102"
serde_json = "1.0.41"
solana-net-utils = { path = "../net-utils", version = "0.21.1" }
solana-sdk = { path = "../sdk", version = "0.21.1" }
solana-net-utils = { path = "../net-utils", version = "0.21.8" }
solana-sdk = { path = "../sdk", version = "0.21.8" }
[dev-dependencies]
jsonrpc-core = "14.0.3"
jsonrpc-http-server = "14.0.3"
solana-logger = { path = "../logger", version = "0.21.1" }
solana-logger = { path = "../logger", version = "0.21.8" }

View File

@ -1,12 +1,10 @@
use crate::{client_error::ClientError, rpc_request::RpcRequest};
use solana_sdk::commitment_config::CommitmentConfig;
pub(crate) trait GenericRpcClientRequest {
fn send(
&self,
request: &RpcRequest,
params: Option<serde_json::Value>,
params: serde_json::Value,
retries: usize,
commitment_config: Option<CommitmentConfig>,
) -> Result<serde_json::Value, ClientError>;
}

View File

@ -5,7 +5,6 @@ use crate::{
};
use serde_json::{Number, Value};
use solana_sdk::{
commitment_config::CommitmentConfig,
fee_calculator::FeeCalculator,
transaction::{self, TransactionError},
};
@ -28,17 +27,16 @@ impl GenericRpcClientRequest for MockRpcClientRequest {
fn send(
&self,
request: &RpcRequest,
params: Option<serde_json::Value>,
params: serde_json::Value,
_retries: usize,
_commitment_config: Option<CommitmentConfig>,
) -> Result<serde_json::Value, ClientError> {
if self.url == "fails" {
return Ok(Value::Null);
}
let val = match request {
RpcRequest::ConfirmTransaction => {
if let Some(Value::Array(param_array)) = params {
if let Value::String(param_string) = &param_array[0] {
if let Some(params_array) = params.as_array() {
if let Value::String(param_string) = &params_array[0] {
Value::Bool(param_string == SIGNATURE)
} else {
Value::Null

View File

@ -4,7 +4,10 @@ use crate::{
generic_rpc_client_request::GenericRpcClientRequest,
mock_rpc_client_request::MockRpcClientRequest,
rpc_client_request::RpcClientRequest,
rpc_request::{RpcContactInfo, RpcEpochInfo, RpcRequest, RpcVersionInfo, RpcVoteAccountStatus},
rpc_request::{
RpcConfirmedBlock, RpcContactInfo, RpcEpochInfo, RpcLeaderSchedule, RpcRequest,
RpcVersionInfo, RpcVoteAccountStatus,
},
};
use bincode::serialize;
use log::*;
@ -67,14 +70,12 @@ impl RpcClient {
signature: &str,
commitment_config: CommitmentConfig,
) -> RpcResponse<bool> {
let params = json!(signature);
let response = self
.client
.send(
&RpcRequest::ConfirmTransaction,
Some(params),
json!([signature, commitment_config]),
0,
Some(commitment_config),
)
.map_err(|err| {
io::Error::new(
@ -93,10 +94,9 @@ impl RpcClient {
pub fn send_transaction(&self, transaction: &Transaction) -> Result<String, ClientError> {
let serialized = serialize(transaction).unwrap();
let params = json!(serialized);
let signature = self
.client
.send(&RpcRequest::SendTransaction, Some(params), 5, None)?;
.send(&RpcRequest::SendTransaction, json!([serialized]), 5)?;
if signature.as_str().is_none() {
Err(io::Error::new(
io::ErrorKind::Other,
@ -120,12 +120,10 @@ impl RpcClient {
signature: &str,
commitment_config: CommitmentConfig,
) -> Result<Option<transaction::Result<()>>, ClientError> {
let params = json!(signature.to_string());
let signature_status = self.client.send(
&RpcRequest::GetSignatureStatus,
Some(params),
json!([signature.to_string(), commitment_config]),
5,
commitment_config.ok(),
)?;
let result: Option<transaction::Result<()>> =
serde_json::from_value(signature_status).unwrap();
@ -142,7 +140,7 @@ impl RpcClient {
) -> io::Result<Slot> {
let response = self
.client
.send(&RpcRequest::GetSlot, None, 0, commitment_config.ok())
.send(&RpcRequest::GetSlot, json!([commitment_config]), 0)
.map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
@ -161,7 +159,7 @@ impl RpcClient {
pub fn get_vote_accounts(&self) -> io::Result<RpcVoteAccountStatus> {
let response = self
.client
.send(&RpcRequest::GetVoteAccounts, None, 0, None)
.send(&RpcRequest::GetVoteAccounts, Value::Null, 0)
.map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
@ -180,7 +178,7 @@ impl RpcClient {
pub fn get_cluster_nodes(&self) -> io::Result<Vec<RpcContactInfo>> {
let response = self
.client
.send(&RpcRequest::GetClusterNodes, None, 0, None)
.send(&RpcRequest::GetClusterNodes, Value::Null, 0)
.map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
@ -196,11 +194,56 @@ impl RpcClient {
})
}
pub fn get_block_time(&self, slot: Slot) -> io::Result<UnixTimestamp> {
let params = json!(slot);
pub fn get_confirmed_block(&self, slot: Slot) -> io::Result<RpcConfirmedBlock> {
let response = self
.client
.send(&RpcRequest::GetBlockTime, Some(params), 0, None);
.send(&RpcRequest::GetConfirmedBlock, json!([slot]), 0)
.map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
format!("GetConfirmedBlock request failure: {:?}", err),
)
})?;
serde_json::from_value(response).map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
format!("GetConfirmedBlock parse failure: {}", err),
)
})
}
pub fn get_confirmed_blocks(
&self,
start_slot: Slot,
end_slot: Option<Slot>,
) -> io::Result<Vec<Slot>> {
let response = self
.client
.send(
&RpcRequest::GetConfirmedBlocks,
json!([start_slot, end_slot]),
0,
)
.map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
format!("GetConfirmedBlocks request failure: {:?}", err),
)
})?;
serde_json::from_value(response).map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
format!("GetConfirmedBlocks parse failure: {}", err),
)
})
}
pub fn get_block_time(&self, slot: Slot) -> io::Result<UnixTimestamp> {
let response = self
.client
.send(&RpcRequest::GetBlockTime, json!([slot]), 0);
response
.map(|result_json| {
@ -232,7 +275,7 @@ impl RpcClient {
) -> io::Result<RpcEpochInfo> {
let response = self
.client
.send(&RpcRequest::GetEpochInfo, None, 0, commitment_config.ok())
.send(&RpcRequest::GetEpochInfo, json!([commitment_config]), 0)
.map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
@ -248,10 +291,41 @@ impl RpcClient {
})
}
pub fn get_leader_schedule(&self, slot: Option<Slot>) -> io::Result<Option<RpcLeaderSchedule>> {
self.get_leader_schedule_with_commitment(slot, CommitmentConfig::default())
}
pub fn get_leader_schedule_with_commitment(
&self,
slot: Option<Slot>,
commitment_config: CommitmentConfig,
) -> io::Result<Option<RpcLeaderSchedule>> {
let response = self
.client
.send(
&RpcRequest::GetLeaderSchedule,
json!([slot, commitment_config]),
0,
)
.map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
format!("GetLeaderSchedule request failure: {:?}", err),
)
})?;
serde_json::from_value(response).map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
format!("GetLeaderSchedule failure: {}", err),
)
})
}
pub fn get_epoch_schedule(&self) -> io::Result<EpochSchedule> {
let response = self
.client
.send(&RpcRequest::GetEpochSchedule, None, 0, None)
.send(&RpcRequest::GetEpochSchedule, Value::Null, 0)
.map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
@ -270,7 +344,7 @@ impl RpcClient {
pub fn get_inflation(&self) -> io::Result<Inflation> {
let response = self
.client
.send(&RpcRequest::GetInflation, None, 0, None)
.send(&RpcRequest::GetInflation, Value::Null, 0)
.map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
@ -289,7 +363,7 @@ impl RpcClient {
pub fn get_version(&self) -> io::Result<RpcVersionInfo> {
let response = self
.client
.send(&RpcRequest::GetVersion, None, 0, None)
.send(&RpcRequest::GetVersion, Value::Null, 0)
.map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
@ -438,10 +512,13 @@ impl RpcClient {
pubkey: &Pubkey,
retries: usize,
) -> Result<Option<u64>, Box<dyn error::Error>> {
let params = json!(format!("{}", pubkey));
let balance_json = self
.client
.send(&RpcRequest::GetBalance, Some(params), retries, None)
.send(
&RpcRequest::GetBalance,
json!([pubkey.to_string()]),
retries,
)
.map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
@ -477,12 +554,10 @@ impl RpcClient {
pubkey: &Pubkey,
commitment_config: CommitmentConfig,
) -> RpcResponse<Option<Account>> {
let params = json!(format!("{}", pubkey));
let response = self.client.send(
&RpcRequest::GetAccountInfo,
Some(params),
json!([pubkey.to_string(), commitment_config]),
0,
Some(commitment_config),
);
response
@ -510,14 +585,12 @@ impl RpcClient {
}
pub fn get_minimum_balance_for_rent_exemption(&self, data_len: usize) -> io::Result<u64> {
let params = json!(data_len);
let minimum_balance_json = self
.client
.send(
&RpcRequest::GetMinimumBalanceForRentExemption,
Some(params),
json!([data_len]),
0,
None,
)
.map_err(|err| {
io::Error::new(
@ -555,14 +628,12 @@ impl RpcClient {
pubkey: &Pubkey,
commitment_config: CommitmentConfig,
) -> RpcResponse<u64> {
let params = json!(pubkey.to_string());
let balance_json = self
.client
.send(
&RpcRequest::GetBalance,
Some(params),
json!([pubkey.to_string(), commitment_config]),
0,
Some(commitment_config),
)
.map_err(|err| {
io::Error::new(
@ -580,10 +651,13 @@ impl RpcClient {
}
pub fn get_program_accounts(&self, pubkey: &Pubkey) -> io::Result<Vec<(Pubkey, Account)>> {
let params = json!(format!("{}", pubkey));
let response = self
.client
.send(&RpcRequest::GetProgramAccounts, Some(params), 0, None)
.send(
&RpcRequest::GetProgramAccounts,
json!([pubkey.to_string()]),
0,
)
.map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
@ -625,9 +699,8 @@ impl RpcClient {
.client
.send(
&RpcRequest::GetTransactionCount,
None,
json!([commitment_config]),
0,
commitment_config.ok(),
)
.map_err(|err| {
io::Error::new(
@ -658,9 +731,8 @@ impl RpcClient {
.client
.send(
&RpcRequest::GetRecentBlockhash,
None,
json!([commitment_config]),
0,
commitment_config.ok(),
)
.map_err(|err| {
io::Error::new(
@ -723,7 +795,7 @@ impl RpcClient {
pub fn get_genesis_hash(&self) -> io::Result<Hash> {
let response = self
.client
.send(&RpcRequest::GetGenesisHash, None, 0, None)
.send(&RpcRequest::GetGenesisHash, Value::Null, 0)
.map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
@ -848,14 +920,12 @@ impl RpcClient {
/// Check a signature in the bank.
pub fn check_signature(&self, signature: &Signature) -> bool {
trace!("check_signature: {:?}", signature);
let params = json!(format!("{}", signature));
for _ in 0..30 {
let response = self.client.send(
&RpcRequest::ConfirmTransaction,
Some(params.clone()),
json!([signature.to_string(), CommitmentConfig::recent()]),
0,
Some(CommitmentConfig::recent()),
);
match response {
@ -942,16 +1012,14 @@ impl RpcClient {
pub fn get_num_blocks_since_signature_confirmation(
&self,
sig: &Signature,
signature: &Signature,
) -> io::Result<usize> {
let params = json!(format!("{}", sig));
let response = self
.client
.send(
&RpcRequest::GetNumBlocksSinceSignatureConfirmation,
Some(params.clone()),
json!([signature.to_string(), CommitmentConfig::recent().ok()]),
1,
CommitmentConfig::recent().ok(),
)
.map_err(|err| {
io::Error::new(
@ -976,7 +1044,7 @@ impl RpcClient {
pub fn validator_exit(&self) -> io::Result<bool> {
let response = self
.client
.send(&RpcRequest::ValidatorExit, None, 0, None)
.send(&RpcRequest::ValidatorExit, Value::Null, 0)
.map_err(|err| {
io::Error::new(
io::ErrorKind::Other,
@ -994,11 +1062,11 @@ impl RpcClient {
pub fn send(
&self,
request: &RpcRequest,
params: Option<Value>,
params: Value,
retries: usize,
commitment: Option<CommitmentConfig>,
) -> Result<Value, ClientError> {
self.client.send(request, params, retries, commitment)
assert!(params.is_array() || params.is_null());
self.client.send(request, params, retries)
}
}
@ -1062,25 +1130,19 @@ mod tests {
let balance = rpc_client.send(
&RpcRequest::GetBalance,
Some(json!(["deadbeefXjn8o3yroDHxUtKsZZgoy4GPkPPXfouKNHhx"])),
json!(["deadbeefXjn8o3yroDHxUtKsZZgoy4GPkPPXfouKNHhx"]),
0,
None,
);
assert_eq!(balance.unwrap().as_u64().unwrap(), 50);
let blockhash = rpc_client.send(&RpcRequest::GetRecentBlockhash, None, 0, None);
let blockhash = rpc_client.send(&RpcRequest::GetRecentBlockhash, Value::Null, 0);
assert_eq!(
blockhash.unwrap().as_str().unwrap(),
"deadbeefXjn8o3yroDHxUtKsZZgoy4GPkPPXfouKNHhx"
);
// Send erroneous parameter
let blockhash = rpc_client.send(
&RpcRequest::GetRecentBlockhash,
Some(json!("parameter")),
0,
None,
);
let blockhash = rpc_client.send(&RpcRequest::GetRecentBlockhash, json!(["parameter"]), 0);
assert_eq!(blockhash.is_err(), true);
}
@ -1116,9 +1178,8 @@ mod tests {
let balance = rpc_client.send(
&RpcRequest::GetBalance,
Some(json!(["deadbeefXjn8o3yroDHxUtKsZZgoy4GPkPPXfouKNHhw"])),
json!(["deadbeefXjn8o3yroDHxUtKsZZgoy4GPkPPXfouKNHhw"]),
10,
None,
);
assert_eq!(balance.unwrap().as_u64().unwrap(), 5);
}

View File

@ -5,10 +5,7 @@ use crate::{
};
use log::*;
use reqwest::{self, header::CONTENT_TYPE};
use solana_sdk::{
clock::{DEFAULT_TICKS_PER_SECOND, DEFAULT_TICKS_PER_SLOT},
commitment_config::CommitmentConfig,
};
use solana_sdk::clock::{DEFAULT_TICKS_PER_SECOND, DEFAULT_TICKS_PER_SLOT};
use std::{thread::sleep, time::Duration};
pub struct RpcClientRequest {
@ -38,14 +35,13 @@ impl GenericRpcClientRequest for RpcClientRequest {
fn send(
&self,
request: &RpcRequest,
params: Option<serde_json::Value>,
params: serde_json::Value,
mut retries: usize,
commitment_config: Option<CommitmentConfig>,
) -> Result<serde_json::Value, ClientError> {
// Concurrent requests are not supported so reuse the same request id for all requests
let request_id = 1;
let request_json = request.build_request_json(request_id, params, commitment_config);
let request_json = request.build_request_json(request_id, params);
loop {
match self

View File

@ -2,11 +2,10 @@ use jsonrpc_core::Result as JsonResult;
use serde_json::{json, Value};
use solana_sdk::{
clock::{Epoch, Slot},
commitment_config::CommitmentConfig,
hash::Hash,
transaction::{Result, Transaction},
};
use std::{error, fmt, io, net::SocketAddr};
use std::{collections::HashMap, error, fmt, io, net::SocketAddr};
pub type RpcResponseIn<T> = JsonResult<Response<T>>;
pub type RpcResponse<T> = io::Result<Response<T>>;
@ -31,7 +30,7 @@ pub struct RpcConfirmedBlock {
pub transactions: Vec<(Transaction, Option<RpcTransactionStatus>)>,
}
#[derive(Debug, PartialEq, Serialize, Deserialize)]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RpcTransactionStatus {
pub status: Result<()>,
pub fee: u64,
@ -49,6 +48,9 @@ pub struct RpcContactInfo {
pub rpc: Option<SocketAddr>,
}
/// Map of leader base58 identity pubkeys to the slot indices relative to the first epoch slot
pub type RpcLeaderSchedule = HashMap<String, Vec<usize>>;
#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct RpcEpochInfo {
@ -97,6 +99,10 @@ pub struct RpcVoteAccountInfo {
/// Whether this account is staked for the current epoch
pub epoch_vote_account: bool,
/// History of how many credits earned by the end of each epoch
/// each tuple is (Epoch, credits, prev_credits)
pub epoch_credits: Vec<(Epoch, u64, u64)>,
/// Most recent slot voted on by this vote account (0 if no votes exist)
pub last_vote: u64,
@ -113,10 +119,13 @@ pub enum RpcRequest {
GetBalance,
GetBlockTime,
GetClusterNodes,
GetConfirmedBlock,
GetConfirmedBlocks,
GetEpochInfo,
GetEpochSchedule,
GetGenesisHash,
GetInflation,
GetLeaderSchedule,
GetNumBlocksSinceSignatureConfirmation,
GetProgramAccounts,
GetRecentBlockhash,
@ -138,12 +147,7 @@ pub enum RpcRequest {
}
impl RpcRequest {
pub(crate) fn build_request_json(
&self,
id: u64,
params: Option<Value>,
commitment_config: Option<CommitmentConfig>,
) -> Value {
pub(crate) fn build_request_json(&self, id: u64, params: Value) -> Value {
let jsonrpc = "2.0";
let method = match self {
RpcRequest::ConfirmTransaction => "confirmTransaction",
@ -153,10 +157,13 @@ impl RpcRequest {
RpcRequest::GetBalance => "getBalance",
RpcRequest::GetBlockTime => "getBlockTime",
RpcRequest::GetClusterNodes => "getClusterNodes",
RpcRequest::GetConfirmedBlock => "getConfirmedBlock",
RpcRequest::GetConfirmedBlocks => "getConfirmedBlocks",
RpcRequest::GetEpochInfo => "getEpochInfo",
RpcRequest::GetEpochSchedule => "getEpochSchedule",
RpcRequest::GetGenesisHash => "getGenesisHash",
RpcRequest::GetInflation => "getInflation",
RpcRequest::GetLeaderSchedule => "getLeaderSchedule",
RpcRequest::GetNumBlocksSinceSignatureConfirmation => {
"getNumBlocksSinceSignatureConfirmation"
}
@ -178,21 +185,12 @@ impl RpcRequest {
RpcRequest::SignVote => "signVote",
RpcRequest::GetMinimumBalanceForRentExemption => "getMinimumBalanceForRentExemption",
};
let mut request = json!({
json!({
"jsonrpc": jsonrpc,
"id": id,
"method": method,
});
if let Some(param_string) = params {
if let Some(config) = commitment_config {
request["params"] = json!([param_string, config]);
} else {
request["params"] = json!([param_string]);
}
} else if let Some(config) = commitment_config {
request["params"] = json!([config]);
}
request
"params": params,
})
}
}
@ -221,46 +219,46 @@ impl error::Error for RpcError {
#[cfg(test)]
mod tests {
use super::*;
use solana_sdk::commitment_config::CommitmentLevel;
use solana_sdk::commitment_config::{CommitmentConfig, CommitmentLevel};
#[test]
fn test_build_request_json() {
let test_request = RpcRequest::GetAccountInfo;
let addr = json!("deadbeefXjn8o3yroDHxUtKsZZgoy4GPkPPXfouKNHhx");
let request = test_request.build_request_json(1, Some(addr.clone()), None);
let request = test_request.build_request_json(1, json!([addr.clone()]));
assert_eq!(request["method"], "getAccountInfo");
assert_eq!(request["params"], json!([addr]));
let test_request = RpcRequest::GetBalance;
let request = test_request.build_request_json(1, Some(addr), None);
let request = test_request.build_request_json(1, json!([addr]));
assert_eq!(request["method"], "getBalance");
let test_request = RpcRequest::GetEpochInfo;
let request = test_request.build_request_json(1, None, None);
let request = test_request.build_request_json(1, Value::Null);
assert_eq!(request["method"], "getEpochInfo");
let test_request = RpcRequest::GetInflation;
let request = test_request.build_request_json(1, None, None);
let request = test_request.build_request_json(1, Value::Null);
assert_eq!(request["method"], "getInflation");
let test_request = RpcRequest::GetRecentBlockhash;
let request = test_request.build_request_json(1, None, None);
let request = test_request.build_request_json(1, Value::Null);
assert_eq!(request["method"], "getRecentBlockhash");
let test_request = RpcRequest::GetSlot;
let request = test_request.build_request_json(1, None, None);
let request = test_request.build_request_json(1, Value::Null);
assert_eq!(request["method"], "getSlot");
let test_request = RpcRequest::GetTransactionCount;
let request = test_request.build_request_json(1, None, None);
let request = test_request.build_request_json(1, Value::Null);
assert_eq!(request["method"], "getTransactionCount");
let test_request = RpcRequest::RequestAirdrop;
let request = test_request.build_request_json(1, None, None);
let request = test_request.build_request_json(1, Value::Null);
assert_eq!(request["method"], "requestAirdrop");
let test_request = RpcRequest::SendTransaction;
let request = test_request.build_request_json(1, None, None);
let request = test_request.build_request_json(1, Value::Null);
assert_eq!(request["method"], "sendTransaction");
}
@ -273,13 +271,13 @@ mod tests {
// Test request with CommitmentConfig and no params
let test_request = RpcRequest::GetRecentBlockhash;
let request = test_request.build_request_json(1, None, Some(commitment_config.clone()));
let request = test_request.build_request_json(1, json!([commitment_config.clone()]));
assert_eq!(request["params"], json!([commitment_config.clone()]));
// Test request with CommitmentConfig and params
let test_request = RpcRequest::GetBalance;
let request =
test_request.build_request_json(1, Some(addr.clone()), Some(commitment_config.clone()));
test_request.build_request_json(1, json!([addr.clone(), commitment_config.clone()]));
assert_eq!(request["params"], json!([addr, commitment_config]));
}
}

View File

@ -1,7 +1,7 @@
[package]
name = "solana-core"
description = "Blockchain, Rebuilt for Scale"
version = "0.21.1"
version = "0.21.8"
documentation = "https://docs.rs/solana"
homepage = "https://solana.com/"
readme = "../README.md"
@ -41,25 +41,25 @@ rayon = "1.2.0"
serde = "1.0.102"
serde_derive = "1.0.102"
serde_json = "1.0.41"
solana-budget-program = { path = "../programs/budget", version = "0.21.1" }
solana-clap-utils = { path = "../clap-utils", version = "0.21.1" }
solana-chacha-sys = { path = "../chacha-sys", version = "0.21.1" }
solana-client = { path = "../client", version = "0.21.1" }
solana-drone = { path = "../drone", version = "0.21.1" }
solana-budget-program = { path = "../programs/budget", version = "0.21.8" }
solana-clap-utils = { path = "../clap-utils", version = "0.21.8" }
solana-chacha-sys = { path = "../chacha-sys", version = "0.21.8" }
solana-client = { path = "../client", version = "0.21.8" }
solana-drone = { path = "../drone", version = "0.21.8" }
ed25519-dalek = "1.0.0-pre.1"
solana-ledger = { path = "../ledger", version = "0.21.1" }
solana-logger = { path = "../logger", version = "0.21.1" }
solana-merkle-tree = { path = "../merkle-tree", version = "0.21.1" }
solana-metrics = { path = "../metrics", version = "0.21.1" }
solana-measure = { path = "../measure", version = "0.21.1" }
solana-net-utils = { path = "../net-utils", version = "0.21.1" }
solana-perf = { path = "../perf", version = "0.21.1" }
solana-runtime = { path = "../runtime", version = "0.21.1" }
solana-sdk = { path = "../sdk", version = "0.21.1" }
solana-stake-program = { path = "../programs/stake", version = "0.21.1" }
solana-storage-program = { path = "../programs/storage", version = "0.21.1" }
solana-vote-program = { path = "../programs/vote", version = "0.21.1" }
solana-vote-signer = { path = "../vote-signer", version = "0.21.1" }
solana-ledger = { path = "../ledger", version = "0.21.8" }
solana-logger = { path = "../logger", version = "0.21.8" }
solana-merkle-tree = { path = "../merkle-tree", version = "0.21.8" }
solana-metrics = { path = "../metrics", version = "0.21.8" }
solana-measure = { path = "../measure", version = "0.21.8" }
solana-net-utils = { path = "../net-utils", version = "0.21.8" }
solana-perf = { path = "../perf", version = "0.21.8" }
solana-runtime = { path = "../runtime", version = "0.21.8" }
solana-sdk = { path = "../sdk", version = "0.21.8" }
solana-stake-program = { path = "../programs/stake", version = "0.21.8" }
solana-storage-program = { path = "../programs/storage", version = "0.21.8" }
solana-vote-program = { path = "../programs/vote", version = "0.21.8" }
solana-vote-signer = { path = "../vote-signer", version = "0.21.8" }
symlink = "0.1.0"
sys-info = "0.5.8"
tempfile = "3.1.0"
@ -68,13 +68,9 @@ tokio-codec = "0.1"
tokio-fs = "0.1"
tokio-io = "0.1"
untrusted = "0.7.0"
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "0.21.1" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "0.21.8" }
reed-solomon-erasure = { package = "solana-reed-solomon-erasure", version = "4.0.1-3", features = ["simd-accel"] }
[target."cfg(unix)".dependencies]
jemallocator = "0.3.2"
jemalloc-ctl = "0.3.2"
[dev-dependencies]
hex-literal = "0.2.1"
matches = "0.1.6"

View File

@ -748,9 +748,8 @@ impl Archiver {
Ok(rpc_client
.send(
&RpcRequest::GetSlotsPerSegment,
None,
serde_json::json!([client_commitment]),
0,
Some(client_commitment),
)
.map_err(|err| {
warn!("Error while making rpc request {:?}", err);
@ -803,7 +802,11 @@ impl Archiver {
RpcClient::new_socket(rpc_peers[node_index].rpc)
};
let response = rpc_client
.send(&RpcRequest::GetStorageTurn, None, 0, None)
.send(
&RpcRequest::GetStorageTurn,
serde_json::value::Value::Null,
0,
)
.map_err(|err| {
warn!("Error while making rpc request {:?}", err);
Error::IO(io::Error::new(ErrorKind::Other, "rpc error"))

View File

@ -7,7 +7,6 @@ use crate::{
poh_recorder::{PohRecorder, PohRecorderError, WorkingBankEntry},
poh_service::PohService,
result::{Error, Result},
thread_mem_usage,
};
use crossbeam_channel::{Receiver as CrossbeamReceiver, RecvTimeoutError};
use itertools::Itertools;
@ -17,7 +16,7 @@ use solana_ledger::{
entry::hash_transactions,
leader_schedule_cache::LeaderScheduleCache,
};
use solana_measure::measure::Measure;
use solana_measure::{measure::Measure, thread_mem_usage};
use solana_metrics::{inc_new_counter_debug, inc_new_counter_info, inc_new_counter_warn};
use solana_perf::{cuda_runtime::PinnedVec, perf_libs};
use solana_runtime::{accounts_db::ErrorCounters, bank::Bank, transaction_batch::TransactionBatch};

View File

@ -24,7 +24,6 @@ use crate::{
repair_service::RepairType,
result::{Error, Result},
sendmmsg::{multicast, send_mmsg},
thread_mem_usage,
weighted_shuffle::{weighted_best, weighted_shuffle},
};
use bincode::{serialize, serialized_size};
@ -32,6 +31,7 @@ use core::cmp;
use itertools::Itertools;
use rand::{thread_rng, Rng};
use solana_ledger::{bank_forks::BankForks, blocktree::Blocktree, staking_utils};
use solana_measure::thread_mem_usage;
use solana_metrics::{datapoint_debug, inc_new_counter_debug, inc_new_counter_error};
use solana_net_utils::{
bind_common, bind_common_in_range, bind_in_range, find_available_port_in_range,
@ -67,11 +67,11 @@ pub const GOSSIP_SLEEP_MILLIS: u64 = 100;
/// the number of slots to respond with when responding to `Orphan` requests
pub const MAX_ORPHAN_REPAIR_RESPONSES: usize = 10;
/// The maximum size of a bloom filter
pub const MAX_BLOOM_SIZE: usize = 1030;
pub const MAX_BLOOM_SIZE: usize = 1028;
/// The maximum size of a protocol payload
const MAX_PROTOCOL_PAYLOAD_SIZE: u64 = PACKET_DATA_SIZE as u64 - MAX_PROTOCOL_HEADER_SIZE;
/// The largest protocol header size
const MAX_PROTOCOL_HEADER_SIZE: u64 = 202;
const MAX_PROTOCOL_HEADER_SIZE: u64 = 204;
#[derive(Debug, PartialEq, Eq)]
pub enum ClusterInfoError {
@ -273,7 +273,7 @@ impl ClusterInfo {
let ip_addr = node.gossip.ip();
format!(
"{:15} {:2}| {:5} | {:44} | {:5}| {:5}| {:5} | {:5}| {:5} | {:5}| {:5} | {:5}| {:5}\n",
"{:15} {:2}| {:5} | {:44} | {:5}| {:5}| {:5} | {:5}| {:5} | {:5}| {:5} | {:5}| {:5}| v{}\n",
if ContactInfo::is_valid_address(&node.gossip) {
ip_addr.to_string()
} else {
@ -291,15 +291,16 @@ impl ClusterInfo {
addr_to_string(&ip_addr, &node.storage_addr),
addr_to_string(&ip_addr, &node.rpc),
addr_to_string(&ip_addr, &node.rpc_pubsub),
node.shred_version,
)
})
.collect();
format!(
"IP Address |Age(ms)| Node identifier \
|Gossip| TPU |TPU fwd| TVU |TVU fwd|Repair|Storage| RPC |PubSub\n\
|Gossip| TPU |TPU fwd| TVU |TVU fwd|Repair|Storage| RPC |PubSub|ShredVer\n\
------------------+-------+----------------------------------------------+\
------+------+-------+------+-------+------+-------+------+------\n\
------+------+-------+------+-------+------+-------+------+------+--------\n\
{}\
Nodes: {}{}{}",
nodes.join(""),
@ -406,13 +407,13 @@ impl ClusterInfo {
}
pub fn rpc_peers(&self) -> Vec<ContactInfo> {
let me = self.my_data().id;
let me = self.my_data();
self.gossip
.crds
.table
.values()
.filter_map(|x| x.value.contact_info())
.filter(|x| x.id != me)
.filter(|x| x.id != me.id)
.filter(|x| ContactInfo::is_valid_address(&x.rpc))
.cloned()
.collect()
@ -447,7 +448,7 @@ impl ClusterInfo {
/// all validators that have a valid tvu port.
pub fn tvu_peers(&self) -> Vec<ContactInfo> {
let me = self.my_data().id;
let me = self.my_data();
self.gossip
.crds
.table
@ -455,34 +456,34 @@ impl ClusterInfo {
.filter_map(|x| x.value.contact_info())
.filter(|x| ContactInfo::is_valid_address(&x.tvu))
.filter(|x| !ClusterInfo::is_archiver(x))
.filter(|x| x.id != me)
.filter(|x| x.id != me.id)
.cloned()
.collect()
}
/// all peers that have a valid storage addr
pub fn storage_peers(&self) -> Vec<ContactInfo> {
let me = self.my_data().id;
let me = self.my_data();
self.gossip
.crds
.table
.values()
.filter_map(|x| x.value.contact_info())
.filter(|x| ContactInfo::is_valid_address(&x.storage_addr))
.filter(|x| x.id != me)
.filter(|x| x.id != me.id)
.cloned()
.collect()
}
/// all peers that have a valid tvu
pub fn retransmit_peers(&self) -> Vec<ContactInfo> {
let me = self.my_data().id;
let me = self.my_data();
self.gossip
.crds
.table
.values()
.filter_map(|x| x.value.contact_info())
.filter(|x| x.id != me)
.filter(|x| x.id != me.id)
.filter(|x| ContactInfo::is_valid_address(&x.tvu))
.filter(|x| ContactInfo::is_valid_address(&x.tvu_forwards))
.cloned()
@ -491,10 +492,10 @@ impl ClusterInfo {
/// all tvu peers with valid gossip addrs
fn repair_peers(&self) -> Vec<ContactInfo> {
let me = self.my_data().id;
let me = self.my_data();
ClusterInfo::tvu_peers(self)
.into_iter()
.filter(|x| x.id != me)
.filter(|x| x.id != me.id)
.filter(|x| ContactInfo::is_valid_address(&x.gossip))
.collect()
}
@ -2563,7 +2564,7 @@ mod tests {
#[test]
fn test_split_messages_packet_size() {
// Test that if a value is smaller than payload size but too large to be wrappe in a vec
// Test that if a value is smaller than payload size but too large to be wrapped in a vec
// that it is still dropped
let payload: Vec<CrdsValue> = vec![];
let vec_size = serialized_size(&payload).unwrap();
@ -2576,7 +2577,7 @@ mod tests {
}));
let mut i = 0;
while value.size() < desired_size {
while value.size() <= desired_size {
let slots = (0..i).collect::<BTreeSet<_>>();
if slots.len() > 200 {
panic!(

View File

@ -31,6 +31,8 @@ pub struct ContactInfo {
pub rpc_pubsub: SocketAddr,
/// latest wallclock picked
pub wallclock: u64,
/// node shred version
pub shred_version: u16,
}
impl Ord for ContactInfo {
@ -84,6 +86,7 @@ impl Default for ContactInfo {
rpc: socketaddr_any!(),
rpc_pubsub: socketaddr_any!(),
wallclock: 0,
shred_version: 0,
}
}
}
@ -115,6 +118,7 @@ impl ContactInfo {
rpc,
rpc_pubsub,
wallclock: now,
shred_version: 0,
}
}

View File

@ -5,7 +5,7 @@ use crate::packet::PacketsRecycler;
use crate::poh_recorder::PohRecorder;
use crate::result::{Error, Result};
use crate::streamer::{self, PacketReceiver, PacketSender};
use crate::thread_mem_usage;
use solana_measure::thread_mem_usage;
use solana_metrics::{inc_new_counter_debug, inc_new_counter_info};
use solana_perf::recycler::Recycler;
use solana_sdk::clock::DEFAULT_TICKS_PER_SLOT;

View File

@ -199,7 +199,6 @@ fn spy(
.unwrap()
.tvu_peers()
.into_iter()
.filter(|node| !ClusterInfo::is_archiver(&node))
.collect::<Vec<_>>();
archivers = spy_ref.read().unwrap().storage_peers();
if let Some(num) = num_nodes {

View File

@ -12,7 +12,6 @@ pub mod chacha_cuda;
pub mod cluster_info_vote_listener;
pub mod commitment;
pub mod shred_fetch_stage;
pub mod thread_mem_usage;
#[macro_use]
pub mod contact_info;
pub mod archiver;
@ -84,10 +83,3 @@ extern crate solana_metrics;
#[cfg(test)]
#[macro_use]
extern crate matches;
#[cfg(unix)]
extern crate jemallocator;
#[cfg(unix)]
#[global_allocator]
static ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc;

View File

@ -7,7 +7,6 @@ use crate::{
poh_recorder::PohRecorder,
result::{Error, Result},
rpc_subscriptions::RpcSubscriptions,
thread_mem_usage,
};
use solana_ledger::{
bank_forks::BankForks,
@ -18,7 +17,7 @@ use solana_ledger::{
leader_schedule_cache::LeaderScheduleCache,
snapshot_package::SnapshotPackageSender,
};
use solana_measure::measure::Measure;
use solana_measure::{measure::Measure, thread_mem_usage};
use solana_metrics::inc_new_counter_info;
use solana_runtime::bank::Bank;
use solana_sdk::{
@ -1025,7 +1024,7 @@ impl ReplayStage {
);
datapoint_error!(
"replay-stage-entry_verification_failure",
"replay-stage-block-error",
("slot", bank.slot(), i64),
("last_entry", last_entry.to_string(), String),
);

View File

@ -12,11 +12,13 @@ use bincode::serialize;
use jsonrpc_core::{Error, Metadata, Result};
use jsonrpc_derive::rpc;
use solana_client::rpc_request::{
Response, RpcConfirmedBlock, RpcContactInfo, RpcEpochInfo, RpcResponseContext, RpcVersionInfo,
RpcVoteAccountInfo, RpcVoteAccountStatus,
Response, RpcConfirmedBlock, RpcContactInfo, RpcEpochInfo, RpcLeaderSchedule,
RpcResponseContext, RpcVersionInfo, RpcVoteAccountInfo, RpcVoteAccountStatus,
};
use solana_drone::drone::request_airdrop_transaction;
use solana_ledger::{bank_forks::BankForks, blocktree::Blocktree};
use solana_ledger::{
bank_forks::BankForks, blocktree::Blocktree, rooted_slot_iterator::RootedSlotIterator,
};
use solana_runtime::bank::Bank;
use solana_sdk::{
account::Account,
@ -33,6 +35,7 @@ use solana_sdk::{
};
use solana_vote_program::vote_state::{VoteState, MAX_LOCKOUT_HISTORY};
use std::{
collections::HashMap,
net::{SocketAddr, UdpSocket},
sync::{Arc, RwLock},
thread::sleep,
@ -251,14 +254,11 @@ impl JsonRpcRequestProcessor {
activated_stake: *activated_stake,
commission: vote_state.commission,
root_slot: vote_state.root_slot.unwrap_or(0),
epoch_credits: vote_state.epoch_credits().clone(),
epoch_vote_account,
last_vote,
}
})
.filter(|vote_account_info| {
// Remove vote accounts that have never voted and also have no stake
vote_account_info.last_vote == 0 && vote_account_info.activated_stake == 0
})
.partition(|vote_account_info| {
if bank.slot() >= MAX_LOCKOUT_HISTORY as u64 {
vote_account_info.last_vote > bank.slot() - MAX_LOCKOUT_HISTORY as u64
@ -266,9 +266,15 @@ impl JsonRpcRequestProcessor {
vote_account_info.last_vote > 0
}
});
let delinquent_staked_vote_accounts = delinquent_vote_accounts
.into_iter()
.filter(|vote_account_info| vote_account_info.activated_stake > 0)
.collect::<Vec<_>>();
Ok(RpcVoteAccountStatus {
current: current_vote_accounts,
delinquent: delinquent_vote_accounts,
delinquent: delinquent_staked_vote_accounts,
})
}
@ -310,6 +316,29 @@ impl JsonRpcRequestProcessor {
Ok(self.blocktree.get_confirmed_block(slot).ok())
}
pub fn get_confirmed_blocks(
&self,
start_slot: Slot,
end_slot: Option<Slot>,
) -> Result<Vec<Slot>> {
let end_slot = end_slot.unwrap_or_else(|| self.bank(None).slot());
if end_slot < start_slot {
return Ok(vec![]);
}
let start_slot = (start_slot..end_slot).find(|&slot| self.blocktree.is_root(slot));
if let Some(start_slot) = start_slot {
let mut slots: Vec<Slot> = RootedSlotIterator::new(start_slot, &self.blocktree)
.unwrap()
.map(|(slot, _)| slot)
.collect();
slots.retain(|&x| x <= end_slot);
Ok(slots)
} else {
Ok(vec![])
}
}
// The `get_block_time` method is not fully implemented. It currently returns `slot` *
// DEFAULT_MS_PER_SLOT offset from 0 for all requests, and null for any values that would
// overflow.
@ -415,7 +444,7 @@ pub trait RpcSol {
fn get_block_commitment(
&self,
meta: Self::Metadata,
block: u64,
block: Slot,
) -> Result<(Option<BlockCommitment>, u64)>;
#[rpc(meta, name = "getGenesisHash")]
@ -425,8 +454,9 @@ pub trait RpcSol {
fn get_leader_schedule(
&self,
meta: Self::Metadata,
slot: Option<Slot>,
commitment: Option<CommitmentConfig>,
) -> Result<Option<Vec<String>>>;
) -> Result<Option<RpcLeaderSchedule>>;
#[rpc(meta, name = "getRecentBlockhash")]
fn get_recent_blockhash(
@ -536,6 +566,14 @@ pub trait RpcSol {
#[rpc(meta, name = "getBlockTime")]
fn get_block_time(&self, meta: Self::Metadata, slot: Slot) -> Result<Option<UnixTimestamp>>;
#[rpc(meta, name = "getConfirmedBlocks")]
fn get_confirmed_blocks(
&self,
meta: Self::Metadata,
start_slot: Slot,
end_slot: Option<Slot>,
) -> Result<Vec<Slot>>;
}
pub struct RpcSolImpl;
@ -675,8 +713,9 @@ impl RpcSol for RpcSolImpl {
) -> Result<RpcEpochInfo> {
let bank = meta.request_processor.read().unwrap().bank(commitment);
let epoch_schedule = bank.epoch_schedule();
let (epoch, slot_index) = epoch_schedule.get_epoch_and_slot_index(bank.slot());
let slot = bank.slot();
let (epoch, slot_index) = epoch_schedule.get_epoch_and_slot_index(slot);
Ok(RpcEpochInfo {
epoch,
slot_index,
@ -705,17 +744,25 @@ impl RpcSol for RpcSolImpl {
fn get_leader_schedule(
&self,
meta: Self::Metadata,
slot: Option<Slot>,
commitment: Option<CommitmentConfig>,
) -> Result<Option<Vec<String>>> {
) -> Result<Option<RpcLeaderSchedule>> {
let bank = meta.request_processor.read().unwrap().bank(commitment);
let slot = slot.unwrap_or_else(|| bank.slot());
let epoch = bank.epoch_schedule().get_epoch(slot);
Ok(
solana_ledger::leader_schedule_utils::leader_schedule(bank.epoch(), &bank).map(
solana_ledger::leader_schedule_utils::leader_schedule(epoch, &bank).map(
|leader_schedule| {
leader_schedule
.get_slot_leaders()
.iter()
.map(|pubkey| pubkey.to_string())
.collect()
let mut map = HashMap::new();
for (slot_index, pubkey) in
leader_schedule.get_slot_leaders().iter().enumerate()
{
let pubkey = pubkey.to_string();
map.entry(pubkey).or_insert_with(|| vec![]).push(slot_index);
}
map
},
),
)
@ -991,6 +1038,18 @@ impl RpcSol for RpcSolImpl {
.get_confirmed_block(slot)
}
fn get_confirmed_blocks(
&self,
meta: Self::Metadata,
start_slot: Slot,
end_slot: Option<Slot>,
) -> Result<Vec<Slot>> {
meta.request_processor
.read()
.unwrap()
.get_confirmed_blocks(start_slot, end_slot)
}
fn get_block_time(&self, meta: Self::Metadata, slot: Slot) -> Result<Option<UnixTimestamp>> {
meta.request_processor.read().unwrap().get_block_time(slot)
}
@ -1005,7 +1064,10 @@ pub mod tests {
replay_stage::tests::create_test_transactions_and_populate_blocktree,
};
use jsonrpc_core::{MetaIoHandler, Output, Response, Value};
use solana_ledger::get_tmp_ledger_path;
use solana_ledger::{
blocktree::entries_to_test_shreds, blocktree_processor::fill_blocktree_slot_with_ticks,
entry::next_entry_mut, get_tmp_ledger_path,
};
use solana_sdk::{
fee_calculator::DEFAULT_BURN_PERCENT,
hash::{hash, Hash},
@ -1014,27 +1076,41 @@ pub mod tests {
system_transaction,
transaction::TransactionError,
};
use solana_vote_program::{
vote_instruction,
vote_state::{Vote, VoteInit, MAX_LOCKOUT_HISTORY},
};
use std::{
collections::HashMap,
sync::atomic::{AtomicBool, Ordering},
thread,
};
const TEST_MINT_LAMPORTS: u64 = 10_000;
const TEST_MINT_LAMPORTS: u64 = 1_000_000;
const TEST_SLOTS_PER_EPOCH: u64 = 50;
struct RpcHandler {
io: MetaIoHandler<Meta>,
meta: Meta,
bank: Arc<Bank>,
bank_forks: Arc<RwLock<BankForks>>,
blockhash: Hash,
alice: Keypair,
leader_pubkey: Pubkey,
leader_vote_keypair: Keypair,
block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
confirmed_block_signatures: Vec<Signature>,
}
fn start_rpc_handler_with_tx(pubkey: &Pubkey) -> RpcHandler {
let (bank_forks, alice) = new_bank_forks();
start_rpc_handler_with_tx_and_blocktree(pubkey, vec![])
}
fn start_rpc_handler_with_tx_and_blocktree(
pubkey: &Pubkey,
blocktree_roots: Vec<Slot>,
) -> RpcHandler {
let (bank_forks, alice, leader_vote_keypair) = new_bank_forks();
let bank = bank_forks.read().unwrap().working_bank();
let commitment_slot0 = BlockCommitment::new([8; MAX_LOCKOUT_HISTORY]);
@ -1063,6 +1139,55 @@ pub mod tests {
blocktree.clone(),
);
// Add timestamp vote to blocktree
let vote = Vote {
slots: vec![1],
hash: Hash::default(),
};
let vote_ix = vote_instruction::vote(
&leader_vote_keypair.pubkey(),
&leader_vote_keypair.pubkey(),
vote,
);
let vote_tx = Transaction::new_signed_instructions(
&[&leader_vote_keypair],
vec![vote_ix],
Hash::default(),
);
let shreds = entries_to_test_shreds(
vec![next_entry_mut(&mut Hash::default(), 0, vec![vote_tx])],
1,
0,
true,
0,
);
blocktree.insert_shreds(shreds, None, false).unwrap();
blocktree.set_roots(&[1]).unwrap();
let mut roots = blocktree_roots.clone();
if !roots.is_empty() {
roots.retain(|&x| x > 1);
let mut parent_bank = bank;
for (i, root) in roots.iter().enumerate() {
let new_bank =
Bank::new_from_parent(&parent_bank, parent_bank.collector_id(), *root);
parent_bank = bank_forks.write().unwrap().insert(new_bank);
parent_bank.squash();
bank_forks.write().unwrap().set_root(*root, &None);
let parent = if i > 0 { roots[i - 1] } else { 1 };
fill_blocktree_slot_with_ticks(&blocktree, 5, *root, parent, Hash::default());
}
blocktree.set_roots(&roots).unwrap();
let new_bank = Bank::new_from_parent(
&parent_bank,
parent_bank.collector_id(),
roots.iter().max().unwrap() + 1,
);
bank_forks.write().unwrap().insert(new_bank);
}
let bank = bank_forks.read().unwrap().working_bank();
let leader_pubkey = *bank.collector_id();
let exit = Arc::new(AtomicBool::new(false));
let validator_exit = create_validator_exit(&exit);
@ -1076,7 +1201,7 @@ pub mod tests {
let request_processor = Arc::new(RwLock::new(JsonRpcRequestProcessor::new(
JsonRpcConfig::default(),
bank_forks,
bank_forks.clone(),
block_commitment_cache.clone(),
blocktree,
StorageState::default(),
@ -1106,9 +1231,11 @@ pub mod tests {
io,
meta,
bank,
bank_forks,
blockhash,
alice,
leader_pubkey,
leader_vote_keypair,
block_commitment_cache,
confirmed_block_signatures,
}
@ -1119,7 +1246,7 @@ pub mod tests {
let bob_pubkey = Pubkey::new_rand();
let exit = Arc::new(AtomicBool::new(false));
let validator_exit = create_validator_exit(&exit);
let (bank_forks, alice) = new_bank_forks();
let (bank_forks, alice, _) = new_bank_forks();
let bank = bank_forks.read().unwrap().working_bank();
let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default()));
let ledger_path = get_tmp_ledger_path!();
@ -1325,6 +1452,62 @@ pub mod tests {
assert_eq!(epoch_schedule, *bank.epoch_schedule());
}
#[test]
fn test_rpc_get_leader_schedule() {
    // Exercise `getLeaderSchedule` three ways: with an explicit slot, with no
    // slot argument (defaults to the current bank slot), and with a slot far
    // beyond any computable epoch (which must yield `null`).
    let bob_pubkey = Pubkey::new_rand();
    let RpcHandler { io, meta, bank, .. } = start_rpc_handler_with_tx(&bob_pubkey);

    let requests = [
        r#"{"jsonrpc":"2.0","id":1,"method":"getLeaderSchedule", "params": [0]}"#,
        r#"{"jsonrpc":"2.0","id":1,"method":"getLeaderSchedule"}"#,
    ];
    for req in requests.iter() {
        let rep = io.handle_request_sync(&req, meta.clone());
        let res: Response = serde_json::from_str(&rep.expect("actual response"))
            .expect("actual response deserialization");
        // Unwrap the JSON-RPC envelope down to the decoded schedule payload.
        let schedule: Option<RpcLeaderSchedule> = match res {
            Response::Single(Output::Success(res)) => {
                serde_json::from_value(res.result).unwrap()
            }
            Response::Single(_) => panic!("Expected success for {}", req),
            _ => panic!("Expected single response"),
        };
        let schedule = schedule.expect("leader schedule");

        // The collector's entry is expected to span every slot of the epoch's
        // schedule (single-leader test cluster).
        let bob_schedule = schedule
            .get(&bank.collector_id().to_string())
            .expect("leader not in the leader schedule");
        let epoch_slot_count =
            solana_ledger::leader_schedule_utils::leader_schedule(bank.epoch(), &bank)
                .unwrap()
                .get_slot_leaders()
                .len();
        assert_eq!(bob_schedule.len(), epoch_slot_count);
    }

    // A slot in a far-future epoch has no schedule: the RPC must return `null`.
    let req = r#"{"jsonrpc":"2.0","id":1,"method":"getLeaderSchedule", "params": [42424242]}"#;
    let rep = io.handle_request_sync(&req, meta);
    let res: Response = serde_json::from_str(&rep.expect("actual response"))
        .expect("actual response deserialization");
    let schedule: Option<RpcLeaderSchedule> = match res {
        Response::Single(Output::Success(res)) => serde_json::from_value(res.result).unwrap(),
        Response::Single(_) => panic!("Expected success"),
        _ => panic!("Expected single response"),
    };
    assert_eq!(schedule, None);
}
#[test]
fn test_rpc_get_account_info() {
let bob_pubkey = Pubkey::new_rand();
@ -1629,20 +1812,23 @@ pub mod tests {
);
}
fn new_bank_forks() -> (Arc<RwLock<BankForks>>, Keypair) {
fn new_bank_forks() -> (Arc<RwLock<BankForks>>, Keypair, Keypair) {
let GenesisConfigInfo {
mut genesis_config,
mint_keypair,
..
voting_keypair,
} = create_genesis_config(TEST_MINT_LAMPORTS);
genesis_config.rent.lamports_per_byte_year = 50;
genesis_config.rent.exemption_threshold = 2.0;
genesis_config.epoch_schedule =
EpochSchedule::custom(TEST_SLOTS_PER_EPOCH, TEST_SLOTS_PER_EPOCH, false);
let bank = Bank::new(&genesis_config);
(
Arc::new(RwLock::new(BankForks::new(bank.slot(), bank))),
mint_keypair,
voting_keypair,
)
}
@ -1846,6 +2032,54 @@ pub mod tests {
}
}
#[test]
fn test_get_confirmed_blocks() {
    // `getConfirmedBlocks` should return the rooted slots within the requested
    // [start_slot, end_slot] range; when end_slot is omitted it extends to the
    // newest root, and an empty range yields an empty list.
    let bob_pubkey = Pubkey::new_rand();
    let roots = vec![0, 1, 3, 4, 8];
    let RpcHandler { io, meta, .. } =
        start_rpc_handler_with_tx_and_blocktree(&bob_pubkey, roots.clone());

    // (request params, expected confirmed blocks)
    let cases: Vec<(&str, Vec<Slot>)> = vec![
        ("[0]", roots.clone()),
        ("[2]", vec![3, 4, 8]),
        ("[0, 4]", vec![0, 1, 3, 4]),
        ("[0, 7]", vec![0, 1, 3, 4]),
        ("[9, 11]", Vec::<Slot>::new()),
    ];
    for (params, expected) in cases {
        let req = format!(
            r#"{{"jsonrpc":"2.0","id":1,"method":"getConfirmedBlocks","params":{}}}"#,
            params
        );
        let res = io.handle_request_sync(&req, meta.clone());
        let result: Value = serde_json::from_str(&res.expect("actual response"))
            .expect("actual response deserialization");
        let confirmed_blocks: Vec<Slot> =
            serde_json::from_value(result["result"].clone()).unwrap();
        assert_eq!(confirmed_blocks, expected);
    }
}
#[test]
fn test_get_block_time() {
let bob_pubkey = Pubkey::new_rand();
@ -1898,4 +2132,162 @@ pub mod tests {
.expect("actual response deserialization");
assert_eq!(expected, result);
}
#[test]
fn test_get_vote_accounts() {
    // End-to-end test of the `getVoteAccounts` RPC method across three phases:
    // 1) a fresh bank where the staked leader has never voted (delinquent),
    // 2) a full epoch of voting by both accounts (both current),
    // 3) a full epoch with no votes (leader delinquent, unstaked account gone).
    let RpcHandler {
        io,
        meta,
        mut bank,
        bank_forks,
        alice,
        leader_vote_keypair,
        ..
    } = start_rpc_handler_with_tx(&Pubkey::new_rand());

    // Genesis provides exactly one (staked) vote account.
    assert_eq!(bank.vote_accounts().len(), 1);

    // Create a vote account with no stake.
    let alice_vote_keypair = Keypair::new();
    let instructions = vote_instruction::create_account(
        &alice.pubkey(),
        &alice_vote_keypair.pubkey(),
        &VoteInit {
            node_pubkey: alice.pubkey(),
            authorized_voter: alice_vote_keypair.pubkey(),
            authorized_withdrawer: alice_vote_keypair.pubkey(),
            commission: 0,
        },
        // Fund just enough to make the vote account rent-exempt.
        bank.get_minimum_balance_for_rent_exemption(VoteState::size_of()),
    );

    let transaction = Transaction::new_signed_instructions(
        &[&alice, &alice_vote_keypair],
        instructions,
        bank.last_blockhash(),
    );
    bank.process_transaction(&transaction)
        .expect("process transaction");
    assert_eq!(bank.vote_accounts().len(), 2);

    // Check getVoteAccounts: the bootstrap leader vote account will be delinquent as it has
    // stake but has never voted, and the vote account with no stake should not be present.
    {
        let req = format!(r#"{{"jsonrpc":"2.0","id":1,"method":"getVoteAccounts"}}"#);
        let res = io.handle_request_sync(&req, meta.clone());
        let result: Value = serde_json::from_str(&res.expect("actual response"))
            .expect("actual response deserialization");

        let vote_account_status: RpcVoteAccountStatus =
            serde_json::from_value(result["result"].clone()).unwrap();
        assert!(vote_account_status.current.is_empty());
        assert_eq!(vote_account_status.delinquent.len(), 1);
        for vote_account_info in vote_account_status.delinquent {
            assert_ne!(vote_account_info.activated_stake, 0);
        }
    }

    // Advance bank to the next epoch
    for _ in 0..TEST_SLOTS_PER_EPOCH {
        // Freeze the current bank before forking a child off of it.
        bank.freeze();

        // Votes: both vote accounts vote for the just-frozen slot.
        let instructions = vec![
            vote_instruction::vote(
                &leader_vote_keypair.pubkey(),
                &leader_vote_keypair.pubkey(),
                Vote {
                    slots: vec![bank.slot()],
                    hash: bank.hash(),
                },
            ),
            vote_instruction::vote(
                &alice_vote_keypair.pubkey(),
                &alice_vote_keypair.pubkey(),
                Vote {
                    slots: vec![bank.slot()],
                    hash: bank.hash(),
                },
            ),
        ];

        // Insert the child bank, then land the vote transaction in it.
        bank = bank_forks.write().unwrap().insert(Bank::new_from_parent(
            &bank,
            &Pubkey::default(),
            bank.slot() + 1,
        ));

        let transaction = Transaction::new_signed_with_payer(
            instructions,
            Some(&alice.pubkey()),
            &[&alice, &leader_vote_keypair, &alice_vote_keypair],
            bank.last_blockhash(),
        );

        bank.process_transaction(&transaction)
            .expect("process transaction");
    }

    let req = format!(
        r#"{{"jsonrpc":"2.0","id":1,"method":"getVoteAccounts","params":{}}}"#,
        json!([CommitmentConfig::recent()])
    );

    let res = io.handle_request_sync(&req, meta.clone());
    let result: Value = serde_json::from_str(&res.expect("actual response"))
        .expect("actual response deserialization");

    let vote_account_status: RpcVoteAccountStatus =
        serde_json::from_value(result["result"].clone()).unwrap();

    // The vote account with no stake should not be present.
    assert!(vote_account_status.delinquent.is_empty());

    // Both accounts should be active and have voting history.
    assert_eq!(vote_account_status.current.len(), 2);
    //let leader_info = &vote_account_status.current[0];
    let leader_info = vote_account_status
        .current
        .iter()
        .find(|x| x.vote_pubkey == leader_vote_keypair.pubkey().to_string())
        .unwrap();
    assert_ne!(leader_info.activated_stake, 0);
    // Subtract one because the last vote always carries over to the next epoch
    let expected_credits = TEST_SLOTS_PER_EPOCH - MAX_LOCKOUT_HISTORY as u64 - 1;
    assert_eq!(leader_info.epoch_credits, vec![(0, expected_credits, 0)]);

    // Advance bank with no voting
    bank.freeze();
    bank_forks.write().unwrap().insert(Bank::new_from_parent(
        &bank,
        &Pubkey::default(),
        bank.slot() + TEST_SLOTS_PER_EPOCH,
    ));

    // The leader vote account should now be delinquent, and the other vote account disappears
    // because it's inactive with no stake
    {
        let req = format!(
            r#"{{"jsonrpc":"2.0","id":1,"method":"getVoteAccounts","params":{}}}"#,
            json!([CommitmentConfig::recent()])
        );
        let res = io.handle_request_sync(&req, meta.clone());
        let result: Value = serde_json::from_str(&res.expect("actual response"))
            .expect("actual response deserialization");
        let vote_account_status: RpcVoteAccountStatus =
            serde_json::from_value(result["result"].clone()).unwrap();

        assert!(vote_account_status.current.is_empty());
        assert_eq!(vote_account_status.delinquent.len(), 1);
        for vote_account_info in vote_account_status.delinquent {
            assert_eq!(
                vote_account_info.vote_pubkey,
                leader_vote_keypair.pubkey().to_string()
            );
        }
    }
}
}

View File

@ -11,7 +11,7 @@ use crate::sigverify;
use crate::streamer::{self, PacketReceiver};
use crossbeam_channel::Sender as CrossbeamSender;
use solana_measure::measure::Measure;
use solana_metrics::{datapoint_debug, inc_new_counter_info};
use solana_metrics::datapoint_debug;
use solana_perf::perf_libs;
use solana_sdk::timing;
use std::sync::mpsc::{Receiver, RecvTimeoutError};
@ -65,7 +65,6 @@ impl SigVerifyStage {
RECV_BATCH_MAX_CPU
},
)?;
inc_new_counter_info!("sigverify_stage-packets_received", len);
let mut verify_batch_time = Measure::start("sigverify_batch_time");
let batch_len = batch.len();
@ -77,7 +76,6 @@ impl SigVerifyStage {
);
let verified_batch = verifier.verify_batch(batch);
inc_new_counter_info!("sigverify_stage-verified_packets_send", len);
for v in verified_batch {
if sendr.send(vec![v]).is_err() {
@ -87,10 +85,6 @@ impl SigVerifyStage {
verify_batch_time.stop();
inc_new_counter_info!(
"sigverify_stage-time_ms",
(verify_batch_time.as_ms() + recv_time) as usize
);
debug!(
"@{:?} verifier: done. batches: {} total verify time: {:?} id: {} verified: {} v/s {}",
timing::timestamp(),
@ -103,9 +97,10 @@ impl SigVerifyStage {
datapoint_debug!(
"sigverify_stage-total_verify_time",
("batch_len", batch_len, i64),
("len", len, i64),
("total_time_ms", verify_batch_time.as_ms(), i64)
("num_batches", batch_len, i64),
("num_packets", len, i64),
("verify_time_ms", verify_batch_time.as_ms(), i64),
("recv_time", recv_time, i64),
);
Ok(())

View File

@ -4,7 +4,7 @@
use crate::packet::{self, send_to, Packets, PacketsRecycler, PACKETS_PER_BATCH};
use crate::recvmmsg::NUM_RCVMMSGS;
use crate::result::{Error, Result};
use crate::thread_mem_usage;
use solana_measure::thread_mem_usage;
use solana_sdk::timing::duration_as_ms;
use std::net::UdpSocket;
use std::sync::atomic::{AtomicBool, Ordering};
@ -41,7 +41,9 @@ fn recv_loop(
}
recv_count += len;
call_count += 1;
channel.send(msgs)?;
if len > 0 {
channel.send(msgs)?;
}
break;
}
}
@ -55,9 +57,9 @@ fn recv_loop(
);
recv_count = 0;
call_count = 0;
now = Instant::now();
num_max_received = 0;
}
now = Instant::now();
}
}
@ -100,7 +102,6 @@ pub fn recv_batch(recvr: &PacketReceiver, max_batch: usize) -> Result<(Vec<Packe
trace!("got more msgs");
len += more.packets.len();
batch.push(more);
if len > max_batch {
break;
}

View File

@ -26,6 +26,7 @@ use solana_ledger::{
blocktree::{Blocktree, CompletedSlotsReceiver},
blocktree_processor::{self, BankForksInfo},
create_new_tmp_ledger,
leader_schedule::FixedSchedule,
leader_schedule_cache::LeaderScheduleCache,
};
use solana_metrics::datapoint_info;
@ -47,7 +48,8 @@ use std::{
sync::atomic::{AtomicBool, Ordering},
sync::mpsc::Receiver,
sync::{Arc, Mutex, RwLock},
thread::Result,
thread::{sleep, Result},
time::Duration,
};
#[derive(Clone, Debug)]
@ -65,6 +67,8 @@ pub struct ValidatorConfig {
pub max_ledger_slots: Option<u64>,
pub broadcast_stage_type: BroadcastStageType,
pub partition_cfg: Option<PartitionCfg>,
pub fixed_leader_schedule: Option<FixedSchedule>,
pub wait_for_supermajority: bool,
}
impl Default for ValidatorConfig {
@ -83,6 +87,8 @@ impl Default for ValidatorConfig {
snapshot_config: None,
broadcast_stage_type: BroadcastStageType::Standard,
partition_cfg: None,
fixed_leader_schedule: None,
wait_for_supermajority: false,
}
}
}
@ -135,32 +141,10 @@ impl Validator {
warn!("identity pubkey: {:?}", id);
warn!("vote pubkey: {:?}", vote_account);
warn!(
"CUDA is {}abled",
if solana_perf::perf_libs::api().is_some() {
"en"
} else {
"dis"
}
);
// Validator binaries built on a machine with AVX support will generate invalid opcodes
// when run on machines without AVX causing a non-obvious process abort. Instead detect
// the mismatch and error cleanly.
#[target_feature(enable = "avx")]
{
if is_x86_feature_detected!("avx") {
info!("AVX detected");
} else {
error!("Your machine does not have AVX support, please rebuild from source on your machine");
process::exit(1);
}
}
report_target_features();
info!("entrypoint: {:?}", entrypoint_info_option);
Self::print_node_info(&node);
info!("Initializing sigverify, this could take a while...");
sigverify::init();
info!("Done.");
@ -182,6 +166,7 @@ impl Validator {
config.snapshot_config.clone(),
poh_verify,
config.dev_halt_at_slot,
config.fixed_leader_schedule.clone(),
);
let leader_schedule_cache = Arc::new(leader_schedule_cache);
@ -190,8 +175,6 @@ impl Validator {
let bank = bank_forks[bank_info.bank_slot].clone();
let bank_forks = Arc::new(RwLock::new(bank_forks));
let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default()));
// The version used by shreds, derived from genesis
let shred_version = Shred::version_from_hash(&genesis_hash);
let mut validator_exit = ValidatorExit::default();
let exit_ = exit.clone();
@ -199,6 +182,9 @@ impl Validator {
let validator_exit = Arc::new(RwLock::new(Some(validator_exit)));
node.info.wallclock = timestamp();
node.info.shred_version = Shred::version_from_hash(&genesis_hash);
Self::print_node_info(&node);
let cluster_info = Arc::new(RwLock::new(ClusterInfo::new(
node.info.clone(),
keypair.clone(),
@ -289,14 +275,7 @@ impl Validator {
if config.snapshot_config.is_some() {
poh_recorder.set_bank(&bank);
}
let poh_recorder = Arc::new(Mutex::new(poh_recorder));
let poh_service = PohService::new(poh_recorder.clone(), &poh_config, &exit);
assert_eq!(
blocktree.new_shreds_signals.len(),
1,
"New shred signal for the TVU should be the same as the clear bank signal."
);
let ip_echo_server = solana_net_utils::ip_echo_server(node.sockets.ip_echo.unwrap());
@ -317,6 +296,22 @@ impl Validator {
.set_entrypoint(entrypoint_info.clone());
}
if config.wait_for_supermajority {
info!(
"Waiting more than 66% of activated stake at slot {} to be in gossip...",
bank.slot()
);
loop {
let gossip_stake_percent = get_stake_percent_in_gossip(&bank, &cluster_info);
info!("{}% of activated stake in gossip", gossip_stake_percent,);
if gossip_stake_percent > 66 {
break;
}
sleep(Duration::new(1, 0));
}
}
let sockets = Sockets {
repair: node
.sockets
@ -349,6 +344,13 @@ impl Validator {
Some(voting_keypair)
};
let poh_service = PohService::new(poh_recorder.clone(), &poh_config, &exit);
assert_eq!(
blocktree.new_shreds_signals.len(),
1,
"New shred signal for the TVU should be the same as the clear bank signal."
);
let tvu = Tvu::new(
vote_account,
voting_keypair,
@ -369,7 +371,7 @@ impl Validator {
block_commitment_cache,
config.dev_sigverify_disabled,
config.partition_cfg.clone(),
shred_version,
node.info.shred_version,
transaction_status_sender.clone(),
);
@ -389,7 +391,7 @@ impl Validator {
&blocktree,
&config.broadcast_stage_type,
&exit,
shred_version,
node.info.shred_version,
);
datapoint_info!("validator-new", ("id", id.to_string(), String));
@ -469,6 +471,7 @@ pub fn new_banks_from_blocktree(
snapshot_config: Option<SnapshotConfig>,
poh_verify: bool,
dev_halt_at_slot: Option<Slot>,
fixed_leader_schedule: Option<FixedSchedule>,
) -> (
Hash,
BankForks,
@ -506,7 +509,7 @@ pub fn new_banks_from_blocktree(
..blocktree_processor::ProcessOptions::default()
};
let (mut bank_forks, bank_forks_info, leader_schedule_cache) = bank_forks_utils::load(
let (mut bank_forks, bank_forks_info, mut leader_schedule_cache) = bank_forks_utils::load(
&genesis_config,
&blocktree,
account_paths,
@ -518,6 +521,8 @@ pub fn new_banks_from_blocktree(
std::process::exit(1);
});
leader_schedule_cache.set_fixed_leader_schedule(fixed_leader_schedule);
bank_forks.set_snapshot_config(snapshot_config);
(
@ -572,6 +577,63 @@ pub fn new_validator_for_tests() -> (Validator, ContactInfo, Keypair, PathBuf) {
(node, contact_info, mint_keypair, ledger_path)
}
// Log target-feature / hardware-capability information at startup, aborting
// early when the binary cannot run correctly on this machine.
fn report_target_features() {
    // Purely informational: report whether the CUDA perf libraries loaded.
    warn!(
        "CUDA is {}abled",
        if solana_perf::perf_libs::api().is_some() {
            "en"
        } else {
            "dis"
        }
    );

    // Validator binaries built on a machine with AVX support will generate invalid opcodes
    // when run on machines without AVX causing a non-obvious process abort. Instead detect
    // the mismatch and error cleanly.
    // NOTE(review): `#[target_feature]` is normally a function attribute;
    // confirm this block-level use actually gates the check as intended.
    #[target_feature(enable = "avx")]
    {
        if is_x86_feature_detected!("avx") {
            info!("AVX detected");
        } else {
            // Exit rather than continue and crash later with an illegal opcode.
            error!("Your machine does not have AVX support, please rebuild from source on your machine");
            process::exit(1);
        }
    }
}
// Get the activated stake percentage (based on the provided bank) that is visible in gossip
//
// Walks every vote account in `bank`, summing activated stake, and counts a
// vote account's stake as "visible" when its node is either this node itself
// or a TVU peer in gossip advertising a matching shred version.
fn get_stake_percent_in_gossip(
    bank: &Arc<solana_runtime::bank::Bank>,
    cluster_info: &Arc<RwLock<ClusterInfo>>,
) -> u64 {
    let mut gossip_stake = 0;
    let mut total_activated_stake = 0;
    let tvu_peers = cluster_info.read().unwrap().tvu_peers();
    let me = cluster_info.read().unwrap().my_data();

    for (activated_stake, vote_account) in bank.vote_accounts().values() {
        // A vote account that fails to deserialize still contributes to the
        // total, but its default (zero) node_pubkey will not match any peer.
        let vote_state =
            solana_vote_program::vote_state::VoteState::from(&vote_account).unwrap_or_default();
        total_activated_stake += activated_stake;
        if tvu_peers
            .iter()
            // Only count peers that will actually interoperate with us.
            .filter(|peer| peer.shred_version == me.shred_version)
            .any(|peer| peer.id == vote_state.node_pubkey)
        {
            trace!(
                "observed {} in gossip, (activated_stake={})",
                vote_state.node_pubkey,
                activated_stake
            );
            gossip_stake += activated_stake;
        } else if vote_state.node_pubkey == me.id {
            // Our own stake is always "present" even though this node does not
            // appear in its own tvu_peers() list. Use the already-fetched
            // `me.id` instead of re-locking cluster_info on every iteration.
            gossip_stake += activated_stake;
        }
    }

    // Guard the division: a bank with no activated stake would otherwise panic
    // with a divide-by-zero. Report 0% and let the caller keep waiting/logging.
    if total_activated_stake == 0 {
        return 0;
    }
    gossip_stake * 100 / total_activated_stake
}
#[cfg(test)]
mod tests {
use super::*;

View File

@ -1,6 +1,6 @@
[package]
name = "solana-crate-features"
version = "0.21.1"
version = "0.21.8"
description = "Solana Crate Features"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"

View File

@ -1,6 +1,6 @@
[package]
name = "solana-drone"
version = "0.21.1"
version = "0.21.8"
description = "Solana Drone"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -19,10 +19,10 @@ clap = "2.33"
log = "0.4.8"
serde = "1.0.102"
serde_derive = "1.0.102"
solana-clap-utils = { path = "../clap-utils", version = "0.21.1" }
solana-logger = { path = "../logger", version = "0.21.1" }
solana-metrics = { path = "../metrics", version = "0.21.1" }
solana-sdk = { path = "../sdk", version = "0.21.1" }
solana-clap-utils = { path = "../clap-utils", version = "0.21.8" }
solana-logger = { path = "../logger", version = "0.21.8" }
solana-metrics = { path = "../metrics", version = "0.21.8" }
solana-sdk = { path = "../sdk", version = "0.21.8" }
tokio = "0.1"
tokio-codec = "0.1"

View File

@ -1,6 +1,6 @@
[package]
name = "solana-fixed-buf"
version = "0.21.1"
version = "0.21.8"
description = "A fixed-size byte array that supports bincode serde"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"

View File

@ -1,6 +1,6 @@
[package]
name = "solana-genesis-programs"
version = "0.21.1"
version = "0.21.8"
description = "Solana genesis programs"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -10,16 +10,16 @@ edition = "2018"
[dependencies]
log = { version = "0.4.8" }
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "0.21.1" }
solana-budget-program = { path = "../programs/budget", version = "0.21.1" }
solana-config-program = { path = "../programs/config", version = "0.21.1" }
solana-exchange-program = { path = "../programs/exchange", version = "0.21.1" }
solana-runtime = { path = "../runtime", version = "0.21.1" }
solana-sdk = { path = "../sdk", version = "0.21.1" }
solana-stake-program = { path = "../programs/stake", version = "0.21.1" }
solana-storage-program = { path = "../programs/storage", version = "0.21.1" }
solana-vest-program = { path = "../programs/vest", version = "0.21.1" }
solana-vote-program = { path = "../programs/vote", version = "0.21.1" }
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "0.21.8" }
solana-budget-program = { path = "../programs/budget", version = "0.21.8" }
solana-config-program = { path = "../programs/config", version = "0.21.8" }
solana-exchange-program = { path = "../programs/exchange", version = "0.21.8" }
solana-runtime = { path = "../runtime", version = "0.21.8" }
solana-sdk = { path = "../sdk", version = "0.21.8" }
solana-stake-program = { path = "../programs/stake", version = "0.21.8" }
solana-storage-program = { path = "../programs/storage", version = "0.21.8" }
solana-vest-program = { path = "../programs/vest", version = "0.21.8" }
solana-vote-program = { path = "../programs/vote", version = "0.21.8" }
[lib]
crate-type = ["lib"]

View File

@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-genesis"
description = "Blockchain, Rebuilt for Scale"
version = "0.21.1"
version = "0.21.8"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@ -16,11 +16,11 @@ serde = "1.0.102"
serde_derive = "1.0.102"
serde_json = "1.0.41"
serde_yaml = "0.8.11"
solana-clap-utils = { path = "../clap-utils", version = "0.21.1" }
solana-genesis-programs = { path = "../genesis-programs", version = "0.21.1" }
solana-ledger = { path = "../ledger", version = "0.21.1" }
solana-sdk = { path = "../sdk", version = "0.21.1" }
solana-stake-program = { path = "../programs/stake", version = "0.21.1" }
solana-storage-program = { path = "../programs/storage", version = "0.21.1" }
solana-vote-program = { path = "../programs/vote", version = "0.21.1" }
solana-clap-utils = { path = "../clap-utils", version = "0.21.8" }
solana-genesis-programs = { path = "../genesis-programs", version = "0.21.8" }
solana-ledger = { path = "../ledger", version = "0.21.8" }
solana-sdk = { path = "../sdk", version = "0.21.8" }
solana-stake-program = { path = "../programs/stake", version = "0.21.8" }
solana-storage-program = { path = "../programs/storage", version = "0.21.8" }
solana-vote-program = { path = "../programs/vote", version = "0.21.8" }
tempfile = "3.1.0"

View File

@ -3,7 +3,10 @@ use crate::{
unlocks::UnlockInfo,
validators::{create_and_add_validator, ValidatorInfo},
};
use solana_sdk::{genesis_config::GenesisConfig, native_token::sol_to_lamports};
use solana_sdk::{
genesis_config::GenesisConfig,
native_token::{lamports_to_sol, sol_to_lamports},
};
// 30 month schedule is 1/5th every 6 months for 30 months
const UNLOCKS_BY_FIFTHS_FOR_30_MONTHS: UnlockInfo = UnlockInfo {
@ -11,7 +14,7 @@ const UNLOCKS_BY_FIFTHS_FOR_30_MONTHS: UnlockInfo = UnlockInfo {
cliff_years: 0.5,
unlocks: 4,
unlock_years: 0.5,
custodian: "11111111111111111111111111111111",
custodian: "6LnFgiECFQKUcxNYDvUBMxgjeGQzzy4kgxGhantoxfUe",
};
// 60 month schedule is 1/10th every 6 months for 60 months
//const UNLOCKS_BY_TENTHS_FOR_60_MONTHS: UnlockInfo = UnlockInfo {
@ -31,452 +34,125 @@ const UNLOCKS_BY_TENTHS_FOR_60_MONTHS: UnlockInfo = UnlockInfo {
custodian: "11111111111111111111111111111111",
};
// 1st batch
const BATCH_ONE_STAKER_INFOS: &[StakerInfo] = &[
StakerInfo {
name: "diligent bridge",
staker: "BwwM47pLHwUgjJXKQKVNiRfGhtPNWfNLH27na2HJQHhd",
sol: 6_250_000.0,
},
StakerInfo {
name: "four wish",
staker: "8A6ZEEW2odkqXNjTWHNG6tUk7uj6zCzHueTyEr9pM1tH",
sol: 10_000_000.0,
},
StakerInfo {
name: "simple friends",
staker: "D89HyaBmr2WmrTehsfkQrY23wCXcDfsFnN9gMfUXHaDd",
sol: 1_250_000.0,
},
StakerInfo {
name: "noxious leather",
staker: "FwPvDpvUmnco1CSfwXQDTbUbuhG5eP7h2vgCKYKVL7at",
sol: 6_250_000.0,
},
StakerInfo {
name: "worthless direction",
staker: "4K16iBoC9kAQRT8pUEKeD2h9WEx1zsRgEmJFssXcXmqq",
sol: 12_500_000.0,
},
StakerInfo {
name: "historical company",
staker: "rmLpENW4V6QNeEhdJJVxo9Xt99oKgNUFZS4Y4375amW",
sol: 322_850.0,
},
StakerInfo {
name: "callous money",
staker: "5kAztE3XtrpeyGZZxckSUt3ZWojNTmph1QSC9S2682z4",
sol: 5_927_155.25,
},
StakerInfo {
name: "outstanding jump",
staker: "H6HMVuDR8XCw3EuhLvFG4EciVvGo76Agq1kSBL2ozoDs",
sol: 625_000.0,
},
StakerInfo {
name: "feeble toes",
staker: "3sfv8tk5ZSDBWbTkFkvFxCvJUyW5yDJUu6VMJcUARQWq",
sol: 750_000.0,
},
StakerInfo {
name: "disillusioned deer",
staker: "P1aceHo1derPubkey11111111111111111111111111",
sol: 1_250_000.0,
},
StakerInfo {
name: "unwritten songs",
staker: "P1aceHo1derPubkey11111111111111111111111111",
sol: 4_250_000.0,
},
StakerInfo {
name: "overt dime",
staker: "P1aceHo1derPubkey11111111111111111111111111",
sol: 500_000.0,
},
StakerInfo {
name: "slow committee",
staker: "P1aceHo1derPubkey11111111111111111111111111",
sol: 625_000.0,
},
StakerInfo {
name: "curvy twig",
staker: "P1aceHo1derPubkey11111111111111111111111111",
sol: 625_000.0,
},
StakerInfo {
name: "gamy scissors",
staker: "P1aceHo1derPubkey11111111111111111111111111",
sol: 250_000.0,
},
StakerInfo {
name: "mushy key",
staker: "P1aceHo1derPubkey11111111111111111111111111",
sol: 1_250_000.0,
},
StakerInfo {
name: "marked silver",
staker: "P1aceHo1derPubkey11111111111111111111111111",
sol: 250_000.0,
},
StakerInfo {
name: "free sock",
staker: "P1aceHo1derPubkey11111111111111111111111111",
sol: 625_000.0,
},
StakerInfo {
name: "tremendous meeting",
staker: "P1aceHo1derPubkey11111111111111111111111111",
sol: 1_250_000.0,
},
StakerInfo {
name: "panoramic cloth",
staker: "P1aceHo1derPubkey11111111111111111111111111",
sol: 625_000.0,
},
StakerInfo {
name: "normal kick",
staker: "P1aceHo1derPubkey11111111111111111111111111",
sol: 2_500_000.0,
},
StakerInfo {
name: "unbecoming observation",
staker: "P1aceHo1derPubkey11111111111111111111111111",
sol: 250_000.0,
},
StakerInfo {
name: "cut beginner",
staker: "P1aceHo1derPubkey11111111111111111111111111",
sol: 250_000.0,
},
StakerInfo {
name: "alcoholic button",
staker: "P1aceHo1derPubkey11111111111111111111111111",
sol: 625_000.0,
},
StakerInfo {
name: "old-fashioned clover",
staker: "P1aceHo1derPubkey11111111111111111111111111",
sol: 750_000.0,
},
StakerInfo {
name: "expensive underwear",
staker: "P1aceHo1derPubkey11111111111111111111111111",
sol: 2_500_000.0,
},
StakerInfo {
name: "like dust",
staker: "P1aceHo1derPubkey11111111111111111111111111",
sol: 5_000_000.0,
},
StakerInfo {
name: "rapid straw",
staker: "P1aceHo1derPubkey11111111111111111111111111",
sol: 5_850_000.0,
},
StakerInfo {
name: "windy trousers",
staker: "P1aceHo1derPubkey11111111111111111111111111",
sol: 2_579_350.0,
},
StakerInfo {
name: "dramatic veil",
staker: "P1aceHo1derPubkey11111111111111111111111111",
sol: 3_611_110.50,
},
StakerInfo {
name: "incandescent skin",
staker: "P1aceHo1derPubkey11111111111111111111111111",
sol: 3_000_000.0,
},
StakerInfo {
name: "spiky love",
staker: "P1aceHo1derPubkey11111111111111111111111111",
sol: 3_250_000.0,
},
];
const BATCH_TWO_STAKER_INFOS: &[StakerInfo] = &[
// 2nd batch
StakerInfo {
name: "macabre note",
staker: "P1aceHo1derPubkey11111111111111111111111111",
sol: 4_000_000.0,
},
StakerInfo {
name: "alcoholic letter",
staker: "P1aceHo1derPubkey11111111111111111111111111",
sol: 4_000_000.0,
},
StakerInfo {
name: "heady trucks",
staker: "P1aceHo1derPubkey11111111111111111111111111",
sol: 4_000_000.0,
},
StakerInfo {
name: "ten support",
staker: "P1aceHo1derPubkey11111111111111111111111111",
sol: 1_000_000.0,
},
StakerInfo {
name: "foregoing middle",
staker: "P1aceHo1derPubkey11111111111111111111111111",
sol: 800_000.0,
},
StakerInfo {
name: "ludicrous destruction",
staker: "P1aceHo1derPubkey11111111111111111111111111",
sol: 4_000_000.0,
},
StakerInfo {
name: "numberless wheel",
staker: "P1aceHo1derPubkey11111111111111111111111111",
sol: 4_000_000.0,
},
StakerInfo {
name: "short powder",
staker: "P1aceHo1derPubkey11111111111111111111111111",
sol: 4_000_000.0,
},
StakerInfo {
name: "cut name",
staker: "P1aceHo1derPubkey11111111111111111111111111",
sol: 4_000_000.0,
},
StakerInfo {
name: "six fly",
staker: "P1aceHo1derPubkey11111111111111111111111111",
sol: 4_000_000.0,
},
StakerInfo {
name: "mindless pickle",
staker: "P1aceHo1derPubkey11111111111111111111111111",
sol: 100_000.0,
},
StakerInfo {
name: "marked rabbit",
staker: "P1aceHo1derPubkey11111111111111111111111111",
sol: 38_741.36,
},
StakerInfo {
name: "jagged doctor",
staker: "P1aceHo1derPubkey11111111111111111111111111",
sol: 711_258.64,
},
StakerInfo {
name: "truthful pollution",
staker: "P1aceHo1derPubkey11111111111111111111111111",
sol: 1_587_300.0,
},
StakerInfo {
name: "unkempt activity",
staker: "P1aceHo1derPubkey11111111111111111111111111",
sol: 2_222_220.0,
},
StakerInfo {
name: "ritzy view",
staker: "P1aceHo1derPubkey11111111111111111111111111",
sol: 40_000.0,
},
StakerInfo {
name: "remarkable plant",
staker: "P1aceHo1derPubkey11111111111111111111111111",
sol: 300_000.0,
},
StakerInfo {
name: "busy value",
staker: "P1aceHo1derPubkey11111111111111111111111111",
sol: 100_000.0,
},
StakerInfo {
name: "imperfect slave",
staker: "P1aceHo1derPubkey11111111111111111111111111",
sol: 222_065.84,
},
StakerInfo {
name: "uneven drawer",
staker: "P1aceHo1derPubkey11111111111111111111111111",
sol: 400_000.0,
},
StakerInfo {
name: "far behavior",
staker: "P1aceHo1derPubkey11111111111111111111111111",
sol: 4_000_000.0,
},
StakerInfo {
name: "abaft memory",
staker: "P1aceHo1derPubkey11111111111111111111111111",
sol: 400_000.0,
},
StakerInfo {
name: "poor glove",
staker: "P1aceHo1derPubkey11111111111111111111111111",
sol: 2_000_000.0,
},
StakerInfo {
name: "strange iron",
staker: "P1aceHo1derPubkey11111111111111111111111111",
sol: 2_000_000.0,
},
StakerInfo {
name: "nonstop rail",
staker: "P1aceHo1derPubkey11111111111111111111111111",
sol: 1_000_000.0,
},
StakerInfo {
name: "milky bait",
staker: "P1aceHo1derPubkey11111111111111111111111111",
sol: 400_000.0,
},
StakerInfo {
name: "wandering start",
staker: "P1aceHo1derPubkey11111111111111111111111111",
sol: 1_200_000.0,
},
];
pub const BATCH_THREE_STAKER_INFOS: &[StakerInfo] = &[
// 3rd batch
StakerInfo {
name: "dusty dress",
staker: "P1aceHo1derPubkey11111111111111111111111111",
sol: 1_212_121.21,
},
StakerInfo {
name: "godly bed",
staker: "P1aceHo1derPubkey11111111111111111111111111",
sol: 151_515.15,
},
StakerInfo {
name: "innocent property",
staker: "P1aceHo1derPubkey11111111111111111111111111",
sol: 227_272.73,
},
StakerInfo {
name: "responsible bikes",
staker: "P1aceHo1derPubkey11111111111111111111111111",
sol: 3_030_303.03,
},
StakerInfo {
name: "learned market",
staker: "P1aceHo1derPubkey11111111111111111111111111",
sol: 3_030_303.03,
},
StakerInfo {
name: "jumpy school",
staker: "P1aceHo1derPubkey11111111111111111111111111",
sol: 303_030.30,
},
StakerInfo {
name: "sticky houses",
staker: "P1aceHo1derPubkey11111111111111111111111111",
sol: 1_515_151.52,
},
StakerInfo {
name: "bustling basketball",
staker: "P1aceHo1derPubkey11111111111111111111111111",
sol: 1_515_152.52,
},
StakerInfo {
name: "ordinary dad",
staker: "P1aceHo1derPubkey11111111111111111111111111",
sol: 606_060.61,
},
StakerInfo {
name: "absurd bat",
staker: "P1aceHo1derPubkey11111111111111111111111111",
sol: 90_909.09,
},
StakerInfo {
name: "cloudy ocean",
staker: "P1aceHo1derPubkey11111111111111111111111111",
sol: 67_945.45,
},
StakerInfo {
name: "black-and-white fold",
staker: "P1aceHo1derPubkey11111111111111111111111111",
sol: 757_575.76,
},
StakerInfo {
name: "stale part",
staker: "P1aceHo1derPubkey11111111111111111111111111",
sol: 45_454.55,
},
StakerInfo {
name: "available health",
staker: "P1aceHo1derPubkey11111111111111111111111111",
sol: 2_797_575.76,
},
StakerInfo {
name: "afraid visitor",
staker: "P1aceHo1derPubkey11111111111111111111111111",
sol: 481_818.18,
},
StakerInfo {
name: "arrogant front",
staker: "P1aceHo1derPubkey11111111111111111111111111",
sol: 151_515.15,
},
StakerInfo {
name: "juvenile zinc",
staker: "P1aceHo1derPubkey11111111111111111111111111",
sol: 151_515.15,
},
StakerInfo {
name: "disturbed box",
staker: "P1aceHo1derPubkey11111111111111111111111111",
sol: 303_030.30,
},
StakerInfo {
name: "disagreeable skate",
staker: "P1aceHo1derPubkey11111111111111111111111111",
sol: 454_545.45,
},
StakerInfo {
name: "miscreant sidewalk",
staker: "P1aceHo1derPubkey11111111111111111111111111",
sol: 75_757.58,
},
StakerInfo {
name: "shy play",
staker: "P1aceHo1derPubkey11111111111111111111111111",
sol: 303_030.30,
},
];
// no lockups
const UNLOCKS_ALL_DAY_ZERO: UnlockInfo = UnlockInfo {
cliff_fraction: 1.0,
cliff_years: 0.0,
unlocks: 0,
unlock_years: 0.0,
custodian: "11111111111111111111111111111111",
};
pub const BATCH_FOUR_STAKER_INFOS: &[StakerInfo] = &[
StakerInfo {
name: "deserted window",
staker: "XTeBBZextvHkoRqDF8yb4hihjcraKQDwTEXhzjd8fip",
sol: 3_655_292.0,
name: "impossible pizza",
staker: "CDtJpwRSiPRDGeKrvymWQKM7JY9M3hU7iimEKBDxZyoP",
sol: 5_000_000.0,
},
StakerInfo {
name: "wretched texture",
staker: "HbENu65qjWLEB5TrMouSSWLq9mbtGx2bvfhPjk2FpYek",
sol: 225_000.0,
},
StakerInfo {
name: "nutritious examination",
staker: "C9CfFpmLDsQsz6wt7MrrZquNB5oS4QkpJkmDAiboVEZZ",
sol: 5_000_000.0,
},
StakerInfo {
name: "tidy impression",
staker: "6ne6Rbag4FAnop1KNgVdM1SEHnJEysHSWyqvRpFrzaig",
sol: 5_000_000.0,
},
StakerInfo {
name: "unbecoming silver",
// TODO: staker: "42yapY7Vrs5jqht9TCKsPoyb4vDFYcPfRkqAP85NSAQ", WrongSize
staker: "GS7RFm4nrxzYGcPTmu1otzHzZbURWDKuxo2L4AQDvTg2",
sol: 28_800.0,
},
StakerInfo {
name: "dramatic treatment",
staker: "GTyawCMwt3kMb51AgDtfdp97mDot7jNwc8ifuS9qqANg",
sol: 1_205_602.0,
},
StakerInfo {
name: "angry noise",
staker: "Fqxs9MhqjKuMq6YwjBG4ktEapuZQ3kj19mpuHLZKtkg9",
sol: 5_000_000.0,
},
StakerInfo {
name: "hard cousin",
staker: "9MYDzj7QuAX9QAK7da1GhzPB4gA3qbPNWsW3MMSZobru",
sol: 5_000_000.0,
},
];
pub const POOL_STAKER_INFOS: &[StakerInfo] = &[
StakerInfo {
name: "shrill charity",
staker: "BzuqQFnu7oNUeok9ZoJezpqu2vZJU7XR1PxVLkk6wwUD",
name: "inexpensive uncle",
staker: "E4DLNkmdL34ejA48ApfPDoFVuD9XWAFqi8bXzBGRhKst",
sol: 300_000.0,
},
StakerInfo {
name: "lopsided skill",
staker: "8cV7zCTF5UMrZakZXiL2Jw5uY3ms2Wz4twzFXEY9Kge2",
sol: 5_000_000.0,
},
StakerInfo {
name: "legal gate",
staker: "FwMbkDZUb78aiMWhZY4BEroAcqmnrXZV77nwrg71C57d",
sol: 21_086_641.0,
name: "red snake",
staker: "JBGnGdLyo7V2z9hz51mnnbyDp9sBACtw5WYH9YRG8n7e",
sol: 3_655_292.0,
},
StakerInfo {
name: "cluttered complaint",
staker: "4h1rt2ic4AXwG7p3Qqhw57EMDD4c3tLYb5J3QstGA2p5",
sol: 153_333_633.41,
name: "hellish money",
staker: "CqKdQ57mBj2mKcAbpjWc28Ls7yXzBXboxSTCRWocmUVj",
sol: 200_000.0,
},
StakerInfo {
name: "one thanks",
staker: "3b7akieYUyCgz3Cwt5sTSErMWjg8NEygD6mbGjhGkduB",
sol: 157_613_284.59,
name: "full grape",
staker: "2SCJKvh7wWo32PtfUZdVZQ84WnMWoUpF4WTm6ZxcCJ15",
sol: 450_000.0,
},
StakerInfo {
name: "nice ghost",
staker: "FeumxB3gfzrVQzABBiha8AacKPY3Rf4BTFSh2aZWHqR8",
sol: 650_000.0,
},
StakerInfo {
name: "jolly year",
staker: "HBwFWNGPVZgkf3yqUKxuAds5aANGWX62LzUFvZVCWLdJ",
sol: 5_000_000.0,
},
StakerInfo {
name: "typical initiative",
staker: "3JMz3kaDUZEVK2JVjRqwERGMp7LbWbgUjAFBb42qxoHb",
sol: 5_000_000.0,
},
StakerInfo {
name: "deserted window",
staker: "XTeBBZextvHkoRqDF8yb4hihjcraKQDwTEXhzjd8fip",
sol: 3_655_292.0,
},
StakerInfo {
name: "eight nation",
staker: "E5bSU5ywqPiz3ije89ef5gaEC7jy81BAc72Zeb9MqeHY",
sol: 103_519.0,
},
StakerInfo {
name: "earsplitting meaning",
staker: "4ZemkSoE75RFE1SVLnnmHcaNWT4qN8KFrKP2wAYfv8CB",
sol: 5_000_000.0,
},
StakerInfo {
name: "alike cheese",
staker: "72BGEwYee5txFonmpEarTEKCZVN2UxcSUgdphdhcx3V",
sol: 3_880_295.0,
},
StakerInfo {
name: "noisy honey",
staker: "DRp1Scyn4yJZQfMAdQew2x8RtvRmsNELN37JTK5Xvzgn",
sol: 5_000_000.0,
},
];
pub const FOUNDATION_STAKER_INFOS: &[StakerInfo] = &[
StakerInfo {
name: "lyrical supermarket",
staker: "GRZwoJGisLTszcxtWpeREJ98EGg8pZewhbtcrikoU7b3",
@ -487,6 +163,9 @@ pub const POOL_STAKER_INFOS: &[StakerInfo] = &[
staker: "J51tinoLdmEdUR27LUVymrb2LB3xQo1aSHSgmbSGdj58",
sol: 57_500_000.0,
},
];
pub const GRANTS_STAKER_INFOS: &[StakerInfo] = &[
StakerInfo {
name: "rightful agreement",
staker: "DNaKiBwwbbqk1wVoC5AQxWQbuDhvaDVbAtXzsVos9mrc",
@ -499,6 +178,24 @@ pub const POOL_STAKER_INFOS: &[StakerInfo] = &[
},
];
pub const COMMUNITY_STAKER_INFOS: &[StakerInfo] = &[
StakerInfo {
name: "shrill charity",
staker: "BzuqQFnu7oNUeok9ZoJezpqu2vZJU7XR1PxVLkk6wwUD",
sol: 5_000_000.0,
},
StakerInfo {
name: "legal gate",
staker: "FwMbkDZUb78aiMWhZY4BEroAcqmnrXZV77nwrg71C57d",
sol: 16_086_641.0,
},
StakerInfo {
name: "cluttered complaint",
staker: "4h1rt2ic4AXwG7p3Qqhw57EMDD4c3tLYb5J3QstGA2p5",
sol: 153_333_633.41,
},
];
fn add_stakes(
genesis_config: &mut GenesisConfig,
staker_infos: &[StakerInfo],
@ -542,13 +239,6 @@ pub const VALIDATOR_INFOS: &[ValidatorInfo] = &[
node_sol: 500.0,
commission: 0,
},
ValidatorInfo {
name: "P2P.ORG - Secure Non-custodial Staking",
node: "44e8VyWoyZSE2oYHxMHMedAiHkGJqJgPd3tdt6iKoAFL",
vote: "BwwpzEpo1wzgV9N1987ntgNG6jLt3C9532C68pswT7Gp",
node_sol: 500.0,
commission: 0,
},
ValidatorInfo {
name: "Dokia Capital",
node: "GeZ5PrJi9muVCJiJAaFBNGoCEdxGEqTp7L2BmT2WTTy1",
@ -556,6 +246,20 @@ pub const VALIDATOR_INFOS: &[ValidatorInfo] = &[
node_sol: 500.0,
commission: 0,
},
ValidatorInfo {
name: "Forbole",
node: "Fe5sLQAAT7RBT8mcH1AAGCbExJQcYxcwXvp1GjrGbvxs",
vote: "Dr8MkZZuvZVQJFKtjShZYEfg6n93sc1GxevqLnGss7FW",
node_sol: 500.0,
commission: 0,
},
ValidatorInfo {
name: "P2P.ORG - Secure Non-custodial Staking",
node: "44e8VyWoyZSE2oYHxMHMedAiHkGJqJgPd3tdt6iKoAFL",
vote: "BwwpzEpo1wzgV9N1987ntgNG6jLt3C9532C68pswT7Gp",
node_sol: 500.0,
commission: 0,
},
ValidatorInfo {
name: "RockX",
node: "Ez4iUU87ViJLCnmSy1t1Ti3DLoysFXiBseNfnRfoehyY",
@ -563,6 +267,13 @@ pub const VALIDATOR_INFOS: &[ValidatorInfo] = &[
node_sol: 500.0,
commission: 0,
},
ValidatorInfo {
name: "Stake Capital",
node: "HavuVVDXXsJqMzPwQ4KcF5kFm2xqjbChhyi1bgGeCQif",
vote: "HswPkKj1xoLLmpM8t1vy5Pbi8zYYUs9ZawswvofKsFo1",
node_sol: 500.0,
commission: 0,
},
ValidatorInfo {
name: "Staking Facilities",
node: "pbAxyqHHPMwgEjv8kmjGxysk9rhNtN7q22eAjReq6Hj",
@ -688,34 +399,44 @@ fn add_spare_validators(genesis_config: &mut GenesisConfig) -> u64 {
.sum::<u64>()
}
pub fn add_genesis_accounts(genesis_config: &mut GenesisConfig) -> u64 {
add_stakes(
genesis_config,
&BATCH_ONE_STAKER_INFOS,
&UNLOCKS_BY_FIFTHS_FOR_30_MONTHS,
sol_to_lamports(1_000_000.0),
) + add_stakes(
genesis_config,
&BATCH_TWO_STAKER_INFOS,
&UNLOCKS_BY_FIFTHS_FOR_30_MONTHS,
sol_to_lamports(1_000_000.0),
) + add_stakes(
genesis_config,
&BATCH_THREE_STAKER_INFOS,
&UNLOCKS_BY_FIFTHS_FOR_30_MONTHS,
sol_to_lamports(1_000_000.0),
) + add_stakes(
pub fn add_genesis_accounts(genesis_config: &mut GenesisConfig, mut issued_lamports: u64) {
// add_stakes() and add_validators() award tokens for rent exemption and
// to cover an initial transfer-free period of the network
issued_lamports += add_stakes(
genesis_config,
&BATCH_FOUR_STAKER_INFOS,
&UNLOCKS_BY_FIFTHS_FOR_30_MONTHS,
sol_to_lamports(1_000_000.0),
) + add_stakes(
genesis_config,
&POOL_STAKER_INFOS,
&FOUNDATION_STAKER_INFOS,
&UNLOCKS_BY_TENTHS_FOR_60_MONTHS,
sol_to_lamports(1_000_000.0),
) + add_stakes(
genesis_config,
&GRANTS_STAKER_INFOS,
&UNLOCKS_BY_TENTHS_FOR_60_MONTHS,
sol_to_lamports(1_000_000.0),
) + add_stakes(
genesis_config,
&COMMUNITY_STAKER_INFOS,
&UNLOCKS_ALL_DAY_ZERO,
sol_to_lamports(1_000_000.0),
) + add_validators(genesis_config, &VALIDATOR_INFOS)
+ add_spare_validators(genesis_config)
+ add_spare_validators(genesis_config);
// "one thanks" (community pool) gets 500_000_000SOL (total) - above distributions
create_and_add_stakes(
genesis_config,
&StakerInfo {
name: "one thanks",
staker: "3b7akieYUyCgz3Cwt5sTSErMWjg8NEygD6mbGjhGkduB",
sol: 500_000_000.0 - lamports_to_sol(issued_lamports),
},
&UNLOCKS_ALL_DAY_ZERO,
sol_to_lamports(1_000_000.0),
);
}
#[cfg(test)]
@ -726,14 +447,14 @@ mod tests {
fn test_add_genesis_accounts() {
let mut genesis_config = GenesisConfig::default();
let issued_lamports = add_genesis_accounts(&mut genesis_config);
add_genesis_accounts(&mut genesis_config, 0);
let lamports = genesis_config
.accounts
.iter()
.map(|(_, account)| account.lamports)
.sum::<u64>();
assert_eq!(issued_lamports, lamports);
// tolerate rounding errors, less than one part in 10M
assert!((500_000_000.0 - lamports_to_sol(lamports)).abs() < lamports_to_sol(100));
}
}

View File

@ -164,7 +164,6 @@ fn main() -> Result<(), Box<dyn error::Error>> {
)
.arg(
Arg::with_name("bootstrap_vote_pubkey_file")
.short("s")
.long("bootstrap-vote-pubkey")
.value_name("BOOTSTRAP VOTE PUBKEY")
.takes_value(true)
@ -173,13 +172,19 @@ fn main() -> Result<(), Box<dyn error::Error>> {
)
.arg(
Arg::with_name("bootstrap_stake_pubkey_file")
.short("k")
.long("bootstrap-stake-pubkey")
.value_name("BOOTSTRAP STAKE PUBKEY")
.takes_value(true)
.required(true)
.help("Path to file containing the bootstrap leader's staking pubkey"),
)
.arg(
Arg::with_name("bootstrap_stake_authorized_pubkey_file")
.long("bootstrap-stake-authorized-pubkey")
.value_name("BOOTSTRAP STAKE AUTHORIZED PUBKEY")
.takes_value(true)
.help("Path to file containing the pubkey authorized to manage the bootstrap leader's stake [default: --bootstrap-leader-pubkey]"),
)
.arg(
Arg::with_name("bootstrap_storage_pubkey_file")
.long("bootstrap-storage-pubkey")
@ -317,7 +322,7 @@ fn main() -> Result<(), Box<dyn error::Error>> {
)
.get_matches();
let faucet_lamports = value_t!(matches, "faucet_lamports", u64);
let faucet_lamports = value_t!(matches, "faucet_lamports", u64).unwrap_or(0);
let ledger_path = PathBuf::from(matches.value_of("ledger_path").unwrap());
let bootstrap_leader_lamports = value_t_or_exit!(matches, "bootstrap_leader_lamports", u64);
let bootstrap_leader_stake_lamports =
@ -326,6 +331,8 @@ fn main() -> Result<(), Box<dyn error::Error>> {
let bootstrap_leader_pubkey = required_pubkey(&matches, "bootstrap_leader_pubkey_file")?;
let bootstrap_vote_pubkey = required_pubkey(&matches, "bootstrap_vote_pubkey_file")?;
let bootstrap_stake_pubkey = required_pubkey(&matches, "bootstrap_stake_pubkey_file")?;
let bootstrap_stake_authorized_pubkey =
pubkey_of(&matches, "bootstrap_stake_authorized_pubkey_file");
let bootstrap_storage_pubkey = pubkey_of(&matches, "bootstrap_storage_pubkey_file");
let faucet_pubkey = pubkey_of(&matches, "faucet_pubkey_file");
@ -339,7 +346,9 @@ fn main() -> Result<(), Box<dyn error::Error>> {
vote_state::create_account(&bootstrap_vote_pubkey, &bootstrap_leader_pubkey, 0, 1);
let bootstrap_leader_stake_account = stake_state::create_account(
&bootstrap_leader_pubkey,
bootstrap_stake_authorized_pubkey
.as_ref()
.unwrap_or(&bootstrap_leader_pubkey),
&bootstrap_vote_pubkey,
&bootstrap_leader_vote_account,
&rent,
@ -438,7 +447,7 @@ fn main() -> Result<(), Box<dyn error::Error>> {
if let Some(faucet_pubkey) = faucet_pubkey {
genesis_config.add_account(
faucet_pubkey,
Account::new(faucet_lamports.unwrap(), 0, &system_program::id()),
Account::new(faucet_lamports, 0, &system_program::id()),
);
}
@ -452,12 +461,19 @@ fn main() -> Result<(), Box<dyn error::Error>> {
}
}
add_genesis_accounts(&mut genesis_config);
let issued_lamports = genesis_config
.accounts
.iter()
.map(|(_key, account)| account.lamports)
.sum::<u64>();
add_genesis_accounts(&mut genesis_config, issued_lamports - faucet_lamports);
create_new_ledger(&ledger_path, &genesis_config)?;
println!(
"Genesis mode: {:?} hashes per tick: {:?} slots_per_epoch: {} capitalization: {}SOL in {} accounts",
"Genesis hash: {}\nOperating mode: {:?}\nHashes per tick: {:?}\nSlots per epoch: {}\nCapitalization: {} SOL in {} accounts",
genesis_config.hash(),
operating_mode,
genesis_config.poh_config.hashes_per_tick,
slots_per_epoch,

View File

@ -3,19 +3,19 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-gossip"
description = "Blockchain, Rebuilt for Scale"
version = "0.21.1"
version = "0.21.8"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
[dependencies]
clap = "2.33.0"
solana-clap-utils = { path = "../clap-utils", version = "0.21.1" }
solana-core = { path = "../core", version = "0.21.1" }
solana-client = { path = "../client", version = "0.21.1" }
solana-logger = { path = "../logger", version = "0.21.1" }
solana-net-utils = { path = "../net-utils", version = "0.21.1" }
solana-sdk = { path = "../sdk", version = "0.21.1" }
solana-clap-utils = { path = "../clap-utils", version = "0.21.8" }
solana-core = { path = "../core", version = "0.21.8" }
solana-client = { path = "../client", version = "0.21.8" }
solana-logger = { path = "../logger", version = "0.21.8" }
solana-net-utils = { path = "../net-utils", version = "0.21.8" }
solana-sdk = { path = "../sdk", version = "0.21.8" }

View File

@ -9,7 +9,7 @@ use solana_client::rpc_client::RpcClient;
use solana_core::{contact_info::ContactInfo, gossip_service::discover};
use solana_sdk::pubkey::Pubkey;
use std::error;
use std::net::SocketAddr;
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use std::process::exit;
fn main() -> Result<(), Box<dyn error::Error>> {
@ -38,6 +38,13 @@ fn main() -> Result<(), Box<dyn error::Error>> {
.takes_value(false)
.help("Return all RPC URLs"),
)
.arg(
Arg::with_name("any")
.long("any")
.takes_value(false)
.conflicts_with("all")
.help("Return any RPC URL"),
)
.arg(
Arg::with_name("timeout")
.long("timeout")
@ -74,9 +81,8 @@ fn main() -> Result<(), Box<dyn error::Error>> {
.long("gossip-host")
.value_name("HOST")
.takes_value(true)
.conflicts_with("entrypoint")
.validator(solana_net_utils::is_host)
.help("Gossip DNS name or IP address for the node when --entrypoint is not provided [default: 127.0.0.1]"),
.help("Gossip DNS name or IP address for the node [default: ask --entrypoint, or 127.0.0.1 when --entrypoint is not provided]"),
)
.arg(
Arg::with_name("num_nodes")
@ -164,21 +170,29 @@ fn main() -> Result<(), Box<dyn error::Error>> {
let entrypoint_addr = parse_entrypoint(&matches);
let gossip_host = if let Some(entrypoint_addr) = entrypoint_addr {
solana_net_utils::get_public_ip_addr(&entrypoint_addr).unwrap_or_else(|err| {
eprintln!(
"Failed to contact cluster entrypoint {}: {}",
entrypoint_addr, err
);
exit(1);
})
} else {
solana_net_utils::parse_host(matches.value_of("gossip_host").unwrap_or("127.0.0.1"))
.unwrap_or_else(|err| {
eprintln!("Error: {}", err);
let gossip_host = matches
.value_of("gossip_host")
.map(|gossip_host| {
solana_net_utils::parse_host(gossip_host).unwrap_or_else(|e| {
eprintln!("failed to parse gossip-host: {}", e);
exit(1);
})
};
})
.unwrap_or_else(|| {
if let Some(entrypoint_addr) = entrypoint_addr {
solana_net_utils::get_public_ip_addr(&entrypoint_addr).unwrap_or_else(
|err| {
eprintln!(
"Failed to contact cluster entrypoint {}: {}",
entrypoint_addr, err
);
exit(1);
},
)
} else {
IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1))
}
});
let gossip_addr = SocketAddr::new(
gossip_host,
@ -230,6 +244,8 @@ fn main() -> Result<(), Box<dyn error::Error>> {
}
}
("get-rpc-url", Some(matches)) => {
let any = matches.is_present("any");
let all = matches.is_present("all");
let entrypoint_addr = parse_entrypoint(&matches);
let timeout = value_t_or_exit!(matches, "timeout", u64);
let (nodes, _archivers) = discover(
@ -244,7 +260,7 @@ fn main() -> Result<(), Box<dyn error::Error>> {
let rpc_addrs: Vec<_> = nodes
.iter()
.filter_map(|contact_info| {
if (matches.is_present("all") || Some(contact_info.gossip) == entrypoint_addr)
if (any || all || Some(contact_info.gossip) == entrypoint_addr)
&& ContactInfo::is_valid_address(&contact_info.rpc)
{
return Some(contact_info.rpc);
@ -260,6 +276,9 @@ fn main() -> Result<(), Box<dyn error::Error>> {
for rpc_addr in rpc_addrs {
println!("http://{}", rpc_addr);
if any {
break;
}
}
}
("stop", Some(matches)) => {

View File

@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-install"
description = "The solana cluster software installer"
version = "0.21.1"
version = "0.21.8"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@ -26,11 +26,11 @@ reqwest = { version = "0.9.22", default-features = false, features = ["rustls-tl
serde = "1.0.102"
serde_derive = "1.0.102"
serde_yaml = "0.8.11"
solana-clap-utils = { path = "../clap-utils", version = "0.21.1" }
solana-client = { path = "../client", version = "0.21.1" }
solana-config-program = { path = "../programs/config", version = "0.21.1" }
solana-logger = { path = "../logger", version = "0.21.1" }
solana-sdk = { path = "../sdk", version = "0.21.1" }
solana-clap-utils = { path = "../clap-utils", version = "0.21.8" }
solana-client = { path = "../client", version = "0.21.8" }
solana-config-program = { path = "../programs/config", version = "0.21.8" }
solana-logger = { path = "../logger", version = "0.21.8" }
solana-sdk = { path = "../sdk", version = "0.21.8" }
tar = "0.4.26"
tempdir = "0.3.7"
url = "2.1.0"

View File

@ -1,6 +1,6 @@
[package]
name = "solana-keygen"
version = "0.21.1"
version = "0.21.8"
description = "Solana key generation utility"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -14,8 +14,8 @@ clap = "2.33"
dirs = "2.0.2"
num_cpus = "1.11.1"
rpassword = "4.0"
solana-clap-utils = { path = "../clap-utils", version = "0.21.1" }
solana-sdk = { path = "../sdk", version = "0.21.1" }
solana-clap-utils = { path = "../clap-utils", version = "0.21.8" }
solana-sdk = { path = "../sdk", version = "0.21.8" }
tiny-bip39 = "0.6.2"
[[bin]]

View File

@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-ledger-tool"
description = "Blockchain, Rebuilt for Scale"
version = "0.21.1"
version = "0.21.8"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@ -15,12 +15,12 @@ serde = "1.0.102"
serde_derive = "1.0.102"
serde_json = "1.0.41"
serde_yaml = "0.8.11"
solana-clap-utils = { path = "../clap-utils", version = "0.21.1" }
solana-ledger = { path = "../ledger", version = "0.21.1" }
solana-logger = { path = "../logger", version = "0.21.1" }
solana-runtime = { path = "../runtime", version = "0.21.1" }
solana-sdk = { path = "../sdk", version = "0.21.1" }
solana-vote-program = { path = "../programs/vote", version = "0.21.1" }
solana-clap-utils = { path = "../clap-utils", version = "0.21.8" }
solana-ledger = { path = "../ledger", version = "0.21.8" }
solana-logger = { path = "../logger", version = "0.21.8" }
solana-runtime = { path = "../runtime", version = "0.21.8" }
solana-sdk = { path = "../sdk", version = "0.21.8" }
solana-vote-program = { path = "../programs/vote", version = "0.21.8" }
[dev-dependencies]
assert_cmd = "0.11"

View File

@ -30,6 +30,7 @@ enum LedgerOutputMethod {
}
fn output_slot(blocktree: &Blocktree, slot: Slot, method: &LedgerOutputMethod) {
println!("Slot Meta {:?}", blocktree.meta(slot));
let entries = blocktree
.get_slot_entries(slot, 0, None)
.unwrap_or_else(|err| {
@ -436,7 +437,14 @@ fn main() {
)
.subcommand(
SubCommand::with_name("bounds")
.about("Print lowest and highest non-empty slots. Note: This ignores gaps in slots")
.about("Print lowest and highest non-empty slots. Note that there may be empty slots within the bounds")
.arg(
Arg::with_name("all")
.long("all")
.takes_value(false)
.required(false)
.help("Additionally print all the non-empty slots within the bounds"),
)
)
.subcommand(
SubCommand::with_name("json")
@ -719,8 +727,10 @@ fn main() {
}
});
}
("bounds", _) => match blocktree.slot_meta_iterator(0) {
("bounds", Some(args_matches)) => match blocktree.slot_meta_iterator(0) {
Ok(metas) => {
let all = args_matches.is_present("all");
println!("Collecting Ledger information...");
let slots: Vec<_> = metas.map(|(slot, _)| slot).collect();
if slots.is_empty() {
@ -729,10 +739,10 @@ fn main() {
let first = slots.first().unwrap();
let last = slots.last().unwrap_or_else(|| first);
if first != last {
println!(
"Ledger contains some data for slots {:?} to {:?}",
first, last
);
println!("Ledger contains data from slots {:?} to {:?}", first, last);
if all {
println!("Non-empty slots: {:?}", slots);
}
} else {
println!("Ledger only contains some data for slot {:?}", first);
}

View File

@ -31,6 +31,7 @@ fn bad_arguments() {
fn nominal() {
let genesis_config = create_genesis_config(100).genesis_config;
let ticks_per_slot = genesis_config.ticks_per_slot;
let meta_lines = 1;
let (ledger_path, _blockhash) = create_new_tmp_ledger!(&genesis_config);
let ticks = ticks_per_slot as usize;
@ -44,5 +45,5 @@ fn nominal() {
// Print everything
let output = run_ledger_tool(&["-l", &ledger_path, "print"]);
assert!(output.status.success());
assert_eq!(count_newlines(&output.stdout), ticks + 1);
assert_eq!(count_newlines(&output.stdout), ticks + meta_lines + 1);
}

View File

@ -1,6 +1,6 @@
[package]
name = "solana-ledger"
version = "0.21.1"
version = "0.21.8"
description = "Solana ledger"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -29,19 +29,19 @@ rayon = "1.2.0"
reed-solomon-erasure = { package = "solana-reed-solomon-erasure", version = "4.0.1-3", features = ["simd-accel"] }
serde = "1.0.102"
serde_derive = "1.0.102"
solana-client = { path = "../client", version = "0.21.1" }
solana-genesis-programs = { path = "../genesis-programs", version = "0.21.1" }
solana-logger = { path = "../logger", version = "0.21.1" }
solana-measure = { path = "../measure", version = "0.21.1" }
solana-merkle-tree = { path = "../merkle-tree", version = "0.21.1" }
solana-metrics = { path = "../metrics", version = "0.21.1" }
solana-perf = { path = "../perf", version = "0.21.1" }
solana-client = { path = "../client", version = "0.21.8" }
solana-genesis-programs = { path = "../genesis-programs", version = "0.21.8" }
solana-logger = { path = "../logger", version = "0.21.8" }
solana-measure = { path = "../measure", version = "0.21.8" }
solana-merkle-tree = { path = "../merkle-tree", version = "0.21.8" }
solana-metrics = { path = "../metrics", version = "0.21.8" }
solana-perf = { path = "../perf", version = "0.21.8" }
ed25519-dalek = "1.0.0-pre.1"
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "0.21.1" }
solana-runtime = { path = "../runtime", version = "0.21.1" }
solana-sdk = { path = "../sdk", version = "0.21.1" }
solana-stake-program = { path = "../programs/stake", version = "0.21.1" }
solana-vote-program = { path = "../programs/vote", version = "0.21.1" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "0.21.8" }
solana-runtime = { path = "../runtime", version = "0.21.8" }
solana-sdk = { path = "../sdk", version = "0.21.8" }
solana-stake-program = { path = "../programs/stake", version = "0.21.8" }
solana-vote-program = { path = "../programs/vote", version = "0.21.8" }
sys-info = "0.5.8"
tar = "0.4.26"
tempfile = "3.1.0"
@ -54,8 +54,9 @@ default-features = false
features = ["lz4"]
[dev-dependencies]
assert_matches = "1.3.0"
matches = "0.1.6"
solana-budget-program = { path = "../programs/budget", version = "0.21.1" }
solana-budget-program = { path = "../programs/budget", version = "0.21.8" }
[lib]
crate-type = ["lib"]

View File

@ -357,10 +357,20 @@ impl Blocktree {
Ok(slot_iterator.take_while(move |((shred_slot, _), _)| *shred_slot == slot))
}
pub fn slot_coding_iterator<'a>(
&'a self,
slot: Slot,
) -> Result<impl Iterator<Item = ((u64, u64), Box<[u8]>)> + 'a> {
let slot_iterator = self
.db
.iter::<cf::ShredCode>(IteratorMode::From((slot, 0), IteratorDirection::Forward))?;
Ok(slot_iterator.take_while(move |((shred_slot, _), _)| *shred_slot == slot))
}
fn try_shred_recovery(
db: &Database,
erasure_metas: &HashMap<(u64, u64), ErasureMeta>,
index_working_set: &HashMap<u64, IndexMetaWorkingSetEntry>,
index_working_set: &mut HashMap<u64, IndexMetaWorkingSetEntry>,
prev_inserted_datas: &mut HashMap<(u64, u64), Shred>,
prev_inserted_codes: &mut HashMap<(u64, u64), Shred>,
) -> Vec<Shred> {
@ -385,8 +395,8 @@ impl Blocktree {
);
};
let index_meta_entry = index_working_set.get(&slot).expect("Index");
let index = &index_meta_entry.index;
let index_meta_entry = index_working_set.get_mut(&slot).expect("Index");
let index = &mut index_meta_entry.index;
match erasure_meta.status(&index) {
ErasureMetaStatus::CanRecover => {
// Find shreds for this erasure set and try recovery
@ -413,8 +423,17 @@ impl Blocktree {
});
(set_index..set_index + erasure_meta.config.num_coding() as u64).for_each(
|i| {
if let Some(shred) =
prev_inserted_codes.remove(&(slot, i)).or_else(|| {
if let Some(shred) = prev_inserted_codes
.remove(&(slot, i))
.map(|s| {
// Remove from the index so it doesn't get committed. We know
// this is safe to do because everything in
// `prev_inserted_codes` does not yet exist in blocktree
// (guaranteed by `check_cache_coding_shred`)
index.coding_mut().set_present(i, false);
s
})
.or_else(|| {
if index.coding().is_present(i) {
let some_code = code_cf
.get_bytes((slot, i))
@ -450,8 +469,14 @@ impl Blocktree {
ErasureMetaStatus::DataFull => {
(set_index..set_index + erasure_meta.config.num_coding() as u64).for_each(
|i| {
// Remove saved coding shreds. We don't need these for future recovery
let _ = prev_inserted_codes.remove(&(slot, i));
// Remove saved coding shreds. We don't need these for future recovery.
if prev_inserted_codes.remove(&(slot, i)).is_some() {
// Remove from the index so it doesn't get committed. We know
// this is safe to do because everything in
// `prev_inserted_codes` does not yet exist in blocktree
// (guaranteed by `check_cache_coding_shred`)
index.coding_mut().set_present(i, false);
}
},
);
submit_metrics(false, "complete".into(), 0);
@ -524,7 +549,7 @@ impl Blocktree {
let recovered_data = Self::try_shred_recovery(
&db,
&erasure_metas,
&index_working_set,
&mut index_working_set,
&mut just_inserted_data_shreds,
&mut just_inserted_coding_shreds,
);
@ -681,6 +706,13 @@ impl Blocktree {
);
}
// Should be safe to modify index_meta here. Two cases
// 1) Recovery happens: Then all inserted erasure metas are removed
// from just_received_coding_shreds, and nothing wll be committed by
// `check_insert_coding_shred`, so the coding index meta will not be
// committed
index_meta.coding_mut().set_present(shred_index, true);
just_received_coding_shreds
.entry((slot, shred_index))
.or_insert_with(|| shred);
@ -862,19 +894,10 @@ impl Blocktree {
// Parent for slot meta should have been set by this point
assert!(!is_orphan(slot_meta));
let data_cf = self.db.column::<cf::ShredData>();
let check_data_cf = |slot, index| {
data_cf
.get_bytes((slot, index))
.map(|opt| opt.is_some())
.unwrap_or(false)
};
let new_consumed = if slot_meta.consumed == index {
let mut current_index = index + 1;
while data_index.is_present(current_index) || check_data_cf(slot, current_index) {
while data_index.is_present(current_index) {
current_index += 1;
}
current_index
@ -1160,24 +1183,30 @@ impl Blocktree {
.expect("Rooted slot must exist in SlotMeta");
let slot_entries = self.get_slot_entries(slot, 0, None)?;
let slot_transaction_iterator = slot_entries
.iter()
.cloned()
.flat_map(|entry| entry.transactions);
let parent_slot_entries = self.get_slot_entries(slot_meta.parent_slot, 0, None)?;
if !slot_entries.is_empty() {
let slot_transaction_iterator = slot_entries
.iter()
.cloned()
.flat_map(|entry| entry.transactions);
let parent_slot_entries = self.get_slot_entries(slot_meta.parent_slot, 0, None)?;
let previous_blockhash = if !parent_slot_entries.is_empty() {
get_last_hash(parent_slot_entries.iter()).unwrap()
} else {
Hash::default()
};
let block = RpcConfirmedBlock {
previous_blockhash: get_last_hash(parent_slot_entries.iter())
.expect("Rooted parent slot must have blockhash"),
blockhash: get_last_hash(slot_entries.iter())
.expect("Rooted slot must have blockhash"),
parent_slot: slot_meta.parent_slot,
transactions: self.map_transactions_to_statuses(slot, slot_transaction_iterator),
};
Ok(block)
} else {
Err(BlocktreeError::SlotNotRooted)
let block = RpcConfirmedBlock {
previous_blockhash,
blockhash: get_last_hash(slot_entries.iter())
.unwrap_or_else(|| panic!("Rooted slot {:?} must have blockhash", slot)),
parent_slot: slot_meta.parent_slot,
transactions: self
.map_transactions_to_statuses(slot, slot_transaction_iterator),
};
return Ok(block);
}
}
Err(BlocktreeError::SlotNotRooted)
}
fn map_transactions_to_statuses<'a>(
@ -2092,12 +2121,16 @@ pub mod tests {
use crate::{
entry::{next_entry, next_entry_mut},
genesis_utils::{create_genesis_config, GenesisConfigInfo},
leader_schedule::{FixedSchedule, LeaderSchedule},
shred::{max_ticks_per_n_shreds, DataShredHeader},
};
use assert_matches::assert_matches;
use bincode::serialize;
use itertools::Itertools;
use rand::{seq::SliceRandom, thread_rng};
use solana_runtime::bank::Bank;
use solana_sdk::{
hash::{self, Hash},
hash::{self, hash, Hash},
instruction::CompiledInstruction,
packet::PACKET_DATA_SIZE,
pubkey::Pubkey,
@ -2107,13 +2140,9 @@ pub mod tests {
use std::{iter::FromIterator, time::Duration};
// used for tests only
fn make_slot_entries_with_transactions(
slot: Slot,
parent_slot: Slot,
num_entries: u64,
) -> (Vec<Shred>, Vec<Entry>) {
fn make_slot_entries_with_transactions(num_entries: u64) -> Vec<Entry> {
let mut entries: Vec<Entry> = Vec::new();
for _ in 0..num_entries {
for x in 0..num_entries {
let transaction = Transaction::new_with_compiled_instructions(
&[&Keypair::new()],
&[Pubkey::new_rand()],
@ -2122,11 +2151,10 @@ pub mod tests {
vec![CompiledInstruction::new(1, &(), vec![0])],
);
entries.push(next_entry_mut(&mut Hash::default(), 0, vec![transaction]));
let mut tick = create_ticks(1, 0, Hash::default());
let mut tick = create_ticks(1, 0, hash(&serialize(&x).unwrap()));
entries.append(&mut tick);
}
let shreds = entries_to_test_shreds(entries.clone(), slot, parent_slot, true, 0);
(shreds, entries)
entries
}
#[test]
@ -4169,13 +4197,22 @@ pub mod tests {
#[test]
fn test_get_confirmed_block() {
let slot = 0;
let (shreds, entries) = make_slot_entries_with_transactions(slot, 0, 100);
let slot = 10;
let entries = make_slot_entries_with_transactions(100);
let blockhash = get_last_hash(entries.iter()).unwrap();
let shreds = entries_to_test_shreds(entries.clone(), slot, slot - 1, true, 0);
let more_shreds = entries_to_test_shreds(entries.clone(), slot + 1, slot, true, 0);
let ledger_path = get_tmp_ledger_path!();
let ledger = Blocktree::open(&ledger_path).unwrap();
ledger.insert_shreds(shreds, None, false).unwrap();
ledger.set_roots(&[0]).unwrap();
ledger.insert_shreds(more_shreds, None, false).unwrap();
ledger.set_roots(&[slot - 1, slot, slot + 1]).unwrap();
let mut parent_meta = SlotMeta::default();
parent_meta.parent_slot = std::u64::MAX;
ledger
.put_meta_bytes(slot - 1, &serialize(&parent_meta).unwrap())
.unwrap();
let expected_transactions: Vec<(Transaction, Option<RpcTransactionStatus>)> = entries
.iter()
@ -4194,6 +4231,16 @@ pub mod tests {
},
)
.unwrap();
ledger
.transaction_status_cf
.put(
(slot + 1, signature),
&RpcTransactionStatus {
status: Ok(()),
fee: 42,
},
)
.unwrap();
(
transaction,
Some(RpcTransactionStatus {
@ -4204,17 +4251,33 @@ pub mod tests {
})
.collect();
let confirmed_block = ledger.get_confirmed_block(0).unwrap();
// Even if marked as root, a slot that is empty of entries should return an error
let confirmed_block_err = ledger.get_confirmed_block(slot - 1).unwrap_err();
assert_matches!(confirmed_block_err, BlocktreeError::SlotNotRooted);
let confirmed_block = ledger.get_confirmed_block(slot).unwrap();
assert_eq!(confirmed_block.transactions.len(), 100);
let mut expected_block = RpcConfirmedBlock::default();
expected_block.transactions = expected_transactions.clone();
expected_block.parent_slot = slot - 1;
expected_block.blockhash = blockhash;
// The previous_blockhash of `expected_block` is default because its parent slot is a
// root, but empty of entries. This is special handling for snapshot root slots.
assert_eq!(confirmed_block, expected_block);
let confirmed_block = ledger.get_confirmed_block(slot + 1).unwrap();
assert_eq!(confirmed_block.transactions.len(), 100);
let mut expected_block = RpcConfirmedBlock::default();
expected_block.transactions = expected_transactions;
// The blockhash and previous_blockhash of `expected_block` are default only because
// `make_slot_entries_with_transactions` sets all entry hashes to default
expected_block.parent_slot = slot;
expected_block.previous_blockhash = blockhash;
expected_block.blockhash = blockhash;
assert_eq!(confirmed_block, expected_block);
let not_root = ledger.get_confirmed_block(1);
assert!(not_root.is_err());
let not_root = ledger.get_confirmed_block(slot + 2).unwrap_err();
assert_matches!(not_root, BlocktreeError::SlotNotRooted);
drop(ledger);
Blocktree::destroy(&ledger_path).expect("Expected successful database destruction");
@ -4342,4 +4405,236 @@ pub mod tests {
}
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
}
#[test]
fn test_recovery() {
let slot = 1;
let (data_shreds, coding_shreds, leader_schedule_cache) =
setup_erasure_shreds(slot, 0, 100, 1.0);
let blocktree_path = get_tmp_ledger_path!();
{
let blocktree = Blocktree::open(&blocktree_path).unwrap();
blocktree
.insert_shreds(coding_shreds, Some(&leader_schedule_cache), false)
.unwrap();
let shred_bufs: Vec<_> = data_shreds
.iter()
.map(|shred| shred.payload.clone())
.collect();
// Check all the data shreds were recovered
for (s, buf) in data_shreds.iter().zip(shred_bufs) {
assert_eq!(
blocktree
.get_data_shred(s.slot(), s.index() as u64)
.unwrap()
.unwrap(),
buf
);
}
verify_index_integrity(&blocktree, slot);
}
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
}
#[test]
fn test_index_integrity() {
let slot = 1;
let num_entries = 100;
let (data_shreds, coding_shreds, leader_schedule_cache) =
setup_erasure_shreds(slot, 0, num_entries, 1.0);
assert!(data_shreds.len() > 3);
assert!(coding_shreds.len() > 3);
let blocktree_path = get_tmp_ledger_path!();
{
let blocktree = Blocktree::open(&blocktree_path).unwrap();
// Test inserting all the shreds
let all_shreds: Vec<_> = data_shreds
.iter()
.cloned()
.chain(coding_shreds.iter().cloned())
.collect();
blocktree
.insert_shreds(all_shreds, Some(&leader_schedule_cache), false)
.unwrap();
verify_index_integrity(&blocktree, slot);
blocktree.purge_slots(0, Some(slot));
// Test inserting just the codes, enough for recovery
blocktree
.insert_shreds(coding_shreds.clone(), Some(&leader_schedule_cache), false)
.unwrap();
verify_index_integrity(&blocktree, slot);
blocktree.purge_slots(0, Some(slot));
// Test inserting some codes, but not enough for recovery
blocktree
.insert_shreds(
coding_shreds[..coding_shreds.len() - 1].to_vec(),
Some(&leader_schedule_cache),
false,
)
.unwrap();
verify_index_integrity(&blocktree, slot);
blocktree.purge_slots(0, Some(slot));
// Test inserting just the codes, and some data, enough for recovery
let shreds: Vec<_> = data_shreds[..data_shreds.len() - 1]
.iter()
.cloned()
.chain(coding_shreds[..coding_shreds.len() - 1].iter().cloned())
.collect();
blocktree
.insert_shreds(shreds, Some(&leader_schedule_cache), false)
.unwrap();
verify_index_integrity(&blocktree, slot);
blocktree.purge_slots(0, Some(slot));
// Test inserting some codes, and some data, but enough for recovery
let shreds: Vec<_> = data_shreds[..data_shreds.len() / 2 - 1]
.iter()
.cloned()
.chain(coding_shreds[..coding_shreds.len() / 2 - 1].iter().cloned())
.collect();
blocktree
.insert_shreds(shreds, Some(&leader_schedule_cache), false)
.unwrap();
verify_index_integrity(&blocktree, slot);
blocktree.purge_slots(0, Some(slot));
// Test inserting all shreds in 2 rounds, make sure nothing is lost
let shreds1: Vec<_> = data_shreds[..data_shreds.len() / 2 - 1]
.iter()
.cloned()
.chain(coding_shreds[..coding_shreds.len() / 2 - 1].iter().cloned())
.collect();
let shreds2: Vec<_> = data_shreds[data_shreds.len() / 2 - 1..]
.iter()
.cloned()
.chain(coding_shreds[coding_shreds.len() / 2 - 1..].iter().cloned())
.collect();
blocktree
.insert_shreds(shreds1, Some(&leader_schedule_cache), false)
.unwrap();
blocktree
.insert_shreds(shreds2, Some(&leader_schedule_cache), false)
.unwrap();
verify_index_integrity(&blocktree, slot);
blocktree.purge_slots(0, Some(slot));
// Test not all, but enough data and coding shreds in 2 rounds to trigger recovery,
// make sure nothing is lost
let shreds1: Vec<_> = data_shreds[..data_shreds.len() / 2 - 1]
.iter()
.cloned()
.chain(coding_shreds[..coding_shreds.len() / 2 - 1].iter().cloned())
.collect();
let shreds2: Vec<_> = data_shreds[data_shreds.len() / 2 - 1..data_shreds.len() / 2]
.iter()
.cloned()
.chain(
coding_shreds[coding_shreds.len() / 2 - 1..data_shreds.len() / 2]
.iter()
.cloned(),
)
.collect();
blocktree
.insert_shreds(shreds1, Some(&leader_schedule_cache), false)
.unwrap();
blocktree
.insert_shreds(shreds2, Some(&leader_schedule_cache), false)
.unwrap();
verify_index_integrity(&blocktree, slot);
blocktree.purge_slots(0, Some(slot));
// Test insert shreds in 2 rounds, but not enough to trigger
// recovery, make sure nothing is lost
let shreds1: Vec<_> = data_shreds[..data_shreds.len() / 2 - 2]
.iter()
.cloned()
.chain(coding_shreds[..coding_shreds.len() / 2 - 2].iter().cloned())
.collect();
let shreds2: Vec<_> = data_shreds[data_shreds.len() / 2 - 2..data_shreds.len() / 2 - 1]
.iter()
.cloned()
.chain(
coding_shreds[coding_shreds.len() / 2 - 2..coding_shreds.len() / 2 - 1]
.iter()
.cloned(),
)
.collect();
blocktree
.insert_shreds(shreds1, Some(&leader_schedule_cache), false)
.unwrap();
blocktree
.insert_shreds(shreds2, Some(&leader_schedule_cache), false)
.unwrap();
verify_index_integrity(&blocktree, slot);
blocktree.purge_slots(0, Some(slot));
}
Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
}
fn setup_erasure_shreds(
slot: u64,
parent_slot: u64,
num_entries: u64,
erasure_rate: f32,
) -> (Vec<Shred>, Vec<Shred>, Arc<LeaderScheduleCache>) {
let entries = make_slot_entries_with_transactions(num_entries);
let leader_keypair = Arc::new(Keypair::new());
let shredder = Shredder::new(
slot,
parent_slot,
erasure_rate,
leader_keypair.clone(),
0,
0,
)
.expect("Failed in creating shredder");
let (data_shreds, coding_shreds, _) = shredder.entries_to_shreds(&entries, true, 0);
let genesis_config = create_genesis_config(2).genesis_config;
let bank = Arc::new(Bank::new(&genesis_config));
let mut leader_schedule_cache = LeaderScheduleCache::new_from_bank(&bank);
let fixed_schedule = FixedSchedule {
leader_schedule: Arc::new(LeaderSchedule::new_from_schedule(vec![
leader_keypair.pubkey()
])),
start_epoch: 0,
};
leader_schedule_cache.set_fixed_leader_schedule(Some(fixed_schedule));
(data_shreds, coding_shreds, Arc::new(leader_schedule_cache))
}
fn verify_index_integrity(blocktree: &Blocktree, slot: u64) {
let index = blocktree.get_index(slot).unwrap().unwrap();
// Test the set of data shreds in the index and in the data column
// family are the same
let data_iter = blocktree.slot_data_iterator(slot).unwrap();
let mut num_data = 0;
for ((slot, index), _) in data_iter {
num_data += 1;
assert!(blocktree.get_data_shred(slot, index).unwrap().is_some());
}
// Test the data index doesn't have anything extra
let num_data_in_index = index.data().num_data();
assert_eq!(num_data_in_index, num_data);
// Test the set of coding shreds in the index and in the coding column
// family are the same
let coding_iter = blocktree.slot_coding_iterator(slot).unwrap();
let mut num_coding = 0;
for ((slot, index), _) in coding_iter {
num_coding += 1;
assert!(blocktree.get_coding_shred(slot, index).unwrap().is_some());
}
// Test the data index doesn't have anything extra
let num_coding_in_index = index.coding().num_coding();
assert_eq!(num_coding_in_index, num_coding);
}
}

View File

@ -13,8 +13,6 @@ use solana_client::rpc_request::RpcTransactionStatus;
use solana_sdk::{clock::Slot, signature::Signature};
use std::{collections::HashMap, fs, marker::PhantomData, path::Path, sync::Arc};
// A good value for this is the number of cores on the machine
const TOTAL_THREADS: i32 = 8;
const MAX_WRITE_BUFFER_SIZE: u64 = 256 * 1024 * 1024; // 256MB
// Column family for metadata about a leader slot
@ -718,8 +716,15 @@ fn get_cf_options() -> Options {
// 256 * 8 = 2GB. 6 of these columns should take at most 12GB of RAM
options.set_max_write_buffer_number(8);
options.set_write_buffer_size(MAX_WRITE_BUFFER_SIZE as usize);
options.set_target_file_size_base(MAX_WRITE_BUFFER_SIZE / 10);
options.set_max_bytes_for_level_base(MAX_WRITE_BUFFER_SIZE);
let file_num_compaction_trigger = 4;
// Recommend that this be around the size of level 0. Level 0 estimated size in stable state is
// write_buffer_size * min_write_buffer_number_to_merge * level0_file_num_compaction_trigger
// Source: https://docs.rs/rocksdb/0.6.0/rocksdb/struct.Options.html#method.set_level_zero_file_num_compaction_trigger
let total_size_base = MAX_WRITE_BUFFER_SIZE * file_num_compaction_trigger;
let file_size_base = total_size_base / 10;
options.set_level_zero_file_num_compaction_trigger(file_num_compaction_trigger as i32);
options.set_max_bytes_for_level_base(total_size_base);
options.set_target_file_size_base(file_size_base);
options
}
@ -727,8 +732,7 @@ fn get_db_options() -> Options {
let mut options = Options::default();
options.create_if_missing(true);
options.create_missing_column_families(true);
options.increase_parallelism(TOTAL_THREADS);
options.set_max_background_flushes(4);
options.set_max_background_compactions(4);
// A good value for this is the number of cores on the machine
options.increase_parallelism(sys_info::cpu_num().unwrap() as i32);
options
}

View File

@ -97,6 +97,10 @@ impl Index {
}
impl CodingIndex {
pub fn num_coding(&self) -> usize {
self.index.len()
}
pub fn present_in_bounds(&self, bounds: impl RangeBounds<u64>) -> usize {
self.index.range(bounds).count()
}
@ -121,6 +125,10 @@ impl CodingIndex {
}
impl DataIndex {
pub fn num_data(&self) -> usize {
self.index.len()
}
pub fn present_in_bounds(&self, bounds: impl RangeBounds<u64>) -> usize {
self.index.range(bounds).count()
}

View File

@ -11,6 +11,7 @@ use itertools::Itertools;
use log::*;
use rand::{seq::SliceRandom, thread_rng};
use rayon::{prelude::*, ThreadPool};
use solana_measure::thread_mem_usage;
use solana_metrics::{datapoint, datapoint_error, inc_new_counter_debug};
use solana_rayon_threadlimit::get_thread_count;
use solana_runtime::{
@ -231,6 +232,7 @@ pub enum BlocktreeProcessorError {
FailedToLoadMeta,
InvalidBlock(BlockError),
InvalidTransaction,
NoValidForksFound,
}
impl From<BlockError> for BlocktreeProcessorError {
@ -268,7 +270,7 @@ pub fn process_blocktree(
// Setup bank for slot 0
let bank0 = Arc::new(Bank::new_with_paths(&genesis_config, account_paths));
info!("processing ledger for bank 0...");
info!("processing ledger for slot 0...");
process_bank_0(&bank0, blocktree, &opts)?;
process_blocktree_from_root(genesis_config, blocktree, bank0, &opts)
}
@ -280,7 +282,10 @@ pub fn process_blocktree_from_root(
bank: Arc<Bank>,
opts: &ProcessOptions,
) -> result::Result<(BankForks, Vec<BankForksInfo>, LeaderScheduleCache), BlocktreeProcessorError> {
info!("processing ledger from root: {}...", bank.slot());
info!("processing ledger from root slot {}...", bank.slot());
let allocated = thread_mem_usage::Allocatedp::default();
let initial_allocation = allocated.get();
// Starting slot must be a root, and thus has no parents
assert!(bank.parent().is_none());
let start_slot = bank.slot();
@ -293,7 +298,7 @@ pub fn process_blocktree_from_root(
blocktree
.set_roots(&[start_slot])
.expect("Couldn't set root on startup");
.expect("Couldn't set root slot on startup");
let meta = blocktree.meta(start_slot).unwrap();
@ -314,6 +319,9 @@ pub fn process_blocktree_from_root(
opts,
)?;
let (banks, bank_forks_info): (Vec<_>, Vec<_>) = fork_info.into_iter().unzip();
if banks.is_empty() {
return Err(BlocktreeProcessorError::NoValidForksFound);
}
let bank_forks = BankForks::new_from_banks(&banks, rooted_path);
(bank_forks, bank_forks_info, leader_schedule_cache)
} else {
@ -330,8 +338,9 @@ pub fn process_blocktree_from_root(
};
info!(
"ledger processed in {}ms. {} fork{} at {}",
"ledger processed in {}ms. {} MB allocated. {} fork{} at {}",
duration_as_ms(&now.elapsed()),
allocated.since(initial_allocation) / 1_000_000,
bank_forks_info.len(),
if bank_forks_info.len() > 1 { "s" } else { "" },
bank_forks_info
@ -447,6 +456,9 @@ fn process_next_slots(
// Only process full slots in blocktree_processor, replay_stage
// handles any partials
if next_meta.is_full() {
let allocated = thread_mem_usage::Allocatedp::default();
let initial_allocation = allocated.get();
let next_bank = Arc::new(Bank::new_from_parent(
&bank,
&leader_schedule_cache
@ -454,7 +466,12 @@ fn process_next_slots(
.unwrap(),
*next_slot,
));
trace!("Add child bank {} of slot={}", next_slot, bank.slot());
trace!(
"New bank for slot {}, parent slot is {}. {} bytes allocated",
next_slot,
bank.slot(),
allocated.since(initial_allocation)
);
pending_slots.push((*next_slot, next_meta, next_bank, bank.last_blockhash()));
} else {
let bfi = BankForksInfo {
@ -482,6 +499,7 @@ fn process_pending_slots(
let mut fork_info = vec![];
let mut last_status_report = Instant::now();
let mut pending_slots = vec![];
let mut last_root_slot = root_bank.slot();
process_next_slots(
root_bank,
root_meta,
@ -496,7 +514,10 @@ fn process_pending_slots(
let (slot, meta, bank, last_entry_hash) = pending_slots.pop().unwrap();
if last_status_report.elapsed() > Duration::from_secs(2) {
info!("processing ledger...block {}", slot);
info!(
"processing ledger: slot={}, last root slot={}",
slot, last_root_slot
);
last_status_report = Instant::now();
}
@ -505,13 +526,19 @@ fn process_pending_slots(
continue;
}
let allocated = thread_mem_usage::Allocatedp::default();
let initial_allocation = allocated.get();
// Fetch all entries for this slot
let entries = blocktree.get_slot_entries(slot, 0, None).map_err(|err| {
warn!("Failed to load entries for slot {}: {:?}", slot, err);
BlocktreeProcessorError::FailedToLoadEntries
})?;
verify_and_process_slot_entries(&bank, &entries, last_entry_hash, opts)?;
if let Err(err) = verify_and_process_slot_entries(&bank, &entries, last_entry_hash, opts) {
warn!("slot {} failed to verify: {:?}", slot, err);
continue;
}
bank.freeze(); // all banks handled by this routine are created from complete slots
@ -524,8 +551,16 @@ fn process_pending_slots(
bank.squash();
pending_slots.clear();
fork_info.clear();
last_root_slot = slot;
}
trace!(
"Bank for {}slot {} is complete. {} bytes allocated",
if last_root_slot == slot { "root " } else { "" },
slot,
allocated.since(initial_allocation)
);
if slot >= dev_halt_at_slot {
let bfi = BankForksInfo { bank_slot: slot };
fork_info.push((bank, bfi));
@ -644,8 +679,8 @@ pub mod tests {
let parent_slot = 0;
let slot = 1;
let entries = create_ticks(ticks_per_slot, hashes_per_tick - 1, blockhash);
blocktree
.write_entries(
assert_matches!(
blocktree.write_entries(
slot,
0,
0,
@ -655,18 +690,22 @@ pub mod tests {
&Arc::new(Keypair::new()),
entries,
0,
)
.expect("Expected to write shredded entries to blocktree");
),
Ok(_)
);
let opts = ProcessOptions {
poh_verify: true,
..ProcessOptions::default()
};
assert_eq!(
process_blocktree(&genesis_config, &blocktree, Vec::new(), opts).err(),
Some(BlocktreeProcessorError::InvalidBlock(
BlockError::InvalidTickHashCount
)),
process_blocktree(
&genesis_config,
&blocktree,
Vec::new(),
ProcessOptions {
poh_verify: true,
..ProcessOptions::default()
}
)
.err(),
Some(BlocktreeProcessorError::NoValidForksFound)
);
}
@ -685,8 +724,8 @@ pub mod tests {
let parent_slot = 0;
let slot = 1;
let entries = create_ticks(ticks_per_slot - 1, 0, blockhash);
blocktree
.write_entries(
assert_matches!(
blocktree.write_entries(
slot,
0,
0,
@ -696,19 +735,42 @@ pub mod tests {
&Arc::new(Keypair::new()),
entries,
0,
)
.expect("Expected to write shredded entries to blocktree");
let opts = ProcessOptions {
poh_verify: true,
..ProcessOptions::default()
};
assert_eq!(
process_blocktree(&genesis_config, &blocktree, Vec::new(), opts).err(),
Some(BlocktreeProcessorError::InvalidBlock(
BlockError::InvalidTickCount
)),
),
Ok(_)
);
// No valid forks in blocktree, expect a failure
assert_eq!(
process_blocktree(
&genesis_config,
&blocktree,
Vec::new(),
ProcessOptions {
poh_verify: true,
..ProcessOptions::default()
}
)
.err(),
Some(BlocktreeProcessorError::NoValidForksFound)
);
// Write slot 2 fully
let _last_slot2_entry_hash =
fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 2, 0, blockhash);
let (_bank_forks, bank_forks_info, _) = process_blocktree(
&genesis_config,
&blocktree,
Vec::new(),
ProcessOptions {
poh_verify: true,
..ProcessOptions::default()
},
)
.unwrap();
// One valid fork, one bad fork. process_blocktree() should only return the valid fork
assert_eq!(bank_forks_info, vec![BankForksInfo { bank_slot: 2 }]);
}
#[test]
@ -737,8 +799,8 @@ pub mod tests {
// per slot.
let parent_slot = 0;
let slot = 1;
blocktree
.write_entries(
assert_matches!(
blocktree.write_entries(
slot,
0,
0,
@ -748,8 +810,9 @@ pub mod tests {
&Arc::new(Keypair::new()),
entries,
0,
)
.expect("Expected to write shredded entries to blocktree");
),
Ok(_)
);
let opts = ProcessOptions {
poh_verify: true,
@ -757,9 +820,7 @@ pub mod tests {
};
assert_eq!(
process_blocktree(&genesis_config, &blocktree, Vec::new(), opts).err(),
Some(BlocktreeProcessorError::InvalidBlock(
BlockError::TrailingEntry
)),
Some(BlocktreeProcessorError::NoValidForksFound)
);
}
@ -800,8 +861,8 @@ pub mod tests {
// throw away last one
entries.pop();
blocktree
.write_entries(
assert_matches!(
blocktree.write_entries(
slot,
0,
0,
@ -811,8 +872,9 @@ pub mod tests {
&Arc::new(Keypair::new()),
entries,
0,
)
.expect("Expected to write shredded entries to blocktree");
),
Ok(_)
);
}
// slot 2, points at slot 1

View File

@ -3,6 +3,14 @@ use rand::SeedableRng;
use rand_chacha::ChaChaRng;
use solana_sdk::pubkey::Pubkey;
use std::ops::Index;
use std::sync::Arc;
// Used for testing
#[derive(Clone, Debug)]
pub struct FixedSchedule {
pub leader_schedule: Arc<LeaderSchedule>,
pub start_epoch: u64,
}
/// Stake-weighted leader schedule for one epoch.
#[derive(Debug, Default, PartialEq)]
@ -30,9 +38,17 @@ impl LeaderSchedule {
Self { slot_leaders }
}
pub fn new_from_schedule(slot_leaders: Vec<Pubkey>) -> Self {
Self { slot_leaders }
}
pub fn get_slot_leaders(&self) -> &[Pubkey] {
&self.slot_leaders
}
pub fn num_slots(&self) -> usize {
self.slot_leaders.len()
}
}
impl Index<u64> for LeaderSchedule {

View File

@ -1,4 +1,8 @@
use crate::{blocktree::Blocktree, leader_schedule::LeaderSchedule, leader_schedule_utils};
use crate::{
blocktree::Blocktree,
leader_schedule::{FixedSchedule, LeaderSchedule},
leader_schedule_utils,
};
use log::*;
use solana_runtime::bank::Bank;
use solana_sdk::{
@ -28,6 +32,7 @@ pub struct LeaderScheduleCache {
epoch_schedule: EpochSchedule,
max_epoch: RwLock<Epoch>,
max_schedules: CacheCapacity,
fixed_schedule: Option<Arc<FixedSchedule>>,
}
impl LeaderScheduleCache {
@ -41,6 +46,7 @@ impl LeaderScheduleCache {
epoch_schedule,
max_epoch: RwLock::new(0),
max_schedules: CacheCapacity::default(),
fixed_schedule: None,
};
// This sets the root and calculates the schedule at leader_schedule_epoch(root)
@ -153,8 +159,17 @@ impl LeaderScheduleCache {
first_slot.map(|slot| (slot, last_slot))
}
pub fn set_fixed_leader_schedule(&mut self, fixed_schedule: Option<FixedSchedule>) {
self.fixed_schedule = fixed_schedule.map(Arc::new);
}
fn slot_leader_at_no_compute(&self, slot: Slot) -> Option<Pubkey> {
let (epoch, slot_index) = self.epoch_schedule.get_epoch_and_slot_index(slot);
if let Some(ref fixed_schedule) = self.fixed_schedule {
if epoch >= fixed_schedule.start_epoch {
return Some(fixed_schedule.leader_schedule[slot_index]);
}
}
self.cached_schedules
.read()
.unwrap()
@ -191,6 +206,11 @@ impl LeaderScheduleCache {
epoch: Epoch,
bank: &Bank,
) -> Option<Arc<LeaderSchedule>> {
if let Some(ref fixed_schedule) = self.fixed_schedule {
if epoch >= fixed_schedule.start_epoch {
return Some(fixed_schedule.leader_schedule.clone());
}
}
let epoch_schedule = self.cached_schedules.read().unwrap().0.get(&epoch).cloned();
if epoch_schedule.is_some() {

View File

@ -3,31 +3,32 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-local-cluster"
description = "Blockchain, Rebuilt for Scale"
version = "0.21.1"
version = "0.21.8"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
[dependencies]
itertools = "0.8.1"
log = "0.4.8"
rand = "0.6.5"
solana-config-program = { path = "../programs/config", version = "0.21.1" }
solana-core = { path = "../core", version = "0.21.1" }
solana-client = { path = "../client", version = "0.21.1" }
solana-drone = { path = "../drone", version = "0.21.1" }
solana-exchange-program = { path = "../programs/exchange", version = "0.21.1" }
solana-genesis-programs = { path = "../genesis-programs", version = "0.21.1" }
solana-ledger = { path = "../ledger", version = "0.21.1" }
solana-logger = { path = "../logger", version = "0.21.1" }
solana-runtime = { path = "../runtime", version = "0.21.1" }
solana-sdk = { path = "../sdk", version = "0.21.1" }
solana-stake-program = { path = "../programs/stake", version = "0.21.1" }
solana-storage-program = { path = "../programs/storage", version = "0.21.1" }
solana-vest-program = { path = "../programs/vest", version = "0.21.1" }
solana-vote-program = { path = "../programs/vote", version = "0.21.1" }
solana-config-program = { path = "../programs/config", version = "0.21.8" }
solana-core = { path = "../core", version = "0.21.8" }
solana-client = { path = "../client", version = "0.21.8" }
solana-drone = { path = "../drone", version = "0.21.8" }
solana-exchange-program = { path = "../programs/exchange", version = "0.21.8" }
solana-genesis-programs = { path = "../genesis-programs", version = "0.21.8" }
solana-ledger = { path = "../ledger", version = "0.21.8" }
solana-logger = { path = "../logger", version = "0.21.8" }
solana-runtime = { path = "../runtime", version = "0.21.8" }
solana-sdk = { path = "../sdk", version = "0.21.8" }
solana-stake-program = { path = "../programs/stake", version = "0.21.8" }
solana-storage-program = { path = "../programs/storage", version = "0.21.8" }
solana-vest-program = { path = "../programs/vest", version = "0.21.8" }
solana-vote-program = { path = "../programs/vote", version = "0.21.8" }
symlink = "0.1.0"
tempfile = "3.1.0"
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "0.21.1" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "0.21.8" }
[dev-dependencies]
serial_test = "0.2.0"

View File

@ -1,5 +1,6 @@
use solana_client::thin_client::ThinClient;
use solana_core::contact_info::ContactInfo;
use solana_core::validator::Validator;
use solana_core::validator::ValidatorConfig;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::Keypair;
@ -17,13 +18,19 @@ pub struct ValidatorInfo {
pub struct ClusterValidatorInfo {
pub info: ValidatorInfo,
pub config: ValidatorConfig,
pub validator: Option<Validator>,
}
impl ClusterValidatorInfo {
pub fn new(validator_info: ValidatorInfo, config: ValidatorConfig) -> Self {
pub fn new(
validator_info: ValidatorInfo,
config: ValidatorConfig,
validator: Validator,
) -> Self {
Self {
info: validator_info,
config,
validator: Some(validator),
}
}
}

View File

@ -15,9 +15,12 @@ use solana_ledger::{
};
use solana_sdk::{
client::SyncClient,
clock::{Slot, DEFAULT_TICKS_PER_SECOND, DEFAULT_TICKS_PER_SLOT, NUM_CONSECUTIVE_LEADER_SLOTS},
clock::{
Slot, DEFAULT_MS_PER_SLOT, DEFAULT_TICKS_PER_SECOND, DEFAULT_TICKS_PER_SLOT,
NUM_CONSECUTIVE_LEADER_SLOTS,
},
commitment_config::CommitmentConfig,
epoch_schedule::MINIMUM_SLOTS_PER_EPOCH,
epoch_schedule::{EpochSchedule, MINIMUM_SLOTS_PER_EPOCH},
hash::Hash,
poh_config::PohConfig,
pubkey::Pubkey,
@ -169,6 +172,11 @@ pub fn verify_ledger_ticks(ledger_path: &Path, ticks_per_slot: usize) {
}
}
pub fn time_until_nth_epoch(epoch: u64, slots_per_epoch: u64, stakers_slot_offset: u64) -> u64 {
let epoch_schedule = EpochSchedule::custom(slots_per_epoch, stakers_slot_offset, true);
epoch_schedule.get_last_slot_in_epoch(epoch) * DEFAULT_MS_PER_SLOT
}
pub fn sleep_n_epochs(
num_epochs: f64,
config: &PohConfig,

View File

@ -1,4 +1,5 @@
use crate::cluster::{Cluster, ClusterValidatorInfo, ValidatorInfo};
use itertools::izip;
use log::*;
use solana_client::thin_client::{create_client, ThinClient};
use solana_core::{
@ -39,6 +40,7 @@ use std::{
collections::HashMap,
fs::remove_dir_all,
io::{Error, ErrorKind, Result},
iter,
path::PathBuf,
sync::Arc,
};
@ -66,6 +68,8 @@ pub struct ClusterConfig {
pub num_archivers: usize,
/// Number of nodes that are unstaked and not voting (a.k.a listening)
pub num_listeners: u64,
/// The specific pubkeys of each node if specified
pub validator_keys: Option<Vec<Arc<Keypair>>>,
/// The stakes of each node
pub node_stakes: Vec<u64>,
/// The total lamports available to the cluster
@ -85,6 +89,7 @@ impl Default for ClusterConfig {
validator_configs: vec![],
num_archivers: 0,
num_listeners: 0,
validator_keys: None,
node_stakes: vec![],
cluster_lamports: 0,
ticks_per_slot: DEFAULT_TICKS_PER_SLOT,
@ -103,9 +108,7 @@ pub struct LocalCluster {
pub funding_keypair: Keypair,
/// Entry point from which the rest of the network can be discovered
pub entry_point_info: ContactInfo,
pub validator_infos: HashMap<Pubkey, ClusterValidatorInfo>,
pub listener_infos: HashMap<Pubkey, ClusterValidatorInfo>,
validators: HashMap<Pubkey, Validator>,
pub validators: HashMap<Pubkey, ClusterValidatorInfo>,
pub genesis_config: GenesisConfig,
archivers: Vec<Archiver>,
pub archiver_infos: HashMap<Pubkey, ArchiverInfo>,
@ -129,9 +132,20 @@ impl LocalCluster {
pub fn new(config: &ClusterConfig) -> Self {
assert_eq!(config.validator_configs.len(), config.node_stakes.len());
let leader_keypair = Arc::new(Keypair::new());
let validator_keys = {
if let Some(ref keys) = config.validator_keys {
assert_eq!(config.validator_configs.len(), keys.len());
keys.clone()
} else {
iter::repeat_with(|| Arc::new(Keypair::new()))
.take(config.validator_configs.len())
.collect()
}
};
let leader_keypair = &validator_keys[0];
let leader_pubkey = leader_keypair.pubkey();
let leader_node = Node::new_localhost_with_pubkey(&leader_keypair.pubkey());
let leader_node = Node::new_localhost_with_pubkey(&leader_pubkey);
let GenesisConfigInfo {
mut genesis_config,
mint_keypair,
@ -208,20 +222,22 @@ impl LocalCluster {
);
let mut validators = HashMap::new();
let mut validator_infos = HashMap::new();
validators.insert(leader_pubkey, leader_server);
error!("leader_pubkey: {}", leader_pubkey);
let leader_info = ValidatorInfo {
keypair: leader_keypair,
keypair: leader_keypair.clone(),
voting_keypair: leader_voting_keypair,
storage_keypair: leader_storage_keypair,
ledger_path: leader_ledger_path,
contact_info: leader_contact_info.clone(),
};
let cluster_leader =
ClusterValidatorInfo::new(leader_info, config.validator_configs[0].clone());
let cluster_leader = ClusterValidatorInfo::new(
leader_info,
config.validator_configs[0].clone(),
leader_server,
);
validator_infos.insert(leader_pubkey, cluster_leader);
validators.insert(leader_pubkey, cluster_leader);
let mut cluster = Self {
funding_keypair: mint_keypair,
@ -229,23 +245,24 @@ impl LocalCluster {
validators,
archivers: vec![],
genesis_config,
validator_infos,
archiver_infos: HashMap::new(),
listener_infos: HashMap::new(),
};
for (stake, validator_config) in (&config.node_stakes[1..])
.iter()
.zip((&config.validator_configs[1..]).iter())
{
cluster.add_validator(validator_config, *stake);
for (stake, validator_config, key) in izip!(
(&config.node_stakes[1..]).iter(),
config.validator_configs[1..].iter(),
validator_keys[1..].iter(),
) {
cluster.add_validator(validator_config, *stake, key.clone());
}
let listener_config = ValidatorConfig {
voting_disabled: true,
..config.validator_configs[0].clone()
};
(0..config.num_listeners).for_each(|_| cluster.add_validator(&listener_config, 0));
(0..config.num_listeners).for_each(|_| {
cluster.add_validator(&listener_config, 0, Arc::new(Keypair::new()));
});
discover_cluster(
&cluster.entry_point_info.gossip,
@ -268,14 +285,18 @@ impl LocalCluster {
pub fn exit(&mut self) {
for node in self.validators.values_mut() {
node.exit();
if let Some(ref mut v) = node.validator {
v.exit();
}
}
}
pub fn close_preserve_ledgers(&mut self) {
self.exit();
for (_, node) in self.validators.drain() {
node.join().unwrap();
for (_, node) in self.validators.iter_mut() {
if let Some(v) = node.validator.take() {
v.join().unwrap();
}
}
while let Some(archiver) = self.archivers.pop() {
@ -283,14 +304,18 @@ impl LocalCluster {
}
}
pub fn add_validator(&mut self, validator_config: &ValidatorConfig, stake: u64) {
pub fn add_validator(
&mut self,
validator_config: &ValidatorConfig,
stake: u64,
validator_keypair: Arc<Keypair>,
) -> Pubkey {
let client = create_client(
self.entry_point_info.client_facing_addr(),
VALIDATOR_PORT_RANGE,
);
// Must have enough tokens to fund vote account and set delegate
let validator_keypair = Arc::new(Keypair::new());
let voting_keypair = Keypair::new();
let storage_keypair = Arc::new(Keypair::new());
let validator_pubkey = validator_keypair.pubkey();
@ -341,8 +366,6 @@ impl LocalCluster {
&config,
);
self.validators
.insert(validator_keypair.pubkey(), validator_server);
let validator_pubkey = validator_keypair.pubkey();
let validator_info = ClusterValidatorInfo::new(
ValidatorInfo {
@ -353,14 +376,11 @@ impl LocalCluster {
contact_info,
},
validator_config.clone(),
validator_server,
);
if validator_config.voting_disabled {
self.listener_infos.insert(validator_pubkey, validator_info);
} else {
self.validator_infos
.insert(validator_pubkey, validator_info);
}
self.validators.insert(validator_pubkey, validator_info);
validator_pubkey
}
fn add_archiver(&mut self) {
@ -405,7 +425,7 @@ impl LocalCluster {
fn close(&mut self) {
self.close_preserve_ledgers();
for ledger_path in self
.validator_infos
.validators
.values()
.map(|f| &f.info.ledger_path)
.chain(self.archiver_infos.values().map(|info| &info.ledger_path))
@ -616,7 +636,7 @@ impl Cluster for LocalCluster {
}
fn get_validator_client(&self, pubkey: &Pubkey) -> Option<ThinClient> {
self.validator_infos.get(pubkey).map(|f| {
self.validators.get(pubkey).map(|f| {
create_client(
f.info.contact_info.client_facing_addr(),
VALIDATOR_PORT_RANGE,
@ -628,10 +648,10 @@ impl Cluster for LocalCluster {
let mut node = self.validators.remove(&pubkey).unwrap();
// Shut down the validator
node.exit();
node.join().unwrap();
self.validator_infos.remove(&pubkey).unwrap()
let mut validator = node.validator.take().expect("Validator must be running");
validator.exit();
validator.join().unwrap();
node
}
fn restart_node(&mut self, pubkey: &Pubkey, mut cluster_validator_info: ClusterValidatorInfo) {
@ -666,8 +686,8 @@ impl Cluster for LocalCluster {
&cluster_validator_info.config,
);
self.validators.insert(*pubkey, restarted_node);
self.validator_infos.insert(*pubkey, cluster_validator_info);
cluster_validator_info.validator = Some(restarted_node);
self.validators.insert(*pubkey, cluster_validator_info);
}
fn exit_restart_node(&mut self, pubkey: &Pubkey, validator_config: ValidatorConfig) {

View File

@ -8,7 +8,10 @@ use solana_core::{
partition_cfg::{Partition, PartitionCfg},
validator::ValidatorConfig,
};
use solana_ledger::{bank_forks::SnapshotConfig, blocktree::Blocktree, snapshot_utils};
use solana_ledger::{
bank_forks::SnapshotConfig, blocktree::Blocktree, leader_schedule::FixedSchedule,
leader_schedule::LeaderSchedule, snapshot_utils,
};
use solana_local_cluster::{
cluster::Cluster,
cluster_tests,
@ -22,13 +25,15 @@ use solana_sdk::{
epoch_schedule::{EpochSchedule, MINIMUM_SLOTS_PER_EPOCH},
genesis_config::OperatingMode,
poh_config::PohConfig,
signature::{Keypair, KeypairUtil},
};
use std::{
collections::{HashMap, HashSet},
fs,
fs, iter,
path::{Path, PathBuf},
sync::Arc,
thread::sleep,
time::Duration,
time::{Duration, Instant},
};
use tempfile::TempDir;
@ -59,7 +64,7 @@ fn test_ledger_cleanup_service() {
);
cluster.close_preserve_ledgers();
//check everyone's ledgers and make sure only ~100 slots are stored
for (_, info) in &cluster.validator_infos {
for (_, info) in &cluster.validators {
let mut slots = 0;
let blocktree = Blocktree::open(&info.info.ledger_path).unwrap();
blocktree
@ -153,6 +158,8 @@ fn test_validator_exit_2() {
let num_nodes = 2;
let mut validator_config = ValidatorConfig::default();
validator_config.rpc_config.enable_validator_exit = true;
validator_config.wait_for_supermajority = true;
let config = ClusterConfig {
cluster_lamports: 10_000,
node_stakes: vec![100; num_nodes],
@ -187,71 +194,166 @@ fn test_leader_failure_4() {
);
}
fn run_network_partition(partitions: &[usize]) {
/// This function runs a network, initiates a partition based on a
/// configuration, resolve the partition, then checks that the network
/// continues to achieve consensus
/// # Arguments
/// * `partitions` - A slice of partition configurations, where each partition
/// configuration is a slice of (usize, bool), representing a node's stake and
/// whether or not it should be killed during the partition
/// * `leader_schedule` - An option that specifies whether the cluster should
/// run with a fixed, predetermined leader schedule
fn run_cluster_partition(
partitions: &[&[(usize, bool)]],
leader_schedule: Option<(LeaderSchedule, Vec<Arc<Keypair>>)>,
) {
solana_logger::setup();
info!("PARTITION_TEST!");
let num_nodes = partitions.iter().sum();
let validator_config = ValidatorConfig::default();
let num_nodes = partitions.len();
let node_stakes: Vec<_> = partitions
.iter()
.flat_map(|p| p.iter().map(|(stake_weight, _)| 100 * *stake_weight as u64))
.collect();
assert_eq!(node_stakes.len(), num_nodes);
let cluster_lamports = node_stakes.iter().sum::<u64>() * 2;
let partition_start_epoch = 2;
let mut validator_config = ValidatorConfig::default();
// Returns:
// 1) The keys for the validiators
// 2) The amount of time it would take to iterate through one full iteration of the given
// leader schedule
let (validator_keys, leader_schedule_time): (Vec<_>, u64) = {
if let Some((leader_schedule, validator_keys)) = leader_schedule {
assert_eq!(validator_keys.len(), num_nodes);
let num_slots_per_rotation = leader_schedule.num_slots() as u64;
let fixed_schedule = FixedSchedule {
start_epoch: partition_start_epoch,
leader_schedule: Arc::new(leader_schedule),
};
validator_config.fixed_leader_schedule = Some(fixed_schedule);
(
validator_keys,
num_slots_per_rotation * clock::DEFAULT_MS_PER_SLOT,
)
} else {
(
iter::repeat_with(|| Arc::new(Keypair::new()))
.take(partitions.len())
.collect(),
10_000,
)
}
};
let validator_pubkeys: Vec<_> = validator_keys.iter().map(|v| v.pubkey()).collect();
let mut config = ClusterConfig {
cluster_lamports: 10_000,
node_stakes: vec![100; num_nodes],
cluster_lamports,
node_stakes,
validator_configs: vec![validator_config.clone(); num_nodes],
validator_keys: Some(validator_keys),
..ClusterConfig::default()
};
let now = timestamp();
let partition_start = now + 60_000;
let partition_end = partition_start + 10_000;
let mut total = 0;
for (j, pn) in partitions.iter().enumerate() {
info!(
"PARTITION_TEST configuring partition {} for nodes {} - {}",
j,
total,
total + *pn
);
for i in total..(total + *pn) {
// Partition needs to start after the first few shorter warmup epochs, otherwise
// no root will be set before the partition is resolved, the leader schedule will
// not be computable, and the cluster wll halt.
let partition_epoch_start_offset = cluster_tests::time_until_nth_epoch(
partition_start_epoch,
config.slots_per_epoch,
config.stakers_slot_offset,
);
// Assume it takes <= 10 seconds for `LocalCluster::new` to boot up.
let local_cluster_boot_time = 10_000;
let partition_start = now + partition_epoch_start_offset + local_cluster_boot_time;
let partition_end = partition_start + leader_schedule_time as u64;
let mut validator_index = 0;
for (i, partition) in partitions.iter().enumerate() {
for _ in partition.iter() {
let mut p1 = Partition::default();
p1.num_partitions = partitions.len();
p1.my_partition = j;
p1.my_partition = i;
p1.start_ts = partition_start;
p1.end_ts = partition_end;
config.validator_configs[i].partition_cfg = Some(PartitionCfg::new(vec![p1]));
config.validator_configs[validator_index].partition_cfg =
Some(PartitionCfg::new(vec![p1]));
validator_index += 1;
}
total += *pn;
}
info!(
"PARTITION_TEST starting cluster with {:?} partitions",
partitions
);
let cluster = LocalCluster::new(&config);
let now = Instant::now();
let mut cluster = LocalCluster::new(&config);
let elapsed = now.elapsed();
assert!(elapsed.as_millis() < local_cluster_boot_time as u128);
let now = timestamp();
let timeout = partition_start as i64 - now as i64;
let timeout = partition_start as u64 - now as u64;
info!(
"PARTITION_TEST sleeping until partition start timeout {}",
timeout
);
let mut dead_nodes = HashSet::new();
if timeout > 0 {
sleep(Duration::from_millis(timeout as u64));
}
info!("PARTITION_TEST done sleeping until partition start timeout");
let now = timestamp();
let timeout = partition_end as i64 - now as i64;
let timeout = partition_end as u64 - now as u64;
info!(
"PARTITION_TEST sleeping until partition end timeout {}",
timeout
);
let mut alive_node_contact_infos = vec![];
let should_exits: Vec<_> = partitions
.iter()
.flat_map(|p| p.iter().map(|(_, should_exit)| should_exit))
.collect();
assert_eq!(should_exits.len(), validator_pubkeys.len());
if timeout > 0 {
sleep(Duration::from_millis(timeout as u64));
// Give partitions time to propagate their blocks from durinig the partition
// after the partition resolves
let propagation_time = leader_schedule_time;
info!("PARTITION_TEST resolving partition");
sleep(Duration::from_millis(timeout));
info!("PARTITION_TEST waiting for blocks to propagate after partition");
sleep(Duration::from_millis(propagation_time));
info!("PARTITION_TEST resuming normal operation");
for (pubkey, should_exit) in validator_pubkeys.iter().zip(should_exits) {
if *should_exit {
info!("Killing validator with id: {}", pubkey);
cluster.exit_node(pubkey);
dead_nodes.insert(*pubkey);
} else {
alive_node_contact_infos.push(
cluster
.validators
.get(pubkey)
.unwrap()
.info
.contact_info
.clone(),
);
}
}
}
info!("PARTITION_TEST done sleeping until partition end timeout");
assert!(alive_node_contact_infos.len() > 0);
info!("PARTITION_TEST discovering nodes");
let (cluster_nodes, _) = discover_cluster(&cluster.entry_point_info.gossip, num_nodes).unwrap();
let (cluster_nodes, _) = discover_cluster(
&alive_node_contact_infos[0].gossip,
alive_node_contact_infos.len(),
)
.unwrap();
info!("PARTITION_TEST discovered {} nodes", cluster_nodes.len());
info!("PARTITION_TEST looking for new roots on all nodes");
let mut roots = vec![HashSet::new(); cluster_nodes.len()];
let mut roots = vec![HashSet::new(); alive_node_contact_infos.len()];
let mut done = false;
while !done {
for (i, ingress_node) in cluster_nodes.iter().enumerate() {
for (i, ingress_node) in alive_node_contact_infos.iter().enumerate() {
let client = create_client(
ingress_node.client_facing_addr(),
solana_core::cluster_info::VALIDATOR_PORT_RANGE,
@ -271,22 +373,64 @@ fn run_network_partition(partitions: &[usize]) {
#[ignore]
#[test]
#[serial]
fn test_network_partition_1_2() {
run_network_partition(&[1, 2])
fn test_cluster_partition_1_2() {
run_cluster_partition(&[&[(1, false)], &[(1, false), (1, false)]], None)
}
#[allow(unused_attributes)]
#[ignore]
#[test]
#[serial]
fn test_network_partition_1_1() {
run_network_partition(&[1, 1])
fn test_cluster_partition_1_1() {
run_cluster_partition(&[&[(1, false)], &[(1, false)]], None)
}
#[test]
#[serial]
fn test_network_partition_1_1_1() {
run_network_partition(&[1, 1, 1])
fn test_cluster_partition_1_1_1() {
run_cluster_partition(&[&[(1, false)], &[(1, false)], &[(1, false)]], None)
}
#[test]
#[serial]
fn test_kill_partition() {
// This test:
// 1) Spins up three partitions
// 2) Forces more slots in the leader schedule for the first partition so
// that this partition will be the heaviiest
// 3) Schedules the other validators for sufficient slots in the schedule
// so that they will still be locked out of voting for the major partitoin
// when the partition resolves
// 4) Kills the major partition. Validators are locked out, but should be
// able to reset to the major partition
// 5) Check for recovery
let mut leader_schedule = vec![];
let num_slots_per_validator = 8;
let partitions: [&[(usize, bool)]; 3] = [&[(9, true)], &[(10, false)], &[(10, false)]];
let validator_keys: Vec<_> = iter::repeat_with(|| Arc::new(Keypair::new()))
.take(partitions.len())
.collect();
for (i, k) in validator_keys.iter().enumerate() {
let num_slots = {
if i == 0 {
// Set up the leader to have 50% of the slots
num_slots_per_validator * (partitions.len() - 1)
} else {
num_slots_per_validator
}
};
for _ in 0..num_slots {
leader_schedule.push(k.pubkey())
}
}
run_cluster_partition(
&partitions,
Some((
LeaderSchedule::new_from_schedule(leader_schedule),
validator_keys,
)),
)
}
#[test]
@ -318,10 +462,7 @@ fn test_two_unbalanced_stakes() {
);
cluster.close_preserve_ledgers();
let leader_pubkey = cluster.entry_point_info.id;
let leader_ledger = cluster.validator_infos[&leader_pubkey]
.info
.ledger_path
.clone();
let leader_ledger = cluster.validators[&leader_pubkey].info.ledger_path.clone();
cluster_tests::verify_ledger_ticks(&leader_ledger, num_ticks_per_slot as usize);
}
@ -559,6 +700,7 @@ fn test_snapshots_blocktree_floor() {
cluster.add_validator(
&validator_snapshot_test_config.validator_config,
validator_stake,
Arc::new(Keypair::new()),
);
let all_pubkeys = cluster.get_node_pubkeys();
let validator_id = all_pubkeys
@ -582,7 +724,7 @@ fn test_snapshots_blocktree_floor() {
// Check the validator ledger doesn't contain any slots < slot_floor
cluster.close_preserve_ledgers();
let validator_ledger_path = &cluster.validator_infos[&validator_id];
let validator_ledger_path = &cluster.validators[&validator_id];
let blocktree = Blocktree::open(&validator_ledger_path.info.ledger_path).unwrap();
// Skip the zeroth slot in blocktree that the ledger is initialized with
@ -720,7 +862,7 @@ fn test_faulty_node(faulty_node_type: BroadcastStageType) {
);
let corrupt_node = cluster
.validator_infos
.validators
.iter()
.find(|(_, v)| v.config.broadcast_stage_type == faulty_node_type)
.unwrap()
@ -767,10 +909,7 @@ fn test_no_voting() {
cluster.close_preserve_ledgers();
let leader_pubkey = cluster.entry_point_info.id;
let ledger_path = cluster.validator_infos[&leader_pubkey]
.info
.ledger_path
.clone();
let ledger_path = cluster.validators[&leader_pubkey].info.ledger_path.clone();
let ledger = Blocktree::open(&ledger_path).unwrap();
for i in 0..2 * VOTE_THRESHOLD_DEPTH {
let meta = ledger.meta(i as u64).unwrap().unwrap();
@ -849,7 +988,7 @@ fn run_repairman_catchup(num_repairmen: u64) {
// Start up a new node, wait for catchup. Backwards repair won't be sufficient because the
// leader is sending shreds past this validator's first two confirmed epochs. Thus, the repairman
// protocol will have to kick in for this validator to repair.
cluster.add_validator(&validator_config, repairee_stake);
cluster.add_validator(&validator_config, repairee_stake, Arc::new(Keypair::new()));
let all_pubkeys = cluster.get_node_pubkeys();
let repairee_id = all_pubkeys

View File

@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-log-analyzer"
description = "The solana cluster network analysis tool"
version = "0.21.1"
version = "0.21.8"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@ -17,8 +17,8 @@ semver = "0.9.0"
serde = "1.0.102"
serde_derive = "1.0.102"
serde_json = "1.0.41"
solana-clap-utils = { path = "../clap-utils", version = "0.21.1" }
solana-logger = { path = "../logger", version = "0.21.1" }
solana-clap-utils = { path = "../clap-utils", version = "0.21.8" }
solana-logger = { path = "../logger", version = "0.21.8" }
[[bin]]
name = "solana-log-analyzer"

View File

@ -1,6 +1,6 @@
[package]
name = "solana-logger"
version = "0.21.1"
version = "0.21.8"
description = "Solana Logger"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"

View File

@ -1,7 +1,7 @@
[package]
name = "solana-measure"
description = "Blockchain, Rebuilt for Scale"
version = "0.21.1"
version = "0.21.8"
documentation = "https://docs.rs/solana"
homepage = "https://solana.com/"
readme = "../README.md"
@ -11,4 +11,10 @@ license = "Apache-2.0"
edition = "2018"
[dependencies]
solana-sdk = { path = "../sdk", version = "0.21.1" }
log = "0.4.8"
solana-sdk = { path = "../sdk", version = "0.21.8" }
solana-metrics = { path = "../metrics", version = "0.21.8" }
[target."cfg(unix)".dependencies]
jemallocator = "0.3.2"
jemalloc-ctl = "0.3.2"

View File

@ -1 +1,9 @@
pub mod measure;
pub mod thread_mem_usage;
#[cfg(unix)]
extern crate jemallocator;
#[cfg(unix)]
#[global_allocator]
static ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc;

View File

@ -28,6 +28,7 @@ impl Allocatedp {
Self {}
}
/// Return current thread heap usage
pub fn get(&self) -> u64 {
#[cfg(unix)]
{
@ -36,4 +37,9 @@ impl Allocatedp {
#[cfg(not(unix))]
0
}
/// Return the difference in thread heap usage since a previous `get()`
pub fn since(&self, previous: u64) -> i64 {
self.get() as i64 - previous as i64
}
}

View File

@ -1,6 +1,6 @@
[package]
name = "solana-merkle-tree"
version = "0.21.1"
version = "0.21.8"
description = "Solana Merkle Tree"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -9,7 +9,7 @@ homepage = "https://solana.com/"
edition = "2018"
[dependencies]
solana-sdk = { path = "../sdk", version = "0.21.1" }
solana-sdk = { path = "../sdk", version = "0.21.8" }
[dev-dependencies]
hex = "0.4.0"

View File

@ -1,6 +1,6 @@
[package]
name = "solana-metrics"
version = "0.21.1"
version = "0.21.8"
description = "Solana Metrics"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -13,7 +13,7 @@ env_logger = "0.7.1"
lazy_static = "1.4.0"
log = "0.4.8"
reqwest = { version = "0.9.22", default-features = false, features = ["rustls-tls"] }
solana-sdk = { path = "../sdk", version = "0.21.1" }
solana-sdk = { path = "../sdk", version = "0.21.8" }
sys-info = "0.5.8"
[dev-dependencies]

View File

@ -95,7 +95,24 @@ elif channel == 'stable':
'text': 'testnet-perf',
'value': 'testnet-perf'}],
'query': 'testnet,testnet-perf',
'type': 'custom'}]
'type': 'custom'},
{'allValue': ".*",
'datasource': '$datasource',
'hide': 0,
'includeAll': True,
'label': 'HostID',
'multi': False,
'name': 'hostid',
'options': [],
'query': 'SELECT DISTINCT(\"id\") FROM \"$testnet\".\"autogen\".\"validator-new\" ',
'refresh': 2,
'regex': '',
'sort': 1,
'tagValuesQuery': '',
'tags': [],
'tagsQuery': '',
'type': 'query',
'useTags': False}]
else:
# Non-stable dashboard only allows the user to select between all testnet
# databases

View File

@ -4538,7 +4538,7 @@
"x": 0,
"y": 50
},
"id": 68,
"id": 38,
"links": [],
"pageSize": null,
"scroll": true,
@ -4621,7 +4621,7 @@
"x": 0,
"y": 55
},
"id": 38,
"id": 39,
"panels": [],
"title": "Bench Exchange",
"type": "row"
@ -4639,7 +4639,7 @@
"x": 0,
"y": 56
},
"id": 39,
"id": 40,
"legend": {
"avg": false,
"current": false,
@ -4790,7 +4790,7 @@
"x": 12,
"y": 56
},
"id": 40,
"id": 41,
"legend": {
"avg": false,
"current": false,
@ -4973,7 +4973,7 @@
"x": 0,
"y": 61
},
"id": 41,
"id": 42,
"panels": [],
"title": "Validator Streamer",
"type": "row"
@ -4991,7 +4991,7 @@
"x": 0,
"y": 62
},
"id": 42,
"id": 43,
"legend": {
"alignAsTable": false,
"avg": false,
@ -5093,45 +5093,6 @@
],
"tags": []
},
{
"groupBy": [
{
"params": [
"$__interval"
],
"type": "time"
},
{
"params": [
"null"
],
"type": "fill"
}
],
"hide": false,
"measurement": "cluster_info-vote-count",
"orderByTime": "ASC",
"policy": "autogen",
"query": "SELECT mean(\"clone_and_seed\") AS \"clone_and_seed\" FROM \"$testnet\".\"autogen\".\"broadcast-bank-stats\" WHERE host_id::tag =~ /$hostid/ AND $timeFilter GROUP BY time($__interval)",
"rawQuery": true,
"refId": "C",
"resultFormat": "time_series",
"select": [
[
{
"params": [
"count"
],
"type": "field"
},
{
"params": [],
"type": "sum"
}
]
],
"tags": []
},
{
"groupBy": [
{
@ -5248,6 +5209,45 @@
]
],
"tags": []
},
{
"groupBy": [
{
"params": [
"$__interval"
],
"type": "time"
},
{
"params": [
"null"
],
"type": "fill"
}
],
"hide": false,
"measurement": "cluster_info-vote-count",
"orderByTime": "ASC",
"policy": "autogen",
"query": "SELECT mean(\"insertion_time\") AS \"insertion_time\" FROM \"$testnet\".\"autogen\".\"broadcast-bank-stats\" WHERE host_id::tag =~ /$hostid/ AND $timeFilter GROUP BY time($__interval)",
"rawQuery": true,
"refId": "D",
"resultFormat": "time_series",
"select": [
[
{
"params": [
"count"
],
"type": "field"
},
{
"params": [],
"type": "sum"
}
]
],
"tags": []
}
],
"thresholds": [],
@ -5310,7 +5310,7 @@
"x": 8,
"y": 62
},
"id": 43,
"id": 44,
"legend": {
"alignAsTable": false,
"avg": false,
@ -5668,7 +5668,7 @@
"x": 16,
"y": 62
},
"id": 44,
"id": 45,
"legend": {
"alignAsTable": false,
"avg": false,
@ -5980,7 +5980,7 @@
"x": 0,
"y": 68
},
"id": 45,
"id": 46,
"legend": {
"alignAsTable": false,
"avg": false,
@ -6636,7 +6636,7 @@
"x": 16,
"y": 68
},
"id": 47,
"id": 48,
"legend": {
"alignAsTable": false,
"avg": false,
@ -6757,7 +6757,7 @@
"x": 16,
"y": 73
},
"id": 48,
"id": 49,
"legend": {
"alignAsTable": false,
"avg": false,
@ -6913,7 +6913,7 @@
"x": 0,
"y": 74
},
"id": 49,
"id": 50,
"legend": {
"alignAsTable": false,
"avg": false,
@ -7069,7 +7069,7 @@
"x": 8,
"y": 74
},
"id": 50,
"id": 51,
"legend": {
"alignAsTable": false,
"avg": false,
@ -7184,7 +7184,7 @@
"x": 16,
"y": 78
},
"id": 51,
"id": 52,
"legend": {
"alignAsTable": false,
"avg": false,
@ -7388,7 +7388,7 @@
"x": 0,
"y": 79
},
"id": 52,
"id": 53,
"legend": {
"alignAsTable": false,
"avg": false,
@ -7690,7 +7690,7 @@
"x": 8,
"y": 79
},
"id": 53,
"id": 54,
"legend": {
"alignAsTable": false,
"avg": false,
@ -7838,7 +7838,7 @@
"x": 0,
"y": 84
},
"id": 54,
"id": 55,
"panels": [],
"title": "Tower Consensus",
"type": "row"
@ -7861,7 +7861,7 @@
"x": 0,
"y": 85
},
"id": 55,
"id": 56,
"legend": {
"alignAsTable": false,
"avg": false,
@ -8021,7 +8021,7 @@
"x": 8,
"y": 85
},
"id": 56,
"id": 57,
"legend": {
"alignAsTable": false,
"avg": false,
@ -8181,7 +8181,7 @@
"x": 16,
"y": 85
},
"id": 57,
"id": 58,
"legend": {
"alignAsTable": false,
"avg": false,
@ -8366,7 +8366,7 @@
"x": 0,
"y": 90
},
"id": 58,
"id": 59,
"panels": [],
"repeat": null,
"title": "IP Network",
@ -8385,7 +8385,7 @@
"x": 0,
"y": 91
},
"id": 59,
"id": 60,
"legend": {
"alignAsTable": false,
"avg": false,
@ -8618,7 +8618,7 @@
"x": 12,
"y": 91
},
"id": 60,
"id": 61,
"legend": {
"alignAsTable": false,
"avg": false,
@ -8771,7 +8771,7 @@
"x": 0,
"y": 96
},
"id": 61,
"id": 62,
"panels": [],
"title": "Signature Verification",
"type": "row"
@ -8789,7 +8789,7 @@
"x": 0,
"y": 97
},
"id": 62,
"id": 63,
"legend": {
"avg": false,
"current": false,
@ -8830,7 +8830,83 @@
"measurement": "cluster_info-vote-count",
"orderByTime": "ASC",
"policy": "autogen",
"query": "SELECT max(\"total_time_ms\") AS \"max\" FROM \"$testnet\".\"autogen\".\"sigverify_stage-total_verify_time\" WHERE $timeFilter GROUP BY time($__interval) FILL(0)\n\n",
"query": "SELECT mean(\"num_packets\") AS \"num_packets\" FROM \"$testnet\".\"autogen\".\"sigverify_stage-total_verify_time\" WHERE $timeFilter GROUP BY time(500ms) FILL(0)\n\n",
"rawQuery": true,
"refId": "B",
"resultFormat": "time_series",
"select": [
[
{
"params": [
"count"
],
"type": "field"
},
{
"params": [],
"type": "sum"
}
]
],
"tags": []
},
{
"groupBy": [
{
"params": [
"$__interval"
],
"type": "time"
},
{
"params": [
"null"
],
"type": "fill"
}
],
"measurement": "cluster_info-vote-count",
"orderByTime": "ASC",
"policy": "autogen",
"query": "SELECT mean(\"verify_time_ms\") AS \"verify_time\" FROM \"$testnet\".\"autogen\".\"sigverify_stage-total_verify_time\" WHERE $timeFilter GROUP BY time(500ms) FILL(0)\n\n",
"rawQuery": true,
"refId": "C",
"resultFormat": "time_series",
"select": [
[
{
"params": [
"count"
],
"type": "field"
},
{
"params": [],
"type": "sum"
}
]
],
"tags": []
},
{
"groupBy": [
{
"params": [
"$__interval"
],
"type": "time"
},
{
"params": [
"null"
],
"type": "fill"
}
],
"measurement": "cluster_info-vote-count",
"orderByTime": "ASC",
"policy": "autogen",
"query": "SELECT mean(\"recv_time\") AS \"recv_time\" FROM \"$testnet\".\"autogen\".\"sigverify_stage-total_verify_time\" WHERE $timeFilter GROUP BY time(500ms) FILL(0)\n\n",
"rawQuery": true,
"refId": "A",
"resultFormat": "time_series",
@ -8910,7 +8986,7 @@
"x": 12,
"y": 97
},
"id": 63,
"id": 64,
"legend": {
"alignAsTable": false,
"avg": false,
@ -9059,7 +9135,7 @@
"x": 0,
"y": 102
},
"id": 64,
"id": 65,
"panels": [],
"title": "Snapshots",
"type": "row"
@ -9077,7 +9153,7 @@
"x": 0,
"y": 103
},
"id": 65,
"id": 66,
"legend": {
"avg": false,
"current": false,
@ -9269,7 +9345,7 @@
"x": 8,
"y": 103
},
"id": 66,
"id": 67,
"legend": {
"avg": false,
"current": false,
@ -9461,7 +9537,7 @@
"x": 16,
"y": 103
},
"id": 67,
"id": 68,
"legend": {
"avg": false,
"current": false,
@ -9650,7 +9726,7 @@
"x": 0,
"y": 109
},
"id": 74,
"id": 69,
"panels": [],
"title": "Resources",
"type": "row"
@ -9905,6 +9981,7 @@
{
"allValue": ".*",
"current": {
"selected": false,
"text": "testnet",
"value": "testnet"
},
@ -9979,4 +10056,4 @@
"title": "Testnet Monitor (edge)",
"uid": "testnet-edge",
"version": 2
}
}

View File

@ -278,7 +278,7 @@ setup_validator_accounts() {
return 0
}
rpc_url=$($solana_gossip get-rpc-url --entrypoint "$gossip_entrypoint")
rpc_url=$($solana_gossip get-rpc-url --entrypoint "$gossip_entrypoint" --any)
[[ -r "$identity_keypair_path" ]] || $solana_keygen new --no-passphrase -so "$identity_keypair_path"
[[ -r "$voting_keypair_path" ]] || $solana_keygen new --no-passphrase -so "$voting_keypair_path"

View File

@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-net-shaper"
description = "The solana cluster network shaping tool"
version = "0.21.1"
version = "0.21.8"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@ -16,8 +16,8 @@ semver = "0.9.0"
serde = "1.0.102"
serde_derive = "1.0.102"
serde_json = "1.0.41"
solana-clap-utils = { path = "../clap-utils", version = "0.21.1" }
solana-logger = { path = "../logger", version = "0.21.1" }
solana-clap-utils = { path = "../clap-utils", version = "0.21.8" }
solana-logger = { path = "../logger", version = "0.21.8" }
rand = "0.6.5"
[[bin]]

View File

@ -1,6 +1,6 @@
[package]
name = "solana-net-utils"
version = "0.21.1"
version = "0.21.8"
description = "Solana Network Utilities"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -18,8 +18,8 @@ rand = "0.6.1"
serde = "1.0.102"
serde_derive = "1.0.102"
socket2 = "0.3.11"
solana-clap-utils = { path = "../clap-utils", version = "0.21.1" }
solana-logger = { path = "../logger", version = "0.21.1" }
solana-clap-utils = { path = "../clap-utils", version = "0.21.8" }
solana-logger = { path = "../logger", version = "0.21.8" }
tokio = "0.1"
tokio-codec = "0.1"

View File

@ -489,6 +489,7 @@ startBootstrapLeader() {
ssh "${sshOptions[@]}" -n "$ipAddress" \
"./solana/net/remote/remote-node.sh \
$deployMethod \
$ipAddress \
bootstrap-leader \
$entrypointIp \
$((${#validatorIpList[@]} + ${#blockstreamerIpList[@]} + ${#archiverIpList[@]})) \
@ -558,6 +559,7 @@ startNode() {
ssh "${sshOptions[@]}" -n "$ipAddress" \
"./solana/net/remote/remote-node.sh \
$deployMethod \
$ipAddress \
$nodeType \
$entrypointIp \
$((${#validatorIpList[@]} + ${#blockstreamerIpList[@]} + ${#archiverIpList[@]})) \

View File

@ -5,27 +5,28 @@ cd "$(dirname "$0")"/../..
set -x
deployMethod="$1"
nodeType="$2"
entrypointIp="$3"
numNodes="$4"
if [[ -n $5 ]]; then
export RUST_LOG="$5"
ipAddress="$2"
nodeType="$3"
entrypointIp="$4"
numNodes="$5"
if [[ -n $6 ]]; then
export RUST_LOG="$6"
fi
skipSetup="$6"
failOnValidatorBootupFailure="$7"
externalPrimordialAccountsFile="$8"
maybeDisableAirdrops="$9"
internalNodesStakeLamports="${10}"
internalNodesLamports="${11}"
nodeIndex="${12}"
numBenchTpsClients="${13}"
benchTpsExtraArgs="${14}"
numBenchExchangeClients="${15}"
benchExchangeExtraArgs="${16}"
genesisOptions="${17}"
extraNodeArgs="${18}"
gpuMode="${19:-auto}"
GEOLOCATION_API_KEY="${20}"
skipSetup="$7"
failOnValidatorBootupFailure="$8"
externalPrimordialAccountsFile="$9"
maybeDisableAirdrops="${10}"
internalNodesStakeLamports="${11}"
internalNodesLamports="${12}"
nodeIndex="${13}"
numBenchTpsClients="${14}"
benchTpsExtraArgs="${15}"
numBenchExchangeClients="${16}"
benchExchangeExtraArgs="${17}"
genesisOptions="${18}"
extraNodeArgs="${19}"
gpuMode="${20:-auto}"
GEOLOCATION_API_KEY="${21}"
set +x
# Use a very large stake (relative to the default multinode-demo/ stake of 42)
@ -42,6 +43,7 @@ missing() {
}
[[ -n $deployMethod ]] || missing deployMethod
[[ -n $ipAddress ]] || missing ipAddress
[[ -n $nodeType ]] || missing nodeType
[[ -n $entrypointIp ]] || missing entrypointIp
[[ -n $numNodes ]] || missing numNodes
@ -284,18 +286,22 @@ EOF
fi
args=(
--entrypoint "$entrypointIp:8001"
--gossip-port 8001
--rpc-port 8899
)
if [[ $nodeType = blockstreamer ]]; then
args+=(
--blockstream /tmp/solana-blockstream.sock
--entrypoint "$ipAddress:8001"
--gossip-port 9001
--no-voting
--dev-no-sigverify
--blockstream /tmp/solana-blockstream.sock
)
else
args+=(--enable-rpc-exit)
args+=(
--entrypoint "$entrypointIp:8001"
--gossip-port 8001
--enable-rpc-exit
)
if [[ -n $internalNodesLamports ]]; then
args+=(--node-lamports "$internalNodesLamports")
fi
@ -365,6 +371,11 @@ EOF
cat >> ~/solana/on-reboot <<EOF
~/solana/restart-explorer
echo --- Starting gossip spy node
ln -sfT gossip.log.\$now gossip.log
nohup solana-gossip spy --gossip-port 8001 --gossip-host "$ipAddress" --entrypoint $entrypointIp:8001 > gossip.log.\$now 2>&1 &
sleep 1
head gossip.log
EOF
fi

View File

@ -1,6 +1,6 @@
[package]
name = "solana-perf"
version = "0.21.1"
version = "0.21.8"
description = "Solana Performance APIs"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -18,11 +18,11 @@ serde_derive = "1.0.102"
dlopen_derive = "0.1.4"
lazy_static = "1.4.0"
log = "0.4.8"
solana-sdk = { path = "../sdk", version = "0.21.1" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "0.21.1" }
solana-budget-program = { path = "../programs/budget", version = "0.21.1" }
solana-logger = { path = "../logger", version = "0.21.1" }
solana-metrics = { path = "../metrics", version = "0.21.1" }
solana-sdk = { path = "../sdk", version = "0.21.8" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "0.21.8" }
solana-budget-program = { path = "../programs/budget", version = "0.21.8" }
solana-logger = { path = "../logger", version = "0.21.8" }
solana-metrics = { path = "../metrics", version = "0.21.8" }
[lib]
name = "solana_perf"

524
programs/bpf/Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@ -1,7 +1,7 @@
[package]
name = "solana-bpf-programs"
description = "Blockchain, Rebuilt for Scale"
version = "0.21.1"
version = "0.21.8"
documentation = "https://docs.rs/solana"
homepage = "https://solana.com/"
readme = "README.md"
@ -22,10 +22,10 @@ walkdir = "2"
bincode = "1.1.4"
byteorder = "1.3.2"
elf = "0.0.10"
solana-bpf-loader-program = { path = "../bpf_loader", version = "0.21.1" }
solana-logger = { path = "../../logger", version = "0.21.1" }
solana-runtime = { path = "../../runtime", version = "0.21.1" }
solana-sdk = { path = "../../sdk", version = "0.21.1" }
solana-bpf-loader-program = { path = "../bpf_loader", version = "0.21.8" }
solana-logger = { path = "../../logger", version = "0.21.8" }
solana-runtime = { path = "../../runtime", version = "0.21.8" }
solana-sdk = { path = "../../sdk", version = "0.21.8" }
solana_rbpf = "=0.1.19"
[[bench]]

View File

@ -3,7 +3,7 @@
[package]
name = "solana-bpf-rust-128bit"
version = "0.21.1"
version = "0.21.8"
description = "Solana BPF test program written in Rust"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -12,11 +12,11 @@ homepage = "https://solana.com/"
edition = "2018"
[dependencies]
solana-sdk = { path = "../../../../sdk/", version = "0.21.1", default-features = false }
solana-bpf-rust-128bit-dep = { path = "../128bit_dep", version = "0.21.1" }
solana-sdk = { path = "../../../../sdk/", version = "0.21.8", default-features = false }
solana-bpf-rust-128bit-dep = { path = "../128bit_dep", version = "0.21.8" }
[dev_dependencies]
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.21.1" }
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.21.8" }
[features]
program = ["solana-sdk/program"]

View File

@ -3,7 +3,7 @@
[package]
name = "solana-bpf-rust-128bit-dep"
version = "0.21.1"
version = "0.21.8"
description = "Solana BPF test program written in Rust"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -12,10 +12,10 @@ homepage = "https://solana.com/"
edition = "2018"
[dependencies]
solana-sdk = { path = "../../../../sdk/", version = "0.21.1", default-features = false }
solana-sdk = { path = "../../../../sdk/", version = "0.21.8", default-features = false }
[dev_dependencies]
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.21.1" }
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.21.8" }
[features]
program = ["solana-sdk/program"]

View File

@ -3,7 +3,7 @@
[package]
name = "solana-bpf-rust-alloc"
version = "0.21.1"
version = "0.21.8"
description = "Solana BPF test program written in Rust"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -12,10 +12,10 @@ homepage = "https://solana.com/"
edition = "2018"
[dependencies]
solana-sdk = { path = "../../../../sdk/", version = "0.21.1", default-features = false }
solana-sdk = { path = "../../../../sdk/", version = "0.21.8", default-features = false }
[dev_dependencies]
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.21.1" }
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.21.8" }
[features]
program = ["solana-sdk/program"]

View File

@ -3,7 +3,7 @@
[package]
name = "solana-bpf-rust-dep-crate"
version = "0.21.1"
version = "0.21.8"
description = "Solana BPF test program written in Rust"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -13,10 +13,10 @@ edition = "2018"
[dependencies]
byteorder = { version = "1", default-features = false }
solana-sdk = { path = "../../../../sdk/", version = "0.21.1", default-features = false }
solana-sdk = { path = "../../../../sdk/", version = "0.21.8", default-features = false }
[dev_dependencies]
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.21.1" }
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.21.8" }
[features]
program = ["solana-sdk/program"]

View File

@ -3,7 +3,7 @@
[package]
name = "solana-bpf-rust-external-spend"
version = "0.21.1"
version = "0.21.8"
description = "Solana BPF test program written in Rust"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -12,10 +12,10 @@ homepage = "https://solana.com/"
edition = "2018"
[dependencies]
solana-sdk = { path = "../../../../sdk/", version = "0.21.1", default-features = false }
solana-sdk = { path = "../../../../sdk/", version = "0.21.8", default-features = false }
[dev_dependencies]
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.21.1" }
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.21.8" }
[features]
program = ["solana-sdk/program"]

View File

@ -3,7 +3,7 @@
[package]
name = "solana-bpf-rust-iter"
version = "0.21.1"
version = "0.21.8"
description = "Solana BPF test program written in Rust"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -12,10 +12,10 @@ homepage = "https://solana.com/"
edition = "2018"
[dependencies]
solana-sdk = { path = "../../../../sdk/", version = "0.21.1", default-features = false }
solana-sdk = { path = "../../../../sdk/", version = "0.21.8", default-features = false }
[dev_dependencies]
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.21.1" }
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.21.8" }
[features]
program = ["solana-sdk/program"]

View File

@ -3,7 +3,7 @@
[package]
name = "solana-bpf-rust-many-args"
version = "0.21.1"
version = "0.21.8"
description = "Solana BPF test program written in Rust"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -12,11 +12,11 @@ homepage = "https://solana.com/"
edition = "2018"
[dependencies]
solana-sdk = { path = "../../../../sdk/", version = "0.21.1", default-features = false }
solana-bpf-rust-many-args-dep = { path = "../many_args_dep", version = "0.21.1" }
solana-sdk = { path = "../../../../sdk/", version = "0.21.8", default-features = false }
solana-bpf-rust-many-args-dep = { path = "../many_args_dep", version = "0.21.8" }
[dev_dependencies]
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.21.1" }
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.21.8" }
[features]
program = ["solana-sdk/program"]

View File

@ -3,7 +3,7 @@
[package]
name = "solana-bpf-rust-many-args-dep"
version = "0.21.1"
version = "0.21.8"
description = "Solana BPF test program written in Rust"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -12,10 +12,10 @@ homepage = "https://solana.com/"
edition = "2018"
[dependencies]
solana-sdk = { path = "../../../../sdk/", version = "0.21.1", default-features = false }
solana-sdk = { path = "../../../../sdk/", version = "0.21.8", default-features = false }
[dev_dependencies]
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.21.1" }
solana-sdk-bpf-test = { path = "../../../../sdk/bpf/rust/test", version = "0.21.8" }
[features]
program = ["solana-sdk/program"]

Some files were not shown because too many files have changed in this diff Show More