Compare commits
97 Commits
e8db4997de
8545ca2cc5
bbbe44b175
207b80035e
7dd6ceeb49
cac5d9005c
33aa2dbb1a
123e444faa
d0de05acaa
f895c3f66d
3cef0f0c03
1f1c287c6c
b340f3a67f
ae71f2a1a9
3f91c63110
5a0f892de4
92ae07aa93
3922615cbe
3b5c86dd7c
5f62ad77bd
0f918d0b54
3616b683f6
0e3dc27c8d
d0d8ac7eba
b414d151b8
5766359cde
b8ec5de36d
28ba9e2fa8
80d780d666
e599a90333
efb8b6bf2f
ea3eddfec4
3a6857c749
0efb12bb6b
f87bbb9b09
7e17ea8705
fbaed07884
b2cc259197
17205c3e19
6b6d5ba4bb
60efe30911
802695934e
92c893fe2c
96b303e5d9
854b6fc1ce
a6646f3124
60ce18250d
1f4222092a
787096f503
8f6ead2c0f
3fd09435af
f4652561d4
d7a3610d45
b2ccf016d6
c0f62de2dd
d9a9d6547f
c86bf60a40
03ed4b868d
de83bce0ce
8b494272bf
ee5c890c5d
a4f5397ea4
66f3b4a3d6
3a4cd94391
f4658f3be2
41c70b9f41
d1c92db7ab
a8721a5e19
dec9d00a64
09252ef084
c9d568c910
b054f5f12d
23b4df0fef
ca35841cb1
33d77357bf
22e84abe5a
9b532b16a9
c5a98a5b57
22d60d496b
8243792430
1d1d85e0c5
9b0e40d1dc
a231fbe978
cd2c09c473
774cd48cb1
d580603cd4
f0c931ea84
74b2eb4328
f1e9a944ef
4cb38ddf01
593fde628c
34fa025b17
33843f824a
542bda0a6f
d8bdbbf291
168b0f71f5
be79d97dde
Cargo.lock (generated): 1593 changed lines. File diff suppressed because it is too large.
@@ -34,9 +34,6 @@ members = [
     "programs/exchange_api",
     "programs/exchange_program",
     "programs/failure_program",
-    "programs/move_loader_api",
-    "programs/move_loader_program",
-    "programs/librapay_api",
     "programs/noop_program",
     "programs/stake_api",
     "programs/stake_program",
@@ -61,4 +58,7 @@ members = [

 exclude = [
     "programs/bpf",
+    "programs/move_loader_api",
+    "programs/move_loader_program",
+    "programs/librapay_api",
 ]
@@ -78,7 +78,7 @@ $ source $HOME/.cargo/env
 $ rustup component add rustfmt
 ```

-If your rustc version is lower than 1.38.0, please update it:
+If your rustc version is lower than 1.39.0, please update it:

 ```bash
 $ rustup update
@@ -2,7 +2,7 @@
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 edition = "2018"
 name = "solana-archiver"
-version = "0.20.0"
+version = "0.20.5"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -10,9 +10,9 @@ homepage = "https://solana.com/"
 [dependencies]
 clap = "2.33.0"
 console = "0.9.0"
-solana-core = { path = "../core", version = "0.20.0" }
-solana-logger = { path = "../logger", version = "0.20.0" }
-solana-metrics = { path = "../metrics", version = "0.20.0" }
-solana-netutil = { path = "../netutil", version = "0.20.0" }
-solana-sdk = { path = "../sdk", version = "0.20.0" }
+solana-core = { path = "../core", version = "0.20.5" }
+solana-logger = { path = "../logger", version = "0.20.5" }
+solana-metrics = { path = "../metrics", version = "0.20.5" }
+solana-netutil = { path = "../netutil", version = "0.20.5" }
+solana-sdk = { path = "../sdk", version = "0.20.5" }
@@ -2,7 +2,7 @@
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 edition = "2018"
 name = "solana-banking-bench"
-version = "0.20.0"
+version = "0.20.5"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -10,11 +10,11 @@ homepage = "https://solana.com/"
 [dependencies]
 log = "0.4.6"
 rayon = "1.2.0"
-solana-core = { path = "../core", version = "0.20.0" }
-solana-ledger = { path = "../ledger", version = "0.20.0" }
-solana-logger = { path = "../logger", version = "0.20.0" }
-solana-runtime = { path = "../runtime", version = "0.20.0" }
-solana-measure = { path = "../measure", version = "0.20.0" }
-solana-sdk = { path = "../sdk", version = "0.20.0" }
+solana-core = { path = "../core", version = "0.20.5" }
+solana-ledger = { path = "../ledger", version = "0.20.5" }
+solana-logger = { path = "../logger", version = "0.20.5" }
+solana-runtime = { path = "../runtime", version = "0.20.5" }
+solana-measure = { path = "../measure", version = "0.20.5" }
+solana-sdk = { path = "../sdk", version = "0.20.5" }
 rand = "0.6.5"
 crossbeam-channel = "0.3"
@@ -2,7 +2,7 @@
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 edition = "2018"
 name = "solana-bench-exchange"
-version = "0.20.0"
+version = "0.20.5"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -24,16 +24,16 @@ serde_derive = "1.0.101"
 serde_json = "1.0.41"
 serde_yaml = "0.8.11"
 # solana-runtime = { path = "../solana/runtime"}
-solana-core = { path = "../core", version = "0.20.0" }
-solana-genesis = { path = "../genesis", version = "0.20.0" }
-solana-client = { path = "../client", version = "0.20.0" }
-solana-drone = { path = "../drone", version = "0.20.0" }
-solana-exchange-api = { path = "../programs/exchange_api", version = "0.20.0" }
-solana-exchange-program = { path = "../programs/exchange_program", version = "0.20.0" }
-solana-logger = { path = "../logger", version = "0.20.0" }
-solana-metrics = { path = "../metrics", version = "0.20.0" }
-solana-netutil = { path = "../netutil", version = "0.20.0" }
-solana-runtime = { path = "../runtime", version = "0.20.0" }
-solana-sdk = { path = "../sdk", version = "0.20.0" }
+solana-core = { path = "../core", version = "0.20.5" }
+solana-genesis = { path = "../genesis", version = "0.20.5" }
+solana-client = { path = "../client", version = "0.20.5" }
+solana-drone = { path = "../drone", version = "0.20.5" }
+solana-exchange-api = { path = "../programs/exchange_api", version = "0.20.5" }
+solana-exchange-program = { path = "../programs/exchange_program", version = "0.20.5" }
+solana-logger = { path = "../logger", version = "0.20.5" }
+solana-metrics = { path = "../metrics", version = "0.20.5" }
+solana-netutil = { path = "../netutil", version = "0.20.5" }
+solana-runtime = { path = "../runtime", version = "0.20.5" }
+solana-sdk = { path = "../sdk", version = "0.20.5" }
 untrusted = "0.7.0"
 ws = "0.9.1"
@@ -2,13 +2,13 @@
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 edition = "2018"
 name = "solana-bench-streamer"
-version = "0.20.0"
+version = "0.20.5"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"

 [dependencies]
 clap = "2.33.0"
-solana-core = { path = "../core", version = "0.20.0" }
-solana-logger = { path = "../logger", version = "0.20.0" }
-solana-netutil = { path = "../netutil", version = "0.20.0" }
+solana-core = { path = "../core", version = "0.20.5" }
+solana-logger = { path = "../logger", version = "0.20.5" }
+solana-netutil = { path = "../netutil", version = "0.20.5" }
@@ -2,7 +2,7 @@
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 edition = "2018"
 name = "solana-bench-tps"
-version = "0.20.0"
+version = "0.20.5"
 repository = "https://github.com/solana-labs/solana"
 license = "Apache-2.0"
 homepage = "https://solana.com/"
@@ -16,23 +16,23 @@ serde = "1.0.101"
 serde_derive = "1.0.101"
 serde_json = "1.0.41"
 serde_yaml = "0.8.11"
-solana-core = { path = "../core", version = "0.20.0" }
-solana-genesis = { path = "../genesis", version = "0.20.0" }
-solana-client = { path = "../client", version = "0.20.0" }
-solana-drone = { path = "../drone", version = "0.20.0" }
-solana-librapay-api = { path = "../programs/librapay_api", version = "0.20.0", optional = true }
-solana-logger = { path = "../logger", version = "0.20.0" }
-solana-metrics = { path = "../metrics", version = "0.20.0" }
-solana-measure = { path = "../measure", version = "0.20.0" }
-solana-netutil = { path = "../netutil", version = "0.20.0" }
-solana-runtime = { path = "../runtime", version = "0.20.0" }
-solana-sdk = { path = "../sdk", version = "0.20.0" }
-solana-move-loader-program = { path = "../programs/move_loader_program", version = "0.20.0", optional = true }
-solana-move-loader-api = { path = "../programs/move_loader_api", version = "0.20.0", optional = true }
+solana-core = { path = "../core", version = "0.20.5" }
+solana-genesis = { path = "../genesis", version = "0.20.5" }
+solana-client = { path = "../client", version = "0.20.5" }
+solana-drone = { path = "../drone", version = "0.20.5" }
+solana-librapay-api = { path = "../programs/librapay_api", version = "0.20.5", optional = true }
+solana-logger = { path = "../logger", version = "0.20.5" }
+solana-metrics = { path = "../metrics", version = "0.20.5" }
+solana-measure = { path = "../measure", version = "0.20.5" }
+solana-netutil = { path = "../netutil", version = "0.20.5" }
+solana-runtime = { path = "../runtime", version = "0.20.5" }
+solana-sdk = { path = "../sdk", version = "0.20.5" }
+solana-move-loader-program = { path = "../programs/move_loader_program", version = "0.20.5", optional = true }
+solana-move-loader-api = { path = "../programs/move_loader_api", version = "0.20.5", optional = true }

 [dev-dependencies]
 serial_test = "0.2.0"
 serial_test_derive = "0.2.0"

 [features]
-move = ["solana-core/move", "solana-librapay-api", "solana-move-loader-program", "solana-move-loader-api"]
+move = ["solana-librapay-api", "solana-move-loader-program", "solana-move-loader-api"]
@@ -18,7 +18,7 @@ use solana_sdk::{
     pubkey::Pubkey,
     signature::{Keypair, KeypairUtil},
     system_instruction, system_transaction,
-    timing::{duration_as_ms, duration_as_s, timestamp},
+    timing::{duration_as_ms, duration_as_s, duration_as_us, timestamp},
     transaction::Transaction,
 };
 use std::{
@@ -158,12 +158,13 @@ where
     let mut reclaim_lamports_back_to_source_account = false;
     let mut i = keypair0_balance;
     let mut blockhash = Hash::default();
-    let mut blockhash_time = Instant::now();
+    let mut blockhash_time;
     while start.elapsed() < duration {
         // ping-pong between source and destination accounts for each loop iteration
         // this seems to be faster than trying to determine the balance of individual
         // accounts
         let len = tx_count as usize;
+        blockhash_time = Instant::now();
         if let Ok((new_blockhash, _fee_calculator)) = client.get_new_blockhash(&blockhash) {
             blockhash = new_blockhash;
         } else {
@@ -173,13 +174,19 @@ where
             sleep(Duration::from_millis(100));
             continue;
         }
-        info!(
-            "Took {} ms for new blockhash",
-            duration_as_ms(&blockhash_time.elapsed())
+        datapoint_debug!(
+            "bench-tps-get_blockhash",
+            ("duration", duration_as_us(&blockhash_time.elapsed()), i64)
         );

+        blockhash_time = Instant::now();
         let balance = client.get_balance(&id.pubkey()).unwrap_or(0);
         metrics_submit_lamport_balance(balance);
+        datapoint_debug!(
+            "bench-tps-get_balance",
+            ("duration", duration_as_us(&blockhash_time.elapsed()), i64)
+        );

         generate_txs(
             &shared_txs,
             &blockhash,
@@ -367,7 +374,7 @@ fn generate_txs(
     );
     datapoint_debug!(
         "bench-tps-generate_txs",
-        ("duration", duration_as_ms(&duration), i64)
+        ("duration", duration_as_us(&duration), i64)
     );

     let sz = transactions.len() / threads;
@@ -432,7 +439,7 @@ fn do_tx_transfers<T: Client>(
     );
     datapoint_debug!(
         "bench-tps-do_tx_transfers",
-        ("duration", duration_as_ms(&transfer_start.elapsed()), i64),
+        ("duration", duration_as_us(&transfer_start.elapsed()), i64),
         ("count", tx_len, i64)
     );
 }
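The bench.rs hunks above switch the benchmark's datapoints from millisecond to microsecond granularity and re-arm `blockhash_time` immediately before each measured RPC call, so every datapoint covers exactly one call. A minimal, self-contained sketch of that timing pattern; the local `duration_as_us` and the two fetch functions are stand-ins for the `solana_sdk::timing` helper and the real client calls:

```rust
use std::time::{Duration, Instant};

// Stand-in for solana_sdk::timing::duration_as_us.
fn duration_as_us(d: &Duration) -> u64 {
    d.as_secs() * 1_000_000 + u64::from(d.subsec_micros())
}

// Hypothetical stand-ins for the RPC calls made in bench.rs.
fn fetch_new_blockhash() -> u64 { 42 }
fn fetch_balance() -> u64 { 1_000 }

fn main() {
    // Re-arm the timer right before each operation being measured,
    // so each reported duration covers exactly one call.
    let mut timer = Instant::now();
    let _blockhash = fetch_new_blockhash();
    println!("get_blockhash duration_us={}", duration_as_us(&timer.elapsed()));

    timer = Instant::now();
    let _balance = fetch_balance();
    println!("get_balance duration_us={}", duration_as_us(&timer.elapsed()));
}
```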
@@ -21,6 +21,7 @@ pub struct Config {
     pub write_to_client_file: bool,
     pub read_from_client_file: bool,
     pub target_lamports_per_signature: u64,
+    pub multi_client: bool,
     pub use_move: bool,
     pub num_lamports_per_account: u64,
 }
@@ -41,6 +42,7 @@ impl Default for Config {
             write_to_client_file: false,
             read_from_client_file: false,
             target_lamports_per_signature: FeeCalculator::default().target_lamports_per_signature,
+            multi_client: true,
             use_move: false,
             num_lamports_per_account: NUM_LAMPORTS_PER_ACCOUNT_DEFAULT,
         }
@@ -108,6 +110,11 @@ pub fn build_args<'a, 'b>() -> App<'a, 'b> {
                 .long("use-move")
                 .help("Use Move language transactions to perform transfers."),
         )
+        .arg(
+            Arg::with_name("no-multi-client")
+                .long("no-multi-client")
+                .help("Disable multi-client support, only transact with the entrypoint."),
+        )
         .arg(
             Arg::with_name("tx_count")
                 .long("tx_count")
@@ -229,6 +236,7 @@ pub fn extract_args<'a>(matches: &ArgMatches<'a>) -> Config {
     }

     args.use_move = matches.is_present("use-move");
+    args.multi_client = !matches.is_present("no-multi-client");

     if let Some(v) = matches.value_of("num_lamports_per_account") {
         args.num_lamports_per_account = v.to_string().parse().expect("can't parse lamports");
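The new `--no-multi-client` option uses the inverted-flag pattern: the behavior defaults to on, and the flag's presence turns it off, as in `args.multi_client = !matches.is_present("no-multi-client")` above. A std-only sketch of the same idea, without the clap plumbing:

```rust
use std::env;

fn main() {
    // Presence of --no-multi-client disables a behavior that is on by default,
    // mirroring `args.multi_client = !matches.is_present("no-multi-client")`.
    let multi_client = !env::args().any(|arg| arg == "--no-multi-client");
    println!("multi_client = {}", multi_client);
}
```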
@@ -1,7 +1,7 @@
 use log::*;
 use solana_bench_tps::bench::{do_bench_tps, generate_and_fund_keypairs, generate_keypairs};
 use solana_bench_tps::cli;
-use solana_core::gossip_service::{discover_cluster, get_multi_client};
+use solana_core::gossip_service::{discover_cluster, get_client, get_multi_client};
 use solana_genesis::Base64Account;
 use solana_sdk::fee_calculator::FeeCalculator;
 use solana_sdk::signature::{Keypair, KeypairUtil};
@@ -29,6 +29,7 @@ fn main() {
         read_from_client_file,
         target_lamports_per_signature,
         use_move,
+        multi_client,
         num_lamports_per_account,
         ..
     } = &cli_config;
@@ -70,15 +71,19 @@ fn main() {
         exit(1);
     });

-    let (client, num_clients) = get_multi_client(&nodes);
-
-    if nodes.len() < num_clients {
-        eprintln!(
-            "Error: Insufficient nodes discovered. Expecting {} or more",
-            num_nodes
-        );
-        exit(1);
-    }
+    let client = if *multi_client {
+        let (client, num_clients) = get_multi_client(&nodes);
+        if nodes.len() < num_clients {
+            eprintln!(
+                "Error: Insufficient nodes discovered. Expecting {} or more",
+                num_nodes
+            );
+            exit(1);
+        }
+        client
+    } else {
+        get_client(&nodes)
+    };

     let (keypairs, move_keypairs, keypair_balance) = if *read_from_client_file && !use_move {
         let path = Path::new(&client_ids_and_stake_file);
@@ -177,7 +177,7 @@ $ solana send-timestamp <PUBKEY> <PROCESS_ID> --date 2018-12-24T23:59:00
 ## Usage
 ### solana-cli
 ```text
-solana-cli 0.20.0
+solana-cli 0.20.5
 Blockchain, Rebuilt for Scale

 USAGE:
@@ -126,7 +126,7 @@ The result field will be a JSON object with the following sub fields:
 curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getAccountInfo", "params":["2gVkYWexTHR5Hb2aLeQN3tnngvWzisFKXDUPrgMHpdST"]}' http://localhost:8899

 // Result
-{"jsonrpc":"2.0","result":{"executable":false,"owner":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lamports":1,"data":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,20,0,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]},"id":1}
+{"jsonrpc":"2.0","result":{"executable":false,"owner":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lamports":1,"data":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0.20.5,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]},"id":1}
 ```

 ### getBalance
@@ -729,7 +729,7 @@ Subscribe to an account to receive notifications when the lamports or data for a
 #### Notification Format:

 ```bash
-{"jsonrpc": "2.0","method": "accountNotification", "params": {"result": {"executable":false,"owner":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lamports":1,"data":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,20,0,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]},"subscription":0}}
+{"jsonrpc": "2.0","method": "accountNotification", "params": {"result": {"executable":false,"owner":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lamports":1,"data":[3,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0.20.5,0,0,0,0,0,0,50,48,53,48,45,48,49,45,48,49,84,48,48,58,48,48,58,48,48,90,252,10,7,28,246,140,88,177,98,82,10,227,89,81,18,30,194,101,199,16,11,73,133,20,246,62,114,39,20,113,189,32,50,0,0,0,0,0,0,0,247,15,36,102,167,83,225,42,133,127,82,34,36,224,207,130,109,230,224,188,163,33,213,13,5,117,211,251,65,159,197,51,0,0,0,0,0,0]},"subscription":0}}
 ```

 ### accountUnsubscribe
@@ -787,7 +787,7 @@ Subscribe to a program to receive notifications when the lamports or data for a
 * `object` - account info JSON object \(see [getAccountInfo](jsonrpc-api.md#getaccountinfo) for field details\)

 ```bash
-{"jsonrpc":"2.0","method":"programNotification","params":{{"result":["8Rshv2oMkPu5E4opXTRyuyBeZBqQ4S477VG26wUTFxUM",{"executable":false,"lamports":1,"owner":[129,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"data":[1,1,1,0,0,0,0,0,0,0,20,0,0,0,0,0,0,0,50,48,49,56,45,49,50,45,50,52,84,50,51,58,53,57,58,48,48,90,235,233,39,152,15,44,117,176,41,89,100,86,45,61,2,44,251,46,212,37,35,118,163,189,247,84,27,235,178,62,55,89,0,0,0,0,50,0,0,0,0,0,0,0,235,233,39,152,15,44,117,176,41,89,100,86,45,61,2,44,251,46,212,37,35,118,163,189,247,84,27,235,178,62,45,4,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]}],"subscription":0}}
+{"jsonrpc":"2.0","method":"programNotification","params":{{"result":["8Rshv2oMkPu5E4opXTRyuyBeZBqQ4S477VG26wUTFxUM",{"executable":false,"lamports":1,"owner":[129,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"data":[1,1,1,0,0,0,0,0,0,0.20.5,0,0,0,0,0,0,50,48,49,56,45,49,50,45,50,52,84,50,51,58,53,57,58,48,48,90,235,233,39,152,15,44,117,176,41,89,100,86,45,61,2,44,251,46,212,37,35,118,163,189,247,84,27,235,178,62,55,89,0,0,0,0,50,0,0,0,0,0,0,0,235,233,39,152,15,44,117,176,41,89,100,86,45,61,2,44,251,46,212,37,35,118,163,189,247,84,27,235,178,62,45,4,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]}],"subscription":0}}
 ```

 ### programUnsubscribe
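The curl examples in the hunks above all drive the same HTTP JSON-RPC interface. For illustration, a dependency-free Rust sketch that issues one of those requests by hand over a TCP socket; a real client would use an HTTP library, and the endpoint here is simply the testnet host used throughout these docs:

```rust
use std::io::{Read, Write};
use std::net::TcpStream;

fn main() -> std::io::Result<()> {
    // Same request as: curl -d '{"jsonrpc":"2.0","id":1,"method":"getTransactionCount"}' ...
    let body = r#"{"jsonrpc":"2.0","id":1,"method":"getTransactionCount"}"#;
    let mut stream = TcpStream::connect("testnet.solana.com:8899")?;
    write!(
        stream,
        "POST / HTTP/1.1\r\nHost: testnet.solana.com\r\nContent-Type: application/json\r\nContent-Length: {}\r\nConnection: close\r\n\r\n{}",
        body.len(),
        body
    )?;
    // Read the raw HTTP response, headers included, and print it.
    let mut response = String::new();
    stream.read_to_string(&mut response)?;
    println!("{}", response);
    Ok(())
}
```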
@@ -11,7 +11,7 @@ This document proposes an easy to use software install and updater that can be u
 The easiest install method for supported platforms:

 ```bash
-$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.18.0/install/solana-install-init.sh | sh
+$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.20.5/install/solana-install-init.sh | sh
 ```

 This script will check github for the latest tagged release and download and run the `solana-install-init` binary from there.
@@ -20,7 +20,7 @@ If additional arguments need to be specified during the installation, the follow

 ```bash
 $ init_args=.... # arguments for `solana-install-init ...`
-$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.18.0/install/solana-install-init.sh | sh -s - ${init_args}
+$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.20.5/install/solana-install-init.sh | sh -s - ${init_args}
 ```

 ### Fetch and run a pre-built installer from a Github release
@@ -28,7 +28,7 @@ $ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.18.0/install
 With a well-known release URL, a pre-built binary can be obtained for supported platforms:

 ```bash
-$ curl -o solana-install-init https://github.com/solana-labs/solana/releases/download/v0.18.0/solana-install-init-x86_64-apple-darwin
+$ curl -o solana-install-init https://github.com/solana-labs/solana/releases/download/v0.20.5/solana-install-init-x86_64-apple-darwin
 $ chmod +x ./solana-install-init
 $ ./solana-install-init --help
 ```
@@ -29,7 +29,7 @@ Before starting an archiver node, sanity check that the cluster is accessible to
 Fetch the current transaction count over JSON RPC:

 ```bash
-$ curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":1, "method":"getTransactionCount"}' http://testnet.solana.com:8899
+curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":1, "method":"getTransactionCount"}' http://testnet.solana.com:8899
 ```

 Inspect the blockexplorer at [http://testnet.solana.com/](http://testnet.solana.com/) for activity.
@@ -47,13 +47,13 @@ The `solana-install` tool can be used to easily install and upgrade the cluster
 #### Linux and mac OS

 ```bash
-$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.18.0/install/solana-install-init.sh | sh -s
+curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.20.5/install/solana-install-init.sh | sh -s
 ```

 Alternatively build the `solana-install` program from source and run the following command to obtain the same result:

 ```bash
-$ solana-install init
+solana-install init
 ```

 #### Windows
@@ -71,9 +71,9 @@ If you would rather not use `solana-install` to manage the install, you can manu
 Download the binaries by navigating to [https://github.com/solana-labs/solana/releases/latest](https://github.com/solana-labs/solana/releases/latest), download **solana-release-x86\_64-unknown-linux-gnu.tar.bz2**, then extract the archive:

 ```bash
-$ tar jxf solana-release-x86_64-unknown-linux-gnu.tar.bz2
-$ cd solana-release/
-$ export PATH=$PWD/bin:$PATH
+tar jxf solana-release-x86_64-unknown-linux-gnu.tar.bz2
+cd solana-release/
+export PATH=$PWD/bin:$PATH
 ```

 #### mac OS
@@ -81,9 +81,9 @@ $ export PATH=$PWD/bin:$PATH
 Download the binaries by navigating to [https://github.com/solana-labs/solana/releases/latest](https://github.com/solana-labs/solana/releases/latest), download **solana-release-x86\_64-apple-darwin.tar.bz2**, then extract the archive:

 ```bash
-$ tar jxf solana-release-x86_64-apple-darwin.tar.bz2
-$ cd solana-release/
-$ export PATH=$PWD/bin:$PATH
+tar jxf solana-release-x86_64-apple-darwin.tar.bz2
+cd solana-release/
+export PATH=$PWD/bin:$PATH
 ```

 #### Windows
@@ -95,7 +95,7 @@ Download the binaries by navigating to [https://github.com/solana-labs/solana/re
 Try running following command to join the gossip network and view all the other nodes in the cluster:

 ```bash
-$ solana-gossip --entrypoint testnet.solana.com:8001 spy
+solana-gossip --entrypoint testnet.solana.com:8001 spy
 # Press ^C to exit
 ```

@@ -104,8 +104,8 @@ Now configure the keypairs for your archiver by running:
 Navigate to the solana install location and open a cmd prompt

 ```bash
-$ solana-keygen new -o archiver-keypair.json
-$ solana-keygen new -o storage-keypair.json
+solana-keygen new -o archiver-keypair.json
+solana-keygen new -o storage-keypair.json
 ```

 Use solana-keygen to show the public keys for each of the keypairs, they will be needed in the next step:
@@ -114,23 +114,23 @@ Use solana-keygen to show the public keys for each of the keypairs, they will be

 ```bash
 # The archiver's identity
-$ solana-keygen pubkey archiver-keypair.json
-$ solana-keygen pubkey storage-keypair.json
+solana-keygen pubkey archiver-keypair.json
+solana-keygen pubkey storage-keypair.json
 ```

 * Linux and mac OS

 \`\`\`bash

-$ export ARCHIVER\_IDENTITY=$\(solana-keygen pubkey archiver-keypair.json\)
+export ARCHIVER\_IDENTITY=$\(solana-keygen pubkey archiver-keypair.json\)

-$ export STORAGE\_IDENTITY=$\(solana-keygen pubkey storage-keypair.json\)
+export STORAGE\_IDENTITY=$\(solana-keygen pubkey storage-keypair.json\)

 ```text
 Then set up the storage accounts for your archiver by running:
 ```bash
-$ solana --keypair archiver-keypair.json airdrop 100000 lamports
-$ solana --keypair archiver-keypair.json create-archiver-storage-account $ARCHIVER_IDENTITY $STORAGE_IDENTITY
+solana --keypair archiver-keypair.json airdrop 100000 lamports
+solana --keypair archiver-keypair.json create-archiver-storage-account $ARCHIVER_IDENTITY $STORAGE_IDENTITY
 ```

 Note: Every time the testnet restarts, run the steps to setup the archiver accounts again.
@@ -138,7 +138,7 @@ Note: Every time the testnet restarts, run the steps to setup the archiver accou
 To start the archiver:

 ```bash
-$ solana-archiver --entrypoint testnet.solana.com:8001 --identity archiver-keypair.json --storage-keypair storage-keypair.json --ledger archiver-ledger
+solana-archiver --entrypoint testnet.solana.com:8001 --identity archiver-keypair.json --storage-keypair storage-keypair.json --ledger archiver-ledger
 ```

 ## Verify Archiver Setup
@@ -146,12 +146,11 @@ $ solana-archiver --entrypoint testnet.solana.com:8001 --identity archiver-keypa
 From another console, confirm the IP address and **identity pubkey** of your archiver is visible in the gossip network by running:

 ```bash
-$ solana-gossip --entrypoint testnet.solana.com:8001 spy
+solana-gossip --entrypoint testnet.solana.com:8001 spy
 ```

 Provide the **storage account pubkey** to the `solana show-storage-account` command to view the recent mining activity from your archiver:

 ```bash
-$ solana --keypair storage-keypair.json show-storage-account $STORAGE_IDENTITY
+solana --keypair storage-keypair.json show-storage-account $STORAGE_IDENTITY
 ```
@@ -7,13 +7,13 @@ You can publish your validator information to the chain to be publicly visible t
 Run the solana CLI to populate a validator info account:

 ```bash
-$ solana validator-info publish --keypair ~/validator-keypair.json <VALIDATOR_INFO_ARGS> <VALIDATOR_NAME>
+solana validator-info publish --keypair ~/validator-keypair.json <VALIDATOR_INFO_ARGS> <VALIDATOR_NAME>
 ```

 For details about optional fields for VALIDATOR\_INFO\_ARGS:

 ```bash
-$ solana validator-info publish --help
+solana validator-info publish --help
 ```

 ## Keybase
@@ -33,4 +33,3 @@ Including a Keybase username allows client applications \(like the Solana Networ
 3. Add or update your `solana validator-info` with your Keybase username. The

    CLI will verify the `validator-<PUBKEY>` file
-
@@ -5,70 +5,65 @@
 The **identity pubkey** for your validator can also be found by running:

 ```bash
-$ solana-keygen pubkey ~/validator-keypair.json
+solana-keygen pubkey ~/validator-keypair.json
 ```

 From another console, confirm the IP address and **identity pubkey** of your validator is visible in the gossip network by running:

 ```bash
-$ solana-gossip --entrypoint testnet.solana.com:8001 spy
+solana-gossip --entrypoint testnet.solana.com:8001 spy
 ```

+## Monitoring Catch Up
+
+It may take some time to catch up with the cluster after your validator boots.
+Use the `catchup` command to monitor your validator through this process:
+
+```bash
+solana catchup ~/validator-keypair.json
+```
+
+Until your validator has caught up, it will not be able to vote successfully and
+stake cannot be delegated to it.
+
+Also if you find the cluster's slot advancing faster than yours, you will likely
+never catch up. This typically implies some kind of networking issue between
+your validator and the rest of the cluster.
+
+## Check Your Balance
+
+Your account balance should decrease by the transaction fee amount as your
+validator submits votes, and increase after serving as the leader. Pass the
+`--lamports` arg to observe in finer detail:
+
+```bash
+solana balance --lamports
+```
+
 ## Check Vote Activity

-The vote pubkey for the validator can be found by running:
+The `solana show-vote-account` command displays the recent voting activity from your validator:

 ```bash
-$ solana-keygen pubkey ~/validator-vote-keypair.json
+solana show-vote-account ~/validator-vote-keypair.json
 ```

-Provide the **vote pubkey** to the `solana show-vote-account` command to view the recent voting activity from your validator:
-
-```bash
-$ solana show-vote-account 2ozWvfaXQd1X6uKh8jERoRGApDqSqcEy6fF1oN13LL2G
-```
-
-## Check Your Balance
-
-Your account balance should decrease by the transaction fee amount as your validator submits votes, and increase after serving as the leader. Pass the `--lamports` arg to observe in finer detail:
-
-```bash
-$ solana balance --lamports
-```
-
-## Check Slot Number
-
-After your validator boots, it may take some time to catch up with the cluster. Use the `get-slot` command to view the current slot that the cluster is processing:
-
-```bash
-$ solana get-slot
-```
-
-The current slot that your validator is processing can then been seen with:
-
-```bash
-$ solana --url http://127.0.0.1:8899 get-slot
-```
-
-Until your validator has caught up, it will not be able to vote successfully and stake cannot be delegated to it.
-
-Also if you find the cluster's slot advancing faster than yours, you will likely never catch up. This typically implies some kind of networking issue between your validator and the rest of the cluster.
-
 ## Get Cluster Info

 There are several useful JSON-RPC endpoints for monitoring your validator on the cluster, as well as the health of the cluster:

 ```bash
 # Similar to solana-gossip, you should see your validator in the list of cluster nodes
-$ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getClusterNodes"}' http://testnet.solana.com:8899
+curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getClusterNodes"}' http://testnet.solana.com:8899
 # If your validator is properly voting, it should appear in the list of `current` vote accounts. If staked, `stake` should be > 0
-$ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getVoteAccounts"}' http://testnet.solana.com:8899
+curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getVoteAccounts"}' http://testnet.solana.com:8899
 # Returns the current leader schedule
-$ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getLeaderSchedule"}' http://testnet.solana.com:8899
+curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getLeaderSchedule"}' http://testnet.solana.com:8899
 # Returns info about the current epoch. slotIndex should progress on subsequent calls.
 curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getEpochInfo"}' http://testnet.solana.com:8899
 ```

 ## Validator Metrics

 Metrics are available for local monitoring of your validator.
@@ -76,9 +71,9 @@ Metrics are available for local monitoring of your validator.
 Docker must be installed and the current user added to the docker group. Then download `solana-metrics.tar.bz2` from the Github Release and run

 ```bash
-$ tar jxf solana-metrics.tar.bz2
-$ cd solana-metrics/
-$ ./start.sh
+tar jxf solana-metrics.tar.bz2
+cd solana-metrics/
+./start.sh
 ```

 A local InfluxDB and Grafana instance is now running on your machine. Define `SOLANA_METRICS_CONFIG` in your environment as described at the end of the `start.sh` output and restart your validator.
@@ -92,6 +87,5 @@ Log messages emitted by your validator include a timestamp. When sharing logs wi
 To make it easier to compare logs between different sources we request that everybody use Pacific Time on their validator nodes. In Linux this can be accomplished by running:

 ```bash
-$ sudo ln -sf /usr/share/zoneinfo/America/Los_Angeles /etc/localtime
+sudo ln -sf /usr/share/zoneinfo/America/Los_Angeles /etc/localtime
 ```
@@ -5,13 +5,13 @@
 The `solana-install` tool can be used to easily install and upgrade the validator software on Linux x86\_64 and mac OS systems.

 ```bash
-$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.18.0/install/solana-install-init.sh | sh -s
+curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.20.5/install/solana-install-init.sh | sh -s
 ```

 Alternatively build the `solana-install` program from source and run the following command to obtain the same result:

 ```bash
-$ solana-install init
+solana-install init
 ```

 After a successful install, `solana-install update` may be used to easily update the cluster software to a newer version at any time.
@@ -25,9 +25,9 @@ If you would rather not use `solana-install` to manage the install, you can manu
 Download the binaries by navigating to [https://github.com/solana-labs/solana/releases/latest](https://github.com/solana-labs/solana/releases/latest), download **solana-release-x86\_64-unknown-linux-gnu.tar.bz2**, then extract the archive:

 ```bash
-$ tar jxf solana-release-x86_64-unknown-linux-gnu.tar.bz2
-$ cd solana-release/
-$ export PATH=$PWD/bin:$PATH
+tar jxf solana-release-x86_64-unknown-linux-gnu.tar.bz2
+cd solana-release/
+export PATH=$PWD/bin:$PATH
 ```

 ### mac OS
@@ -35,9 +35,9 @@ $ export PATH=$PWD/bin:$PATH
 Download the binaries by navigating to [https://github.com/solana-labs/solana/releases/latest](https://github.com/solana-labs/solana/releases/latest), download **solana-release-x86\_64-apple-darwin.tar.bz2**, then extract the archive:

 ```bash
-$ tar jxf solana-release-x86_64-apple-darwin.tar.bz2
-$ cd solana-release/
-$ export PATH=$PWD/bin:$PATH
+tar jxf solana-release-x86_64-apple-darwin.tar.bz2
+cd solana-release/
+export PATH=$PWD/bin:$PATH
 ```

 ## Build From Source
@@ -45,7 +45,6 @@ $ export PATH=$PWD/bin:$PATH
 If you are unable to use the prebuilt binaries or prefer to build it yourself from source, navigate to [https://github.com/solana-labs/solana/releases/latest](https://github.com/solana-labs/solana/releases/latest), and download the **Source Code** archive. Extract the code and build the binaries with:

 ```bash
-$ ./scripts/cargo-install-all.sh .
-$ export PATH=$PWD/bin:$PATH
+./scripts/cargo-install-all.sh .
+export PATH=$PWD/bin:$PATH
 ```
@@ -7,14 +7,14 @@ Adding stake can be accomplished by using the `solana` CLI
 First create a stake account keypair with `solana-keygen`:

 ```bash
-$ solana-keygen new -o ~/validator-config/stake-keypair.json
+solana-keygen new -o ~/validator-stake-keypair.json
 ```

-and use the cli's `create-stake-account` and `delegate-stake` commands to stake your validator with 42 lamports:
+and use the cli's `create-stake-account` and `delegate-stake` commands to stake your validator with 4242 lamports:

 ```bash
-$ solana create-stake-account ~/validator-config/stake-keypair.json 42 lamports
-$ solana delegate-stake ~/validator-config/stake-keypair.json ~/validator-vote-keypair.json
+solana create-stake-account ~/validator-stake-keypair.json 4242 lamports
+solana delegate-stake ~/validator-stake-keypair.json ~/validator-vote-keypair.json
 ```

 Note that stakes need to warm up, and warmup increments are applied at Epoch boundaries, so it can take an hour or more for the change to fully take effect.
@@ -22,13 +22,13 @@ Note that stakes need to warm up, and warmup increments are applied at Epoch bou
 Stakes can be re-delegated to another node at any time with the same command:

 ```bash
-$ solana delegate-stake ~/validator-config/stake-keypair.json ~/some-other-validator-vote-keypair.json
+solana delegate-stake ~/validator-stake-keypair.json ~/some-other-validator-vote-keypair.json
 ```

 Assuming the node is voting, now you're up and running and generating validator rewards. You'll want to periodically redeem/claim your rewards:

 ```bash
-$ solana redeem-vote-credits ~/validator-config/stake-keypair.json ~/validator-vote-keypair.json
+solana redeem-vote-credits ~/validator-stake-keypair.json ~/validator-vote-keypair.json
 ```

 The rewards lamports earned are split between your stake account and the vote account according to the commission rate set in the vote account. Rewards can only be earned while the validator is up and running. Further, once staked, the validator becomes an important part of the network. In order to safely remove a validator from the network, first deactivate its stake.
@@ -36,7 +36,7 @@ The rewards lamports earned are split between your stake account and the vote ac
 Stake can be deactivated by running:

 ```bash
-$ solana deactivate-stake ~/validator-config/stake-keypair.json
+solana deactivate-stake ~/validator-stake-keypair.json
 ```

 The stake will cool down, deactivate over time. While cooling down, your stake will continue to earn rewards. Only after stake cooldown is it safe to turn off your validator or withdraw it from the network. Cooldown may take several epochs to complete, depending on active stake and the size of your stake.
@@ -7,7 +7,7 @@ Before attaching a validator node, sanity check that the cluster is accessible t
 Fetch the current transaction count over JSON RPC:

 ```bash
-$ curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":1, "method":"getTransactionCount"}' http://testnet.solana.com:8899
+curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":1, "method":"getTransactionCount"}' http://testnet.solana.com:8899
 ```

 Inspect the network explorer at [https://explorer.solana.com/](https://explorer.solana.com/) for activity.
@@ -19,16 +19,16 @@ View the [metrics dashboard](https://metrics.solana.com:3000/d/testnet-beta/test
 Sanity check that you are able to interact with the cluster by receiving a small airdrop of lamports from the testnet drone:

 ```bash
-$ solana set --url http://testnet.solana.com:8899
-$ solana get
-$ solana airdrop 123 lamports
-$ solana balance --lamports
+solana set --url http://testnet.solana.com:8899
+solana get
+solana airdrop 123 lamports
+solana balance --lamports
 ```

 Also try running following command to join the gossip network and view all the other nodes in the cluster:

 ```bash
-$ solana-gossip --entrypoint testnet.solana.com:8001 spy
+solana-gossip --entrypoint testnet.solana.com:8001 spy
 # Press ^C to exit
 ```

@@ -37,7 +37,7 @@ $ solana-gossip --entrypoint testnet.solana.com:8001 spy
 Create an identity keypair for your validator by running:

 ```bash
-$ solana-keygen new -o ~/validator-keypair.json
+solana-keygen new -o ~/validator-keypair.json
 ```

 ### Wallet Configuration
@@ -45,30 +45,30 @@ $ solana-keygen new -o ~/validator-keypair.json
 You can set solana configuration to use your validator keypair for all following commands:

 ```bash
-$ solana set --keypair ~/validator-keypair.json
+solana set --keypair ~/validator-keypair.json
 ```

 **All following solana commands assume you have set `--keypair` config to** your validator identity keypair.\*\* If you haven't, you will need to add the `--keypair` argument to each command, like:

 ```bash
-$ solana --keypair ~/validator-keypair.json airdrop 1000 lamports
+solana --keypair ~/validator-keypair.json airdrop 10
 ```

 \(You can always override the set configuration by explicitly passing the `--keypair` argument with a command.\)

 ### Validator Start

-Airdrop yourself some lamports to get started:
+Airdrop yourself some SOL to get started:

 ```bash
-$ solana airdrop 1000 lamports
+solana airdrop 10
 ```

 Your validator will need a vote account. Create it now with the following commands:

 ```bash
-$ solana-keygen new -o ~/validator-vote-keypair.json
-$ solana create-vote-account ~/validator-vote-keypair.json ~/validator-keypair.json 1 lamports
+solana-keygen new -o ~/validator-vote-keypair.json
+solana create-vote-account ~/validator-vote-keypair.json ~/validator-keypair.json
 ```

 Then use one of the following commands, depending on your installation choice, to start the node:
@@ -76,19 +76,19 @@ Then use one of the following commands, depending on your installation choice, t
 If this is a `solana-install`-installation:

 ```bash
-$ solana-validator --identity ~/validator-keypair.json --voting-keypair ~/validator-vote-keypair.json --ledger ~/validator-config --rpc-port 8899 --entrypoint testnet.solana.com:8001
+solana-validator --identity ~/validator-keypair.json --voting-keypair ~/validator-vote-keypair.json --ledger ~/validator-config --rpc-port 8899 --entrypoint testnet.solana.com:8001
 ```

 Alternatively, the `solana-install run` command can be used to run the validator node while periodically checking for and applying software updates:

 ```bash
-$ solana-install run solana-validator -- --identity ~/validator-keypair.json --voting-keypair ~/validator-vote-keypair.json --ledger ~/validator-config --rpc-port 8899 --entrypoint testnet.solana.com:8001
+solana-install run solana-validator -- --identity ~/validator-keypair.json --voting-keypair ~/validator-vote-keypair.json --ledger ~/validator-config --rpc-port 8899 --entrypoint testnet.solana.com:8001
 ```

 If you built from source:

 ```bash
-$ NDEBUG=1 USE_INSTALL=1 ./multinode-demo/validator.sh --identity ~/validator-keypair.json --voting-keypair ~/validator-vote-keypair.json --rpc-port 8899 --entrypoint testnet.solana.com:8001
+NDEBUG=1 USE_INSTALL=1 ./multinode-demo/validator.sh --identity ~/validator-keypair.json --voting-keypair ~/validator-vote-keypair.json --rpc-port 8899 --entrypoint testnet.solana.com:8001
 ```

 ### Enabling CUDA
@@ -98,7 +98,7 @@ If your machine has a GPU with CUDA installed \(Linux-only currently\), include
 Or if you built from source, define the SOLANA\_CUDA flag in your environment _before_ running any of the previously mentioned commands

 ```bash
-$ export SOLANA_CUDA=1
+export SOLANA_CUDA=1
 ```

 When your validator is started look for the following log message to indicate that CUDA is enabled: `"[<timestamp> solana::validator] CUDA is enabled"`
@@ -110,4 +110,3 @@ By default the validator will dynamically select available network ports in the
 ### Limiting ledger size to conserve disk space

 By default the validator will retain the full ledger. To conserve disk space start the validator with the `--limit-ledger-size`, which will instruct the validator to only retain the last couple hours of ledger.
-
@@ -15,8 +15,8 @@ Prior to mainnet, the testnets may be running different versions of solana softw
 You can submit a JSON-RPC request to see the specific version of the cluster.

 ```bash
-$ curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":1, "method":"getVersion"}' edge.testnet.solana.com:8899
-{"jsonrpc":"2.0","result":{"solana-core":"0.18.0-pre1"},"id":1}
+curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":1, "method":"getVersion"}' edge.testnet.solana.com:8899
+{"jsonrpc":"2.0","result":{"solana-core":"0.20.5"},"id":1}
 ```

 ## Using a Different Testnet
@@ -28,17 +28,17 @@ This guide is written in the context of testnet.solana.com, our most stable clus
 If you are bootstrapping with `solana-install`, you can specify the release tag or named channel to install to match your desired testnet.

 ```bash
-$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.18.0/install/solana-install-init.sh | sh -s - 0.18.0
+curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.20.5/install/solana-install-init.sh | sh -s - 0.20.5
 ```

 ```bash
-$ curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.18.0/install/solana-install-init.sh | sh -s - beta
+curl -sSf https://raw.githubusercontent.com/solana-labs/solana/v0.20.5/install/solana-install-init.sh | sh -s - beta
 ```

 Similarly, you can add this argument to the `solana-install` command if you've built the program from source:

 ```bash
-$ solana-install init 0.18.0
+solana-install init 0.20.5
 ```

 If you are downloading pre-compiled binaries or building from source, simply choose the release matching your desired testnet.
@@ -48,14 +48,14 @@ If you are downloading pre-compiled binaries or building from source, simply cho
 The Solana CLI tool points at testnet.solana.com by default. Include a `--url` argument to point at a different testnet. For instance:

 ```bash
-$ solana --url http://beta.testnet.solana.com:8899 balance
+solana --url http://beta.testnet.solana.com:8899 balance
 ```

 The solana cli includes `get` and `set` configuration commands to automatically set the `--url` argument for future cli commands. For example:

 ```bash
-$ solana set --url http://beta.testnet.solana.com:8899
-$ solana balance # Same result as command above
+solana set --url http://beta.testnet.solana.com:8899
+solana balance # Same result as command above
 ```

 \(You can always override the set configuration by explicitly passing the `--url` argument with a command.\)
@@ -63,12 +63,11 @@ $ solana balance # Same result as command above
 Solana-gossip and solana-validator commands already require an explicit `--entrypoint` argument. Simply replace testnet.solana.com in the examples with an alternate url to interact with a different testnet. For example:

 ```bash
-$ solana-validator --identity ~/validator-keypair.json --voting-keypair ~/validator-vote-keypair.json --ledger ~/validator-config --rpc-port 8899 beta.testnet.solana.com
+solana-validator --identity ~/validator-keypair.json --voting-keypair ~/validator-vote-keypair.json --ledger ~/validator-config --rpc-port 8899 beta.testnet.solana.com
 ```

 You can also submit JSON-RPC requests to a different testnet, like:

 ```bash
-$ curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":1, "method":"getTransactionCount"}' http://beta.testnet.solana.com:8899
+curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","id":1, "method":"getTransactionCount"}' http://beta.testnet.solana.com:8899
 ```
@@ -1,6 +1,6 @@
 [package]
 name = "solana-chacha-sys"
-version = "0.20.0"
+version = "0.20.5"
 description = "Solana chacha-sys"
 authors = ["Solana Maintainers <maintainers@solana.com>"]
 repository = "https://github.com/solana-labs/solana"
@@ -7,7 +7,7 @@ steps:
     timeout_in_minutes: 5
   - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_nightly_docker_image ci/test-checks.sh"
     name: "checks"
-    timeout_in_minutes: 35
+    timeout_in_minutes: 20
   - wait
   - command: "ci/test-stable-perf.sh"
     name: "stable-perf"
@@ -17,18 +17,21 @@ steps:
       - "queue=cuda"
   - command: "ci/test-bench.sh"
     name: "bench"
-    timeout_in_minutes: 60
+    timeout_in_minutes: 30
   - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-stable.sh"
     name: "stable"
     timeout_in_minutes: 40
     artifact_paths: "log-*.txt"
+  - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-move.sh"
+    name: "move"
+    timeout_in_minutes: 20
   - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-local-cluster.sh"
     name: "local-cluster"
-    timeout_in_minutes: 40
+    timeout_in_minutes: 30
     artifact_paths: "log-*.txt"
   - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_nightly_docker_image ci/test-coverage.sh"
     name: "coverage"
-    timeout_in_minutes: 40
+    timeout_in_minutes: 30
   - wait
   - trigger: "solana-secondary"
     branches: "!pull/*"
@@ -1,4 +1,4 @@
-FROM solanalabs/rust:1.38.0
+FROM solanalabs/rust:1.39.0
 ARG date

 RUN set -x \
@@ -1,6 +1,6 @@
 # Note: when the rust version is changed also modify
 # ci/rust-version.sh to pick up the new image tag
-FROM rust:1.38.0
+FROM rust:1.39.0

 # Add Google Protocol Buffers for Libra's metrics library.
 ENV PROTOC_VERSION 3.8.0

@@ -13,8 +13,8 @@
 # $ source ci/rust-version.sh
 #

-stable_version=1.38.0
-nightly_version=2019-10-03
+stable_version=1.39.0
+nightly_version=2019-11-13

 export rust_stable="$stable_version"
 export rust_stable_docker_image=solanalabs/rust:"$stable_version"
@@ -13,6 +13,6 @@ cd "$(dirname "$0")/.."
     -not -regex ".*/target/.*" \
     -print0 \
   | xargs -0 \
-    ci/docker-run.sh koalaman/shellcheck --color=always --external-sources --shell=bash
+    ci/docker-run.sh koalaman/shellcheck@sha256:fe24ab9a9b6b62d3adb162f4a80e006b6a63cae8c6ffafbae45772bab85e7294 --color=always --external-sources --shell=bash
 )
 echo --- ok
ci/test-move.sh (new symbolic link)
@@ -0,0 +1 @@
+test-stable.sh
@@ -28,17 +28,13 @@ rm -rf target/xargo # Issue #3105

 # Run the appropriate test based on entrypoint
 testName=$(basename "$0" .sh)
+echo "Executing $testName"
 case $testName in
 test-stable)
-  echo "Executing $testName"
-
   _ cargo +"$rust_stable" build --tests --bins ${V:+--verbose}
   _ cargo +"$rust_stable" test --all --exclude solana-local-cluster ${V:+--verbose} -- --nocapture
-  _ cargo +"$rust_stable" test --manifest-path local_cluster/Cargo.toml --features=move ${V:+--verbose} test_bench_tps_local_cluster_move -- --nocapture
   ;;
 test-stable-perf)
-  echo "Executing $testName"
-
   ci/affects-files.sh \
     .rs$ \
     Cargo.lock$ \
@@ -53,7 +49,7 @@ test-stable-perf)
     ^sdk/ \
   || {
     annotate --style info \
-      "Skipped test-stable-perf as no relevant files were modified"
+      "Skipped $testName as no relevant files were modified"
     exit 0
   }

@@ -82,8 +78,30 @@ test-stable-perf)
   _ cargo +"$rust_stable" build --bins ${V:+--verbose}
   _ cargo +"$rust_stable" test --package solana-ledger --package solana-core --lib ${V:+--verbose} -- --nocapture
   ;;
+test-move)
+  ci/affects-files.sh \
+    Cargo.lock$ \
+    Cargo.toml$ \
+    ^ci/rust-version.sh \
+    ^ci/test-stable.sh \
+    ^ci/test-move.sh \
+    ^programs/move_loader_api \
+    ^programs/move_loader_program \
+    ^programs/librapay_api \
+    ^logger/ \
+    ^runtime/ \
+    ^sdk/ \
+  || {
+    annotate --style info \
+      "Skipped $testName as no relevant files were modified"
+    exit 0
+  }
+  _ cargo +"$rust_stable" test --manifest-path programs/move_loader_api/Cargo.toml ${V:+--verbose} -- --nocapture
+  _ cargo +"$rust_stable" test --manifest-path programs/move_loader_program/Cargo.toml ${V:+--verbose} -- --nocapture
+  _ cargo +"$rust_stable" test --manifest-path programs/librapay_api/Cargo.toml ${V:+--verbose} -- --nocapture
+  exit 0
+  ;;
 test-local-cluster)
-  echo "Executing $testName"
   _ cargo +"$rust_stable" build --release --bins ${V:+--verbose}
   _ cargo +"$rust_stable" test --release --package solana-local-cluster ${V:+--verbose} -- --nocapture
   exit 0
@@ -262,6 +262,11 @@ trap shutdown EXIT INT

 set -x

+# Fetch reusable testnet keypairs
+if [[ ! -d net/keypairs ]]; then
+  git clone git@github.com:solana-labs/testnet-keypairs.git net/keypairs
+fi
+
 # Build a string to pass zone opts to $cloudProvider.sh: "-z zone1 -z zone2 ..."
 zone_args=()
 for val in "${zone[@]}"; do
@@ -288,11 +293,15 @@ if ! $skipCreate; then
   echo "--- $cloudProvider.sh create"
   create_args=(
     -p "$netName"
-    -a "$bootstrapValidatorAddress"
     -c "$clientNodeCount"
     -n "$additionalValidatorCount"
     --dedicated
   )

+  if [[ -n $bootstrapValidatorAddress ]]; then
+    create_args+=(-a "$bootstrapValidatorAddress")
+  fi
+
   # shellcheck disable=SC2206
   create_args+=(${zone_args[@]})
@ -216,7 +216,7 @@ maybe_deploy_software() {
  (
    echo "--- net.sh restart"
    set -x
    time net/net.sh restart --skip-setup -t "$CHANNEL_OR_TAG" --skip-ledger-verify "$arg"
    time net/net.sh restart --skip-setup -t "$CHANNEL_OR_TAG" --skip-poh-verify "$arg"
  ) || ok=false
  if ! $ok; then
    net/net.sh logs
@ -546,7 +546,7 @@ deploy() {
      ${maybeInternalNodesLamports} \
      ${maybeExternalAccountsFile} \
      ${maybeLamports} \
      --target-lamports-per-signature 1 \
      --target-lamports-per-signature 0 \
      --slots-per-epoch 4096 \
      ${maybeAdditionalDisk}
  )
@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-cli"
description = "Blockchain, Rebuilt for Scale"
version = "0.20.0"
version = "0.20.5"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"

@ -19,6 +19,7 @@ console = "0.9.0"
dirs = "2.0.2"
lazy_static = "1.4.0"
log = "0.4.8"
indicatif = "0.13.0"
num-traits = "0.2"
pretty-hex = "0.1.1"
reqwest = { version = "0.9.22", default-features = false, features = ["rustls-tls"] }

@ -26,23 +27,23 @@ serde = "1.0.101"
serde_derive = "1.0.101"
serde_json = "1.0.41"
serde_yaml = "0.8.11"
solana-budget-api = { path = "../programs/budget_api", version = "0.20.0" }
solana-client = { path = "../client", version = "0.20.0" }
solana-config-api = { path = "../programs/config_api", version = "0.20.0" }
solana-drone = { path = "../drone", version = "0.20.0" }
solana-logger = { path = "../logger", version = "0.20.0" }
solana-netutil = { path = "../netutil", version = "0.20.0" }
solana-runtime = { path = "../runtime", version = "0.20.0" }
solana-sdk = { path = "../sdk", version = "0.20.0" }
solana-stake-api = { path = "../programs/stake_api", version = "0.20.0" }
solana-storage-api = { path = "../programs/storage_api", version = "0.20.0" }
solana-vote-api = { path = "../programs/vote_api", version = "0.20.0" }
solana-vote-signer = { path = "../vote-signer", version = "0.20.0" }
solana-budget-api = { path = "../programs/budget_api", version = "0.20.5" }
solana-client = { path = "../client", version = "0.20.5" }
solana-config-api = { path = "../programs/config_api", version = "0.20.5" }
solana-drone = { path = "../drone", version = "0.20.5" }
solana-logger = { path = "../logger", version = "0.20.5" }
solana-netutil = { path = "../netutil", version = "0.20.5" }
solana-runtime = { path = "../runtime", version = "0.20.5" }
solana-sdk = { path = "../sdk", version = "0.20.5" }
solana-stake-api = { path = "../programs/stake_api", version = "0.20.5" }
solana-storage-api = { path = "../programs/storage_api", version = "0.20.5" }
solana-vote-api = { path = "../programs/vote_api", version = "0.20.5" }
solana-vote-signer = { path = "../vote-signer", version = "0.20.5" }
url = "2.1.0"

[dev-dependencies]
solana-core = { path = "../core", version = "0.20.0" }
solana-budget-program = { path = "../programs/budget_program", version = "0.20.0" }
solana-core = { path = "../core", version = "0.20.5" }
solana-budget-program = { path = "../programs/budget_program", version = "0.20.5" }

[[bin]]
name = "solana"
@ -46,6 +46,9 @@ const USERDATA_CHUNK_SIZE: usize = 229; // Keep program chunks under PACKET_DATA
#[allow(clippy::large_enum_variant)]
pub enum CliCommand {
    // Cluster Query Commands
    Catchup {
        node_pubkey: Pubkey,
    },
    ClusterVersion,
    Fees,
    GetEpochInfo,

@ -211,6 +214,7 @@ impl Default for CliConfig {
pub fn parse_command(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, Box<dyn error::Error>> {
    let response = match matches.subcommand() {
        // Cluster Query Commands
        ("catchup", Some(matches)) => parse_catchup(matches),
        ("cluster-version", Some(_matches)) => Ok(CliCommandInfo {
            command: CliCommand::ClusterVersion,
            require_keypair: false,

@ -329,7 +333,6 @@ pub fn parse_command(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, Box<dyn
        }
        ("balance", Some(matches)) => {
            let pubkey = pubkey_of(&matches, "pubkey");
            println!("{:?}", pubkey);
            Ok(CliCommandInfo {
                command: CliCommand::Balance {
                    pubkey,

@ -357,7 +360,7 @@ pub fn parse_command(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, Box<dyn
        },
        ("pay", Some(matches)) => {
            let lamports = amount_of(matches, "amount", "unit").expect("Invalid amount");
            let to = value_of(&matches, "to").unwrap();
            let to = pubkey_of(&matches, "to").unwrap();
            let timestamp = if matches.is_present("timestamp") {
                // Parse input for serde_json
                let date_string = if !matches.value_of("timestamp").unwrap().contains('Z') {

@ -815,6 +818,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
        // Cluster Query Commands

        // Return software version of solana-cli and cluster entrypoint node
        CliCommand::Catchup { node_pubkey } => process_catchup(&rpc_client, node_pubkey),
        CliCommand::ClusterVersion => process_cluster_version(&rpc_client, config),
        CliCommand::Fees => process_fees(&rpc_client),
        CliCommand::GetGenesisBlockhash => process_get_genesis_blockhash(&rpc_client),

@ -1289,10 +1293,10 @@ pub fn app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> App<'ab, '
        .arg(
            Arg::with_name("to")
                .index(1)
                .value_name("PUBKEY")
                .value_name("TO PUBKEY")
                .takes_value(true)
                .required(true)
                .validator(is_pubkey)
                .validator(is_pubkey_or_keypair)
                .help("The pubkey of recipient"),
        )
        .arg(
@ -4,19 +4,24 @@ use crate::{
        CliError, ProcessResult,
    },
    display::println_name_value,
    input_parsers::pubkey_of,
    input_validators::is_pubkey_or_keypair,
};
use clap::{value_t_or_exit, App, Arg, ArgMatches, SubCommand};
use console::{style, Emoji};
use indicatif::{ProgressBar, ProgressStyle};
use serde_json::Value;
use solana_client::{rpc_client::RpcClient, rpc_request::RpcVoteAccountInfo};
use solana_sdk::{
    clock,
    hash::Hash,
    pubkey::Pubkey,
    signature::{Keypair, KeypairUtil},
    system_transaction,
};
use std::{
    collections::VecDeque,
    thread::sleep,
    time::{Duration, Instant},
};

@ -31,6 +36,19 @@ pub trait ClusterQuerySubCommands {
impl ClusterQuerySubCommands for App<'_, '_> {
    fn cluster_query_subcommands(self) -> Self {
        self.subcommand(
            SubCommand::with_name("catchup")
                .about("Wait for a validator to catch up to the cluster")
                .arg(
                    Arg::with_name("node_pubkey")
                        .index(1)
                        .takes_value(true)
                        .value_name("PUBKEY")
                        .validator(is_pubkey_or_keypair)
                        .required(true)
                        .help("Identity pubkey of the validator"),
                ),
        )
        .subcommand(
            SubCommand::with_name("cluster-version")
                .about("Get the version of the cluster entrypoint"),
        )

@ -89,6 +107,14 @@ impl ClusterQuerySubCommands {
    }
}

pub fn parse_catchup(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
    let node_pubkey = pubkey_of(matches, "node_pubkey").unwrap();
    Ok(CliCommandInfo {
        command: CliCommand::Catchup { node_pubkey },
        require_keypair: false,
    })
}

pub fn parse_cluster_ping(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
    let interval = Duration::from_secs(value_t_or_exit!(matches, "interval", u64));
    let count = if matches.is_present("count") {

@ -133,6 +159,73 @@ pub fn process_cluster_version(rpc_client: &RpcClient, config: &CliConfig) -> Pr
    Ok("".to_string())
}

/// Creates a new progress bar for a process that will take an unknown amount of time
fn new_spinner_progress_bar() -> ProgressBar {
    let progress_bar = ProgressBar::new(42);
    progress_bar
        .set_style(ProgressStyle::default_spinner().template("{spinner:.green} {wide_msg}"));
    progress_bar.enable_steady_tick(100);
    progress_bar
}

pub fn process_catchup(rpc_client: &RpcClient, node_pubkey: &Pubkey) -> ProcessResult {
    let cluster_nodes = rpc_client.get_cluster_nodes()?;

    let rpc_addr = cluster_nodes
        .iter()
        .find(|contact_info| contact_info.pubkey == node_pubkey.to_string())
        .ok_or_else(|| format!("Contact information not found for {}", node_pubkey))?
        .rpc
        .ok_or_else(|| format!("RPC service not found for {}", node_pubkey))?;

    let progress_bar = new_spinner_progress_bar();
    progress_bar.set_message("Connecting...");

    let node_client = RpcClient::new_socket(rpc_addr);
    let mut previous_rpc_slot = std::u64::MAX;
    let mut previous_slot_distance = 0;
    let sleep_interval = 5;
    loop {
        let rpc_slot = rpc_client.get_slot()?;
        let node_slot = node_client.get_slot()?;
        if node_slot > std::cmp::min(previous_rpc_slot, rpc_slot) {
            progress_bar.finish_and_clear();
            return Ok(format!(
                "{} has caught up (us:{} them:{})",
                node_pubkey, node_slot, rpc_slot,
            ));
        }

        let slot_distance = rpc_slot as i64 - node_slot as i64;
        progress_bar.set_message(&format!(
            "Validator is {} slots away (us:{} them:{}){}",
            slot_distance,
            node_slot,
            rpc_slot,
            if previous_rpc_slot == std::u64::MAX {
                "".to_string()
            } else {
                let slots_per_second =
                    (previous_slot_distance - slot_distance) as f64 / f64::from(sleep_interval);

                format!(
                    " and {} at {:.1} slots/second",
                    if slots_per_second < 0.0 {
                        "falling behind"
                    } else {
                        "gaining"
                    },
                    slots_per_second,
                )
            }
        ));

        sleep(Duration::from_secs(sleep_interval as u64));
        previous_rpc_slot = rpc_slot;
        previous_slot_distance = slot_distance;
    }
}

pub fn process_fees(rpc_client: &RpcClient) -> ProcessResult {
    let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?;
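
The rate reported by `process_catchup` is just the change in slot distance between two successive samples divided by the sleep interval. A minimal standalone version of that arithmetic, directly lifted from the loop above:

```rust
// Given the slot distance at two samples taken `sleep_interval` seconds
// apart, compute how fast the validator is closing the gap. A positive
// result means the validator is gaining on the cluster.
fn catchup_rate(previous_distance: i64, current_distance: i64, sleep_interval: u8) -> f64 {
    (previous_distance - current_distance) as f64 / f64::from(sleep_interval)
}

fn main() {
    // e.g. 120 slots behind five seconds ago, 80 slots behind now:
    let rate = catchup_rate(120, 80, 5);
    assert!(rate > 0.0);
    println!("gaining at {:.1} slots/second", rate);
}
```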
@ -1,6 +1,6 @@
[package]
name = "solana-client"
version = "0.20.0"
version = "0.20.5"
description = "Solana Client"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"

@ -19,10 +19,10 @@ reqwest = { version = "0.9.22", default-features = false, features = ["rustls-tl
serde = "1.0.101"
serde_derive = "1.0.101"
serde_json = "1.0.41"
solana-netutil = { path = "../netutil", version = "0.20.0" }
solana-sdk = { path = "../sdk", version = "0.20.0" }
solana-netutil = { path = "../netutil", version = "0.20.5" }
solana-sdk = { path = "../sdk", version = "0.20.5" }

[dev-dependencies]
jsonrpc-core = "14.0.3"
jsonrpc-http-server = "14.0.1"
solana-logger = { path = "../logger", version = "0.20.0" }
jsonrpc-http-server = "14.0.3"
solana-logger = { path = "../logger", version = "0.20.5" }
@ -2,7 +2,7 @@ use crate::client_error::ClientError;
use crate::generic_rpc_client_request::GenericRpcClientRequest;
use crate::mock_rpc_client_request::MockRpcClientRequest;
use crate::rpc_client_request::RpcClientRequest;
use crate::rpc_request::{RpcEpochInfo, RpcRequest, RpcVoteAccountStatus};
use crate::rpc_request::{RpcContactInfo, RpcEpochInfo, RpcRequest, RpcVoteAccountStatus};
use bincode::serialize;
use log::*;
use serde_json::{json, Value};

@ -120,6 +120,25 @@ impl RpcClient {
        })
    }

    pub fn get_cluster_nodes(&self) -> io::Result<Vec<RpcContactInfo>> {
        let response = self
            .client
            .send(&RpcRequest::GetClusterNodes, None, 0)
            .map_err(|err| {
                io::Error::new(
                    io::ErrorKind::Other,
                    format!("GetClusterNodes request failure: {:?}", err),
                )
            })?;

        serde_json::from_value(response).map_err(|err| {
            io::Error::new(
                io::ErrorKind::Other,
                format!("GetClusterNodes parse failure: {}", err),
            )
        })
    }

    pub fn get_epoch_info(&self) -> io::Result<RpcEpochInfo> {
        let response = self
            .client
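
A sketch of how a caller can use the new `get_cluster_nodes` helper, mirroring the lookup that `process_catchup` performs above; the URL is a placeholder, not something taken from this diff:

```rust
use solana_client::rpc_client::RpcClient;
use std::net::SocketAddr;

// Resolve the RPC address a given validator advertises over gossip, if any.
fn rpc_addr_for(node_pubkey: &str) -> std::io::Result<Option<SocketAddr>> {
    let rpc_client = RpcClient::new("http://127.0.0.1:8899".to_string());
    let nodes = rpc_client.get_cluster_nodes()?;
    Ok(nodes
        .into_iter()
        .find(|node| node.pubkey == node_pubkey)
        .and_then(|node| node.rpc))
}
```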
@ -1,5 +1,5 @@
use serde_json::{json, Value};
use std::{error, fmt};
use std::{error, fmt, net::SocketAddr};

#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]

@ -49,6 +49,18 @@ pub struct RpcVoteAccountInfo {
    pub root_slot: u64,
}

#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct RpcContactInfo {
    /// Pubkey of the node as a base-58 string
    pub pubkey: String,
    /// Gossip port
    pub gossip: Option<SocketAddr>,
    /// Tpu port
    pub tpu: Option<SocketAddr>,
    /// JSON RPC port
    pub rpc: Option<SocketAddr>,
}

#[derive(Debug, PartialEq)]
pub enum RpcRequest {
    ConfirmTransaction,
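
Because `RpcContactInfo` derives `Serialize`/`Deserialize` and `std::net::SocketAddr` deserializes from an "ip:port" string, each element of a getClusterNodes response maps straight onto the struct. A small sketch with illustrative field values (the pubkey and addresses below are made up):

```rust
use solana_client::rpc_request::RpcContactInfo;

fn main() {
    let json = r#"{"pubkey":"Base58NodeIdGoesHere","gossip":"10.0.0.1:8001","tpu":"10.0.0.1:8003","rpc":"10.0.0.1:8899"}"#;
    let info: RpcContactInfo = serde_json::from_str(json).unwrap();
    assert_eq!(info.rpc.unwrap().port(), 8899);
}
```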
@ -1,7 +1,7 @@
[package]
name = "solana-core"
description = "Blockchain, Rebuilt for Scale"
version = "0.20.0"
version = "0.20.5"
documentation = "https://docs.rs/solana"
homepage = "https://solana.com/"
readme = "../README.md"

@ -15,7 +15,6 @@ codecov = { repository = "solana-labs/solana", branch = "master", service = "git

[features]
pin_gpu_memory = []
move = []

[dependencies]
bincode = "1.2.0"

@ -30,7 +29,7 @@ indexmap = "1.1"
itertools = "0.8.0"
jsonrpc-core = "14.0.3"
jsonrpc-derive = "14.0.3"
jsonrpc-http-server = "14.0.1"
jsonrpc-http-server = "14.0.3"
jsonrpc-pubsub = "14.0.3"
jsonrpc-ws-server = "14.0.3"
lazy_static = "1.4.0"

@ -45,25 +44,25 @@ rayon = "1.2.0"
serde = "1.0.101"
serde_derive = "1.0.101"
serde_json = "1.0.41"
solana-budget-api = { path = "../programs/budget_api", version = "0.20.0" }
solana-budget-program = { path = "../programs/budget_program", version = "0.20.0" }
solana-chacha-sys = { path = "../chacha-sys", version = "0.20.0" }
solana-client = { path = "../client", version = "0.20.0" }
solana-drone = { path = "../drone", version = "0.20.0" }
solana-budget-api = { path = "../programs/budget_api", version = "0.20.5" }
solana-budget-program = { path = "../programs/budget_program", version = "0.20.5" }
solana-chacha-sys = { path = "../chacha-sys", version = "0.20.5" }
solana-client = { path = "../client", version = "0.20.5" }
solana-drone = { path = "../drone", version = "0.20.5" }
solana-ed25519-dalek = "0.2.0"
solana-ledger = { path = "../ledger", version = "0.20.0" }
solana-logger = { path = "../logger", version = "0.20.0" }
solana-merkle-tree = { path = "../merkle-tree", version = "0.20.0" }
solana-metrics = { path = "../metrics", version = "0.20.0" }
solana-measure = { path = "../measure", version = "0.20.0" }
solana-netutil = { path = "../netutil", version = "0.20.0" }
solana-runtime = { path = "../runtime", version = "0.20.0" }
solana-sdk = { path = "../sdk", version = "0.20.0" }
solana-stake-api = { path = "../programs/stake_api", version = "0.20.0" }
solana-storage-api = { path = "../programs/storage_api", version = "0.20.0" }
solana-storage-program = { path = "../programs/storage_program", version = "0.20.0" }
solana-vote-api = { path = "../programs/vote_api", version = "0.20.0" }
solana-vote-signer = { path = "../vote-signer", version = "0.20.0" }
solana-ledger = { path = "../ledger", version = "0.20.5" }
solana-logger = { path = "../logger", version = "0.20.5" }
solana-merkle-tree = { path = "../merkle-tree", version = "0.20.5" }
solana-metrics = { path = "../metrics", version = "0.20.5" }
solana-measure = { path = "../measure", version = "0.20.5" }
solana-netutil = { path = "../netutil", version = "0.20.5" }
solana-runtime = { path = "../runtime", version = "0.20.5" }
solana-sdk = { path = "../sdk", version = "0.20.5" }
solana-stake-api = { path = "../programs/stake_api", version = "0.20.5" }
solana-storage-api = { path = "../programs/storage_api", version = "0.20.5" }
solana-storage-program = { path = "../programs/storage_program", version = "0.20.5" }
solana-vote-api = { path = "../programs/vote_api", version = "0.20.5" }
solana-vote-signer = { path = "../vote-signer", version = "0.20.5" }
symlink = "0.1.0"
sys-info = "0.5.8"
tempfile = "3.1.0"

@ -72,7 +71,7 @@ tokio-codec = "0.1"
tokio-fs = "0.1"
tokio-io = "0.1"
untrusted = "0.7.0"
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "0.20.0" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "0.20.5" }
reed-solomon-erasure = { package = "solana-reed-solomon-erasure", version = "4.0.1-3", features = ["simd-accel"] }

[dev-dependencies]
@ -18,8 +18,8 @@ fn bench_write_shreds(bench: &mut Bencher, entries: Vec<Entry>, ledger_path: &Pa
    let blocktree =
        Blocktree::open(ledger_path).expect("Expected to be able to open database ledger");
    bench.iter(move || {
        let shreds = entries_to_test_shreds(entries.clone(), 0, 0, true);
        blocktree.insert_shreds(shreds, None).unwrap();
        let shreds = entries_to_test_shreds(entries.clone(), 0, 0, true, 0);
        blocktree.insert_shreds(shreds, None, false).unwrap();
    });

    Blocktree::destroy(ledger_path).expect("Expected successful database destruction");

@ -36,9 +36,9 @@ fn setup_read_bench(
    let entries = create_ticks(num_large_shreds * 4 + num_small_shreds * 2, Hash::default());

    // Convert the entries to shreds, write the shreds to the ledger
    let shreds = entries_to_test_shreds(entries, slot, slot.saturating_sub(1), true);
    let shreds = entries_to_test_shreds(entries, slot, slot.saturating_sub(1), true, 0);
    blocktree
        .insert_shreds(shreds, None)
        .insert_shreds(shreds, None, false)
        .expect("Expected successful insertion of shreds into ledger");
}

@ -129,8 +129,8 @@ fn bench_insert_data_shred_small(bench: &mut Bencher) {
    let num_entries = 32 * 1024;
    let entries = create_ticks(num_entries, Hash::default());
    bench.iter(move || {
        let shreds = entries_to_test_shreds(entries.clone(), 0, 0, true);
        blocktree.insert_shreds(shreds, None).unwrap();
        let shreds = entries_to_test_shreds(entries.clone(), 0, 0, true, 0);
        blocktree.insert_shreds(shreds, None, false).unwrap();
    });
    Blocktree::destroy(&ledger_path).expect("Expected successful database destruction");
}

@ -144,8 +144,8 @@ fn bench_insert_data_shred_big(bench: &mut Bencher) {
    let num_entries = 32 * 1024;
    let entries = create_ticks(num_entries, Hash::default());
    bench.iter(move || {
        let shreds = entries_to_test_shreds(entries.clone(), 0, 0, true);
        blocktree.insert_shreds(shreds, None).unwrap();
        let shreds = entries_to_test_shreds(entries.clone(), 0, 0, true, 0);
        blocktree.insert_shreds(shreds, None, false).unwrap();
    });
    Blocktree::destroy(&ledger_path).expect("Expected successful database destruction");
}
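
Throughout these changes `insert_shreds` gains a third boolean argument. Its name is not visible in this diff, but the call sites suggest it marks shreds the node produced itself (broadcast paths pass `true`) versus shreds received from peers (archiver, repair, and bench paths pass `false`); treat that reading as an assumption. A sketch of the two call shapes side by side:

```rust
use solana_ledger::blocktree::Blocktree;
use solana_ledger::shred::Shred;

// Both flavors of the widened call, as they appear in this diff. The bool's
// semantics ("trust these shreds") are inferred, not confirmed here.
fn insert_both(blocktree: &Blocktree, own: Vec<Shred>, received: Vec<Shred>) {
    blocktree
        .insert_shreds(own, None, true) // broadcast path: self-produced shreds
        .expect("insert own shreds");
    blocktree
        .insert_shreds(received, None, false) // archiver/repair path: network shreds
        .expect("insert received shreds");
}
```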
@ -35,7 +35,7 @@ fn bench_shredder_ticks(bencher: &mut Bencher) {
    let num_ticks = max_ticks_per_n_shreds(1) * num_shreds as u64;
    let entries = create_ticks(num_ticks, Hash::default());
    bencher.iter(|| {
        let shredder = Shredder::new(1, 0, RECOMMENDED_FEC_RATE, kp.clone()).unwrap();
        let shredder = Shredder::new(1, 0, RECOMMENDED_FEC_RATE, kp.clone(), 0, 0).unwrap();
        shredder.entries_to_shreds(&entries, true, 0);
    })
}

@ -50,7 +50,7 @@ fn bench_shredder_large_entries(bencher: &mut Bencher) {
    let entries = make_large_unchained_entries(txs_per_entry, num_entries);
    // 1Mb
    bencher.iter(|| {
        let shredder = Shredder::new(1, 0, RECOMMENDED_FEC_RATE, kp.clone()).unwrap();
        let shredder = Shredder::new(1, 0, RECOMMENDED_FEC_RATE, kp.clone(), 0, 0).unwrap();
        shredder.entries_to_shreds(&entries, true, 0);
    })
}

@ -63,7 +63,7 @@ fn bench_deshredder(bencher: &mut Bencher) {
    let num_shreds = ((10000 * 1000) + (shred_size - 1)) / shred_size;
    let num_ticks = max_ticks_per_n_shreds(1) * num_shreds as u64;
    let entries = create_ticks(num_ticks, Hash::default());
    let shredder = Shredder::new(1, 0, RECOMMENDED_FEC_RATE, kp).unwrap();
    let shredder = Shredder::new(1, 0, RECOMMENDED_FEC_RATE, kp, 0, 0).unwrap();
    let data_shreds = shredder.entries_to_shreds(&entries, true, 0).0;
    bencher.iter(|| {
        let raw = &mut Shredder::deshred(&data_shreds).unwrap();

@ -75,7 +75,7 @@
fn bench_deserialize_hdr(bencher: &mut Bencher) {
    let data = vec![0; SIZE_OF_DATA_SHRED_PAYLOAD];

    let shred = Shred::new_from_data(2, 1, 1, Some(&data), true, true);
    let shred = Shred::new_from_data(2, 1, 1, Some(&data), true, true, 0, 0);

    bencher.iter(|| {
        let payload = shred.payload.clone();
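
`Shredder::new` and `Shred::new_from_data` each gain two trailing arguments in these benches. Judging by the broadcast-stage changes later in this diff, they appear to be the reference tick and the shred version; the parameter names are inferred from those call sites, not from a signature shown here. A sketch of the widened constructor call:

```rust
use solana_ledger::shred::{Shredder, RECOMMENDED_FEC_RATE};
use solana_sdk::signature::Keypair;
use std::sync::Arc;

fn main() {
    let keypair = Arc::new(Keypair::new());
    let (slot, parent_slot) = (1u64, 0u64);
    // Names assumed from the broadcast-stage call sites in this diff.
    let (reference_tick, shred_version) = (0u8, 0u16);
    let _shredder = Shredder::new(
        slot,
        parent_slot,
        RECOMMENDED_FEC_RATE,
        keypair,
        reference_tick,
        shred_version,
    )
    .expect("Expected to create a new shredder");
}
```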
@ -874,7 +874,7 @@ impl Archiver {
                .into_iter()
                .filter_map(|p| Shred::new_from_serialized_shred(p.data.to_vec()).ok())
                .collect();
            blocktree.insert_shreds(shreds, None)?;
            blocktree.insert_shreds(shreds, None, false)?;
        }
        // check if all the slots in the segment are complete
        if Self::segment_complete(start_slot, slots_per_segment, blocktree) {
@ -158,6 +158,7 @@ mod test {
            true,
            &Arc::new(Keypair::new()),
            entries,
            0,
        )
        .unwrap();
@ -43,6 +43,7 @@ impl BroadcastStageType {
        receiver: Receiver<WorkingBankEntry>,
        exit_sender: &Arc<AtomicBool>,
        blocktree: &Arc<Blocktree>,
        shred_version: u16,
    ) -> BroadcastStage {
        match self {
            BroadcastStageType::Standard => {

@ -53,7 +54,7 @@
                receiver,
                exit_sender,
                blocktree,
                StandardBroadcastRun::new(keypair),
                StandardBroadcastRun::new(keypair, shred_version),
            )
        }

@ -63,7 +64,7 @@
                receiver,
                exit_sender,
                blocktree,
                FailEntryVerificationBroadcastRun::new(),
                FailEntryVerificationBroadcastRun::new(shred_version),
            ),

            BroadcastStageType::BroadcastFakeBlobs => BroadcastStage::new(

@ -72,7 +73,7 @@
                receiver,
                exit_sender,
                blocktree,
                BroadcastFakeBlobsRun::new(0),
                BroadcastFakeBlobsRun::new(0, shred_version),
            ),
        }
    }

@ -246,7 +247,7 @@ mod test {
            entry_receiver,
            &exit_sender,
            &blocktree,
            StandardBroadcastRun::new(leader_keypair),
            StandardBroadcastRun::new(leader_keypair, 0),
        );

        MockBroadcastStage {
@ -6,13 +6,15 @@ use solana_sdk::hash::Hash;
pub(super) struct BroadcastFakeBlobsRun {
    last_blockhash: Hash,
    partition: usize,
    shred_version: u16,
}

impl BroadcastFakeBlobsRun {
    pub(super) fn new(partition: usize) -> Self {
    pub(super) fn new(partition: usize, shred_version: u16) -> Self {
        Self {
            last_blockhash: Hash::default(),
            partition,
            shred_version,
        }
    }
}

@ -44,6 +46,8 @@ impl BroadcastRun for BroadcastFakeBlobsRun {
            bank.parent().unwrap().slot(),
            RECOMMENDED_FEC_RATE,
            keypair.clone(),
            (bank.tick_height() % bank.ticks_per_slot()) as u8,
            self.shred_version,
        )
        .expect("Expected to create a new shredder");

@ -75,8 +79,7 @@ impl BroadcastRun for BroadcastFakeBlobsRun {
            self.last_blockhash = Hash::default();
        }

        blocktree.insert_shreds(data_shreds.clone(), None)?;
        blocktree.insert_shreds(coding_shreds.clone(), None)?;
        blocktree.insert_shreds(data_shreds.clone(), None, true)?;

        // 3) Start broadcast step
        let peers = cluster_info.read().unwrap().tvu_peers();
@ -2,11 +2,13 @@ use super::*;
use solana_ledger::shred::{Shredder, RECOMMENDED_FEC_RATE};
use solana_sdk::hash::Hash;

pub(super) struct FailEntryVerificationBroadcastRun {}
pub(super) struct FailEntryVerificationBroadcastRun {
    shred_version: u16,
}

impl FailEntryVerificationBroadcastRun {
    pub(super) fn new() -> Self {
        Self {}
    pub(super) fn new(shred_version: u16) -> Self {
        Self { shred_version }
    }
}

@ -42,6 +44,8 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun {
            bank.parent().unwrap().slot(),
            RECOMMENDED_FEC_RATE,
            keypair.clone(),
            (bank.tick_height() % bank.ticks_per_slot()) as u8,
            self.shred_version,
        )
        .expect("Expected to create a new shredder");

@ -58,7 +62,7 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun {
            .collect::<Vec<_>>();
        let all_seeds: Vec<[u8; 32]> = all_shreds.iter().map(|s| s.seed()).collect();
        blocktree
            .insert_shreds(all_shreds, None)
            .insert_shreds(all_shreds, None, true)
            .expect("Failed to insert shreds in blocktree");

        // 3) Start broadcast step
@ -2,9 +2,11 @@ use super::broadcast_utils::{self, ReceiveResults};
use super::*;
use crate::broadcast_stage::broadcast_utils::UnfinishedSlotInfo;
use solana_ledger::entry::Entry;
use solana_ledger::shred::{Shred, Shredder, RECOMMENDED_FEC_RATE};
use solana_ledger::shred::{Shred, Shredder, RECOMMENDED_FEC_RATE, SHRED_TICK_REFERENCE_MASK};
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::Keypair;
use solana_sdk::timing::duration_as_us;
use std::collections::HashMap;
use std::time::Duration;

#[derive(Default)]

@ -14,7 +16,7 @@ struct BroadcastStats {
    insert_shreds_elapsed: u64,
    broadcast_elapsed: u64,
    receive_elapsed: u64,
    clone_and_seed_elapsed: u64,
    seed_elapsed: u64,
}

impl BroadcastStats {

@ -23,7 +25,7 @@ impl BroadcastStats {
        self.shredding_elapsed = 0;
        self.broadcast_elapsed = 0;
        self.receive_elapsed = 0;
        self.clone_and_seed_elapsed = 0;
        self.seed_elapsed = 0;
    }
}

@ -33,22 +35,24 @@ pub(super) struct StandardBroadcastRun {
    current_slot_and_parent: Option<(u64, u64)>,
    slot_broadcast_start: Option<Instant>,
    keypair: Arc<Keypair>,
    shred_version: u16,
}

impl StandardBroadcastRun {
    pub(super) fn new(keypair: Arc<Keypair>) -> Self {
    pub(super) fn new(keypair: Arc<Keypair>, shred_version: u16) -> Self {
        Self {
            stats: BroadcastStats::default(),
            unfinished_slot: None,
            current_slot_and_parent: None,
            slot_broadcast_start: None,
            keypair,
            shred_version,
        }
    }

    fn check_for_interrupted_slot(&mut self) -> Option<Shred> {
    fn check_for_interrupted_slot(&mut self, max_ticks_in_slot: u8) -> Option<Shred> {
        let (slot, _) = self.current_slot_and_parent.unwrap();
        let last_unfinished_slot_shred = self
        let mut last_unfinished_slot_shred = self
            .unfinished_slot
            .map(|last_unfinished_slot| {
                if last_unfinished_slot.slot != slot {

@ -60,6 +64,8 @@ impl StandardBroadcastRun {
                    None,
                    true,
                    true,
                    max_ticks_in_slot & SHRED_TICK_REFERENCE_MASK,
                    self.shred_version,
                ))
            } else {
                None

@ -68,39 +74,20 @@ impl StandardBroadcastRun {
            .unwrap_or(None);

        // This shred should only be Some if the previous slot was interrupted
        if last_unfinished_slot_shred.is_some() {
        if let Some(ref mut shred) = last_unfinished_slot_shred {
            Shredder::sign_shred(&self.keypair, shred);
            self.unfinished_slot = None;
        }

        last_unfinished_slot_shred
    }

    fn coalesce_shreds(
        data_shreds: Vec<Shred>,
        coding_shreds: Vec<Shred>,
        last_unfinished_slot_shred: Option<Shred>,
    ) -> Vec<Shred> {
        if let Some(shred) = last_unfinished_slot_shred {
            data_shreds
                .iter()
                .chain(coding_shreds.iter())
                .cloned()
                .chain(std::iter::once(shred))
                .collect::<Vec<_>>()
        } else {
            data_shreds
                .iter()
                .chain(coding_shreds.iter())
                .cloned()
                .collect::<Vec<_>>()
        }
    }

    fn entries_to_shreds(
        &mut self,
        blocktree: &Blocktree,
        entries: &[Entry],
        is_slot_end: bool,
        reference_tick: u8,
    ) -> (Vec<Shred>, Vec<Shred>) {
        let (slot, parent_slot) = self.current_slot_and_parent.unwrap();
        let shredder = Shredder::new(

@ -108,6 +95,8 @@ impl StandardBroadcastRun {
            parent_slot,
            RECOMMENDED_FEC_RATE,
            self.keypair.clone(),
            reference_tick,
            self.shred_version,
        )
        .expect("Expected to create a new shredder");

@ -167,82 +156,104 @@ impl StandardBroadcastRun {
        let to_shreds_start = Instant::now();

        // 1) Check if slot was interrupted
        let last_unfinished_slot_shred = self.check_for_interrupted_slot();
        let last_unfinished_slot_shred =
            self.check_for_interrupted_slot(bank.ticks_per_slot() as u8);

        // 2) Convert entries to shreds and coding shreds
        let (data_shreds, coding_shreds) = self.entries_to_shreds(
        let (mut data_shreds, coding_shreds) = self.entries_to_shreds(
            blocktree,
            &receive_results.entries,
            last_tick_height == bank.max_tick_height(),
            (bank.tick_height() % bank.ticks_per_slot()) as u8,
        );
        if let Some(last_shred) = last_unfinished_slot_shred {
            data_shreds.push(last_shred);
        }
        let to_shreds_elapsed = to_shreds_start.elapsed();

        let clone_and_seed_start = Instant::now();
        let all_shreds =
            Self::coalesce_shreds(data_shreds, coding_shreds, last_unfinished_slot_shred);
        let all_shreds_ = all_shreds.clone();
        let all_seeds: Vec<[u8; 32]> = all_shreds.iter().map(|s| s.seed()).collect();
        let clone_and_seed_elapsed = clone_and_seed_start.elapsed();

        // 3) Insert shreds into blocktree
        let insert_shreds_start = Instant::now();
        blocktree
            .insert_shreds(all_shreds_, None)
            .expect("Failed to insert shreds in blocktree");
        let insert_shreds_elapsed = insert_shreds_start.elapsed();

        // 4) Broadcast the shreds
        let broadcast_start = Instant::now();
        let bank_epoch = bank.get_leader_schedule_epoch(bank.slot());
        let stakes = staking_utils::staked_nodes_at_epoch(&bank, bank_epoch);

        let all_shred_bufs: Vec<Vec<u8>> = all_shreds.into_iter().map(|s| s.payload).collect();
        trace!("Broadcasting {:?} shreds", all_shred_bufs.len());

        cluster_info.read().unwrap().broadcast_shreds(
            sock,
            all_shred_bufs,
            &all_seeds,
        self.maybe_insert_and_broadcast(
            data_shreds,
            true,
            blocktree,
            cluster_info,
            stakes.as_ref(),
            sock,
        )?;
        self.maybe_insert_and_broadcast(
            coding_shreds,
            false,
            blocktree,
            cluster_info,
            stakes.as_ref(),
            sock,
        )?;

        let broadcast_elapsed = broadcast_start.elapsed();

        self.update_broadcast_stats(
            duration_as_us(&receive_elapsed),
            duration_as_us(&to_shreds_elapsed),
            duration_as_us(&insert_shreds_elapsed),
            duration_as_us(&broadcast_elapsed),
            duration_as_us(&clone_and_seed_elapsed),
            last_tick_height == bank.max_tick_height(),
        );
        self.update_broadcast_stats(BroadcastStats {
            shredding_elapsed: duration_as_us(&to_shreds_elapsed),
            receive_elapsed: duration_as_us(&receive_elapsed),
            ..BroadcastStats::default()
        });

        if last_tick_height == bank.max_tick_height() {
            self.report_and_reset_stats();
            self.unfinished_slot = None;
        }

        Ok(())
    }

    #[allow(clippy::too_many_arguments)]
    fn update_broadcast_stats(
    fn maybe_insert_and_broadcast(
        &mut self,
        receive_entries_elapsed: u64,
        shredding_elapsed: u64,
        insert_shreds_elapsed: u64,
        broadcast_elapsed: u64,
        clone_and_seed_elapsed: u64,
        slot_ended: bool,
    ) {
        self.stats.receive_elapsed += receive_entries_elapsed;
        self.stats.shredding_elapsed += shredding_elapsed;
        self.stats.insert_shreds_elapsed += insert_shreds_elapsed;
        self.stats.broadcast_elapsed += broadcast_elapsed;
        self.stats.clone_and_seed_elapsed += clone_and_seed_elapsed;
        shreds: Vec<Shred>,
        insert: bool,
        blocktree: &Arc<Blocktree>,
        cluster_info: &Arc<RwLock<ClusterInfo>>,
        stakes: Option<&HashMap<Pubkey, u64>>,
        sock: &UdpSocket,
    ) -> Result<()> {
        let seed_start = Instant::now();
        let seeds: Vec<[u8; 32]> = shreds.iter().map(|s| s.seed()).collect();
        let seed_elapsed = seed_start.elapsed();

        if slot_ended {
            self.report_and_reset_stats()
        // Insert shreds into blocktree
        let insert_shreds_start = Instant::now();
        if insert {
            blocktree
                .insert_shreds(shreds.clone(), None, true)
                .expect("Failed to insert shreds in blocktree");
        }
        let insert_shreds_elapsed = insert_shreds_start.elapsed();

        // Broadcast the shreds
        let broadcast_start = Instant::now();
        let shred_bufs: Vec<Vec<u8>> = shreds.into_iter().map(|s| s.payload).collect();
        trace!("Broadcasting {:?} shreds", shred_bufs.len());

        cluster_info
            .read()
            .unwrap()
            .broadcast_shreds(sock, shred_bufs, &seeds, stakes)?;

        let broadcast_elapsed = broadcast_start.elapsed();

        self.update_broadcast_stats(BroadcastStats {
            insert_shreds_elapsed: duration_as_us(&insert_shreds_elapsed),
            broadcast_elapsed: duration_as_us(&broadcast_elapsed),
            seed_elapsed: duration_as_us(&seed_elapsed),
            ..BroadcastStats::default()
        });
        Ok(())
    }

    fn update_broadcast_stats(&mut self, stats: BroadcastStats) {
        self.stats.receive_elapsed += stats.receive_elapsed;
        self.stats.shredding_elapsed += stats.shredding_elapsed;
        self.stats.insert_shreds_elapsed += stats.insert_shreds_elapsed;
        self.stats.broadcast_elapsed += stats.broadcast_elapsed;
        self.stats.seed_elapsed += stats.seed_elapsed;
    }

    fn report_and_reset_stats(&mut self) {

@ -258,11 +269,7 @@ impl StandardBroadcastRun {
            ),
            ("broadcast_time", self.stats.broadcast_elapsed as i64, i64),
            ("receive_time", self.stats.receive_elapsed as i64, i64),
            (
                "clone_and_seed",
                self.stats.clone_and_seed_elapsed as i64,
                i64
            ),
            ("seed", self.stats.seed_elapsed as i64, i64),
            (
                "num_shreds",
                i64::from(self.unfinished_slot.unwrap().next_shred_index),

@ -340,6 +347,38 @@ mod test {
        )
    }

    #[test]
    fn test_interrupted_slot_last_shred() {
        let keypair = Arc::new(Keypair::new());
        let mut run = StandardBroadcastRun::new(keypair.clone(), 0);

        // Set up the slot to be interrupted
        let next_shred_index = 10;
        let slot = 1;
        let parent = 0;
        run.unfinished_slot = Some(UnfinishedSlotInfo {
            next_shred_index,
            slot,
            parent,
        });
        run.slot_broadcast_start = Some(Instant::now());

        // Set up a slot to interrupt the old slot
        run.current_slot_and_parent = Some((4, 2));

        // Slot 2 interrupted slot 1
        let shred = run
            .check_for_interrupted_slot(0)
            .expect("Expected a shred that signals an interrupt");

        // Validate the shred
        assert_eq!(shred.parent(), parent);
        assert_eq!(shred.slot(), slot);
        assert_eq!(shred.index(), next_shred_index);
        assert!(shred.is_data());
        assert!(shred.verify(&keypair.pubkey()));
    }

    #[test]
    fn test_slot_interrupt() {
        // Setup

@ -348,16 +387,16 @@ mod test {
        setup(num_shreds_per_slot);

        // Insert 1 less than the number of ticks needed to finish the slot
        let ticks = create_ticks(genesis_block.ticks_per_slot - 1, genesis_block.hash());
        let ticks0 = create_ticks(genesis_block.ticks_per_slot - 1, genesis_block.hash());
        let receive_results = ReceiveResults {
            entries: ticks.clone(),
            entries: ticks0.clone(),
            time_elapsed: Duration::new(3, 0),
            bank: bank0.clone(),
            last_tick_height: (ticks.len() - 1) as u64,
            last_tick_height: (ticks0.len() - 1) as u64,
        };

        // Step 1: Make an incomplete transmission for slot 0
        let mut standard_broadcast_run = StandardBroadcastRun::new(leader_keypair.clone());
        let mut standard_broadcast_run = StandardBroadcastRun::new(leader_keypair.clone(), 0);
        standard_broadcast_run
            .process_receive_results(&cluster_info, &socket, &blocktree, receive_results)
            .unwrap();

@ -371,7 +410,7 @@ mod test {
        standard_broadcast_run.stats.receive_elapsed = 10;

        // Try to fetch ticks from blocktree, nothing should break
        assert_eq!(blocktree.get_slot_entries(0, 0, None).unwrap(), ticks);
        assert_eq!(blocktree.get_slot_entries(0, 0, None).unwrap(), ticks0);
        assert_eq!(
            blocktree
                .get_slot_entries(0, num_shreds_per_slot, None)

@ -386,12 +425,12 @@ mod test {
        // Interrupting the slot should cause the unfinished_slot and stats to reset
        let num_shreds = 1;
        assert!(num_shreds < num_shreds_per_slot);
        let ticks = create_ticks(max_ticks_per_n_shreds(num_shreds), genesis_block.hash());
        let ticks1 = create_ticks(max_ticks_per_n_shreds(num_shreds), genesis_block.hash());
        let receive_results = ReceiveResults {
            entries: ticks.clone(),
            entries: ticks1.clone(),
            time_elapsed: Duration::new(2, 0),
            bank: bank2.clone(),
            last_tick_height: (ticks.len() - 1) as u64,
            last_tick_height: (ticks1.len() - 1) as u64,
        };
        standard_broadcast_run
            .process_receive_results(&cluster_info, &socket, &blocktree, receive_results)

@ -403,8 +442,18 @@ mod test {
        assert_eq!(unfinished_slot.next_shred_index as u64, num_shreds);
        assert_eq!(unfinished_slot.slot, 2);
        assert_eq!(unfinished_slot.parent, 0);

        // Check that the stats were reset as well
        assert_eq!(standard_broadcast_run.stats.receive_elapsed, 0);

        // Try to fetch the incomplete ticks from blocktree, should succeed
        assert_eq!(blocktree.get_slot_entries(0, 0, None).unwrap(), ticks0);
        assert_eq!(
            blocktree
                .get_slot_entries(0, num_shreds_per_slot, None)
                .unwrap(),
            vec![],
        );
    }

    #[test]

@ -423,7 +472,7 @@ mod test {
            last_tick_height: ticks.len() as u64,
        };

        let mut standard_broadcast_run = StandardBroadcastRun::new(leader_keypair);
        let mut standard_broadcast_run = StandardBroadcastRun::new(leader_keypair, 0);
        standard_broadcast_run
            .process_receive_results(&cluster_info, &socket, &blocktree, receive_results)
            .unwrap();
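
The stats refactor above replaces a six-argument `update_broadcast_stats` with a `BroadcastStats` argument plus struct-update syntax, so each call site only names the timings it actually measured and zeroes the rest via `..BroadcastStats::default()`. A minimal standalone version of the pattern:

```rust
#[derive(Default)]
struct BroadcastStats {
    shredding_elapsed: u64,
    insert_shreds_elapsed: u64,
    broadcast_elapsed: u64,
    receive_elapsed: u64,
    seed_elapsed: u64,
}

impl BroadcastStats {
    // Fold one measurement batch into the running totals.
    fn accumulate(&mut self, other: &BroadcastStats) {
        self.shredding_elapsed += other.shredding_elapsed;
        self.insert_shreds_elapsed += other.insert_shreds_elapsed;
        self.broadcast_elapsed += other.broadcast_elapsed;
        self.receive_elapsed += other.receive_elapsed;
        self.seed_elapsed += other.seed_elapsed;
    }
}

fn main() {
    let mut totals = BroadcastStats::default();
    // A call site reports only the fields it measured; the rest default to 0.
    totals.accumulate(&BroadcastStats {
        broadcast_elapsed: 250,
        seed_elapsed: 3,
        ..BroadcastStats::default()
    });
    assert_eq!(totals.broadcast_elapsed, 250);
    assert_eq!(totals.shredding_elapsed, 0);
}
```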
@ -149,6 +149,7 @@ mod tests {
            true,
            &Arc::new(keypair),
            entries,
            0,
        )
        .unwrap();

@ -164,8 +165,8 @@ mod tests {
        let mut hasher = Hasher::default();
        hasher.hash(&buf[..size]);

        // golden needs to be updated if blob stuff changes....
        let golden: Hash = "BdmY3efqu7zbnFuGRAeFANwa35HkDdQ7hwhYez3xGXiM"
        // golden needs to be updated if blob structure changes....
        let golden: Hash = "9K6NR4cazo7Jzk2CpyXmNaZMGqvfXG83JzyJipkoHare"
            .parse()
            .unwrap();
@ -147,6 +147,7 @@ mod tests {
            true,
            &Arc::new(Keypair::new()),
            entries,
            0,
        )
        .unwrap();

@ -208,6 +209,7 @@ mod tests {
            true,
            &Arc::new(Keypair::new()),
            entries,
            0,
        )
        .unwrap();
@ -12,17 +12,19 @@
//! * layer 2 - Everyone else, if layer 1 is `2^10`, layer 2 should be able to fit `2^20` number of nodes.
//!
//! Bank needs to provide an interface for us to query the stake weight
use crate::contact_info::ContactInfo;
use crate::crds_gossip::CrdsGossip;
use crate::crds_gossip_error::CrdsGossipError;
use crate::crds_gossip_pull::{CrdsFilter, CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS};
use crate::crds_value::{CrdsValue, CrdsValueLabel, EpochSlots, Vote};
use crate::packet::{to_shared_blob, Blob, Packet, SharedBlob};
use crate::repair_service::RepairType;
use crate::result::{Error, Result};
use crate::sendmmsg::{multicast, send_mmsg};
use crate::streamer::{BlobReceiver, BlobSender};
use crate::weighted_shuffle::{weighted_best, weighted_shuffle};
use crate::{
    contact_info::ContactInfo,
    crds_gossip::CrdsGossip,
    crds_gossip_error::CrdsGossipError,
    crds_gossip_pull::{CrdsFilter, CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS},
    crds_value::{self, CrdsData, CrdsValue, CrdsValueLabel, EpochSlots, Vote},
    packet::{to_shared_blob, Blob, Packet, SharedBlob},
    repair_service::RepairType,
    result::{Error, Result},
    sendmmsg::{multicast, send_mmsg},
    streamer::{BlobReceiver, BlobSender},
    weighted_shuffle::{weighted_best, weighted_shuffle},
};
use bincode::{deserialize, serialize, serialized_size};
use core::cmp;
use itertools::Itertools;

@ -195,8 +197,8 @@ impl ClusterInfo {

    pub fn insert_self(&mut self, contact_info: ContactInfo) {
        if self.id() == contact_info.id {
            let mut value = CrdsValue::ContactInfo(contact_info.clone());
            value.sign(&self.keypair);
            let value =
                CrdsValue::new_signed(CrdsData::ContactInfo(contact_info.clone()), &self.keypair);
            let _ = self.gossip.crds.insert(value, timestamp());
        }
    }

@ -205,8 +207,7 @@ impl ClusterInfo {
        let mut my_data = self.my_data();
        let now = timestamp();
        my_data.wallclock = now;
        let mut entry = CrdsValue::ContactInfo(my_data);
        entry.sign(&self.keypair);
        let entry = CrdsValue::new_signed(CrdsData::ContactInfo(my_data), &self.keypair);
        self.gossip.refresh_push_active_set(stakes);
        self.gossip
            .process_push_message(&self.id(), vec![entry], now);

@ -214,8 +215,7 @@ impl ClusterInfo {

    // TODO kill insert_info, only used by tests
    pub fn insert_info(&mut self, contact_info: ContactInfo) {
        let mut value = CrdsValue::ContactInfo(contact_info);
        value.sign(&self.keypair);
        let value = CrdsValue::new_signed(CrdsData::ContactInfo(contact_info), &self.keypair);
        let _ = self.gossip.crds.insert(value, timestamp());
    }

@ -297,17 +297,26 @@ impl ClusterInfo {

    pub fn push_epoch_slots(&mut self, id: Pubkey, root: u64, slots: BTreeSet<u64>) {
        let now = timestamp();
        let mut entry = CrdsValue::EpochSlots(EpochSlots::new(id, root, slots, now));
        entry.sign(&self.keypair);
        let entry = CrdsValue::new_signed(
            CrdsData::EpochSlots(EpochSlots::new(id, root, slots, now)),
            &self.keypair,
        );
        self.gossip
            .process_push_message(&self.id(), vec![entry], now);
    }

    pub fn push_vote(&mut self, vote: Transaction) {
    pub fn push_vote(&mut self, tower_index: usize, vote: Transaction) {
        let now = timestamp();
        let vote = Vote::new(&self.id(), vote, now);
        let mut entry = CrdsValue::Vote(vote);
        entry.sign(&self.keypair);
        let current_votes: Vec<_> = (0..crds_value::MAX_VOTES)
            .filter_map(|ix| {
                self.gossip
                    .crds
                    .lookup(&CrdsValueLabel::Vote(ix, self.id()))
            })
            .collect();
        let vote_ix = CrdsValue::compute_vote_index(tower_index, current_votes);
        let entry = CrdsValue::new_signed(CrdsData::Vote(vote_ix, vote), &self.keypair);
        self.gossip
            .process_push_message(&self.id(), vec![entry], now);
    }

@ -811,7 +820,7 @@ impl ClusterInfo {
    }
    pub fn map_repair_request(&self, repair_request: &RepairType) -> Result<Vec<u8>> {
        match repair_request {
            RepairType::Blob(slot, blob_index) => {
            RepairType::Shred(slot, blob_index) => {
                datapoint_debug!(
                    "cluster_info-repair",
                    ("repair-slot", *slot, i64),

@ -915,7 +924,7 @@ impl ClusterInfo {
            .expect("unable to serialize default filter") as usize;
        let protocol = Protocol::PullRequest(
            CrdsFilter::default(),
            CrdsValue::ContactInfo(ContactInfo::default()),
            CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::default())),
        );
        let protocol_size =
            serialized_size(&protocol).expect("unable to serialize gossip protocol") as usize;

@ -977,7 +986,7 @@ impl ClusterInfo {
    fn gossip_request(&mut self, stakes: &HashMap<Pubkey, u64>) -> Vec<(SocketAddr, Protocol)> {
        let pulls: Vec<_> = self.new_pull_requests(stakes);
        let pushes: Vec<_> = self.new_push_requests();
        vec![pulls, pushes].into_iter().flat_map(|x| x).collect()
        vec![pulls, pushes].into_iter().flatten().collect()
    }

    /// At random pick a node and try to get updated changes from them

@ -1161,9 +1170,7 @@ impl ClusterInfo {
                1
            );
        } else if caller.contact_info().is_some() {
            if caller.contact_info().unwrap().pubkey()
                == me.read().unwrap().gossip.id
            {
            if caller.contact_info().unwrap().id == me.read().unwrap().gossip.id {
                warn!("PullRequest ignored, I'm talking to myself");
                inc_new_counter_debug!("cluster_info-window-request-loopback", 1);
            } else {

@ -1509,6 +1516,7 @@ impl ClusterInfo {
            daddr,
            daddr,
            daddr,
            daddr,
            timestamp(),
        );
        (node, gossip_socket, Some(ip_echo))

@ -1529,6 +1537,7 @@ impl ClusterInfo {
            daddr,
            daddr,
            daddr,
            daddr,
            timestamp(),
        );
        (node, gossip_socket, None)

@ -1612,6 +1621,7 @@ impl Node {
            gossip.local_addr().unwrap(),
            tvu.local_addr().unwrap(),
            tvu_forwards.local_addr().unwrap(),
            repair.local_addr().unwrap(),
            empty,
            empty,
            storage.local_addr().unwrap(),

@ -1658,6 +1668,7 @@ impl Node {
            gossip_addr,
            tvu.local_addr().unwrap(),
            tvu_forwards.local_addr().unwrap(),
            repair.local_addr().unwrap(),
            tpu.local_addr().unwrap(),
            tpu_forwards.local_addr().unwrap(),
            storage.local_addr().unwrap(),

@ -1719,7 +1730,7 @@ impl Node {
        let (_, retransmit_sockets) =
            multi_bind_in_range(port_range, 8).expect("retransmit multi_bind");

        let (_, repair) = Self::bind(port_range);
        let (repair_port, repair) = Self::bind(port_range);
        let (_, broadcast) = Self::bind(port_range);

        let info = ContactInfo::new(

@ -1727,6 +1738,7 @@ impl Node {
            SocketAddr::new(gossip_addr.ip(), gossip_port),
            SocketAddr::new(gossip_addr.ip(), tvu_port),
            SocketAddr::new(gossip_addr.ip(), tvu_forwards_port),
            SocketAddr::new(gossip_addr.ip(), repair_port),
            SocketAddr::new(gossip_addr.ip(), tpu_port),
            SocketAddr::new(gossip_addr.ip(), tpu_forwards_port),
            socketaddr_any!(),

@ -1870,7 +1882,7 @@ mod tests {
    fn window_index_request() {
        let me = ContactInfo::new_localhost(&Pubkey::new_rand(), timestamp());
        let mut cluster_info = ClusterInfo::new_with_invalid_keypair(me);
        let rv = cluster_info.repair_request(&RepairType::Blob(0, 0));
        let rv = cluster_info.repair_request(&RepairType::Shred(0, 0));
        assert_matches!(rv, Err(Error::ClusterInfoError(ClusterInfoError::NoPeers)));

        let gossip_addr = socketaddr!([127, 0, 0, 1], 1234);

@ -1884,11 +1896,12 @@ mod tests {
            socketaddr!([127, 0, 0, 1], 1239),
            socketaddr!([127, 0, 0, 1], 1240),
            socketaddr!([127, 0, 0, 1], 1241),
            socketaddr!([127, 0, 0, 1], 1242),
            0,
        );
        cluster_info.insert_info(nxt.clone());
        let rv = cluster_info
            .repair_request(&RepairType::Blob(0, 0))
            .repair_request(&RepairType::Shred(0, 0))
            .unwrap();
        assert_eq!(nxt.gossip, gossip_addr);
        assert_eq!(rv.0, nxt.gossip);

@ -1904,6 +1917,7 @@ mod tests {
            socketaddr!([127, 0, 0, 1], 1239),
            socketaddr!([127, 0, 0, 1], 1240),
            socketaddr!([127, 0, 0, 1], 1241),
            socketaddr!([127, 0, 0, 1], 1242),
            0,
        );
        cluster_info.insert_info(nxt);

@ -1912,7 +1926,7 @@ mod tests {
        while !one || !two {
            //this randomly picks an option, so eventually it should pick both
            let rv = cluster_info
                .repair_request(&RepairType::Blob(0, 0))
                .repair_request(&RepairType::Shred(0, 0))
                .unwrap();
            if rv.0 == gossip_addr {
                one = true;

@ -1941,6 +1955,7 @@ mod tests {
            socketaddr!("127.0.0.1:1239"),
            socketaddr!("127.0.0.1:1240"),
            socketaddr!("127.0.0.1:1241"),
            socketaddr!("127.0.0.1:1242"),
            0,
        );
        let rv = ClusterInfo::run_window_request(

@ -1964,7 +1979,7 @@ mod tests {
        );

        blocktree
            .insert_shreds(vec![shred_info], None)
            .insert_shreds(vec![shred_info], None, false)
            .expect("Expect successful ledger write");

        let rv = ClusterInfo::run_window_request(

@ -2046,7 +2061,7 @@ mod tests {
        let (shreds, _) = make_many_slot_entries(1, 3, 5);

        blocktree
            .insert_shreds(shreds, None)
            .insert_shreds(shreds, None, false)
            .expect("Expect successful ledger write");

        // We don't have slot 4, so we don't know how to service this request

@ -2336,7 +2351,7 @@ mod tests {

        // add a vote
        let tx = test_tx();
        cluster_info.push_vote(tx.clone());
        cluster_info.push_vote(0, tx.clone());

        // -1 to make sure that the clock is strictly lower then when insert occurred
        let (votes, max_ts) = cluster_info.get_votes(now - 1);

@ -2376,7 +2391,8 @@ mod tests {
    }

    // now add this message back to the table and make sure after the next pull, the entrypoint is unset
    let entrypoint_crdsvalue = CrdsValue::ContactInfo(entrypoint.clone());
    let entrypoint_crdsvalue =
        CrdsValue::new_unsigned(CrdsData::ContactInfo(entrypoint.clone()));
    let cluster_info = Arc::new(RwLock::new(cluster_info));
    ClusterInfo::handle_pull_response(
        &cluster_info,

@ -2393,7 +2409,7 @@ mod tests {

    #[test]
    fn test_split_messages_small() {
        let value = CrdsValue::ContactInfo(ContactInfo::default());
        let value = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::default()));
        test_split_messages(value);
    }

@ -2403,13 +2419,12 @@ mod tests {
        for i in 0..128 {
            btree_slots.insert(i);
        }
        let value = CrdsValue::EpochSlots(EpochSlots {
        let value = CrdsValue::new_unsigned(CrdsData::EpochSlots(EpochSlots {
            from: Pubkey::default(),
            root: 0,
            slots: btree_slots,
            signature: Signature::default(),
            wallclock: 0,
        });
        }));
        test_split_messages(value);
    }

@ -2433,7 +2448,7 @@ mod tests {
    }

    fn check_pull_request_size(filter: CrdsFilter) {
        let value = CrdsValue::ContactInfo(ContactInfo::default());
        let value = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::default()));
        let protocol = Protocol::PullRequest(filter, value.clone());
        assert!(serialized_size(&protocol).unwrap() <= PACKET_DATA_SIZE as u64);
    }
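
The `push_vote` change keys gossiped votes by an index in `0..MAX_VOTES` rather than a single label per node, and `CrdsValue::compute_vote_index` picks which slot a new vote occupies. That function is not shown in this diff, so the following is only a toy illustration of the idea (reuse a free index, otherwise evict the stalest entry); the value of `MAX_VOTES` is also assumed:

```rust
const MAX_VOTES: usize = 32; // assumed; the diff only shows the name

// Each element is the wallclock of the vote currently stored at that index,
// or None if the index is free. Pick the first free index, else the oldest.
fn compute_vote_index(current: &[Option<u64>]) -> usize {
    current.iter().position(Option::is_none).unwrap_or_else(|| {
        current
            .iter()
            .enumerate()
            .min_by_key(|(_, wallclock)| wallclock.unwrap_or(0))
            .map(|(ix, _)| ix)
            .unwrap_or(0)
    })
}

fn main() {
    let mut slots: Vec<Option<u64>> = vec![None; MAX_VOTES];
    assert_eq!(compute_vote_index(&slots), 0); // empty table: index 0
    slots[0] = Some(100);
    assert_eq!(compute_vote_index(&slots), 1); // next free slot
}
```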
@@ -8,7 +8,6 @@ use rand::SeedableRng;
use rand_chacha::ChaChaRng;
use solana_ledger::blocktree::Blocktree;
use solana_ledger::rooted_slot_iterator::RootedSlotIterator;
use solana_metrics::datapoint;
use solana_sdk::{epoch_schedule::EpochSchedule, pubkey::Pubkey};
use std::{
cmp,
@@ -27,6 +26,9 @@ pub const REPAIR_REDUNDANCY: usize = 1;
pub const NUM_BUFFER_SLOTS: usize = 50;
pub const GOSSIP_DELAY_SLOTS: usize = 2;
pub const NUM_SLOTS_PER_UPDATE: usize = 2;
// Time between allowing repair for same slot for same validator
pub const REPAIR_SAME_SLOT_THRESHOLD: u64 = 5000;
use solana_sdk::timing::timestamp;

// Represents the blobs that a repairman is responsible for repairing in specific slot. More
// specifically, a repairman is responsible for every blob in this slot with index
@@ -73,6 +75,13 @@ impl Iterator for BlobIndexesToRepairIterator {
}
}

#[derive(Default)]
struct RepaireeInfo {
last_root: u64,
last_ts: u64,
last_repaired_slot_and_ts: (u64, u64),
}

pub struct ClusterInfoRepairListener {
thread_hdls: Vec<JoinHandle<()>>,
}
@@ -93,10 +102,10 @@ impl ClusterInfoRepairListener {
// 1) The latest timestamp of the EpochSlots gossip message at which a repair was
// sent to this peer
// 2) The latest root the peer gossiped
let mut peer_roots: HashMap<Pubkey, (u64, u64)> = HashMap::new();
let mut peer_infos: HashMap<Pubkey, RepaireeInfo> = HashMap::new();
let _ = Self::recv_loop(
&blocktree,
&mut peer_roots,
&mut peer_infos,
&exit,
&cluster_info,
&epoch_schedule,
@@ -110,7 +119,7 @@ impl ClusterInfoRepairListener {

fn recv_loop(
blocktree: &Blocktree,
peer_roots: &mut HashMap<Pubkey, (u64, u64)>,
peer_infos: &mut HashMap<Pubkey, RepaireeInfo>,
exit: &Arc<AtomicBool>,
cluster_info: &Arc<RwLock<ClusterInfo>>,
epoch_schedule: &EpochSchedule,
@@ -134,7 +143,7 @@ impl ClusterInfoRepairListener {
&my_pubkey,
&peer.id,
cluster_info,
peer_roots,
peer_infos,
&mut my_gossiped_root,
) {
peers_needing_repairs.insert(peer.id, repairee_epoch_slots);
@@ -145,7 +154,7 @@ impl ClusterInfoRepairListener {
let _ = Self::serve_repairs(
&my_pubkey,
blocktree,
peer_roots,
peer_infos,
&peers_needing_repairs,
&socket,
cluster_info,
@@ -161,10 +170,10 @@ impl ClusterInfoRepairListener {
my_pubkey: &Pubkey,
peer_pubkey: &Pubkey,
cluster_info: &Arc<RwLock<ClusterInfo>>,
peer_roots: &mut HashMap<Pubkey, (u64, u64)>,
peer_infos: &mut HashMap<Pubkey, RepaireeInfo>,
my_gossiped_root: &mut u64,
) -> Option<EpochSlots> {
let last_cached_repair_ts = Self::get_last_ts(peer_pubkey, peer_roots);
let last_cached_repair_ts = Self::get_last_ts(peer_pubkey, peer_infos);
let my_root = Self::read_my_gossiped_root(&my_pubkey, cluster_info, my_gossiped_root);
{
let r_cluster_info = cluster_info.read().unwrap();
@@ -173,8 +182,8 @@ impl ClusterInfoRepairListener {
if let Some((peer_epoch_slots, updated_ts)) =
r_cluster_info.get_epoch_state_for_node(&peer_pubkey, last_cached_repair_ts)
{
let peer_entry = peer_roots.entry(*peer_pubkey).or_default();
let peer_root = cmp::max(peer_epoch_slots.root, peer_entry.1);
let peer_info = peer_infos.entry(*peer_pubkey).or_default();
let peer_root = cmp::max(peer_epoch_slots.root, peer_info.last_root);
let mut result = None;
let last_repair_ts = {
// Following logic needs to be fast because it holds the lock
@@ -185,11 +194,12 @@ impl ClusterInfoRepairListener {
updated_ts
} else {
// No repairs were sent, don't need to update the timestamp
peer_entry.0
peer_info.last_ts
}
};

*peer_entry = (last_repair_ts, peer_root);
peer_info.last_ts = last_repair_ts;
peer_info.last_root = peer_root;
result
} else {
None
@@ -200,7 +210,7 @@ impl ClusterInfoRepairListener {
fn serve_repairs(
my_pubkey: &Pubkey,
blocktree: &Blocktree,
peer_roots: &HashMap<Pubkey, (u64, u64)>,
peer_infos: &mut HashMap<Pubkey, RepaireeInfo>,
repairees: &HashMap<Pubkey, EpochSlots>,
socket: &UdpSocket,
cluster_info: &Arc<RwLock<ClusterInfo>>,
@@ -210,18 +220,18 @@ impl ClusterInfoRepairListener {
for (repairee_pubkey, repairee_epoch_slots) in repairees {
let repairee_root = repairee_epoch_slots.root;

let repairee_tvu = {
let repairee_repair_addr = {
let r_cluster_info = cluster_info.read().unwrap();
let contact_info = r_cluster_info.get_contact_info_for_node(repairee_pubkey);
contact_info.map(|c| c.tvu)
contact_info.map(|c| c.repair)
};

if let Some(repairee_tvu) = repairee_tvu {
if let Some(repairee_addr) = repairee_repair_addr {
// For every repairee, get the set of repairmen who are responsible for
let mut eligible_repairmen = Self::find_eligible_repairmen(
my_pubkey,
repairee_root,
peer_roots,
peer_infos,
NUM_BUFFER_SLOTS,
);

@@ -234,7 +244,7 @@ impl ClusterInfoRepairListener {
let my_root =
Self::read_my_gossiped_root(my_pubkey, cluster_info, my_gossiped_root);

let _ = Self::serve_repairs_to_repairee(
let repair_results = Self::serve_repairs_to_repairee(
my_pubkey,
repairee_pubkey,
my_root,
@@ -242,10 +252,19 @@ impl ClusterInfoRepairListener {
&repairee_epoch_slots,
&eligible_repairmen,
socket,
&repairee_tvu,
&repairee_addr,
NUM_SLOTS_PER_UPDATE,
epoch_schedule,
peer_infos
.get(repairee_pubkey)
.unwrap()
.last_repaired_slot_and_ts,
);

if let Ok(Some(new_last_repaired_slot)) = repair_results {
let peer_info = peer_infos.get_mut(repairee_pubkey).unwrap();
peer_info.last_repaired_slot_and_ts = (new_last_repaired_slot, timestamp());
}
}
}

@@ -261,17 +280,18 @@ impl ClusterInfoRepairListener {
repairee_epoch_slots: &EpochSlots,
eligible_repairmen: &[&Pubkey],
socket: &UdpSocket,
repairee_tvu: &SocketAddr,
repairee_addr: &SocketAddr,
num_slots_to_repair: usize,
epoch_schedule: &EpochSchedule,
) -> Result<()> {
last_repaired_slot_and_ts: (u64, u64),
) -> Result<Option<u64>> {
let slot_iter = RootedSlotIterator::new(repairee_epoch_slots.root, &blocktree);
if slot_iter.is_err() {
info!(
"Root for repairee is on different fork. My root: {}, repairee_root: {} repairee_pubkey: {:?}",
my_root, repairee_epoch_slots.root, repairee_pubkey,
);
return Ok(());
return Ok(None);
}

let mut slot_iter = slot_iter?;
@@ -284,9 +304,13 @@ impl ClusterInfoRepairListener {
let max_confirmed_repairee_slot =
epoch_schedule.get_last_slot_in_epoch(max_confirmed_repairee_epoch);

let last_repaired_slot = last_repaired_slot_and_ts.0;
let last_repaired_ts = last_repaired_slot_and_ts.1;

// Skip the first slot in the iterator because we know it's the root slot which the repairee
// already has
slot_iter.next();
let mut new_repaired_slot: Option<u64> = None;
for (slot, slot_meta) in slot_iter {
if slot > my_root
|| num_slots_repaired >= num_slots_to_repair
@@ -303,49 +327,76 @@ impl ClusterInfoRepairListener {
// calculate_my_repairman_index_for_slot() will divide responsibility evenly across
// the cluster
let num_blobs_in_slot = slot_meta.received as usize;

// Check if I'm responsible for repairing this slots
if let Some(my_repair_indexes) = Self::calculate_my_repairman_index_for_slot(
my_pubkey,
&eligible_repairmen,
num_blobs_in_slot,
REPAIR_REDUNDANCY,
) {
// Repairee is missing this slot, send them the blobs for this slot
for blob_index in my_repair_indexes {
// Loop over the sblob indexes and query the database for these blob that
// this node is reponsible for repairing. This should be faster than using
// a database iterator over the slots because by the time this node is
// sending the blobs in this slot for repair, we expect these slots
// to be full.
if let Some(blob_data) = blocktree
.get_data_shred(slot, blob_index as u64)
.expect("Failed to read data blob from blocktree")
{
socket.send_to(&blob_data[..], repairee_tvu)?;
total_data_blobs_sent += 1;
// If I've already sent blobs >= this slot before, then don't send them again
// until the timeout has expired
if slot > last_repaired_slot
|| timestamp() - last_repaired_ts > REPAIR_SAME_SLOT_THRESHOLD
{
error!(
"Serving repair for slot {} to {}. Repairee slots: {:?}",
slot, repairee_pubkey, repairee_epoch_slots.slots
);
// Repairee is missing this slot, send them the blobs for this slot
for blob_index in my_repair_indexes {
// Loop over the blob indexes and query the database for these blob that
// this node is reponsible for repairing. This should be faster than using
// a database iterator over the slots because by the time this node is
// sending the blobs in this slot for repair, we expect these slots
// to be full.
if let Some(blob_data) = blocktree
.get_data_shred(slot, blob_index as u64)
.expect("Failed to read data blob from blocktree")
{
socket.send_to(&blob_data[..], repairee_addr)?;
total_data_blobs_sent += 1;
}

if let Some(coding_bytes) = blocktree
.get_coding_shred(slot, blob_index as u64)
.expect("Failed to read coding blob from blocktree")
{
socket.send_to(&coding_bytes[..], repairee_addr)?;
total_coding_blobs_sent += 1;
}
}

if let Some(coding_bytes) = blocktree
.get_coding_shred(slot, blob_index as u64)
.expect("Failed to read coding blob from blocktree")
{
socket.send_to(&coding_bytes[..], repairee_tvu)?;
total_coding_blobs_sent += 1;
}
new_repaired_slot = Some(slot);
Self::report_repair_metrics(
slot,
repairee_pubkey,
total_data_blobs_sent,
total_coding_blobs_sent,
);
total_data_blobs_sent = 0;
total_coding_blobs_sent = 0;
}

num_slots_repaired += 1;
}
}
}

Self::report_repair_metrics(total_data_blobs_sent, total_coding_blobs_sent);
Ok(())
Ok(new_repaired_slot)
}

fn report_repair_metrics(total_data_blobs_sent: u64, total_coding_blobs_sent: u64) {
fn report_repair_metrics(
slot: u64,
repairee_id: &Pubkey,
total_data_blobs_sent: u64,
total_coding_blobs_sent: u64,
) {
if total_data_blobs_sent > 0 || total_coding_blobs_sent > 0 {
datapoint!(
"repairman_activity",
("slot", slot, i64),
("repairee_id", repairee_id.to_string(), String),
("data_sent", total_data_blobs_sent, i64),
("coding_sent", total_coding_blobs_sent, i64)
);
@@ -407,14 +458,14 @@ impl ClusterInfoRepairListener {
fn find_eligible_repairmen<'a>(
my_pubkey: &'a Pubkey,
repairee_root: u64,
repairman_roots: &'a HashMap<Pubkey, (u64, u64)>,
repairman_roots: &'a HashMap<Pubkey, RepaireeInfo>,
num_buffer_slots: usize,
) -> Vec<&'a Pubkey> {
let mut repairmen: Vec<_> = repairman_roots
.iter()
.filter_map(|(repairman_pubkey, (_, repairman_root))| {
.filter_map(|(repairman_pubkey, repairman_info)| {
if Self::should_repair_peer(
*repairman_root,
repairman_info.last_root,
repairee_root,
num_buffer_slots - GOSSIP_DELAY_SLOTS,
) {
@@ -461,8 +512,8 @@ impl ClusterInfoRepairListener {
repairman_root > repairee_root + num_buffer_slots as u64
}

fn get_last_ts(pubkey: &Pubkey, peer_roots: &mut HashMap<Pubkey, (u64, u64)>) -> Option<u64> {
peer_roots.get(pubkey).map(|(last_ts, _)| *last_ts)
fn get_last_ts(pubkey: &Pubkey, peer_infos: &mut HashMap<Pubkey, RepaireeInfo>) -> Option<u64> {
peer_infos.get(pubkey).map(|p| p.last_ts)
}
}

@@ -564,7 +615,7 @@ mod tests {
);

// Set up locally cached information
let mut peer_roots = HashMap::new();
let mut peer_info = HashMap::new();
let mut my_gossiped_root = repairee_root;

// Root is not sufficiently far ahead, we shouldn't repair
@@ -572,7 +623,7 @@ mod tests {
&my_pubkey,
&peer_pubkey,
&cluster_info,
&mut peer_roots,
&mut peer_info,
&mut my_gossiped_root,
)
.is_none());
@@ -584,7 +635,7 @@ mod tests {
&my_pubkey,
&peer_pubkey,
&cluster_info,
&mut peer_roots,
&mut peer_info,
&mut my_gossiped_root,
)
.is_some());
@@ -596,7 +647,7 @@ mod tests {
&my_pubkey,
&peer_pubkey,
&cluster_info,
&mut peer_roots,
&mut peer_info,
&mut my_gossiped_root,
)
.is_none());
@@ -612,12 +663,78 @@ mod tests {
&my_pubkey,
&peer_pubkey,
&cluster_info,
&mut peer_roots,
&mut peer_info,
&mut my_gossiped_root,
)
.is_some());
}

#[test]
fn test_serve_same_repairs_to_repairee() {
let blocktree_path = get_tmp_ledger_path!();
let blocktree = Blocktree::open(&blocktree_path).unwrap();
let num_slots = 2;
let (shreds, _) = make_many_slot_entries(0, num_slots, 1);
blocktree.insert_shreds(shreds, None, false).unwrap();

// Write roots so that these slots will qualify to be sent by the repairman
let last_root = num_slots - 1;
let roots: Vec<_> = (0..=last_root).collect();
blocktree.set_roots(&roots).unwrap();

// Set up my information
let my_pubkey = Pubkey::new_rand();
let my_socket = UdpSocket::bind("0.0.0.0:0").unwrap();

// Set up a mock repairee with a socket listening for incoming repairs
let mock_repairee = MockRepairee::make_mock_repairee();

// Set up the repairee's EpochSlots, such that they are missing every odd indexed slot
// in the range (repairee_root, num_slots]
let repairee_root = 0;
let repairee_slots: BTreeSet<_> = (0..=num_slots).step_by(2).collect();
let repairee_epoch_slots =
EpochSlots::new(mock_repairee.id, repairee_root, repairee_slots, 1);
let eligible_repairmen = vec![&my_pubkey];
let epoch_schedule = EpochSchedule::custom(32, 16, false);
assert!(ClusterInfoRepairListener::serve_repairs_to_repairee(
&my_pubkey,
&mock_repairee.id,
num_slots - 1,
&blocktree,
&repairee_epoch_slots,
&eligible_repairmen,
&my_socket,
&mock_repairee.tvu_address,
1,
&epoch_schedule,
// Simulate having already sent a slot very recently
(last_root, timestamp()),
)
.unwrap()
.is_none());

// Simulate the threshold having elapsed, allowing the repairman
// to send the slot again
assert_eq!(
ClusterInfoRepairListener::serve_repairs_to_repairee(
&my_pubkey,
&mock_repairee.id,
num_slots - 1,
&blocktree,
&repairee_epoch_slots,
&eligible_repairmen,
&my_socket,
&mock_repairee.tvu_address,
1,
&epoch_schedule,
(last_root, timestamp() - REPAIR_SAME_SLOT_THRESHOLD * 2),
)
.unwrap(),
Some(1)
);
}

#[test]
fn test_serve_repairs_to_repairee() {
let blocktree_path = get_tmp_ledger_path!();
@@ -629,7 +746,7 @@ mod tests {
let num_shreds_per_slot = shreds.len() as u64 / num_slots;

// Write slots in the range [0, num_slots] to blocktree
blocktree.insert_shreds(shreds, None).unwrap();
blocktree.insert_shreds(shreds, None, false).unwrap();

// Write roots so that these slots will qualify to be sent by the repairman
let roots: Vec<_> = (0..=num_slots - 1).collect();
@@ -671,6 +788,7 @@ mod tests {
&mock_repairee.tvu_address,
num_missing_slots as usize,
&epoch_schedule,
(0, 0),
)
.unwrap();
}
@@ -706,7 +824,7 @@ mod tests {
// Create blobs for first two epochs and write them to blocktree
let total_slots = slots_per_epoch * 2;
let (shreds, _) = make_many_slot_entries(0, total_slots, 1);
blocktree.insert_shreds(shreds, None).unwrap();
blocktree.insert_shreds(shreds, None, false).unwrap();

// Write roots so that these slots will qualify to be sent by the repairman
let roots: Vec<_> = (0..=slots_per_epoch * 2 - 1).collect();
@@ -741,6 +859,7 @@ mod tests {
&mock_repairee.tvu_address,
1 as usize,
&epoch_schedule,
(0, 0),
)
.unwrap();

@@ -763,6 +882,7 @@ mod tests {
&mock_repairee.tvu_address,
1 as usize,
&epoch_schedule,
(0, 0),
)
.unwrap();

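Editor's note: the hunks above gate repeat repairs. A repairman now records, per repairee, the last slot it served and when, and re-serves a slot at or below that mark only after `REPAIR_SAME_SLOT_THRESHOLD` milliseconds. A minimal standalone sketch of that check (local stand-ins, not the crate's types):

```rust
// Milliseconds a repairman waits before re-serving an already-sent slot
// (matches the constant introduced in the diff above).
const REPAIR_SAME_SLOT_THRESHOLD: u64 = 5_000;

// Mirrors the `slot > last_repaired_slot || timestamp() - last_repaired_ts > ...`
// condition from serve_repairs_to_repairee.
fn may_serve_again(slot: u64, now_ms: u64, last_repaired_slot_and_ts: (u64, u64)) -> bool {
    let (last_slot, last_ts) = last_repaired_slot_and_ts;
    slot > last_slot || now_ms - last_ts > REPAIR_SAME_SLOT_THRESHOLD
}

fn main() {
    // Slot 7 was just served at t = 10_000 ms; an immediate retry is suppressed.
    assert!(!may_serve_again(7, 10_001, (7, 10_000)));
    // A strictly newer slot is always eligible.
    assert!(may_serve_again(8, 10_001, (7, 10_000)));
    // The same slot becomes eligible again once the threshold elapses.
    assert!(may_serve_again(7, 16_000, (7, 10_000)));
}
```
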
@@ -48,14 +48,15 @@ impl ClusterInfoVoteListener {
sender: &CrossbeamSender<VerifiedPackets>,
poh_recorder: Arc<Mutex<PohRecorder>>,
) -> Result<()> {
let mut last_ts = 0;
loop {
if exit.load(Ordering::Relaxed) {
return Ok(());
}
let (votes, new_ts) = cluster_info.read().unwrap().get_votes(last_ts);
if poh_recorder.lock().unwrap().has_bank() {
last_ts = new_ts;
if let Some(bank) = poh_recorder.lock().unwrap().bank() {
let last_ts = bank.last_vote_sync.load(Ordering::Relaxed);
let (votes, new_ts) = cluster_info.read().unwrap().get_votes(last_ts);
bank.last_vote_sync
.compare_and_swap(last_ts, new_ts, Ordering::Relaxed);
inc_new_counter_debug!("cluster_info_vote_listener-recv_count", votes.len());
let msgs = packet::to_packets(&votes);
if !msgs.is_empty() {

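Editor's note: this change moves the gossip-vote cursor from a loop-local `last_ts` to an atomic on the bank, so each new bank resumes fetching votes from the timestamp it last synced. A sketch of that cursor update, assuming `last_vote_sync` is an `AtomicU64` (the sketch uses `compare_exchange`, the non-deprecated equivalent of the `compare_and_swap` call shown above):

```rust
use std::sync::atomic::{AtomicU64, Ordering};

// Advance the vote-sync cursor from `last_ts` to `new_ts`, but only if no
// other thread moved it first, as in the hunk above.
fn advance_vote_cursor(last_vote_sync: &AtomicU64, new_ts: u64) -> u64 {
    let last_ts = last_vote_sync.load(Ordering::Relaxed);
    let _ = last_vote_sync.compare_exchange(last_ts, new_ts, Ordering::Relaxed, Ordering::Relaxed);
    last_ts // votes newer than this timestamp are the ones to process
}

fn main() {
    let cursor = AtomicU64::new(0);
    assert_eq!(advance_vote_cursor(&cursor, 42), 0);
    assert_eq!(cursor.load(Ordering::Relaxed), 42);
}
```
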
@@ -11,7 +11,7 @@ use std::sync::Arc;
pub const VOTE_THRESHOLD_DEPTH: usize = 8;
pub const VOTE_THRESHOLD_SIZE: f64 = 2f64 / 3f64;

#[derive(Default, Debug)]
#[derive(Default, Debug, Clone)]
pub struct StakeLockout {
lockout: u64,
stake: u64,
@@ -172,7 +172,7 @@ impl Tower {
slot: u64,
hash: Hash,
last_bank_slot: Option<u64>,
) -> Vote {
) -> (Vote, usize) {
let mut local_vote_state = local_vote_state.clone();
let vote = Vote {
slots: vec![slot],
@@ -195,7 +195,7 @@ impl Tower {
slots,
local_vote_state.votes
);
Vote { slots, hash }
(Vote { slots, hash }, local_vote_state.votes.len() - 1)
}
fn last_bank_vote(bank: &Bank, vote_account_pubkey: &Pubkey) -> Option<u64> {
let vote_account = bank.vote_accounts().get(vote_account_pubkey)?.1.clone();
@@ -203,7 +203,7 @@ impl Tower {
bank_vote_state.votes.iter().map(|v| v.slot).last()
}

pub fn new_vote_from_bank(&self, bank: &Bank, vote_account_pubkey: &Pubkey) -> Vote {
pub fn new_vote_from_bank(&self, bank: &Bank, vote_account_pubkey: &Pubkey) -> (Vote, usize) {
let last_vote = Self::last_bank_vote(bank, vote_account_pubkey);
Self::new_vote(&self.lockouts, bank.slot(), bank.hash(), last_vote)
}
@@ -254,7 +254,7 @@ impl Tower {
}

// a slot is not recent if it's older than the newest vote we have
fn is_recent(&self, slot: u64) -> bool {
pub fn is_recent(&self, slot: u64) -> bool {
if let Some(last_vote) = self.lockouts.votes.back() {
if slot <= last_vote.slot {
return false;
@@ -312,7 +312,15 @@ impl Tower {
let vote = lockouts.nth_recent_vote(self.threshold_depth);
if let Some(vote) = vote {
if let Some(fork_stake) = stake_lockouts.get(&vote.slot) {
(fork_stake.stake as f64 / total_staked as f64) > self.threshold_size
let lockout = fork_stake.stake as f64 / total_staked as f64;
trace!(
"fork_stake {} {} {} {}",
slot,
lockout,
fork_stake.stake,
total_staked
);
lockout > self.threshold_size
} else {
false
}
@@ -790,14 +798,16 @@ mod test {
fn test_new_vote() {
let local = VoteState::default();
let vote = Tower::new_vote(&local, 0, Hash::default(), None);
assert_eq!(vote.slots, vec![0]);
assert_eq!(local.votes.len(), 0);
assert_eq!(vote.0.slots, vec![0]);
assert_eq!(vote.1, 0);
}

#[test]
fn test_new_vote_dup_vote() {
let local = VoteState::default();
let vote = Tower::new_vote(&local, 0, Hash::default(), Some(0));
assert!(vote.slots.is_empty());
assert!(vote.0.slots.is_empty());
}

#[test]
@@ -808,8 +818,25 @@ mod test {
hash: Hash::default(),
};
local.process_vote_unchecked(&vote);
assert_eq!(local.votes.len(), 1);
let vote = Tower::new_vote(&local, 1, Hash::default(), Some(0));
assert_eq!(vote.slots, vec![1]);
assert_eq!(vote.0.slots, vec![1]);
assert_eq!(vote.1, 1);
}

#[test]
fn test_new_vote_next_after_expired_vote() {
let mut local = VoteState::default();
let vote = Vote {
slots: vec![0],
hash: Hash::default(),
};
local.process_vote_unchecked(&vote);
assert_eq!(local.votes.len(), 1);
let vote = Tower::new_vote(&local, 3, Hash::default(), Some(0));
//first vote expired, so index should be 0
assert_eq!(vote.0.slots, vec![3]);
assert_eq!(vote.1, 0);
}

#[test]

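Editor's note: the threshold hunk above now computes the fork's stake fraction once and traces it before comparing. The pass/fail arithmetic it performs reduces to this (standalone sketch; the constant matches `VOTE_THRESHOLD_SIZE` above):

```rust
// Matches VOTE_THRESHOLD_SIZE above: a fork needs more than 2/3 of stake.
const VOTE_THRESHOLD_SIZE: f64 = 2f64 / 3f64;

// The comparison performed on the stake locked out at the threshold depth.
fn passes_vote_threshold(fork_stake: u64, total_staked: u64) -> bool {
    (fork_stake as f64 / total_staked as f64) > VOTE_THRESHOLD_SIZE
}

fn main() {
    assert!(passes_vote_threshold(67, 100)); // 0.67 > 0.666...
    assert!(!passes_vote_threshold(66, 100)); // 0.66 falls short
}
```
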
@@ -1,12 +1,9 @@
use bincode::serialize;
use solana_sdk::pubkey::Pubkey;
#[cfg(test)]
use solana_sdk::rpc_port;
#[cfg(test)]
use solana_sdk::signature::{Keypair, KeypairUtil};
use solana_sdk::signature::{Signable, Signature};
use solana_sdk::timing::timestamp;
use std::borrow::Cow;
use std::cmp::{Ord, Ordering, PartialEq, PartialOrd};
use std::net::{IpAddr, SocketAddr};

@@ -14,14 +11,14 @@ use std::net::{IpAddr, SocketAddr};
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct ContactInfo {
pub id: Pubkey,
/// signature of this ContactInfo
pub signature: Signature,
/// gossip address
pub gossip: SocketAddr,
/// address to connect to for replication
pub tvu: SocketAddr,
/// address to forward blobs to
/// address to forward shreds to
pub tvu_forwards: SocketAddr,
/// address to send repairs to
pub repair: SocketAddr,
/// transactions address
pub tpu: SocketAddr,
/// address to forward unprocessed transactions to
@@ -80,13 +77,13 @@ impl Default for ContactInfo {
gossip: socketaddr_any!(),
tvu: socketaddr_any!(),
tvu_forwards: socketaddr_any!(),
repair: socketaddr_any!(),
tpu: socketaddr_any!(),
tpu_forwards: socketaddr_any!(),
storage_addr: socketaddr_any!(),
rpc: socketaddr_any!(),
rpc_pubsub: socketaddr_any!(),
wallclock: 0,
signature: Signature::default(),
}
}
}
@@ -98,6 +95,7 @@ impl ContactInfo {
gossip: SocketAddr,
tvu: SocketAddr,
tvu_forwards: SocketAddr,
repair: SocketAddr,
tpu: SocketAddr,
tpu_forwards: SocketAddr,
storage_addr: SocketAddr,
@@ -107,10 +105,10 @@ impl ContactInfo {
) -> Self {
Self {
id: *id,
signature: Signature::default(),
gossip,
tvu,
tvu_forwards,
repair,
tpu,
tpu_forwards,
storage_addr,
@@ -131,6 +129,7 @@ impl ContactInfo {
socketaddr!("127.0.0.1:1239"),
socketaddr!("127.0.0.1:1240"),
socketaddr!("127.0.0.1:1241"),
socketaddr!("127.0.0.1:1242"),
now,
)
}
@@ -150,6 +149,7 @@ impl ContactInfo {
addr,
addr,
addr,
addr,
0,
)
}
@@ -167,6 +167,7 @@ impl ContactInfo {
let tvu_addr = next_port(&bind_addr, 2);
let tpu_forwards_addr = next_port(&bind_addr, 3);
let tvu_forwards_addr = next_port(&bind_addr, 4);
let repair = next_port(&bind_addr, 5);
let rpc_addr = SocketAddr::new(bind_addr.ip(), rpc_port::DEFAULT_RPC_PORT);
let rpc_pubsub_addr = SocketAddr::new(bind_addr.ip(), rpc_port::DEFAULT_RPC_PUBSUB_PORT);
Self::new(
@@ -174,6 +175,7 @@ impl ContactInfo {
gossip_addr,
tvu_addr,
tvu_forwards_addr,
repair,
tpu_addr,
tpu_forwards_addr,
"0.0.0.0:0".parse().unwrap(),
@@ -202,6 +204,7 @@ impl ContactInfo {
daddr,
daddr,
daddr,
daddr,
timestamp(),
)
}
@@ -232,49 +235,6 @@ impl ContactInfo {
}
}

impl Signable for ContactInfo {
fn pubkey(&self) -> Pubkey {
self.id
}

fn signable_data(&self) -> Cow<[u8]> {
#[derive(Serialize)]
struct SignData {
id: Pubkey,
gossip: SocketAddr,
tvu: SocketAddr,
tpu: SocketAddr,
tpu_forwards: SocketAddr,
storage_addr: SocketAddr,
rpc: SocketAddr,
rpc_pubsub: SocketAddr,
wallclock: u64,
}

let me = self;
let data = SignData {
id: me.id,
gossip: me.gossip,
tvu: me.tvu,
tpu: me.tpu,
storage_addr: me.storage_addr,
tpu_forwards: me.tpu_forwards,
rpc: me.rpc,
rpc_pubsub: me.rpc_pubsub,
wallclock: me.wallclock,
};
Cow::Owned(serialize(&data).expect("failed to serialize ContactInfo"))
}

fn get_signature(&self) -> Signature {
self.signature
}

fn set_signature(&mut self, signature: Signature) {
self.signature = signature
}
}

#[cfg(test)]
mod tests {
use super::*;

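Editor's note: the new `repair` socket above slots into the node's sequential port layout: relative to the bind address, tvu sits at offset 2, tpu_forwards at 3, tvu_forwards at 4, and repair at 5. A stand-in sketch of that layout; `next_port` here is a local assumption modeled on its use above, not the crate's private helper:

```rust
use std::net::SocketAddr;

// Assumed behavior of the next_port helper used above: same IP, offset port.
fn next_port(addr: &SocketAddr, offset: u16) -> SocketAddr {
    SocketAddr::new(addr.ip(), addr.port() + offset)
}

fn main() {
    let bind: SocketAddr = "127.0.0.1:8000".parse().unwrap();
    let tvu = next_port(&bind, 2);
    let tpu_forwards = next_port(&bind, 3);
    let tvu_forwards = next_port(&bind, 4);
    let repair = next_port(&bind, 5); // the address added in this diff
    assert_eq!(repair.port(), 8005);
    println!("tvu={} tpu_forwards={} tvu_forwards={} repair={}", tvu, tpu_forwards, tvu_forwards, repair);
}
```
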
@@ -165,11 +165,12 @@ impl Crds {
mod test {
use super::*;
use crate::contact_info::ContactInfo;
use crate::crds_value::CrdsData;

#[test]
fn test_insert() {
let mut crds = Crds::default();
let val = CrdsValue::ContactInfo(ContactInfo::default());
let val = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::default()));
assert_eq!(crds.insert(val.clone(), 0).ok(), Some(None));
assert_eq!(crds.table.len(), 1);
assert!(crds.table.contains_key(&val.label()));
@@ -178,7 +179,7 @@ mod test {
#[test]
fn test_update_old() {
let mut crds = Crds::default();
let val = CrdsValue::ContactInfo(ContactInfo::default());
let val = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::default()));
assert_eq!(crds.insert(val.clone(), 0), Ok(None));
assert_eq!(crds.insert(val.clone(), 1), Err(CrdsError::InsertFailed));
assert_eq!(crds.table[&val.label()].local_timestamp, 0);
@@ -186,9 +187,15 @@ mod test {
#[test]
fn test_update_new() {
let mut crds = Crds::default();
let original = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::default(), 0));
let original = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::default(),
0,
)));
assert_matches!(crds.insert(original.clone(), 0), Ok(_));
let val = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::default(), 1));
let val = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::default(),
1,
)));
assert_eq!(
crds.insert(val.clone(), 1).unwrap().unwrap().value,
original
@@ -198,14 +205,17 @@ mod test {
#[test]
fn test_update_timestamp() {
let mut crds = Crds::default();
let val = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::default(), 0));
let val = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::default(),
0,
)));
assert_eq!(crds.insert(val.clone(), 0), Ok(None));

crds.update_label_timestamp(&val.label(), 1);
assert_eq!(crds.table[&val.label()].local_timestamp, 1);
assert_eq!(crds.table[&val.label()].insert_timestamp, 0);

let val2 = CrdsValue::ContactInfo(ContactInfo::default());
let val2 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::default()));
assert_eq!(val2.label().pubkey(), val.label().pubkey());
assert_matches!(crds.insert(val2.clone(), 0), Ok(Some(_)));

@@ -221,7 +231,7 @@ mod test {

let mut ci = ContactInfo::default();
ci.wallclock += 1;
let val3 = CrdsValue::ContactInfo(ci);
let val3 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ci));
assert_matches!(crds.insert(val3.clone(), 3), Ok(Some(_)));
assert_eq!(crds.table[&val2.label()].local_timestamp, 3);
assert_eq!(crds.table[&val2.label()].insert_timestamp, 3);
@@ -229,7 +239,7 @@ mod test {
#[test]
fn test_find_old_records() {
let mut crds = Crds::default();
let val = CrdsValue::ContactInfo(ContactInfo::default());
let val = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::default()));
assert_eq!(crds.insert(val.clone(), 1), Ok(None));

assert!(crds.find_old_labels(0).is_empty());
@@ -239,7 +249,7 @@ mod test {
#[test]
fn test_remove() {
let mut crds = Crds::default();
let val = CrdsValue::ContactInfo(ContactInfo::default());
let val = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::default()));
assert_matches!(crds.insert(val.clone(), 1), Ok(_));

assert_eq!(crds.find_old_labels(1), vec![val.label()]);
@@ -248,7 +258,7 @@ mod test {
}
#[test]
fn test_equal() {
let val = CrdsValue::ContactInfo(ContactInfo::default());
let val = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::default()));
let v1 = VersionedCrdsValue::new(1, val.clone());
let v2 = VersionedCrdsValue::new(1, val);
assert_eq!(v1, v2);
@@ -258,12 +268,15 @@ mod test {
fn test_hash_order() {
let v1 = VersionedCrdsValue::new(
1,
CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::default(), 0)),
CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::default(),
0,
))),
);
let v2 = VersionedCrdsValue::new(1, {
let mut contact_info = ContactInfo::new_localhost(&Pubkey::default(), 0);
contact_info.rpc = socketaddr!("0.0.0.0:0");
CrdsValue::ContactInfo(contact_info)
CrdsValue::new_unsigned(CrdsData::ContactInfo(contact_info))
});

assert_eq!(v1.value.label(), v2.value.label());
@@ -285,11 +298,17 @@ mod test {
fn test_wallclock_order() {
let v1 = VersionedCrdsValue::new(
1,
CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::default(), 1)),
CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::default(),
1,
))),
);
let v2 = VersionedCrdsValue::new(
1,
CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::default(), 0)),
CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::default(),
0,
))),
);
assert_eq!(v1.value.label(), v2.value.label());
assert!(v1 > v2);
@@ -301,11 +320,17 @@ mod test {
fn test_label_order() {
let v1 = VersionedCrdsValue::new(
1,
CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0)),
CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
0,
))),
);
let v2 = VersionedCrdsValue::new(
1,
CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0)),
CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
0,
))),
);
assert_ne!(v1, v2);
assert!(!(v1 == v2));

@@ -9,7 +9,6 @@ use crate::crds_gossip_pull::{CrdsFilter, CrdsGossipPull};
use crate::crds_gossip_push::{CrdsGossipPush, CRDS_GOSSIP_NUM_ACTIVE};
use crate::crds_value::{CrdsValue, CrdsValueLabel};
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::Signable;
use std::collections::{HashMap, HashSet};

///The min size for bloom filters
@@ -204,6 +203,7 @@ pub fn get_weight(max_weight: f32, time_since_last_selected: u32, stake: f32) ->
mod test {
use super::*;
use crate::contact_info::ContactInfo;
use crate::crds_value::CrdsData;
use solana_sdk::hash::hash;
use solana_sdk::timing::timestamp;

@@ -216,7 +216,10 @@ mod test {
let prune_pubkey = Pubkey::new(&[2; 32]);
crds_gossip
.crds
.insert(CrdsValue::ContactInfo(ci.clone()), 0)
.insert(
CrdsValue::new_unsigned(CrdsData::ContactInfo(ci.clone())),
0,
)
.unwrap();
crds_gossip.refresh_push_active_set(&HashMap::new());
let now = timestamp();

@@ -294,6 +294,7 @@ impl CrdsGossipPull {
mod test {
use super::*;
use crate::contact_info::ContactInfo;
use crate::crds_value::CrdsData;
use itertools::Itertools;
use solana_sdk::hash::hash;
use solana_sdk::packet::PACKET_DATA_SIZE;
@@ -303,10 +304,16 @@ mod test {
let mut crds = Crds::default();
let mut stakes = HashMap::new();
let node = CrdsGossipPull::default();
let me = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
let me = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
0,
)));
crds.insert(me.clone(), 0).unwrap();
for i in 1..=30 {
let entry = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
let entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
0,
)));
let id = entry.label().pubkey();
crds.insert(entry.clone(), 0).unwrap();
stakes.insert(id, i * 100);
@@ -325,7 +332,10 @@ mod test {
#[test]
fn test_new_pull_request() {
let mut crds = Crds::default();
let entry = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
let entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
0,
)));
let id = entry.label().pubkey();
let node = CrdsGossipPull::default();
assert_eq!(
@@ -339,7 +349,10 @@ mod test {
Err(CrdsGossipError::NoPeers)
);

let new = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
0,
)));
crds.insert(new.clone(), 0).unwrap();
let req = node.new_pull_request(&crds, &id, 0, &HashMap::new(), PACKET_DATA_SIZE);
let (to, _, self_info) = req.unwrap();
@@ -350,13 +363,22 @@ mod test {
#[test]
fn test_new_mark_creation_time() {
let mut crds = Crds::default();
let entry = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
let entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
0,
)));
let node_pubkey = entry.label().pubkey();
let mut node = CrdsGossipPull::default();
crds.insert(entry.clone(), 0).unwrap();
let old = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
let old = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
0,
)));
crds.insert(old.clone(), 0).unwrap();
let new = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
0,
)));
crds.insert(new.clone(), 0).unwrap();

// set request creation time to max_value
@@ -380,11 +402,17 @@ mod test {
#[test]
fn test_process_pull_request() {
let mut node_crds = Crds::default();
let entry = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
let entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
0,
)));
let node_pubkey = entry.label().pubkey();
let node = CrdsGossipPull::default();
node_crds.insert(entry.clone(), 0).unwrap();
let new = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
0,
)));
node_crds.insert(new.clone(), 0).unwrap();
let req = node.new_pull_request(
&node_crds,
@@ -419,22 +447,32 @@ mod test {
#[test]
fn test_process_pull_request_response() {
let mut node_crds = Crds::default();
let entry = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
let entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
0,
)));
let node_pubkey = entry.label().pubkey();
let mut node = CrdsGossipPull::default();
node_crds.insert(entry.clone(), 0).unwrap();

let new = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
0,
)));
node_crds.insert(new.clone(), 0).unwrap();

let mut dest = CrdsGossipPull::default();
let mut dest_crds = Crds::default();
let new_id = Pubkey::new_rand();
let new = CrdsValue::ContactInfo(ContactInfo::new_localhost(&new_id, 1));
let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&new_id, 1,
)));
dest_crds.insert(new.clone(), 0).unwrap();

// node contains a key from the dest node, but at an older local timestamp
let same_key = CrdsValue::ContactInfo(ContactInfo::new_localhost(&new_id, 0));
let same_key = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&new_id, 0,
)));
assert_eq!(same_key.label(), new.label());
assert!(same_key.wallclock() < new.wallclock());
node_crds.insert(same_key.clone(), 0).unwrap();
@@ -494,12 +532,18 @@ mod test {
#[test]
fn test_gossip_purge() {
let mut node_crds = Crds::default();
let entry = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
let entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
0,
)));
let node_label = entry.label();
let node_pubkey = node_label.pubkey();
let mut node = CrdsGossipPull::default();
node_crds.insert(entry.clone(), 0).unwrap();
let old = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
let old = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
0,
)));
node_crds.insert(old.clone(), 0).unwrap();
let value_hash = node_crds.lookup_versioned(&old.label()).unwrap().value_hash;

@@ -340,7 +340,7 @@ impl CrdsGossipPush {
mod test {
use super::*;
use crate::contact_info::ContactInfo;
use solana_sdk::signature::Signable;
use crate::crds_value::CrdsData;

#[test]
fn test_prune() {
@@ -353,7 +353,9 @@ mod test {
stakes.insert(self_id, 100);
stakes.insert(origin, 100);

let value = CrdsValue::ContactInfo(ContactInfo::new_localhost(&origin, 0));
let value = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&origin, 0,
)));
let label = value.label();
let low_staked_peers = (0..10).map(|_| Pubkey::new_rand());
let mut low_staked_set = HashSet::new();
@@ -395,7 +397,10 @@ mod test {
fn test_process_push() {
let mut crds = Crds::default();
let mut push = CrdsGossipPush::default();
let value = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
let value = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
0,
)));
let label = value.label();
// push a new message
assert_eq!(
@@ -416,7 +421,7 @@ mod test {
let mut push = CrdsGossipPush::default();
let mut ci = ContactInfo::new_localhost(&Pubkey::new_rand(), 0);
ci.wallclock = 1;
let value = CrdsValue::ContactInfo(ci.clone());
let value = CrdsValue::new_unsigned(CrdsData::ContactInfo(ci.clone()));

// push a new message
assert_eq!(
@@ -426,7 +431,7 @@ mod test {

// push an old version
ci.wallclock = 0;
let value = CrdsValue::ContactInfo(ci.clone());
let value = CrdsValue::new_unsigned(CrdsData::ContactInfo(ci.clone()));
assert_eq!(
push.process_push_message(&mut crds, &Pubkey::default(), value, 0),
Err(CrdsGossipError::PushMessageOldVersion)
@@ -441,7 +446,7 @@ mod test {

// push a version to far in the future
ci.wallclock = timeout + 1;
let value = CrdsValue::ContactInfo(ci.clone());
let value = CrdsValue::new_unsigned(CrdsData::ContactInfo(ci.clone()));
assert_eq!(
push.process_push_message(&mut crds, &Pubkey::default(), value, 0),
Err(CrdsGossipError::PushMessageTimeout)
@@ -449,7 +454,7 @@ mod test {

// push a version to far in the past
ci.wallclock = 0;
let value = CrdsValue::ContactInfo(ci.clone());
let value = CrdsValue::new_unsigned(CrdsData::ContactInfo(ci.clone()));
assert_eq!(
push.process_push_message(&mut crds, &Pubkey::default(), value, timeout + 1),
Err(CrdsGossipError::PushMessageTimeout)
@@ -461,7 +466,7 @@ mod test {
let mut push = CrdsGossipPush::default();
let mut ci = ContactInfo::new_localhost(&Pubkey::new_rand(), 0);
ci.wallclock = 0;
let value_old = CrdsValue::ContactInfo(ci.clone());
let value_old = CrdsValue::new_unsigned(CrdsData::ContactInfo(ci.clone()));

// push a new message
assert_eq!(
@@ -471,7 +476,7 @@ mod test {

// push an old version
ci.wallclock = 1;
let value = CrdsValue::ContactInfo(ci.clone());
let value = CrdsValue::new_unsigned(CrdsData::ContactInfo(ci.clone()));
assert_eq!(
push.process_push_message(&mut crds, &Pubkey::default(), value, 0)
.unwrap()
@@ -492,13 +497,19 @@ mod test {
solana_logger::setup();
let mut crds = Crds::default();
let mut push = CrdsGossipPush::default();
let value1 = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
let value1 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
0,
)));

assert_eq!(crds.insert(value1.clone(), 0), Ok(None));
push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 1, 1);

assert!(push.active_set.get(&value1.label().pubkey()).is_some());
let value2 = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
let value2 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
0,
)));
assert!(push.active_set.get(&value2.label().pubkey()).is_none());
assert_eq!(crds.insert(value2.clone(), 0), Ok(None));
for _ in 0..30 {
@@ -510,7 +521,9 @@ mod test {
assert!(push.active_set.get(&value2.label().pubkey()).is_some());

for _ in 0..push.num_active {
let value2 = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
let value2 = CrdsValue::new_unsigned(CrdsData::ContactInfo(
ContactInfo::new_localhost(&Pubkey::new_rand(), 0),
));
assert_eq!(crds.insert(value2.clone(), 0), Ok(None));
}
push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 1, 1);
@@ -523,8 +536,10 @@ mod test {
let push = CrdsGossipPush::default();
let mut stakes = HashMap::new();
for i in 1..=100 {
let peer =
CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), time));
let peer = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
time,
)));
let id = peer.label().pubkey();
crds.insert(peer.clone(), time).unwrap();
stakes.insert(id, i * 100);
@@ -542,11 +557,17 @@ mod test {
fn test_new_push_messages() {
let mut crds = Crds::default();
let mut push = CrdsGossipPush::default();
let peer = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
let peer = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
0,
)));
assert_eq!(crds.insert(peer.clone(), 0), Ok(None));
push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 1, 1);

let new_msg = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
let new_msg = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
0,
)));
let mut expected = HashMap::new();
expected.insert(peer.label().pubkey(), vec![new_msg.clone()]);
assert_eq!(
@@ -560,11 +581,20 @@ mod test {
fn test_personalized_push_messages() {
let mut crds = Crds::default();
let mut push = CrdsGossipPush::default();
let peer_1 = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
let peer_1 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
0,
)));
assert_eq!(crds.insert(peer_1.clone(), 0), Ok(None));
let peer_2 = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
let peer_2 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
0,
)));
assert_eq!(crds.insert(peer_2.clone(), 0), Ok(None));
let peer_3 = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
let peer_3 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
0,
)));
assert_eq!(
push.process_push_message(&mut crds, &Pubkey::default(), peer_3.clone(), 0),
Ok(None)
@@ -572,7 +602,10 @@ mod test {
push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 1, 1);

// push 3's contact info to 1 and 2 and 3
let new_msg = CrdsValue::ContactInfo(ContactInfo::new_localhost(&peer_3.pubkey(), 0));
let new_msg = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&peer_3.pubkey(),
0,
)));
let mut expected = HashMap::new();
expected.insert(peer_1.pubkey(), vec![new_msg.clone()]);
expected.insert(peer_2.pubkey(), vec![new_msg.clone()]);
@@ -583,11 +616,17 @@ mod test {
fn test_process_prune() {
let mut crds = Crds::default();
let mut push = CrdsGossipPush::default();
let peer = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
let peer = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
0,
)));
assert_eq!(crds.insert(peer.clone(), 0), Ok(None));
push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 1, 1);

let new_msg = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
let new_msg = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
0,
)));
let expected = HashMap::new();
assert_eq!(
push.process_push_message(&mut crds, &Pubkey::default(), new_msg.clone(), 0),
@@ -600,13 +639,16 @@ mod test {
fn test_purge_old_pending_push_messages() {
let mut crds = Crds::default();
let mut push = CrdsGossipPush::default();
let peer = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
let peer = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
0,
)));
assert_eq!(crds.insert(peer.clone(), 0), Ok(None));
push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 1, 1);

let mut ci = ContactInfo::new_localhost(&Pubkey::new_rand(), 0);
ci.wallclock = 1;
let new_msg = CrdsValue::ContactInfo(ci.clone());
let new_msg = CrdsValue::new_unsigned(CrdsData::ContactInfo(ci.clone()));
let expected = HashMap::new();
assert_eq!(
push.process_push_message(&mut crds, &Pubkey::default(), new_msg.clone(), 1),
@@ -622,7 +664,7 @@ mod test {
let mut push = CrdsGossipPush::default();
let mut ci = ContactInfo::new_localhost(&Pubkey::new_rand(), 0);
ci.wallclock = 0;
let value = CrdsValue::ContactInfo(ci.clone());
let value = CrdsValue::new_unsigned(CrdsData::ContactInfo(ci.clone()));
let label = value.label();
// push a new message
assert_eq!(

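Editor's note: throughout the test updates above, `CrdsValue::ContactInfo(x)` becomes `CrdsValue::new_unsigned(CrdsData::ContactInfo(x))`: a value is now a signed envelope around a `CrdsData` payload rather than an enum carrying per-variant signatures (the full refactor follows below). A simplified model with local stand-in types, not the solana-core definitions:

```rust
// Stand-in for the real type, which wraps an ed25519 signature.
#[derive(Clone, Debug, Default, PartialEq)]
struct Signature([u8; 8]);

// Stand-in payload variants; the real enum holds ContactInfo, Vote, EpochSlots.
#[allow(dead_code)]
#[derive(Clone, Debug, PartialEq)]
enum CrdsData {
    ContactInfo(String),
    EpochSlots(Vec<u64>),
}

// One signature field on the envelope covers every payload variant.
#[derive(Clone, Debug, PartialEq)]
struct CrdsValue {
    signature: Signature,
    data: CrdsData,
}

impl CrdsValue {
    fn new_unsigned(data: CrdsData) -> Self {
        Self {
            signature: Signature::default(),
            data,
        }
    }
}

fn main() {
    // The construction pattern the updated tests use everywhere above.
    let value = CrdsValue::new_unsigned(CrdsData::ContactInfo("node".to_string()));
    assert_eq!(value.signature, Signature::default());
}
```
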
@@ -3,107 +3,29 @@ use bincode::{serialize, serialized_size};
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::{Keypair, Signable, Signature};
use solana_sdk::transaction::Transaction;
use std::borrow::Borrow;
use std::borrow::Cow;
use std::collections::BTreeSet;
use std::collections::HashSet;
use std::fmt;

pub type VoteIndex = u8;
pub const MAX_VOTES: VoteIndex = 32;

/// CrdsValue that is replicated across the cluster
#[allow(clippy::large_enum_variant)]
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub enum CrdsValue {
/// * Merge Strategy - Latest wallclock is picked
ContactInfo(ContactInfo),
/// * Merge Strategy - Latest wallclock is picked
Vote(Vote),
/// * Merge Strategy - Latest wallclock is picked
EpochSlots(EpochSlots),
}

#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub struct EpochSlots {
pub from: Pubkey,
pub root: u64,
pub slots: BTreeSet<u64>,
pub struct CrdsValue {
pub signature: Signature,
pub wallclock: u64,
pub data: CrdsData,
}

impl EpochSlots {
pub fn new(from: Pubkey, root: u64, slots: BTreeSet<u64>, wallclock: u64) -> Self {
Self {
from,
root,
slots,
signature: Signature::default(),
wallclock,
}
}
}

impl Signable for EpochSlots {
impl Signable for CrdsValue {
fn pubkey(&self) -> Pubkey {
self.from
self.pubkey()
}

fn signable_data(&self) -> Cow<[u8]> {
#[derive(Serialize)]
struct SignData<'a> {
root: u64,
slots: &'a BTreeSet<u64>,
wallclock: u64,
}
let data = SignData {
root: self.root,
slots: &self.slots,
wallclock: self.wallclock,
};
Cow::Owned(serialize(&data).expect("unable to serialize EpochSlots"))
}

fn get_signature(&self) -> Signature {
self.signature
}

fn set_signature(&mut self, signature: Signature) {
self.signature = signature;
}
}

#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub struct Vote {
pub from: Pubkey,
pub transaction: Transaction,
pub signature: Signature,
pub wallclock: u64,
}

impl Vote {
pub fn new(from: &Pubkey, transaction: Transaction, wallclock: u64) -> Self {
Self {
from: *from,
transaction,
signature: Signature::default(),
wallclock,
}
}
}

impl Signable for Vote {
fn pubkey(&self) -> Pubkey {
self.from
}

fn signable_data(&self) -> Cow<[u8]> {
#[derive(Serialize)]
struct SignData<'a> {
transaction: &'a Transaction,
wallclock: u64,
}
let data = SignData {
transaction: &self.transaction,
wallclock: self.wallclock,
};
Cow::Owned(serialize(&data).expect("unable to serialize Vote"))
Cow::Owned(serialize(&self.data).expect("failed to serialize CrdsData"))
}

fn get_signature(&self) -> Signature {
@@ -113,6 +35,65 @@ impl Signable for Vote {
fn set_signature(&mut self, signature: Signature) {
self.signature = signature
}

fn verify(&self) -> bool {
let sig_check = self
.get_signature()
.verify(&self.pubkey().as_ref(), self.signable_data().borrow());
let data_check = match &self.data {
CrdsData::Vote(ix, _) => *ix < MAX_VOTES,
_ => true,
};
sig_check && data_check
}
}

/// CrdsData that defines the different types of items CrdsValues can hold
#[allow(clippy::large_enum_variant)]
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub enum CrdsData {
/// * Merge Strategy - Latest wallclock is picked
ContactInfo(ContactInfo),
/// * Merge Strategy - Latest wallclock is picked
Vote(VoteIndex, Vote),
/// * Merge Strategy - Latest wallclock is picked
EpochSlots(EpochSlots),
}

#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub struct EpochSlots {
pub from: Pubkey,
pub root: u64,
pub slots: BTreeSet<u64>,
pub wallclock: u64,
}

impl EpochSlots {
pub fn new(from: Pubkey, root: u64, slots: BTreeSet<u64>, wallclock: u64) -> Self {
Self {
from,
root,
slots,
wallclock,
}
}
}

#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub struct Vote {
pub from: Pubkey,
pub transaction: Transaction,
pub wallclock: u64,
}

impl Vote {
pub fn new(from: &Pubkey, transaction: Transaction, wallclock: u64) -> Self {
Self {
from: *from,
transaction,
wallclock,
}
}
}

/// Type of the replicated value
@@ -120,7 +101,7 @@ impl Signable for Vote {
#[derive(PartialEq, Hash, Eq, Clone, Debug)]
pub enum CrdsValueLabel {
ContactInfo(Pubkey),
Vote(Pubkey),
Vote(VoteIndex, Pubkey),
EpochSlots(Pubkey),
}

@@ -128,7 +109,7 @@ impl fmt::Display for CrdsValueLabel {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
CrdsValueLabel::ContactInfo(_) => write!(f, "ContactInfo({})", self.pubkey()),
CrdsValueLabel::Vote(_) => write!(f, "Vote({})", self.pubkey()),
CrdsValueLabel::Vote(ix, _) => write!(f, "Vote({}, {})", ix, self.pubkey()),
CrdsValueLabel::EpochSlots(_) => write!(f, "EpochSlots({})", self.pubkey()),
}
}
@@ -138,104 +119,115 @@ impl CrdsValueLabel {
pub fn pubkey(&self) -> Pubkey {
match self {
CrdsValueLabel::ContactInfo(p) => *p,
CrdsValueLabel::Vote(p) => *p,
CrdsValueLabel::Vote(_, p) => *p,
CrdsValueLabel::EpochSlots(p) => *p,
}
}
}

impl CrdsValue {
pub fn new_unsigned(data: CrdsData) -> Self {
Self {
signature: Signature::default(),
data,
}
}

pub fn new_signed(data: CrdsData, keypair: &Keypair) -> Self {
let mut value = Self::new_unsigned(data);
value.sign(keypair);
value
}
/// Totally unsecure unverfiable wallclock of the node that generated this message
/// Latest wallclock is always picked.
/// This is used to time out push messages.
pub fn wallclock(&self) -> u64 {
match self {
CrdsValue::ContactInfo(contact_info) => contact_info.wallclock,
CrdsValue::Vote(vote) => vote.wallclock,
CrdsValue::EpochSlots(vote) => vote.wallclock,
match &self.data {
CrdsData::ContactInfo(contact_info) => contact_info.wallclock,
CrdsData::Vote(_, vote) => vote.wallclock,
CrdsData::EpochSlots(vote) => vote.wallclock,
}
}
pub fn pubkey(&self) -> Pubkey {
match &self.data {
CrdsData::ContactInfo(contact_info) => contact_info.id,
CrdsData::Vote(_, vote) => vote.from,
CrdsData::EpochSlots(slots) => slots.from,
}
}
pub fn label(&self) -> CrdsValueLabel {
match self {
CrdsValue::ContactInfo(contact_info) => {
CrdsValueLabel::ContactInfo(contact_info.pubkey())
}
CrdsValue::Vote(vote) => CrdsValueLabel::Vote(vote.pubkey()),
CrdsValue::EpochSlots(slots) => CrdsValueLabel::EpochSlots(slots.pubkey()),
match &self.data {
CrdsData::ContactInfo(_) => CrdsValueLabel::ContactInfo(self.pubkey()),
CrdsData::Vote(ix, _) => CrdsValueLabel::Vote(*ix, self.pubkey()),
CrdsData::EpochSlots(_) => CrdsValueLabel::EpochSlots(self.pubkey()),
}
}
pub fn contact_info(&self) -> Option<&ContactInfo> {
match self {
CrdsValue::ContactInfo(contact_info) => Some(contact_info),
match &self.data {
CrdsData::ContactInfo(contact_info) => Some(contact_info),
_ => None,
}
}
pub fn vote(&self) -> Option<&Vote> {
match self {
CrdsValue::Vote(vote) => Some(vote),
match &self.data {
CrdsData::Vote(_, vote) => Some(vote),
_ => None,
}
}

pub fn vote_index(&self) -> Option<VoteIndex> {
match &self.data {
CrdsData::Vote(ix, _) => Some(*ix),
_ => None,
}
}

pub fn epoch_slots(&self) -> Option<&EpochSlots> {
match self {
CrdsValue::EpochSlots(slots) => Some(slots),
match &self.data {
CrdsData::EpochSlots(slots) => Some(slots),
_ => None,
}
}
/// Return all the possible labels for a record identified by Pubkey.
pub fn record_labels(key: &Pubkey) -> [CrdsValueLabel; 3] {
[
pub fn record_labels(key: &Pubkey) -> Vec<CrdsValueLabel> {
let mut labels = vec![
CrdsValueLabel::ContactInfo(*key),
CrdsValueLabel::Vote(*key),
CrdsValueLabel::EpochSlots(*key),
]
];
labels.extend((0..MAX_VOTES).map(|ix| CrdsValueLabel::Vote(ix, *key)));
labels
}

/// Returns the size (in bytes) of a CrdsValue
pub fn size(&self) -> u64 {
serialized_size(&self).expect("unable to serialize contact info")
}
}

impl Signable for CrdsValue {
fn sign(&mut self, keypair: &Keypair) {
match self {
CrdsValue::ContactInfo(contact_info) => contact_info.sign(keypair),
CrdsValue::Vote(vote) => vote.sign(keypair),
CrdsValue::EpochSlots(epoch_slots) => epoch_slots.sign(keypair),
};
}
pub fn compute_vote_index(tower_index: usize, mut votes: Vec<&CrdsValue>) -> VoteIndex {
let mut available: HashSet<VoteIndex> = (0..MAX_VOTES).collect();
votes.iter().filter_map(|v| v.vote_index()).for_each(|ix| {
available.remove(&ix);
});

fn verify(&self) -> bool {
match self {
CrdsValue::ContactInfo(contact_info) => contact_info.verify(),
CrdsValue::Vote(vote) => vote.verify(),
CrdsValue::EpochSlots(epoch_slots) => epoch_slots.verify(),
// free index
if !available.is_empty() {
return *available.iter().next().unwrap();
}
}

fn verify(&self) -> bool {
match self {
CrdsValue::ContactInfo(contact_info) => contact_info.pubkey(),
|
||||
CrdsValue::Vote(vote) => vote.pubkey(),
|
||||
CrdsValue::EpochSlots(epoch_slots) => epoch_slots.pubkey(),
|
||||
assert!(votes.len() == MAX_VOTES as usize);
|
||||
votes.sort_by_key(|v| v.vote().expect("all values must be votes").wallclock);
|
||||
|
||||
// If Tower is full, oldest removed first
|
||||
if tower_index + 1 == MAX_VOTES as usize {
|
||||
return votes[0].vote_index().expect("all values must be votes");
|
||||
}
|
||||
}
|
||||
|
||||
fn signable_data(&self) -> Cow<[u8]> {
|
||||
unimplemented!()
|
||||
}
|
||||
// If Tower is not full, the early votes have expired
|
||||
assert!(tower_index < MAX_VOTES as usize);
|
||||
|
||||
fn get_signature(&self) -> Signature {
|
||||
match self {
|
||||
CrdsValue::ContactInfo(contact_info) => contact_info.get_signature(),
|
||||
CrdsValue::Vote(vote) => vote.get_signature(),
|
||||
CrdsValue::EpochSlots(epoch_slots) => epoch_slots.get_signature(),
|
||||
}
|
||||
}
|
||||
|
||||
fn set_signature(&mut self, _: Signature) {
|
||||
unimplemented!()
|
||||
votes[tower_index]
|
||||
.vote_index()
|
||||
.expect("all values must be votes")
|
||||
}
|
||||
}
|
||||
|
||||
@ -250,49 +242,125 @@ mod test {
|
||||
|
||||
#[test]
|
||||
fn test_labels() {
|
||||
let mut hits = [false; 3];
|
||||
let mut hits = [false; 2 + MAX_VOTES as usize];
|
||||
// this method should cover all the possible labels
|
||||
for v in &CrdsValue::record_labels(&Pubkey::default()) {
|
||||
match v {
|
||||
CrdsValueLabel::ContactInfo(_) => hits[0] = true,
|
||||
CrdsValueLabel::Vote(_) => hits[1] = true,
|
||||
CrdsValueLabel::EpochSlots(_) => hits[2] = true,
|
||||
CrdsValueLabel::EpochSlots(_) => hits[1] = true,
|
||||
CrdsValueLabel::Vote(ix, _) => hits[*ix as usize + 2] = true,
|
||||
}
|
||||
}
|
||||
assert!(hits.iter().all(|x| *x));
|
||||
}
|
||||
#[test]
|
||||
fn test_keys_and_values() {
|
||||
let v = CrdsValue::ContactInfo(ContactInfo::default());
|
||||
let v = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::default()));
|
||||
assert_eq!(v.wallclock(), 0);
|
||||
let key = v.clone().contact_info().unwrap().id;
|
||||
assert_eq!(v.label(), CrdsValueLabel::ContactInfo(key));
|
||||
|
||||
let v = CrdsValue::Vote(Vote::new(&Pubkey::default(), test_tx(), 0));
|
||||
let v = CrdsValue::new_unsigned(CrdsData::Vote(
|
||||
0,
|
||||
Vote::new(&Pubkey::default(), test_tx(), 0),
|
||||
));
|
||||
assert_eq!(v.wallclock(), 0);
|
||||
let key = v.clone().vote().unwrap().from;
|
||||
assert_eq!(v.label(), CrdsValueLabel::Vote(key));
|
||||
assert_eq!(v.label(), CrdsValueLabel::Vote(0, key));
|
||||
|
||||
let v = CrdsValue::EpochSlots(EpochSlots::new(Pubkey::default(), 0, BTreeSet::new(), 0));
|
||||
let v = CrdsValue::new_unsigned(CrdsData::EpochSlots(EpochSlots::new(
|
||||
Pubkey::default(),
|
||||
0,
|
||||
BTreeSet::new(),
|
||||
0,
|
||||
)));
|
||||
assert_eq!(v.wallclock(), 0);
|
||||
let key = v.clone().epoch_slots().unwrap().from;
|
||||
assert_eq!(v.label(), CrdsValueLabel::EpochSlots(key));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_signature() {
|
||||
let keypair = Keypair::new();
|
||||
let wrong_keypair = Keypair::new();
|
||||
let mut v =
|
||||
CrdsValue::ContactInfo(ContactInfo::new_localhost(&keypair.pubkey(), timestamp()));
|
||||
let mut v = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
|
||||
&keypair.pubkey(),
|
||||
timestamp(),
|
||||
)));
|
||||
verify_signatures(&mut v, &keypair, &wrong_keypair);
|
||||
v = CrdsValue::Vote(Vote::new(&keypair.pubkey(), test_tx(), timestamp()));
|
||||
v = CrdsValue::new_unsigned(CrdsData::Vote(
|
||||
0,
|
||||
Vote::new(&keypair.pubkey(), test_tx(), timestamp()),
|
||||
));
|
||||
verify_signatures(&mut v, &keypair, &wrong_keypair);
|
||||
let btreeset: BTreeSet<u64> = vec![1, 2, 3, 6, 8].into_iter().collect();
|
||||
v = CrdsValue::EpochSlots(EpochSlots::new(keypair.pubkey(), 0, btreeset, timestamp()));
|
||||
v = CrdsValue::new_unsigned(CrdsData::EpochSlots(EpochSlots::new(
|
||||
keypair.pubkey(),
|
||||
0,
|
||||
btreeset,
|
||||
timestamp(),
|
||||
)));
|
||||
verify_signatures(&mut v, &keypair, &wrong_keypair);
|
||||
}
|
||||
|
||||
fn test_serialize_deserialize_value(value: &mut CrdsValue, keypair: &Keypair) {
|
||||
#[test]
|
||||
fn test_max_vote_index() {
|
||||
let keypair = Keypair::new();
|
||||
let vote = CrdsValue::new_signed(
|
||||
CrdsData::Vote(
|
||||
MAX_VOTES,
|
||||
Vote::new(&keypair.pubkey(), test_tx(), timestamp()),
|
||||
),
|
||||
&keypair,
|
||||
);
|
||||
assert!(!vote.verify());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_compute_vote_index_empty() {
|
||||
for i in 0..MAX_VOTES {
|
||||
let votes = vec![];
|
||||
assert!(CrdsValue::compute_vote_index(i as usize, votes) < MAX_VOTES);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_compute_vote_index_one() {
|
||||
let keypair = Keypair::new();
|
||||
let vote = CrdsValue::new_unsigned(CrdsData::Vote(
|
||||
0,
|
||||
Vote::new(&keypair.pubkey(), test_tx(), 0),
|
||||
));
|
||||
for i in 0..MAX_VOTES {
|
||||
let votes = vec![&vote];
|
||||
assert!(CrdsValue::compute_vote_index(i as usize, votes) > 0);
|
||||
let votes = vec![&vote];
|
||||
assert!(CrdsValue::compute_vote_index(i as usize, votes) < MAX_VOTES);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_compute_vote_index_full() {
|
||||
let keypair = Keypair::new();
|
||||
let votes: Vec<_> = (0..MAX_VOTES)
|
||||
.map(|x| {
|
||||
CrdsValue::new_unsigned(CrdsData::Vote(
|
||||
x,
|
||||
Vote::new(&keypair.pubkey(), test_tx(), x as u64),
|
||||
))
|
||||
})
|
||||
.collect();
|
||||
let vote_refs = votes.iter().collect();
|
||||
//pick the oldest vote when full
|
||||
assert_eq!(CrdsValue::compute_vote_index(31, vote_refs), 0);
|
||||
//pick the index
|
||||
let vote_refs = votes.iter().collect();
|
||||
assert_eq!(CrdsValue::compute_vote_index(0, vote_refs), 0);
|
||||
let vote_refs = votes.iter().collect();
|
||||
assert_eq!(CrdsValue::compute_vote_index(30, vote_refs), 30);
|
||||
}
|
||||
|
||||
fn serialize_deserialize_value(value: &mut CrdsValue, keypair: &Keypair) {
|
||||
let num_tries = 10;
|
||||
value.sign(keypair);
|
||||
let original_signature = value.get_signature();
|
||||
@ -319,6 +387,6 @@ mod test {
|
||||
assert!(value.verify());
|
||||
value.sign(&wrong_keypair);
|
||||
assert!(!value.verify());
|
||||
test_serialize_deserialize_value(value, correct_keypair);
|
||||
serialize_deserialize_value(value, correct_keypair);
|
||||
}
|
||||
}
|
||||
|
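The diff above re-keys gossip votes: a `CrdsValue` now carries `CrdsData::Vote(VoteIndex, Vote)`, so each node owns up to `MAX_VOTES` vote slots in the CRDS table instead of one, and `compute_vote_index` decides which slot a fresh vote lands in. Below is a minimal standalone sketch of that slot-selection idea, with the Solana types replaced by bare `(index, wallclock)` pairs; the real function above additionally keys eviction off the caller's tower index, which is omitted here.

```rust
// Sketch only: MAX_VOTES = 32 matches the constant used in the diff;
// everything else is illustrative, not the crate's API.
const MAX_VOTES: u8 = 32;

/// Pick a gossip slot for a new vote: reuse a free index if one exists,
/// otherwise evict the vote with the oldest wallclock.
fn pick_vote_index(existing: &[(u8, u64)]) -> u8 {
    // first index not yet occupied
    let free = (0..MAX_VOTES).find(|ix| existing.iter().all(|(i, _)| i != ix));
    if let Some(ix) = free {
        return ix;
    }
    // table is full: evict the oldest (smallest wallclock)
    existing
        .iter()
        .min_by_key(|(_, wallclock)| *wallclock)
        .map(|(ix, _)| *ix)
        .expect("table is full, so it is non-empty")
}

fn main() {
    assert_eq!(pick_vote_index(&[(0, 10), (1, 20)]), 2); // first free slot
    let full: Vec<(u8, u64)> = (0..MAX_VOTES).map(|i| (i, u64::from(i))).collect();
    assert_eq!(pick_vote_index(&full), 0); // oldest wallclock evicted
}
```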
@ -84,7 +84,7 @@ mod tests {
let blocktree_path = get_tmp_ledger_path!();
let blocktree = Blocktree::open(&blocktree_path).unwrap();
let (shreds, _) = make_many_slot_entries(0, 50, 5);
blocktree.insert_shreds(shreds, None).unwrap();
blocktree.insert_shreds(shreds, None, false).unwrap();
let blocktree = Arc::new(blocktree);
let (sender, receiver) = channel();
@ -5,6 +5,24 @@
//! command-line tools to spin up validators and a Rust library
//!

#[macro_export]
macro_rules! version {
() => {
&*format!(
"{}{}",
env!("CARGO_PKG_VERSION"),
if option_env!("CI_TAG").is_none() {
format!(
" [channel={} commit={}]",
option_env!("CHANNEL").unwrap_or("unknown"),
option_env!("CI_COMMIT").unwrap_or("unknown"),
)
} else {
"".to_string()
},
)
};
}
pub mod banking_stage;
pub mod broadcast_stage;
pub mod chacha;
@ -35,6 +53,7 @@ pub mod gossip_service;
pub mod ledger_cleanup_service;
pub mod local_vote_signer_service;
pub mod packet;
pub mod partition_cfg;
pub mod poh_recorder;
pub mod poh_service;
pub mod recvmmsg;
@ -58,7 +77,6 @@ pub mod test_tx;
pub mod tpu;
pub mod tvu;
pub mod validator;
pub(crate) mod version;
pub mod weighted_shuffle;
pub mod window_service;
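The new `version!` macro builds the reported version string at compile time: `env!("CARGO_PKG_VERSION")` is always set by Cargo, while the CI variables are read with `option_env!`, which tolerates their absence. A standalone sketch of the same pattern (the `CI_TAG`/`CHANNEL`/`CI_COMMIT` names are simply the ones the diff uses):

```rust
// Sketch of the version!() pattern: env!() requires the variable at compile
// time, option_env!() falls back gracefully, so local builds report
// "unknown" channel/commit metadata instead of failing to compile.
fn version_string() -> String {
    format!(
        "{}{}",
        env!("CARGO_PKG_VERSION"),
        if option_env!("CI_TAG").is_none() {
            format!(
                " [channel={} commit={}]",
                option_env!("CHANNEL").unwrap_or("unknown"),
                option_env!("CI_COMMIT").unwrap_or("unknown"),
            )
        } else {
            String::new()
        },
    )
}

fn main() {
    // e.g. "0.20.5 [channel=unknown commit=unknown]" on a local build
    println!("{}", version_string());
}
```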
92 core/src/partition_cfg.rs Normal file
@ -0,0 +1,92 @@
use solana_ledger::leader_schedule_cache::LeaderScheduleCache;
use solana_ledger::shred::Shred;
use solana_runtime::bank::Bank;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::timing::timestamp;
use std::collections::HashSet;
use std::sync::Arc;
use std::sync::RwLock;

/// Configure a partition in the retransmit stage
#[derive(Debug, Clone)]
pub struct Partition {
pub num_partitions: usize,
pub my_partition: usize,
pub start_ts: u64,
pub end_ts: u64,
leaders: Arc<RwLock<Vec<Pubkey>>>,
}
impl Default for Partition {
fn default() -> Self {
Self {
num_partitions: 0,
my_partition: 0,
start_ts: 0,
end_ts: 0,
leaders: Arc::new(RwLock::new(vec![])),
}
}
}

#[derive(Default, Debug, Clone)]
pub struct PartitionCfg {
partitions: Vec<Partition>,
}

impl PartitionCfg {
pub fn new(partitions: Vec<Partition>) -> Self {
Self { partitions }
}
pub fn is_connected(
&self,
bank: &Option<Arc<Bank>>,
leader_schedule_cache: &Arc<LeaderScheduleCache>,
shred: &Shred,
) -> bool {
if bank.is_none() {
return true;
}
let bank = bank.as_ref().unwrap().clone();
let slot_leader_pubkey = leader_schedule_cache.slot_leader_at(shred.slot(), Some(&bank));
let slot_leader_pubkey = slot_leader_pubkey.unwrap_or_default();
let time = timestamp();
for p in &self.partitions {
let is_time = (p.start_ts <= time) && (time < p.end_ts);
if !is_time {
continue;
}
trace!("PARTITION_TEST partition time! {}", p.my_partition);
if p.num_partitions == 0 {
continue;
}
if p.leaders.read().unwrap().is_empty() {
let mut leader_vec = p.leaders.write().unwrap();
let mut leaders: Vec<Pubkey> = bank.vote_accounts().keys().cloned().collect();
leaders.sort();
*leader_vec = leaders;
warn!("PARTITION_TEST partition enabled {}", p.my_partition);
}
let is_connected: bool = {
let leaders = p.leaders.read().unwrap();
let start = p.my_partition * leaders.len() / p.num_partitions;
let partition_size = leaders.len() / p.num_partitions;
let end = start + partition_size;
let end = if leaders.len() - end < partition_size {
leaders.len()
} else {
end
};
let my_leaders: HashSet<_> = leaders[start..end].iter().collect();
my_leaders.contains(&slot_leader_pubkey)
};
if is_connected {
trace!("PARTITION_TEST connected {}", p.my_partition);
continue;
}
trace!("PARTITION_TEST not connected {}", p.my_partition);
return false;
}
trace!("PARTITION_TEST connected");
true
}
}
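The slice arithmetic inside `is_connected` assigns each partition a contiguous range of the sorted leader list: partition `i` of `k` starts at `i * len / k`, takes `len / k` leaders, and a short tail is folded into the last range. A sketch of just that math, with illustrative names:

```rust
// Sketch of the leader-slice math in PartitionCfg::is_connected above.
fn partition_range(len: usize, num_partitions: usize, my_partition: usize) -> (usize, usize) {
    let start = my_partition * len / num_partitions;
    let size = len / num_partitions;
    let mut end = start + size;
    // fold a short tail into the last partition, as the diff does
    if len - end < size {
        end = len;
    }
    (start, end)
}

fn main() {
    // 10 leaders split 3 ways: [0,3), [3,6), [6,10) -- the tail lands in slice 2
    assert_eq!(partition_range(10, 3, 0), (0, 3));
    assert_eq!(partition_range(10, 3, 1), (3, 6));
    assert_eq!(partition_range(10, 3, 2), (6, 10));
}
```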
@ -313,7 +313,7 @@ impl PohRecorder {
let poh_entry = self.poh.lock().unwrap().tick();
inc_new_counter_warn!(
"poh_recorder-tick_lock_contention",
timing::duration_as_ms(&now.elapsed()) as usize
timing::duration_as_us(&now.elapsed()) as usize
);
let now = Instant::now();
if let Some(poh_entry) = poh_entry {
@ -323,7 +323,7 @@ impl PohRecorder {
if self.leader_first_tick_height.is_none() {
inc_new_counter_warn!(
"poh_recorder-tick_overhead",
timing::duration_as_ms(&now.elapsed()) as usize
timing::duration_as_us(&now.elapsed()) as usize
);
return;
}
@ -339,7 +339,7 @@ impl PohRecorder {
}
inc_new_counter_warn!(
"poh_recorder-tick_overhead",
timing::duration_as_ms(&now.elapsed()) as usize
timing::duration_as_us(&now.elapsed()) as usize
);
}

@ -363,20 +363,29 @@ impl PohRecorder {
return Err(Error::PohRecorderError(PohRecorderError::MaxHeightReached));
}

let now = Instant::now();
if let Some(poh_entry) = self.poh.lock().unwrap().record(mixin) {
{
let now = Instant::now();
let mut poh_lock = self.poh.lock().unwrap();
inc_new_counter_warn!(
"poh_recorder-record_lock_contention",
timing::duration_as_ms(&now.elapsed()) as usize
timing::duration_as_us(&now.elapsed()) as usize
);
let entry = Entry {
num_hashes: poh_entry.num_hashes,
hash: poh_entry.hash,
transactions,
};
self.sender
.send((working_bank.bank.clone(), (entry, self.tick_height)))?;
return Ok(());
let now = Instant::now();
let res = poh_lock.record(mixin);
inc_new_counter_warn!(
"poh_recorder-record_ms",
timing::duration_as_us(&now.elapsed()) as usize
);
if let Some(poh_entry) = res {
let entry = Entry {
num_hashes: poh_entry.num_hashes,
hash: poh_entry.hash,
transactions,
};
self.sender
.send((working_bank.bank.clone(), (entry, self.tick_height)))?;
return Ok(());
}
}
// record() might fail if the next PoH hash needs to be a tick. But that's ok, tick()
// and re-record()
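The recorder counters above switch from `duration_as_ms` to `duration_as_us`: lock waits and record calls are usually sub-millisecond, so a millisecond counter rounds to zero and hides contention. A std-only sketch of the same measurement, with the Solana counter macro replaced by a plain `eprintln!` for illustration:

```rust
use std::sync::Mutex;
use std::time::Instant;

// Sketch only: time how long it takes to acquire a lock, in microseconds.
fn main() {
    let poh = Mutex::new(0u64);
    let now = Instant::now();
    let mut guard = poh.lock().unwrap();
    // duration_as_us in the diff corresponds to as_micros() here; at
    // sub-millisecond scale a milliseconds counter would report zero.
    eprintln!(
        "poh_recorder-record_lock_contention: {}us",
        now.elapsed().as_micros()
    );
    *guard += 1;
}
```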
@ -18,7 +18,7 @@ use std::{
time::Duration,
};

pub const MAX_REPAIR_LENGTH: usize = 16;
pub const MAX_REPAIR_LENGTH: usize = 512;
pub const REPAIR_MS: u64 = 100;
pub const MAX_ORPHANS: usize = 5;

@ -35,7 +35,7 @@ pub enum RepairStrategy {
pub enum RepairType {
Orphan(u64),
HighestBlob(u64, u64),
Blob(u64, u64),
Shred(u64, u64),
}

pub struct RepairSlotRange {
@ -168,15 +168,7 @@ impl RepairService {
})
.collect();

for ((to, req), repair_request) in reqs {
if let Ok(local_addr) = repair_socket.local_addr() {
datapoint_debug!(
"repair_service",
("repair_request", format!("{:?}", repair_request), String),
("to", to.to_string(), String),
("from", local_addr.to_string(), String),
);
}
for ((to, req), _) in reqs {
repair_socket.send_to(&req, to).unwrap_or_else(|e| {
info!("{} repair req send_to({}) error {:?}", id, to, e);
0
@ -252,13 +244,13 @@ impl RepairService {
} else {
let reqs = blocktree.find_missing_data_indexes(
slot,
slot_meta.first_shred_timestamp,
slot_meta.consumed,
slot_meta.received,
max_repairs,
);

reqs.into_iter()
.map(|i| RepairType::Blob(slot, i))
.map(|i| RepairType::Shred(slot, i))
.collect()
}
}
@ -421,7 +413,7 @@ mod test {
let (mut shreds, _) = make_slot_entries(1, 0, 1);
let (shreds2, _) = make_slot_entries(5, 2, 1);
shreds.extend(shreds2);
blocktree.insert_shreds(shreds, None).unwrap();
blocktree.insert_shreds(shreds, None, false).unwrap();
assert_eq!(
RepairService::generate_repairs(&blocktree, 0, 2).unwrap(),
vec![RepairType::HighestBlob(0, 0), RepairType::Orphan(2)]
@ -441,7 +433,7 @@ mod test {

// Write this blob to slot 2, should chain to slot 0, which we haven't received
// any blobs for
blocktree.insert_shreds(shreds, None).unwrap();
blocktree.insert_shreds(shreds, None, false).unwrap();

// Check that repair tries to patch the empty slot
assert_eq!(
@ -477,13 +469,16 @@ mod test {
missing_indexes_per_slot.insert(0, index);
}
}
blocktree.insert_shreds(shreds_to_write, None).unwrap();

blocktree
.insert_shreds(shreds_to_write, None, false)
.unwrap();
// sleep so that the holes are ready for repair
sleep(Duration::from_secs(1));
let expected: Vec<RepairType> = (0..num_slots)
.flat_map(|slot| {
missing_indexes_per_slot
.iter()
.map(move |blob_index| RepairType::Blob(slot as u64, *blob_index))
.map(move |blob_index| RepairType::Shred(slot as u64, *blob_index))
})
.collect();

@ -515,7 +510,7 @@ mod test {
// Remove last shred (which is also last in slot) so that slot is not complete
shreds.pop();

blocktree.insert_shreds(shreds, None).unwrap();
blocktree.insert_shreds(shreds, None, false).unwrap();

// We didn't get the last blob for this slot, so ask for the highest blob for that slot
let expected: Vec<RepairType> =
@ -541,9 +536,10 @@ mod test {
let shreds = make_chaining_slot_entries(&slots, num_entries_per_slot);
for (mut slot_shreds, _) in shreds.into_iter() {
slot_shreds.remove(0);
blocktree.insert_shreds(slot_shreds, None).unwrap();
blocktree.insert_shreds(slot_shreds, None, false).unwrap();
}

// sleep to make slot eligible for repair
sleep(Duration::from_secs(1));
// Iterate through all possible combinations of start..end (inclusive on both
// sides of the range)
for start in 0..slots.len() {
@ -555,7 +551,7 @@ mod test {
..=repair_slot_range.end)
.map(|slot_index| {
if slots.contains(&(slot_index as u64)) {
RepairType::Blob(slot_index as u64, 0)
RepairType::Shred(slot_index as u64, 0)
} else {
RepairType::HighestBlob(slot_index as u64, 0)
}
@ -593,7 +589,7 @@ mod test {
let parent = if i > 0 { i - 1 } else { 0 };
let (shreds, _) = make_slot_entries(i, parent, num_entries_per_slot as u64);

blocktree.insert_shreds(shreds, None).unwrap();
blocktree.insert_shreds(shreds, None, false).unwrap();
}

let end = 4;
@ -646,9 +642,9 @@ mod test {
.collect();
let mut full_slots = BTreeSet::new();

blocktree.insert_shreds(fork1_shreds, None).unwrap();
blocktree.insert_shreds(fork1_shreds, None, false).unwrap();
blocktree
.insert_shreds(fork2_incomplete_shreds, None)
.insert_shreds(fork2_incomplete_shreds, None, false)
.unwrap();

// Test that only slots > root from fork1 were included
@ -672,7 +668,7 @@ mod test {
.into_iter()
.flat_map(|(shreds, _)| shreds)
.collect();
blocktree.insert_shreds(fork3_shreds, None).unwrap();
blocktree.insert_shreds(fork3_shreds, None, false).unwrap();
RepairService::get_completed_slots_past_root(
&blocktree,
&mut full_slots,
@ -719,7 +715,9 @@ mod test {
let step = rng.gen_range(1, max_step + 1) as usize;
let step = std::cmp::min(step, num_shreds - i);
let shreds_to_insert = shreds.drain(..step).collect_vec();
blocktree_.insert_shreds(shreds_to_insert, None).unwrap();
blocktree_
.insert_shreds(shreds_to_insert, None, false)
.unwrap();
sleep(Duration::from_millis(repair_interval_ms));
i += step;
}
@ -749,7 +747,7 @@ mod test {
// Update with new root, should filter out the slots <= root
root = num_slots / 2;
let (shreds, _) = make_slot_entries(num_slots + 2, num_slots + 1, entries_per_slot);
blocktree.insert_shreds(shreds, None).unwrap();
blocktree.insert_shreds(shreds, None, false).unwrap();
RepairService::update_epoch_slots(
Pubkey::default(),
root,
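The `RepairType::Blob(u64, u64)` to `RepairType::Shred(u64, u64)` rename tracks the codebase-wide blob-to-shred terminology change; the generation logic itself is unchanged: each missing data index of a slot becomes one targeted repair request. A simplified stand-in (local enum, missing indexes passed in directly instead of queried from the blocktree):

```rust
// Sketch only: RepairType here is a local stand-in, not the crate's enum.
#[derive(Debug, PartialEq)]
#[allow(dead_code)]
enum RepairType {
    Orphan(u64),
    HighestBlob(u64, u64),
    Shred(u64, u64),
}

/// Turn the missing data-shred indexes of `slot` into repair requests.
fn generate_repairs(slot: u64, missing_indexes: &[u64]) -> Vec<RepairType> {
    missing_indexes
        .iter()
        .map(|&i| RepairType::Shred(slot, i))
        .collect()
}

fn main() {
    let reqs = generate_repairs(7, &[2, 5]);
    assert_eq!(reqs, vec![RepairType::Shred(7, 2), RepairType::Shred(7, 5)]);
}
```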
@ -17,8 +17,9 @@ use solana_ledger::entry::{Entry, EntrySlice};
use solana_ledger::leader_schedule_cache::LeaderScheduleCache;
use solana_ledger::snapshot_package::SnapshotPackageSender;
use solana_measure::measure::Measure;
use solana_metrics::{datapoint_warn, inc_new_counter_info};
use solana_metrics::inc_new_counter_info;
use solana_runtime::bank::Bank;
use solana_sdk::clock::Slot;
use solana_sdk::hash::Hash;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::KeypairUtil;
@ -36,6 +37,8 @@ use std::time::Instant;

pub const MAX_ENTRY_RECV_PER_ITER: usize = 512;

type VoteAndPoHBank = (Option<(Arc<Bank>, u64)>, Option<Arc<Bank>>);

// Implement a destructor for the ReplayStage thread to signal it exited
// even on panics
struct Finalizer {
@ -70,6 +73,21 @@ struct ReplaySlotStats {
replay_start: Instant,
}

#[derive(Debug, Clone, Default)]
struct ForkStats {
weight: u128,
total_staked: u64,
slot: Slot,
block_height: u64,
has_voted: bool,
is_recent: bool,
vote_threshold: bool,
is_locked_out: bool,
stake_lockouts: HashMap<u64, StakeLockout>,
computed: bool,
confirmation_reported: bool,
}

impl ReplaySlotStats {
pub fn new(slot: u64) -> Self {
Self {
@ -116,6 +134,7 @@ struct ForkProgress {
started_ms: u64,
is_dead: bool,
stats: ReplaySlotStats,
fork_stats: ForkStats,
}

impl ForkProgress {
@ -127,6 +146,7 @@ impl ForkProgress {
started_ms: timing::timestamp(),
is_dead: false,
stats: ReplaySlotStats::new(slot),
fork_stats: ForkStats::default(),
}
}
}
@ -177,8 +197,13 @@ impl ReplayStage {
.spawn(move || {
let _exit = Finalizer::new(exit_.clone());
let mut progress = HashMap::new();
// Initialize progress map with any root banks
for bank in bank_forks.read().unwrap().frozen_banks().values() {
progress.insert(bank.slot(), ForkProgress::new(bank.slot(), bank.last_blockhash()));
}
let mut current_leader = None;

let mut last_reset = Hash::default();
let mut partition = false;
loop {
let now = Instant::now();
// Stop getting entries if we get exit signal
@ -203,51 +228,68 @@ impl ReplayStage {
);

let ancestors = Arc::new(bank_forks.read().unwrap().ancestors());
let votable = Self::generate_votable_banks(
&ancestors,
&bank_forks,
&tower,
&mut progress,
);

if let Some((_, bank, _, total_staked)) = votable.into_iter().last() {
subscriptions.notify_subscribers(bank.slot(), &bank_forks);

if let Some(votable_leader) =
leader_schedule_cache.slot_leader_at(bank.slot(), Some(&bank))
{
Self::log_leader_change(
&my_pubkey,
bank.slot(),
&mut current_leader,
&votable_leader,
);
loop {
let (vote_bank, heaviest) =
Self::select_fork(&ancestors, &bank_forks, &tower, &mut progress);
let done = vote_bank.is_none();
let mut vote_bank_slot = 0;
let reset_bank = vote_bank.as_ref().map(|b| b.0.clone()).or(heaviest);
if let Some((bank, total_staked)) = vote_bank {
info!("voting: {}", bank.slot());
subscriptions.notify_subscribers(bank.slot(), &bank_forks);
if let Some(votable_leader) =
leader_schedule_cache.slot_leader_at(bank.slot(), Some(&bank))
{
Self::log_leader_change(
&my_pubkey,
bank.slot(),
&mut current_leader,
&votable_leader,
);
}
vote_bank_slot = bank.slot();
Self::handle_votable_bank(
&bank,
&bank_forks,
&mut tower,
&mut progress,
&vote_account,
&voting_keypair,
&cluster_info,
&blocktree,
&leader_schedule_cache,
&root_bank_sender,
total_staked,
&lockouts_sender,
&snapshot_package_sender,
)?;
}
if let Some(bank) = reset_bank {
if last_reset != bank.last_blockhash() {
Self::reset_poh_recorder(
&my_pubkey,
&blocktree,
&bank,
&poh_recorder,
&leader_schedule_cache,
);
last_reset = bank.last_blockhash();
tpu_has_bank = false;
info!("vote bank: {} reset bank: {}", vote_bank_slot, bank.slot());
if !partition && vote_bank_slot != bank.slot() {
warn!("PARTITION DETECTED waiting to join fork: {} last vote: {:?}", bank.slot(), tower.last_vote());
inc_new_counter_info!("replay_stage-partition_detected", 1);
partition = true;
} else if partition && vote_bank_slot == bank.slot() {
warn!("PARTITION resolved fork: {} last vote: {:?}", bank.slot(), tower.last_vote());
partition = false;
inc_new_counter_info!("replay_stage-partition_resolved", 1);
}
}
}
if done {
break;
}

Self::handle_votable_bank(
&bank,
&bank_forks,
&mut tower,
&mut progress,
&vote_account,
&voting_keypair,
&cluster_info,
&blocktree,
&leader_schedule_cache,
&root_bank_sender,
total_staked,
&lockouts_sender,
&snapshot_package_sender,
)?;

Self::reset_poh_recorder(
&my_pubkey,
&blocktree,
&bank,
&poh_recorder,
&leader_schedule_cache,
);
tpu_has_bank = false;
}

if !tpu_has_bank {
@ -269,7 +311,7 @@ impl ReplayStage {
}

inc_new_counter_info!(
"replicate_stage-duration",
"replay_stage-duration",
duration_as_ms(&now.elapsed()) as usize
);
if did_complete_bank {
@ -379,6 +421,7 @@ impl ReplayStage {
("leader", next_leader.to_string(), String),
);

info!("new fork:{} parent:{} (leader)", poh_slot, parent_slot);
let tpu_bank = bank_forks
.write()
.unwrap()
@ -409,12 +452,9 @@ impl ReplayStage {
fn replay_blocktree_into_bank(
bank: &Arc<Bank>,
blocktree: &Blocktree,
progress: &mut HashMap<u64, ForkProgress>,
bank_progress: &mut ForkProgress,
) -> (Result<()>, usize) {
let mut tx_count = 0;
let bank_progress = &mut progress
.entry(bank.slot())
.or_insert_with(|| ForkProgress::new(bank.slot(), bank.last_blockhash()));
let now = Instant::now();
let load_result =
Self::load_blocktree_entries_with_shred_count(bank, blocktree, bank_progress);
@ -442,19 +482,19 @@ impl ReplayStage {
bank.slot(),
replay_result
);
datapoint_warn!("replay-stage-mark_dead_slot", ("slot", bank.slot(), i64),);
Self::mark_dead_slot(bank.slot(), blocktree, progress);
datapoint_error!(
"replay-stage-mark_dead_slot",
("error", format!("error: {:?}", replay_result), String),
("slot", bank.slot(), i64)
);
Self::mark_dead_slot(bank.slot(), blocktree, bank_progress);
}

(replay_result, tx_count)
}

fn mark_dead_slot(slot: u64, blocktree: &Blocktree, progress: &mut HashMap<u64, ForkProgress>) {
// Remove from progress map so we no longer try to replay this bank
let mut progress_entry = progress
.get_mut(&slot)
.expect("Progress entry must exist after call to replay_entries_into_bank()");
progress_entry.is_dead = true;
fn mark_dead_slot(slot: Slot, blocktree: &Blocktree, bank_progress: &mut ForkProgress) {
bank_progress.is_dead = true;
blocktree
.set_dead_slot(slot)
.expect("Failed to mark slot as dead in blocktree");
@ -479,8 +519,11 @@ impl ReplayStage {
where
T: 'static + KeypairUtil + Send + Sync,
{
if bank.is_empty() {
inc_new_counter_info!("replay_stage-voted_empty_bank", 1);
}
trace!("handle votable bank {}", bank.slot());
let vote = tower.new_vote_from_bank(bank, vote_account);
let (vote, tower_index) = tower.new_vote_from_bank(bank, vote_account);
if let Some(new_root) = tower.record_bank_vote(vote) {
// get the root bank before squash
let root_bank = bank_forks
@ -526,7 +569,10 @@ impl ReplayStage {
let blockhash = bank.last_blockhash();
vote_tx.partial_sign(&[node_keypair.as_ref()], blockhash);
vote_tx.partial_sign(&[voting_keypair.as_ref()], blockhash);
cluster_info.write().unwrap().push_vote(vote_tx);
cluster_info
.write()
.unwrap()
.push_vote(tower_index, vote_tx);
}
Ok(())
}
@ -562,9 +608,10 @@ impl ReplayStage {
};

info!(
"{} voted and reset PoH at tick height {}. {}",
"{} reset PoH to tick {} (within slot {}). {}",
my_pubkey,
bank.tick_height(),
bank.slot(),
next_leader_msg,
);
}
@ -589,9 +636,16 @@ impl ReplayStage {
}

let bank = bank_forks.read().unwrap().get(*bank_slot).unwrap().clone();

// Insert a progress entry even for slots this node is the leader for, so that
// 1) confirm_forks can report confirmation, 2) we can cache computations about
// this bank in `select_fork()`
let bank_progress = &mut progress
.entry(bank.slot())
.or_insert_with(|| ForkProgress::new(bank.slot(), bank.last_blockhash()));
if bank.collector_id() != my_pubkey {
let (replay_result, replay_tx_count) =
Self::replay_blocktree_into_bank(&bank, &blocktree, progress);
Self::replay_blocktree_into_bank(&bank, &blocktree, bank_progress);
tx_count += replay_tx_count;
if Self::is_replay_result_fatal(&replay_result) {
trace!("replay_result_fatal slot {}", bank_slot);
@ -622,77 +676,155 @@ impl ReplayStage {
did_complete_bank
}

#[allow(clippy::type_complexity)]
fn generate_votable_banks(
fn select_fork(
ancestors: &HashMap<u64, HashSet<u64>>,
bank_forks: &Arc<RwLock<BankForks>>,
tower: &Tower,
progress: &mut HashMap<u64, ForkProgress>,
) -> Vec<(u128, Arc<Bank>, HashMap<u64, StakeLockout>, u64)> {
) -> VoteAndPoHBank {
let tower_start = Instant::now();
let frozen_banks = bank_forks.read().unwrap().frozen_banks();
trace!("frozen_banks {}", frozen_banks.len());
let mut votable: Vec<(u128, Arc<Bank>, HashMap<u64, StakeLockout>, u64)> = frozen_banks
let mut frozen_banks: Vec<_> = bank_forks
.read()
.unwrap()
.frozen_banks()
.values()
.filter(|b| {
let is_votable = b.is_votable();
trace!("bank is votable: {} {}", b.slot(), is_votable);
is_votable
})
.filter(|b| {
let has_voted = tower.has_voted(b.slot());
trace!("bank has_voted: {} {}", b.slot(), has_voted);
!has_voted
})
.filter(|b| {
let is_locked_out = tower.is_locked_out(b.slot(), &ancestors);
trace!("bank is is_locked_out: {} {}", b.slot(), is_locked_out);
!is_locked_out
})
.cloned()
.collect();
frozen_banks.sort_by_key(|bank| bank.slot());

trace!("frozen_banks {}", frozen_banks.len());
let stats: Vec<ForkStats> = frozen_banks
.iter()
.map(|bank| {
(
bank,
tower.collect_vote_lockouts(
// Only time progress map should be missing a bank slot
// is if this node was the leader for this slot as those banks
// are not replayed in replay_active_banks()
let mut stats = progress
.get(&bank.slot())
.expect("All frozen banks must exist in the Progress map")
.fork_stats
.clone();

if !stats.computed {
stats.slot = bank.slot();
let (stake_lockouts, total_staked) = tower.collect_vote_lockouts(
bank.slot(),
bank.vote_accounts().into_iter(),
&ancestors,
),
)
})
.filter(|(b, (stake_lockouts, total_staked))| {
let vote_threshold =
tower.check_vote_stake_threshold(b.slot(), &stake_lockouts, *total_staked);
Self::confirm_forks(tower, &stake_lockouts, *total_staked, progress, bank_forks);
debug!("bank vote_threshold: {} {}", b.slot(), vote_threshold);
vote_threshold
})
.map(|(b, (stake_lockouts, total_staked))| {
(
tower.calculate_weight(&stake_lockouts),
b.clone(),
stake_lockouts,
total_staked,
)
);
Self::confirm_forks(tower, &stake_lockouts, total_staked, progress, bank_forks);
stats.total_staked = total_staked;
stats.weight = tower.calculate_weight(&stake_lockouts);
stats.stake_lockouts = stake_lockouts;
stats.block_height = bank.block_height();
}
stats.vote_threshold = tower.check_vote_stake_threshold(
bank.slot(),
&stats.stake_lockouts,
stats.total_staked,
);
if !stats.computed {
if !stats.vote_threshold {
info!("vote threshold check failed: {}", bank.slot());
}
stats.computed = true;
}
stats.is_locked_out = tower.is_locked_out(bank.slot(), &ancestors);
stats.has_voted = tower.has_voted(bank.slot());
stats.is_recent = tower.is_recent(bank.slot());
progress
.get_mut(&bank.slot())
.expect("All frozen banks must exist in the Progress map")
.fork_stats = stats.clone();
stats
})
.collect();
let mut candidates: Vec<_> = frozen_banks
.iter()
.zip(stats.iter())
.filter(|(_, stats)| stats.is_recent && !stats.has_voted)
.collect();

votable.sort_by_key(|b| b.0);
//highest weight, lowest slot first
candidates.sort_by_key(|b| (b.1.weight, 0i64 - b.1.slot as i64));

candidates.iter().for_each(|(_, stats)| {
let mut parents: Vec<_> = if let Some(set) = ancestors.get(&stats.slot) {
set.iter().collect()
} else {
vec![]
};
parents.sort();
debug!("{}: {:?} {:?}", stats.slot, stats, parents,);
});
let rv = Self::pick_best_fork(ancestors, &candidates);
let ms = timing::duration_as_ms(&tower_start.elapsed());

trace!("votable_banks {}", votable.len());
if !votable.is_empty() {
let weights: Vec<u128> = votable.iter().map(|x| x.0).collect();
info!(
"@{:?} tower duration: {:?} len: {} weights: {:?}",
timing::timestamp(),
ms,
votable.len(),
weights
);
}
let weights: Vec<(u128, u64, u64)> = candidates
.iter()
.map(|x| (x.1.weight, x.1.slot, x.1.block_height))
.collect();
debug!(
"@{:?} tower duration: {:?} len: {}/{} weights: {:?} voting: {}",
timing::timestamp(),
ms,
candidates.len(),
stats.iter().filter(|s| !s.has_voted).count(),
weights,
rv.0.is_some()
);
inc_new_counter_info!("replay_stage-tower_duration", ms as usize);
rv
}

votable
fn pick_best_fork(
ancestors: &HashMap<u64, HashSet<u64>>,
best_banks: &[(&Arc<Bank>, &ForkStats)],
) -> VoteAndPoHBank {
if best_banks.is_empty() {
return (None, None);
}
let mut vote = None;
let (best_bank, best_stats) = best_banks.last().unwrap();
debug!("best bank: {:?}", best_stats);
let mut by_slot: Vec<_> = best_banks.iter().collect();
by_slot.sort_by_key(|x| x.1.slot);
//look for the oldest ancestors of the best bank
if let Some(best_ancestors) = ancestors.get(&best_stats.slot) {
for (parent, parent_stats) in by_slot.iter() {
if parent_stats.is_locked_out || !parent_stats.vote_threshold {
continue;
}
if !best_ancestors.contains(&parent_stats.slot) {
continue;
}
debug!("best bank found ancestor: {}", parent_stats.slot);
inc_new_counter_info!("replay_stage-pick_best_fork-ancestor", 1);
vote = Some(((*parent).clone(), parent_stats.total_staked));
}
}
//look for the oldest child of the best bank
if vote.is_none() {
for (child, child_stats) in by_slot.iter().rev() {
if child_stats.is_locked_out || !child_stats.vote_threshold {
continue;
}
let has_best = best_stats.slot == child_stats.slot
|| ancestors
.get(&child.slot())
.map(|set| set.contains(&best_stats.slot))
.unwrap_or(false);
if !has_best {
continue;
}
inc_new_counter_info!("replay_stage-pick_best_fork-child", 1);
debug!("best bank found child: {}", child_stats.slot);
vote = Some(((*child).clone(), child_stats.total_staked));
}
}
if vote.is_none() {
inc_new_counter_info!("replay_stage-fork_selection-heavy_bank_lockout", 1);
}
(vote, Some((*best_bank).clone()))
}

fn confirm_forks(
@ -702,29 +834,30 @@ impl ReplayStage {
progress: &mut HashMap<u64, ForkProgress>,
bank_forks: &Arc<RwLock<BankForks>>,
) {
progress.retain(|slot, prog| {
let duration = timing::timestamp() - prog.started_ms;
if tower.is_slot_confirmed(*slot, stake_lockouts, total_staked)
&& bank_forks
.read()
.unwrap()
.get(*slot)
.map(|s| s.is_frozen())
.unwrap_or(true)
{
info!("validator fork confirmed {} {}ms", *slot, duration);
datapoint_warn!("validator-confirmation", ("duration_ms", duration, i64));
false
} else {
debug!(
"validator fork not confirmed {} {}ms {:?}",
*slot,
duration,
stake_lockouts.get(slot)
);
true
for (slot, prog) in progress.iter_mut() {
if !prog.fork_stats.confirmation_reported {
let duration = timing::timestamp() - prog.started_ms;
if tower.is_slot_confirmed(*slot, stake_lockouts, total_staked)
&& bank_forks
.read()
.unwrap()
.get(*slot)
.map(|s| s.is_frozen())
.unwrap_or(true)
{
info!("validator fork confirmed {} {}ms", *slot, duration);
datapoint_warn!("validator-confirmation", ("duration_ms", duration, i64));
prog.fork_stats.confirmation_reported = true;
} else {
debug!(
"validator fork not confirmed {} {}ms {:?}",
*slot,
duration,
stake_lockouts.get(slot)
);
}
}
});
}
}

fn load_blocktree_entries_with_shred_count(
@ -765,7 +898,7 @@ impl ReplayStage {
shred_index: usize,
bank_progress: &mut ForkProgress,
) -> Result<()> {
datapoint_info!("verify-batch-size", ("size", entries.len() as i64, i64));
datapoint_debug!("verify-batch-size", ("size", entries.len() as i64, i64));
let mut verify_total = Measure::start("verify_and_process_entries");
let last_entry = &bank_progress.last_entry;
let mut entry_state = entries.start_verify(last_entry);
@ -840,21 +973,21 @@ impl ReplayStage {
next_slots.sort();
next_slots
});
for (parent_id, children) in next_slots {
for (parent_slot, children) in next_slots {
let parent_bank = frozen_banks
.get(&parent_id)
.get(&parent_slot)
.expect("missing parent in bank forks")
.clone();
for child_id in children {
if forks.get(child_id).is_some() {
trace!("child already active or frozen {}", child_id);
for child_slot in children {
if forks.get(child_slot).is_some() {
trace!("child already active or frozen {}", child_slot);
continue;
}
let leader = leader_schedule_cache
.slot_leader_at(child_id, Some(&parent_bank))
.slot_leader_at(child_slot, Some(&parent_bank))
.unwrap();
info!("new fork:{} parent:{}", child_id, parent_id);
forks.insert(Bank::new_from_parent(&parent_bank, &leader, child_id));
info!("new fork:{} parent:{}", child_slot, parent_slot);
forks.insert(Bank::new_from_parent(&parent_bank, &leader, child_slot));
}
}
}
@ -908,7 +1041,7 @@ mod test {

// Insert blob for slot 1, generate new forks, check result
let (shreds, _) = make_slot_entries(1, 0, 8);
blocktree.insert_shreds(shreds, None).unwrap();
blocktree.insert_shreds(shreds, None, false).unwrap();
assert!(bank_forks.get(1).is_none());
ReplayStage::generate_new_bank_forks(
&blocktree,
@ -919,7 +1052,7 @@ mod test {

// Insert blob for slot 3, generate new forks, check result
let (shreds, _) = make_slot_entries(2, 0, 8);
blocktree.insert_shreds(shreds, None).unwrap();
blocktree.insert_shreds(shreds, None, false).unwrap();
assert!(bank_forks.get(2).is_none());
ReplayStage::generate_new_bank_forks(
&blocktree,
@ -965,7 +1098,7 @@ mod test {
), // should cause AccountNotFound error
],
);
entries_to_test_shreds(vec![entry], slot, slot.saturating_sub(1), false)
entries_to_test_shreds(vec![entry], slot, slot.saturating_sub(1), false, 0)
});

assert_matches!(
@ -990,7 +1123,7 @@ mod test {
*blockhash,
)],
);
entries_to_test_shreds(vec![entry], slot, slot.saturating_sub(1), false)
entries_to_test_shreds(vec![entry], slot, slot.saturating_sub(1), false, 0)
});

assert_matches!(res, Err(Error::BlobError(BlobError::VerificationFailed)));
@ -1003,7 +1136,7 @@ mod test {
let payload_len = SIZE_OF_DATA_SHRED_PAYLOAD;
let gibberish = [0xa5u8; PACKET_DATA_SIZE];
let mut data_header = DataShredHeader::default();
data_header.flags = DATA_COMPLETE_SHRED;
data_header.flags |= DATA_COMPLETE_SHRED;
let mut shred = Shred::new_empty_from_header(
ShredCommonHeader::default(),
data_header,
@ -1042,11 +1175,13 @@ mod test {
let bank0 = Arc::new(Bank::new(&genesis_block));
let mut progress = HashMap::new();
let last_blockhash = bank0.last_blockhash();
progress.insert(bank0.slot(), ForkProgress::new(0, last_blockhash));
let mut bank0_progress = progress
.entry(bank0.slot())
.or_insert_with(|| ForkProgress::new(0, last_blockhash));
let shreds = shred_to_insert(&mint_keypair, &last_blockhash, bank0.slot());
blocktree.insert_shreds(shreds, None).unwrap();
blocktree.insert_shreds(shreds, None, false).unwrap();
let (res, _tx_count) =
ReplayStage::replay_blocktree_into_bank(&bank0, &blocktree, &mut progress);
ReplayStage::replay_blocktree_into_bank(&bank0, &blocktree, &mut bank0_progress);

// Check that the erroring bank was marked as dead in the progress map
assert!(progress
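The `//highest weight, lowest slot first` comment in `select_fork` above is carried entirely by the sort key `(weight, 0i64 - slot as i64)` plus `pick_best_fork` consuming the last element of the sorted list: ascending weight puts the heaviest fork last, and negating the slot makes the lower slot win ties. A standalone check of just that ordering:

```rust
// Sketch of the candidate ordering used by select_fork() above.
fn main() {
    // (weight, slot)
    let mut candidates: Vec<(u128, u64)> = vec![(5, 10), (9, 4), (9, 2), (1, 7)];
    candidates.sort_by_key(|&(weight, slot)| (weight, 0i64 - slot as i64));
    // heaviest weight wins; the tie at weight 9 resolves to the lower slot 2
    assert_eq!(*candidates.last().unwrap(), (9, 2));
}
```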
@ -2,6 +2,7 @@

use crate::{
cluster_info::{compute_retransmit_peers, ClusterInfo, DATA_PLANE_FANOUT},
partition_cfg::PartitionCfg,
repair_service::RepairStrategy,
result::{Error, Result},
service::Service,
@ -212,6 +213,8 @@ impl RetransmitStage {
exit: &Arc<AtomicBool>,
completed_slots_receiver: CompletedSlotsReceiver,
epoch_schedule: EpochSchedule,
cfg: Option<PartitionCfg>,
shred_version: u16,
) -> Self {
let (retransmit_sender, retransmit_receiver) = channel();

@ -240,13 +243,19 @@ impl RetransmitStage {
repair_strategy,
&leader_schedule_cache.clone(),
move |id, shred, working_bank, last_root| {
should_retransmit_and_persist(
let is_connected = cfg
.as_ref()
.map(|x| x.is_connected(&working_bank, &leader_schedule_cache, shred))
.unwrap_or(true);
let rv = should_retransmit_and_persist(
shred,
working_bank,
&leader_schedule_cache,
id,
last_root,
)
shred_version,
);
rv && is_connected
},
);
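The retransmit predicate is now the conjunction of the usual check and an optional partition filter, with `Option::map(...).unwrap_or(true)` making the filter a no-op when no `PartitionCfg` is configured. A minimal sketch of that pattern, with an illustrative `Cfg` stand-in:

```rust
// Sketch only: Cfg stands in for PartitionCfg; an absent config (None)
// must behave as "always connected".
struct Cfg {
    blocked_slot: u64,
}

impl Cfg {
    fn is_connected(&self, slot: u64) -> bool {
        slot != self.blocked_slot
    }
}

fn should_keep(cfg: &Option<Cfg>, slot: u64, base_check: bool) -> bool {
    let is_connected = cfg
        .as_ref()
        .map(|c| c.is_connected(slot))
        .unwrap_or(true);
    base_check && is_connected
}

fn main() {
    assert!(should_keep(&None, 5, true)); // no config: filter is a no-op
    assert!(!should_keep(&Some(Cfg { blocked_slot: 5 }), 5, true));
    assert!(should_keep(&Some(Cfg { blocked_slot: 5 }), 6, true));
}
```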
@ -7,12 +7,13 @@ use crate::{
packet::PACKET_DATA_SIZE,
storage_stage::StorageState,
validator::ValidatorExit,
version::VERSION,
};
use bincode::{deserialize, serialize};
use jsonrpc_core::{Error, Metadata, Result};
use jsonrpc_derive::rpc;
use solana_client::rpc_request::{RpcEpochInfo, RpcVoteAccountInfo, RpcVoteAccountStatus};
use solana_client::rpc_request::{
RpcContactInfo, RpcEpochInfo, RpcVoteAccountInfo, RpcVoteAccountStatus,
};
use solana_drone::drone::request_airdrop_transaction;
use solana_ledger::bank_forks::BankForks;
use solana_runtime::bank::Bank;
@ -256,18 +257,6 @@ pub struct Meta {
}
impl Metadata for Meta {}

#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct RpcContactInfo {
/// Pubkey of the node as a base-58 string
pub pubkey: String,
/// Gossip port
pub gossip: Option<SocketAddr>,
/// Tpu port
pub tpu: Option<SocketAddr>,
/// JSON RPC port
pub rpc: Option<SocketAddr>,
}

#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(rename_all = "kebab-case")]
pub struct RpcVersionInfo {
@ -723,7 +712,7 @@ impl RpcSol for RpcSolImpl {

fn get_version(&self, _: Self::Metadata) -> Result<RpcVersionInfo> {
Ok(RpcVersionInfo {
solana_core: VERSION.to_string(),
solana_core: crate::version!().to_string(),
})
}

@ -1388,7 +1377,7 @@ pub mod tests {
let expected = json!({
"jsonrpc": "2.0",
"result": {
"solana-core": VERSION
"solana-core": crate::version!().to_string()
},
"id": 1
});
@ -53,6 +53,7 @@ impl RpcRequestMiddleware {
}

fn get(&self, filename: &str) -> RequestMiddlewareAction {
info!("get {}", filename);
let filename = self.ledger_path.join(filename);
RequestMiddlewareAction::Respond {
should_validate_hosts: true,
@ -115,7 +115,6 @@ impl SnapshotPackagerService {

// Once everything is successful, overwrite the previous tarball so that other validators
// can fetch this newly packaged snapshot
let _ = fs::remove_file(&snapshot_package.tar_output_file);
let metadata = fs::metadata(&archive_path)?;
fs::rename(&archive_path, &snapshot_package.tar_output_file)?;
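Dropping the `remove_file` call leans on the fact that on POSIX filesystems `fs::rename` replaces an existing destination in a single step, so validators fetching the tarball never observe a deleted or half-written file. A sketch of that publish-by-rename pattern with illustrative paths:

```rust
use std::fs;
use std::io;
use std::path::Path;

// Sketch only: write the archive to a staging path, then rename it over the
// final path; rename replaces the destination on the same filesystem.
fn publish(staging: &Path, published: &Path) -> io::Result<()> {
    let metadata = fs::metadata(staging)?; // fail early if staging vanished
    println!("publishing {} bytes", metadata.len());
    fs::rename(staging, published)
}

fn main() -> io::Result<()> {
    fs::write("snapshot.tar.tmp", b"archive bytes")?;
    publish(Path::new("snapshot.tar.tmp"), Path::new("snapshot.tar"))
}
```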
@ -38,6 +38,7 @@ impl Tpu {
blocktree: &Arc<Blocktree>,
broadcast_type: &BroadcastStageType,
exit: &Arc<AtomicBool>,
shred_version: u16,
) -> Self {
let (packet_sender, packet_receiver) = channel();
let fetch_stage = FetchStage::new_with_sender(
@ -74,6 +75,7 @@ impl Tpu {
entry_receiver,
&exit,
blocktree,
shred_version,
);

Self {
@ -16,6 +16,7 @@ use crate::blockstream_service::BlockstreamService;
use crate::cluster_info::ClusterInfo;
use crate::confidence::ForkConfidenceCache;
use crate::ledger_cleanup_service::LedgerCleanupService;
use crate::partition_cfg::PartitionCfg;
use crate::poh_recorder::PohRecorder;
use crate::replay_stage::ReplayStage;
use crate::retransmit_stage::RetransmitStage;
@ -79,6 +80,8 @@ impl Tvu {
exit: &Arc<AtomicBool>,
completed_slots_receiver: CompletedSlotsReceiver,
fork_confidence_cache: Arc<RwLock<ForkConfidenceCache>>,
cfg: Option<PartitionCfg>,
shred_version: u16,
) -> Self
where
T: 'static + KeypairUtil + Sync + Send,
@ -124,6 +127,8 @@ impl Tvu {
&exit,
completed_slots_receiver,
*bank_forks.read().unwrap().working_bank().epoch_schedule(),
cfg,
shred_version,
);

let (blockstream_slot_sender, blockstream_slot_receiver) = channel();
@ -286,6 +291,8 @@ pub mod tests {
&exit,
completed_slots_receiver,
fork_confidence_cache,
None,
0,
);
exit.store(true, Ordering::Relaxed);
tvu.join().unwrap();
@ -1,42 +1,52 @@
|
||||
//! The `validator` module hosts all the validator microservices.
|
||||
|
||||
use crate::broadcast_stage::BroadcastStageType;
|
||||
use crate::cluster_info::{ClusterInfo, Node};
|
||||
use crate::confidence::ForkConfidenceCache;
|
||||
use crate::contact_info::ContactInfo;
|
||||
use crate::gossip_service::{discover_cluster, GossipService};
|
||||
use crate::poh_recorder::PohRecorder;
|
||||
use crate::poh_service::PohService;
|
||||
use crate::rpc::JsonRpcConfig;
|
||||
use crate::rpc_pubsub_service::PubSubService;
|
||||
use crate::rpc_service::JsonRpcService;
|
||||
use crate::rpc_subscriptions::RpcSubscriptions;
|
||||
use crate::service::Service;
|
||||
use crate::sigverify;
|
||||
use crate::storage_stage::StorageState;
|
||||
use crate::tpu::Tpu;
|
||||
use crate::tvu::{Sockets, Tvu};
|
||||
use solana_ledger::bank_forks::{BankForks, SnapshotConfig};
|
||||
use solana_ledger::blocktree::{Blocktree, CompletedSlotsReceiver};
|
||||
use solana_ledger::blocktree_processor::{self, BankForksInfo};
|
||||
use solana_ledger::leader_schedule_cache::LeaderScheduleCache;
|
||||
use solana_ledger::snapshot_utils;
|
||||
use crate::{
|
||||
broadcast_stage::BroadcastStageType,
|
||||
cluster_info::{ClusterInfo, Node},
|
||||
confidence::ForkConfidenceCache,
|
||||
contact_info::ContactInfo,
|
||||
gossip_service::{discover_cluster, GossipService},
|
||||
partition_cfg::PartitionCfg,
|
||||
poh_recorder::PohRecorder,
|
||||
poh_service::PohService,
|
||||
rpc::JsonRpcConfig,
|
||||
rpc_pubsub_service::PubSubService,
|
||||
rpc_service::JsonRpcService,
|
||||
rpc_subscriptions::RpcSubscriptions,
|
||||
service::Service,
|
||||
sigverify,
|
||||
storage_stage::StorageState,
|
||||
tpu::Tpu,
|
||||
tvu::{Sockets, Tvu},
|
||||
};
|
||||
use solana_ledger::{
|
||||
bank_forks::{BankForks, SnapshotConfig},
|
||||
bank_forks_utils,
|
||||
blocktree::{Blocktree, CompletedSlotsReceiver},
|
||||
blocktree_processor::{self, BankForksInfo},
|
||||
leader_schedule_cache::LeaderScheduleCache,
|
||||
};
|
||||
use solana_metrics::datapoint_info;
|
||||
use solana_sdk::clock::{Slot, DEFAULT_SLOTS_PER_TURN};
|
||||
use solana_sdk::genesis_block::GenesisBlock;
|
||||
use solana_sdk::hash::Hash;
|
||||
use solana_sdk::poh_config::PohConfig;
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
use solana_sdk::signature::{Keypair, KeypairUtil};
|
||||
use solana_sdk::timing::timestamp;
|
||||
use solana_sdk::{
|
||||
clock::{Slot, DEFAULT_SLOTS_PER_TURN},
|
||||
genesis_block::GenesisBlock,
|
||||
hash::Hash,
|
||||
poh_config::PohConfig,
|
||||
pubkey::Pubkey,
|
||||
signature::{Keypair, KeypairUtil},
|
||||
timing::timestamp,
|
||||
};
|
||||
|
||||
use std::fs;
|
||||
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::sync::mpsc::Receiver;
|
||||
use std::sync::{Arc, Mutex, RwLock};
|
||||
use std::thread::Result;
|
||||
use solana_ledger::shred::Shred;
|
||||
use std::{
|
||||
net::{IpAddr, Ipv4Addr, SocketAddr},
|
||||
path::{Path, PathBuf},
|
||||
process,
|
||||
sync::atomic::{AtomicBool, Ordering},
|
||||
sync::mpsc::Receiver,
|
||||
sync::{Arc, Mutex, RwLock},
|
||||
thread::Result,
|
||||
};
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct ValidatorConfig {
|
||||
@ -51,6 +61,7 @@ pub struct ValidatorConfig {
|
||||
pub snapshot_config: Option<SnapshotConfig>,
|
||||
pub max_ledger_slots: Option<u64>,
|
||||
pub broadcast_stage_type: BroadcastStageType,
|
||||
pub partition_cfg: Option<PartitionCfg>,
|
||||
}
|
||||
|
||||
impl Default for ValidatorConfig {
|
||||
@ -67,6 +78,7 @@ impl Default for ValidatorConfig {
|
||||
rpc_config: JsonRpcConfig::default(),
|
||||
snapshot_config: None,
|
||||
broadcast_stage_type: BroadcastStageType::Standard,
|
||||
partition_cfg: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -110,7 +122,7 @@ impl Validator {
|
||||
voting_keypair: &Arc<Keypair>,
|
||||
storage_keypair: &Arc<Keypair>,
|
||||
entrypoint_info_option: Option<&ContactInfo>,
|
||||
verify_ledger: bool,
|
||||
poh_verify: bool,
|
||||
config: &ValidatorConfig,
|
||||
) -> Self {
|
||||
let id = keypair.pubkey();
|
||||
@ -126,6 +138,20 @@ impl Validator {
|
||||
"dis"
|
||||
}
|
||||
);
|
||||

        // Validator binaries built on a machine with AVX support will generate invalid opcodes
        // when run on machines without AVX causing a non-obvious process abort. Instead detect
        // the mismatch and error cleanly.
        #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
        {
            if is_x86_feature_detected!("avx") {
                info!("AVX detected");
            } else {
                error!("Your machine does not have AVX support, please rebuild from source on your machine");
                process::exit(1);
            }
        }
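The comment above hinges on the difference between build-time and run-time AVX capability. A minimal standalone sketch of that distinction (illustrative only, not part of this diff):

```rust
// cfg!(target_feature = "avx") is resolved when the binary is compiled and
// reflects the build machine's flags; is_x86_feature_detected!("avx") probes
// the CPU that is actually running the binary.
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
fn check_avx_mismatch() {
    let built_with_avx = cfg!(target_feature = "avx");
    let cpu_has_avx = is_x86_feature_detected!("avx");
    if built_with_avx && !cpu_has_avx {
        // The binary may contain AVX opcodes this CPU cannot execute.
        eprintln!("built with AVX but this CPU lacks AVX support");
    }
}
```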

        info!("entrypoint: {:?}", entrypoint_info_option);

        Self::print_node_info(&node);
@@ -149,7 +175,7 @@ impl Validator {
            ledger_path,
            config.account_paths.clone(),
            config.snapshot_config.clone(),
            verify_ledger,
            poh_verify,
            config.dev_halt_at_slot,
        );

@@ -159,6 +185,8 @@ impl Validator {
        let bank = bank_forks[bank_info.bank_slot].clone();
        let bank_forks = Arc::new(RwLock::new(bank_forks));
        let fork_confidence_cache = Arc::new(RwLock::new(ForkConfidenceCache::default()));
        // The version used by shreds, derived from genesis
        let shred_version = Shred::version_from_hash(&genesis_blockhash);

        let mut validator_exit = ValidatorExit::default();
        let exit_ = exit.clone();
@@ -318,6 +346,8 @@ impl Validator {
            &exit,
            completed_slots_receiver,
            fork_confidence_cache,
            config.partition_cfg.clone(),
            shred_version,
        );

        if config.dev_sigverify_disabled {
@@ -335,6 +365,7 @@ impl Validator {
            &blocktree,
            &config.broadcast_stage_type,
            &exit,
            shred_version,
        );

        datapoint_info!("validator-new", ("id", id.to_string(), String));
@@ -385,112 +416,12 @@ impl Validator {
    }
}

fn get_bank_forks(
    genesis_block: &GenesisBlock,
    blocktree: &Blocktree,
    account_paths: Option<String>,
    snapshot_config: Option<&SnapshotConfig>,
    verify_ledger: bool,
    dev_halt_at_slot: Option<Slot>,
) -> (BankForks, Vec<BankForksInfo>, LeaderScheduleCache) {
    let process_options = blocktree_processor::ProcessOptions {
        verify_ledger,
        dev_halt_at_slot,
        ..blocktree_processor::ProcessOptions::default()
    };

    if let Some(snapshot_config) = snapshot_config.as_ref() {
        info!(
            "Initializing snapshot path: {:?}",
            snapshot_config.snapshot_path
        );
        let _ = fs::remove_dir_all(&snapshot_config.snapshot_path);
        fs::create_dir_all(&snapshot_config.snapshot_path)
            .expect("Couldn't create snapshot directory");

        let tar =
            snapshot_utils::get_snapshot_tar_path(&snapshot_config.snapshot_package_output_path);
        if tar.exists() {
            info!("Loading snapshot package: {:?}", tar);
            // Fail hard here if snapshot fails to load, don't silently continue
            let deserialized_bank = snapshot_utils::bank_from_archive(
                account_paths
                    .clone()
                    .expect("Account paths not present when booting from snapshot"),
                &snapshot_config.snapshot_path,
                &tar,
            )
            .expect("Load from snapshot failed");

            return blocktree_processor::process_blocktree_from_root(
                genesis_block,
                blocktree,
                Arc::new(deserialized_bank),
                &process_options,
            )
            .expect("processing blocktree after loading snapshot failed");
        } else {
            info!("Snapshot package does not exist: {:?}", tar);
        }
    } else {
        info!("Snapshots disabled");
    }

    info!("Processing ledger from genesis");
    blocktree_processor::process_blocktree(
        &genesis_block,
        &blocktree,
        account_paths,
        process_options,
    )
    .expect("process_blocktree failed")
}

#[cfg(not(unix))]
fn adjust_ulimit_nofile() {}

#[cfg(unix)]
fn adjust_ulimit_nofile() {
    // Rocks DB likes to have many open files. The default open file descriptor limit is
    // usually not enough
    let desired_nofile = 65000;

    fn get_nofile() -> libc::rlimit {
        let mut nofile = libc::rlimit {
            rlim_cur: 0,
            rlim_max: 0,
        };
        if unsafe { libc::getrlimit(libc::RLIMIT_NOFILE, &mut nofile) } != 0 {
            warn!("getrlimit(RLIMIT_NOFILE) failed");
        }
        nofile
    }

    let mut nofile = get_nofile();
    if nofile.rlim_cur < desired_nofile {
        nofile.rlim_cur = desired_nofile;
        if unsafe { libc::setrlimit(libc::RLIMIT_NOFILE, &nofile) } != 0 {
            error!(
                "Unable to increase the maximum open file descriptor limit to {}",
                desired_nofile
            );

            if cfg!(target_os = "macos") {
                error!("On mac OS you may need to run |sudo launchctl limit maxfiles 65536 200000| first");
            }
        }

        nofile = get_nofile();
    }
    info!("Maximum open file descriptors: {}", nofile.rlim_cur);
}
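The error branch above follows from POSIX resource-limit semantics. A small illustrative sketch (not part of this diff) of the underlying constraint:

```rust
// An unprivileged process may raise the soft limit (rlim_cur) only up to the
// hard limit (rlim_max); trying to exceed rlim_max is what fails and prompts
// the launchctl hint on macOS.
#[cfg(unix)]
fn raise_nofile_within_hard_limit() {
    let mut lim = libc::rlimit { rlim_cur: 0, rlim_max: 0 };
    if unsafe { libc::getrlimit(libc::RLIMIT_NOFILE, &mut lim) } == 0 {
        lim.rlim_cur = std::cmp::min(lim.rlim_max, 65000);
        unsafe { libc::setrlimit(libc::RLIMIT_NOFILE, &lim) };
    }
}
```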

pub fn new_banks_from_blocktree(
    expected_genesis_blockhash: Option<Hash>,
    blocktree_path: &Path,
    account_paths: Option<String>,
    snapshot_config: Option<SnapshotConfig>,
    verify_ledger: bool,
    poh_verify: bool,
    dev_halt_at_slot: Option<Slot>,
) -> (
    Hash,
@@ -516,28 +447,32 @@ pub fn new_banks_from_blocktree(
                "Delete the ledger directory to continue: {:?}",
                blocktree_path
            );
            // TODO: bubble error up to caller?
            std::process::exit(1);
            process::exit(1);
        }
    }

    adjust_ulimit_nofile();

    let (blocktree, ledger_signal_receiver, completed_slots_receiver) =
        Blocktree::open_with_signal(blocktree_path).expect("Failed to open ledger database");

    let (mut bank_forks, bank_forks_info, leader_schedule_cache) = get_bank_forks(
    let process_options = blocktree_processor::ProcessOptions {
        poh_verify,
        dev_halt_at_slot,
        ..blocktree_processor::ProcessOptions::default()
    };

    let (mut bank_forks, bank_forks_info, leader_schedule_cache) = bank_forks_utils::load(
        &genesis_block,
        &blocktree,
        account_paths,
        snapshot_config.as_ref(),
        verify_ledger,
        dev_halt_at_slot,
    );
        process_options,
    )
    .unwrap_or_else(|err| {
        error!("Failed to load ledger: {:?}", err);
        std::process::exit(1);
    });

    if let Some(snapshot_config) = snapshot_config {
        bank_forks.set_snapshot_config(snapshot_config);
    }
    bank_forks.set_snapshot_config(snapshot_config);

    (
        genesis_blockhash,

@@ -1 +0,0 @@
pub(crate) const VERSION: &str = env!("CARGO_PKG_VERSION");

@@ -9,25 +9,26 @@ use std::ops::Div;

/// Returns a list of indexes shuffled based on the input weights
/// Note - The sum of all weights must not exceed `u64::MAX`
pub fn weighted_shuffle<T>(weights: Vec<T>, rng: ChaChaRng) -> Vec<usize>
pub fn weighted_shuffle<T>(weights: Vec<T>, mut rng: ChaChaRng) -> Vec<usize>
where
    T: Copy + PartialOrd + iter::Sum + Div<T, Output = T> + FromPrimitive + ToPrimitive,
{
    let mut rng = rng;
    let total_weight: T = weights.clone().into_iter().sum();
    weights
        .into_iter()
        .enumerate()
        .map(|(i, v)| {
            // This generates an "inverse" weight but it avoids floating point math
            let x = (total_weight / v)
                .to_u64()
                .expect("values > u64::max are not supported");
            (
                i,
                // capture the u64 into u128s to prevent overflow
                (&mut rng).gen_range(1, u128::from(std::u16::MAX)) * u128::from(x),
                rng.gen_range(1, u128::from(std::u16::MAX)) * u128::from(x),
            )
        })
        // sort in ascending order
        .sorted_by(|(_, l_val), (_, r_val)| l_val.cmp(r_val))
        .map(|x| x.0)
        .collect()
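The "inverse weight" comment is worth a worked example. An illustrative sketch (not part of this diff), assuming two entries with weights 100 and 300:

```rust
// total_weight = 400, so the integer "inverse" weights are 400/100 = 4 and
// 400/300 = 1. Each is then scaled by a random factor in [1, u16::MAX) and
// indexes are sorted by that product ascending, so the heavier entry (smaller
// inverse) tends to land earlier -- all without floating point math.
fn inverse_weight_demo() {
    let weights: Vec<u64> = vec![100, 300];
    let total: u64 = weights.iter().sum();
    let inverses: Vec<u64> = weights.iter().map(|w| total / w).collect();
    assert_eq!(inverses, vec![4, 1]);
}
```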
@@ -35,22 +36,23 @@ where

/// Returns the highest index after computing a weighted shuffle.
/// Saves doing any sorting for O(n) max calculation.
pub fn weighted_best(weights_and_indicies: &[(u64, usize)], rng: ChaChaRng) -> usize {
    let mut rng = rng;
    if weights_and_indicies.is_empty() {
pub fn weighted_best(weights_and_indexes: &[(u64, usize)], mut rng: ChaChaRng) -> usize {
    if weights_and_indexes.is_empty() {
        return 0;
    }
    let total_weight: u64 = weights_and_indicies.iter().map(|x| x.0).sum();
    let mut best_weight = 0;
    let total_weight: u64 = weights_and_indexes.iter().map(|x| x.0).sum();
    let mut lowest_weight = std::u128::MAX;
    let mut best_index = 0;
    for v in weights_and_indicies {
    for v in weights_and_indexes {
        // This generates an "inverse" weight but it avoids floating point math
        let x = (total_weight / v.0)
            .to_u64()
            .expect("values > u64::max are not supported");
        // capture the u64 into u128s to prevent overflow
        let weight = (&mut rng).gen_range(1, u128::from(std::u16::MAX)) * u128::from(x);
        if weight > best_weight {
            best_weight = weight;
        let computed_weight = rng.gen_range(1, u128::from(std::u16::MAX)) * u128::from(x);
        // The highest input weight maps to the lowest computed weight
        if computed_weight < lowest_weight {
            lowest_weight = computed_weight;
            best_index = v.1;
        }
    }
@@ -120,9 +122,12 @@ mod tests {

    #[test]
    fn test_weighted_best() {
        let mut weights = vec![(std::u32::MAX as u64, 0); 3];
        weights.push((1, 5));
        let best = weighted_best(&weights, ChaChaRng::from_seed([0x5b; 32]));
        assert_eq!(best, 5);
        let weights_and_indexes: Vec<_> = vec![100u64, 1000, 10_000, 10]
            .into_iter()
            .enumerate()
            .map(|(i, weight)| (weight, i))
            .collect();
        let best_index = weighted_best(&weights_and_indexes, ChaChaRng::from_seed([0x5b; 32]));
        assert_eq!(best_index, 2);
    }
}

@@ -41,6 +41,7 @@ pub fn should_retransmit_and_persist(
    leader_schedule_cache: &Arc<LeaderScheduleCache>,
    my_pubkey: &Pubkey,
    root: u64,
    shred_version: u16,
) -> bool {
    let slot_leader_pubkey = match bank {
        None => leader_schedule_cache.slot_leader_at(shred.slot(), None),
@@ -56,6 +57,9 @@ pub fn should_retransmit_and_persist(
    } else if !shred.verify(&leader_id) {
        inc_new_counter_debug!("streamer-recv_window-invalid_signature", 1);
        false
    } else if shred.version() != shred_version {
        inc_new_counter_debug!("streamer-recv_window-incorrect_shred_version", 1);
        false
    } else {
        true
    }
@@ -85,6 +89,7 @@ where
        total_packets += more_packets.packets.len();
        packets.push(more_packets)
    }

    let now = Instant::now();
    inc_new_counter_debug!("streamer-recv_window-recv", total_packets);

@@ -127,7 +132,9 @@ where
        }
    }

    blocktree.insert_shreds(shreds, Some(leader_schedule_cache))?;
    let blocktree_insert_metrics =
        blocktree.insert_shreds(shreds, Some(leader_schedule_cache), false)?;
    blocktree_insert_metrics.report_metrics("recv-window-insert-shreds");

    trace!(
        "Elapsed processing time in recv_window(): {}",
@@ -306,7 +313,7 @@ mod test {
        parent: u64,
        keypair: &Arc<Keypair>,
    ) -> Vec<Shred> {
        let shredder = Shredder::new(slot, parent, 0.0, keypair.clone())
        let shredder = Shredder::new(slot, parent, 0.0, keypair.clone(), 0, 0)
            .expect("Failed to create entry shredder");
        shredder.entries_to_shreds(&entries, true, 0).0
    }
@@ -320,7 +327,7 @@ mod test {
        let mut shreds = local_entries_to_shred(&original_entries, 0, 0, &Arc::new(Keypair::new()));
        shreds.reverse();
        blocktree
            .insert_shreds(shreds, None)
            .insert_shreds(shreds, None, false)
            .expect("Expect successful processing of shred");

        assert_eq!(
@@ -346,25 +353,30 @@ mod test {

        // with a Bank for slot 0, blob continues
        assert_eq!(
            should_retransmit_and_persist(&shreds[0], Some(bank.clone()), &cache, &me_id, 0,),
            should_retransmit_and_persist(&shreds[0], Some(bank.clone()), &cache, &me_id, 0, 0),
            true
        );
        // with the wrong shred_version, shred gets thrown out
        assert_eq!(
            should_retransmit_and_persist(&shreds[0], Some(bank.clone()), &cache, &me_id, 0, 1),
            false
        );

        // If it's a coding shred, test that slot >= root
        let (common, coding) = Shredder::new_coding_shred_header(5, 5, 6, 6, 0);
        let (common, coding) = Shredder::new_coding_shred_header(5, 5, 6, 6, 0, 0);
        let mut coding_shred =
            Shred::new_empty_from_header(common, DataShredHeader::default(), coding);
        Shredder::sign_shred(&leader_keypair, &mut coding_shred);
        assert_eq!(
            should_retransmit_and_persist(&coding_shred, Some(bank.clone()), &cache, &me_id, 0),
            should_retransmit_and_persist(&coding_shred, Some(bank.clone()), &cache, &me_id, 0, 0),
            true
        );
        assert_eq!(
            should_retransmit_and_persist(&coding_shred, Some(bank.clone()), &cache, &me_id, 5),
            should_retransmit_and_persist(&coding_shred, Some(bank.clone()), &cache, &me_id, 5, 0),
            true
        );
        assert_eq!(
            should_retransmit_and_persist(&coding_shred, Some(bank.clone()), &cache, &me_id, 6),
            should_retransmit_and_persist(&coding_shred, Some(bank.clone()), &cache, &me_id, 6, 0),
            false
        );

@@ -381,7 +393,8 @@ mod test {
                Some(wrong_bank.clone()),
                &wrong_cache,
                &me_id,
                0
                0,
                0,
            ),
            false
        );
@@ -389,7 +402,7 @@ mod test {
        // with a Bank and no idea who leader is, blob gets thrown out
        shreds[0].set_slot(MINIMUM_SLOTS_PER_EPOCH as u64 * 3);
        assert_eq!(
            should_retransmit_and_persist(&shreds[0], Some(bank.clone()), &cache, &me_id, 0),
            should_retransmit_and_persist(&shreds[0], Some(bank.clone()), &cache, &me_id, 0, 0),
            false
        );

@@ -397,7 +410,7 @@ mod test {
        let slot = MINIMUM_SLOTS_PER_EPOCH as u64 * 3;
        let shreds = local_entries_to_shred(&[Entry::default()], slot, slot - 1, &leader_keypair);
        assert_eq!(
            should_retransmit_and_persist(&shreds[0], Some(bank.clone()), &cache, &me_id, slot),
            should_retransmit_and_persist(&shreds[0], Some(bank.clone()), &cache, &me_id, slot, 0),
            false
        );

@@ -406,13 +419,13 @@ mod test {
        let shreds =
            local_entries_to_shred(&[Entry::default()], slot + 1, slot - 1, &leader_keypair);
        assert_eq!(
            should_retransmit_and_persist(&shreds[0], Some(bank.clone()), &cache, &me_id, slot),
            should_retransmit_and_persist(&shreds[0], Some(bank.clone()), &cache, &me_id, slot, 0),
            false
        );

        // if the blob came back from me, it doesn't continue, whether or not I have a bank
        assert_eq!(
            should_retransmit_and_persist(&shreds[0], None, &cache, &me_id, 0),
            should_retransmit_and_persist(&shreds[0], None, &cache, &me_id, 0, 0),
            false
        );
    }
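Two API changes recur in this file: `should_retransmit_and_persist` gains a trailing `shred_version: u16` (shreds whose version does not match the genesis-derived version are dropped), and `Blocktree::insert_shreds` gains a trailing `bool` and now returns a metrics object. A hedged sketch of the new `insert_shreds` call shape, assuming `blocktree`, `shreds`, and `leader_schedule_cache` are in scope inside a function returning a compatible `Result`:

```rust
// Sketch only; the new trailing bool is passed as `false` here, matching the
// call sites in this diff.
let metrics = blocktree.insert_shreds(shreds, Some(leader_schedule_cache), false)?;
metrics.report_metrics("recv-window-insert-shreds");
```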

@@ -53,7 +53,7 @@ mod tests {
            snapshot_package_output_path: PathBuf::from(snapshot_output_path.path()),
            snapshot_path: PathBuf::from(snapshot_dir.path()),
        };
        bank_forks.set_snapshot_config(snapshot_config.clone());
        bank_forks.set_snapshot_config(Some(snapshot_config.clone()));
        SnapshotTestConfig {
            accounts_dir,
            snapshot_dir,

@@ -6,8 +6,8 @@ use solana_core::contact_info::ContactInfo;
use solana_core::crds_gossip::*;
use solana_core::crds_gossip_error::CrdsGossipError;
use solana_core::crds_gossip_push::CRDS_GOSSIP_PUSH_MSG_TIMEOUT_MS;
use solana_core::crds_value::CrdsValue;
use solana_core::crds_value::CrdsValueLabel;
use solana_core::crds_value::{CrdsData, CrdsValue};
use solana_sdk::hash::hash;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::timing::timestamp;
@@ -72,10 +72,16 @@ fn stakes(network: &Network) -> HashMap<Pubkey, u64> {
}

fn star_network_create(num: usize) -> Network {
    let entry = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
    let entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
        &Pubkey::new_rand(),
        0,
    )));
    let mut network: HashMap<_, _> = (1..num)
        .map(|_| {
            let new = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
            let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
                &Pubkey::new_rand(),
                0,
            )));
            let id = new.label().pubkey();
            let mut node = CrdsGossip::default();
            node.crds.insert(new.clone(), 0).unwrap();
@@ -93,14 +99,20 @@ fn star_network_create(num: usize) -> Network {
}

fn rstar_network_create(num: usize) -> Network {
    let entry = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
    let entry = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
        &Pubkey::new_rand(),
        0,
    )));
    let mut origin = CrdsGossip::default();
    let id = entry.label().pubkey();
    origin.crds.insert(entry.clone(), 0).unwrap();
    origin.set_self(&id);
    let mut network: HashMap<_, _> = (1..num)
        .map(|_| {
            let new = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
            let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
                &Pubkey::new_rand(),
                0,
            )));
            let id = new.label().pubkey();
            let mut node = CrdsGossip::default();
            node.crds.insert(new.clone(), 0).unwrap();
@@ -116,7 +128,10 @@ fn rstar_network_create(num: usize) -> Network {
fn ring_network_create(num: usize) -> Network {
    let mut network: HashMap<_, _> = (0..num)
        .map(|_| {
            let new = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
            let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
                &Pubkey::new_rand(),
                0,
            )));
            let id = new.label().pubkey();
            let mut node = CrdsGossip::default();
            node.crds.insert(new.clone(), 0).unwrap();
@@ -147,7 +162,10 @@ fn connected_staked_network_create(stakes: &[u64]) -> Network {
    let num = stakes.len();
    let mut network: HashMap<_, _> = (0..num)
        .map(|n| {
            let new = CrdsValue::ContactInfo(ContactInfo::new_localhost(&Pubkey::new_rand(), 0));
            let new = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
                &Pubkey::new_rand(),
                0,
            )));
            let id = new.label().pubkey();
            let mut node = CrdsGossip::default();
            node.crds.insert(new.clone(), 0).unwrap();
@@ -219,7 +237,11 @@ fn network_simulator(network: &mut Network, max_convergance: f64) {
            .and_then(|v| v.contact_info().cloned())
            .unwrap();
        m.wallclock = now;
        node.process_push_message(&Pubkey::default(), vec![CrdsValue::ContactInfo(m)], now);
        node.process_push_message(
            &Pubkey::default(),
            vec![CrdsValue::new_unsigned(CrdsData::ContactInfo(m))],
            now,
        );
    });
    // push for a bit
    let (queue_size, bytes_tx) = network_run_push(network, start, end);
@@ -547,7 +569,10 @@ fn test_prune_errors() {
    let prune_pubkey = Pubkey::new(&[2; 32]);
    crds_gossip
        .crds
        .insert(CrdsValue::ContactInfo(ci.clone()), 0)
        .insert(
            CrdsValue::new_unsigned(CrdsData::ContactInfo(ci.clone())),
            0,
        )
        .unwrap();
    crds_gossip.refresh_push_active_set(&HashMap::new());
    let now = timestamp();
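Every change in this test file is the same mechanical substitution: a `ContactInfo` is now wrapped in the `CrdsData` enum before building an unsigned `CrdsValue`. A minimal sketch of the new construction (assumes the imports shown above):

```rust
// Before: CrdsValue::ContactInfo(info)
// After:  wrap in CrdsData first, then build an unsigned value.
let info = ContactInfo::new_localhost(&Pubkey::new_rand(), 0);
let value = CrdsValue::new_unsigned(CrdsData::ContactInfo(info));
```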
@@ -1,6 +1,6 @@
[package]
name = "solana-crate-features"
version = "0.20.0"
version = "0.20.5"
description = "Solana Crate Features"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"

@@ -1,6 +1,6 @@
[package]
name = "solana-drone"
version = "0.20.0"
version = "0.20.5"
description = "Solana Drone"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -19,9 +19,9 @@ clap = "2.33"
log = "0.4.8"
serde = "1.0.101"
serde_derive = "1.0.101"
solana-logger = { path = "../logger", version = "0.20.0" }
solana-metrics = { path = "../metrics", version = "0.20.0" }
solana-sdk = { path = "../sdk", version = "0.20.0" }
solana-logger = { path = "../logger", version = "0.20.5" }
solana-metrics = { path = "../metrics", version = "0.20.5" }
solana-sdk = { path = "../sdk", version = "0.20.5" }
tokio = "0.1"
tokio-codec = "0.1"

@@ -1,6 +1,6 @@
[package]
name = "solana-fixed-buf"
version = "0.20.0"
version = "0.20.5"
description = "A fixed-size byte array that supports bincode serde"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"

@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-genesis"
description = "Blockchain, Rebuilt for Scale"
version = "0.20.0"
version = "0.20.5"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -15,10 +15,10 @@ serde = "1.0.101"
serde_derive = "1.0.101"
serde_json = "1.0.41"
serde_yaml = "0.8.11"
solana-genesis-programs = { path = "../genesis_programs", version = "0.20.0" }
solana-ledger = { path = "../ledger", version = "0.20.0" }
solana-sdk = { path = "../sdk", version = "0.20.0" }
solana-stake-api = { path = "../programs/stake_api", version = "0.20.0" }
solana-storage-api = { path = "../programs/storage_api", version = "0.20.0" }
solana-vote-api = { path = "../programs/vote_api", version = "0.20.0" }
solana-genesis-programs = { path = "../genesis_programs", version = "0.20.5" }
solana-ledger = { path = "../ledger", version = "0.20.5" }
solana-sdk = { path = "../sdk", version = "0.20.5" }
solana-stake-api = { path = "../programs/stake_api", version = "0.20.5" }
solana-storage-api = { path = "../programs/storage_api", version = "0.20.5" }
solana-vote-api = { path = "../programs/vote_api", version = "0.20.5" }
tempfile = "3.1.0"
@@ -76,6 +76,7 @@ pub fn add_genesis_accounts(
    Ok(())
}

#[allow(clippy::cognitive_complexity)]
fn main() -> Result<(), Box<dyn error::Error>> {
    let default_bootstrap_leader_lamports = &sol_to_lamports(500.0).to_string();
    let default_bootstrap_leader_stake_lamports = &sol_to_lamports(0.5).to_string();
@@ -310,16 +311,16 @@ fn main() -> Result<(), Box<dyn error::Error>> {
    let bootstrap_storage_keypair = read_keypair_file(bootstrap_storage_keypair_file)?;
    let mint_keypair = read_keypair_file(mint_keypair_file)?;

    let vote_account = vote_state::create_account(
    let bootstrap_leader_vote_account = vote_state::create_account(
        &bootstrap_vote_keypair.pubkey(),
        &bootstrap_leader_keypair.pubkey(),
        0,
        1,
    );
    let stake_account = stake_state::create_account(
        &bootstrap_stake_keypair.pubkey(),
    let bootstrap_leader_stake_account = stake_state::create_account(
        &bootstrap_leader_keypair.pubkey(),
        &bootstrap_vote_keypair.pubkey(),
        &vote_account,
        &bootstrap_leader_vote_account,
        bootstrap_leader_stake_lamports,
    );

@@ -335,9 +336,15 @@ fn main() -> Result<(), Box<dyn error::Error>> {
            Account::new(bootstrap_leader_lamports, 0, &system_program::id()),
        ),
        // where votes go to
        (bootstrap_vote_keypair.pubkey(), vote_account),
        (
            bootstrap_vote_keypair.pubkey(),
            bootstrap_leader_vote_account,
        ),
        // passive bootstrap leader stake
        (bootstrap_stake_keypair.pubkey(), stake_account),
        (
            bootstrap_stake_keypair.pubkey(),
            bootstrap_leader_stake_account,
        ),
        (
            bootstrap_storage_keypair.pubkey(),
            storage_contract::create_validator_storage_account(

@@ -1,6 +1,6 @@
[package]
name = "solana-genesis-programs"
version = "0.20.0"
version = "0.20.5"
description = "Solana genesis programs"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -10,30 +10,25 @@ edition = "2018"

[dependencies]
log = { version = "0.4.8" }
solana-bpf-loader-api = { path = "../programs/bpf_loader_api", version = "0.20.0" }
solana-bpf-loader-program = { path = "../programs/bpf_loader_program", version = "0.20.0" }
solana-budget-api = { path = "../programs/budget_api", version = "0.20.0" }
solana-budget-program = { path = "../programs/budget_program", version = "0.20.0" }
solana-config-api = { path = "../programs/config_api", version = "0.20.0" }
solana-config-program = { path = "../programs/config_program", version = "0.20.0" }
solana-exchange-api = { path = "../programs/exchange_api", version = "0.20.0" }
solana-exchange-program = { path = "../programs/exchange_program", version = "0.20.0" }
solana-move-loader-program = { path = "../programs/move_loader_program", version = "0.20.0", optional = true }
solana-move-loader-api = { path = "../programs/move_loader_api", version = "0.20.0", optional = true }
solana-runtime = { path = "../runtime", version = "0.20.0" }
solana-sdk = { path = "../sdk", version = "0.20.0" }
solana-stake-api = { path = "../programs/stake_api", version = "0.20.0" }
solana-stake-program = { path = "../programs/stake_program", version = "0.20.0" }
solana-storage-api = { path = "../programs/storage_api", version = "0.20.0" }
solana-storage-program = { path = "../programs/storage_program", version = "0.20.0" }
solana-vest-api = { path = "../programs/vest_api", version = "0.20.0" }
solana-vest-program = { path = "../programs/vest_program", version = "0.20.0" }
solana-vote-api = { path = "../programs/vote_api", version = "0.20.0" }
solana-vote-program = { path = "../programs/vote_program", version = "0.20.0" }
solana-bpf-loader-api = { path = "../programs/bpf_loader_api", version = "0.20.5" }
solana-bpf-loader-program = { path = "../programs/bpf_loader_program", version = "0.20.5" }
solana-budget-api = { path = "../programs/budget_api", version = "0.20.5" }
solana-budget-program = { path = "../programs/budget_program", version = "0.20.5" }
solana-config-api = { path = "../programs/config_api", version = "0.20.5" }
solana-config-program = { path = "../programs/config_program", version = "0.20.5" }
solana-exchange-api = { path = "../programs/exchange_api", version = "0.20.5" }
solana-exchange-program = { path = "../programs/exchange_program", version = "0.20.5" }
solana-runtime = { path = "../runtime", version = "0.20.5" }
solana-sdk = { path = "../sdk", version = "0.20.5" }
solana-stake-api = { path = "../programs/stake_api", version = "0.20.5" }
solana-stake-program = { path = "../programs/stake_program", version = "0.20.5" }
solana-storage-api = { path = "../programs/storage_api", version = "0.20.5" }
solana-storage-program = { path = "../programs/storage_program", version = "0.20.5" }
solana-vest-api = { path = "../programs/vest_api", version = "0.20.5" }
solana-vest-program = { path = "../programs/vest_program", version = "0.20.5" }
solana-vote-api = { path = "../programs/vote_api", version = "0.20.5" }
solana-vote-program = { path = "../programs/vote_program", version = "0.20.5" }

[lib]
crate-type = ["lib"]
name = "solana_genesis_programs"

[features]
move = ["solana-move-loader-program", "solana-move-loader-api"]

@@ -1,6 +1,6 @@
use solana_sdk::{
    clock::Epoch, genesis_block::OperatingMode, pubkey::Pubkey,
    system_program::solana_system_program,
    clock::Epoch, genesis_block::OperatingMode, move_loader::solana_move_loader_program,
    pubkey::Pubkey, system_program::solana_system_program,
};

#[macro_use]
@@ -11,9 +11,6 @@ extern crate solana_budget_program;
extern crate solana_config_program;
#[macro_use]
extern crate solana_exchange_program;
#[cfg(feature = "move")]
#[macro_use]
extern crate solana_move_loader_program;
#[macro_use]
extern crate solana_stake_program;
#[macro_use]
@@ -42,8 +39,7 @@ pub fn get(operating_mode: OperatingMode, epoch: Epoch) -> Option<Vec<(String, P
            // Programs that are only available in Development mode
            solana_budget_program!(),
            solana_exchange_program!(),
            #[cfg(feature = "move")]
            solana_move_loader_program!(),
            solana_move_loader_program(),
        ])
    } else {
        None
@@ -107,7 +103,7 @@ mod tests {

    #[test]
    fn test_development_programs() {
        assert_eq!(get(OperatingMode::Development, 0).unwrap().len(), 9);
        assert_eq!(get(OperatingMode::Development, 0).unwrap().len(), 10);
        assert_eq!(get(OperatingMode::Development, 1), None);
    }
|
||||
edition = "2018"
|
||||
name = "solana-gossip"
|
||||
description = "Blockchain, Rebuilt for Scale"
|
||||
version = "0.20.0"
|
||||
version = "0.20.5"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
|
||||
[dependencies]
|
||||
clap = "2.33.0"
|
||||
solana-core = { path = "../core", version = "0.20.0" }
|
||||
solana-client = { path = "../client", version = "0.20.0" }
|
||||
solana-logger = { path = "../logger", version = "0.20.0" }
|
||||
solana-netutil = { path = "../netutil", version = "0.20.0" }
|
||||
solana-sdk = { path = "../sdk", version = "0.20.0" }
|
||||
solana-core = { path = "../core", version = "0.20.5" }
|
||||
solana-client = { path = "../client", version = "0.20.5" }
|
||||
solana-logger = { path = "../logger", version = "0.20.5" }
|
||||
solana-netutil = { path = "../netutil", version = "0.20.5" }
|
||||
solana-sdk = { path = "../sdk", version = "0.20.5" }
|
||||
|
||||
|
||||
|
||||
|
@ -3,11 +3,10 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
edition = "2018"
|
||||
name = "solana-install"
|
||||
description = "The solana cluster software installer"
|
||||
version = "0.20.0"
|
||||
version = "0.20.5"
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://solana.com/"
|
||||
default-run = "solana-install"
|
||||
|
||||
[dependencies]
|
||||
atty = "0.2.11"
|
||||
@ -29,10 +28,10 @@ serde = "1.0.101"
|
||||
serde_derive = "1.0.101"
|
||||
serde_yaml = "0.8.11"
|
||||
sha2 = "0.8.0"
|
||||
solana-client = { path = "../client", version = "0.20.0" }
|
||||
solana-config-api = { path = "../programs/config_api", version = "0.20.0" }
|
||||
solana-logger = { path = "../logger", version = "0.20.0" }
|
||||
solana-sdk = { path = "../sdk", version = "0.20.0" }
|
||||
solana-client = { path = "../client", version = "0.20.5" }
|
||||
solana-config-api = { path = "../programs/config_api", version = "0.20.5" }
|
||||
solana-logger = { path = "../logger", version = "0.20.5" }
|
||||
solana-sdk = { path = "../sdk", version = "0.20.5" }
|
||||
tar = "0.4.26"
|
||||
tempdir = "0.3.7"
|
||||
url = "2.1.0"
|
||||
|

@@ -1,6 +1,6 @@
[package]
name = "solana-keygen"
version = "0.20.0"
version = "0.20.5"
description = "Solana key generation utility"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -15,7 +15,7 @@ edition = "2018"
clap = "2.33"
dirs = "2.0.2"
rpassword = "4.0"
solana-sdk = { path = "../sdk", version = "0.20.0" }
solana-sdk = { path = "../sdk", version = "0.20.5" }
tiny-bip39 = "0.6.2"

[[bin]]

@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-ledger-tool"
description = "Blockchain, Rebuilt for Scale"
version = "0.20.0"
version = "0.20.5"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -15,10 +15,11 @@ serde = "1.0.101"
serde_derive = "1.0.101"
serde_json = "1.0.41"
serde_yaml = "0.8.11"
solana-ledger = { path = "../ledger", version = "0.20.0" }
solana-logger = { path = "../logger", version = "0.20.0" }
solana-runtime = { path = "../runtime", version = "0.20.0" }
solana-sdk = { path = "../sdk", version = "0.20.0" }
solana-ledger = { path = "../ledger", version = "0.20.5" }
solana-logger = { path = "../logger", version = "0.20.5" }
solana-runtime = { path = "../runtime", version = "0.20.5" }
solana-sdk = { path = "../sdk", version = "0.20.5" }
solana-vote-api = { path = "../programs/vote_api", version = "0.20.5" }

[dev-dependencies]
assert_cmd = "0.11"

@@ -1,15 +1,28 @@
use clap::{crate_description, crate_name, crate_version, value_t_or_exit, App, Arg, SubCommand};
use solana_ledger::blocktree::Blocktree;
use solana_ledger::blocktree_processor::{process_blocktree, ProcessOptions};
use solana_ledger::rooted_slot_iterator::RootedSlotIterator;
use solana_sdk::clock::Slot;
use solana_sdk::genesis_block::GenesisBlock;
use std::collections::BTreeMap;
use std::fs::File;
use std::io::{stdout, Write};
use std::path::PathBuf;
use std::process::exit;
use std::str::FromStr;
use clap::{
    crate_description, crate_name, crate_version, value_t, value_t_or_exit, values_t_or_exit, App,
    Arg, SubCommand,
};
use solana_ledger::{
    bank_forks::{BankForks, SnapshotConfig},
    bank_forks_utils,
    blocktree::Blocktree,
    blocktree_processor,
    rooted_slot_iterator::RootedSlotIterator,
};
use solana_sdk::{
    clock::Slot, genesis_block::GenesisBlock, instruction_processor_utils::limited_deserialize,
    native_token::lamports_to_sol, pubkey::Pubkey,
};
use solana_vote_api::vote_state::VoteState;
use std::{
    collections::{BTreeMap, HashMap, HashSet},
    ffi::OsStr,
    fs::File,
    io::{self, stdout, Write},
    path::{Path, PathBuf},
    process::{exit, Command, Stdio},
    str::FromStr,
};

#[derive(PartialEq)]
enum LedgerOutputMethod {
@@ -25,9 +38,71 @@ fn output_slot(blocktree: &Blocktree, slot: u64, method: &LedgerOutputMethod) {
        exit(1);
    });

    for entry in entries {
    for (entry_index, entry) in entries.iter().enumerate() {
        match method {
            LedgerOutputMethod::Print => println!("{:?}", entry),
            LedgerOutputMethod::Print => {
                println!(
                    "  Entry {} - num_hashes: {}, hashes: {}, transactions: {}",
                    entry_index,
                    entry.num_hashes,
                    entry.hash,
                    entry.transactions.len()
                );
                for (transactions_index, transaction) in entry.transactions.iter().enumerate() {
                    let message = &transaction.message;
                    println!("    Transaction {}", transactions_index);
                    println!("      Recent Blockhash: {:?}", message.recent_blockhash);
                    for (signature_index, signature) in transaction.signatures.iter().enumerate() {
                        println!("      Signature {}: {:?}", signature_index, signature);
                    }
                    println!("      Header: {:?}", message.header);
                    for (account_index, account) in message.account_keys.iter().enumerate() {
                        println!("      Account {}: {:?}", account_index, account);
                    }
                    for (instruction_index, instruction) in message.instructions.iter().enumerate()
                    {
                        let program_pubkey =
                            message.account_keys[instruction.program_id_index as usize];
                        println!("      Instruction {}", instruction_index);
                        println!(
                            "        Program: {} ({})",
                            program_pubkey, instruction.program_id_index
                        );
                        for (account_index, account) in instruction.accounts.iter().enumerate() {
                            let account_pubkey = message.account_keys[*account as usize];
                            println!(
                                "        Account {}: {} ({})",
                                account_index, account_pubkey, account
                            );
                        }

                        let mut raw = true;
                        if program_pubkey == solana_vote_api::id() {
                            if let Ok(vote_instruction) =
                                limited_deserialize::<
                                    solana_vote_api::vote_instruction::VoteInstruction,
                                >(&instruction.data)
                            {
                                println!("        {:?}", vote_instruction);
                                raw = false;
                            }
                        } else if program_pubkey == solana_sdk::system_program::id() {
                            if let Ok(system_instruction) =
                                limited_deserialize::<
                                    solana_sdk::system_instruction::SystemInstruction,
                                >(&instruction.data)
                            {
                                println!("        {:?}", system_instruction);
                                raw = false;
                            }
                        }

                        if raw {
                            println!("        Data: {:?}", instruction.data);
                        }
                    }
                }
            }
            LedgerOutputMethod::Json => {
                serde_json::to_writer(stdout(), &entry).expect("serialize entry");
                stdout().write_all(b",\n").expect("newline");
@@ -67,9 +142,257 @@ fn output_ledger(blocktree: Blocktree, starting_slot: u64, method: LedgerOutputM
    }
}

fn render_dot(dot: String, output_file: &str, output_format: &str) -> io::Result<()> {
    let mut child = Command::new("dot")
        .arg(format!("-T{}", output_format))
        .arg(format!("-o{}", output_file))
        .stdin(Stdio::piped())
        .spawn()
        .map_err(|err| {
            eprintln!("Failed to spawn dot: {:?}", err);
            err
        })?;

    let stdin = child.stdin.as_mut().unwrap();
    stdin.write_all(&dot.into_bytes())?;

    let status = child.wait_with_output()?.status;
    if !status.success() {
        return Err(io::Error::new(
            io::ErrorKind::Other,
            format!("dot failed with error {}", status.code().unwrap_or(-1)),
        ));
    }
    Ok(())
}
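`render_dot` shells out to the Graphviz `dot` binary and streams the graph source over stdin, so it fails cleanly if Graphviz is not installed. A hedged usage sketch (the graph and file name are placeholders):

```rust
// Renders a trivial graph to PDF; Err covers both a missing `dot` binary
// and a non-zero exit status.
let dot_source = String::from("digraph { a -> b }");
if let Err(err) = render_dot(dot_source, "forks.pdf", "pdf") {
    eprintln!("render failed: {}", err);
}
```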

#[allow(clippy::cognitive_complexity)]
fn graph_forks(
    bank_forks: BankForks,
    bank_forks_info: Vec<blocktree_processor::BankForksInfo>,
    include_all_votes: bool,
) -> String {
    // Search all forks and collect the last vote made by each validator
    let mut last_votes = HashMap::new();
    for bfi in &bank_forks_info {
        let bank = bank_forks.banks.get(&bfi.bank_slot).unwrap();

        let total_stake = bank
            .vote_accounts()
            .iter()
            .fold(0, |acc, (_, (stake, _))| acc + stake);
        for (_, (stake, vote_account)) in bank.vote_accounts() {
            let vote_state = VoteState::from(&vote_account).unwrap_or_default();
            if let Some(last_vote) = vote_state.votes.iter().last() {
                let entry = last_votes.entry(vote_state.node_pubkey).or_insert((
                    last_vote.slot,
                    vote_state.clone(),
                    stake,
                    total_stake,
                ));
                if entry.0 < last_vote.slot {
                    *entry = (last_vote.slot, vote_state, stake, total_stake);
                }
            }
        }
    }

    // Figure the stake distribution at all the nodes containing the last vote from each
    // validator
    let mut slot_stake_and_vote_count = HashMap::new();
    for (last_vote_slot, _, stake, total_stake) in last_votes.values() {
        let entry = slot_stake_and_vote_count
            .entry(last_vote_slot)
            .or_insert((0, 0, *total_stake));
        entry.0 += 1;
        entry.1 += stake;
        assert_eq!(entry.2, *total_stake)
    }

    let mut dot = vec!["digraph {".to_string()];

    // Build a subgraph consisting of all banks and links to their parent banks
    dot.push("  subgraph cluster_banks {".to_string());
    dot.push("    style=invis".to_string());
    let mut styled_slots = HashSet::new();
    let mut all_votes: HashMap<Pubkey, HashMap<Slot, VoteState>> = HashMap::new();
    for bfi in &bank_forks_info {
        let bank = bank_forks.banks.get(&bfi.bank_slot).unwrap();
        let mut bank = bank.clone();

        let mut first = true;
        loop {
            for (_, (_, vote_account)) in bank.vote_accounts() {
                let vote_state = VoteState::from(&vote_account).unwrap_or_default();
                if let Some(last_vote) = vote_state.votes.iter().last() {
                    let validator_votes = all_votes.entry(vote_state.node_pubkey).or_default();
                    validator_votes
                        .entry(last_vote.slot)
                        .or_insert_with(|| vote_state.clone());
                }
            }

            if !styled_slots.contains(&bank.slot()) {
                dot.push(format!(
                    r#"    "{}"[label="{} (epoch {})\nleader: {}{}{}",style="{}{}"];"#,
                    bank.slot(),
                    bank.slot(),
                    bank.epoch(),
                    bank.collector_id(),
                    if let Some(parent) = bank.parent() {
                        format!(
                            "\ntransactions: {}",
                            bank.transaction_count() - parent.transaction_count(),
                        )
                    } else {
                        "".to_string()
                    },
                    if let Some((votes, stake, total_stake)) =
                        slot_stake_and_vote_count.get(&bank.slot())
                    {
                        format!(
                            "\nvotes: {}, stake: {:.1} SOL ({:.1}%)",
                            votes,
                            lamports_to_sol(*stake),
                            *stake as f64 / *total_stake as f64 * 100.,
                        )
                    } else {
                        "".to_string()
                    },
                    if first { "filled," } else { "" },
                    ""
                ));
                styled_slots.insert(bank.slot());
            }
            first = false;

            match bank.parent() {
                None => {
                    if bank.slot() > 0 {
                        dot.push(format!(r#"    "{}" -> "..." [dir=back]"#, bank.slot(),));
                    }
                    break;
                }
                Some(parent) => {
                    let slot_distance = bank.slot() - parent.slot();
                    let penwidth = if bank.epoch() > parent.epoch() {
                        "5"
                    } else {
                        "1"
                    };
                    let link_label = if slot_distance > 1 {
                        format!("label=\"{} slots\",color=red", slot_distance)
                    } else {
                        "color=blue".to_string()
                    };
                    dot.push(format!(
                        r#"    "{}" -> "{}"[{},dir=back,penwidth={}];"#,
                        bank.slot(),
                        parent.slot(),
                        link_label,
                        penwidth
                    ));

                    bank = parent.clone();
                }
            }
        }
    }
    dot.push("  }".to_string());

    // Strafe the banks with links from validators to the bank they last voted on,
    // while collecting information about the absent votes and stakes
    let mut absent_stake = 0;
    let mut absent_votes = 0;
    let mut lowest_last_vote_slot = std::u64::MAX;
    let mut lowest_total_stake = 0;
    for (node_pubkey, (last_vote_slot, vote_state, stake, total_stake)) in &last_votes {
        all_votes.entry(*node_pubkey).and_modify(|validator_votes| {
            validator_votes.remove(&last_vote_slot);
        });

        dot.push(format!(
            r#"    "last vote {}"[shape=box,label="Latest validator vote: {}\nstake: {} SOL\nroot slot: {}\nvote history:\n{}"];"#,
            node_pubkey,
            node_pubkey,
            lamports_to_sol(*stake),
            vote_state.root_slot.unwrap_or(0),
            vote_state
                .votes
                .iter()
                .map(|vote| format!("slot {} (conf={})", vote.slot, vote.confirmation_count))
                .collect::<Vec<_>>()
                .join("\n")
        ));

        dot.push(format!(
            r#"    "last vote {}" -> "{}" [style=dashed,label="latest vote"];"#,
            node_pubkey,
            if styled_slots.contains(&last_vote_slot) {
                last_vote_slot.to_string()
            } else {
                if *last_vote_slot < lowest_last_vote_slot {
                    lowest_last_vote_slot = *last_vote_slot;
                    lowest_total_stake = *total_stake;
                }
                absent_votes += 1;
                absent_stake += stake;

                "...".to_string()
            },
        ));
    }

    // Annotate the final "..." node with absent vote and stake information
    if absent_votes > 0 {
        dot.push(format!(
            r#"    "..."[label="...\nvotes: {}, stake: {:.1} SOL {:.1}%"];"#,
            absent_votes,
            lamports_to_sol(absent_stake),
            absent_stake as f64 / lowest_total_stake as f64 * 100.,
        ));
    }

    // Add for vote information from all banks.
    if include_all_votes {
        for (node_pubkey, validator_votes) in &all_votes {
            for (vote_slot, vote_state) in validator_votes {
                dot.push(format!(
                    r#"    "{} vote {}"[shape=box,style=dotted,label="validator vote: {}\nroot slot: {}\nvote history:\n{}"];"#,
                    node_pubkey,
                    vote_slot,
                    node_pubkey,
                    vote_state.root_slot.unwrap_or(0),
                    vote_state
                        .votes
                        .iter()
                        .map(|vote| format!("slot {} (conf={})", vote.slot, vote.confirmation_count))
                        .collect::<Vec<_>>()
                        .join("\n")
                ));

                dot.push(format!(
                    r#"    "{} vote {}" -> "{}" [style=dotted,label="vote"];"#,
                    node_pubkey,
                    vote_slot,
                    if styled_slots.contains(&vote_slot) {
                        vote_slot.to_string()
                    } else {
                        "...".to_string()
                    },
                ));
            }
        }
    }

    dot.push("}".to_string());
    dot.join("\n")
}

#[allow(clippy::cognitive_complexity)]
fn main() {
    const DEFAULT_ROOT_COUNT: &str = "1";
    solana_logger::setup();
    solana_logger::setup_with_filter("solana=info");

    let starting_slot_arg = Arg::with_name("starting_slot")
        .long("starting-slot")
@@ -90,26 +413,87 @@ fn main() {
                .global(true)
                .help("Use directory for ledger location"),
        )
        .subcommand(SubCommand::with_name("print").about("Print the ledger").arg(&starting_slot_arg))
        .subcommand(SubCommand::with_name("print-slot").about("Print the contents of one slot").arg(
            Arg::with_name("slot")
                .index(1)
                .value_name("SLOT")
                .takes_value(true)
                .required(true)
                .help("The slot to print"),
        ))
        .subcommand(SubCommand::with_name("bounds").about("Print lowest and highest non-empty slots. Note: This ignores gaps in slots"))
        .subcommand(SubCommand::with_name("json").about("Print the ledger in JSON format").arg(&starting_slot_arg))
        .subcommand(SubCommand::with_name("verify").about("Verify the ledger's PoH"))
        .subcommand(SubCommand::with_name("prune").about("Prune the ledger at the block height").arg(
            Arg::with_name("slot_list")
                .long("slot-list")
                .value_name("FILENAME")
                .takes_value(true)
                .required(true)
                .help("The location of the YAML file with a list of rollback slot heights and hashes"),
        ))
        .subcommand(
            SubCommand::with_name("print")
                .about("Print the ledger")
                .arg(&starting_slot_arg)
        )
        .subcommand(
            SubCommand::with_name("print-slot")
                .about("Print the contents of one or more slots")
                .arg(
                    Arg::with_name("slots")
                        .index(1)
                        .value_name("SLOTS")
                        .takes_value(true)
                        .multiple(true)
                        .required(true)
                        .help("List of slots to print"),
                )
        )
        .subcommand(
            SubCommand::with_name("bounds")
                .about("Print lowest and highest non-empty slots. Note: This ignores gaps in slots")
        )
        .subcommand(
            SubCommand::with_name("json")
                .about("Print the ledger in JSON format")
                .arg(&starting_slot_arg)
        )
        .subcommand(
            SubCommand::with_name("verify")
                .about("Verify the ledger")
                .arg(
                    Arg::with_name("no_snapshot")
                        .long("no-snapshot")
                        .takes_value(false)
                        .help("Do not start from a local snapshot if present"),
                )
                .arg(
                    Arg::with_name("account_paths")
                        .long("accounts")
                        .value_name("PATHS")
                        .takes_value(true)
                        .help("Comma separated persistent accounts location"),
                )
                .arg(
                    Arg::with_name("halt_at_slot")
                        .long("halt-at-slot")
                        .value_name("SLOT")
                        .takes_value(true)
                        .help("Halt processing at the given slot"),
                )
                .arg(
                    Arg::with_name("skip_poh_verify")
                        .long("skip-poh-verify")
                        .takes_value(false)
                        .help("Skip ledger PoH verification"),
                )
                .arg(
                    Arg::with_name("graph_forks")
                        .long("graph-forks")
                        .value_name("FILENAME")
                        .takes_value(true)
                        .help("Create a Graphviz DOT file representing the active forks once the ledger is verified"),
                )
                .arg(
                    Arg::with_name("graph_forks_include_all_votes")
                        .long("graph-forks-include-all-votes")
                        .requires("graph_forks")
                        .help("Include all votes in forks graph"),
                )
        ).subcommand(
            SubCommand::with_name("prune")
                .about("Prune the ledger at the block height")
                .arg(
                    Arg::with_name("slot_list")
                        .long("slot-list")
                        .value_name("FILENAME")
                        .takes_value(true)
                        .required(true)
                        .help("The location of the YAML file with a list of rollback slot heights and hashes"),
                )
        )
        .subcommand(
            SubCommand::with_name("list-roots")
                .about("Output upto last <num-roots> root hashes and their heights starting at the given block height")
@@ -119,21 +503,26 @@ fn main() {
                .value_name("NUM")
                .takes_value(true)
                .required(true)
                .help("Maximum block height")).arg(
                    Arg::with_name("slot_list")
                        .long("slot-list")
                        .value_name("FILENAME")
                        .required(false)
                        .takes_value(true)
                        .help("The location of the output YAML file. A list of rollback slot heights and hashes will be written to the file.")).arg(
                    Arg::with_name("num_roots")
                        .long("num-roots")
                        .value_name("NUM")
                        .takes_value(true)
                        .default_value(DEFAULT_ROOT_COUNT)
                        .required(false)
                        .help("Number of roots in the output"),
                ))
                .help("Maximum block height")
                )
                .arg(
                    Arg::with_name("slot_list")
                        .long("slot-list")
                        .value_name("FILENAME")
                        .required(false)
                        .takes_value(true)
                        .help("The location of the output YAML file. A list of rollback slot heights and hashes will be written to the file.")
                )
                .arg(
                    Arg::with_name("num_roots")
                        .long("num-roots")
                        .value_name("NUM")
                        .takes_value(true)
                        .default_value(DEFAULT_ROOT_COUNT)
                        .required(false)
                        .help("Number of roots in the output"),
                )
        )
        .get_matches();

    let ledger_path = PathBuf::from(value_t_or_exit!(matches, "ledger", String));
@@ -160,22 +549,75 @@ fn main() {
            output_ledger(blocktree, starting_slot, LedgerOutputMethod::Print);
        }
        ("print-slot", Some(args_matches)) => {
            let slot = value_t_or_exit!(args_matches, "slot", Slot);
            output_slot(&blocktree, slot, &LedgerOutputMethod::Print);
            let slots = values_t_or_exit!(args_matches, "slots", Slot);
            for slot in slots {
                println!("Slot {}", slot);
                output_slot(&blocktree, slot, &LedgerOutputMethod::Print);
            }
        }
        ("json", Some(args_matches)) => {
            let starting_slot = value_t_or_exit!(args_matches, "starting_slot", Slot);
            output_ledger(blocktree, starting_slot, LedgerOutputMethod::Json);
        }
        ("verify", _) => {
        ("verify", Some(arg_matches)) => {
            println!("Verifying ledger...");
            let options = ProcessOptions {
                verify_ledger: true,
                ..ProcessOptions::default()

            let dev_halt_at_slot = value_t!(arg_matches, "halt_at_slot", Slot).ok();
            let poh_verify = !arg_matches.is_present("skip_poh_verify");

            let snapshot_config = if arg_matches.is_present("no_snapshot") {
                None
            } else {
                Some(SnapshotConfig {
                    snapshot_interval_slots: 0, // Value doesn't matter
                    snapshot_package_output_path: ledger_path.clone(),
                    snapshot_path: ledger_path.clone().join("snapshot"),
                })
            };
            match process_blocktree(&genesis_block, &blocktree, None, options) {
                Ok((_bank_forks, bank_forks_info, _)) => {
                    println!("{:?}", bank_forks_info);
            let account_paths = if let Some(account_paths) = matches.value_of("account_paths") {
                Some(account_paths.to_string())
            } else {
                Some(ledger_path.join("accounts").to_str().unwrap().to_string())
            };

            let process_options = blocktree_processor::ProcessOptions {
                poh_verify,
                dev_halt_at_slot,
                ..blocktree_processor::ProcessOptions::default()
            };

            match bank_forks_utils::load(
                &genesis_block,
                &blocktree,
                account_paths,
                snapshot_config.as_ref(),
                process_options,
            ) {
                Ok((bank_forks, bank_forks_info, _leader_schedule_cache)) => {
                    println!("Ok");

                    if let Some(output_file) = arg_matches.value_of("graph_forks") {
                        let dot = graph_forks(
                            bank_forks,
                            bank_forks_info,
                            arg_matches.is_present("graph_forks_include_all_votes"),
                        );

                        let extension = Path::new(output_file).extension();
                        let result = if extension == Some(OsStr::new("pdf")) {
                            render_dot(dot, output_file, "pdf")
                        } else if extension == Some(OsStr::new("png")) {
                            render_dot(dot, output_file, "png")
                        } else {
                            File::create(output_file)
                                .and_then(|mut file| file.write_all(&dot.into_bytes()))
                        };

                        match result {
                            Ok(_) => println!("Wrote {}", output_file),
                            Err(err) => eprintln!("Unable to write {}: {}", output_file, err),
                        }
                    }
                }
                Err(err) => {
                    eprintln!("Ledger verification failed: {:?}", err);
|
||||
[package]
|
||||
name = "solana-ledger"
|
||||
version = "0.20.0"
|
||||
version = "0.20.5"
|
||||
description = "Solana ledger"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -19,6 +19,7 @@ dlopen_derive = "0.1.4"
|
||||
fs_extra = "1.1.0"
|
||||
itertools = "0.8.0"
|
||||
lazy_static = "1.4.0"
|
||||
libc = "0.2.65"
|
||||
log = { version = "0.4.8" }
|
||||
rand = "0.6.5"
|
||||
rand_chacha = "0.1.1"
|
||||
@ -26,16 +27,16 @@ rayon = "1.2.0"
|
||||
reed-solomon-erasure = { package = "solana-reed-solomon-erasure", version = "4.0.1-3", features = ["simd-accel"] }
|
||||
serde = "1.0.101"
|
||||
serde_derive = "1.0.101"
|
||||
solana-genesis-programs = { path = "../genesis_programs", version = "0.20.0" }
|
||||
solana-logger = { path = "../logger", version = "0.20.0" }
|
||||
solana-measure = { path = "../measure", version = "0.20.0" }
|
||||
solana-merkle-tree = { path = "../merkle-tree", version = "0.20.0" }
|
||||
solana-metrics = { path = "../metrics", version = "0.20.0" }
|
||||
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "0.20.0" }
|
||||
solana-runtime = { path = "../runtime", version = "0.20.0" }
|
||||
solana-sdk = { path = "../sdk", version = "0.20.0" }
|
||||
solana-stake-api = { path = "../programs/stake_api", version = "0.20.0" }
|
||||
solana-vote-api = { path = "../programs/vote_api", version = "0.20.0" }
|
||||
solana-genesis-programs = { path = "../genesis_programs", version = "0.20.5" }
|
||||
solana-logger = { path = "../logger", version = "0.20.5" }
|
||||
solana-measure = { path = "../measure", version = "0.20.5" }
|
||||
solana-merkle-tree = { path = "../merkle-tree", version = "0.20.5" }
|
||||
solana-metrics = { path = "../metrics", version = "0.20.5" }
|
||||
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "0.20.5" }
|
||||
solana-runtime = { path = "../runtime", version = "0.20.5" }
|
||||
solana-sdk = { path = "../sdk", version = "0.20.5" }
|
||||
solana-stake-api = { path = "../programs/stake_api", version = "0.20.5" }
|
||||
solana-vote-api = { path = "../programs/vote_api", version = "0.20.5" }
|
||||
sys-info = "0.5.8"
|
||||
tar = "0.4.26"
|
||||
tempfile = "3.1.0"
|
||||
@ -43,13 +44,13 @@ tempfile = "3.1.0"
|
||||
[dependencies.rocksdb]
|
||||
# Avoid the vendored bzip2 within rocksdb-sys that can cause linker conflicts
|
||||
# when also using the bzip2 crate
|
||||
version = "0.12.4"
|
||||
version = "0.13.0"
|
||||
default-features = false
|
||||
features = ["lz4"]
|
||||
|
||||
[dev-dependencies]
|
||||
matches = "0.1.6"
|
||||
solana-budget-api = { path = "../programs/budget_api", version = "0.20.0" }
|
||||
solana-budget-api = { path = "../programs/budget_api", version = "0.20.5" }
|
||||
|
||||
[lib]
|
||||
crate-type = ["lib"]
|
||||
|
@ -287,8 +287,8 @@ impl BankForks {
            .retain(|slot, _| slot == &root || descendants[&root].contains(slot));
    }

    pub fn set_snapshot_config(&mut self, snapshot_config: SnapshotConfig) {
        self.snapshot_config = Some(snapshot_config);
    pub fn set_snapshot_config(&mut self, snapshot_config: Option<SnapshotConfig>) {
        self.snapshot_config = snapshot_config;
    }

    pub fn snapshot_config(&self) -> &Option<SnapshotConfig> {

62 ledger/src/bank_forks_utils.rs Normal file
@ -0,0 +1,62 @@
use crate::{
    bank_forks::{BankForks, SnapshotConfig},
    blocktree::Blocktree,
    blocktree_processor::{self, BankForksInfo, BlocktreeProcessorError, ProcessOptions},
    leader_schedule_cache::LeaderScheduleCache,
    snapshot_utils,
};
use log::*;
use solana_sdk::genesis_block::GenesisBlock;
use std::{fs, sync::Arc};

pub fn load(
    genesis_block: &GenesisBlock,
    blocktree: &Blocktree,
    account_paths: Option<String>,
    snapshot_config: Option<&SnapshotConfig>,
    process_options: ProcessOptions,
) -> Result<(BankForks, Vec<BankForksInfo>, LeaderScheduleCache), BlocktreeProcessorError> {
    if let Some(snapshot_config) = snapshot_config.as_ref() {
        info!(
            "Initializing snapshot path: {:?}",
            snapshot_config.snapshot_path
        );
        let _ = fs::remove_dir_all(&snapshot_config.snapshot_path);
        fs::create_dir_all(&snapshot_config.snapshot_path)
            .expect("Couldn't create snapshot directory");

        let tar =
            snapshot_utils::get_snapshot_tar_path(&snapshot_config.snapshot_package_output_path);
        if tar.exists() {
            info!("Loading snapshot package: {:?}", tar);
            // Fail hard here if snapshot fails to load, don't silently continue
            let deserialized_bank = snapshot_utils::bank_from_archive(
                account_paths
                    .clone()
                    .expect("Account paths not present when booting from snapshot"),
                &snapshot_config.snapshot_path,
                &tar,
            )
            .expect("Load from snapshot failed");

            return blocktree_processor::process_blocktree_from_root(
                genesis_block,
                blocktree,
                Arc::new(deserialized_bank),
                &process_options,
            );
        } else {
            info!("Snapshot package does not exist: {:?}", tar);
        }
    } else {
        info!("Snapshots disabled");
    }

    info!("Processing ledger from genesis");
    blocktree_processor::process_blocktree(
        &genesis_block,
        &blocktree,
        account_paths,
        process_options,
    )
}
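For context, a minimal sketch of how a caller might drive this new helper, mirroring the ledger-tool invocation earlier in this diff. The `open_and_verify` wrapper and its error handling are illustrative, not part of the change:

```rust
use solana_ledger::{bank_forks_utils, blocktree::Blocktree, blocktree_processor::ProcessOptions};
use solana_sdk::genesis_block::GenesisBlock;
use std::path::Path;

fn open_and_verify(ledger_path: &Path) {
    let genesis_block = GenesisBlock::load(ledger_path).expect("failed to load genesis block");
    let blocktree = Blocktree::open(ledger_path).expect("failed to open blocktree");
    let process_options = ProcessOptions {
        poh_verify: true,
        ..ProcessOptions::default()
    };
    // Passing `None` for both account paths and snapshot config takes the
    // "Snapshots disabled" branch above and replays the ledger from genesis.
    let (_bank_forks, bank_forks_info, _leader_schedule_cache) =
        bank_forks_utils::load(&genesis_block, &blocktree, None, None, process_options)
            .expect("ledger verification failed");
    println!("forks: {}", bank_forks_info.len());
}
```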
File diff suppressed because it is too large
@ -19,7 +19,6 @@ use std::sync::Arc;
// A good value for this is the number of cores on the machine
const TOTAL_THREADS: i32 = 8;
const MAX_WRITE_BUFFER_SIZE: u64 = 256 * 1024 * 1024; // 256MB
const MIN_WRITE_BUFFER_SIZE: u64 = 64 * 1024; // 64KB

// Column family for metadata about a leader slot
const META_CF: &str = "meta";
@ -129,22 +128,18 @@ impl Rocks {
    let db_options = get_db_options();

    // Column family names
    let meta_cf_descriptor =
        ColumnFamilyDescriptor::new(SlotMeta::NAME, get_cf_options(SlotMeta::NAME));
    let meta_cf_descriptor = ColumnFamilyDescriptor::new(SlotMeta::NAME, get_cf_options());
    let dead_slots_cf_descriptor =
        ColumnFamilyDescriptor::new(DeadSlots::NAME, get_cf_options(DeadSlots::NAME));
        ColumnFamilyDescriptor::new(DeadSlots::NAME, get_cf_options());
    let erasure_meta_cf_descriptor =
        ColumnFamilyDescriptor::new(ErasureMeta::NAME, get_cf_options(ErasureMeta::NAME));
    let orphans_cf_descriptor =
        ColumnFamilyDescriptor::new(Orphans::NAME, get_cf_options(Orphans::NAME));
    let root_cf_descriptor =
        ColumnFamilyDescriptor::new(Root::NAME, get_cf_options(Root::NAME));
    let index_cf_descriptor =
        ColumnFamilyDescriptor::new(Index::NAME, get_cf_options(Index::NAME));
        ColumnFamilyDescriptor::new(ErasureMeta::NAME, get_cf_options());
    let orphans_cf_descriptor = ColumnFamilyDescriptor::new(Orphans::NAME, get_cf_options());
    let root_cf_descriptor = ColumnFamilyDescriptor::new(Root::NAME, get_cf_options());
    let index_cf_descriptor = ColumnFamilyDescriptor::new(Index::NAME, get_cf_options());
    let shred_data_cf_descriptor =
        ColumnFamilyDescriptor::new(ShredData::NAME, get_cf_options(ShredData::NAME));
        ColumnFamilyDescriptor::new(ShredData::NAME, get_cf_options());
    let shred_code_cf_descriptor =
        ColumnFamilyDescriptor::new(ShredCode::NAME, get_cf_options(ShredCode::NAME));
        ColumnFamilyDescriptor::new(ShredCode::NAME, get_cf_options());

    let cfs = vec![
        meta_cf_descriptor,
@ -186,25 +181,25 @@ impl Rocks {
        Ok(())
    }

    fn cf_handle(&self, cf: &str) -> ColumnFamily {
    fn cf_handle(&self, cf: &str) -> &ColumnFamily {
        self.0
            .cf_handle(cf)
            .expect("should never get an unknown column")
    }

    fn get_cf(&self, cf: ColumnFamily, key: &[u8]) -> Result<Option<Vec<u8>>> {
    fn get_cf(&self, cf: &ColumnFamily, key: &[u8]) -> Result<Option<Vec<u8>>> {
        let opt = self.0.get_cf(cf, key)?.map(|db_vec| db_vec.to_vec());
        Ok(opt)
    }

    fn put_cf(&self, cf: ColumnFamily, key: &[u8], value: &[u8]) -> Result<()> {
    fn put_cf(&self, cf: &ColumnFamily, key: &[u8], value: &[u8]) -> Result<()> {
        self.0.put_cf(cf, key, value)?;
        Ok(())
    }

    fn iterator_cf<C>(
        &self,
        cf: ColumnFamily,
        cf: &ColumnFamily,
        iterator_mode: IteratorMode<C::Index>,
    ) -> Result<DBIterator>
    where
@ -223,7 +218,7 @@ impl Rocks {
        Ok(iter)
    }

    fn raw_iterator_cf(&self, cf: ColumnFamily) -> Result<DBRawIterator> {
    fn raw_iterator_cf(&self, cf: &ColumnFamily) -> Result<DBRawIterator> {
        let raw_iter = self.0.raw_iterator_cf(cf)?;

        Ok(raw_iter)
|
||||
|
||||
pub struct WriteBatch<'a> {
|
||||
write_batch: RWriteBatch,
|
||||
map: HashMap<&'static str, ColumnFamily<'a>>,
|
||||
map: HashMap<&'static str, &'a ColumnFamily>,
|
||||
}
|
||||
|
||||
impl Database {
|
||||
@ -524,7 +519,7 @@ impl Database {
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn cf_handle<C>(&self) -> ColumnFamily
|
||||
pub fn cf_handle<C>(&self) -> &ColumnFamily
|
||||
where
|
||||
C: Column,
|
||||
{
|
||||
@ -542,7 +537,7 @@ impl Database {
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn raw_iterator_cf(&self, cf: ColumnFamily) -> Result<DBRawIterator> {
|
||||
pub fn raw_iterator_cf(&self, cf: &ColumnFamily) -> Result<DBRawIterator> {
|
||||
self.backend.raw_iterator_cf(cf)
|
||||
}
|
||||
|
||||
@ -615,7 +610,7 @@ where
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn handle(&self) -> ColumnFamily {
|
||||
pub fn handle(&self) -> &ColumnFamily {
|
||||
self.backend.cf_handle(C::NAME)
|
||||
}
|
||||
|
||||
@ -674,32 +669,18 @@ impl<'a> WriteBatch<'a> {
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn get_cf<C: Column>(&self) -> ColumnFamily<'a> {
|
||||
fn get_cf<C: Column>(&self) -> &'a ColumnFamily {
|
||||
self.map[C::NAME]
|
||||
}
|
||||
}
|
||||
|
||||
fn get_cf_options(name: &'static str) -> Options {
|
||||
use columns::{ErasureMeta, Index, ShredCode, ShredData};
|
||||
|
||||
fn get_cf_options() -> Options {
|
||||
let mut options = Options::default();
|
||||
match name {
|
||||
ShredCode::NAME | ShredData::NAME | Index::NAME | ErasureMeta::NAME => {
|
||||
// 512MB * 8 = 4GB. 2 of these columns should take no more than 8GB of RAM
|
||||
options.set_max_write_buffer_number(8);
|
||||
options.set_write_buffer_size(MAX_WRITE_BUFFER_SIZE as usize);
|
||||
options.set_target_file_size_base(MAX_WRITE_BUFFER_SIZE / 10);
|
||||
options.set_max_bytes_for_level_base(MAX_WRITE_BUFFER_SIZE);
|
||||
}
|
||||
_ => {
|
||||
// We want smaller CFs to flush faster. This results in more WAL files but lowers
|
||||
// overall WAL space utilization and increases flush frequency
|
||||
options.set_write_buffer_size(MIN_WRITE_BUFFER_SIZE as usize);
|
||||
options.set_target_file_size_base(MIN_WRITE_BUFFER_SIZE);
|
||||
options.set_max_bytes_for_level_base(MIN_WRITE_BUFFER_SIZE);
|
||||
options.set_level_zero_file_num_compaction_trigger(1);
|
||||
}
|
||||
}
|
||||
// 256 * 8 = 2GB. 6 of these columns should take at most 12GB of RAM
|
||||
options.set_max_write_buffer_number(8);
|
||||
options.set_write_buffer_size(MAX_WRITE_BUFFER_SIZE as usize);
|
||||
options.set_target_file_size_base(MAX_WRITE_BUFFER_SIZE / 10);
|
||||
options.set_max_bytes_for_level_base(MAX_WRITE_BUFFER_SIZE);
|
||||
options
|
||||
}
|
||||
|
||||
|
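The memory budget in that comment can be sanity-checked directly; this standalone arithmetic (an illustration, not part of the diff) mirrors the constants above:

```rust
const MAX_WRITE_BUFFER_SIZE: u64 = 256 * 1024 * 1024; // 256MB, as above
const MAX_WRITE_BUFFER_NUMBER: u64 = 8;

fn main() {
    // Worst case per column family: 8 in-memory write buffers of 256MB.
    let per_column = MAX_WRITE_BUFFER_NUMBER * MAX_WRITE_BUFFER_SIZE;
    assert_eq!(per_column, 2 * 1024 * 1024 * 1024); // 2GB

    // Six write-heavy columns at the same setting bound the total at 12GB,
    // matching the comment in get_cf_options.
    assert_eq!(6 * per_column, 12 * 1024 * 1024 * 1024);
}
```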
@ -12,11 +12,13 @@ pub struct SlotMeta {
    // The total number of consecutive blobs starting from index 0
    // we have received for this slot.
    pub consumed: u64,
    // The index *plus one* of the highest blob received for this slot. Useful
    // for checking if the slot has received any blobs yet, and to calculate the
    // The index *plus one* of the highest shred received for this slot. Useful
    // for checking if the slot has received any shreds yet, and to calculate the
    // range where there is one or more holes: `(consumed..received)`.
    pub received: u64,
    // The index of the blob that is flagged as the last blob for this slot.
    // The timestamp of the first time a shred was added for this slot
    pub first_shred_timestamp: u64,
    // The index of the shred that is flagged as the last shred for this slot.
    pub last_index: u64,
    // The slot height of the block this one derives from.
    pub parent_slot: u64,
@ -31,7 +33,7 @@ pub struct SlotMeta {
}

#[derive(Clone, Debug, Default, Deserialize, Serialize, PartialEq)]
/// Index recording presence/absence of blobs
/// Index recording presence/absence of shreds
pub struct Index {
    pub slot: u64,
    data: DataIndex,
@ -40,14 +42,14 @@ pub struct Index {

#[derive(Clone, Debug, Default, Deserialize, Serialize, PartialEq)]
pub struct DataIndex {
    /// Map representing presence/absence of data blobs
    /// Map representing presence/absence of data shreds
    index: BTreeSet<u64>,
}

#[derive(Clone, Debug, Default, Deserialize, Serialize, PartialEq)]
/// Erasure coding information
pub struct CodingIndex {
    /// Map from set index, to hashmap from blob index to presence bool
    /// Map from set index, to hashmap from shred index to presence bool
    index: BTreeSet<u64>,
}

@ -145,8 +147,8 @@ impl DataIndex {
impl SlotMeta {
    pub fn is_full(&self) -> bool {
        // last_index is std::u64::MAX when it has no information about how
        // many blobs will fill this slot.
        // Note: A full slot with zero blobs is not possible.
        // many shreds will fill this slot.
        // Note: A full slot with zero shreds is not possible.
        if self.last_index == std::u64::MAX {
            return false;
        }
@ -179,6 +181,7 @@ impl SlotMeta {
        slot,
        consumed: 0,
        received: 0,
        first_shred_timestamp: 0,
        parent_slot,
        next_slots: vec![],
        is_connected: slot == 0,
@ -1,28 +1,31 @@
use crate::bank_forks::BankForks;
use crate::blocktree::Blocktree;
use crate::blocktree_meta::SlotMeta;
use crate::entry::{create_ticks, Entry, EntrySlice};
use crate::leader_schedule_cache::LeaderScheduleCache;
use crate::{
    bank_forks::BankForks,
    blocktree::Blocktree,
    blocktree_meta::SlotMeta,
    entry::{create_ticks, Entry, EntrySlice},
    leader_schedule_cache::LeaderScheduleCache,
};
use itertools::Itertools;
use log::*;
use rand::seq::SliceRandom;
use rand::thread_rng;
use rayon::prelude::*;
use rayon::ThreadPool;
use rand::{seq::SliceRandom, thread_rng};
use rayon::{prelude::*, ThreadPool};
use solana_metrics::{datapoint, datapoint_error, inc_new_counter_debug};
use solana_runtime::bank::Bank;
use solana_runtime::transaction_batch::TransactionBatch;
use solana_sdk::clock::{Slot, MAX_RECENT_BLOCKHASHES};
use solana_sdk::genesis_block::GenesisBlock;
use solana_sdk::hash::Hash;
use solana_sdk::signature::{Keypair, KeypairUtil};
use solana_sdk::timing::duration_as_ms;
use solana_sdk::transaction::Result;
use std::result;
use std::sync::Arc;
use std::time::{Duration, Instant};

use solana_rayon_threadlimit::get_thread_count;
use std::cell::RefCell;
use solana_runtime::{bank::Bank, transaction_batch::TransactionBatch};
use solana_sdk::{
    clock::{Slot, MAX_RECENT_BLOCKHASHES},
    genesis_block::GenesisBlock,
    hash::Hash,
    signature::{Keypair, KeypairUtil},
    timing::duration_as_ms,
    transaction::Result,
};
use std::{
    cell::RefCell,
    result,
    sync::Arc,
    time::{Duration, Instant},
};

thread_local!(static PAR_THREAD_POOL: RefCell<ThreadPool> = RefCell::new(rayon::ThreadPoolBuilder::new()
    .num_threads(get_thread_count())
@ -111,8 +114,18 @@ fn process_entries_with_callback(
    let mut tick_hashes = vec![];
    for entry in entries {
        if entry.is_tick() {
            // if its a tick, save it for later
            // If it's a tick, save it for later
            tick_hashes.push(entry.hash);
            if bank.is_block_boundary(bank.tick_height() + tick_hashes.len() as u64) {
                // If it's a tick that will cause a new blockhash to be created,
                // execute the group and register the tick
                execute_batches(bank, &batches, entry_callback)?;
                batches.clear();
                for hash in &tick_hashes {
                    bank.register_tick(hash);
                }
                tick_hashes.clear();
            }
            continue;
        }
        // else loop on processing the entry
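The buffering above delays `register_tick` until a block boundary so the batched transactions execute against the blockhash they were recorded under. A toy model of the flush-at-boundary flow, assuming purely for illustration that a boundary falls on every multiple of `ticks_per_slot` (the real check is `Bank::is_block_boundary`):

```rust
fn is_block_boundary(tick_height: u64, ticks_per_slot: u64) -> bool {
    tick_height % ticks_per_slot == 0
}

fn main() {
    let ticks_per_slot = 8;
    let mut tick_hashes: Vec<u64> = vec![];
    let mut flushes = 0;
    for tick in 1..=16u64 {
        tick_hashes.push(tick); // stand-in for entry.hash
        if is_block_boundary(tick, ticks_per_slot) {
            // Execute any pending transaction batches first, then register
            // the buffered ticks so the new blockhash sees their effects.
            flushes += 1;
            tick_hashes.clear();
        }
    }
    assert_eq!(flushes, 2); // one flush per completed slot
    assert!(tick_hashes.is_empty());
}
```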
@ -183,7 +196,7 @@ pub type ProcessCallback = Arc<dyn Fn(&Bank) -> () + Sync + Send>;

#[derive(Default)]
pub struct ProcessOptions {
    pub verify_ledger: bool,
    pub poh_verify: bool,
    pub full_leader_cache: bool,
    pub dev_halt_at_slot: Option<Slot>,
    pub entry_callback: Option<ProcessCallback>,
@ -269,9 +282,14 @@ pub fn process_blocktree_from_root(
    };

    info!(
        "processing ledger...complete in {}ms, forks={}...",
        "ledger processed in {}ms. {} fork{} at {}",
        duration_as_ms(&now.elapsed()),
        bank_forks_info.len(),
        if bank_forks_info.len() > 1 { "s" } else { "" },
        bank_forks_info
            .iter()
            .map(|bfi| bfi.bank_slot.to_string())
            .join(", ")
    );

    Ok((bank_forks, bank_forks_info, leader_schedule_cache))
@ -285,7 +303,7 @@ fn verify_and_process_entries(
) -> result::Result<Hash, BlocktreeProcessorError> {
    assert!(!entries.is_empty());

    if opts.verify_ledger && !entries.verify(&last_entry_hash) {
    if opts.poh_verify && !entries.verify(&last_entry_hash) {
        warn!("Ledger proof of history failed at slot: {}", bank.slot());
        return Err(BlocktreeProcessorError::LedgerVerificationFailed);
    }
@ -476,6 +494,7 @@ pub fn fill_blocktree_slot_with_ticks(
    true,
    &Arc::new(Keypair::new()),
    entries,
    0,
)
.unwrap();

@ -550,6 +569,7 @@ pub mod tests {
    false,
    &Arc::new(Keypair::new()),
    entries,
    0,
)
.expect("Expected to write shredded entries to blocktree");
}
@ -558,7 +578,7 @@ pub mod tests {
    fill_blocktree_slot_with_ticks(&blocktree, ticks_per_slot, 2, 1, blockhash);

    let opts = ProcessOptions {
        verify_ledger: true,
        poh_verify: true,
        ..ProcessOptions::default()
    };
    let (mut _bank_forks, bank_forks_info, _) =
@ -620,7 +640,7 @@ pub mod tests {
    blocktree.set_roots(&[0, 1, 4]).unwrap();

    let opts = ProcessOptions {
        verify_ledger: true,
        poh_verify: true,
        ..ProcessOptions::default()
    };
    let (bank_forks, bank_forks_info, _) =
@ -694,7 +714,7 @@ pub mod tests {
    blocktree.set_roots(&[0, 1]).unwrap();

    let opts = ProcessOptions {
        verify_ledger: true,
        poh_verify: true,
        ..ProcessOptions::default()
    };
    let (bank_forks, bank_forks_info, _) =
@ -774,7 +794,7 @@ pub mod tests {

    // Check that we can properly restart the ledger / leader scheduler doesn't fail
    let opts = ProcessOptions {
        verify_ledger: true,
        poh_verify: true,
        ..ProcessOptions::default()
    };
    let (bank_forks, bank_forks_info, _) =
@ -909,10 +929,11 @@ pub mod tests {
    true,
    &Arc::new(Keypair::new()),
    entries,
    0,
)
.unwrap();
let opts = ProcessOptions {
    verify_ledger: true,
    poh_verify: true,
    ..ProcessOptions::default()
};
let (bank_forks, bank_forks_info, _) =
@ -941,7 +962,7 @@ pub mod tests {

    let blocktree = Blocktree::open(&ledger_path).unwrap();
    let opts = ProcessOptions {
        verify_ledger: true,
        poh_verify: true,
        ..ProcessOptions::default()
    };
    let (bank_forks, bank_forks_info, _) =
@ -1015,6 +1036,7 @@ pub mod tests {
    true,
    &Arc::new(Keypair::new()),
    entries,
    0,
)
.unwrap();

@ -1676,7 +1698,7 @@ pub mod tests {
    // Set up bank1
    let bank0 = Arc::new(Bank::new(&genesis_block));
    let opts = ProcessOptions {
        verify_ledger: true,
        poh_verify: true,
        ..ProcessOptions::default()
    };
    process_bank_0(&bank0, &blocktree, &opts).unwrap();
@ -1817,6 +1839,33 @@ pub mod tests {
    }
}

#[test]
fn test_process_ledger_ticks_ordering() {
    let GenesisBlockInfo {
        genesis_block,
        mint_keypair,
        ..
    } = create_genesis_block(100);
    let bank0 = Arc::new(Bank::new(&genesis_block));
    let genesis_hash = genesis_block.hash();
    let keypair = Keypair::new();

    // Simulate a slot of virtual ticks, creates a new blockhash
    let mut entries = create_ticks(genesis_block.ticks_per_slot, genesis_hash);

    // The new blockhash is going to be the hash of the last tick in the block
    let new_blockhash = entries.last().unwrap().hash;
    // Create a transaction that references the new blockhash; it should still
    // be able to find the blockhash if we process transactions all in the same
    // batch
    let tx = system_transaction::transfer(&mint_keypair, &keypair.pubkey(), 1, new_blockhash);
    let entry = next_entry(&new_blockhash, 1, vec![tx]);
    entries.push(entry);

    process_entries_with_callback(&bank0, &entries, true, None).unwrap();
    assert_eq!(bank0.get_balance(&keypair.pubkey()), 1)
}

fn get_epoch_schedule(
    genesis_block: &GenesisBlock,
    account_paths: Option<String>,
@ -146,7 +146,7 @@ impl LeaderScheduleCache {
        }
        start_index = 0;
    }
    first_slot.and_then(|slot| Some((slot, last_slot)))
    first_slot.map(|slot| (slot, last_slot))
}

fn slot_leader_at_no_compute(&self, slot: u64) -> Option<Pubkey> {
@ -432,7 +432,7 @@ mod tests {
    // Write a shred into slot 2 that chains to slot 1,
    // but slot 1 is empty so should not be skipped
    let (shreds, _) = make_slot_entries(2, 1, 1);
    blocktree.insert_shreds(shreds, None).unwrap();
    blocktree.insert_shreds(shreds, None, false).unwrap();
    assert_eq!(
        cache
            .next_leader_slot(&pubkey, 0, &bank, Some(&blocktree))
@ -445,7 +445,7 @@ mod tests {
    let (shreds, _) = make_slot_entries(1, 0, 1);

    // Check that slot 1 and 2 are skipped
    blocktree.insert_shreds(shreds, None).unwrap();
    blocktree.insert_shreds(shreds, None, false).unwrap();
    assert_eq!(
        cache
            .next_leader_slot(&pubkey, 0, &bank, Some(&blocktree))
@ -1,4 +1,5 @@
pub mod bank_forks;
pub mod bank_forks_utils;
#[macro_use]
pub mod blocktree;
mod blocktree_db;
@ -8,17 +8,20 @@ use rayon::ThreadPool;
use serde::{Deserialize, Serialize};
use solana_metrics::datapoint_debug;
use solana_rayon_threadlimit::get_thread_count;
use solana_sdk::hash::Hash;
use solana_sdk::packet::PACKET_DATA_SIZE;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::{Keypair, KeypairUtil, Signature};
use std::sync::Arc;
use std::time::Instant;
use solana_sdk::{
    clock::Slot,
    hash::Hash,
    packet::PACKET_DATA_SIZE,
    pubkey::Pubkey,
    signature::{Keypair, KeypairUtil, Signature},
};
use std::mem::size_of;
use std::{sync::Arc, time::Instant};

/// The following constants are computed by hand, and hardcoded.
/// `test_shred_constants` ensures that the values are correct.
/// Constants are used over lazy_static for performance reasons.
pub const SIZE_OF_COMMON_SHRED_HEADER: usize = 77;
pub const SIZE_OF_COMMON_SHRED_HEADER: usize = 79;
pub const SIZE_OF_DATA_SHRED_HEADER: usize = 3;
pub const SIZE_OF_CODING_SHRED_HEADER: usize = 6;
pub const SIZE_OF_SIGNATURE: usize = 64;
@ -38,15 +41,12 @@ thread_local!(static PAR_THREAD_POOL: RefCell<ThreadPool> = RefCell::new(rayon::
pub const DATA_SHRED: u8 = 0b1010_0101;
pub const CODING_SHRED: u8 = 0b0101_1010;

/// This limit comes from reed solomon library, but unfortunately they don't have
/// a public constant defined for it.
pub const MAX_DATA_SHREDS_PER_FEC_BLOCK: u32 = 16;
pub const MAX_DATA_SHREDS_PER_FEC_BLOCK: u32 = 32;
pub const RECOMMENDED_FEC_RATE: f32 = 1.0;

/// Based on rse benchmarks, the optimal erasure config uses 16 data shreds and 4 coding shreds
pub const RECOMMENDED_FEC_RATE: f32 = 0.25;

const LAST_SHRED_IN_SLOT: u8 = 0b0000_0001;
pub const DATA_COMPLETE_SHRED: u8 = 0b0000_0010;
pub const SHRED_TICK_REFERENCE_MASK: u8 = 0b0011_1111;
const LAST_SHRED_IN_SLOT: u8 = 0b1000_0000;
pub const DATA_COMPLETE_SHRED: u8 = 0b0100_0000;
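The repositioned flag bits leave the low six bits of the data shred's `flags` byte free for the new reference tick, so one byte carries all three fields. A standalone illustration of the packing, mirroring what the shred constructor below does:

```rust
const SHRED_TICK_REFERENCE_MASK: u8 = 0b0011_1111;
const DATA_COMPLETE_SHRED: u8 = 0b0100_0000;
const LAST_SHRED_IN_SLOT: u8 = 0b1000_0000;

fn main() {
    // Pack a reference tick of 5 with the data-complete flag set; ticks
    // above 63 saturate at the mask instead of spilling into the flags.
    let flags = 5u8.min(SHRED_TICK_REFERENCE_MASK) | DATA_COMPLETE_SHRED;
    assert_eq!(flags & SHRED_TICK_REFERENCE_MASK, 5);
    assert_ne!(flags & DATA_COMPLETE_SHRED, 0);
    assert_eq!(flags & LAST_SHRED_IN_SLOT, 0);

    // An oversized tick cannot clobber the flag bits.
    let saturated = 200u8.min(SHRED_TICK_REFERENCE_MASK);
    assert_eq!(saturated, SHRED_TICK_REFERENCE_MASK);
}
```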
#[derive(Debug)]
pub enum ShredError {
@ -79,6 +79,7 @@ pub struct ShredCommonHeader {
    pub shred_type: ShredType,
    pub slot: u64,
    pub index: u32,
    pub version: u16,
}

/// The data shred header has parent offset and flags
@ -135,14 +136,21 @@ impl Shred {
    data: Option<&[u8]>,
    is_last_data: bool,
    is_last_in_slot: bool,
    reference_tick: u8,
    version: u16,
) -> Self {
    let mut payload = vec![0; PACKET_DATA_SIZE];
    let mut common_header = ShredCommonHeader::default();
    common_header.slot = slot;
    common_header.index = index;
    let common_header = ShredCommonHeader {
        slot,
        index,
        version,
        ..ShredCommonHeader::default()
    };

    let mut data_header = DataShredHeader::default();
    data_header.parent_offset = parent_offset;
    let mut data_header = DataShredHeader {
        parent_offset,
        flags: reference_tick.min(SHRED_TICK_REFERENCE_MASK),
    };

    if is_last_data {
        data_header.flags |= DATA_COMPLETE_SHRED
@ -152,22 +160,23 @@ impl Shred {
        data_header.flags |= LAST_SHRED_IN_SLOT
    }

    let mut start = 0;
    Self::serialize_obj_into(
        &mut start,
        SIZE_OF_COMMON_SHRED_HEADER,
        &mut payload,
        &common_header,
    )
    .expect("Failed to write header into shred buffer");
    Self::serialize_obj_into(
        &mut start,
        SIZE_OF_DATA_SHRED_HEADER,
        &mut payload,
        &data_header,
    )
    .expect("Failed to write data header into shred buffer");

    if let Some(data) = data {
        let mut start = 0;
        Self::serialize_obj_into(
            &mut start,
            SIZE_OF_COMMON_SHRED_HEADER,
            &mut payload,
            &common_header,
        )
        .expect("Failed to write header into shred buffer");
        Self::serialize_obj_into(
            &mut start,
            SIZE_OF_DATA_SHRED_HEADER,
            &mut payload,
            &data_header,
        )
        .expect("Failed to write data header into shred buffer");
        payload[start..start + data.len()].clone_from_slice(data);
    }

@ -272,6 +281,10 @@ impl Shred {
        self.common_header.index
    }

    pub fn version(&self) -> u16 {
        self.common_header.version
    }

    /// This is not a safe function. It only changes the meta information.
    /// Use this only for test code which doesn't care about actual shred
    pub fn set_index(&mut self, index: u32) {
@ -327,23 +340,58 @@ impl Shred {
        }
    }

    pub fn reference_tick(&self) -> u8 {
        if self.is_data() {
            self.data_header.flags & SHRED_TICK_REFERENCE_MASK
        } else {
            SHRED_TICK_REFERENCE_MASK
        }
    }

    pub fn reference_tick_from_data(data: &[u8]) -> u8 {
        let flags = data[SIZE_OF_COMMON_SHRED_HEADER + SIZE_OF_DATA_SHRED_HEADER - size_of::<u8>()];
        flags & SHRED_TICK_REFERENCE_MASK
    }

    pub fn verify(&self, pubkey: &Pubkey) -> bool {
        self.signature()
            .verify(pubkey.as_ref(), &self.payload[SIZE_OF_SIGNATURE..])
    }

    pub fn version_from_hash(hash: &Hash) -> u16 {
        let hash = hash.as_ref();
        let mut accum = [0u8; 2];
        hash.chunks(2).for_each(|seed| {
            accum
                .iter_mut()
                .zip(seed)
                .for_each(|(accum, seed)| *accum ^= *seed)
        });
        // convert accum into a u16
        ((accum[0] as u16) << 8) | accum[1] as u16
    }
}
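The version derivation is a plain XOR fold of the 32-byte hash into two bytes. A standalone copy of the fold (duplicated here only for illustration) checked against one of the test vectors that appear later in this diff:

```rust
fn version_from_bytes(hash: &[u8; 32]) -> u16 {
    let mut accum = [0u8; 2];
    // XOR each 2-byte chunk of the hash into the accumulator...
    hash.chunks(2)
        .for_each(|seed| accum.iter_mut().zip(seed).for_each(|(a, s)| *a ^= *s));
    // ...then combine the two bytes big-endian into a u16.
    ((accum[0] as u16) << 8) | accum[1] as u16
}

fn main() {
    // Only the first four bytes are non-zero: accum[0] = 0xa5 ^ 0x5a = 0xff
    // and accum[1] = 0xa5 ^ 0x5a = 0xff, giving 0xffff.
    let mut hash = [0u8; 32];
    hash[..4].copy_from_slice(&[0xa5, 0xa5, 0x5a, 0x5a]);
    assert_eq!(version_from_bytes(&hash), 0xffff);
}
```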
#[derive(Debug)]
pub struct Shredder {
    slot: u64,
    parent_slot: u64,
    version: u16,
    fec_rate: f32,
    keypair: Arc<Keypair>,
    pub signing_coding_time: u128,
    reference_tick: u8,
}

impl Shredder {
    pub fn new(slot: u64, parent_slot: u64, fec_rate: f32, keypair: Arc<Keypair>) -> Result<Self> {
    pub fn new(
        slot: Slot,
        parent_slot: Slot,
        fec_rate: f32,
        keypair: Arc<Keypair>,
        reference_tick: u8,
        version: u16,
    ) -> Result<Self> {
        if fec_rate > 1.0 || fec_rate < 0.0 {
            Err(ShredError::InvalidFecRate(fec_rate))
        } else if slot < parent_slot || slot - parent_slot > u64::from(std::u16::MAX) {
@ -355,6 +403,8 @@ impl Shredder {
            fec_rate,
            keypair,
            signing_coding_time: 0,
            reference_tick,
            version,
        })
    }
}
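The `slot - parent_slot > u64::from(std::u16::MAX)` guard exists because the data shred header stores the parent as a `u16` offset from the shred's own slot. A quick illustration; the `parent_from_offset` helper is hypothetical, only the bound comes from the code above:

```rust
fn parent_from_offset(slot: u64, parent_offset: u16) -> u64 {
    slot - u64::from(parent_offset)
}

fn main() {
    assert_eq!(parent_from_offset(100, 5), 95);
    // The farthest parent a data shred can point to:
    assert_eq!(parent_from_offset(70_000, std::u16::MAX), 70_000 - 65_535);
}
```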
@ -398,6 +448,8 @@ impl Shredder {
    Some(shred_data),
    is_last_data,
    is_last_in_slot,
    self.reference_tick,
    self.version,
);

Shredder::sign_shred(&self.keypair, &mut shred);
@ -413,7 +465,12 @@ impl Shredder {
    data_shreds
        .par_chunks(MAX_DATA_SHREDS_PER_FEC_BLOCK as usize)
        .flat_map(|shred_data_batch| {
            Shredder::generate_coding_shreds(self.slot, self.fec_rate, shred_data_batch)
            Shredder::generate_coding_shreds(
                self.slot,
                self.fec_rate,
                shred_data_batch,
                self.version,
            )
        })
        .collect()
})
@ -455,11 +512,15 @@ impl Shredder {
    num_data: usize,
    num_code: usize,
    position: usize,
    version: u16,
) -> (ShredCommonHeader, CodingShredHeader) {
    let mut header = ShredCommonHeader::default();
    header.shred_type = ShredType(CODING_SHRED);
    header.index = index;
    header.slot = slot;
    let header = ShredCommonHeader {
        shred_type: ShredType(CODING_SHRED),
        index,
        slot,
        version,
        ..ShredCommonHeader::default()
    };
    (
        header,
        CodingShredHeader {
@ -475,6 +536,7 @@ impl Shredder {
    slot: u64,
    fec_rate: f32,
    data_shred_batch: &[Shred],
    version: u16,
) -> Vec<Shred> {
    assert!(!data_shred_batch.is_empty());
    if fec_rate != 0.0 {
@ -501,6 +563,7 @@ impl Shredder {
    num_data,
    num_coding,
    i,
    version,
);
let shred =
    Shred::new_empty_from_header(header, DataShredHeader::default(), coding_header);
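With the rate at 0.25 and batches capped at 32 data shreds, the coding shred count per FEC block follows directly. This arithmetic sketch shows the proportion only, not the shredder's exact rounding logic:

```rust
const MAX_DATA_SHREDS_PER_FEC_BLOCK: u32 = 32;
const RECOMMENDED_FEC_RATE: f32 = 0.25;

fn main() {
    // A full batch of 32 data shreds gets 32 * 0.25 = 8 coding shreds,
    // preserving the benchmarked 16:4 data-to-coding proportion.
    let num_data = MAX_DATA_SHREDS_PER_FEC_BLOCK;
    let num_coding = (num_data as f32 * RECOMMENDED_FEC_RATE) as u32;
    assert_eq!(num_coding, 8);
}
```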
@ -530,6 +593,7 @@ impl Shredder {
    num_data,
    num_coding,
    i,
    version,
);
Shred {
    common_header,
@ -733,6 +797,7 @@ pub mod tests {
    use super::*;
    use bincode::serialized_size;
    use matches::assert_matches;
    use solana_sdk::hash::hash;
    use solana_sdk::system_transaction;
    use std::collections::HashSet;
    use std::convert::TryInto;
@ -800,7 +865,7 @@ pub mod tests {

    // Test that parent cannot be > current slot
    assert_matches!(
        Shredder::new(slot, slot + 1, 1.00, keypair.clone()),
        Shredder::new(slot, slot + 1, 1.00, keypair.clone(), 0, 0),
        Err(ShredError::SlotTooLow {
            slot: _,
            parent_slot: _,
@ -808,7 +873,7 @@ pub mod tests {
    );
    // Test that slot - parent cannot be > u16 MAX
    assert_matches!(
        Shredder::new(slot, slot - 1 - 0xffff, 1.00, keypair.clone()),
        Shredder::new(slot, slot - 1 - 0xffff, 1.00, keypair.clone(), 0, 0),
        Err(ShredError::SlotTooLow {
            slot: _,
            parent_slot: _,
@ -817,7 +882,7 @@ pub mod tests {

    let fec_rate = 0.25;
    let parent_slot = slot - 5;
    let shredder = Shredder::new(slot, parent_slot, fec_rate, keypair.clone())
    let shredder = Shredder::new(slot, parent_slot, fec_rate, keypair.clone(), 0, 0)
        .expect("Failed in creating shredder");

    let entries: Vec<_> = (0..5)
@ -892,7 +957,7 @@ pub mod tests {
    let slot = 1;

    let parent_slot = 0;
    let shredder = Shredder::new(slot, parent_slot, 0.0, keypair.clone())
    let shredder = Shredder::new(slot, parent_slot, 0.0, keypair.clone(), 0, 0)
        .expect("Failed in creating shredder");

    let entries: Vec<_> = (0..5)
@ -912,6 +977,72 @@ pub mod tests {
    assert_eq!(deserialized_shred, *data_shreds.last().unwrap());
}

#[test]
fn test_shred_reference_tick() {
    let keypair = Arc::new(Keypair::new());
    let slot = 1;

    let parent_slot = 0;
    let shredder = Shredder::new(slot, parent_slot, 0.0, keypair.clone(), 5, 0)
        .expect("Failed in creating shredder");

    let entries: Vec<_> = (0..5)
        .map(|_| {
            let keypair0 = Keypair::new();
            let keypair1 = Keypair::new();
            let tx0 =
                system_transaction::transfer(&keypair0, &keypair1.pubkey(), 1, Hash::default());
            Entry::new(&Hash::default(), 1, vec![tx0])
        })
        .collect();

    let data_shreds = shredder.entries_to_shreds(&entries, true, 0).0;
    data_shreds.iter().for_each(|s| {
        assert_eq!(s.reference_tick(), 5);
        assert_eq!(Shred::reference_tick_from_data(&s.payload), 5);
    });

    let deserialized_shred =
        Shred::new_from_serialized_shred(data_shreds.last().unwrap().payload.clone()).unwrap();
    assert_eq!(deserialized_shred.reference_tick(), 5);
}

#[test]
fn test_shred_reference_tick_overflow() {
    let keypair = Arc::new(Keypair::new());
    let slot = 1;

    let parent_slot = 0;
    let shredder = Shredder::new(slot, parent_slot, 0.0, keypair.clone(), u8::max_value(), 0)
        .expect("Failed in creating shredder");

    let entries: Vec<_> = (0..5)
        .map(|_| {
            let keypair0 = Keypair::new();
            let keypair1 = Keypair::new();
            let tx0 =
                system_transaction::transfer(&keypair0, &keypair1.pubkey(), 1, Hash::default());
            Entry::new(&Hash::default(), 1, vec![tx0])
        })
        .collect();

    let data_shreds = shredder.entries_to_shreds(&entries, true, 0).0;
    data_shreds.iter().for_each(|s| {
        assert_eq!(s.reference_tick(), SHRED_TICK_REFERENCE_MASK);
        assert_eq!(
            Shred::reference_tick_from_data(&s.payload),
            SHRED_TICK_REFERENCE_MASK
        );
    });

    let deserialized_shred =
        Shred::new_from_serialized_shred(data_shreds.last().unwrap().payload.clone()).unwrap();
    assert_eq!(
        deserialized_shred.reference_tick(),
        SHRED_TICK_REFERENCE_MASK
    );
}

#[test]
fn test_data_and_code_shredder() {
    let keypair = Arc::new(Keypair::new());
@ -919,11 +1050,11 @@ pub mod tests {
    let slot = 0x123456789abcdef0;
    // Test that FEC rate cannot be > 1.0
    assert_matches!(
        Shredder::new(slot, slot - 5, 1.001, keypair.clone()),
        Shredder::new(slot, slot - 5, 1.001, keypair.clone(), 0, 0),
        Err(ShredError::InvalidFecRate(_))
    );

    let shredder = Shredder::new(0x123456789abcdef0, slot - 5, 1.0, keypair.clone())
    let shredder = Shredder::new(0x123456789abcdef0, slot - 5, 1.0, keypair.clone(), 0, 0)
        .expect("Failed in creating shredder");

    // Create enough entries to make > 1 shred
@ -965,7 +1096,7 @@ pub mod tests {
fn test_recovery_and_reassembly() {
    let keypair = Arc::new(Keypair::new());
    let slot = 0x123456789abcdef0;
    let shredder = Shredder::new(slot, slot - 5, 1.0, keypair.clone())
    let shredder = Shredder::new(slot, slot - 5, 1.0, keypair.clone(), 0, 0)
        .expect("Failed in creating shredder");

    let keypair0 = Keypair::new();
@ -1211,7 +1342,7 @@ pub mod tests {
fn test_multi_fec_block_coding() {
    let keypair = Arc::new(Keypair::new());
    let slot = 0x123456789abcdef0;
    let shredder = Shredder::new(slot, slot - 5, 1.0, keypair.clone())
    let shredder = Shredder::new(slot, slot - 5, 1.0, keypair.clone(), 0, 0)
        .expect("Failed in creating shredder");

    let num_fec_sets = 100;
@ -1294,4 +1425,54 @@ pub mod tests {
    let result = Shredder::deshred(&all_shreds[..]).unwrap();
    assert_eq!(serialized_entries[..], result[..serialized_entries.len()]);
}

#[test]
fn test_shred_version() {
    let keypair = Arc::new(Keypair::new());
    let hash = hash(Hash::default().as_ref());
    let version = Shred::version_from_hash(&hash);
    assert_ne!(version, 0);
    let shredder =
        Shredder::new(0, 0, 1.0, keypair, 0, version).expect("Failed in creating shredder");

    let entries: Vec<_> = (0..5)
        .map(|_| {
            let keypair0 = Keypair::new();
            let keypair1 = Keypair::new();
            let tx0 =
                system_transaction::transfer(&keypair0, &keypair1.pubkey(), 1, Hash::default());
            Entry::new(&Hash::default(), 1, vec![tx0])
        })
        .collect();

    let (data_shreds, coding_shreds, _next_index) =
        shredder.entries_to_shreds(&entries, true, 0);
    assert!(!data_shreds
        .iter()
        .chain(coding_shreds.iter())
        .any(|s| s.version() != version));
}

#[test]
fn test_version_from_hash() {
    let hash = [
        0xa5u8, 0xa5, 0x5a, 0x5a, 0xa5, 0xa5, 0x5a, 0x5a, 0xa5, 0xa5, 0x5a, 0x5a, 0xa5, 0xa5,
        0x5a, 0x5a, 0xa5, 0xa5, 0x5a, 0x5a, 0xa5, 0xa5, 0x5a, 0x5a, 0xa5, 0xa5, 0x5a, 0x5a,
        0xa5, 0xa5, 0x5a, 0x5a,
    ];
    let version = Shred::version_from_hash(&Hash::new(&hash));
    assert_eq!(version, 0);
    let hash = [
        0xa5u8, 0xa5, 0x5a, 0x5a, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
    ];
    let version = Shred::version_from_hash(&Hash::new(&hash));
    assert_eq!(version, 0xffff);
    let hash = [
        0xa5u8, 0xa5, 0x5a, 0x5a, 0xa5, 0xa5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    ];
    let version = Shred::version_from_hash(&Hash::new(&hash));
    assert_eq!(version, 0x5a5a);
}
}
51 ledger/tests/blocktree.rs Normal file
@ -0,0 +1,51 @@
#[macro_use]
extern crate solana_ledger;

use solana_ledger::blocktree::{self, get_tmp_ledger_path, Blocktree};
use solana_ledger::entry;
use solana_sdk::hash::Hash;
use std::sync::Arc;
use std::thread::Builder;

#[test]
fn test_multiple_threads_insert_shred() {
    let blocktree_path = get_tmp_ledger_path!();
    let blocktree = Arc::new(Blocktree::open(&blocktree_path).unwrap());

    for _ in 0..100 {
        let num_threads = 10;

        // Create `num_threads` different ticks in slots 1..num_threads + 1, all
        // with parent = slot 0
        let threads: Vec<_> = (0..num_threads)
            .map(|i| {
                let entries = entry::create_ticks(1, Hash::default());
                let shreds = blocktree::entries_to_test_shreds(entries, i + 1, 0, false, 0);
                let blocktree_ = blocktree.clone();
                Builder::new()
                    .name("blocktree-writer".to_string())
                    .spawn(move || {
                        blocktree_.insert_shreds(shreds, None, false).unwrap();
                    })
                    .unwrap()
            })
            .collect();

        for t in threads {
            t.join().unwrap()
        }

        // Check slot 0 has the correct children
        let mut meta0 = blocktree.meta(0).unwrap().unwrap();
        meta0.next_slots.sort();
        let expected_next_slots: Vec<_> = (1..num_threads + 1).collect();
        assert_eq!(meta0.next_slots, expected_next_slots);

        // Delete slots for next iteration
        blocktree.purge_slots(0, None);
    }

    // Cleanup
    drop(blocktree);
    Blocktree::destroy(&blocktree_path).expect("Expected successful database destruction");
}
@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-local-cluster"
description = "Blockchain, Rebuilt for Scale"
version = "0.20.0"
version = "0.20.5"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@ -11,29 +11,29 @@ homepage = "https://solana.com/"
[dependencies]
log = "0.4.8"
rand = "0.6.5"
solana-bench-exchange = { path = "../bench-exchange", version = "0.20.0" }
solana-bench-tps = { path = "../bench-tps", version = "0.20.0" }
solana-config-api = { path = "../programs/config_api", version = "0.20.0" }
solana-core = { path = "../core", version = "0.20.0" }
solana-client = { path = "../client", version = "0.20.0" }
solana-drone = { path = "../drone", version = "0.20.0" }
solana-exchange-api = { path = "../programs/exchange_api", version = "0.20.0" }
solana-exchange-program = { path = "../programs/exchange_program", version = "0.20.0" }
solana-genesis-programs = { path = "../genesis_programs", version = "0.20.0" }
solana-ledger = { path = "../ledger", version = "0.20.0" }
solana-logger = { path = "../logger", version = "0.20.0" }
solana-move-loader-api = { path = "../programs/move_loader_api", version = "0.20.0", optional = true }
solana-move-loader-program = { path = "../programs/move_loader_program", version = "0.20.0", optional = true }
solana-runtime = { path = "../runtime", version = "0.20.0" }
solana-sdk = { path = "../sdk", version = "0.20.0" }
solana-stake-api = { path = "../programs/stake_api", version = "0.20.0" }
solana-storage-api = { path = "../programs/storage_api", version = "0.20.0" }
solana-storage-program = { path = "../programs/storage_program", version = "0.20.0" }
solana-vest-api = { path = "../programs/vest_api", version = "0.20.0" }
solana-vote-api = { path = "../programs/vote_api", version = "0.20.0" }
solana-bench-exchange = { path = "../bench-exchange", version = "0.20.5" }
solana-bench-tps = { path = "../bench-tps", version = "0.20.5" }
solana-config-api = { path = "../programs/config_api", version = "0.20.5" }
solana-core = { path = "../core", version = "0.20.5" }
solana-client = { path = "../client", version = "0.20.5" }
solana-drone = { path = "../drone", version = "0.20.5" }
solana-exchange-api = { path = "../programs/exchange_api", version = "0.20.5" }
solana-exchange-program = { path = "../programs/exchange_program", version = "0.20.5" }
solana-genesis-programs = { path = "../genesis_programs", version = "0.20.5" }
solana-ledger = { path = "../ledger", version = "0.20.5" }
solana-logger = { path = "../logger", version = "0.20.5" }
solana-move-loader-api = { path = "../programs/move_loader_api", version = "0.20.5", optional = true }
solana-move-loader-program = { path = "../programs/move_loader_program", version = "0.20.5", optional = true }
solana-runtime = { path = "../runtime", version = "0.20.5" }
solana-sdk = { path = "../sdk", version = "0.20.5" }
solana-stake-api = { path = "../programs/stake_api", version = "0.20.5" }
solana-storage-api = { path = "../programs/storage_api", version = "0.20.5" }
solana-storage-program = { path = "../programs/storage_program", version = "0.20.5" }
solana-vest-api = { path = "../programs/vest_api", version = "0.20.5" }
solana-vote-api = { path = "../programs/vote_api", version = "0.20.5" }
symlink = "0.1.0"
tempfile = "3.1.0"
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "0.20.0" }
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "0.20.5" }

[dev-dependencies]
serial_test = "0.2.0"
Some files were not shown because too many files have changed in this diff