Compare commits
130 Commits
SHA1:
825c0e2b6e 4334fef955 2333468350 86968cb311 3ce9a16e7f 3746c0c6ac 2b71bf37f9 3b526cc2de 198f87ffea 859d4db87e
1163144914 49f212247a e26840cb09 14bbcef722 5326f3ec73 fca4554d3f 15de250c2c a7b0fcc21e 4999fa6263 1f54be66c9
2e1c3a8338 fe934eb7a0 62a0c2f348 cfeef3a9eb 52009788ee 2acf4d874d f951d7d33f 20fe24f348 2e5cc41945 bc76b20e6d
3f6befe012 ee201cfb84 66ec12e869 addfc99ff4 91f0faa72d 2deebe4d73 5bc57ea004 c63bd05458 b4933f4c74 e7748c603b
6a5d782d6c 89fad8f566 01cac89867 dfb4729b02 6ab5f823b3 bf1ceab6ed dbaff495c8 f65caa66bf 2f455e18ef 7b155f384d
fd405239d9 8698156e27 2f0f218ad9 3cc75b4bab 0a0f8470d7 e46026f1fb fef5089d7e a844bd70da cbc01bd1b9 e096cc1101
0dc559d3cf 34f537adad 9558628537 940bf7081a 38c31c3b4e 253272d757 9b1bd8065f b2a5cf57ad 33c51ee75d d8aa107fae
82e02d0734 dae59bb3e1 e0e7fc8e52 9abc84c583 db6540772c 840ebfe625 953282dbf4 788d1199ac f6a8f718a8 f225bf6f01
b5f03a380b 140c75f613 6a59f67fdc 5943747001 f26f18d29d 9b58d72f52 0c885d6a04 70b51c0a5a 560660ae11 5436855b67
7351e5ed2c 7ee993fadf 9bf459e82f 545090ff17 bd8074507f cfc7b22c4c 5f1c637508 d888e0a6d7 10e808e1bd d9b03ca38b
608d75b348 cee3cee4ef 09e51323f0 da6f702129 02a83e7c6e 83263e1737 1f7ac22b60 747debae56 00b4186469 b087dabf4f
e00eb0a069 d4e49ffd06 0f34a190ea df2fb8f5b3 80d2a6046b 24273b826f 68d2616e35 f506d39339 bc58c9ec7e b57a52cd85
8631be42ac 09367369ef 23b8c95cc4 e61392b057 7deba20395 274c097f84 1c7cea1af4 4406496d2f a15fa4840c c9030660d6
Cargo.lock (generated, 6141 lines changed): file diff suppressed because it is too large.
@@ -48,10 +48,12 @@ members = [
"sdk",
"sdk-c",
"scripts",
"stake-monitor",
"sys-tuner",
"transaction-status",
"upload-perf",
"net-utils",
"version",
"vote-signer",
"cli",
"rayon-threadlimit",
@@ -1,6 +1,6 @@
[package]
name = "solana-archiver-lib"
version = "1.0.13"
version = "1.0.23"
description = "Solana Archiver Library"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -11,29 +11,32 @@ edition = "2018"
[dependencies]
bincode = "1.2.1"
crossbeam-channel = "0.3"
ed25519-dalek = "=1.0.0-pre.1"
ed25519-dalek = "=1.0.0-pre.3"
log = "0.4.8"
rand = "0.6.5"
rand_chacha = "0.1.1"
solana-client = { path = "../client", version = "1.0.13" }
solana-storage-program = { path = "../programs/storage", version = "1.0.13" }
rand = "0.7.0"
rand_chacha = "0.2.2"
solana-client = { path = "../client", version = "1.0.23" }
solana-storage-program = { path = "../programs/storage", version = "1.0.23" }
thiserror = "1.0"
serde = "1.0.104"
serde_json = "1.0.46"
serde_derive = "1.0.103"
solana-net-utils = { path = "../net-utils", version = "1.0.13" }
solana-chacha = { path = "../chacha", version = "1.0.13" }
solana-chacha-sys = { path = "../chacha-sys", version = "1.0.13" }
solana-ledger = { path = "../ledger", version = "1.0.13" }
solana-logger = { path = "../logger", version = "1.0.13" }
solana-perf = { path = "../perf", version = "1.0.13" }
solana-sdk = { path = "../sdk", version = "1.0.13" }
solana-core = { path = "../core", version = "1.0.13" }
solana-archiver-utils = { path = "../archiver-utils", version = "1.0.13" }
solana-metrics = { path = "../metrics", version = "1.0.13" }
solana-net-utils = { path = "../net-utils", version = "1.0.23" }
solana-chacha = { path = "../chacha", version = "1.0.23" }
solana-chacha-sys = { path = "../chacha-sys", version = "1.0.23" }
solana-ledger = { path = "../ledger", version = "1.0.23" }
solana-logger = { path = "../logger", version = "1.0.23" }
solana-perf = { path = "../perf", version = "1.0.23" }
solana-sdk = { path = "../sdk", version = "1.0.23" }
solana-core = { path = "../core", version = "1.0.23" }
solana-archiver-utils = { path = "../archiver-utils", version = "1.0.23" }
solana-metrics = { path = "../metrics", version = "1.0.23" }

[dev-dependencies]
hex = "0.4.0"

[lib]
name = "solana_archiver_lib"

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
@@ -1,7 +1,7 @@
use crate::result::ArchiverError;
use crossbeam_channel::unbounded;
use rand::{thread_rng, Rng, SeedableRng};
use rand_chacha::ChaChaRng;
use rand::{thread_rng, Rng};
use rand_chacha::{rand_core::SeedableRng, ChaChaRng};
use solana_archiver_utils::sample_file;
use solana_chacha::chacha::{chacha_cbc_encrypt_ledger, CHACHA_BLOCK_SIZE};
use solana_client::{
@@ -13,8 +13,7 @@ use solana_core::{
contact_info::ContactInfo,
gossip_service::GossipService,
packet::{limited_deserialize, PACKET_DATA_SIZE},
repair_service,
repair_service::{RepairService, RepairSlotRange, RepairStrategy},
repair_service::{self, RepairService, RepairSlotRange, RepairStats, RepairStrategy},
serve_repair::ServeRepair,
shred_fetch_stage::ShredFetchStage,
sigverify_stage::{DisabledSigVerifier, SigVerifyStage},
@@ -199,7 +198,7 @@ impl Archiver {

info!("Connecting to the cluster via {:?}", cluster_entrypoint);
let (nodes, _) =
match solana_core::gossip_service::discover_cluster(&cluster_entrypoint.gossip, 1) {
match solana_core::gossip_service::discover_cluster(&cluster_entrypoint.gossip, 2) {
Ok(nodes_and_archivers) => nodes_and_archivers,
Err(e) => {
//shutdown services before exiting
@@ -839,13 +838,14 @@ impl Archiver {
repair_service::MAX_REPAIR_LENGTH,
&repair_slot_range,
);
let mut repair_stats = RepairStats::default();
//iter over the repairs and send them
if let Ok(repairs) = repairs {
let reqs: Vec<_> = repairs
.into_iter()
.filter_map(|repair_request| {
serve_repair
.map_repair_request(&repair_request)
.map_repair_request(&repair_request, &mut repair_stats, Some(0))
.map(|result| ((archiver_info.gossip, result), repair_request))
.ok()
})
@@ -1,4 +1,3 @@
use serde_json;
use solana_client::client_error;
use solana_ledger::blockstore;
use solana_sdk::transport;
@@ -1,6 +1,6 @@
[package]
name = "solana-archiver-utils"
version = "1.0.13"
version = "1.0.23"
description = "Solana Archiver Utils"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -10,16 +10,19 @@ edition = "2018"

[dependencies]
log = "0.4.8"
rand = "0.6.5"
solana-chacha = { path = "../chacha", version = "1.0.13" }
solana-chacha-sys = { path = "../chacha-sys", version = "1.0.13" }
solana-ledger = { path = "../ledger", version = "1.0.13" }
solana-logger = { path = "../logger", version = "1.0.13" }
solana-perf = { path = "../perf", version = "1.0.13" }
solana-sdk = { path = "../sdk", version = "1.0.13" }
rand = "0.7.0"
solana-chacha = { path = "../chacha", version = "1.0.23" }
solana-chacha-sys = { path = "../chacha-sys", version = "1.0.23" }
solana-ledger = { path = "../ledger", version = "1.0.23" }
solana-logger = { path = "../logger", version = "1.0.23" }
solana-perf = { path = "../perf", version = "1.0.23" }
solana-sdk = { path = "../sdk", version = "1.0.23" }

[dev-dependencies]
hex = "0.4.0"

[lib]
name = "solana_archiver_utils"

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-archiver"
version = "1.0.13"
version = "1.0.23"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -10,11 +10,14 @@ homepage = "https://solana.com/"
[dependencies]
clap = "2.33.0"
console = "0.9.2"
solana-clap-utils = { path = "../clap-utils", version = "1.0.13" }
solana-core = { path = "../core", version = "1.0.13" }
solana-logger = { path = "../logger", version = "1.0.13" }
solana-metrics = { path = "../metrics", version = "1.0.13" }
solana-archiver-lib = { path = "../archiver-lib", version = "1.0.13" }
solana-net-utils = { path = "../net-utils", version = "1.0.13" }
solana-sdk = { path = "../sdk", version = "1.0.13" }
solana-clap-utils = { path = "../clap-utils", version = "1.0.23" }
solana-core = { path = "../core", version = "1.0.23" }
solana-logger = { path = "../logger", version = "1.0.23" }
solana-metrics = { path = "../metrics", version = "1.0.23" }
solana-archiver-lib = { path = "../archiver-lib", version = "1.0.23" }
solana-net-utils = { path = "../net-utils", version = "1.0.23" }
solana-sdk = { path = "../sdk", version = "1.0.23" }

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-banking-bench"
version = "1.0.13"
version = "1.0.23"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -10,11 +10,14 @@ homepage = "https://solana.com/"
[dependencies]
log = "0.4.6"
rayon = "1.2.0"
solana-core = { path = "../core", version = "1.0.13" }
solana-ledger = { path = "../ledger", version = "1.0.13" }
solana-logger = { path = "../logger", version = "1.0.13" }
solana-runtime = { path = "../runtime", version = "1.0.13" }
solana-measure = { path = "../measure", version = "1.0.13" }
solana-sdk = { path = "../sdk", version = "1.0.13" }
rand = "0.6.5"
solana-core = { path = "../core", version = "1.0.23" }
solana-ledger = { path = "../ledger", version = "1.0.23" }
solana-logger = { path = "../logger", version = "1.0.23" }
solana-runtime = { path = "../runtime", version = "1.0.23" }
solana-measure = { path = "../measure", version = "1.0.23" }
solana-sdk = { path = "../sdk", version = "1.0.23" }
rand = "0.7.0"
crossbeam-channel = "0.3"

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

@@ -246,7 +246,7 @@ fn main() {
poh_recorder.lock().unwrap().set_bank(&bank);
assert!(poh_recorder.lock().unwrap().bank().is_some());
if bank.slot() > 32 {
bank_forks.set_root(root, &None);
bank_forks.set_root(root, &None, None);
root += 1;
}
debug!(
@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-exchange"
version = "1.0.13"
version = "1.0.23"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -14,21 +14,24 @@ itertools = "0.8.2"
log = "0.4.8"
num-derive = "0.3"
num-traits = "0.2"
rand = "0.6.5"
rand = "0.7.0"
rayon = "1.2.0"
serde_json = "1.0.46"
serde_yaml = "0.8.11"
solana-clap-utils = { path = "../clap-utils", version = "1.0.13" }
solana-core = { path = "../core", version = "1.0.13" }
solana-genesis = { path = "../genesis", version = "1.0.13" }
solana-client = { path = "../client", version = "1.0.13" }
solana-faucet = { path = "../faucet", version = "1.0.13" }
solana-exchange-program = { path = "../programs/exchange", version = "1.0.13" }
solana-logger = { path = "../logger", version = "1.0.13" }
solana-metrics = { path = "../metrics", version = "1.0.13" }
solana-net-utils = { path = "../net-utils", version = "1.0.13" }
solana-runtime = { path = "../runtime", version = "1.0.13" }
solana-sdk = { path = "../sdk", version = "1.0.13" }
solana-clap-utils = { path = "../clap-utils", version = "1.0.23" }
solana-core = { path = "../core", version = "1.0.23" }
solana-genesis = { path = "../genesis", version = "1.0.23" }
solana-client = { path = "../client", version = "1.0.23" }
solana-faucet = { path = "../faucet", version = "1.0.23" }
solana-exchange-program = { path = "../programs/exchange", version = "1.0.23" }
solana-logger = { path = "../logger", version = "1.0.23" }
solana-metrics = { path = "../metrics", version = "1.0.23" }
solana-net-utils = { path = "../net-utils", version = "1.0.23" }
solana-runtime = { path = "../runtime", version = "1.0.23" }
solana-sdk = { path = "../sdk", version = "1.0.23" }

[dev-dependencies]
solana-local-cluster = { path = "../local-cluster", version = "1.0.13" }
solana-local-cluster = { path = "../local-cluster", version = "1.0.23" }

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
@@ -2,14 +2,17 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-streamer"
version = "1.0.13"
version = "1.0.23"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"

[dependencies]
clap = "2.33.0"
solana-clap-utils = { path = "../clap-utils", version = "1.0.13" }
solana-core = { path = "../core", version = "1.0.13" }
solana-logger = { path = "../logger", version = "1.0.13" }
solana-net-utils = { path = "../net-utils", version = "1.0.13" }
solana-clap-utils = { path = "../clap-utils", version = "1.0.23" }
solana-core = { path = "../core", version = "1.0.23" }
solana-logger = { path = "../logger", version = "1.0.23" }
solana-net-utils = { path = "../net-utils", version = "1.0.23" }

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
@@ -2,7 +2,7 @@
authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-bench-tps"
version = "1.0.13"
version = "1.0.23"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -14,24 +14,27 @@ log = "0.4.8"
rayon = "1.2.0"
serde_json = "1.0.46"
serde_yaml = "0.8.11"
solana-clap-utils = { path = "../clap-utils", version = "1.0.13" }
solana-core = { path = "../core", version = "1.0.13" }
solana-genesis = { path = "../genesis", version = "1.0.13" }
solana-client = { path = "../client", version = "1.0.13" }
solana-faucet = { path = "../faucet", version = "1.0.13" }
solana-librapay = { path = "../programs/librapay", version = "1.0.13", optional = true }
solana-logger = { path = "../logger", version = "1.0.13" }
solana-metrics = { path = "../metrics", version = "1.0.13" }
solana-measure = { path = "../measure", version = "1.0.13" }
solana-net-utils = { path = "../net-utils", version = "1.0.13" }
solana-runtime = { path = "../runtime", version = "1.0.13" }
solana-sdk = { path = "../sdk", version = "1.0.13" }
solana-move-loader-program = { path = "../programs/move_loader", version = "1.0.13", optional = true }
solana-clap-utils = { path = "../clap-utils", version = "1.0.23" }
solana-core = { path = "../core", version = "1.0.23" }
solana-genesis = { path = "../genesis", version = "1.0.23" }
solana-client = { path = "../client", version = "1.0.23" }
solana-faucet = { path = "../faucet", version = "1.0.23" }
#solana-librapay = { path = "../programs/librapay", version = "1.0.20", optional = true }
solana-logger = { path = "../logger", version = "1.0.23" }
solana-metrics = { path = "../metrics", version = "1.0.23" }
solana-measure = { path = "../measure", version = "1.0.23" }
solana-net-utils = { path = "../net-utils", version = "1.0.23" }
solana-runtime = { path = "../runtime", version = "1.0.23" }
solana-sdk = { path = "../sdk", version = "1.0.23" }
#solana-move-loader-program = { path = "../programs/move_loader", version = "1.0.20", optional = true }

[dev-dependencies]
serial_test = "0.3.2"
serial_test_derive = "0.4.0"
solana-local-cluster = { path = "../local-cluster", version = "1.0.13" }
solana-local-cluster = { path = "../local-cluster", version = "1.0.23" }

[features]
move = ["solana-librapay", "solana-move-loader-program"]
#[features]
#move = ["solana-librapay", "solana-move-loader-program"]

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
@@ -1,6 +1,6 @@
[package]
name = "solana-chacha-cuda"
version = "1.0.13"
version = "1.0.23"
description = "Solana Chacha Cuda APIs"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -10,15 +10,18 @@ edition = "2018"

[dependencies]
log = "0.4.8"
solana-archiver-utils = { path = "../archiver-utils", version = "1.0.13" }
solana-chacha = { path = "../chacha", version = "1.0.13" }
solana-ledger = { path = "../ledger", version = "1.0.13" }
solana-logger = { path = "../logger", version = "1.0.13" }
solana-perf = { path = "../perf", version = "1.0.13" }
solana-sdk = { path = "../sdk", version = "1.0.13" }
solana-archiver-utils = { path = "../archiver-utils", version = "1.0.23" }
solana-chacha = { path = "../chacha", version = "1.0.23" }
solana-ledger = { path = "../ledger", version = "1.0.23" }
solana-logger = { path = "../logger", version = "1.0.23" }
solana-perf = { path = "../perf", version = "1.0.23" }
solana-sdk = { path = "../sdk", version = "1.0.23" }

[dev-dependencies]
hex-literal = "0.2.1"

[lib]
name = "solana_chacha_cuda"

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
@@ -1,6 +1,6 @@
[package]
name = "solana-chacha-sys"
version = "1.0.13"
version = "1.0.23"
description = "Solana chacha-sys"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -10,3 +10,6 @@ edition = "2018"

[build-dependencies]
cc = "1.0.49"

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
@@ -1,6 +1,6 @@
[package]
name = "solana-chacha"
version = "1.0.13"
version = "1.0.23"
description = "Solana Chacha APIs"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -10,16 +10,19 @@ edition = "2018"

[dependencies]
log = "0.4.8"
rand = "0.6.5"
rand_chacha = "0.1.1"
solana-chacha-sys = { path = "../chacha-sys", version = "1.0.13" }
solana-ledger = { path = "../ledger", version = "1.0.13" }
solana-logger = { path = "../logger", version = "1.0.13" }
solana-perf = { path = "../perf", version = "1.0.13" }
solana-sdk = { path = "../sdk", version = "1.0.13" }
rand = "0.7.0"
rand_chacha = "0.2.2"
solana-chacha-sys = { path = "../chacha-sys", version = "1.0.23" }
solana-ledger = { path = "../ledger", version = "1.0.23" }
solana-logger = { path = "../logger", version = "1.0.23" }
solana-perf = { path = "../perf", version = "1.0.23" }
solana-sdk = { path = "../sdk", version = "1.0.23" }

[dev-dependencies]
hex-literal = "0.2.1"

[lib]
name = "solana_chacha"

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
@@ -2,6 +2,16 @@
# Build steps that run after the primary pipeline on pushes and tags.
# Pull requests to not run these steps.
steps:
- command: "ci/publish-tarball.sh"
timeout_in_minutes: 60
name: "publish tarball"
- command: "ci/publish-docs.sh"
timeout_in_minutes: 15
name: "publish docs"
- command: "ci/publish-bpf-sdk.sh"
timeout_in_minutes: 5
name: "publish bpf sdk"
- wait
- command: "sdk/docker-solana/build.sh"
timeout_in_minutes: 60
name: "publish docker"
@@ -9,12 +19,6 @@ steps:
timeout_in_minutes: 240
name: "publish crate"
branches: "!master"
- command: "ci/publish-bpf-sdk.sh"
timeout_in_minutes: 5
name: "publish bpf sdk"
- command: "ci/publish-tarball.sh"
timeout_in_minutes: 60
name: "publish tarball"
- command: "ci/publish-docs.sh"
timeout_in_minutes: 15
name: "publish docs"
# - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-move.sh"
#   name: "move"
#   timeout_in_minutes: 20
@@ -2,12 +2,15 @@
# other than those in docs/ are modified

steps:
- command: "ci/shellcheck.sh"
name: "shellcheck"
timeout_in_minutes: 5

- command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_nightly_docker_image ci/test-coverage.sh"
name: "coverage"
timeout_in_minutes: 30
- wait
- command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-stable.sh"
name: "stable"
timeout_in_minutes: 60
artifact_paths: "log-*.txt"
- wait

- command: "ci/test-stable-perf.sh"
name: "stable-perf"
timeout_in_minutes: 40
@@ -17,21 +20,7 @@ steps:
- command: "ci/test-bench.sh"
name: "bench"
timeout_in_minutes: 30
- command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-stable.sh"
name: "stable"
timeout_in_minutes: 60
artifact_paths: "log-*.txt"
agents:
- "queue=rpc-test-capable"
- command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-move.sh"
name: "move"
timeout_in_minutes: 20
- command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-local-cluster.sh"
name: "local-cluster"
timeout_in_minutes: 45
artifact_paths: "log-*.txt"
- command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_nightly_docker_image ci/test-coverage.sh"
name: "coverage"
timeout_in_minutes: 30
agents:
- "queue=rpc-test-capable"
@@ -8,6 +8,9 @@ steps:
- command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_nightly_docker_image ci/test-checks.sh"
name: "checks"
timeout_in_minutes: 20
- command: "ci/shellcheck.sh"
name: "shellcheck"
timeout_in_minutes: 5

- wait
@@ -49,7 +49,7 @@ else
# ~/.cargo
ARGS+=(--volume "$PWD:/home")
fi
ARGS+=(--env "CARGO_HOME=/home/.cargo")
ARGS+=(--env "HOME=/home" --env "CARGO_HOME=/home/.cargo")

# kcov tries to set the personality of the binary which docker
# doesn't allow by default.
@@ -1,4 +1,4 @@
FROM solanalabs/rust:1.42.0
FROM solanalabs/rust:1.43.0
ARG date

RUN set -x \
@@ -1,6 +1,6 @@
# Note: when the rust version is changed also modify
# ci/rust-version.sh to pick up the new image tag
FROM rust:1.42.0
FROM rust:1.43.0

# Add Google Protocol Buffers for Libra's metrics library.
ENV PROTOC_VERSION 3.8.0
@@ -71,7 +71,7 @@ echo --- Creating release tarball
export CHANNEL

source ci/rust-version.sh stable
scripts/cargo-install-all.sh +"$rust_stable" --use-move solana-release
scripts/cargo-install-all.sh +"$rust_stable" solana-release

tar cvf solana-release-$TARGET.tar solana-release
bzip2 solana-release-$TARGET.tar
@@ -95,9 +95,8 @@ fi
source ci/upload-ci-artifact.sh

for file in solana-release-$TARGET.tar.bz2 solana-release-$TARGET.yml solana-install-init-"$TARGET"* $MAYBE_TARBALLS; do
upload-ci-artifact "$file"

if [[ -n $DO_NOT_PUBLISH_TAR ]]; then
upload-ci-artifact "$file"
echo "Skipped $file due to DO_NOT_PUBLISH_TAR"
continue
fi
@@ -1,28 +1,30 @@
#
# This file maintains the rust versions for use by CI.
#
# Build with stable rust, updating the stable toolchain if necessary:
#   $ source ci/rust-version.sh stable
#   $ cargo +"$rust_stable" build
#
# Build with nightly rust, updating the nightly toolchain if necessary:
#   $ source ci/rust-version.sh nightly
#   $ cargo +"$rust_nightly" build
#
# Obtain the environment variables without any automatic toolchain updating:
#   $ source ci/rust-version.sh
#
# Obtain the environment variables updating both stable and nightly, only stable, or
# only nightly:
#   $ source ci/rust-version.sh all
#   $ source ci/rust-version.sh stable
#   $ source ci/rust-version.sh nightly

# Then to build with either stable or nightly:
#   $ cargo +"$rust_stable" build
#   $ cargo +"$rust_nightly" build
#

if [[ -n $RUST_STABLE_VERSION ]]; then
stable_version="$RUST_STABLE_VERSION"
else
stable_version=1.42.0
stable_version=1.43.0
fi

if [[ -n $RUST_NIGHTLY_VERSION ]]; then
nightly_version="$RUST_NIGHTLY_VERSION"
else
nightly_version=2020-03-12
nightly_version=2020-04-23
fi

@@ -51,6 +53,10 @@ export rust_nightly_docker_image=solanalabs/rust-nightly:"$nightly_version"
nightly)
rustup_install "$rust_nightly"
;;
all)
rustup_install "$rust_stable"
rustup_install "$rust_nightly"
;;
*)
echo "Note: ignoring unknown argument: $1"
;;
@@ -25,7 +25,7 @@ source ci/_
source ci/upload-ci-artifact.sh

eval "$(ci/channel-info.sh)"
source ci/rust-version.sh nightly
source ci/rust-version.sh all

set -o pipefail
export RUST_BACKTRACE=1
@@ -38,11 +38,16 @@ test -d target/release/bpf && find target/release/bpf -name '*.d' -delete
# Clear the BPF sysroot files, they are not automatically rebuilt
rm -rf target/xargo # Issue #3105

# Limit compiler jobs to reduce memory usage
# on machines with 1gb/thread of memory
NPROC=$(nproc)
NPROC=$((NPROC>16 ? 16 : NPROC))

echo "Executing $testName"
case $testName in
test-stable)
_ cargo +"$rust_stable" test --all --exclude solana-local-cluster ${V:+--verbose} -- --nocapture
_ cargo +"$rust_stable" test --manifest-path bench-tps/Cargo.toml --features=move ${V:+--verbose} test_bench_tps_local_cluster_move -- --nocapture
_ cargo +"$rust_stable" test --jobs "$NPROC" --all --exclude solana-local-cluster ${V:+--verbose} -- --nocapture
#_ cargo +"$rust_stable" test --manifest-path bench-tps/Cargo.toml --features=move ${V:+--verbose} test_bench_tps_local_cluster_move -- --nocapture
;;
test-stable-perf)
ci/affects-files.sh \
@@ -1,6 +1,6 @@
[package]
name = "solana-clap-utils"
version = "1.0.13"
version = "1.0.23"
description = "Solana utilities for the clap"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@@ -11,8 +11,8 @@ edition = "2018"
[dependencies]
clap = "2.33.0"
rpassword = "4.0"
solana-remote-wallet = { path = "../remote-wallet", version = "1.0.13" }
solana-sdk = { path = "../sdk", version = "1.0.13" }
solana-remote-wallet = { path = "../remote-wallet", version = "1.0.23" }
solana-sdk = { path = "../sdk", version = "1.0.23" }
thiserror = "1.0.11"
tiny-bip39 = "0.7.0"
url = "2.1.0"
@@ -20,3 +20,6 @@ chrono = "0.4"

[lib]
name = "solana_clap_utils"

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
clap-utils/src/commitment.rs (new file, 18 lines)
@@ -0,0 +1,18 @@
use crate::ArgConstant;
use clap::Arg;

pub const COMMITMENT_ARG: ArgConstant<'static> = ArgConstant {
    name: "commitment",
    long: "commitment",
    help: "Return information at the selected commitment level",
};

pub fn commitment_arg<'a, 'b>() -> Arg<'a, 'b> {
    Arg::with_name(COMMITMENT_ARG.name)
        .long(COMMITMENT_ARG.long)
        .takes_value(true)
        .possible_values(&["recent", "root", "max"])
        .default_value("recent")
        .value_name("COMMITMENT_LEVEL")
        .help(COMMITMENT_ARG.help)
}
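This new module centralizes what used to be per-subcommand `--confirmed` flags. A minimal sketch of how a subcommand might adopt it; the `example_app` wrapper and app name are illustrative, while `commitment_arg` comes from the module above and mirrors the `.arg(commitment_arg())` calls made later in cluster_query.rs:

```rust
use clap::{App, SubCommand};
use solana_clap_utils::commitment::commitment_arg;

// Hypothetical subcommand wiring; adds --commitment <COMMITMENT_LEVEL>
// with possible values recent/root/max and a default of "recent".
fn example_app<'a, 'b>() -> App<'a, 'b> {
    App::new("example").subcommand(
        SubCommand::with_name("epoch-info")
            .about("Get information about the current epoch")
            .arg(commitment_arg()),
    )
}
```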
@@ -7,6 +7,7 @@ use clap::ArgMatches;
use solana_remote_wallet::remote_wallet::RemoteWalletManager;
use solana_sdk::{
clock::UnixTimestamp,
commitment_config::CommitmentConfig,
native_token::sol_to_lamports,
pubkey::Pubkey,
signature::{read_keypair_file, Keypair, Signature, Signer},
@@ -162,6 +163,15 @@ pub fn lamports_of_sol(matches: &ArgMatches<'_>, name: &str) -> Option<u64> {
value_of(matches, name).map(sol_to_lamports)
}

pub fn commitment_of(matches: &ArgMatches<'_>, name: &str) -> Option<CommitmentConfig> {
matches.value_of(name).map(|value| match value {
"max" => CommitmentConfig::max(),
"recent" => CommitmentConfig::recent(),
"root" => CommitmentConfig::root(),
_ => CommitmentConfig::default(),
})
}

#[cfg(test)]
mod tests {
use super::*;
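On the parsing side, the selected level is read back with the new `commitment_of` helper. A hedged sketch; the `resolve_commitment` wrapper is illustrative and not part of the patch:

```rust
use clap::ArgMatches;
use solana_clap_utils::{commitment::COMMITMENT_ARG, input_parsers::commitment_of};
use solana_sdk::commitment_config::CommitmentConfig;

// commitment_arg() supplies a "recent" default, so the unwrap() seen in
// cluster_query.rs is safe; this sketch falls back explicitly anyway.
fn resolve_commitment(matches: &ArgMatches<'_>) -> CommitmentConfig {
    commitment_of(matches, COMMITMENT_ARG.long).unwrap_or_else(CommitmentConfig::recent)
}
```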
@@ -42,6 +42,7 @@ impl std::fmt::Debug for DisplayError {
}
}

pub mod commitment;
pub mod input_parsers;
pub mod input_validators;
pub mod keypair;
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-cli-config"
description = "Blockchain, Rebuilt for Scale"
version = "1.0.13"
version = "1.0.23"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -15,3 +15,6 @@ serde = "1.0.104"
serde_derive = "1.0.103"
serde_yaml = "0.8.11"
url = "2.1.1"

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
@@ -1,10 +1,6 @@
// Wallet settings that can be configured for long-term use
use serde_derive::{Deserialize, Serialize};
use std::{
fs::{create_dir_all, File},
io::{self, Write},
path::Path,
};
use std::io;
use url::Url;

lazy_static! {
@@ -46,23 +42,11 @@ impl Default for Config {

impl Config {
pub fn load(config_file: &str) -> Result<Self, io::Error> {
let file = File::open(config_file.to_string())?;
let config = serde_yaml::from_reader(file)
.map_err(|err| io::Error::new(io::ErrorKind::Other, format!("{:?}", err)))?;
Ok(config)
crate::load_config_file(config_file)
}

pub fn save(&self, config_file: &str) -> Result<(), io::Error> {
let serialized = serde_yaml::to_string(self)
.map_err(|err| io::Error::new(io::ErrorKind::Other, format!("{:?}", err)))?;

if let Some(outdir) = Path::new(&config_file).parent() {
create_dir_all(outdir)?;
}
let mut file = File::create(config_file)?;
file.write_all(&serialized.into_bytes())?;

Ok(())
crate::save_config_file(self, config_file)
}

pub fn compute_websocket_url(json_rpc_url: &str) -> String {
@@ -76,17 +60,38 @@ impl Config {
ws_url
.set_scheme(if is_secure { "wss" } else { "ws" })
.expect("unable to set scheme");
let ws_port = match json_rpc_url.port() {
Some(port) => port + 1,
None => {
if is_secure {
8901
} else {
8900
}
}
};
ws_url.set_port(Some(ws_port)).expect("unable to set port");
if let Some(port) = json_rpc_url.port() {
ws_url.set_port(Some(port + 1)).expect("unable to set port");
}
ws_url.to_string()
}
}

#[cfg(test)]
mod test {
use super::*;

#[test]
fn compute_websocket_url() {
assert_eq!(
Config::compute_websocket_url(&"http://devnet.solana.com"),
"ws://devnet.solana.com/".to_string()
);

assert_eq!(
Config::compute_websocket_url(&"https://devnet.solana.com"),
"wss://devnet.solana.com/".to_string()
);

assert_eq!(
Config::compute_websocket_url(&"http://example.com:8899"),
"ws://example.com:8900/".to_string()
);
assert_eq!(
Config::compute_websocket_url(&"https://example.com:1234"),
"wss://example.com:1235/".to_string()
);

assert_eq!(Config::compute_websocket_url(&"garbage"), String::new());
}
}
@@ -3,3 +3,37 @@ extern crate lazy_static;

mod config;
pub use config::{Config, CONFIG_FILE};

use std::{
fs::{create_dir_all, File},
io::{self, Write},
path::Path,
};

pub fn load_config_file<T, P>(config_file: P) -> Result<T, io::Error>
where
T: serde::de::DeserializeOwned,
P: AsRef<Path>,
{
let file = File::open(config_file)?;
let config = serde_yaml::from_reader(file)
.map_err(|err| io::Error::new(io::ErrorKind::Other, format!("{:?}", err)))?;
Ok(config)
}

pub fn save_config_file<T, P>(config: &T, config_file: P) -> Result<(), io::Error>
where
T: serde::ser::Serialize,
P: AsRef<Path>,
{
let serialized = serde_yaml::to_string(config)
.map_err(|err| io::Error::new(io::ErrorKind::Other, format!("{:?}", err)))?;

if let Some(outdir) = config_file.as_ref().parent() {
create_dir_all(outdir)?;
}
let mut file = File::create(config_file)?;
file.write_all(&serialized.into_bytes())?;

Ok(())
}
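These generic helpers let any tool with a serde-serializable settings struct reuse the YAML load/save path that `Config` now delegates to. A minimal sketch, assuming a made-up `ToolConfig` type:

```rust
use serde_derive::{Deserialize, Serialize};

// Hypothetical settings struct; any Serialize + DeserializeOwned type works.
#[derive(Serialize, Deserialize)]
struct ToolConfig {
    json_rpc_url: String,
}

fn roundtrip(path: &str) -> Result<ToolConfig, std::io::Error> {
    let config = ToolConfig {
        json_rpc_url: "http://127.0.0.1:8899".to_string(),
    };
    // save_config_file creates parent directories before writing the YAML.
    solana_cli_config::save_config_file(&config, path)?;
    solana_cli_config::load_config_file(path)
}
```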
@@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-cli"
description = "Blockchain, Rebuilt for Scale"
version = "1.0.13"
version = "1.0.23"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@@ -26,30 +26,34 @@ reqwest = { version = "0.10.1", default-features = false, features = ["blocking"
serde = "1.0.104"
serde_derive = "1.0.103"
serde_json = "1.0.46"
solana-budget-program = { path = "../programs/budget", version = "1.0.13" }
solana-clap-utils = { path = "../clap-utils", version = "1.0.13" }
solana-cli-config = { path = "../cli-config", version = "1.0.13" }
solana-client = { path = "../client", version = "1.0.13" }
solana-config-program = { path = "../programs/config", version = "1.0.13" }
solana-faucet = { path = "../faucet", version = "1.0.13" }
solana-logger = { path = "../logger", version = "1.0.13" }
solana-net-utils = { path = "../net-utils", version = "1.0.13" }
solana-remote-wallet = { path = "../remote-wallet", version = "1.0.13" }
solana-runtime = { path = "../runtime", version = "1.0.13" }
solana-sdk = { path = "../sdk", version = "1.0.13" }
solana-stake-program = { path = "../programs/stake", version = "1.0.13" }
solana-storage-program = { path = "../programs/storage", version = "1.0.13" }
solana-vote-program = { path = "../programs/vote", version = "1.0.13" }
solana-vote-signer = { path = "../vote-signer", version = "1.0.13" }
solana-budget-program = { path = "../programs/budget", version = "1.0.23" }
solana-clap-utils = { path = "../clap-utils", version = "1.0.23" }
solana-cli-config = { path = "../cli-config", version = "1.0.23" }
solana-client = { path = "../client", version = "1.0.23" }
solana-config-program = { path = "../programs/config", version = "1.0.23" }
solana-faucet = { path = "../faucet", version = "1.0.23" }
solana-logger = { path = "../logger", version = "1.0.23" }
solana-net-utils = { path = "../net-utils", version = "1.0.23" }
solana-remote-wallet = { path = "../remote-wallet", version = "1.0.23" }
solana-runtime = { path = "../runtime", version = "1.0.23" }
solana-sdk = { path = "../sdk", version = "1.0.23" }
solana-stake-program = { path = "../programs/stake", version = "1.0.23" }
solana-storage-program = { path = "../programs/storage", version = "1.0.23" }
solana-transaction-status = { path = "../transaction-status", version = "1.0.23" }
solana-vote-program = { path = "../programs/vote", version = "1.0.23" }
solana-vote-signer = { path = "../vote-signer", version = "1.0.23" }
titlecase = "1.1.0"
thiserror = "1.0.11"
url = "2.1.1"

[dev-dependencies]
solana-core = { path = "../core", version = "1.0.13" }
solana-budget-program = { path = "../programs/budget", version = "1.0.13" }
solana-core = { path = "../core", version = "1.0.23" }
solana-budget-program = { path = "../programs/budget", version = "1.0.23" }
tempfile = "3.1.0"

[[bin]]
name = "solana"
path = "src/main.rs"

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
cli/src/cli.rs (100 lines changed)
@ -405,6 +405,7 @@ pub enum CliCommand {
|
||||
to: Pubkey,
|
||||
from: SignerIndex,
|
||||
sign_only: bool,
|
||||
no_wait: bool,
|
||||
blockhash_query: BlockhashQuery,
|
||||
nonce_account: Option<Pubkey>,
|
||||
nonce_authority: SignerIndex,
|
||||
@ -537,7 +538,7 @@ impl CliConfig<'_> {
|
||||
if !self.signers.is_empty() {
|
||||
self.signers[0].try_pubkey()
|
||||
} else {
|
||||
Err(SignerError::CustomError(
|
||||
Err(SignerError::Custom(
|
||||
"Default keypair must be set if pubkey arg not provided".to_string(),
|
||||
))
|
||||
}
|
||||
@ -902,6 +903,7 @@ pub fn parse_command(
|
||||
let lamports = lamports_of_sol(matches, "amount").unwrap();
|
||||
let to = pubkey_of_signer(matches, "to", wallet_manager)?.unwrap();
|
||||
let sign_only = matches.is_present(SIGN_ONLY_ARG.name);
|
||||
let no_wait = matches.is_present("no_wait");
|
||||
let blockhash_query = BlockhashQuery::new_from_matches(matches);
|
||||
let nonce_account = pubkey_of_signer(matches, NONCE_ARG.name, wallet_manager)?;
|
||||
let (nonce_authority, nonce_authority_pubkey) =
|
||||
@ -927,6 +929,7 @@ pub fn parse_command(
|
||||
lamports,
|
||||
to,
|
||||
sign_only,
|
||||
no_wait,
|
||||
blockhash_query,
|
||||
nonce_account,
|
||||
nonce_authority: signer_info.index_of(nonce_authority_pubkey).unwrap(),
|
||||
@ -1152,12 +1155,48 @@ fn process_balance(
|
||||
}
|
||||
}
|
||||
|
||||
fn process_confirm(rpc_client: &RpcClient, signature: &Signature) -> ProcessResult {
|
||||
match rpc_client.get_signature_status(&signature.to_string()) {
|
||||
fn process_confirm(
|
||||
rpc_client: &RpcClient,
|
||||
config: &CliConfig,
|
||||
signature: &Signature,
|
||||
) -> ProcessResult {
|
||||
match rpc_client.get_signature_status_with_commitment_and_history(
|
||||
&signature,
|
||||
CommitmentConfig::max(),
|
||||
true,
|
||||
) {
|
||||
Ok(status) => {
|
||||
if let Some(result) = status {
|
||||
match result {
|
||||
Ok(_) => Ok("Confirmed".to_string()),
|
||||
Ok(_) => {
|
||||
if config.verbose {
|
||||
match rpc_client.get_confirmed_transaction(
|
||||
signature,
|
||||
solana_transaction_status::TransactionEncoding::Binary,
|
||||
) {
|
||||
Ok(confirmed_transaction) => {
|
||||
println!("\nTransaction:");
|
||||
crate::display::println_transaction(
|
||||
&confirmed_transaction
|
||||
.transaction
|
||||
.transaction
|
||||
.decode()
|
||||
.expect("Successful decode"),
|
||||
&confirmed_transaction.transaction.meta,
|
||||
" ",
|
||||
);
|
||||
println!();
|
||||
Ok(format!("Confirmed in slot {}", confirmed_transaction.slot))
|
||||
}
|
||||
Err(err) => Ok(format!(
|
||||
"Confirmed. Unable to get confirmed transaction details: {}",
|
||||
err
|
||||
)),
|
||||
}
|
||||
} else {
|
||||
Ok("Confirmed".to_string())
|
||||
}
|
||||
}
|
||||
Err(err) => Ok(format!("Transaction failed with error: {}", err)),
|
||||
}
|
||||
} else {
|
||||
@ -1488,6 +1527,7 @@ fn process_transfer(
|
||||
to: &Pubkey,
|
||||
from: SignerIndex,
|
||||
sign_only: bool,
|
||||
no_wait: bool,
|
||||
blockhash_query: &BlockhashQuery,
|
||||
nonce_account: Option<&Pubkey>,
|
||||
nonce_authority: SignerIndex,
|
||||
@ -1534,7 +1574,11 @@ fn process_transfer(
|
||||
&fee_calculator,
|
||||
&tx.message,
|
||||
)?;
|
||||
let result = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &config.signers);
|
||||
let result = if no_wait {
|
||||
rpc_client.send_transaction(&tx)
|
||||
} else {
|
||||
rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &config.signers)
|
||||
};
|
||||
log_instruction_custom_error::<SystemError>(result)
|
||||
}
|
||||
}
|
||||
@ -2026,7 +2070,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
|
||||
// Cancel a contract by contract Pubkey
|
||||
CliCommand::Cancel(pubkey) => process_cancel(&rpc_client, config, &pubkey),
|
||||
// Confirm the last client transaction by signature
|
||||
CliCommand::Confirm(signature) => process_confirm(&rpc_client, signature),
|
||||
CliCommand::Confirm(signature) => process_confirm(&rpc_client, config, signature),
|
||||
// If client has positive balance, pay lamports to another address
|
||||
CliCommand::Pay(PayCommand {
|
||||
lamports,
|
||||
@ -2080,6 +2124,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
|
||||
to,
|
||||
from,
|
||||
sign_only,
|
||||
no_wait,
|
||||
ref blockhash_query,
|
||||
ref nonce_account,
|
||||
nonce_authority,
|
||||
@ -2091,6 +2136,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
|
||||
to,
|
||||
*from,
|
||||
*sign_only,
|
||||
*no_wait,
|
||||
blockhash_query,
|
||||
nonce_account.as_ref(),
|
||||
*nonce_authority,
|
||||
@ -2165,7 +2211,7 @@ pub fn request_and_confirm_airdrop(
|
||||
log_instruction_custom_error::<SystemError>(result)
|
||||
}
|
||||
|
||||
pub fn log_instruction_custom_error<E>(result: ClientResult<String>) -> ProcessResult
|
||||
pub fn log_instruction_custom_error<E>(result: ClientResult<Signature>) -> ProcessResult
|
||||
where
|
||||
E: 'static + std::error::Error + DecodeError<E> + FromPrimitive,
|
||||
{
|
||||
@ -2173,7 +2219,7 @@ where
|
||||
Err(err) => {
|
||||
if let ClientErrorKind::TransactionError(TransactionError::InstructionError(
|
||||
_,
|
||||
InstructionError::CustomError(code),
|
||||
InstructionError::Custom(code),
|
||||
)) = err.kind()
|
||||
{
|
||||
if let Some(specific_error) = E::decode_custom_error_to_enum(*code) {
|
||||
@ -2182,7 +2228,7 @@ where
|
||||
}
|
||||
Err(err.into())
|
||||
}
|
||||
Ok(sig) => Ok(sig),
|
||||
Ok(sig) => Ok(sig.to_string()),
|
||||
}
|
||||
}
|
||||
|
||||
@ -2497,6 +2543,12 @@ pub fn app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> App<'ab, '
|
||||
.validator(is_valid_signer)
|
||||
.help("Source account of funds (if different from client local account)"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("no_wait")
|
||||
.long("no-wait")
|
||||
.takes_value(false)
|
||||
.help("Return signature immediately after submitting the transaction, instead of waiting for confirmations"),
|
||||
)
|
||||
.offline_args()
|
||||
.arg(nonce_arg())
|
||||
.arg(nonce_authority_arg())
|
||||
@ -3557,6 +3609,33 @@ mod tests {
|
||||
to: to_pubkey,
|
||||
from: 0,
|
||||
sign_only: false,
|
||||
no_wait: false,
|
||||
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
fee_payer: 0,
|
||||
},
|
||||
signers: vec![read_keypair_file(&default_keypair_file).unwrap().into()],
|
||||
}
|
||||
);
|
||||
|
||||
// Test Transfer no-wait
|
||||
let test_transfer = test_commands.clone().get_matches_from(vec![
|
||||
"test",
|
||||
"transfer",
|
||||
"--no-wait",
|
||||
&to_string,
|
||||
"42",
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_transfer, &default_keypair_file, None).unwrap(),
|
||||
CliCommandInfo {
|
||||
command: CliCommand::Transfer {
|
||||
lamports: 42_000_000_000,
|
||||
to: to_pubkey,
|
||||
from: 0,
|
||||
sign_only: false,
|
||||
no_wait: true,
|
||||
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
@ -3586,6 +3665,7 @@ mod tests {
|
||||
to: to_pubkey,
|
||||
from: 0,
|
||||
sign_only: true,
|
||||
no_wait: false,
|
||||
blockhash_query: BlockhashQuery::None(blockhash),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
@ -3620,6 +3700,7 @@ mod tests {
|
||||
to: to_pubkey,
|
||||
from: 0,
|
||||
sign_only: false,
|
||||
no_wait: false,
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(
|
||||
blockhash_query::Source::Cluster,
|
||||
blockhash
|
||||
@ -3658,6 +3739,7 @@ mod tests {
|
||||
to: to_pubkey,
|
||||
from: 0,
|
||||
sign_only: false,
|
||||
no_wait: false,
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(
|
||||
blockhash_query::Source::NonceAccount(nonce_address),
|
||||
blockhash
|
||||
|
@ -8,7 +8,12 @@ use crate::{
|
||||
use clap::{value_t, value_t_or_exit, App, Arg, ArgMatches, SubCommand};
|
||||
use console::{style, Emoji};
|
||||
use indicatif::{ProgressBar, ProgressStyle};
|
||||
use solana_clap_utils::{input_parsers::*, input_validators::*, keypair::signer_from_path};
|
||||
use solana_clap_utils::{
|
||||
commitment::{commitment_arg, COMMITMENT_ARG},
|
||||
input_parsers::*,
|
||||
input_validators::*,
|
||||
keypair::signer_from_path,
|
||||
};
|
||||
use solana_client::{
|
||||
pubsub_client::{PubsubClient, SlotInfoMessage},
|
||||
rpc_client::RpcClient,
|
||||
@ -91,14 +96,7 @@ impl ClusterQuerySubCommands for App<'_, '_> {
|
||||
SubCommand::with_name("epoch-info")
|
||||
.about("Get information about the current epoch")
|
||||
.alias("get-epoch-info")
|
||||
.arg(
|
||||
Arg::with_name("confirmed")
|
||||
.long("confirmed")
|
||||
.takes_value(false)
|
||||
.help(
|
||||
"Return information at maximum-lockout commitment level",
|
||||
),
|
||||
),
|
||||
.arg(commitment_arg()),
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("genesis-hash")
|
||||
@ -108,37 +106,16 @@ impl ClusterQuerySubCommands for App<'_, '_> {
|
||||
.subcommand(
|
||||
SubCommand::with_name("slot").about("Get current slot")
|
||||
.alias("get-slot")
|
||||
.arg(
|
||||
Arg::with_name("confirmed")
|
||||
.long("confirmed")
|
||||
.takes_value(false)
|
||||
.help(
|
||||
"Return slot at maximum-lockout commitment level",
|
||||
),
|
||||
),
|
||||
.arg(commitment_arg()),
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("epoch").about("Get current epoch")
|
||||
.arg(
|
||||
Arg::with_name("confirmed")
|
||||
.long("confirmed")
|
||||
.takes_value(false)
|
||||
.help(
|
||||
"Return epoch at maximum-lockout commitment level",
|
||||
),
|
||||
),
|
||||
.arg(commitment_arg()),
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("transaction-count").about("Get current transaction count")
|
||||
.alias("get-transaction-count")
|
||||
.arg(
|
||||
Arg::with_name("confirmed")
|
||||
.long("confirmed")
|
||||
.takes_value(false)
|
||||
.help(
|
||||
"Return count at maximum-lockout commitment level",
|
||||
),
|
||||
),
|
||||
.arg(commitment_arg()),
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("ping")
|
||||
@ -178,14 +155,7 @@ impl ClusterQuerySubCommands for App<'_, '_> {
|
||||
.default_value("15")
|
||||
.help("Wait up to timeout seconds for transaction confirmation"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("confirmed")
|
||||
.long("confirmed")
|
||||
.takes_value(false)
|
||||
.help(
|
||||
"Wait until the transaction is confirmed at maximum-lockout commitment level",
|
||||
),
|
||||
),
|
||||
.arg(commitment_arg()),
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("live-slots")
|
||||
@ -236,20 +206,13 @@ impl ClusterQuerySubCommands for App<'_, '_> {
|
||||
SubCommand::with_name("validators")
|
||||
.about("Show summary information about the current validators")
|
||||
.alias("show-validators")
|
||||
.arg(
|
||||
Arg::with_name("confirmed")
|
||||
.long("confirmed")
|
||||
.takes_value(false)
|
||||
.help(
|
||||
"Return information at maximum-lockout commitment level",
|
||||
),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("lamports")
|
||||
.long("lamports")
|
||||
.takes_value(false)
|
||||
.help("Display balance in lamports instead of SOL"),
|
||||
),
|
||||
)
|
||||
.arg(commitment_arg()),
|
||||
)
|
||||
}
|
||||
}
|
||||
@ -282,11 +245,7 @@ pub fn parse_cluster_ping(
|
||||
None
|
||||
};
|
||||
let timeout = Duration::from_secs(value_t_or_exit!(matches, "timeout", u64));
|
||||
let commitment_config = if matches.is_present("confirmed") {
|
||||
CommitmentConfig::default()
|
||||
} else {
|
||||
CommitmentConfig::recent()
|
||||
};
|
||||
let commitment_config = commitment_of(matches, COMMITMENT_ARG.long).unwrap();
|
||||
Ok(CliCommandInfo {
|
||||
command: CliCommand::Ping {
|
||||
lamports,
|
||||
@ -313,11 +272,7 @@ pub fn parse_get_block_time(matches: &ArgMatches<'_>) -> Result<CliCommandInfo,
|
||||
}
|
||||
|
||||
pub fn parse_get_epoch_info(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
|
||||
let commitment_config = if matches.is_present("confirmed") {
|
||||
CommitmentConfig::default()
|
||||
} else {
|
||||
CommitmentConfig::recent()
|
||||
};
|
||||
let commitment_config = commitment_of(matches, COMMITMENT_ARG.long).unwrap();
|
||||
Ok(CliCommandInfo {
|
||||
command: CliCommand::GetEpochInfo { commitment_config },
|
||||
signers: vec![],
|
||||
@ -325,11 +280,7 @@ pub fn parse_get_epoch_info(matches: &ArgMatches<'_>) -> Result<CliCommandInfo,
|
||||
}
|
||||
|
||||
pub fn parse_get_slot(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
|
||||
let commitment_config = if matches.is_present("confirmed") {
|
||||
CommitmentConfig::default()
|
||||
} else {
|
||||
CommitmentConfig::recent()
|
||||
};
|
||||
let commitment_config = commitment_of(matches, COMMITMENT_ARG.long).unwrap();
|
||||
Ok(CliCommandInfo {
|
||||
command: CliCommand::GetSlot { commitment_config },
|
||||
signers: vec![],
|
||||
@ -337,11 +288,7 @@ pub fn parse_get_slot(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliErr
|
||||
}
|
||||
|
||||
pub fn parse_get_epoch(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
|
||||
let commitment_config = if matches.is_present("confirmed") {
|
||||
CommitmentConfig::default()
|
||||
} else {
|
||||
CommitmentConfig::recent()
|
||||
};
|
||||
let commitment_config = commitment_of(matches, COMMITMENT_ARG.long).unwrap();
|
||||
Ok(CliCommandInfo {
|
||||
command: CliCommand::GetEpoch { commitment_config },
|
||||
signers: vec![],
|
||||
@ -349,11 +296,7 @@ pub fn parse_get_epoch(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliEr
|
||||
}
|
||||
|
||||
pub fn parse_get_transaction_count(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
|
||||
let commitment_config = if matches.is_present("confirmed") {
|
||||
CommitmentConfig::default()
|
||||
} else {
|
||||
CommitmentConfig::recent()
|
||||
};
|
||||
let commitment_config = commitment_of(matches, COMMITMENT_ARG.long).unwrap();
|
||||
Ok(CliCommandInfo {
|
||||
command: CliCommand::GetTransactionCount { commitment_config },
|
||||
signers: vec![],
|
||||
@ -379,11 +322,7 @@ pub fn parse_show_stakes(
|
||||
|
||||
pub fn parse_show_validators(matches: &ArgMatches<'_>) -> Result<CliCommandInfo, CliError> {
|
||||
let use_lamports_unit = matches.is_present("lamports");
|
||||
let commitment_config = if matches.is_present("confirmed") {
|
||||
CommitmentConfig::default()
|
||||
} else {
|
||||
CommitmentConfig::recent()
|
||||
};
|
||||
let commitment_config = commitment_of(matches, COMMITMENT_ARG.long).unwrap();
|
||||
|
||||
Ok(CliCommandInfo {
|
||||
command: CliCommand::ShowValidators {
|
||||
@ -454,25 +393,35 @@ pub fn process_catchup(
|
||||
}
|
||||
|
||||
let slot_distance = rpc_slot as i64 - node_slot as i64;
|
||||
let slots_per_second =
|
||||
(previous_slot_distance - slot_distance) as f64 / f64::from(sleep_interval);
|
||||
let time_remaining = (slot_distance as f64 / slots_per_second).round();
|
||||
let time_remaining = if !time_remaining.is_normal() || time_remaining <= 0.0 {
|
||||
"".to_string()
|
||||
} else {
|
||||
format!(
|
||||
". Time remaining: {}",
|
||||
humantime::format_duration(Duration::from_secs_f64(time_remaining))
|
||||
)
|
||||
};
|
||||
|
||||
progress_bar.set_message(&format!(
|
||||
"Validator is {} slots away (us:{} them:{}){}",
|
||||
"{} slots behind (us:{} them:{}){}",
|
||||
slot_distance,
|
||||
node_slot,
|
||||
rpc_slot,
|
||||
if previous_rpc_slot == std::u64::MAX {
|
||||
"".to_string()
|
||||
} else {
|
||||
let slots_per_second =
|
||||
(previous_slot_distance - slot_distance) as f64 / f64::from(sleep_interval);
|
||||
|
||||
format!(
|
||||
" and {} at {:.1} slots/second",
|
||||
", {} at {:.1} slots/second{}",
|
||||
if slots_per_second < 0.0 {
|
||||
"falling behind"
|
||||
} else {
|
||||
"gaining"
|
||||
},
|
||||
slots_per_second,
|
||||
time_remaining
|
||||
)
|
||||
}
|
||||
));
|
||||
@ -1346,7 +1295,8 @@ mod tests {
|
||||
"2",
|
||||
"-t",
|
||||
"3",
|
||||
"--confirmed",
|
||||
"--commitment",
|
||||
"max",
|
||||
]);
|
||||
assert_eq!(
|
||||
parse_command(&test_ping, &default_keypair_file, None).unwrap(),
|
||||
@ -1356,7 +1306,7 @@ mod tests {
|
||||
interval: Duration::from_secs(1),
|
||||
count: Some(2),
|
||||
timeout: Duration::from_secs(3),
|
||||
commitment_config: CommitmentConfig::default(),
|
||||
commitment_config: CommitmentConfig::max(),
|
||||
},
|
||||
signers: vec![default_keypair.into()],
|
||||
}
|
||||
|
@ -1,6 +1,11 @@
|
||||
use crate::cli::SettingType;
|
||||
use console::style;
|
||||
use solana_sdk::hash::Hash;
|
||||
use solana_sdk::{
|
||||
hash::Hash, native_token::lamports_to_sol, program_utils::limited_deserialize,
|
||||
transaction::Transaction,
|
||||
};
|
||||
use solana_transaction_status::RpcTransactionStatusMeta;
|
||||
use std::io;
|
||||
|
||||
// Pretty print a "name value"
|
||||
pub fn println_name_value(name: &str, value: &str) {
|
||||
@ -49,3 +54,140 @@ pub fn println_signers(
|
||||
}
|
||||
println!();
|
||||
}
|
||||
|
||||
pub fn write_transaction<W: io::Write>(
|
||||
w: &mut W,
|
||||
transaction: &Transaction,
|
||||
transaction_status: &Option<RpcTransactionStatusMeta>,
|
||||
prefix: &str,
|
||||
) -> io::Result<()> {
|
||||
let message = &transaction.message;
|
||||
writeln!(
|
||||
w,
|
||||
"{}Recent Blockhash: {:?}",
|
||||
prefix, message.recent_blockhash
|
||||
)?;
|
||||
for (signature_index, signature) in transaction.signatures.iter().enumerate() {
|
||||
writeln!(
|
||||
w,
|
||||
"{}Signature {}: {:?}",
|
||||
prefix, signature_index, signature
|
||||
)?;
|
||||
}
|
||||
writeln!(w, "{}{:?}", prefix, message.header)?;
|
||||
for (account_index, account) in message.account_keys.iter().enumerate() {
|
||||
writeln!(w, "{}Account {}: {:?}", prefix, account_index, account)?;
|
||||
}
|
||||
for (instruction_index, instruction) in message.instructions.iter().enumerate() {
|
||||
let program_pubkey = message.account_keys[instruction.program_id_index as usize];
|
||||
writeln!(w, "{}Instruction {}", prefix, instruction_index)?;
|
||||
writeln!(
|
||||
w,
|
||||
"{} Program: {} ({})",
|
||||
prefix, program_pubkey, instruction.program_id_index
|
||||
)?;
|
||||
for (account_index, account) in instruction.accounts.iter().enumerate() {
|
||||
let account_pubkey = message.account_keys[*account as usize];
|
||||
writeln!(
|
||||
w,
|
||||
"{} Account {}: {} ({})",
|
||||
prefix, account_index, account_pubkey, account
|
||||
)?;
|
||||
}
|
||||
|
||||
let mut raw = true;
|
||||
if program_pubkey == solana_vote_program::id() {
|
||||
if let Ok(vote_instruction) = limited_deserialize::<
|
||||
solana_vote_program::vote_instruction::VoteInstruction,
|
||||
>(&instruction.data)
|
||||
{
|
||||
writeln!(w, "{} {:?}", prefix, vote_instruction)?;
|
||||
raw = false;
|
||||
}
|
||||
} else if program_pubkey == solana_stake_program::id() {
|
||||
if let Ok(stake_instruction) = limited_deserialize::<
|
||||
solana_stake_program::stake_instruction::StakeInstruction,
|
||||
>(&instruction.data)
|
||||
{
|
||||
writeln!(w, "{} {:?}", prefix, stake_instruction)?;
|
||||
raw = false;
|
||||
}
|
||||
} else if program_pubkey == solana_sdk::system_program::id() {
|
||||
if let Ok(system_instruction) = limited_deserialize::<
|
||||
solana_sdk::system_instruction::SystemInstruction,
|
||||
>(&instruction.data)
|
||||
{
|
||||
writeln!(w, "{} {:?}", prefix, system_instruction)?;
|
||||
raw = false;
|
||||
}
|
||||
}
|
||||
|
||||
if raw {
|
||||
writeln!(w, "{} Data: {:?}", prefix, instruction.data)?;
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(transaction_status) = transaction_status {
|
||||
writeln!(
|
||||
w,
|
||||
"{}Status: {}",
|
||||
prefix,
|
||||
match &transaction_status.status {
|
||||
Ok(_) => "Ok".into(),
|
||||
Err(err) => err.to_string(),
|
||||
}
|
||||
)?;
|
||||
writeln!(
|
||||
w,
|
||||
"{} Fee: {} SOL",
|
||||
prefix,
|
||||
lamports_to_sol(transaction_status.fee)
|
||||
)?;
|
||||
assert_eq!(
|
||||
transaction_status.pre_balances.len(),
|
||||
transaction_status.post_balances.len()
|
||||
);
|
||||
for (i, (pre, post)) in transaction_status
|
||||
.pre_balances
|
||||
.iter()
|
||||
.zip(transaction_status.post_balances.iter())
|
||||
.enumerate()
|
||||
{
|
||||
if pre == post {
|
||||
writeln!(
|
||||
w,
|
||||
"{} Account {} balance: {} SOL",
|
||||
prefix,
|
||||
i,
|
||||
lamports_to_sol(*pre)
|
||||
)?;
|
||||
} else {
|
||||
writeln!(
|
||||
w,
|
||||
"{} Account {} balance: {} SOL -> {} SOL",
|
||||
prefix,
|
||||
i,
|
||||
lamports_to_sol(*pre),
|
||||
lamports_to_sol(*post)
|
||||
)?;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
writeln!(w, "{}Status: Unavailable", prefix)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||

pub fn println_transaction(
    transaction: &Transaction,
    transaction_status: &Option<RpcTransactionStatusMeta>,
    prefix: &str,
) {
    let mut w = Vec::new();
    if write_transaction(&mut w, transaction, transaction_status, prefix).is_ok() {
        if let Ok(s) = String::from_utf8(w) {
            print!("{}", s);
        }
    }
}
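A quick usage sketch for the new pretty-printer, assuming it is called from inside the cli crate where println_transaction is in scope; a default transaction and no status metadata are enough to exercise it, and the missing-metadata path prints "Status: Unavailable".

use solana_sdk::transaction::Transaction;

fn main() {
    let transaction = Transaction::default();
    // The two-space prefix indents every emitted line; `&None` means no RpcTransactionStatusMeta.
    println_transaction(&transaction, &None, "  ");
}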
|
@ -266,8 +266,8 @@ pub fn process_claim_storage_reward(
|
||||
&fee_calculator,
|
||||
&tx.message,
|
||||
)?;
|
||||
let signature_str = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &signers)?;
|
||||
Ok(signature_str)
|
||||
let signature = rpc_client.send_and_confirm_transaction_with_spinner(&mut tx, &signers)?;
|
||||
Ok(signature.to_string())
|
||||
}
|
||||
|
||||
pub fn process_show_storage_account(
|
||||
|
@ -4,7 +4,11 @@ use crate::cli::{
|
||||
ProcessResult, SignerIndex,
|
||||
};
|
||||
use clap::{value_t_or_exit, App, Arg, ArgMatches, SubCommand};
|
||||
use solana_clap_utils::{input_parsers::*, input_validators::*};
|
||||
use solana_clap_utils::{
|
||||
commitment::{commitment_arg, COMMITMENT_ARG},
|
||||
input_parsers::*,
|
||||
input_validators::*,
|
||||
};
|
||||
use solana_client::rpc_client::RpcClient;
|
||||
use solana_remote_wallet::remote_wallet::RemoteWalletManager;
|
||||
use solana_sdk::{
|
||||
@ -159,14 +163,6 @@ impl VoteSubCommands for App<'_, '_> {
|
||||
SubCommand::with_name("vote-account")
|
||||
.about("Show the contents of a vote account")
|
||||
.alias("show-vote-account")
|
||||
.arg(
|
||||
Arg::with_name("confirmed")
|
||||
.long("confirmed")
|
||||
.takes_value(false)
|
||||
.help(
|
||||
"Return information at maximum-lockout commitment level",
|
||||
),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("vote_account_pubkey")
|
||||
.index(1)
|
||||
@ -181,7 +177,8 @@ impl VoteSubCommands for App<'_, '_> {
|
||||
.long("lamports")
|
||||
.takes_value(false)
|
||||
.help("Display balance in lamports instead of SOL"),
|
||||
),
|
||||
)
|
||||
.arg(commitment_arg()),
|
||||
)
|
||||
.subcommand(
|
||||
SubCommand::with_name("withdraw-from-vote-account")
|
||||
@ -322,11 +319,7 @@ pub fn parse_vote_get_account_command(
|
||||
let vote_account_pubkey =
|
||||
pubkey_of_signer(matches, "vote_account_pubkey", wallet_manager)?.unwrap();
|
||||
let use_lamports_unit = matches.is_present("lamports");
|
||||
let commitment_config = if matches.is_present("confirmed") {
|
||||
CommitmentConfig::default()
|
||||
} else {
|
||||
CommitmentConfig::recent()
|
||||
};
|
||||
let commitment_config = commitment_of(matches, COMMITMENT_ARG.long).unwrap();
|
||||
Ok(CliCommandInfo {
|
||||
command: CliCommand::ShowVoteAccount {
|
||||
pubkey: vote_account_pubkey,
|
||||
|
@ -338,6 +338,7 @@ fn test_create_account_with_seed() {
|
||||
to: to_address,
|
||||
from: 0,
|
||||
sign_only: true,
|
||||
no_wait: false,
|
||||
blockhash_query: BlockhashQuery::None(nonce_hash),
|
||||
nonce_account: Some(nonce_address),
|
||||
nonce_authority: 0,
|
||||
@ -358,6 +359,7 @@ fn test_create_account_with_seed() {
|
||||
to: to_address,
|
||||
from: 0,
|
||||
sign_only: false,
|
||||
no_wait: false,
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(
|
||||
blockhash_query::Source::NonceAccount(nonce_address),
|
||||
sign_only.blockhash,
|
||||
|
@ -68,6 +68,7 @@ fn test_transfer() {
|
||||
to: recipient_pubkey,
|
||||
from: 0,
|
||||
sign_only: false,
|
||||
no_wait: false,
|
||||
blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
@ -95,6 +96,7 @@ fn test_transfer() {
|
||||
to: recipient_pubkey,
|
||||
from: 0,
|
||||
sign_only: true,
|
||||
no_wait: false,
|
||||
blockhash_query: BlockhashQuery::None(blockhash),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
@ -110,6 +112,7 @@ fn test_transfer() {
|
||||
to: recipient_pubkey,
|
||||
from: 0,
|
||||
sign_only: false,
|
||||
no_wait: false,
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(blockhash_query::Source::Cluster, blockhash),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
@ -147,6 +150,7 @@ fn test_transfer() {
|
||||
to: recipient_pubkey,
|
||||
from: 0,
|
||||
sign_only: false,
|
||||
no_wait: false,
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(
|
||||
blockhash_query::Source::NonceAccount(nonce_account.pubkey()),
|
||||
nonce_hash,
|
||||
@ -187,6 +191,7 @@ fn test_transfer() {
|
||||
to: recipient_pubkey,
|
||||
from: 0,
|
||||
sign_only: true,
|
||||
no_wait: false,
|
||||
blockhash_query: BlockhashQuery::None(nonce_hash),
|
||||
nonce_account: Some(nonce_account.pubkey()),
|
||||
nonce_authority: 0,
|
||||
@ -202,6 +207,7 @@ fn test_transfer() {
|
||||
to: recipient_pubkey,
|
||||
from: 0,
|
||||
sign_only: false,
|
||||
no_wait: false,
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(
|
||||
blockhash_query::Source::NonceAccount(nonce_account.pubkey()),
|
||||
sign_only.blockhash,
|
||||
@ -269,6 +275,7 @@ fn test_transfer_multisession_signing() {
|
||||
to: to_pubkey,
|
||||
from: 1,
|
||||
sign_only: true,
|
||||
no_wait: false,
|
||||
blockhash_query: BlockhashQuery::None(blockhash),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
@ -293,6 +300,7 @@ fn test_transfer_multisession_signing() {
|
||||
to: to_pubkey,
|
||||
from: 1,
|
||||
sign_only: true,
|
||||
no_wait: false,
|
||||
blockhash_query: BlockhashQuery::None(blockhash),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
@ -314,6 +322,7 @@ fn test_transfer_multisession_signing() {
|
||||
to: to_pubkey,
|
||||
from: 1,
|
||||
sign_only: false,
|
||||
no_wait: false,
|
||||
blockhash_query: BlockhashQuery::FeeCalculator(blockhash_query::Source::Cluster, blockhash),
|
||||
nonce_account: None,
|
||||
nonce_authority: 0,
|
||||
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-client"
|
||||
version = "1.0.13"
|
||||
version = "1.0.23"
|
||||
description = "Solana Client"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -19,10 +19,10 @@ reqwest = { version = "0.10.1", default-features = false, features = ["blocking"
|
||||
serde = "1.0.104"
|
||||
serde_derive = "1.0.103"
|
||||
serde_json = "1.0.46"
|
||||
solana-net-utils = { path = "../net-utils", version = "1.0.13" }
|
||||
solana-sdk = { path = "../sdk", version = "1.0.13" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "1.0.13" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "1.0.13" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.0.23" }
|
||||
solana-sdk = { path = "../sdk", version = "1.0.23" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "1.0.23" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "1.0.23" }
|
||||
thiserror = "1.0"
|
||||
tungstenite = "0.10.1"
|
||||
url = "2.1.1"
|
||||
@ -31,4 +31,7 @@ url = "2.1.1"
|
||||
assert_matches = "1.3.0"
|
||||
jsonrpc-core = "14.0.5"
|
||||
jsonrpc-http-server = "14.0.6"
|
||||
solana-logger = { path = "../logger", version = "1.0.13" }
|
||||
solana-logger = { path = "../logger", version = "1.0.23" }
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
||||
|
@ -40,7 +40,7 @@ impl GenericRpcClientRequest for MockRpcClientRequest {
|
||||
fn send(
|
||||
&self,
|
||||
request: &RpcRequest,
|
||||
params: serde_json::Value,
|
||||
_params: serde_json::Value,
|
||||
_retries: usize,
|
||||
) -> Result<serde_json::Value> {
|
||||
if let Some(value) = self.mocks.write().unwrap().remove(request) {
|
||||
@ -50,17 +50,6 @@ impl GenericRpcClientRequest for MockRpcClientRequest {
|
||||
return Ok(Value::Null);
|
||||
}
|
||||
let val = match request {
|
||||
RpcRequest::ConfirmTransaction => {
|
||||
if let Some(params_array) = params.as_array() {
|
||||
if let Value::String(param_string) = ¶ms_array[0] {
|
||||
Value::Bool(param_string == SIGNATURE)
|
||||
} else {
|
||||
Value::Null
|
||||
}
|
||||
} else {
|
||||
Value::Null
|
||||
}
|
||||
}
|
||||
RpcRequest::GetBalance => serde_json::to_value(Response {
|
||||
context: RpcResponseContext { slot: 1 },
|
||||
value: Value::Number(Number::from(50)),
|
||||
@ -87,21 +76,6 @@ impl GenericRpcClientRequest for MockRpcClientRequest {
|
||||
context: RpcResponseContext { slot: 1 },
|
||||
value: serde_json::to_value(FeeRateGovernor::default()).unwrap(),
|
||||
})?,
|
||||
RpcRequest::GetSignatureStatus => {
|
||||
let response: Option<transaction::Result<()>> = if self.url == "account_in_use" {
|
||||
Some(Err(TransactionError::AccountInUse))
|
||||
} else if self.url == "instruction_error" {
|
||||
Some(Err(TransactionError::InstructionError(
|
||||
0,
|
||||
InstructionError::UninitializedAccount,
|
||||
)))
|
||||
} else if self.url == "sig_not_found" {
|
||||
None
|
||||
} else {
|
||||
Some(Ok(()))
|
||||
};
|
||||
serde_json::to_value(response).unwrap()
|
||||
}
|
||||
RpcRequest::GetSignatureStatuses => {
|
||||
let status: transaction::Result<()> = if self.url == "account_in_use" {
|
||||
Err(TransactionError::AccountInUse)
|
||||
@ -116,10 +90,12 @@ impl GenericRpcClientRequest for MockRpcClientRequest {
|
||||
let status = if self.url == "sig_not_found" {
|
||||
None
|
||||
} else {
|
||||
let err = status.clone().err();
|
||||
Some(TransactionStatus {
|
||||
status,
|
||||
slot: 1,
|
||||
confirmations: Some(0),
|
||||
confirmations: None,
|
||||
err,
|
||||
})
|
||||
};
|
||||
serde_json::to_value(Response {
|
||||
|
@ -16,7 +16,10 @@ use log::*;
|
||||
use serde_json::{json, Value};
|
||||
use solana_sdk::{
|
||||
account::Account,
|
||||
clock::{Slot, UnixTimestamp, DEFAULT_TICKS_PER_SECOND, DEFAULT_TICKS_PER_SLOT},
|
||||
clock::{
|
||||
Slot, UnixTimestamp, DEFAULT_TICKS_PER_SECOND, DEFAULT_TICKS_PER_SLOT,
|
||||
MAX_HASH_AGE_IN_SECONDS,
|
||||
},
|
||||
commitment_config::CommitmentConfig,
|
||||
epoch_schedule::EpochSchedule,
|
||||
fee_calculator::{FeeCalculator, FeeRateGovernor},
|
||||
@ -27,12 +30,13 @@ use solana_sdk::{
|
||||
signers::Signers,
|
||||
transaction::{self, Transaction, TransactionError},
|
||||
};
|
||||
use solana_transaction_status::{ConfirmedBlock, TransactionStatus};
|
||||
use solana_transaction_status::{
|
||||
ConfirmedBlock, ConfirmedTransaction, TransactionEncoding, TransactionStatus,
|
||||
};
|
||||
use solana_vote_program::vote_state::MAX_LOCKOUT_HISTORY;
|
||||
use std::{
|
||||
error,
|
||||
net::SocketAddr,
|
||||
str::FromStr,
|
||||
thread::sleep,
|
||||
time::{Duration, Instant},
|
||||
};
|
||||
@ -71,7 +75,7 @@ impl RpcClient {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn confirm_transaction(&self, signature: &str) -> ClientResult<bool> {
|
||||
pub fn confirm_transaction(&self, signature: &Signature) -> ClientResult<bool> {
|
||||
Ok(self
|
||||
.confirm_transaction_with_commitment(signature, CommitmentConfig::default())?
|
||||
.value)
|
||||
@ -79,49 +83,79 @@ impl RpcClient {
|
||||
|
||||
pub fn confirm_transaction_with_commitment(
|
||||
&self,
|
||||
signature: &str,
|
||||
signature: &Signature,
|
||||
commitment_config: CommitmentConfig,
|
||||
) -> RpcResult<bool> {
|
||||
let response = self
|
||||
.client
|
||||
.send(
|
||||
&RpcRequest::ConfirmTransaction,
|
||||
json!([signature, commitment_config]),
|
||||
0,
|
||||
)
|
||||
.map_err(|err| err.into_with_command("ConfirmTransaction"))?;
|
||||
let Response { context, value } = self.get_signature_statuses(&[*signature])?;
|
||||
|
||||
serde_json::from_value::<Response<bool>>(response)
|
||||
.map_err(|err| ClientError::new_with_command(err.into(), "ConfirmTransaction"))
|
||||
Ok(Response {
|
||||
context,
|
||||
value: value[0]
|
||||
.as_ref()
|
||||
.filter(|result| result.satisfies_commitment(commitment_config))
|
||||
.map(|result| result.status.is_ok())
|
||||
.unwrap_or_default(),
|
||||
})
|
||||
}
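A hedged sketch of how a caller might use the reworked confirmation API; the endpoint URL is a placeholder and the default signature is only there to keep the snippet self-contained.

use solana_client::rpc_client::RpcClient;
use solana_sdk::{commitment_config::CommitmentConfig, signature::Signature};

fn main() {
    // Placeholder endpoint; any JSON RPC URL works here.
    let rpc_client = RpcClient::new("http://127.0.0.1:8899".to_string());
    let signature = Signature::default();
    // The reworked call goes through getSignatureStatuses under the hood and reports
    // `false` for a signature that has not yet satisfied the requested commitment.
    match rpc_client.confirm_transaction_with_commitment(&signature, CommitmentConfig::recent()) {
        Ok(response) => println!("confirmed: {}", response.value),
        Err(err) => println!("rpc error: {}", err),
    }
}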
|
||||
|
||||
pub fn send_transaction(&self, transaction: &Transaction) -> ClientResult<String> {
|
||||
pub fn send_transaction(&self, transaction: &Transaction) -> ClientResult<Signature> {
|
||||
let serialized_encoded = bs58::encode(serialize(transaction).unwrap()).into_string();
|
||||
let signature =
|
||||
let response =
|
||||
self.client
|
||||
.send(&RpcRequest::SendTransaction, json!([serialized_encoded]), 5)?;
|
||||
if signature.as_str().is_none() {
|
||||
Err(RpcError::ForUser("Received result of an unexpected type".to_string()).into())
|
||||
} else {
|
||||
Ok(signature.as_str().unwrap().to_string())
|
||||
|
||||
match response.as_str() {
|
||||
None => {
|
||||
Err(RpcError::ForUser("Received result of an unexpected type".to_string()).into())
|
||||
}
|
||||
Some(signature_base58_str) => signature_base58_str
|
||||
.parse::<Signature>()
|
||||
.map_err(|err| RpcError::ParseError(err.to_string()).into()),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn simulate_transaction(
|
||||
&self,
|
||||
transaction: &Transaction,
|
||||
sig_verify: bool,
|
||||
) -> RpcResult<TransactionStatus> {
|
||||
let serialized_encoded = bs58::encode(serialize(transaction).unwrap()).into_string();
|
||||
let response = self.send(
|
||||
&RpcRequest::SimulateTransaction,
|
||||
json!([serialized_encoded, { "sigVerify": sig_verify }]),
|
||||
0,
|
||||
)?;
|
||||
Ok(serde_json::from_value(response)
|
||||
.map_err(|err| ClientError::new_with_command(err.into(), "SimulateTransaction"))?)
|
||||
}
|
||||
|
||||
pub fn get_signature_status(
|
||||
&self,
|
||||
signature: &str,
|
||||
signature: &Signature,
|
||||
) -> ClientResult<Option<transaction::Result<()>>> {
|
||||
self.get_signature_status_with_commitment(signature, CommitmentConfig::default())
|
||||
}
|
||||
|
||||
pub fn get_signature_statuses(
|
||||
&self,
|
||||
signatures: &[Signature],
|
||||
) -> RpcResult<Vec<Option<TransactionStatus>>> {
|
||||
let signatures: Vec<_> = signatures.iter().map(|s| s.to_string()).collect();
|
||||
let signature_status =
|
||||
self.client
|
||||
.send(&RpcRequest::GetSignatureStatuses, json!([signatures]), 5)?;
|
||||
Ok(serde_json::from_value(signature_status)
|
||||
.map_err(|err| ClientError::new_with_command(err.into(), "GetSignatureStatuses"))?)
|
||||
}
|
||||
|
||||
pub fn get_signature_status_with_commitment(
|
||||
&self,
|
||||
signature: &str,
|
||||
signature: &Signature,
|
||||
commitment_config: CommitmentConfig,
|
||||
) -> ClientResult<Option<transaction::Result<()>>> {
|
||||
let signature_status = self.client.send(
|
||||
&RpcRequest::GetSignatureStatuses,
|
||||
json!([[signature.to_string()], commitment_config]),
|
||||
json!([[signature.to_string()]]),
|
||||
5,
|
||||
)?;
|
||||
let result: Response<Vec<Option<TransactionStatus>>> =
|
||||
@ -129,6 +163,29 @@ impl RpcClient {
|
||||
.map_err(|err| ClientError::new_with_command(err.into(), "GetSignatureStatuses"))?;
|
||||
Ok(result.value[0]
|
||||
.clone()
|
||||
.filter(|result| result.satisfies_commitment(commitment_config))
|
||||
.map(|status_meta| status_meta.status))
|
||||
}
|
||||
|
||||
pub fn get_signature_status_with_commitment_and_history(
|
||||
&self,
|
||||
signature: &Signature,
|
||||
commitment_config: CommitmentConfig,
|
||||
search_transaction_history: bool,
|
||||
) -> ClientResult<Option<transaction::Result<()>>> {
|
||||
let signature_status = self.client.send(
|
||||
&RpcRequest::GetSignatureStatuses,
|
||||
json!([[signature.to_string()], {
|
||||
"searchTransactionHistory": search_transaction_history
|
||||
}]),
|
||||
5,
|
||||
)?;
|
||||
let result: Response<Vec<Option<TransactionStatus>>> =
|
||||
serde_json::from_value(signature_status)
|
||||
.map_err(|err| ClientError::new_with_command(err.into(), "GetSignatureStatuses"))?;
|
||||
Ok(result.value[0]
|
||||
.clone()
|
||||
.filter(|result| result.satisfies_commitment(commitment_config))
|
||||
.map(|status_meta| status_meta.status))
|
||||
}
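The `search_transaction_history` flag is the only difference from the plain commitment variant above; a small sketch of when a caller would reach for it (the helper function name is made up):

use solana_client::rpc_client::RpcClient;
use solana_sdk::{commitment_config::CommitmentConfig, signature::Signature};

fn check_old_signature(rpc_client: &RpcClient, signature: &Signature) {
    // Passing `true` asks the node to also look through its ledger history, so signatures
    // that have fallen out of the recent status cache can still be found.
    let status = rpc_client
        .get_signature_status_with_commitment_and_history(
            signature,
            CommitmentConfig::default(),
            true,
        )
        .expect("rpc request failed");
    println!("status: {:?}", status);
}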
|
||||
|
||||
@ -177,9 +234,17 @@ impl RpcClient {
|
||||
}
|
||||
|
||||
pub fn get_confirmed_block(&self, slot: Slot) -> ClientResult<ConfirmedBlock> {
|
||||
self.get_confirmed_block_with_encoding(slot, TransactionEncoding::Json)
|
||||
}
|
||||
|
||||
pub fn get_confirmed_block_with_encoding(
|
||||
&self,
|
||||
slot: Slot,
|
||||
encoding: TransactionEncoding,
|
||||
) -> ClientResult<ConfirmedBlock> {
|
||||
let response = self
|
||||
.client
|
||||
.send(&RpcRequest::GetConfirmedBlock, json!([slot]), 0)
|
||||
.send(&RpcRequest::GetConfirmedBlock, json!([slot, encoding]), 0)
|
||||
.map_err(|err| err.into_with_command("GetConfirmedBlock"))?;
|
||||
|
||||
serde_json::from_value(response)
|
||||
@ -204,6 +269,44 @@ impl RpcClient {
|
||||
.map_err(|err| ClientError::new_with_command(err.into(), "GetConfirmedBlocks"))
|
||||
}
|
||||
|
||||
pub fn get_confirmed_signatures_for_address(
|
||||
&self,
|
||||
address: &Pubkey,
|
||||
start_slot: Slot,
|
||||
end_slot: Slot,
|
||||
) -> ClientResult<Vec<Signature>> {
|
||||
let response = self
|
||||
.client
|
||||
.send(
|
||||
&RpcRequest::GetConfirmedSignaturesForAddress,
|
||||
json!([address, start_slot, end_slot]),
|
||||
0,
|
||||
)
|
||||
.map_err(|err| err.into_with_command("GetConfirmedSignaturesForAddress"))?;
|
||||
|
||||
serde_json::from_value(response).map_err(|err| {
|
||||
ClientError::new_with_command(err.into(), "GetConfirmedSignaturesForAddress")
|
||||
})
|
||||
}
|
||||
|
||||
pub fn get_confirmed_transaction(
|
||||
&self,
|
||||
signature: &Signature,
|
||||
encoding: TransactionEncoding,
|
||||
) -> ClientResult<ConfirmedTransaction> {
|
||||
let response = self
|
||||
.client
|
||||
.send(
|
||||
&RpcRequest::GetConfirmedTransaction,
|
||||
json!([signature.to_string(), encoding]),
|
||||
0,
|
||||
)
|
||||
.map_err(|err| err.into_with_command("GetConfirmedTransaction"))?;
|
||||
|
||||
serde_json::from_value(response)
|
||||
.map_err(|err| ClientError::new_with_command(err.into(), "GetConfirmedTransaction"))
|
||||
}
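A sketch that strings the three confirmed-history getters together; the slot window is hypothetical, and the `slot` field access assumes the ConfirmedTransaction layout from solana-transaction-status at this version.

use solana_client::rpc_client::RpcClient;
use solana_sdk::pubkey::Pubkey;
use solana_transaction_status::TransactionEncoding;

fn dump_recent_history(
    rpc_client: &RpcClient,
    address: &Pubkey,
) -> Result<(), Box<dyn std::error::Error>> {
    // Hypothetical slot window; a real caller would derive this from get_slot() or epoch info.
    let (start_slot, end_slot) = (0, 100);
    for signature in rpc_client.get_confirmed_signatures_for_address(address, start_slot, end_slot)? {
        let confirmed = rpc_client.get_confirmed_transaction(&signature, TransactionEncoding::Json)?;
        println!("{} confirmed in slot {}", signature, confirmed.slot);
    }
    Ok(())
}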
|
||||
|
||||
pub fn get_block_time(&self, slot: Slot) -> ClientResult<UnixTimestamp> {
|
||||
let response = self
|
||||
.client
|
||||
@ -326,13 +429,13 @@ impl RpcClient {
|
||||
&self,
|
||||
transaction: &mut Transaction,
|
||||
signer_keys: &T,
|
||||
) -> ClientResult<String> {
|
||||
) -> ClientResult<Signature> {
|
||||
let mut send_retries = 20;
|
||||
loop {
|
||||
let mut status_retries = 15;
|
||||
let signature_str = self.send_transaction(transaction)?;
|
||||
let signature = self.send_transaction(transaction)?;
|
||||
let status = loop {
|
||||
let status = self.get_signature_status(&signature_str)?;
|
||||
let status = self.get_signature_status(&signature)?;
|
||||
if status.is_none() {
|
||||
status_retries -= 1;
|
||||
if status_retries == 0 {
|
||||
@ -348,7 +451,7 @@ impl RpcClient {
|
||||
};
|
||||
send_retries = if let Some(result) = status.clone() {
|
||||
match result {
|
||||
Ok(_) => return Ok(signature_str),
|
||||
Ok(_) => return Ok(signature),
|
||||
Err(TransactionError::AccountInUse) => {
|
||||
// Fetch a new blockhash and re-sign the transaction before sending it again
|
||||
self.resign_transaction(transaction, signer_keys)?;
|
||||
@ -810,10 +913,9 @@ impl RpcClient {
|
||||
) -> ClientResult<()> {
|
||||
let now = Instant::now();
|
||||
loop {
|
||||
if let Ok(Some(_)) = self.get_signature_status_with_commitment(
|
||||
&signature.to_string(),
|
||||
commitment_config.clone(),
|
||||
) {
|
||||
if let Ok(Some(_)) =
|
||||
self.get_signature_status_with_commitment(&signature, commitment_config.clone())
|
||||
{
|
||||
break;
|
||||
}
|
||||
if now.elapsed().as_secs() > 15 {
|
||||
@ -833,14 +935,13 @@ impl RpcClient {
|
||||
trace!("check_signature: {:?}", signature);
|
||||
|
||||
for _ in 0..30 {
|
||||
let response = self.client.send(
|
||||
&RpcRequest::ConfirmTransaction,
|
||||
json!([signature.to_string(), CommitmentConfig::recent()]),
|
||||
0,
|
||||
);
|
||||
|
||||
let response =
|
||||
self.confirm_transaction_with_commitment(signature, CommitmentConfig::recent());
|
||||
match response {
|
||||
Ok(Value::Bool(signature_status)) => {
|
||||
Ok(Response {
|
||||
value: signature_status,
|
||||
..
|
||||
}) => {
|
||||
if signature_status {
|
||||
trace!("Response found signature");
|
||||
} else {
|
||||
@ -849,12 +950,6 @@ impl RpcClient {
|
||||
|
||||
return signature_status;
|
||||
}
|
||||
Ok(other) => {
|
||||
debug!(
|
||||
"check_signature request failed, expected bool, got: {:?}",
|
||||
other
|
||||
);
|
||||
}
|
||||
Err(err) => {
|
||||
debug!("check_signature request failed: {:?}", err);
|
||||
}
|
||||
@ -927,7 +1022,7 @@ impl RpcClient {
|
||||
.client
|
||||
.send(
|
||||
&RpcRequest::GetSignatureStatuses,
|
||||
json!([[signature.to_string()], CommitmentConfig::recent().ok()]),
|
||||
json!([[signature.to_string()]]),
|
||||
1,
|
||||
)
|
||||
.map_err(|err| err.into_with_command("GetSignatureStatuses"))?;
|
||||
@ -951,7 +1046,7 @@ impl RpcClient {
|
||||
&self,
|
||||
transaction: &mut Transaction,
|
||||
signer_keys: &T,
|
||||
) -> ClientResult<String> {
|
||||
) -> ClientResult<Signature> {
|
||||
let mut confirmations = 0;
|
||||
|
||||
let progress_bar = new_spinner_progress_bar();
|
||||
@ -962,23 +1057,21 @@ impl RpcClient {
|
||||
));
|
||||
|
||||
let mut send_retries = 20;
|
||||
let signature_str = loop {
|
||||
let signature = loop {
|
||||
let mut status_retries = 15;
|
||||
let (signature_str, status) = loop {
|
||||
let signature_str = self.send_transaction(transaction)?;
|
||||
let (signature, status) = loop {
|
||||
let signature = self.send_transaction(transaction)?;
|
||||
|
||||
// Get recent commitment in order to count confirmations for successful transactions
|
||||
let status = self.get_signature_status_with_commitment(
|
||||
&signature_str,
|
||||
CommitmentConfig::recent(),
|
||||
)?;
|
||||
let status = self
|
||||
.get_signature_status_with_commitment(&signature, CommitmentConfig::recent())?;
|
||||
if status.is_none() {
|
||||
status_retries -= 1;
|
||||
if status_retries == 0 {
|
||||
break (signature_str, status);
|
||||
break (signature, status);
|
||||
}
|
||||
} else {
|
||||
break (signature_str, status);
|
||||
break (signature, status);
|
||||
}
|
||||
|
||||
if cfg!(not(test)) {
|
||||
@ -1003,7 +1096,7 @@ impl RpcClient {
|
||||
if let Some(result) = status {
|
||||
match result {
|
||||
Ok(_) => {
|
||||
break signature_str;
|
||||
break signature;
|
||||
}
|
||||
Err(err) => {
|
||||
return Err(err.into());
|
||||
@ -1016,19 +1109,14 @@ impl RpcClient {
|
||||
}
|
||||
}
|
||||
};
|
||||
let signature = Signature::from_str(&signature_str).map_err(|_| {
|
||||
ClientError::from(ClientErrorKind::Custom(format!(
|
||||
"Returned string {} cannot be parsed as a signature",
|
||||
signature_str
|
||||
)))
|
||||
})?;
|
||||
let now = Instant::now();
|
||||
loop {
|
||||
// Return when default (max) commitment is reached
|
||||
// Failed transactions have already been eliminated, `is_some` check is sufficient
|
||||
if self.get_signature_status(&signature_str)?.is_some() {
|
||||
if self.get_signature_status(&signature)?.is_some() {
|
||||
progress_bar.set_message("Transaction confirmed");
|
||||
progress_bar.finish_and_clear();
|
||||
return Ok(signature_str);
|
||||
return Ok(signature);
|
||||
}
|
||||
progress_bar.set_message(&format!(
|
||||
"[{}/{}] Waiting for confirmations",
|
||||
@ -1036,7 +1124,14 @@ impl RpcClient {
|
||||
MAX_LOCKOUT_HISTORY + 1,
|
||||
));
|
||||
sleep(Duration::from_millis(500));
|
||||
confirmations = self.get_num_blocks_since_signature_confirmation(&signature)?;
|
||||
confirmations = self
|
||||
.get_num_blocks_since_signature_confirmation(&signature)
|
||||
.unwrap_or(confirmations);
|
||||
if now.elapsed().as_secs() >= MAX_HASH_AGE_IN_SECONDS as u64 {
|
||||
return Err(
|
||||
RpcError::ForUser("transaction not finalized. This can happen when a transaction lands in an abandoned fork. Please retry.".to_string()).into(),
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -1190,7 +1285,7 @@ mod tests {
|
||||
let tx = system_transaction::transfer(&key, &to, 50, blockhash);
|
||||
|
||||
let signature = rpc_client.send_transaction(&tx);
|
||||
assert_eq!(signature.unwrap(), SIGNATURE.to_string());
|
||||
assert_eq!(signature.unwrap(), SIGNATURE.parse().unwrap());
|
||||
|
||||
let rpc_client = RpcClient::new_mock("fails".to_string());
|
||||
|
||||
@ -1213,18 +1308,17 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_get_signature_status() {
|
||||
let signature = Signature::default();
|
||||
|
||||
let rpc_client = RpcClient::new_mock("succeeds".to_string());
|
||||
let signature = "good_signature";
|
||||
let status = rpc_client.get_signature_status(&signature).unwrap();
|
||||
assert_eq!(status, Some(Ok(())));
|
||||
|
||||
let rpc_client = RpcClient::new_mock("sig_not_found".to_string());
|
||||
let signature = "sig_not_found";
|
||||
let status = rpc_client.get_signature_status(&signature).unwrap();
|
||||
assert_eq!(status, None);
|
||||
|
||||
let rpc_client = RpcClient::new_mock("account_in_use".to_string());
|
||||
let signature = "account_in_use";
|
||||
let status = rpc_client.get_signature_status(&signature).unwrap();
|
||||
assert_eq!(status, Some(Err(TransactionError::AccountInUse)));
|
||||
}
|
||||
|
28
client/src/rpc_config.rs
Normal file
@ -0,0 +1,28 @@
use solana_sdk::commitment_config::CommitmentConfig;

#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcSignatureStatusConfig {
    pub search_transaction_history: bool,
}

#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcSimulateTransactionConfig {
    pub sig_verify: bool,
}

#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub enum RpcLargestAccountsFilter {
    Circulating,
    NonCirculating,
}

#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RpcLargestAccountsConfig {
    #[serde(flatten)]
    pub commitment: Option<CommitmentConfig>,
    pub filter: Option<RpcLargestAccountsFilter>,
}
@ -3,7 +3,6 @@ use thiserror::Error;
|
||||
|
||||
#[derive(Debug, PartialEq, Eq, Hash)]
|
||||
pub enum RpcRequest {
|
||||
ConfirmTransaction,
|
||||
DeregisterNode,
|
||||
ValidatorExit,
|
||||
GetAccountInfo,
|
||||
@ -12,6 +11,8 @@ pub enum RpcRequest {
|
||||
GetClusterNodes,
|
||||
GetConfirmedBlock,
|
||||
GetConfirmedBlocks,
|
||||
GetConfirmedSignaturesForAddress,
|
||||
GetConfirmedTransaction,
|
||||
GetEpochInfo,
|
||||
GetEpochSchedule,
|
||||
GetGenesisHash,
|
||||
@ -22,7 +23,6 @@ pub enum RpcRequest {
|
||||
GetRecentBlockhash,
|
||||
GetFeeCalculatorForBlockhash,
|
||||
GetFeeRateGovernor,
|
||||
GetSignatureStatus,
|
||||
GetSignatureStatuses,
|
||||
GetSlot,
|
||||
GetSlotLeader,
|
||||
@ -36,6 +36,7 @@ pub enum RpcRequest {
|
||||
RegisterNode,
|
||||
RequestAirdrop,
|
||||
SendTransaction,
|
||||
SimulateTransaction,
|
||||
SignVote,
|
||||
GetMinimumBalanceForRentExemption,
|
||||
MinimumLedgerSlot,
|
||||
@ -45,7 +46,6 @@ impl RpcRequest {
|
||||
pub(crate) fn build_request_json(&self, id: u64, params: Value) -> Value {
|
||||
let jsonrpc = "2.0";
|
||||
let method = match self {
|
||||
RpcRequest::ConfirmTransaction => "confirmTransaction",
|
||||
RpcRequest::DeregisterNode => "deregisterNode",
|
||||
RpcRequest::ValidatorExit => "validatorExit",
|
||||
RpcRequest::GetAccountInfo => "getAccountInfo",
|
||||
@ -54,6 +54,8 @@ impl RpcRequest {
|
||||
RpcRequest::GetClusterNodes => "getClusterNodes",
|
||||
RpcRequest::GetConfirmedBlock => "getConfirmedBlock",
|
||||
RpcRequest::GetConfirmedBlocks => "getConfirmedBlocks",
|
||||
RpcRequest::GetConfirmedSignaturesForAddress => "getConfirmedSignaturesForAddress",
|
||||
RpcRequest::GetConfirmedTransaction => "getConfirmedTransaction",
|
||||
RpcRequest::GetEpochInfo => "getEpochInfo",
|
||||
RpcRequest::GetEpochSchedule => "getEpochSchedule",
|
||||
RpcRequest::GetGenesisHash => "getGenesisHash",
|
||||
@ -64,7 +66,6 @@ impl RpcRequest {
|
||||
RpcRequest::GetRecentBlockhash => "getRecentBlockhash",
|
||||
RpcRequest::GetFeeCalculatorForBlockhash => "getFeeCalculatorForBlockhash",
|
||||
RpcRequest::GetFeeRateGovernor => "getFeeRateGovernor",
|
||||
RpcRequest::GetSignatureStatus => "getSignatureStatus",
|
||||
RpcRequest::GetSignatureStatuses => "getSignatureStatuses",
|
||||
RpcRequest::GetSlot => "getSlot",
|
||||
RpcRequest::GetSlotLeader => "getSlotLeader",
|
||||
@ -78,6 +79,7 @@ impl RpcRequest {
|
||||
RpcRequest::RegisterNode => "registerNode",
|
||||
RpcRequest::RequestAirdrop => "requestAirdrop",
|
||||
RpcRequest::SendTransaction => "sendTransaction",
|
||||
RpcRequest::SimulateTransaction => "simulateTransaction",
|
||||
RpcRequest::SignVote => "signVote",
|
||||
RpcRequest::GetMinimumBalanceForRentExemption => "getMinimumBalanceForRentExemption",
|
||||
RpcRequest::MinimumLedgerSlot => "minimumLedgerSlot",
|
||||
|
@ -4,7 +4,7 @@ use solana_sdk::{
|
||||
clock::{Epoch, Slot},
|
||||
fee_calculator::{FeeCalculator, FeeRateGovernor},
|
||||
pubkey::Pubkey,
|
||||
transaction::Result,
|
||||
transaction::{Result, TransactionError},
|
||||
};
|
||||
use std::{collections::HashMap, net::SocketAddr, str::FromStr};
|
||||
|
||||
@ -54,6 +54,12 @@ pub struct RpcKeyedAccount {
|
||||
pub account: RpcAccount,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct RpcSignatureResult {
|
||||
pub err: Option<TransactionError>,
|
||||
}
|
||||
|
||||
/// A duplicate representation of a Message for pretty JSON serialization
|
||||
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
@ -102,6 +108,8 @@ pub struct RpcContactInfo {
|
||||
pub tpu: Option<SocketAddr>,
|
||||
/// JSON RPC port
|
||||
pub rpc: Option<SocketAddr>,
|
||||
/// Software version
|
||||
pub version: Option<String>,
|
||||
}
|
||||
|
||||
/// Map of leader base58 identity pubkeys to the slot indices relative to the first epoch slot
|
||||
@ -186,3 +194,19 @@ pub struct RpcStorageTurn {
|
||||
pub blockhash: String,
|
||||
pub slot: Slot,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct RpcAccountBalance {
|
||||
pub address: String,
|
||||
pub lamports: u64,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct RpcSupply {
|
||||
pub total: u64,
|
||||
pub circulating: u64,
|
||||
pub non_circulating: u64,
|
||||
pub non_circulating_accounts: Vec<String>,
|
||||
}
|
||||
|
@ -471,7 +471,7 @@ impl SyncClient for ThinClient {
|
||||
) -> TransportResult<Option<transaction::Result<()>>> {
|
||||
let status = self
|
||||
.rpc_client()
|
||||
.get_signature_status(&signature.to_string())
|
||||
.get_signature_status(&signature)
|
||||
.map_err(|err| {
|
||||
io::Error::new(
|
||||
io::ErrorKind::Other,
|
||||
@ -488,7 +488,7 @@ impl SyncClient for ThinClient {
|
||||
) -> TransportResult<Option<transaction::Result<()>>> {
|
||||
let status = self
|
||||
.rpc_client()
|
||||
.get_signature_status_with_commitment(&signature.to_string(), commitment_config)
|
||||
.get_signature_status_with_commitment(&signature, commitment_config)
|
||||
.map_err(|err| {
|
||||
io::Error::new(
|
||||
io::ErrorKind::Other,
|
||||
|
@ -1,7 +1,7 @@
|
||||
[package]
|
||||
name = "solana-core"
|
||||
description = "Blockchain, Rebuilt for Scale"
|
||||
version = "1.0.13"
|
||||
version = "1.0.23"
|
||||
documentation = "https://docs.rs/solana"
|
||||
homepage = "https://solana.com/"
|
||||
readme = "../README.md"
|
||||
@ -36,41 +36,42 @@ log = "0.4.8"
|
||||
nix = "0.17.0"
|
||||
num_cpus = "1.0.0"
|
||||
num-traits = "0.2"
|
||||
rand = "0.6.5"
|
||||
rand_chacha = "0.1.1"
|
||||
rand = "0.7.0"
|
||||
rand_chacha = "0.2.2"
|
||||
rayon = "1.2.0"
|
||||
regex = "1.3.4"
|
||||
serde = "1.0.104"
|
||||
serde_derive = "1.0.103"
|
||||
serde_json = "1.0.46"
|
||||
solana-budget-program = { path = "../programs/budget", version = "1.0.13" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.0.13" }
|
||||
solana-client = { path = "../client", version = "1.0.13" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "1.0.13" }
|
||||
solana-faucet = { path = "../faucet", version = "1.0.13" }
|
||||
ed25519-dalek = "=1.0.0-pre.1"
|
||||
solana-ledger = { path = "../ledger", version = "1.0.13" }
|
||||
solana-logger = { path = "../logger", version = "1.0.13" }
|
||||
solana-merkle-tree = { path = "../merkle-tree", version = "1.0.13" }
|
||||
solana-metrics = { path = "../metrics", version = "1.0.13" }
|
||||
solana-measure = { path = "../measure", version = "1.0.13" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.0.13" }
|
||||
solana-chacha-cuda = { path = "../chacha-cuda", version = "1.0.13" }
|
||||
solana-perf = { path = "../perf", version = "1.0.13" }
|
||||
solana-runtime = { path = "../runtime", version = "1.0.13" }
|
||||
solana-sdk = { path = "../sdk", version = "1.0.13" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "1.0.13" }
|
||||
solana-storage-program = { path = "../programs/storage", version = "1.0.13" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "1.0.13" }
|
||||
solana-vote-signer = { path = "../vote-signer", version = "1.0.13" }
|
||||
solana-sys-tuner = { path = "../sys-tuner", version = "1.0.13" }
|
||||
solana-budget-program = { path = "../programs/budget", version = "1.0.23" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.0.23" }
|
||||
solana-client = { path = "../client", version = "1.0.23" }
|
||||
solana-transaction-status = { path = "../transaction-status", version = "1.0.23" }
|
||||
solana-faucet = { path = "../faucet", version = "1.0.23" }
|
||||
ed25519-dalek = "=1.0.0-pre.3"
|
||||
solana-ledger = { path = "../ledger", version = "1.0.23" }
|
||||
solana-logger = { path = "../logger", version = "1.0.23" }
|
||||
solana-merkle-tree = { path = "../merkle-tree", version = "1.0.23" }
|
||||
solana-metrics = { path = "../metrics", version = "1.0.23" }
|
||||
solana-measure = { path = "../measure", version = "1.0.23" }
|
||||
solana-net-utils = { path = "../net-utils", version = "1.0.23" }
|
||||
solana-chacha-cuda = { path = "../chacha-cuda", version = "1.0.23" }
|
||||
solana-perf = { path = "../perf", version = "1.0.23" }
|
||||
solana-runtime = { path = "../runtime", version = "1.0.23" }
|
||||
solana-sdk = { path = "../sdk", version = "1.0.23" }
|
||||
solana-stake-program = { path = "../programs/stake", version = "1.0.23" }
|
||||
solana-storage-program = { path = "../programs/storage", version = "1.0.23" }
|
||||
solana-version = { path = "../version", version = "1.0.23" }
|
||||
solana-vote-program = { path = "../programs/vote", version = "1.0.23" }
|
||||
solana-vote-signer = { path = "../vote-signer", version = "1.0.23" }
|
||||
solana-sys-tuner = { path = "../sys-tuner", version = "1.0.23" }
|
||||
tempfile = "3.1.0"
|
||||
thiserror = "1.0"
|
||||
tokio = "0.1"
|
||||
tokio-codec = "0.1"
|
||||
tokio-fs = "0.1"
|
||||
tokio-io = "0.1"
|
||||
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.0.13" }
|
||||
solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "1.0.23" }
|
||||
trees = "0.2.1"
|
||||
|
||||
[dev-dependencies]
|
||||
@ -104,3 +105,6 @@ name = "cluster_info"
|
||||
[[bench]]
|
||||
name = "chacha"
|
||||
required-features = ["chacha"]
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
||||
|
@ -5,6 +5,7 @@ extern crate test;
|
||||
use rand::{thread_rng, Rng};
|
||||
use solana_core::cluster_info::{ClusterInfo, Node};
|
||||
use solana_core::contact_info::ContactInfo;
|
||||
use solana_ledger::shred::{Shred, NONCE_SHRED_PAYLOAD_SIZE};
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
use solana_sdk::timing::timestamp;
|
||||
use std::collections::HashMap;
|
||||
@ -20,9 +21,8 @@ fn broadcast_shreds_bench(bencher: &mut Bencher) {
|
||||
let mut cluster_info = ClusterInfo::new_with_invalid_keypair(leader_info.info.clone());
|
||||
let socket = UdpSocket::bind("0.0.0.0:0").unwrap();
|
||||
|
||||
const SHRED_SIZE: usize = 1024;
|
||||
const NUM_SHREDS: usize = 32;
|
||||
let shreds = vec![vec![0; SHRED_SIZE]; NUM_SHREDS];
|
||||
let shreds = vec![vec![0; NONCE_SHRED_PAYLOAD_SIZE]; NUM_SHREDS];
|
||||
let seeds = vec![[0u8; 32]; NUM_SHREDS];
|
||||
let mut stakes = HashMap::new();
|
||||
const NUM_PEERS: usize = 200;
|
||||
|
@ -5,11 +5,11 @@ extern crate test;
|
||||
use solana_ledger::entry::{create_ticks, Entry};
|
||||
use solana_ledger::shred::{
|
||||
max_entries_per_n_shred, max_ticks_per_n_shreds, Shred, Shredder, RECOMMENDED_FEC_RATE,
|
||||
SIZE_OF_DATA_SHRED_PAYLOAD,
|
||||
SIZE_OF_NONCE_DATA_SHRED_PAYLOAD,
|
||||
};
|
||||
use solana_perf::test_tx;
|
||||
use solana_sdk::hash::Hash;
|
||||
use solana_sdk::signature::{Keypair, Signer};
|
||||
use solana_sdk::signature::Keypair;
|
||||
use std::sync::Arc;
|
||||
use test::Bencher;
|
||||
|
||||
@ -29,10 +29,11 @@ fn make_large_unchained_entries(txs_per_entry: u64, num_entries: u64) -> Vec<Ent
|
||||
#[bench]
|
||||
fn bench_shredder_ticks(bencher: &mut Bencher) {
|
||||
let kp = Arc::new(Keypair::new());
|
||||
let shred_size = SIZE_OF_DATA_SHRED_PAYLOAD;
|
||||
let shred_size = SIZE_OF_NONCE_DATA_SHRED_PAYLOAD;
|
||||
let num_shreds = ((1000 * 1000) + (shred_size - 1)) / shred_size;
|
||||
// ~1Mb
|
||||
let num_ticks = max_ticks_per_n_shreds(1) * num_shreds as u64;
|
||||
let num_ticks =
|
||||
max_ticks_per_n_shreds(1, Some(SIZE_OF_NONCE_DATA_SHRED_PAYLOAD)) * num_shreds as u64;
|
||||
let entries = create_ticks(num_ticks, 0, Hash::default());
|
||||
bencher.iter(|| {
|
||||
let shredder = Shredder::new(1, 0, RECOMMENDED_FEC_RATE, kp.clone(), 0, 0).unwrap();
|
||||
@ -43,10 +44,14 @@ fn bench_shredder_ticks(bencher: &mut Bencher) {
|
||||
#[bench]
|
||||
fn bench_shredder_large_entries(bencher: &mut Bencher) {
|
||||
let kp = Arc::new(Keypair::new());
|
||||
let shred_size = SIZE_OF_DATA_SHRED_PAYLOAD;
|
||||
let shred_size = SIZE_OF_NONCE_DATA_SHRED_PAYLOAD;
|
||||
let num_shreds = ((1000 * 1000) + (shred_size - 1)) / shred_size;
|
||||
let txs_per_entry = 128;
|
||||
let num_entries = max_entries_per_n_shred(&make_test_entry(txs_per_entry), num_shreds as u64);
|
||||
let num_entries = max_entries_per_n_shred(
|
||||
&make_test_entry(txs_per_entry),
|
||||
num_shreds as u64,
|
||||
Some(shred_size),
|
||||
);
|
||||
let entries = make_large_unchained_entries(txs_per_entry, num_entries);
|
||||
// 1Mb
|
||||
bencher.iter(|| {
|
||||
@ -58,10 +63,10 @@ fn bench_shredder_large_entries(bencher: &mut Bencher) {
|
||||
#[bench]
|
||||
fn bench_deshredder(bencher: &mut Bencher) {
|
||||
let kp = Arc::new(Keypair::new());
|
||||
let shred_size = SIZE_OF_DATA_SHRED_PAYLOAD;
|
||||
let shred_size = SIZE_OF_NONCE_DATA_SHRED_PAYLOAD;
|
||||
// ~10Mb
|
||||
let num_shreds = ((10000 * 1000) + (shred_size - 1)) / shred_size;
|
||||
let num_ticks = max_ticks_per_n_shreds(1) * num_shreds as u64;
|
||||
let num_ticks = max_ticks_per_n_shreds(1, Some(shred_size)) * num_shreds as u64;
|
||||
let entries = create_ticks(num_ticks, 0, Hash::default());
|
||||
let shredder = Shredder::new(1, 0, RECOMMENDED_FEC_RATE, kp, 0, 0).unwrap();
|
||||
let data_shreds = shredder.entries_to_shreds(&entries, true, 0).0;
|
||||
@ -73,7 +78,7 @@ fn bench_deshredder(bencher: &mut Bencher) {
|
||||
|
||||
#[bench]
|
||||
fn bench_deserialize_hdr(bencher: &mut Bencher) {
|
||||
let data = vec![0; SIZE_OF_DATA_SHRED_PAYLOAD];
|
||||
let data = vec![0; SIZE_OF_NONCE_DATA_SHRED_PAYLOAD];
|
||||
|
||||
let shred = Shred::new_from_data(2, 1, 1, Some(&data), true, true, 0, 0, 1);
|
||||
|
||||
|
@ -1980,13 +1980,23 @@ mod tests {
|
||||
{
|
||||
if let EncodedTransaction::Json(transaction) = transaction {
|
||||
if transaction.signatures[0] == success_signature.to_string() {
|
||||
assert_eq!(meta.unwrap().status, Ok(()));
|
||||
let meta = meta.unwrap();
|
||||
assert_eq!(meta.err, None);
|
||||
assert_eq!(meta.status, Ok(()));
|
||||
} else if transaction.signatures[0] == ix_error_signature.to_string() {
|
||||
let meta = meta.unwrap();
|
||||
assert_eq!(
|
||||
meta.unwrap().status,
|
||||
meta.err,
|
||||
Some(TransactionError::InstructionError(
|
||||
0,
|
||||
InstructionError::Custom(1)
|
||||
))
|
||||
);
|
||||
assert_eq!(
|
||||
meta.status,
|
||||
Err(TransactionError::InstructionError(
|
||||
0,
|
||||
InstructionError::CustomError(1)
|
||||
InstructionError::Custom(1)
|
||||
))
|
||||
);
|
||||
} else {
|
||||
|
@ -390,7 +390,7 @@ mod test {
|
||||
)));
|
||||
let socket = UdpSocket::bind("0.0.0.0:0").unwrap();
|
||||
let mut genesis_config = create_genesis_config(10_000).genesis_config;
|
||||
genesis_config.ticks_per_slot = max_ticks_per_n_shreds(num_shreds_per_slot) + 1;
|
||||
genesis_config.ticks_per_slot = max_ticks_per_n_shreds(num_shreds_per_slot, None) + 1;
|
||||
let bank0 = Arc::new(Bank::new(&genesis_config));
|
||||
(
|
||||
blockstore,
|
||||
@ -484,7 +484,11 @@ mod test {
|
||||
// Interrupting the slot should cause the unfinished_slot and stats to reset
|
||||
let num_shreds = 1;
|
||||
assert!(num_shreds < num_shreds_per_slot);
|
||||
let ticks1 = create_ticks(max_ticks_per_n_shreds(num_shreds), 0, genesis_config.hash());
|
||||
let ticks1 = create_ticks(
|
||||
max_ticks_per_n_shreds(num_shreds, None),
|
||||
0,
|
||||
genesis_config.hash(),
|
||||
);
|
||||
let receive_results = ReceiveResults {
|
||||
entries: ticks1.clone(),
|
||||
time_elapsed: Duration::new(2, 0),
|
||||
|
@ -12,8 +12,6 @@
|
||||
//! * layer 2 - Everyone else, if layer 1 is `2^10`, layer 2 should be able to fit `2^20` number of nodes.
|
||||
//!
|
||||
//! Bank needs to provide an interface for us to query the stake weight
|
||||
use crate::crds_value::CompressionType::*;
|
||||
use crate::crds_value::EpochIncompleteSlots;
|
||||
use crate::packet::limited_deserialize;
|
||||
use crate::streamer::{PacketReceiver, PacketSender};
|
||||
use crate::{
|
||||
@ -21,20 +19,29 @@ use crate::{
|
||||
crds_gossip::CrdsGossip,
|
||||
crds_gossip_error::CrdsGossipError,
|
||||
crds_gossip_pull::{CrdsFilter, CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS},
|
||||
crds_value::{self, CrdsData, CrdsValue, CrdsValueLabel, EpochSlots, SnapshotHash, Vote},
|
||||
crds_value::{
|
||||
self, CrdsData, CrdsValue, CrdsValueLabel, EpochSlots, SnapshotHash, Version, Vote,
|
||||
MAX_WALLCLOCK,
|
||||
},
|
||||
packet::{Packet, PACKET_DATA_SIZE},
|
||||
result::{Error, Result},
|
||||
sendmmsg::{multicast, send_mmsg},
|
||||
weighted_shuffle::{weighted_best, weighted_shuffle},
|
||||
};
|
||||
|
||||
use rand::distributions::{Distribution, WeightedIndex};
|
||||
use rand::SeedableRng;
|
||||
use rand_chacha::ChaChaRng;
|
||||
use solana_sdk::sanitize::{Sanitize, SanitizeError};
|
||||
|
||||
use bincode::{serialize, serialized_size};
|
||||
use compression::prelude::*;
|
||||
use core::cmp;
|
||||
use itertools::Itertools;
|
||||
use rayon::iter::IntoParallelIterator;
|
||||
use rayon::iter::ParallelIterator;
|
||||
use rayon::ThreadPool;
|
||||
use solana_ledger::{bank_forks::BankForks, staking_utils};
|
||||
use solana_measure::measure::Measure;
|
||||
use solana_measure::thread_mem_usage;
|
||||
use solana_metrics::{datapoint_debug, inc_new_counter_debug, inc_new_counter_error};
|
||||
use solana_net_utils::{
|
||||
@ -81,9 +88,6 @@ const MAX_PROTOCOL_HEADER_SIZE: u64 = 214;
|
||||
/// 128MB/PACKET_DATA_SIZE
|
||||
const MAX_GOSSIP_TRAFFIC: usize = 128_000_000 / PACKET_DATA_SIZE;
|
||||
|
||||
const NUM_BITS_PER_BYTE: u64 = 8;
|
||||
const MIN_SIZE_TO_COMPRESS_GZIP: u64 = 64;
|
||||
|
||||
/// Keep the number of snapshot hashes a node publishes under MAX_PROTOCOL_PAYLOAD_SIZE
|
||||
pub const MAX_SNAPSHOT_HASHES: usize = 16;
|
||||
|
||||
@ -95,6 +99,12 @@ pub enum ClusterInfoError {
|
||||
BadGossipAddress,
|
||||
}
|
||||
#[derive(Clone)]
pub struct DataBudget {
    bytes: usize,           // amount of bytes we have in the budget to send
    last_timestamp_ms: u64, // Last time that we upped the bytes count,
                            // used to detect when to up the bytes budget again
}
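DataBudget backs the new outbound_budget throttle on gossip traffic. The refill/spend helpers below are invented for illustration (they are not part of this change); they just show the token-bucket shape the two fields support:

// Hypothetical illustration only; method names are made up for the sketch.
struct Budget {
    bytes: usize,
    last_timestamp_ms: u64,
}

impl Budget {
    // Top the budget up once enough time has passed since the last refill.
    fn update(
        &mut self,
        now_ms: u64,
        refill_interval_ms: u64,
        bytes_per_interval: usize,
        max_bytes: usize,
    ) {
        if now_ms.saturating_sub(self.last_timestamp_ms) >= refill_interval_ms {
            self.bytes = (self.bytes + bytes_per_interval).min(max_bytes);
            self.last_timestamp_ms = now_ms;
        }
    }

    // Spend from the budget, returning whether the packet may be sent.
    fn take(&mut self, amount: usize) -> bool {
        if amount <= self.bytes {
            self.bytes -= amount;
            true
        } else {
            false
        }
    }
}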
#[derive(Clone)]
|
||||
pub struct ClusterInfo {
|
||||
/// The network
|
||||
pub gossip: CrdsGossip,
|
||||
@ -103,6 +113,8 @@ pub struct ClusterInfo {
|
||||
/// The network entrypoint
|
||||
entrypoint: Option<ContactInfo>,
|
||||
last_datapoint_submit: Instant,
|
||||
|
||||
outbound_budget: DataBudget,
|
||||
}
|
||||
|
||||
#[derive(Default, Clone)]
|
||||
@ -143,6 +155,15 @@ pub struct PruneData {
|
||||
pub wallclock: u64,
|
||||
}
|
||||
|
||||
impl Sanitize for PruneData {
    fn sanitize(&self) -> std::result::Result<(), SanitizeError> {
        if self.wallclock >= MAX_WALLCLOCK {
            return Err(SanitizeError::ValueOutOfBounds);
        }
        Ok(())
    }
}
|
||||
|
||||
impl Signable for PruneData {
|
||||
fn pubkey(&self) -> Pubkey {
|
||||
self.pubkey
|
||||
@ -188,6 +209,14 @@ pub fn make_accounts_hashes_message(
|
||||
Some(CrdsValue::new_signed(message, keypair))
|
||||
}
|
||||
|
||||
fn distance(a: u64, b: u64) -> u64 {
    if a > b {
        a - b
    } else {
        b - a
    }
}
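distance() exists so the liveness check further down can compare timestamps without assuming ours is the larger one; `timestamp() - p.wallclock` underflows (panics in debug builds, wraps in release) when a peer's clock runs slightly ahead. A tiny check of the symmetry:

#[test]
fn distance_is_symmetric() {
    let now: u64 = 1_000;
    let peer_wallclock: u64 = 1_050; // peer clock a little ahead of ours
    // `now - peer_wallclock` would underflow here; distance() stays well-defined either way.
    assert_eq!(distance(now, peer_wallclock), 50);
    assert_eq!(distance(peer_wallclock, now), 50);
}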
|
||||
|
||||
// TODO These messages should go through the gpu pipeline for spam filtering
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
#[allow(clippy::large_enum_variant)]
|
||||
@ -199,6 +228,31 @@ enum Protocol {
|
||||
PruneMessage(Pubkey, PruneData),
|
||||
}
|
||||
|
||||
impl Sanitize for Protocol {
    fn sanitize(&self) -> std::result::Result<(), SanitizeError> {
        match self {
            Protocol::PullRequest(filter, val) => {
                filter.sanitize()?;
                val.sanitize()
            }
            Protocol::PullResponse(_, val) => val.sanitize(),
            Protocol::PushMessage(_, val) => val.sanitize(),
            Protocol::PruneMessage(_, val) => val.sanitize(),
        }
    }
}
|
||||
|
||||
// Rating for pull requests
// A response table is generated as a
// 2-d table arranged by target nodes and a
// list of responses for that node,
// to/responses_index is a location in that table.
struct ResponseScore {
    to: usize,              // to, index of who the response is to
    responses_index: usize, // index into the list of responses for a given to
    score: u64,             // Relative score of the response
}
|
||||
|
||||
impl ClusterInfo {
|
||||
/// Without a valid keypair gossip will not function. Only useful for tests.
|
||||
pub fn new_with_invalid_keypair(contact_info: ContactInfo) -> Self {
|
||||
@ -211,9 +265,13 @@ impl ClusterInfo {
|
||||
keypair,
|
||||
entrypoint: None,
|
||||
last_datapoint_submit: Instant::now(),
|
||||
outbound_budget: DataBudget {
|
||||
bytes: 0,
|
||||
last_timestamp_ms: 0,
|
||||
},
|
||||
};
|
||||
let id = contact_info.id;
|
||||
me.gossip.set_self(&id);
|
||||
me.gossip.set_self(&contact_info.id);
|
||||
me.gossip.set_shred_version(contact_info.shred_version);
|
||||
me.insert_self(contact_info);
|
||||
me.push_self(&HashMap::new());
|
||||
me
|
||||
@ -266,61 +324,73 @@ impl ClusterInfo {
|
||||
let now = timestamp();
|
||||
let mut spy_nodes = 0;
|
||||
let mut archivers = 0;
|
||||
let mut different_shred_nodes = 0;
|
||||
let my_pubkey = self.my_data().id;
|
||||
let my_shred_version = self.my_data().shred_version;
|
||||
let nodes: Vec<_> = self
|
||||
.all_peers()
|
||||
.into_iter()
|
||||
.map(|(node, last_updated)| {
|
||||
.filter_map(|(node, last_updated)| {
|
||||
if Self::is_spy_node(&node) {
|
||||
spy_nodes += 1;
|
||||
} else if Self::is_archiver(&node) {
|
||||
archivers += 1;
|
||||
}
|
||||
fn addr_to_string(default_ip: &IpAddr, addr: &SocketAddr) -> String {
|
||||
if ContactInfo::is_valid_address(addr) {
|
||||
if &addr.ip() == default_ip {
|
||||
addr.port().to_string()
|
||||
} else {
|
||||
addr.to_string()
|
||||
}
|
||||
} else {
|
||||
"none".to_string()
|
||||
}
|
||||
}
|
||||
|
||||
let ip_addr = node.gossip.ip();
|
||||
format!(
|
||||
"{:15} {:2}| {:5} | {:44} | {:5}| {:5}| {:5}| {:5}| {:5}| {:5}| {:5}| {:5}| {:5}| {:5}| {}\n",
|
||||
if ContactInfo::is_valid_address(&node.gossip) {
|
||||
ip_addr.to_string()
|
||||
} else {
|
||||
"none".to_string()
|
||||
},
|
||||
if node.id == my_pubkey { "me" } else { "" }.to_string(),
|
||||
now.saturating_sub(last_updated),
|
||||
node.id.to_string(),
|
||||
addr_to_string(&ip_addr, &node.gossip),
|
||||
addr_to_string(&ip_addr, &node.tpu),
|
||||
addr_to_string(&ip_addr, &node.tpu_forwards),
|
||||
addr_to_string(&ip_addr, &node.tvu),
|
||||
addr_to_string(&ip_addr, &node.tvu_forwards),
|
||||
addr_to_string(&ip_addr, &node.repair),
|
||||
addr_to_string(&ip_addr, &node.serve_repair),
|
||||
addr_to_string(&ip_addr, &node.storage_addr),
|
||||
addr_to_string(&ip_addr, &node.rpc),
|
||||
addr_to_string(&ip_addr, &node.rpc_pubsub),
|
||||
node.shred_version,
|
||||
)
|
||||
let node_version = self.get_node_version(&node.id);
|
||||
if my_shred_version != 0 && (node.shred_version != 0 && node.shred_version != my_shred_version) {
|
||||
different_shred_nodes += 1;
|
||||
None
|
||||
} else {
|
||||
fn addr_to_string(default_ip: &IpAddr, addr: &SocketAddr) -> String {
|
||||
if ContactInfo::is_valid_address(addr) {
|
||||
if &addr.ip() == default_ip {
addr.port().to_string()
} else {
addr.to_string()
}
} else {
"none".to_string()
}
}
let ip_addr = node.gossip.ip();
Some(format!(
"{:15} {:2}| {:5} | {:44} |{:^15}| {:5}| {:5}| {:5}| {:5}| {:5}| {:5}| {:5}| {:5}| {:5}| {}\n",
if ContactInfo::is_valid_address(&node.gossip) {
ip_addr.to_string()
} else {
"none".to_string()
},
if node.id == my_pubkey { "me" } else { "" }.to_string(),
now.saturating_sub(last_updated),
node.id.to_string(),
if let Some(node_version) = node_version {
node_version.to_string()
} else {
"-".to_string()
},
addr_to_string(&ip_addr, &node.gossip),
addr_to_string(&ip_addr, &node.tpu),
addr_to_string(&ip_addr, &node.tpu_forwards),
addr_to_string(&ip_addr, &node.tvu),
addr_to_string(&ip_addr, &node.tvu_forwards),
addr_to_string(&ip_addr, &node.repair),
addr_to_string(&ip_addr, &node.serve_repair),
addr_to_string(&ip_addr, &node.rpc),
addr_to_string(&ip_addr, &node.rpc_pubsub),
node.shred_version,
))
}
})
.collect();

format!(
"IP Address |Age(ms)| Node identifier \
|Gossip| TPU |TPUfwd| TVU |TVUfwd|Repair|ServeR|Storag| RPC |PubSub|ShredVer\n\
------------------+-------+----------------------------------------------+\
------+------+------+------+------+------+------+------+------+------+--------\n\
| Version |Gossip| TPU |TPUfwd| TVU |TVUfwd|Repair|ServeR| RPC |PubSub|ShredVer\n\
------------------+-------+----------------------------------------------+---------------+\
------+------+------+------+------+------+------+------+------+--------\n\
{}\
Nodes: {}{}{}",
Nodes: {}{}{}{}",
nodes.join(""),
nodes.len() - spy_nodes - archivers,
if archivers > 0 {
@ -332,119 +402,29 @@ impl ClusterInfo {
format!("\nSpies: {}", spy_nodes)
} else {
"".to_string()
},
if different_shred_nodes > 0 {
format!(
"\nNodes with different shred version: {}",
different_shred_nodes
)
} else {
"".to_string()
}
)
}

pub fn compress_incomplete_slots(incomplete_slots: &BTreeSet<Slot>) -> EpochIncompleteSlots {
if !incomplete_slots.is_empty() {
let first_slot = incomplete_slots
.iter()
.next()
.expect("expected to find at least one slot");
let last_slot = incomplete_slots
.iter()
.next_back()
.expect("expected to find last slot");
let num_uncompressed_bits = last_slot.saturating_sub(*first_slot) + 1;
let num_uncompressed_bytes = if num_uncompressed_bits % NUM_BITS_PER_BYTE > 0 {
1
} else {
0
} + num_uncompressed_bits / NUM_BITS_PER_BYTE;
let mut uncompressed = vec![0u8; num_uncompressed_bytes as usize];
incomplete_slots.iter().for_each(|slot| {
let offset_from_first_slot = slot.saturating_sub(*first_slot);
let index = offset_from_first_slot / NUM_BITS_PER_BYTE;
let bit_index = offset_from_first_slot % NUM_BITS_PER_BYTE;
uncompressed[index as usize] |= 1 << bit_index;
});
if num_uncompressed_bytes >= MIN_SIZE_TO_COMPRESS_GZIP {
if let Ok(compressed) = uncompressed
.iter()
.cloned()
.encode(&mut GZipEncoder::new(), Action::Finish)
.collect::<std::result::Result<Vec<u8>, _>>()
{
return EpochIncompleteSlots {
first: *first_slot,
compression: GZip,
compressed_list: compressed,
};
}
} else {
return EpochIncompleteSlots {
first: *first_slot,
compression: Uncompressed,
compressed_list: uncompressed,
};
}
}
EpochIncompleteSlots::default()
}

fn bitmap_to_slot_list(first: Slot, bitmap: &[u8]) -> BTreeSet<Slot> {
let mut old_incomplete_slots: BTreeSet<Slot> = BTreeSet::new();
bitmap.iter().enumerate().for_each(|(i, val)| {
if *val != 0 {
(0..8).for_each(|bit_index| {
if (1 << bit_index & *val) != 0 {
let slot = first + i as u64 * NUM_BITS_PER_BYTE + bit_index as u64;
old_incomplete_slots.insert(slot);
}
})
}
});
old_incomplete_slots
}

pub fn decompress_incomplete_slots(slots: &EpochIncompleteSlots) -> BTreeSet<Slot> {
match slots.compression {
Uncompressed => Self::bitmap_to_slot_list(slots.first, &slots.compressed_list),
GZip => {
if let Ok(decompressed) = slots
.compressed_list
.iter()
.cloned()
.decode(&mut GZipDecoder::new())
.collect::<std::result::Result<Vec<u8>, _>>()
{
Self::bitmap_to_slot_list(slots.first, &decompressed)
} else {
BTreeSet::new()
}
}
BZip2 => {
if let Ok(decompressed) = slots
.compressed_list
.iter()
.cloned()
.decode(&mut BZip2Decoder::new())
.collect::<std::result::Result<Vec<u8>, _>>()
{
Self::bitmap_to_slot_list(slots.first, &decompressed)
} else {
BTreeSet::new()
}
}
}
}

pub fn push_epoch_slots(
&mut self,
id: Pubkey,
root: Slot,
_root: Slot,
min: Slot,
slots: BTreeSet<Slot>,
incomplete_slots: &BTreeSet<Slot>,
_slots: BTreeSet<Slot>,
_incomplete_slots: &BTreeSet<Slot>,
) {
let compressed = Self::compress_incomplete_slots(incomplete_slots);
let now = timestamp();
let entry = CrdsValue::new_signed(
CrdsData::EpochSlots(
0,
EpochSlots::new(id, root, min, slots, vec![compressed], now),
),
CrdsData::EpochSlots(0, EpochSlots::new(id, min, now)),
&self.keypair,
);
self.gossip
@ -581,6 +561,16 @@ impl ClusterInfo {
.map(|x| x.value.contact_info().unwrap())
}

pub fn get_node_version(&self, pubkey: &Pubkey) -> Option<solana_version::Version> {
self.gossip
.crds
.table
.get(&CrdsValueLabel::Version(*pubkey))
.map(|x| x.value.version())
.flatten()
.map(|version| version.version.clone())
}

/// all validators that have a valid rpc port regardless of `shred_version`.
pub fn all_rpc_peers(&self) -> Vec<ContactInfo> {
let me = self.my_data();
@ -1003,7 +993,7 @@ impl ClusterInfo {
let mut num_live_peers = 1i64;
peers.iter().for_each(|p| {
// A peer is considered live if they generated their contact info recently
if timestamp() - p.wallclock <= CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS {
if distance(timestamp(), p.wallclock) <= CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS {
num_live_peers += 1;
}
});
@ -1226,6 +1216,14 @@ impl ClusterInfo {
let mut last_contact_info_trace = timestamp();
let mut adopt_shred_version = obj.read().unwrap().my_data().shred_version == 0;
let recycler = PacketsRecycler::default();
{
let mut obj = obj.write().unwrap();
let message = CrdsValue::new_signed(
CrdsData::Version(Version::new(obj.id())),
&obj.keypair,
);
obj.push_message(message);
}
loop {
let start = timestamp();
thread_mem_usage::datapoint("solana-gossip");
@ -1282,6 +1280,10 @@ impl ClusterInfo {
entrypoint.shred_version, entrypoint.id
);
self_info.shred_version = entrypoint.shred_version;
obj.write()
.unwrap()
.gossip
.set_shred_version(entrypoint.shred_version);
obj.write().unwrap().insert_self(self_info);
adopt_shred_version = false;
}
@ -1321,6 +1323,7 @@ impl ClusterInfo {
let from_addr = packet.meta.addr();
limited_deserialize(&packet.data[..packet.meta.size])
.into_iter()
.filter(|r: &Protocol| r.sanitize().is_ok())
.for_each(|request| match request {
Protocol::PullRequest(filter, caller) => {
let start = allocated.get();
@ -1419,20 +1422,43 @@ impl ClusterInfo {
})
});
// process the collected pulls together
let rsp = Self::handle_pull_requests(me, recycler, gossip_pull_data);
let rsp = Self::handle_pull_requests(me, recycler, gossip_pull_data, stakes);
if let Some(rsp) = rsp {
let _ignore_disconnect = response_sender.send(rsp);
}
}

// A pull request carries an incoming bloom filter of the caller's entries; this node
// tries to send back the values it detects are missing from that filter.
fn handle_pull_requests(
me: &Arc<RwLock<Self>>,
recycler: &PacketsRecycler,
requests: Vec<PullData>,
stakes: &HashMap<Pubkey, u64>,
) -> Option<Packets> {
// split the requests into addrs and filters
let mut caller_and_filters = vec![];
let mut addrs = vec![];
let mut time = Measure::start("handle_pull_requests");
{
let mut cluster_info = me.write().unwrap();

let now = timestamp();
const INTERVAL_MS: u64 = 100;
// allow 50kBps per staked validator, epoch slots + votes ~= 1.5kB/slot ~= 4kB/s
const BYTES_PER_INTERVAL: usize = 5000;
const MAX_BUDGET_MULTIPLE: usize = 5; // allow budget build-up to 5x the interval default

if now - cluster_info.outbound_budget.last_timestamp_ms > INTERVAL_MS {
let len = std::cmp::max(stakes.len(), 2);
cluster_info.outbound_budget.bytes += len * BYTES_PER_INTERVAL;
cluster_info.outbound_budget.bytes = std::cmp::min(
cluster_info.outbound_budget.bytes,
MAX_BUDGET_MULTIPLE * len * BYTES_PER_INTERVAL,
);
cluster_info.outbound_budget.last_timestamp_ms = now;
}
}
for pull_data in requests {
caller_and_filters.push((pull_data.caller, pull_data.filter));
addrs.push(pull_data.from_addr);
@ -1444,30 +1470,101 @@ impl ClusterInfo {
.unwrap()
.gossip
.process_pull_requests(caller_and_filters, now);
let mut packets = Packets::new_with_recycler(recycler.clone(), 64, "handle_pull_requests");
pull_responses

// Filter out responses with bad destination addresses
let pull_responses: Vec<_> = pull_responses
.into_iter()
.zip(addrs.into_iter())
.for_each(|(response, from_addr)| {
if !from_addr.ip().is_unspecified() && from_addr.port() != 0 {
let len = response.len();
trace!("get updates since response {}", len);
inc_new_counter_debug!("cluster_info-pull_request-rsp", len);
Self::split_gossip_messages(response)
.into_iter()
.for_each(|payload| {
let protocol = Protocol::PullResponse(self_id, payload);
// The remote node may not know its public IP:PORT. Instead of responding to the caller's
// gossip addr, respond to the origin addr. The last origin addr is picked from the list of
// addrs.
packets
.packets
.push(Packet::from_data(&from_addr, protocol))
})
.filter_map(|(responses, from_addr)| {
if !from_addr.ip().is_unspecified()
&& from_addr.port() != 0
&& !responses.is_empty()
{
Some((responses, from_addr))
} else {
trace!("Dropping Gossip pull response, as destination is unknown");
None
}
});
})
.collect();

if pull_responses.is_empty() {
return None;
}

let mut stats: Vec<_> = pull_responses
.iter()
.enumerate()
.map(|(i, (responses, _from_addr))| {
let score: u64 = if stakes.get(&responses[0].pubkey()).is_some() {
2
} else {
1
};
responses
.iter()
.enumerate()
.map(|(j, _response)| ResponseScore {
to: i,
responses_index: j,
score,
})
.collect::<Vec<ResponseScore>>()
})
.flatten()
.collect();

stats.sort_by(|a, b| a.score.cmp(&b.score));
let weights: Vec<_> = stats.iter().map(|stat| stat.score).collect();

let seed = [48u8; 32];
let rng = &mut ChaChaRng::from_seed(seed);
let weighted_index = WeightedIndex::new(weights).unwrap();

let mut packets = Packets::new_with_recycler(recycler.clone(), 64, "handle_pull_requests");
let mut total_bytes = 0;
let outbound_budget = me.read().unwrap().outbound_budget.bytes;
let mut sent = HashSet::new();
while sent.len() < stats.len() {
let index = weighted_index.sample(rng);
if sent.contains(&index) {
continue;
}
sent.insert(index);
let stat = &stats[index];
let from_addr = pull_responses[stat.to].1;
let response = pull_responses[stat.to].0[stat.responses_index].clone();
let protocol = Protocol::PullResponse(self_id, vec![response]);
packets
.packets
.push(Packet::from_data(&from_addr, protocol));
let len = packets.packets.len();
total_bytes += packets.packets[len - 1].meta.size;

if total_bytes > outbound_budget {
inc_new_counter_info!("gossip_pull_request-no_budget", 1);
break;
}
}
{
let mut cluster_info = me.write().unwrap();
cluster_info.outbound_budget.bytes = cluster_info
.outbound_budget
.bytes
.saturating_sub(total_bytes);
}
time.stop();
inc_new_counter_info!("gossip_pull_request-sent_requests", sent.len());
inc_new_counter_info!(
"gossip_pull_request-dropped_requests",
stats.len() - sent.len()
);
debug!(
"handle_pull_requests: {} sent: {} total: {} total_bytes: {}",
time,
sent.len(),
stats.len(),
total_bytes
);
if packets.is_empty() {
return None;
}
@ -1577,7 +1674,10 @@ impl ClusterInfo {
requests.push(more_reqs)
}
if num_requests >= MAX_GOSSIP_TRAFFIC {
warn!("Too much gossip traffic, ignoring some messages");
warn!(
"Too much gossip traffic, ignoring some messages (requests={}, max requests={})",
num_requests, MAX_GOSSIP_TRAFFIC
);
}
let epoch_ms;
let stakes: HashMap<_, _> = match bank_forks {
@ -1645,39 +1745,39 @@ impl ClusterInfo {
.unwrap()
}

pub fn gossip_contact_info(id: &Pubkey, gossip: SocketAddr) -> ContactInfo {
pub fn gossip_contact_info(id: &Pubkey, gossip: SocketAddr, shred_version: u16) -> ContactInfo {
ContactInfo {
id: *id,
gossip,
wallclock: timestamp(),
shred_version,
..ContactInfo::default()
}
}

pub fn spy_contact_info(id: &Pubkey) -> ContactInfo {
let dummy_addr = socketaddr_any!();

Self::gossip_contact_info(id, dummy_addr)
}

/// An alternative to Spy Node that has a valid gossip address and fully participates in Gossip.
pub fn gossip_node(
id: &Pubkey,
gossip_addr: &SocketAddr,
shred_version: u16,
) -> (ContactInfo, UdpSocket, Option<TcpListener>) {
let bind_ip_addr = IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0));
let (port, (gossip_socket, ip_echo)) =
Node::get_gossip_port(gossip_addr, VALIDATOR_PORT_RANGE, bind_ip_addr);
let contact_info = Self::gossip_contact_info(id, SocketAddr::new(gossip_addr.ip(), port));
let contact_info =
Self::gossip_contact_info(id, SocketAddr::new(gossip_addr.ip(), port), shred_version);

(contact_info, gossip_socket, Some(ip_echo))
}

/// A Node with dummy ports to spy on gossip via pull requests
pub fn spy_node(id: &Pubkey) -> (ContactInfo, UdpSocket, Option<TcpListener>) {
pub fn spy_node(
id: &Pubkey,
shred_version: u16,
) -> (ContactInfo, UdpSocket, Option<TcpListener>) {
let bind_ip_addr = IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0));
let (_, gossip_socket) = bind_in_range(bind_ip_addr, VALIDATOR_PORT_RANGE).unwrap();
let contact_info = Self::spy_contact_info(id);
let contact_info = Self::gossip_contact_info(id, socketaddr_any!(), shred_version);

(contact_info, gossip_socket, None)
}
@ -1950,7 +2050,7 @@ impl Node {

fn report_time_spent(label: &str, time: &Duration, extra: &str) {
let count = duration_as_ms(time);
if count > 5 {
if count > 100 {
info!("{} took: {} ms {}", label, count, extra);
}
}
@ -1969,10 +2069,10 @@ mod tests {
#[test]
fn test_gossip_node() {
// check that gossip nodes always show up as spies
let (node, _, _) = ClusterInfo::spy_node(&Pubkey::new_rand());
let (node, _, _) = ClusterInfo::spy_node(&Pubkey::new_rand(), 0);
assert!(ClusterInfo::is_spy_node(&node));
let (node, _, _) =
ClusterInfo::gossip_node(&Pubkey::new_rand(), &"1.1.1.1:1111".parse().unwrap());
ClusterInfo::gossip_node(&Pubkey::new_rand(), &"1.1.1.1:1111".parse().unwrap(), 0);
assert!(ClusterInfo::is_spy_node(&node));
}

@ -1980,7 +2080,7 @@ mod tests {
fn test_cluster_spy_gossip() {
//check that gossip doesn't try to push to invalid addresses
let node = Node::new_localhost();
let (spy, _, _) = ClusterInfo::spy_node(&Pubkey::new_rand());
let (spy, _, _) = ClusterInfo::spy_node(&Pubkey::new_rand(), 0);
let cluster_info = Arc::new(RwLock::new(ClusterInfo::new_with_invalid_keypair(
node.info,
)));
@ -2369,14 +2469,7 @@ mod tests {
}
let value = CrdsValue::new_unsigned(CrdsData::EpochSlots(
0,
EpochSlots {
from: Pubkey::default(),
root: 0,
lowest: 0,
slots: btree_slots,
stash: vec![],
wallclock: 0,
},
EpochSlots::new(Pubkey::default(), 0, 0),
));
test_split_messages(value);
}
@ -2388,39 +2481,19 @@ mod tests {
let payload: Vec<CrdsValue> = vec![];
let vec_size = serialized_size(&payload).unwrap();
let desired_size = MAX_PROTOCOL_PAYLOAD_SIZE - vec_size;
let mut value = CrdsValue::new_unsigned(CrdsData::EpochSlots(
0,
EpochSlots {
from: Pubkey::default(),
root: 0,
lowest: 0,
slots: BTreeSet::new(),
stash: vec![],
wallclock: 0,
},
));
let mut value = CrdsValue::new_unsigned(CrdsData::SnapshotHashes(SnapshotHash {
from: Pubkey::default(),
hashes: vec![],
wallclock: 0,
}));

let mut i = 0;
while value.size() <= desired_size {
let slots = (0..i).collect::<BTreeSet<_>>();
if slots.len() > 200 {
panic!(
"impossible to match size: last {:?} vs desired {:?}",
serialized_size(&value).unwrap(),
desired_size
);
}
value.data = CrdsData::EpochSlots(
0,
EpochSlots {
from: Pubkey::default(),
root: 0,
lowest: 0,
slots,
stash: vec![],
wallclock: 0,
},
);
value.data = CrdsData::SnapshotHashes(SnapshotHash {
from: Pubkey::default(),
hashes: vec![(0, Hash::default()); i],
wallclock: 0,
});
i += 1;
}
let split = ClusterInfo::split_gossip_messages(vec![value.clone()]);
@ -2550,11 +2623,9 @@ mod tests {
node_keypair,
);
for i in 0..10 {
let mut peer_root = 5;
let mut peer_lowest = 0;
if i >= 5 {
// make these invalid for the upcoming repair request
peer_root = 15;
peer_lowest = 10;
}
let other_node_pubkey = Pubkey::new_rand();
@ -2562,14 +2633,7 @@ mod tests {
cluster_info.insert_info(other_node.clone());
let value = CrdsValue::new_unsigned(CrdsData::EpochSlots(
0,
EpochSlots::new(
other_node_pubkey,
peer_root,
peer_lowest,
BTreeSet::new(),
vec![],
timestamp(),
),
EpochSlots::new(other_node_pubkey, peer_lowest, timestamp()),
));
let _ = cluster_info.gossip.crds.insert(value, timestamp());
}
@ -2618,6 +2682,14 @@ mod tests {
assert_eq!(MAX_PROTOCOL_HEADER_SIZE, max_protocol_size);
}

#[test]
fn test_protocol_sanitize() {
let mut pd = PruneData::default();
pd.wallclock = MAX_WALLCLOCK;
let msg = Protocol::PruneMessage(Pubkey::default(), pd);
assert_eq!(msg.sanitize(), Err(SanitizeError::ValueOutOfBounds));
}

// computes the maximum size for pull request blooms
fn max_bloom_size() -> usize {
let filter_size = serialized_size(&CrdsFilter::default())
@ -2630,38 +2702,4 @@ mod tests {
serialized_size(&protocol).expect("unable to serialize gossip protocol") as usize;
PACKET_DATA_SIZE - (protocol_size - filter_size)
}

#[test]
fn test_compress_incomplete_slots() {
let mut incomplete_slots: BTreeSet<Slot> = BTreeSet::new();

assert_eq!(
EpochIncompleteSlots::default(),
ClusterInfo::compress_incomplete_slots(&incomplete_slots)
);

incomplete_slots.insert(100);
let compressed = ClusterInfo::compress_incomplete_slots(&incomplete_slots);
assert_eq!(100, compressed.first);
let decompressed = ClusterInfo::decompress_incomplete_slots(&compressed);
assert_eq!(incomplete_slots, decompressed);

incomplete_slots.insert(104);
let compressed = ClusterInfo::compress_incomplete_slots(&incomplete_slots);
assert_eq!(100, compressed.first);
let decompressed = ClusterInfo::decompress_incomplete_slots(&compressed);
assert_eq!(incomplete_slots, decompressed);

incomplete_slots.insert(80);
let compressed = ClusterInfo::compress_incomplete_slots(&incomplete_slots);
assert_eq!(80, compressed.first);
let decompressed = ClusterInfo::decompress_incomplete_slots(&compressed);
assert_eq!(incomplete_slots, decompressed);

incomplete_slots.insert(10000);
let compressed = ClusterInfo::compress_incomplete_slots(&incomplete_slots);
assert_eq!(80, compressed.first);
let decompressed = ClusterInfo::decompress_incomplete_slots(&compressed);
assert_eq!(incomplete_slots, decompressed);
}
}

@ -1,4 +1,5 @@
use crate::consensus::VOTE_THRESHOLD_SIZE;
use solana_ledger::blockstore::Blockstore;
use solana_measure::measure::Measure;
use solana_metrics::inc_new_counter_info;
use solana_runtime::bank::Bank;
@ -13,9 +14,11 @@ use std::{
time::Duration,
};

pub type BlockCommitmentArray = [u64; MAX_LOCKOUT_HISTORY + 1];

#[derive(Clone, Debug, Default, Eq, PartialEq, Serialize, Deserialize)]
pub struct BlockCommitment {
pub commitment: [u64; MAX_LOCKOUT_HISTORY],
pub commitment: BlockCommitmentArray,
}

impl BlockCommitment {
@ -28,17 +31,27 @@ impl BlockCommitment {
assert!(confirmation_count > 0 && confirmation_count <= MAX_LOCKOUT_HISTORY);
self.commitment[confirmation_count - 1]
}

pub fn increase_rooted_stake(&mut self, stake: u64) {
self.commitment[MAX_LOCKOUT_HISTORY] += stake;
}

pub fn get_rooted_stake(&self) -> u64 {
self.commitment[MAX_LOCKOUT_HISTORY]
}

#[cfg(test)]
pub(crate) fn new(commitment: [u64; MAX_LOCKOUT_HISTORY]) -> Self {
pub(crate) fn new(commitment: BlockCommitmentArray) -> Self {
Self { commitment }
}
}

#[derive(Default)]
pub struct BlockCommitmentCache {
block_commitment: HashMap<Slot, BlockCommitment>,
largest_confirmed_root: Slot,
total_stake: u64,
bank: Arc<Bank>,
blockstore: Arc<Blockstore>,
root: Slot,
}

@ -59,22 +72,41 @@ impl std::fmt::Debug for BlockCommitmentCache {
impl BlockCommitmentCache {
pub fn new(
block_commitment: HashMap<Slot, BlockCommitment>,
largest_confirmed_root: Slot,
total_stake: u64,
bank: Arc<Bank>,
blockstore: Arc<Blockstore>,
root: Slot,
) -> Self {
Self {
block_commitment,
largest_confirmed_root,
total_stake,
bank,
blockstore,
root,
}
}

pub fn default_with_blockstore(blockstore: Arc<Blockstore>) -> Self {
Self {
block_commitment: HashMap::default(),
largest_confirmed_root: Slot::default(),
total_stake: u64::default(),
bank: Arc::new(Bank::default()),
blockstore,
root: Slot::default(),
}
}

pub fn get_block_commitment(&self, slot: Slot) -> Option<&BlockCommitment> {
self.block_commitment.get(&slot)
}

pub fn largest_confirmed_root(&self) -> Slot {
self.largest_confirmed_root
}

pub fn total_stake(&self) -> u64 {
self.total_stake
}
@ -110,16 +142,29 @@ impl BlockCommitmentCache {
0
})
}

pub fn is_confirmed_rooted(&self, slot: Slot) -> bool {
slot <= self.largest_confirmed_root()
&& (self.blockstore.is_root(slot) || self.bank.status_cache_ancestors().contains(&slot))
}

#[cfg(test)]
pub fn new_for_tests() -> Self {
pub fn new_for_tests_with_blockstore(blockstore: Arc<Blockstore>) -> Self {
let mut block_commitment: HashMap<Slot, BlockCommitment> = HashMap::new();
block_commitment.insert(0, BlockCommitment::default());
Self {
block_commitment,
blockstore,
total_stake: 42,
..Self::default()
largest_confirmed_root: Slot::default(),
bank: Arc::new(Bank::default()),
root: Slot::default(),
}
}

pub(crate) fn set_get_largest_confirmed_root(&mut self, root: Slot) {
self.largest_confirmed_root = root;
}
}

pub struct CommitmentAggregationData {
@ -138,6 +183,18 @@ impl CommitmentAggregationData {
}
}

fn get_largest_confirmed_root(mut rooted_stake: Vec<(Slot, u64)>, total_stake: u64) -> Slot {
rooted_stake.sort_by(|a, b| a.0.cmp(&b.0).reverse());
let mut stake_sum = 0;
for (root, stake) in rooted_stake {
stake_sum += stake;
if (stake_sum as f64 / total_stake as f64) > VOTE_THRESHOLD_SIZE {
return root;
}
}
0
}

pub struct AggregateCommitmentService {
t_commitment: JoinHandle<()>,
}
@ -195,12 +252,18 @@ impl AggregateCommitmentService {
}

let mut aggregate_commitment_time = Measure::start("aggregate-commitment-ms");
let block_commitment = Self::aggregate_commitment(&ancestors, &aggregation_data.bank);
let (block_commitment, rooted_stake) =
Self::aggregate_commitment(&ancestors, &aggregation_data.bank);

let largest_confirmed_root =
get_largest_confirmed_root(rooted_stake, aggregation_data.total_staked);

let mut new_block_commitment = BlockCommitmentCache::new(
block_commitment,
largest_confirmed_root,
aggregation_data.total_staked,
aggregation_data.bank,
block_commitment_cache.read().unwrap().blockstore.clone(),
aggregation_data.root,
);

@ -215,7 +278,10 @@ impl AggregateCommitmentService {
}
}

pub fn aggregate_commitment(ancestors: &[Slot], bank: &Bank) -> HashMap<Slot, BlockCommitment> {
pub fn aggregate_commitment(
ancestors: &[Slot],
bank: &Bank,
) -> (HashMap<Slot, BlockCommitment>, Vec<(Slot, u64)>) {
assert!(!ancestors.is_empty());

// Check ancestors is sorted
@ -224,6 +290,7 @@ impl AggregateCommitmentService {
}

let mut commitment = HashMap::new();
let mut rooted_stake: Vec<(Slot, u64)> = Vec::new();
for (_, (lamports, account)) in bank.vote_accounts().into_iter() {
if lamports == 0 {
continue;
@ -236,17 +303,19 @@ impl AggregateCommitmentService {
let vote_state = vote_state.unwrap();
Self::aggregate_commitment_for_vote_account(
&mut commitment,
&mut rooted_stake,
&vote_state,
ancestors,
lamports,
);
}

commitment
(commitment, rooted_stake)
}

fn aggregate_commitment_for_vote_account(
commitment: &mut HashMap<Slot, BlockCommitment>,
rooted_stake: &mut Vec<(Slot, u64)>,
vote_state: &VoteState,
ancestors: &[Slot],
lamports: u64,
@ -259,12 +328,13 @@ impl AggregateCommitmentService {
commitment
.entry(*a)
.or_insert_with(BlockCommitment::default)
.increase_confirmation_stake(MAX_LOCKOUT_HISTORY, lamports);
.increase_rooted_stake(lamports);
} else {
ancestors_index = i;
break;
}
}
rooted_stake.push((root, lamports));
}

for vote in &vote_state.votes {
@ -291,6 +361,7 @@ impl AggregateCommitmentService {
mod tests {
use super::*;
use crate::genesis_utils::{create_genesis_config, GenesisConfigInfo};
use solana_ledger::get_tmp_ledger_path;
use solana_sdk::pubkey::Pubkey;
use solana_stake_program::stake_state;
use solana_vote_program::vote_state::{self, VoteStateVersions};
@ -308,6 +379,8 @@ mod tests {
#[test]
fn test_get_confirmations() {
let bank = Arc::new(Bank::default());
let ledger_path = get_tmp_ledger_path!();
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
// Build BlockCommitmentCache with votes at depths 0 and 1 for 2 slots
let mut cache0 = BlockCommitment::default();
cache0.increase_confirmation_stake(1, 5);
@ -325,7 +398,8 @@ mod tests {
block_commitment.entry(0).or_insert(cache0.clone());
block_commitment.entry(1).or_insert(cache1.clone());
block_commitment.entry(2).or_insert(cache2.clone());
let block_commitment_cache = BlockCommitmentCache::new(block_commitment, 50, bank, 0);
let block_commitment_cache =
BlockCommitmentCache::new(block_commitment, 0, 50, bank, blockstore, 0);

assert_eq!(block_commitment_cache.get_confirmation_count(0), Some(2));
assert_eq!(block_commitment_cache.get_confirmation_count(1), Some(1));
@ -333,17 +407,68 @@ mod tests {
assert_eq!(block_commitment_cache.get_confirmation_count(3), None,);
}

#[test]
fn test_is_confirmed_rooted() {
let bank = Arc::new(Bank::default());
let ledger_path = get_tmp_ledger_path!();
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
blockstore.set_roots(&[0, 1]).unwrap();
// Build BlockCommitmentCache with rooted slots
let mut cache0 = BlockCommitment::default();
cache0.increase_rooted_stake(50);
let mut cache1 = BlockCommitment::default();
cache1.increase_rooted_stake(40);
let mut cache2 = BlockCommitment::default();
cache2.increase_rooted_stake(20);

let mut block_commitment = HashMap::new();
block_commitment.entry(1).or_insert(cache0.clone());
block_commitment.entry(2).or_insert(cache1.clone());
block_commitment.entry(3).or_insert(cache2.clone());
let largest_confirmed_root = 1;
let block_commitment_cache = BlockCommitmentCache::new(
block_commitment,
largest_confirmed_root,
50,
bank,
blockstore,
0,
);

assert!(block_commitment_cache.is_confirmed_rooted(0));
assert!(block_commitment_cache.is_confirmed_rooted(1));
assert!(!block_commitment_cache.is_confirmed_rooted(2));
assert!(!block_commitment_cache.is_confirmed_rooted(3));
}

#[test]
fn test_get_largest_confirmed_root() {
assert_eq!(get_largest_confirmed_root(vec![], 10), 0);
let mut rooted_stake = vec![];
rooted_stake.push((0, 5));
rooted_stake.push((1, 5));
assert_eq!(get_largest_confirmed_root(rooted_stake, 10), 0);
let mut rooted_stake = vec![];
rooted_stake.push((1, 5));
rooted_stake.push((0, 10));
rooted_stake.push((2, 5));
rooted_stake.push((1, 4));
assert_eq!(get_largest_confirmed_root(rooted_stake, 10), 1);
}

#[test]
fn test_aggregate_commitment_for_vote_account_1() {
let ancestors = vec![3, 4, 5, 7, 9, 11];
let mut commitment = HashMap::new();
let mut rooted_stake = vec![];
let lamports = 5;
let mut vote_state = VoteState::default();

let root = ancestors.last().unwrap();
vote_state.root_slot = Some(*root);
let root = ancestors.last().unwrap().clone();
vote_state.root_slot = Some(root);
AggregateCommitmentService::aggregate_commitment_for_vote_account(
&mut commitment,
&mut rooted_stake,
&vote_state,
&ancestors,
lamports,
@ -351,15 +476,17 @@ mod tests {

for a in ancestors {
let mut expected = BlockCommitment::default();
expected.increase_confirmation_stake(MAX_LOCKOUT_HISTORY, lamports);
expected.increase_rooted_stake(lamports);
assert_eq!(*commitment.get(&a).unwrap(), expected);
}
assert_eq!(rooted_stake[0], (root, lamports));
}

#[test]
fn test_aggregate_commitment_for_vote_account_2() {
let ancestors = vec![3, 4, 5, 7, 9, 11];
let mut commitment = HashMap::new();
let mut rooted_stake = vec![];
let lamports = 5;
let mut vote_state = VoteState::default();

@ -368,6 +495,7 @@ mod tests {
vote_state.process_slot_vote_unchecked(*ancestors.last().unwrap());
AggregateCommitmentService::aggregate_commitment_for_vote_account(
&mut commitment,
&mut rooted_stake,
&vote_state,
&ancestors,
lamports,
@ -376,7 +504,7 @@ mod tests {
for a in ancestors {
if a <= root {
let mut expected = BlockCommitment::default();
expected.increase_confirmation_stake(MAX_LOCKOUT_HISTORY, lamports);
expected.increase_rooted_stake(lamports);
assert_eq!(*commitment.get(&a).unwrap(), expected);
} else {
let mut expected = BlockCommitment::default();
@ -384,12 +512,14 @@ mod tests {
assert_eq!(*commitment.get(&a).unwrap(), expected);
}
}
assert_eq!(rooted_stake[0], (root, lamports));
}

#[test]
fn test_aggregate_commitment_for_vote_account_3() {
let ancestors = vec![3, 4, 5, 7, 9, 10, 11];
let mut commitment = HashMap::new();
let mut rooted_stake = vec![];
let lamports = 5;
let mut vote_state = VoteState::default();

@ -400,6 +530,7 @@ mod tests {
vote_state.process_slot_vote_unchecked(ancestors[6]);
AggregateCommitmentService::aggregate_commitment_for_vote_account(
&mut commitment,
&mut rooted_stake,
&vote_state,
&ancestors,
lamports,
@ -408,7 +539,7 @@ mod tests {
for (i, a) in ancestors.iter().enumerate() {
if *a <= root {
let mut expected = BlockCommitment::default();
expected.increase_confirmation_stake(MAX_LOCKOUT_HISTORY, lamports);
expected.increase_rooted_stake(lamports);
assert_eq!(*commitment.get(&a).unwrap(), expected);
} else if i <= 4 {
let mut expected = BlockCommitment::default();
@ -420,6 +551,7 @@ mod tests {
assert_eq!(*commitment.get(&a).unwrap(), expected);
}
}
assert_eq!(rooted_stake[0], (root, lamports));
}

#[test]
@ -429,6 +561,8 @@ mod tests {
mut genesis_config, ..
} = create_genesis_config(10_000);

let rooted_stake_amount = 40;

let sk1 = Pubkey::new_rand();
let pk1 = Pubkey::new_rand();
let mut vote_account1 = vote_state::create_account(&pk1, &Pubkey::new_rand(), 0, 100);
@ -439,12 +573,36 @@ mod tests {
let mut vote_account2 = vote_state::create_account(&pk2, &Pubkey::new_rand(), 0, 50);
let stake_account2 =
stake_state::create_account(&sk2, &pk2, &vote_account2, &genesis_config.rent, 50);
let sk3 = Pubkey::new_rand();
let pk3 = Pubkey::new_rand();
let mut vote_account3 = vote_state::create_account(&pk3, &Pubkey::new_rand(), 0, 1);
let stake_account3 = stake_state::create_account(
&sk3,
&pk3,
&vote_account3,
&genesis_config.rent,
rooted_stake_amount,
);
let sk4 = Pubkey::new_rand();
let pk4 = Pubkey::new_rand();
let mut vote_account4 = vote_state::create_account(&pk4, &Pubkey::new_rand(), 0, 1);
let stake_account4 = stake_state::create_account(
&sk4,
&pk4,
&vote_account4,
&genesis_config.rent,
rooted_stake_amount,
);

genesis_config.accounts.extend(vec![
(pk1, vote_account1.clone()),
(sk1, stake_account1),
(pk2, vote_account2.clone()),
(sk2, stake_account2),
(pk3, vote_account3.clone()),
(sk3, stake_account3),
(pk4, vote_account4.clone()),
(sk4, stake_account4),
]);

// Create bank
@ -464,7 +622,20 @@ mod tests {
VoteState::to(&versioned, &mut vote_account2).unwrap();
bank.store_account(&pk2, &vote_account2);

let commitment = AggregateCommitmentService::aggregate_commitment(&ancestors, &bank);
let mut vote_state3 = VoteState::from(&vote_account3).unwrap();
vote_state3.root_slot = Some(1);
let versioned = VoteStateVersions::Current(Box::new(vote_state3));
VoteState::to(&versioned, &mut vote_account3).unwrap();
bank.store_account(&pk3, &vote_account3);

let mut vote_state4 = VoteState::from(&vote_account4).unwrap();
vote_state4.root_slot = Some(2);
let versioned = VoteStateVersions::Current(Box::new(vote_state4));
VoteState::to(&versioned, &mut vote_account4).unwrap();
bank.store_account(&pk4, &vote_account4);

let (commitment, rooted_stake) =
AggregateCommitmentService::aggregate_commitment(&ancestors, &bank);

for a in ancestors {
if a <= 3 {
@ -488,5 +659,7 @@ mod tests {
assert!(commitment.get(&a).is_none());
}
}
assert_eq!(rooted_stake.len(), 2);
assert_eq!(get_largest_confirmed_root(rooted_stake, 100), 1)
}
}

@ -621,7 +621,7 @@ pub mod test {
}
let vote = tower.new_vote_from_bank(&bank, &my_vote_pubkey).0;
if let Some(new_root) = tower.record_bank_vote(vote) {
ReplayStage::handle_new_root(new_root, bank_forks, progress, &None);
ReplayStage::handle_new_root(new_root, bank_forks, progress, &None, None);
}

// Mark the vote for this bank under this node's pubkey so it will be

@ -1,6 +1,8 @@
use crate::crds_value::MAX_WALLCLOCK;
use solana_sdk::pubkey::Pubkey;
#[cfg(test)]
use solana_sdk::rpc_port;
use solana_sdk::sanitize::{Sanitize, SanitizeError};
#[cfg(test)]
use solana_sdk::signature::{Keypair, Signer};
use solana_sdk::timing::timestamp;
@ -37,6 +39,15 @@ pub struct ContactInfo {
pub shred_version: u16,
}

impl Sanitize for ContactInfo {
fn sanitize(&self) -> std::result::Result<(), SanitizeError> {
if self.wallclock >= MAX_WALLCLOCK {
return Err(SanitizeError::ValueOutOfBounds);
}
Ok(())
}
}

impl Ord for ContactInfo {
fn cmp(&self, other: &Self) -> Ordering {
self.id.cmp(&other.id)
@ -314,4 +325,12 @@ mod tests {
ci.rpc = socketaddr!("127.0.0.1:234");
assert!(ci.valid_client_facing_addr().is_some());
}

#[test]
fn test_sanitize() {
let mut ci = ContactInfo::default();
assert_eq!(ci.sanitize(), Ok(()));
ci.wallclock = MAX_WALLCLOCK;
assert_eq!(ci.sanitize(), Err(SanitizeError::ValueOutOfBounds));
}
}

@ -20,6 +20,7 @@ pub const CRDS_GOSSIP_DEFAULT_BLOOM_ITEMS: usize = 500;
pub struct CrdsGossip {
pub crds: Crds,
pub id: Pubkey,
pub shred_version: u16,
pub push: CrdsGossipPush,
pub pull: CrdsGossipPull,
}
@ -29,6 +30,7 @@ impl Default for CrdsGossip {
CrdsGossip {
crds: Crds::default(),
id: Pubkey::default(),
shred_version: 0,
push: CrdsGossipPush::default(),
pull: CrdsGossipPull::default(),
}
@ -39,6 +41,9 @@ impl CrdsGossip {
pub fn set_self(&mut self, id: &Pubkey) {
self.id = *id;
}
pub fn set_shred_version(&mut self, shred_version: u16) {
self.shred_version = shred_version;
}

/// process a push message to the network
pub fn process_push_message(
@ -122,6 +127,7 @@ impl CrdsGossip {
&self.crds,
stakes,
&self.id,
self.shred_version,
self.pull.pull_request_time.len(),
CRDS_GOSSIP_NUM_ACTIVE,
)
@ -134,8 +140,14 @@ impl CrdsGossip {
stakes: &HashMap<Pubkey, u64>,
bloom_size: usize,
) -> Result<(Pubkey, Vec<CrdsFilter>, CrdsValue), CrdsGossipError> {
self.pull
.new_pull_request(&self.crds, &self.id, now, stakes, bloom_size)
self.pull.new_pull_request(
&self.crds,
&self.id,
self.shred_version,
now,
stakes,
bloom_size,
)
}

/// time when a request to `from` was initiated

@ -14,7 +14,6 @@ use crate::crds::Crds;
use crate::crds_gossip::{get_stake, get_weight, CRDS_GOSSIP_DEFAULT_BLOOM_ITEMS};
use crate::crds_gossip_error::CrdsGossipError;
use crate::crds_value::{CrdsValue, CrdsValueLabel};
use rand;
use rand::distributions::{Distribution, WeightedIndex};
use rand::Rng;
use solana_runtime::bloom::Bloom;
@ -37,6 +36,13 @@ pub struct CrdsFilter {
mask_bits: u32,
}

impl solana_sdk::sanitize::Sanitize for CrdsFilter {
fn sanitize(&self) -> std::result::Result<(), solana_sdk::sanitize::SanitizeError> {
self.filter.sanitize()?;
Ok(())
}
}

impl CrdsFilter {
pub fn new_rand(num_items: usize, max_bytes: usize) -> Self {
let max_bits = (max_bytes * 8) as f64;
@ -138,11 +144,12 @@ impl CrdsGossipPull {
&self,
crds: &Crds,
self_id: &Pubkey,
self_shred_version: u16,
now: u64,
stakes: &HashMap<Pubkey, u64>,
bloom_size: usize,
) -> Result<(Pubkey, Vec<CrdsFilter>, CrdsValue), CrdsGossipError> {
let options = self.pull_options(crds, &self_id, now, stakes);
let options = self.pull_options(crds, &self_id, self_shred_version, now, stakes);
if options.is_empty() {
return Err(CrdsGossipError::NoPeers);
}
@ -159,13 +166,20 @@ impl CrdsGossipPull {
&self,
crds: &'a Crds,
self_id: &Pubkey,
self_shred_version: u16,
now: u64,
stakes: &HashMap<Pubkey, u64>,
) -> Vec<(f32, &'a ContactInfo)> {
crds.table
.values()
.filter_map(|v| v.value.contact_info())
.filter(|v| v.id != *self_id && ContactInfo::is_valid_address(&v.gossip))
.filter(|v| {
v.id != *self_id
&& ContactInfo::is_valid_address(&v.gossip)
&& (self_shred_version == 0
|| v.shred_version == 0
|| self_shred_version == v.shred_version)
})
.map(|item| {
let max_weight = f32::from(u16::max_value()) - 1.0;
let req_time: u64 = *self.pull_request_time.get(&item.id).unwrap_or(&0);
@ -396,7 +410,7 @@ mod test {
stakes.insert(id, i * 100);
}
let now = 1024;
let mut options = node.pull_options(&crds, &me.label().pubkey(), now, &stakes);
let mut options = node.pull_options(&crds, &me.label().pubkey(), 0, now, &stakes);
assert!(!options.is_empty());
options.sort_by(|(weight_l, _), (weight_r, _)| weight_r.partial_cmp(weight_l).unwrap());
// check that the highest stake holder is also the heaviest weighted.
@ -406,6 +420,66 @@ mod test {
);
}

#[test]
fn test_no_pulls_from_different_shred_versions() {
let mut crds = Crds::default();
let stakes = HashMap::new();
let node = CrdsGossipPull::default();

let gossip = socketaddr!("127.0.0.1:1234");

let me = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo {
id: Pubkey::new_rand(),
shred_version: 123,
gossip: gossip.clone(),
..ContactInfo::default()
}));
let spy = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo {
id: Pubkey::new_rand(),
shred_version: 0,
gossip: gossip.clone(),
..ContactInfo::default()
}));
let node_123 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo {
id: Pubkey::new_rand(),
shred_version: 123,
gossip: gossip.clone(),
..ContactInfo::default()
}));
let node_456 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo {
id: Pubkey::new_rand(),
shred_version: 456,
gossip: gossip.clone(),
..ContactInfo::default()
}));

crds.insert(me.clone(), 0).unwrap();
crds.insert(spy.clone(), 0).unwrap();
crds.insert(node_123.clone(), 0).unwrap();
crds.insert(node_456.clone(), 0).unwrap();

// shred version 123 should ignore 456 nodes
let options = node
.pull_options(&crds, &me.label().pubkey(), 123, 0, &stakes)
.iter()
.map(|(_, c)| c.id)
.collect::<Vec<_>>();
assert_eq!(options.len(), 2);
assert!(options.contains(&spy.pubkey()));
assert!(options.contains(&node_123.pubkey()));

// spy nodes will see all
let options = node
.pull_options(&crds, &spy.label().pubkey(), 0, 0, &stakes)
.iter()
.map(|(_, c)| c.id)
.collect::<Vec<_>>();
assert_eq!(options.len(), 3);
assert!(options.contains(&me.pubkey()));
assert!(options.contains(&node_123.pubkey()));
assert!(options.contains(&node_456.pubkey()));
}

#[test]
fn test_new_pull_request() {
let mut crds = Crds::default();
@ -416,13 +490,13 @@ mod test {
let id = entry.label().pubkey();
let node = CrdsGossipPull::default();
assert_eq!(
node.new_pull_request(&crds, &id, 0, &HashMap::new(), PACKET_DATA_SIZE),
node.new_pull_request(&crds, &id, 0, 0, &HashMap::new(), PACKET_DATA_SIZE),
Err(CrdsGossipError::NoPeers)
);

crds.insert(entry.clone(), 0).unwrap();
assert_eq!(
node.new_pull_request(&crds, &id, 0, &HashMap::new(), PACKET_DATA_SIZE),
node.new_pull_request(&crds, &id, 0, 0, &HashMap::new(), PACKET_DATA_SIZE),
Err(CrdsGossipError::NoPeers)
);

@ -431,7 +505,7 @@ mod test {
0,
)));
crds.insert(new.clone(), 0).unwrap();
let req = node.new_pull_request(&crds, &id, 0, &HashMap::new(), PACKET_DATA_SIZE);
let req = node.new_pull_request(&crds, &id, 0, 0, &HashMap::new(), PACKET_DATA_SIZE);
let (to, _, self_info) = req.unwrap();
assert_eq!(to, new.label().pubkey());
assert_eq!(self_info, entry);
@ -466,6 +540,7 @@ mod test {
let req = node.new_pull_request(
&crds,
&node_pubkey,
0,
u64::max_value(),
&HashMap::new(),
PACKET_DATA_SIZE,
@ -495,6 +570,7 @@ mod test {
&node_crds,
&node_pubkey,
0,
0,
&HashMap::new(),
PACKET_DATA_SIZE,
);
@ -567,6 +643,7 @@ mod test {
&node_crds,
&node_pubkey,
0,
0,
&HashMap::new(),
PACKET_DATA_SIZE,
);

@ -236,13 +236,14 @@ impl CrdsGossipPush {
crds: &Crds,
stakes: &HashMap<Pubkey, u64>,
self_id: &Pubkey,
self_shred_version: u16,
network_size: usize,
ratio: usize,
) {
let need = Self::compute_need(self.num_active, self.active_set.len(), ratio);
let mut new_items = HashMap::new();

let options: Vec<_> = self.push_options(crds, &self_id, stakes);
let options: Vec<_> = self.push_options(crds, &self_id, self_shred_version, stakes);
if options.is_empty() {
return;
}
@ -288,13 +289,20 @@ impl CrdsGossipPush {
&self,
crds: &'a Crds,
self_id: &Pubkey,
self_shred_version: u16,
stakes: &HashMap<Pubkey, u64>,
) -> Vec<(f32, &'a ContactInfo)> {
crds.table
.values()
.filter(|v| v.value.contact_info().is_some())
.map(|v| (v.value.contact_info().unwrap(), v))
.filter(|(info, _)| info.id != *self_id && ContactInfo::is_valid_address(&info.gossip))
.filter(|(info, _)| {
info.id != *self_id
&& ContactInfo::is_valid_address(&info.gossip)
&& (self_shred_version == 0
|| info.shred_version == 0
|| self_shred_version == info.shred_version)
})
.map(|(info, value)| {
let max_weight = f32::from(u16::max_value()) - 1.0;
let last_updated: u64 = value.local_timestamp;
@ -510,7 +518,7 @@ mod test {
)));

assert_eq!(crds.insert(value1.clone(), 0), Ok(None));
push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 1, 1);
push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 0, 1, 1);

assert!(push.active_set.get(&value1.label().pubkey()).is_some());
let value2 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
@ -520,7 +528,7 @@ mod test {
assert!(push.active_set.get(&value2.label().pubkey()).is_none());
assert_eq!(crds.insert(value2.clone(), 0), Ok(None));
for _ in 0..30 {
push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 1, 1);
push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 0, 1, 1);
if push.active_set.get(&value2.label().pubkey()).is_some() {
break;
}
@ -533,7 +541,7 @@ mod test {
));
assert_eq!(crds.insert(value2.clone(), 0), Ok(None));
}
push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 1, 1);
push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 0, 1, 1);
assert_eq!(push.active_set.len(), push.num_active);
}
#[test]
@ -551,7 +559,7 @@ mod test {
crds.insert(peer.clone(), time).unwrap();
stakes.insert(id, i * 100);
}
let mut options = push.push_options(&crds, &Pubkey::default(), &stakes);
let mut options = push.push_options(&crds, &Pubkey::default(), 0, &stakes);
assert!(!options.is_empty());
options.sort_by(|(weight_l, _), (weight_r, _)| weight_r.partial_cmp(weight_l).unwrap());
// check that the highest stake holder is also the heaviest weighted.
@ -560,6 +568,66 @@ mod test {
10_000_u64
);
}

#[test]
fn test_no_pushes_to_from_different_shred_versions() {
let mut crds = Crds::default();
let stakes = HashMap::new();
let node = CrdsGossipPush::default();

let gossip = socketaddr!("127.0.0.1:1234");

let me = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo {
id: Pubkey::new_rand(),
shred_version: 123,
gossip: gossip.clone(),
..ContactInfo::default()
}));
let spy = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo {
id: Pubkey::new_rand(),
shred_version: 0,
gossip: gossip.clone(),
..ContactInfo::default()
}));
let node_123 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo {
id: Pubkey::new_rand(),
shred_version: 123,
gossip: gossip.clone(),
..ContactInfo::default()
}));
let node_456 = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo {
id: Pubkey::new_rand(),
shred_version: 456,
gossip: gossip.clone(),
..ContactInfo::default()
}));

crds.insert(me.clone(), 0).unwrap();
crds.insert(spy.clone(), 0).unwrap();
crds.insert(node_123.clone(), 0).unwrap();
crds.insert(node_456.clone(), 0).unwrap();

// shred version 123 should ignore 456 nodes
let options = node
.push_options(&crds, &me.label().pubkey(), 123, &stakes)
.iter()
.map(|(_, c)| c.id)
.collect::<Vec<_>>();
assert_eq!(options.len(), 2);
assert!(options.contains(&spy.pubkey()));
assert!(options.contains(&node_123.pubkey()));

// spy nodes will see all
let options = node
.push_options(&crds, &spy.label().pubkey(), 0, &stakes)
.iter()
.map(|(_, c)| c.id)
.collect::<Vec<_>>();
assert_eq!(options.len(), 3);
assert!(options.contains(&me.pubkey()));
assert!(options.contains(&node_123.pubkey()));
assert!(options.contains(&node_456.pubkey()));
}
#[test]
fn test_new_push_messages() {
let mut crds = Crds::default();
@ -569,7 +637,7 @@ mod test {
0,
)));
assert_eq!(crds.insert(peer.clone(), 0), Ok(None));
push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 1, 1);
push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 0, 1, 1);

let new_msg = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
@ -606,7 +674,7 @@ mod test {
push.process_push_message(&mut crds, &Pubkey::default(), peer_3.clone(), 0),
Ok(None)
);
push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 1, 1);
push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 0, 1, 1);

// push 3's contact info to 1 and 2 and 3
let new_msg = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
@ -628,7 +696,7 @@ mod test {
0,
)));
assert_eq!(crds.insert(peer.clone(), 0), Ok(None));
push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 1, 1);
push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 0, 1, 1);

let new_msg = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost(
&Pubkey::new_rand(),
@ -651,7 +719,7 @@ mod test {
0,
)));
assert_eq!(crds.insert(peer.clone(), 0), Ok(None));
push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 1, 1);
push.refresh_push_active_set(&crds, &HashMap::new(), &Pubkey::default(), 0, 1, 1);

let mut ci = ContactInfo::new_localhost(&Pubkey::new_rand(), 0);
ci.wallclock = 1;

@ -1,5 +1,6 @@
use crate::contact_info::ContactInfo;
use bincode::{serialize, serialized_size};
use solana_sdk::sanitize::{Sanitize, SanitizeError};
use solana_sdk::timing::timestamp;
use solana_sdk::{
clock::Slot,
@ -14,10 +15,14 @@ use std::{
fmt,
};

pub const MAX_WALLCLOCK: u64 = 1_000_000_000_000_000;
pub const MAX_SLOT: u64 = 1_000_000_000_000_000;

pub type VoteIndex = u8;
pub const MAX_VOTES: VoteIndex = 32;

pub type EpochSlotIndex = u8;
pub const MAX_EPOCH_SLOTS: EpochSlotIndex = 1;

/// CrdsValue that is replicated across the cluster
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
@ -26,6 +31,13 @@ pub struct CrdsValue {
pub data: CrdsData,
}

impl Sanitize for CrdsValue {
fn sanitize(&self) -> Result<(), SanitizeError> {
self.signature.sanitize()?;
self.data.sanitize()
}
}

impl Signable for CrdsValue {
fn pubkey(&self) -> Pubkey {
self.pubkey()
@ -44,14 +56,8 @@ impl Signable for CrdsValue {
}

fn verify(&self) -> bool {
let sig_check = self
.get_signature()
.verify(&self.pubkey().as_ref(), self.signable_data().borrow());
let data_check = match &self.data {
CrdsData::Vote(ix, _) => *ix < MAX_VOTES,
_ => true,
};
sig_check && data_check
self.get_signature()
.verify(&self.pubkey().as_ref(), self.signable_data().borrow())
}
}

@ -65,6 +71,7 @@ pub enum CrdsData {
EpochSlots(EpochSlotIndex, EpochSlots),
SnapshotHashes(SnapshotHash),
AccountsHashes(SnapshotHash),
Version(Version),
}

#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
@ -87,6 +94,40 @@ pub struct EpochIncompleteSlots {
pub compressed_list: Vec<u8>,
}

impl Sanitize for EpochIncompleteSlots {
fn sanitize(&self) -> Result<(), SanitizeError> {
if self.first >= MAX_SLOT {
return Err(SanitizeError::InvalidValue);
}
//rest of the data doesn't matter since we no longer decompress
//these values
Ok(())
}
}

impl Sanitize for CrdsData {
fn sanitize(&self) -> Result<(), SanitizeError> {
match self {
CrdsData::ContactInfo(val) => val.sanitize(),
CrdsData::Vote(ix, val) => {
if *ix >= MAX_VOTES {
return Err(SanitizeError::ValueOutOfBounds);
}
val.sanitize()
}
CrdsData::SnapshotHashes(val) => val.sanitize(),
CrdsData::AccountsHashes(val) => val.sanitize(),
CrdsData::EpochSlots(ix, val) => {
if *ix as usize >= MAX_EPOCH_SLOTS as usize {
return Err(SanitizeError::ValueOutOfBounds);
}
val.sanitize()
}
CrdsData::Version(version) => version.sanitize(),
}
}
}

#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub struct SnapshotHash {
pub from: Pubkey,
@ -94,6 +135,20 @@ pub struct SnapshotHash {
pub wallclock: u64,
}

impl Sanitize for SnapshotHash {
fn sanitize(&self) -> Result<(), SanitizeError> {
if self.wallclock >= MAX_WALLCLOCK {
return Err(SanitizeError::ValueOutOfBounds);
}
for (slot, _) in &self.hashes {
if *slot >= MAX_SLOT {
return Err(SanitizeError::ValueOutOfBounds);
}
}
self.from.sanitize()
}
}

impl SnapshotHash {
pub fn new(from: Pubkey, hashes: Vec<(Slot, Hash)>) -> Self {
Self {
@ -107,33 +162,47 @@ impl SnapshotHash {
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub struct EpochSlots {
pub from: Pubkey,
pub root: Slot,
root: Slot,
pub lowest: Slot,
pub slots: BTreeSet<Slot>,
pub stash: Vec<EpochIncompleteSlots>,
slots: BTreeSet<Slot>,
stash: Vec<EpochIncompleteSlots>,
pub wallclock: u64,
}

impl EpochSlots {
pub fn new(
from: Pubkey,
root: Slot,
lowest: Slot,
slots: BTreeSet<Slot>,
stash: Vec<EpochIncompleteSlots>,
wallclock: u64,
) -> Self {
pub fn new(from: Pubkey, lowest: Slot, wallclock: u64) -> Self {
Self {
from,
root,
root: 0,
lowest,
slots,
stash,
slots: BTreeSet::new(),
stash: vec![],
wallclock,
}
}
}

impl Sanitize for EpochSlots {
fn sanitize(&self) -> Result<(), SanitizeError> {
if self.wallclock >= MAX_WALLCLOCK {
return Err(SanitizeError::ValueOutOfBounds);
}
if self.lowest >= MAX_SLOT {
return Err(SanitizeError::ValueOutOfBounds);
}
if self.root >= MAX_SLOT {
return Err(SanitizeError::ValueOutOfBounds);
}
for slot in &self.slots {
if *slot >= MAX_SLOT {
return Err(SanitizeError::ValueOutOfBounds);
}
}
self.stash.sanitize()?;
self.from.sanitize()
}
}

#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub struct Vote {
pub from: Pubkey,
@ -141,6 +210,16 @@ pub struct Vote {
pub wallclock: u64,
}

impl Sanitize for Vote {
fn sanitize(&self) -> Result<(), SanitizeError> {
if self.wallclock >= MAX_WALLCLOCK {
return Err(SanitizeError::ValueOutOfBounds);
}
self.from.sanitize()?;
self.transaction.sanitize()
}
}

impl Vote {
pub fn new(from: &Pubkey, transaction: Transaction, wallclock: u64) -> Self {
Self {
@ -151,6 +230,33 @@ impl Vote {
}
}

#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub struct Version {
pub from: Pubkey,
pub wallclock: u64,
pub version: solana_version::Version,
}

impl Sanitize for Version {
fn sanitize(&self) -> Result<(), SanitizeError> {
if self.wallclock >= MAX_WALLCLOCK {
return Err(SanitizeError::ValueOutOfBounds);
}
self.from.sanitize()?;
self.version.sanitize()
}
}

impl Version {
pub fn new(from: Pubkey) -> Self {
Self {
from,
wallclock: timestamp(),
version: solana_version::Version::default(),
}
}
}

/// Type of the replicated value
/// These are labels for values in a record that is associated with `Pubkey`
#[derive(PartialEq, Hash, Eq, Clone, Debug)]
@ -160,6 +266,7 @@ pub enum CrdsValueLabel {
EpochSlots(Pubkey),
SnapshotHashes(Pubkey),
AccountsHashes(Pubkey),
Version(Pubkey),
}

impl fmt::Display for CrdsValueLabel {
@ -170,6 +277,7 @@ impl fmt::Display for CrdsValueLabel {
CrdsValueLabel::EpochSlots(_) => write!(f, "EpochSlots({})", self.pubkey()),
CrdsValueLabel::SnapshotHashes(_) => write!(f, "SnapshotHashes({})", self.pubkey()),
CrdsValueLabel::AccountsHashes(_) => write!(f, "AccountsHashes({})", self.pubkey()),
CrdsValueLabel::Version(_) => write!(f, "Version({})", self.pubkey()),
}
}
}
@ -182,6 +290,7 @@ impl CrdsValueLabel {
CrdsValueLabel::EpochSlots(p) => *p,
|
||||
CrdsValueLabel::SnapshotHashes(p) => *p,
|
||||
CrdsValueLabel::AccountsHashes(p) => *p,
|
||||
CrdsValueLabel::Version(p) => *p,
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -199,7 +308,7 @@ impl CrdsValue {
|
||||
value.sign(keypair);
|
||||
value
|
||||
}
|
||||
/// Totally unsecure unverfiable wallclock of the node that generated this message
|
||||
/// Totally unsecure unverifiable wallclock of the node that generated this message
|
||||
/// Latest wallclock is always picked.
|
||||
/// This is used to time out push messages.
|
||||
pub fn wallclock(&self) -> u64 {
|
||||
@ -209,6 +318,7 @@ impl CrdsValue {
|
||||
CrdsData::EpochSlots(_, vote) => vote.wallclock,
|
||||
CrdsData::SnapshotHashes(hash) => hash.wallclock,
|
||||
CrdsData::AccountsHashes(hash) => hash.wallclock,
|
||||
CrdsData::Version(version) => version.wallclock,
|
||||
}
|
||||
}
|
||||
pub fn pubkey(&self) -> Pubkey {
|
||||
@ -218,6 +328,7 @@ impl CrdsValue {
|
||||
CrdsData::EpochSlots(_, slots) => slots.from,
|
||||
CrdsData::SnapshotHashes(hash) => hash.from,
|
||||
CrdsData::AccountsHashes(hash) => hash.from,
|
||||
CrdsData::Version(version) => version.from,
|
||||
}
|
||||
}
|
||||
pub fn label(&self) -> CrdsValueLabel {
|
||||
@ -227,6 +338,7 @@ impl CrdsValue {
|
||||
CrdsData::EpochSlots(_, _) => CrdsValueLabel::EpochSlots(self.pubkey()),
|
||||
CrdsData::SnapshotHashes(_) => CrdsValueLabel::SnapshotHashes(self.pubkey()),
|
||||
CrdsData::AccountsHashes(_) => CrdsValueLabel::AccountsHashes(self.pubkey()),
|
||||
CrdsData::Version(_) => CrdsValueLabel::Version(self.pubkey()),
|
||||
}
|
||||
}
|
||||
pub fn contact_info(&self) -> Option<&ContactInfo> {
|
||||
@ -270,6 +382,13 @@ impl CrdsValue {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn version(&self) -> Option<&Version> {
|
||||
match &self.data {
|
||||
CrdsData::Version(version) => Some(version),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Return all the possible labels for a record identified by Pubkey.
|
||||
pub fn record_labels(key: &Pubkey) -> Vec<CrdsValueLabel> {
|
||||
let mut labels = vec![
|
||||
@ -277,6 +396,7 @@ impl CrdsValue {
|
||||
CrdsValueLabel::EpochSlots(*key),
|
||||
CrdsValueLabel::SnapshotHashes(*key),
|
||||
CrdsValueLabel::AccountsHashes(*key),
|
||||
CrdsValueLabel::Version(*key),
|
||||
];
|
||||
labels.extend((0..MAX_VOTES).map(|ix| CrdsValueLabel::Vote(ix, *key)));
|
||||
labels
|
||||
@ -326,7 +446,7 @@ mod test {
|
||||
|
||||
#[test]
|
||||
fn test_labels() {
|
||||
let mut hits = [false; 4 + MAX_VOTES as usize];
|
||||
let mut hits = [false; 5 + MAX_VOTES as usize];
|
||||
// this method should cover all the possible labels
|
||||
for v in &CrdsValue::record_labels(&Pubkey::default()) {
|
||||
match v {
|
||||
@ -334,7 +454,8 @@ mod test {
|
||||
CrdsValueLabel::EpochSlots(_) => hits[1] = true,
|
||||
CrdsValueLabel::SnapshotHashes(_) => hits[2] = true,
|
||||
CrdsValueLabel::AccountsHashes(_) => hits[3] = true,
|
||||
CrdsValueLabel::Vote(ix, _) => hits[*ix as usize + 4] = true,
|
||||
CrdsValueLabel::Version(_) => hits[4] = true,
|
||||
CrdsValueLabel::Vote(ix, _) => hits[*ix as usize + 5] = true,
|
||||
}
|
||||
}
|
||||
assert!(hits.iter().all(|x| *x));
|
||||
@ -356,7 +477,7 @@ mod test {
|
||||
|
||||
let v = CrdsValue::new_unsigned(CrdsData::EpochSlots(
|
||||
0,
|
||||
EpochSlots::new(Pubkey::default(), 0, 0, BTreeSet::new(), vec![], 0),
|
||||
EpochSlots::new(Pubkey::default(), 0, 0),
|
||||
));
|
||||
assert_eq!(v.wallclock(), 0);
|
||||
let key = v.clone().epoch_slots().unwrap().from;
|
||||
@ -377,10 +498,9 @@ mod test {
|
||||
Vote::new(&keypair.pubkey(), test_tx(), timestamp()),
|
||||
));
|
||||
verify_signatures(&mut v, &keypair, &wrong_keypair);
|
||||
let btreeset: BTreeSet<Slot> = vec![1, 2, 3, 6, 8].into_iter().collect();
|
||||
v = CrdsValue::new_unsigned(CrdsData::EpochSlots(
|
||||
0,
|
||||
EpochSlots::new(keypair.pubkey(), 0, 0, btreeset, vec![], timestamp()),
|
||||
EpochSlots::new(keypair.pubkey(), 0, timestamp()),
|
||||
));
|
||||
verify_signatures(&mut v, &keypair, &wrong_keypair);
|
||||
}
|
||||
@ -395,9 +515,21 @@ mod test {
|
||||
),
|
||||
&keypair,
|
||||
);
|
||||
assert!(!vote.verify());
|
||||
assert!(vote.sanitize().is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_max_epoch_slots_index() {
|
||||
let keypair = Keypair::new();
|
||||
let item = CrdsValue::new_signed(
|
||||
CrdsData::Vote(
|
||||
MAX_VOTES,
|
||||
Vote::new(&keypair.pubkey(), test_tx(), timestamp()),
|
||||
),
|
||||
&keypair,
|
||||
);
|
||||
assert_eq!(item.sanitize(), Err(SanitizeError::ValueOutOfBounds));
|
||||
}
|
||||
#[test]
|
||||
fn test_compute_vote_index_empty() {
|
||||
for i in 0..MAX_VOTES {
|
||||
|
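A minimal sketch (not part of the diff) of how the new Version variant is expected to flow through the accessors added to crds_value.rs above; it assumes the items in this file plus solana_sdk's Keypair/Signer are in scope, and the test name is illustrative only.
#[test]
fn version_value_round_trip() {
    use solana_sdk::signature::{Keypair, Signer};
    let keypair = Keypair::new();
    // Version::new() stamps the current wallclock and the default node version.
    let value = CrdsValue::new_signed(CrdsData::Version(Version::new(keypair.pubkey())), &keypair);
    assert_eq!(value.label(), CrdsValueLabel::Version(keypair.pubkey()));
    assert!(value.version().is_some());
    assert!(value.sanitize().is_ok());
}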
@ -75,6 +75,7 @@ pub fn discover_cluster(
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
0,
|
||||
)
|
||||
}
|
||||
|
||||
@ -85,9 +86,11 @@ pub fn discover(
|
||||
find_node_by_pubkey: Option<Pubkey>,
|
||||
find_node_by_gossip_addr: Option<&SocketAddr>,
|
||||
my_gossip_addr: Option<&SocketAddr>,
|
||||
my_shred_version: u16,
|
||||
) -> std::io::Result<(Vec<ContactInfo>, Vec<ContactInfo>)> {
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let (gossip_service, ip_echo, spy_ref) = make_gossip_node(entrypoint, &exit, my_gossip_addr);
|
||||
let (gossip_service, ip_echo, spy_ref) =
|
||||
make_gossip_node(entrypoint, &exit, my_gossip_addr, my_shred_version);
|
||||
|
||||
let id = spy_ref.read().unwrap().keypair.pubkey();
|
||||
info!("Entrypoint: {:?}", entrypoint);
|
||||
@ -256,12 +259,13 @@ fn make_gossip_node(
|
||||
entrypoint: Option<&SocketAddr>,
|
||||
exit: &Arc<AtomicBool>,
|
||||
gossip_addr: Option<&SocketAddr>,
|
||||
shred_version: u16,
|
||||
) -> (GossipService, Option<TcpListener>, Arc<RwLock<ClusterInfo>>) {
|
||||
let keypair = Arc::new(Keypair::new());
|
||||
let (node, gossip_socket, ip_echo) = if let Some(gossip_addr) = gossip_addr {
|
||||
ClusterInfo::gossip_node(&keypair.pubkey(), gossip_addr)
|
||||
ClusterInfo::gossip_node(&keypair.pubkey(), gossip_addr, shred_version)
|
||||
} else {
|
||||
ClusterInfo::spy_node(&keypair.pubkey())
|
||||
ClusterInfo::spy_node(&keypair.pubkey(), shred_version)
|
||||
};
|
||||
let mut cluster_info = ClusterInfo::new(node, keypair);
|
||||
if let Some(entrypoint) = entrypoint {
|
||||
|
@ -13,15 +13,18 @@ use std::thread;
|
||||
use std::thread::{Builder, JoinHandle};
|
||||
use std::time::Duration;
|
||||
|
||||
// - To try and keep the RocksDB size under 512GB:
|
||||
// Seeing about 1600b/shred, using 2000b/shred for margin, so 250m shreds can be stored in 512gb.
|
||||
// at 5k shreds/slot at 50k tps, this is 500k slots (~5.5 hours).
|
||||
// - To try and keep the RocksDB size under 400GB:
|
||||
// Seeing about 1600b/shred, using 2000b/shred for margin, so 200m shreds can be stored in 400gb.
|
||||
// at 5k shreds/slot at 50k tps, this is 500k slots (~5 hours).
|
||||
// At idle, 60 shreds/slot this is about 4m slots (18 days)
|
||||
// This is chosen to allow enough time for
|
||||
// - A validator to download a snapshot from a peer and boot from it
|
||||
// - To make sure that if a validator needs to reboot from its own snapshot, it has enough slots locally
|
||||
// to catch back up to where it was when it stopped
|
||||
pub const DEFAULT_MAX_LEDGER_SHREDS: u64 = 250_000_000;
|
||||
pub const DEFAULT_MAX_LEDGER_SHREDS: u64 = 200_000_000;
|
||||
|
||||
// Allow down to 50m, or 3.5 days at idle, 1hr at 50k load, around ~100GB
|
||||
pub const DEFAULT_MIN_MAX_LEDGER_SHREDS: u64 = 50_000_000;
|
||||
|
||||
// Check for removing slots at this interval so we don't purge too often
|
||||
// and starve other blockstore users.
|
||||
|
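A back-of-the-envelope sketch of the 200m-shred figure in the comment above; the constant names are illustrative only and the numbers are the stated margins, not measurements.
// Rough sizing sketch for DEFAULT_MAX_LEDGER_SHREDS.
const BYTES_PER_SHRED_WITH_MARGIN: u64 = 2_000; // ~1600 B observed per shred, rounded up for margin
const TARGET_DB_BYTES: u64 = 400_000_000_000;   // 400 GB RocksDB target
const _MAX_SHREDS: u64 = TARGET_DB_BYTES / BYTES_PER_SHRED_WITH_MARGIN; // 200_000_000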
@ -29,16 +29,19 @@ pub mod genesis_utils;
|
||||
pub mod gossip_service;
|
||||
pub mod ledger_cleanup_service;
|
||||
pub mod local_vote_signer_service;
|
||||
pub mod non_circulating_supply;
|
||||
pub mod packet;
|
||||
pub mod poh_recorder;
|
||||
pub mod poh_service;
|
||||
pub mod recvmmsg;
|
||||
pub mod repair_response;
|
||||
pub mod repair_service;
|
||||
pub mod replay_stage;
|
||||
mod result;
|
||||
pub mod retransmit_stage;
|
||||
pub mod rewards_recorder_service;
|
||||
pub mod rpc;
|
||||
pub mod rpc_error;
|
||||
pub mod rpc_pubsub;
|
||||
pub mod rpc_pubsub_service;
|
||||
pub mod rpc_service;
|
||||
|
192
core/src/non_circulating_supply.rs
Normal file

@ -0,0 +1,192 @@
|
||||
use solana_runtime::bank::Bank;
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
use solana_stake_program::stake_state::StakeState;
|
||||
use std::{collections::HashSet, sync::Arc};
|
||||
|
||||
pub struct NonCirculatingSupply {
|
||||
pub lamports: u64,
|
||||
pub accounts: Vec<Pubkey>,
|
||||
}
|
||||
|
||||
pub fn calculate_non_circulating_supply(bank: &Arc<Bank>) -> NonCirculatingSupply {
|
||||
debug!("Updating Bank supply, epoch: {}", bank.epoch());
|
||||
let mut non_circulating_accounts_set: HashSet<Pubkey> = HashSet::new();
|
||||
|
||||
for key in non_circulating_accounts() {
|
||||
non_circulating_accounts_set.insert(key);
|
||||
}
|
||||
let withdraw_authority_list = withdraw_authority();
|
||||
|
||||
let clock = bank.clock();
|
||||
let stake_accounts = bank.get_program_accounts(Some(&solana_stake_program::id()));
|
||||
for (pubkey, account) in stake_accounts.iter() {
|
||||
let stake_account = StakeState::from(&account).unwrap_or_default();
|
||||
match stake_account {
|
||||
StakeState::Initialized(meta) => {
|
||||
if meta.lockup.is_in_force(&clock, &HashSet::default())
|
||||
|| withdraw_authority_list.contains(&meta.authorized.withdrawer)
|
||||
{
|
||||
non_circulating_accounts_set.insert(*pubkey);
|
||||
}
|
||||
}
|
||||
StakeState::Stake(meta, _stake) => {
|
||||
if meta.lockup.is_in_force(&clock, &HashSet::default())
|
||||
|| withdraw_authority_list.contains(&meta.authorized.withdrawer)
|
||||
{
|
||||
non_circulating_accounts_set.insert(*pubkey);
|
||||
}
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
|
||||
let lamports = non_circulating_accounts_set
|
||||
.iter()
|
||||
.fold(0, |acc, pubkey| acc + bank.get_balance(&pubkey));
|
||||
|
||||
NonCirculatingSupply {
|
||||
lamports,
|
||||
accounts: non_circulating_accounts_set.into_iter().collect(),
|
||||
}
|
||||
}
|
||||
|
||||
// Mainnet-beta accounts that should be considered non-circulating
|
||||
solana_sdk::pubkeys!(
|
||||
non_circulating_accounts,
|
||||
[
|
||||
"9huDUZfxoJ7wGMTffUE7vh1xePqef7gyrLJu9NApncqA",
|
||||
"GK2zqSsXLA2rwVZk347RYhh6jJpRsCA69FjLW93ZGi3B",
|
||||
"HCV5dGFJXRrJ3jhDYA4DCeb9TEDTwGGYXtT3wHksu2Zr",
|
||||
"25odAafVXnd63L6Hq5Cx6xGmhKqkhE2y6UrLVuqUfWZj",
|
||||
"14FUT96s9swbmH7ZjpDvfEDywnAYy9zaNhv4xvezySGu",
|
||||
"HbZ5FfmKWNHC7uwk6TF1hVi6TCs7dtYfdjEcuPGgzFAg",
|
||||
"C7C8odR8oashR5Feyrq2tJKaXL18id1dSj2zbkDGL2C2",
|
||||
"Eyr9P5XsjK2NUKNCnfu39eqpGoiLFgVAv1LSQgMZCwiQ",
|
||||
"DE1bawNcRJB9rVm3buyMVfr8mBEoyyu73NBovf2oXJsJ",
|
||||
"CakcnaRDHka2gXyfbEd2d3xsvkJkqsLw2akB3zsN1D2S",
|
||||
"7Np41oeYqPefeNQEHSv1UDhYrehxin3NStELsSKCT4K2",
|
||||
"GdnSyH3YtwcxFvQrVVJMm1JhTS4QVX7MFsX56uJLUfiZ",
|
||||
"Mc5XB47H3DKJHym5RLa9mPzWv5snERsF3KNv5AauXK8",
|
||||
"7cvkjYAkUYs4W8XcXsca7cBrEGFeSUjeZmKoNBvEwyri",
|
||||
"AG3m2bAibcY8raMt4oXEGqRHwX4FWKPPJVjZxn1LySDX",
|
||||
"5XdtyEDREHJXXW1CTtCsVjJRjBapAwK78ZquzvnNVRrV",
|
||||
"6yKHERk8rsbmJxvMpPuwPs1ct3hRiP7xaJF2tvnGU6nK",
|
||||
"CHmdL15akDcJgBkY6BP3hzs98Dqr6wbdDC5p8odvtSbq",
|
||||
"FR84wZQy3Y3j2gWz6pgETUiUoJtreMEuWfbg6573UCj9",
|
||||
"5q54XjQ7vDx4y6KphPeE97LUNiYGtP55spjvXAWPGBuf",
|
||||
]
|
||||
);
|
||||
|
||||
// Withdraw authority for autostaked accounts on mainnet-beta
|
||||
solana_sdk::pubkeys!(
|
||||
withdraw_authority,
|
||||
[
|
||||
"8CUUMKYNGxdgYio5CLHRHyzMEhhVRMcqefgE6dLqnVRK",
|
||||
"3FFaheyqtyAXZSYxDzsr5CVKvJuvZD1WE1VEsBtDbRqB",
|
||||
]
|
||||
);
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use solana_sdk::{
|
||||
account::Account, epoch_schedule::EpochSchedule, genesis_config::GenesisConfig,
|
||||
};
|
||||
use solana_stake_program::stake_state::{Authorized, Lockup, Meta, StakeState};
|
||||
use std::{collections::BTreeMap, sync::Arc};
|
||||
|
||||
fn new_from_parent(parent: &Arc<Bank>) -> Bank {
|
||||
Bank::new_from_parent(parent, &Pubkey::default(), parent.slot() + 1)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_calculate_non_circulating_supply() {
|
||||
let mut accounts: BTreeMap<Pubkey, Account> = BTreeMap::new();
|
||||
let balance = 10;
|
||||
let num_genesis_accounts = 10;
|
||||
for _ in 0..num_genesis_accounts {
|
||||
accounts.insert(
|
||||
Pubkey::new_rand(),
|
||||
Account::new(balance, 0, &Pubkey::default()),
|
||||
);
|
||||
}
|
||||
let non_circulating_accounts = non_circulating_accounts();
|
||||
let num_non_circulating_accounts = non_circulating_accounts.len() as u64;
|
||||
for key in non_circulating_accounts.clone() {
|
||||
accounts.insert(key, Account::new(balance, 0, &Pubkey::default()));
|
||||
}
|
||||
|
||||
let num_stake_accounts = 3;
|
||||
for _ in 0..num_stake_accounts {
|
||||
let pubkey = Pubkey::new_rand();
|
||||
let meta = Meta {
|
||||
authorized: Authorized::auto(&pubkey),
|
||||
lockup: Lockup {
|
||||
epoch: 1,
|
||||
..Lockup::default()
|
||||
},
|
||||
..Meta::default()
|
||||
};
|
||||
let stake_account = Account::new_data_with_space(
|
||||
balance,
|
||||
&StakeState::Initialized(meta),
|
||||
std::mem::size_of::<StakeState>(),
|
||||
&solana_stake_program::id(),
|
||||
)
|
||||
.unwrap();
|
||||
accounts.insert(pubkey, stake_account);
|
||||
}
|
||||
|
||||
let slots_per_epoch = 32;
|
||||
let genesis_config = GenesisConfig {
|
||||
accounts,
|
||||
epoch_schedule: EpochSchedule::new(slots_per_epoch),
|
||||
..GenesisConfig::default()
|
||||
};
|
||||
let mut bank = Arc::new(Bank::new(&genesis_config));
|
||||
assert_eq!(
|
||||
bank.capitalization(),
|
||||
(num_genesis_accounts + num_non_circulating_accounts + num_stake_accounts) * balance
|
||||
);
|
||||
|
||||
let non_circulating_supply = calculate_non_circulating_supply(&bank);
|
||||
assert_eq!(
|
||||
non_circulating_supply.lamports,
|
||||
(num_non_circulating_accounts + num_stake_accounts) * balance
|
||||
);
|
||||
assert_eq!(
|
||||
non_circulating_supply.accounts.len(),
|
||||
num_non_circulating_accounts as usize + num_stake_accounts as usize
|
||||
);
|
||||
|
||||
bank = Arc::new(new_from_parent(&bank));
|
||||
let new_balance = 11;
|
||||
for key in non_circulating_accounts {
|
||||
bank.store_account(&key, &Account::new(new_balance, 0, &Pubkey::default()));
|
||||
}
|
||||
let non_circulating_supply = calculate_non_circulating_supply(&bank);
|
||||
assert_eq!(
|
||||
non_circulating_supply.lamports,
|
||||
(num_non_circulating_accounts * new_balance) + (num_stake_accounts * balance)
|
||||
);
|
||||
assert_eq!(
|
||||
non_circulating_supply.accounts.len(),
|
||||
num_non_circulating_accounts as usize + num_stake_accounts as usize
|
||||
);
|
||||
|
||||
// Advance bank an epoch, which should unlock stakes
|
||||
for _ in 0..slots_per_epoch {
|
||||
bank = Arc::new(new_from_parent(&bank));
|
||||
}
|
||||
assert_eq!(bank.epoch(), 1);
|
||||
let non_circulating_supply = calculate_non_circulating_supply(&bank);
|
||||
assert_eq!(
|
||||
non_circulating_supply.lamports,
|
||||
num_non_circulating_accounts * new_balance
|
||||
);
|
||||
assert_eq!(
|
||||
non_circulating_supply.accounts.len(),
|
||||
num_non_circulating_accounts as usize
|
||||
);
|
||||
}
|
||||
}
|
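A hedged sketch of how this new module is consumed; it mirrors the /v0/circulating-supply handler added later in this diff, and the helper name is hypothetical.
use solana_runtime::bank::Bank;
use std::sync::Arc;

// Circulating supply = total capitalization minus lamports held by
// non-circulating accounts and still-locked stake accounts.
fn circulating_supply_lamports(bank: &Arc<Bank>) -> u64 {
    let non_circulating = crate::non_circulating_supply::calculate_non_circulating_supply(bank).lamports;
    bank.capitalization() - non_circulating
}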
@ -1,10 +1,8 @@
|
||||
//! The `poh_service` module implements a service that records the passing of
|
||||
//! "ticks", a measure of time in the PoH stream
|
||||
use crate::poh_recorder::PohRecorder;
|
||||
use core_affinity;
|
||||
use solana_sdk::clock::DEFAULT_TICKS_PER_SLOT;
|
||||
use solana_sdk::poh_config::PohConfig;
|
||||
use solana_sys_tuner;
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::sync::{Arc, Mutex};
|
||||
use std::thread::{self, sleep, Builder, JoinHandle};
|
||||
|
@ -213,49 +213,4 @@ mod tests {
|
||||
assert_eq!(packets[i].meta.addr(), saddr2);
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(target_os = "linux")]
|
||||
#[test]
|
||||
pub fn test_recv_mmsg_batch_size() {
|
||||
let reader = UdpSocket::bind("127.0.0.1:0").expect("bind");
|
||||
let addr = reader.local_addr().unwrap();
|
||||
let sender = UdpSocket::bind("127.0.0.1:0").expect("bind");
|
||||
|
||||
const TEST_BATCH_SIZE: usize = 64;
|
||||
let sent = TEST_BATCH_SIZE;
|
||||
|
||||
let mut elapsed_in_max_batch = 0;
|
||||
(0..1000).for_each(|_| {
|
||||
for _ in 0..sent {
|
||||
let data = [0; PACKET_DATA_SIZE];
|
||||
sender.send_to(&data[..], &addr).unwrap();
|
||||
}
|
||||
let mut packets = vec![Packet::default(); TEST_BATCH_SIZE];
|
||||
let now = Instant::now();
|
||||
let recv = recv_mmsg(&reader, &mut packets[..]).unwrap().1;
|
||||
elapsed_in_max_batch += now.elapsed().as_nanos();
|
||||
assert_eq!(TEST_BATCH_SIZE, recv);
|
||||
});
|
||||
|
||||
let mut elapsed_in_small_batch = 0;
|
||||
(0..1000).for_each(|_| {
|
||||
for _ in 0..sent {
|
||||
let data = [0; PACKET_DATA_SIZE];
|
||||
sender.send_to(&data[..], &addr).unwrap();
|
||||
}
|
||||
let mut packets = vec![Packet::default(); 4];
|
||||
let mut recv = 0;
|
||||
let now = Instant::now();
|
||||
while let Ok(num) = recv_mmsg(&reader, &mut packets[..]) {
|
||||
recv += num.1;
|
||||
if recv >= TEST_BATCH_SIZE {
|
||||
break;
|
||||
}
|
||||
}
|
||||
elapsed_in_small_batch += now.elapsed().as_nanos();
|
||||
assert_eq!(TEST_BATCH_SIZE, recv);
|
||||
});
|
||||
|
||||
assert!(elapsed_in_max_batch <= elapsed_in_small_batch);
|
||||
}
|
||||
}
|
||||
|
129
core/src/repair_response.rs
Normal file
@ -0,0 +1,129 @@
|
||||
use solana_ledger::{
|
||||
blockstore::Blockstore,
|
||||
shred::{Nonce, Shred, SIZE_OF_NONCE},
|
||||
};
|
||||
use solana_perf::packet::limited_deserialize;
|
||||
use solana_sdk::{clock::Slot, packet::Packet};
|
||||
use std::{io, net::SocketAddr};
|
||||
|
||||
pub fn repair_response_packet(
|
||||
blockstore: &Blockstore,
|
||||
slot: Slot,
|
||||
shred_index: u64,
|
||||
dest: &SocketAddr,
|
||||
nonce: Option<Nonce>,
|
||||
) -> Option<Packet> {
|
||||
if Shred::is_nonce_unlocked(slot) && nonce.is_none()
|
||||
|| !Shred::is_nonce_unlocked(slot) && nonce.is_some()
|
||||
{
|
||||
return None;
|
||||
}
|
||||
let shred = blockstore
|
||||
.get_data_shred(slot, shred_index)
|
||||
.expect("Blockstore could not get data shred");
|
||||
shred.map(|shred| repair_response_packet_from_shred(slot, shred, dest, nonce))
|
||||
}
|
||||
|
||||
pub fn repair_response_packet_from_shred(
|
||||
slot: Slot,
|
||||
shred: Vec<u8>,
|
||||
dest: &SocketAddr,
|
||||
nonce: Option<Nonce>,
|
||||
) -> Packet {
|
||||
let size_of_nonce = {
|
||||
if Shred::is_nonce_unlocked(slot) {
|
||||
assert!(nonce.is_some());
|
||||
SIZE_OF_NONCE
|
||||
} else {
|
||||
assert!(nonce.is_none());
|
||||
0
|
||||
}
|
||||
};
|
||||
let mut packet = Packet::default();
|
||||
packet.meta.size = shred.len() + size_of_nonce;
|
||||
packet.meta.set_addr(dest);
|
||||
packet.data[..shred.len()].copy_from_slice(&shred);
|
||||
let mut wr = io::Cursor::new(&mut packet.data[shred.len()..]);
|
||||
if let Some(nonce) = nonce {
|
||||
bincode::serialize_into(&mut wr, &nonce).expect("Buffer not large enough to fit nonce");
|
||||
}
|
||||
packet
|
||||
}
|
||||
|
||||
pub fn nonce(buf: &[u8]) -> Option<Nonce> {
|
||||
if buf.len() < SIZE_OF_NONCE {
|
||||
None
|
||||
} else {
|
||||
limited_deserialize(&buf[buf.len() - SIZE_OF_NONCE..]).ok()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use solana_ledger::{
|
||||
shred::{Shred, Shredder, UNLOCK_NONCE_SLOT},
|
||||
sigverify_shreds::verify_shred_cpu,
|
||||
};
|
||||
use solana_sdk::signature::{Keypair, Signer};
|
||||
use std::{
|
||||
collections::HashMap,
|
||||
net::{IpAddr, Ipv4Addr},
|
||||
};
|
||||
|
||||
fn run_test_sigverify_shred_cpu_repair(slot: Slot) {
|
||||
solana_logger::setup();
|
||||
let mut shred = Shred::new_from_data(
|
||||
slot,
|
||||
0xc0de,
|
||||
0xdead,
|
||||
Some(&[1, 2, 3, 4]),
|
||||
true,
|
||||
true,
|
||||
0,
|
||||
0,
|
||||
0xc0de,
|
||||
);
|
||||
assert_eq!(shred.slot(), slot);
|
||||
let keypair = Keypair::new();
|
||||
Shredder::sign_shred(&keypair, &mut shred);
|
||||
trace!("signature {}", shred.common_header.signature);
|
||||
let nonce = if Shred::is_nonce_unlocked(slot) {
|
||||
Some(9)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
let mut packet = repair_response_packet_from_shred(
|
||||
slot,
|
||||
shred.payload,
|
||||
&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080),
|
||||
nonce,
|
||||
);
|
||||
packet.meta.repair = true;
|
||||
|
||||
let leader_slots = [(slot, keypair.pubkey().to_bytes())]
|
||||
.iter()
|
||||
.cloned()
|
||||
.collect();
|
||||
let rv = verify_shred_cpu(&packet, &leader_slots);
|
||||
assert_eq!(rv, Some(1));
|
||||
|
||||
let wrong_keypair = Keypair::new();
|
||||
let leader_slots = [(slot, wrong_keypair.pubkey().to_bytes())]
|
||||
.iter()
|
||||
.cloned()
|
||||
.collect();
|
||||
let rv = verify_shred_cpu(&packet, &leader_slots);
|
||||
assert_eq!(rv, Some(0));
|
||||
|
||||
let leader_slots = HashMap::new();
|
||||
let rv = verify_shred_cpu(&packet, &leader_slots);
|
||||
assert_eq!(rv, None);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_sigverify_shred_cpu_repair() {
|
||||
run_test_sigverify_shred_cpu_repair(UNLOCK_NONCE_SLOT);
|
||||
run_test_sigverify_shred_cpu_repair(UNLOCK_NONCE_SLOT + 1);
|
||||
}
|
||||
}
|
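A small sketch of the nonce round trip these helpers support, assuming the imports used in the test module above (Shred, UNLOCK_NONCE_SLOT) and a slot past the nonce unlock so a nonce is required; the function name is illustrative.
fn nonce_round_trip() {
    let slot = UNLOCK_NONCE_SLOT + 1;
    let shred = Shred::new_from_data(slot, 0, 0, Some(&[1, 2, 3, 4]), true, true, 0, 0, 0);
    let dest: std::net::SocketAddr = "127.0.0.1:8080".parse().unwrap();
    let packet = repair_response_packet_from_shred(slot, shred.payload, &dest, Some(42));
    // The nonce is appended after the shred bytes, so it reads back from the packet tail.
    assert_eq!(nonce(&packet.data[..packet.meta.size]), Some(42));
}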
@ -20,9 +20,31 @@ use std::{
|
||||
sync::{Arc, RwLock},
|
||||
thread::sleep,
|
||||
thread::{self, Builder, JoinHandle},
|
||||
time::Duration,
|
||||
time::{Duration, Instant},
|
||||
};
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct RepairStatsGroup {
|
||||
pub count: u64,
|
||||
pub min: u64,
|
||||
pub max: u64,
|
||||
}
|
||||
|
||||
impl RepairStatsGroup {
|
||||
pub fn update(&mut self, slot: u64) {
|
||||
self.count += 1;
|
||||
self.min = std::cmp::min(self.min, slot);
|
||||
self.max = std::cmp::max(self.max, slot);
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct RepairStats {
|
||||
pub shred: RepairStatsGroup,
|
||||
pub highest_shred: RepairStatsGroup,
|
||||
pub orphan: RepairStatsGroup,
|
||||
}
|
||||
|
||||
pub const MAX_REPAIR_LENGTH: usize = 512;
|
||||
pub const REPAIR_MS: u64 = 100;
|
||||
pub const MAX_ORPHANS: usize = 5;
|
||||
@ -107,6 +129,8 @@ impl RepairService {
|
||||
cluster_info,
|
||||
);
|
||||
}
|
||||
let mut repair_stats = RepairStats::default();
|
||||
let mut last_stats = Instant::now();
|
||||
loop {
|
||||
if exit.load(Ordering::Relaxed) {
|
||||
break;
|
||||
@ -144,22 +168,34 @@ impl RepairService {
|
||||
};
|
||||
|
||||
if let Ok(repairs) = repairs {
|
||||
let reqs: Vec<_> = repairs
|
||||
.into_iter()
|
||||
.filter_map(|repair_request| {
|
||||
serve_repair
|
||||
.repair_request(&repair_request)
|
||||
.map(|result| (result, repair_request))
|
||||
.ok()
|
||||
})
|
||||
.collect();
|
||||
|
||||
for ((to, req), _) in reqs {
|
||||
repair_socket.send_to(&req, to).unwrap_or_else(|e| {
|
||||
info!("{} repair req send_to({}) error {:?}", id, to, e);
|
||||
0
|
||||
});
|
||||
repairs.into_iter().for_each(|repair_request| {
|
||||
if let Ok((to, req)) =
|
||||
serve_repair.repair_request(&repair_request, &mut repair_stats)
|
||||
{
|
||||
repair_socket.send_to(&req, to).unwrap_or_else(|e| {
|
||||
info!("{} repair req send_to({}) error {:?}", id, to, e);
|
||||
0
|
||||
});
|
||||
}
|
||||
});
|
||||
}
|
||||
if last_stats.elapsed().as_secs() > 1 {
|
||||
let repair_total = repair_stats.shred.count
|
||||
+ repair_stats.highest_shred.count
|
||||
+ repair_stats.orphan.count;
|
||||
if repair_total > 0 {
|
||||
datapoint_info!(
|
||||
"serve_repair-repair",
|
||||
("repair-total", repair_total, i64),
|
||||
("shred-count", repair_stats.shred.count, i64),
|
||||
("highest-shred-count", repair_stats.highest_shred.count, i64),
|
||||
("orphan-count", repair_stats.orphan.count, i64),
|
||||
("repair-highest-slot", repair_stats.highest_shred.max, i64),
|
||||
("repair-orphan", repair_stats.orphan.max, i64),
|
||||
);
|
||||
}
|
||||
repair_stats = RepairStats::default();
|
||||
last_stats = Instant::now();
|
||||
}
|
||||
sleep(Duration::from_millis(REPAIR_MS));
|
||||
}
|
||||
@ -565,7 +601,7 @@ mod test {
|
||||
let blockstore = Blockstore::open(&blockstore_path).unwrap();
|
||||
|
||||
let slots: Vec<u64> = vec![1, 3, 5, 7, 8];
|
||||
let num_entries_per_slot = max_ticks_per_n_shreds(1) + 1;
|
||||
let num_entries_per_slot = max_ticks_per_n_shreds(1, None) + 1;
|
||||
|
||||
let shreds = make_chaining_slot_entries(&slots, num_entries_per_slot);
|
||||
for (mut slot_shreds, _) in shreds.into_iter() {
|
||||
|
@ -26,6 +26,7 @@ use solana_metrics::inc_new_counter_info;
|
||||
use solana_runtime::bank::Bank;
|
||||
use solana_sdk::{
|
||||
clock::Slot,
|
||||
genesis_config::GenesisConfig,
|
||||
hash::Hash,
|
||||
pubkey::Pubkey,
|
||||
signature::{Keypair, Signer},
|
||||
@ -66,7 +67,6 @@ impl Drop for Finalizer {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct ReplayStageConfig {
|
||||
pub my_pubkey: Pubkey,
|
||||
pub vote_account: Pubkey,
|
||||
@ -80,6 +80,7 @@ pub struct ReplayStageConfig {
|
||||
pub block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
|
||||
pub transaction_status_sender: Option<TransactionStatusSender>,
|
||||
pub rewards_recorder_sender: Option<RewardsRecorderSender>,
|
||||
pub genesis_config: GenesisConfig,
|
||||
}
|
||||
|
||||
pub struct ReplayStage {
|
||||
@ -183,6 +184,7 @@ impl ReplayStage {
|
||||
block_commitment_cache,
|
||||
transaction_status_sender,
|
||||
rewards_recorder_sender,
|
||||
genesis_config,
|
||||
} = config;
|
||||
|
||||
let (root_bank_sender, root_bank_receiver) = channel();
|
||||
@ -246,6 +248,7 @@ impl ReplayStage {
|
||||
&slot_full_senders,
|
||||
transaction_status_sender.clone(),
|
||||
&verify_recyclers,
|
||||
&genesis_config,
|
||||
);
|
||||
datapoint_debug!(
|
||||
"replay_stage-memory",
|
||||
@ -253,13 +256,15 @@ impl ReplayStage {
|
||||
);
|
||||
|
||||
let ancestors = Arc::new(bank_forks.read().unwrap().ancestors());
|
||||
let forks_root = bank_forks.read().unwrap().root();
|
||||
let start = allocated.get();
|
||||
let mut frozen_banks: Vec<_> = bank_forks
|
||||
.read()
|
||||
.unwrap()
|
||||
.frozen_banks()
|
||||
.values()
|
||||
.cloned()
|
||||
.into_iter()
|
||||
.filter(|(slot, _)| *slot >= forks_root)
|
||||
.map(|(_, bank)| bank)
|
||||
.collect();
|
||||
let newly_computed_slot_stats = Self::compute_bank_stats(
|
||||
&my_pubkey,
|
||||
@ -340,6 +345,7 @@ impl ReplayStage {
|
||||
&accounts_hash_sender,
|
||||
&latest_root_senders,
|
||||
&subscriptions,
|
||||
&block_commitment_cache,
|
||||
)?;
|
||||
}
|
||||
datapoint_debug!(
|
||||
@ -551,6 +557,7 @@ impl ReplayStage {
|
||||
bank_progress: &mut ForkProgress,
|
||||
transaction_status_sender: Option<TransactionStatusSender>,
|
||||
verify_recyclers: &VerifyRecyclers,
|
||||
genesis_config: &GenesisConfig,
|
||||
) -> result::Result<usize, BlockstoreProcessorError> {
|
||||
let tx_count_before = bank_progress.replay_progress.num_txs;
|
||||
let confirm_result = blockstore_processor::confirm_slot(
|
||||
@ -562,6 +569,7 @@ impl ReplayStage {
|
||||
transaction_status_sender,
|
||||
None,
|
||||
verify_recyclers,
|
||||
Some(genesis_config),
|
||||
);
|
||||
let tx_count_after = bank_progress.replay_progress.num_txs;
|
||||
let tx_count = tx_count_after - tx_count_before;
|
||||
@ -612,6 +620,7 @@ impl ReplayStage {
|
||||
accounts_hash_sender: &Option<SnapshotPackageSender>,
|
||||
latest_root_senders: &[Sender<Slot>],
|
||||
subscriptions: &Arc<RpcSubscriptions>,
|
||||
block_commitment_cache: &Arc<RwLock<BlockCommitmentCache>>,
|
||||
) -> Result<()> {
|
||||
if bank.is_empty() {
|
||||
inc_new_counter_info!("replay_stage-voted_empty_bank", 1);
|
||||
@ -637,7 +646,20 @@ impl ReplayStage {
|
||||
blockstore
|
||||
.set_roots(&rooted_slots)
|
||||
.expect("Ledger set roots failed");
|
||||
Self::handle_new_root(new_root, &bank_forks, progress, accounts_hash_sender);
|
||||
let largest_confirmed_root = Some(
|
||||
block_commitment_cache
|
||||
.read()
|
||||
.unwrap()
|
||||
.largest_confirmed_root(),
|
||||
);
|
||||
|
||||
Self::handle_new_root(
|
||||
new_root,
|
||||
&bank_forks,
|
||||
progress,
|
||||
accounts_hash_sender,
|
||||
largest_confirmed_root,
|
||||
);
|
||||
subscriptions.notify_roots(rooted_slots);
|
||||
latest_root_senders.iter().for_each(|s| {
|
||||
if let Err(e) = s.send(new_root) {
|
||||
@ -737,6 +759,7 @@ impl ReplayStage {
|
||||
slot_full_senders: &[Sender<(u64, Pubkey)>],
|
||||
transaction_status_sender: Option<TransactionStatusSender>,
|
||||
verify_recyclers: &VerifyRecyclers,
|
||||
genesis_config: &GenesisConfig,
|
||||
) -> bool {
|
||||
let mut did_complete_bank = false;
|
||||
let mut tx_count = 0;
|
||||
@ -765,6 +788,7 @@ impl ReplayStage {
|
||||
bank_progress,
|
||||
transaction_status_sender.clone(),
|
||||
verify_recyclers,
|
||||
genesis_config,
|
||||
);
|
||||
match replay_result {
|
||||
Ok(replay_tx_count) => tx_count += replay_tx_count,
|
||||
@ -971,15 +995,17 @@ impl ReplayStage {
|
||||
}
|
||||
|
||||
pub(crate) fn handle_new_root(
|
||||
new_root: u64,
|
||||
new_root: Slot,
|
||||
bank_forks: &RwLock<BankForks>,
|
||||
progress: &mut HashMap<u64, ForkProgress>,
|
||||
accounts_hash_sender: &Option<SnapshotPackageSender>,
|
||||
largest_confirmed_root: Option<Slot>,
|
||||
) {
|
||||
bank_forks
|
||||
.write()
|
||||
.unwrap()
|
||||
.set_root(new_root, accounts_hash_sender);
|
||||
bank_forks.write().unwrap().set_root(
|
||||
new_root,
|
||||
accounts_hash_sender,
|
||||
largest_confirmed_root,
|
||||
);
|
||||
let r_bank_forks = bank_forks.read().unwrap();
|
||||
progress.retain(|k, _| r_bank_forks.get(*k).is_some());
|
||||
}
|
||||
@ -1008,7 +1034,11 @@ impl ReplayStage {
|
||||
// Find the next slot that chains to the old slot
|
||||
let forks = forks_lock.read().unwrap();
|
||||
let frozen_banks = forks.frozen_banks();
|
||||
let frozen_bank_slots: Vec<u64> = frozen_banks.keys().cloned().collect();
|
||||
let frozen_bank_slots: Vec<u64> = frozen_banks
|
||||
.keys()
|
||||
.cloned()
|
||||
.filter(|s| *s >= forks.root())
|
||||
.collect();
|
||||
let next_slots = blockstore
|
||||
.get_slots_since(&frozen_bank_slots)
|
||||
.expect("Db error");
|
||||
@ -1411,7 +1441,9 @@ pub(crate) mod tests {
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let subscriptions = Arc::new(RpcSubscriptions::new(
|
||||
&exit,
|
||||
Arc::new(RwLock::new(BlockCommitmentCache::default())),
|
||||
Arc::new(RwLock::new(BlockCommitmentCache::default_with_blockstore(
|
||||
blockstore.clone(),
|
||||
))),
|
||||
));
|
||||
let bank_forks = BankForks::new(0, bank0);
|
||||
bank_forks.working_bank().freeze();
|
||||
@ -1464,12 +1496,58 @@ pub(crate) mod tests {
|
||||
for i in 0..=root {
|
||||
progress.insert(i, ForkProgress::new(Hash::default()));
|
||||
}
|
||||
ReplayStage::handle_new_root(root, &bank_forks, &mut progress, &None);
|
||||
ReplayStage::handle_new_root(root, &bank_forks, &mut progress, &None, None);
|
||||
assert_eq!(bank_forks.read().unwrap().root(), root);
|
||||
assert_eq!(progress.len(), 1);
|
||||
assert!(progress.get(&root).is_some());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_handle_new_root_ahead_of_largest_confirmed_root() {
|
||||
let genesis_config = create_genesis_config(10_000).genesis_config;
|
||||
let bank0 = Bank::new(&genesis_config);
|
||||
let bank_forks = Arc::new(RwLock::new(BankForks::new(0, bank0)));
|
||||
let confirmed_root = 1;
|
||||
let fork = 2;
|
||||
let bank1 = Bank::new_from_parent(
|
||||
bank_forks.read().unwrap().get(0).unwrap(),
|
||||
&Pubkey::default(),
|
||||
confirmed_root,
|
||||
);
|
||||
bank_forks.write().unwrap().insert(bank1);
|
||||
let bank2 = Bank::new_from_parent(
|
||||
bank_forks.read().unwrap().get(confirmed_root).unwrap(),
|
||||
&Pubkey::default(),
|
||||
fork,
|
||||
);
|
||||
bank_forks.write().unwrap().insert(bank2);
|
||||
let root = 3;
|
||||
let root_bank = Bank::new_from_parent(
|
||||
bank_forks.read().unwrap().get(confirmed_root).unwrap(),
|
||||
&Pubkey::default(),
|
||||
root,
|
||||
);
|
||||
bank_forks.write().unwrap().insert(root_bank);
|
||||
let mut progress = HashMap::new();
|
||||
for i in 0..=root {
|
||||
progress.insert(i, ForkProgress::new(Hash::default()));
|
||||
}
|
||||
ReplayStage::handle_new_root(
|
||||
root,
|
||||
&bank_forks,
|
||||
&mut progress,
|
||||
&None,
|
||||
Some(confirmed_root),
|
||||
);
|
||||
assert_eq!(bank_forks.read().unwrap().root(), root);
|
||||
assert!(bank_forks.read().unwrap().get(confirmed_root).is_some());
|
||||
assert!(bank_forks.read().unwrap().get(fork).is_none());
|
||||
assert_eq!(progress.len(), 2);
|
||||
assert!(progress.get(&root).is_some());
|
||||
assert!(progress.get(&confirmed_root).is_some());
|
||||
assert!(progress.get(&fork).is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_dead_fork_transaction_error() {
|
||||
let keypair1 = Keypair::new();
|
||||
@ -1660,6 +1738,7 @@ pub(crate) mod tests {
|
||||
ShredCommonHeader::default(),
|
||||
data_header,
|
||||
CodingShredHeader::default(),
|
||||
PACKET_DATA_SIZE,
|
||||
);
|
||||
bincode::serialize_into(
|
||||
&mut shred.payload[SIZE_OF_COMMON_SHRED_HEADER + SIZE_OF_DATA_SHRED_HEADER..],
|
||||
@ -1709,6 +1788,7 @@ pub(crate) mod tests {
|
||||
&mut bank0_progress,
|
||||
None,
|
||||
&VerifyRecyclers::default(),
|
||||
&genesis_config,
|
||||
);
|
||||
|
||||
// Check that the erroring bank was marked as dead in the progress map
|
||||
@ -1736,7 +1816,11 @@ pub(crate) mod tests {
|
||||
bank.store_account(&pubkey, &leader_vote_account);
|
||||
}
|
||||
|
||||
let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default()));
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
|
||||
let block_commitment_cache = Arc::new(RwLock::new(
|
||||
BlockCommitmentCache::default_with_blockstore(blockstore.clone()),
|
||||
));
|
||||
let (lockouts_sender, _) = AggregateCommitmentService::new(
|
||||
&Arc::new(AtomicBool::new(false)),
|
||||
block_commitment_cache.clone(),
|
||||
@ -1932,13 +2016,23 @@ pub(crate) mod tests {
|
||||
{
|
||||
if let EncodedTransaction::Json(transaction) = transaction {
|
||||
if transaction.signatures[0] == signatures[0].to_string() {
|
||||
assert_eq!(meta.unwrap().status, Ok(()));
|
||||
let meta = meta.unwrap();
|
||||
assert_eq!(meta.err, None);
|
||||
assert_eq!(meta.status, Ok(()));
|
||||
} else if transaction.signatures[0] == signatures[1].to_string() {
|
||||
let meta = meta.unwrap();
|
||||
assert_eq!(
|
||||
meta.unwrap().status,
|
||||
meta.err,
|
||||
Some(TransactionError::InstructionError(
|
||||
0,
|
||||
InstructionError::Custom(1)
|
||||
))
|
||||
);
|
||||
assert_eq!(
|
||||
meta.status,
|
||||
Err(TransactionError::InstructionError(
|
||||
0,
|
||||
InstructionError::CustomError(1)
|
||||
InstructionError::Custom(1)
|
||||
))
|
||||
);
|
||||
} else {
|
||||
|
919
core/src/rpc.rs
File diff suppressed because it is too large
45
core/src/rpc_error.rs
Normal file
@ -0,0 +1,45 @@
|
||||
use jsonrpc_core::{Error, ErrorCode};
|
||||
use solana_sdk::clock::Slot;
|
||||
|
||||
const JSON_RPC_SERVER_ERROR_0: i64 = -32000;
|
||||
const JSON_RPC_SERVER_ERROR_1: i64 = -32001;
|
||||
|
||||
pub enum RpcCustomError {
|
||||
NonexistentClusterRoot {
|
||||
cluster_root: Slot,
|
||||
node_root: Slot,
|
||||
},
|
||||
BlockCleanedUp {
|
||||
slot: Slot,
|
||||
first_available_block: Slot,
|
||||
},
|
||||
}
|
||||
|
||||
impl From<RpcCustomError> for Error {
|
||||
fn from(e: RpcCustomError) -> Self {
|
||||
match e {
|
||||
RpcCustomError::NonexistentClusterRoot {
|
||||
cluster_root,
|
||||
node_root,
|
||||
} => Self {
|
||||
code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_0),
|
||||
message: format!(
|
||||
"Cluster largest_confirmed_root {} does not exist on node. Node root: {}",
|
||||
cluster_root, node_root,
|
||||
),
|
||||
data: None,
|
||||
},
|
||||
RpcCustomError::BlockCleanedUp {
|
||||
slot,
|
||||
first_available_block,
|
||||
} => Self {
|
||||
code: ErrorCode::ServerError(JSON_RPC_SERVER_ERROR_1),
|
||||
message: format!(
|
||||
"Block {} cleaned up, does not exist on node. First available block: {}",
|
||||
slot, first_available_block,
|
||||
),
|
||||
data: None,
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
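A sketch of how an RPC handler is expected to surface these custom errors through the From impl above; the checking function itself is hypothetical.
// Illustrative only: map a cleaned-up block onto the custom JSON-RPC error above.
fn check_block_available(slot: Slot, first_available_block: Slot) -> jsonrpc_core::Result<()> {
    if slot < first_available_block {
        return Err(RpcCustomError::BlockCleanedUp {
            slot,
            first_available_block,
        }
        .into());
    }
    Ok(())
}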
@ -4,9 +4,16 @@ use crate::rpc_subscriptions::{Confirmations, RpcSubscriptions, SlotInfo};
|
||||
use jsonrpc_core::{Error, ErrorCode, Result};
|
||||
use jsonrpc_derive::rpc;
|
||||
use jsonrpc_pubsub::{typed::Subscriber, Session, SubscriptionId};
|
||||
use solana_client::rpc_response::{Response as RpcResponse, RpcAccount, RpcKeyedAccount};
|
||||
use solana_sdk::{clock::Slot, pubkey::Pubkey, signature::Signature, transaction};
|
||||
use std::sync::{atomic, Arc};
|
||||
use solana_client::rpc_response::{
|
||||
Response as RpcResponse, RpcAccount, RpcKeyedAccount, RpcSignatureResult,
|
||||
};
|
||||
#[cfg(test)]
|
||||
use solana_ledger::blockstore::Blockstore;
|
||||
use solana_sdk::{clock::Slot, pubkey::Pubkey, signature::Signature};
|
||||
use std::{
|
||||
str::FromStr,
|
||||
sync::{atomic, Arc},
|
||||
};
|
||||
|
||||
// Suppress needless_return due to
|
||||
// https://github.com/paritytech/jsonrpc/blob/2d38e6424d8461cdf72e78425ce67d51af9c6586/derive/src/lib.rs#L204
|
||||
@ -74,7 +81,7 @@ pub trait RpcSolPubSub {
|
||||
fn signature_subscribe(
|
||||
&self,
|
||||
meta: Self::Metadata,
|
||||
subscriber: Subscriber<RpcResponse<transaction::Result<()>>>,
|
||||
subscriber: Subscriber<RpcResponse<RpcSignatureResult>>,
|
||||
signature_str: String,
|
||||
confirmations: Option<Confirmations>,
|
||||
);
|
||||
@ -116,7 +123,6 @@ pub trait RpcSolPubSub {
|
||||
fn root_unsubscribe(&self, meta: Option<Self::Metadata>, id: SubscriptionId) -> Result<bool>;
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct RpcSolPubSubImpl {
|
||||
uid: Arc<atomic::AtomicUsize>,
|
||||
subscriptions: Arc<RpcSubscriptions>,
|
||||
@ -127,9 +133,14 @@ impl RpcSolPubSubImpl {
|
||||
let uid = Arc::new(atomic::AtomicUsize::default());
|
||||
Self { uid, subscriptions }
|
||||
}
|
||||
}
|
||||
|
||||
use std::str::FromStr;
|
||||
#[cfg(test)]
|
||||
fn default_with_blockstore(blockstore: Arc<Blockstore>) -> Self {
|
||||
let uid = Arc::new(atomic::AtomicUsize::default());
|
||||
let subscriptions = Arc::new(RpcSubscriptions::default_with_blockstore(blockstore));
|
||||
Self { uid, subscriptions }
|
||||
}
|
||||
}
|
||||
|
||||
fn param<T: FromStr>(param_str: &str, thing: &str) -> Result<T> {
|
||||
param_str.parse::<T>().map_err(|_e| Error {
|
||||
@ -225,7 +236,7 @@ impl RpcSolPubSub for RpcSolPubSubImpl {
|
||||
fn signature_subscribe(
|
||||
&self,
|
||||
_meta: Self::Metadata,
|
||||
subscriber: Subscriber<RpcResponse<transaction::Result<()>>>,
|
||||
subscriber: Subscriber<RpcResponse<RpcSignatureResult>>,
|
||||
signature_str: String,
|
||||
confirmations: Option<Confirmations>,
|
||||
) {
|
||||
@ -319,8 +330,9 @@ mod tests {
|
||||
};
|
||||
use jsonrpc_core::{futures::sync::mpsc, Response};
|
||||
use jsonrpc_pubsub::{PubSubHandler, Session};
|
||||
use serial_test_derive::serial;
|
||||
use solana_budget_program::{self, budget_instruction};
|
||||
use solana_ledger::bank_forks::BankForks;
|
||||
use solana_ledger::{bank_forks::BankForks, get_tmp_ledger_path};
|
||||
use solana_runtime::bank::Bank;
|
||||
use solana_sdk::{
|
||||
pubkey::Pubkey,
|
||||
@ -355,6 +367,7 @@ mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn test_signature_subscribe() {
|
||||
let GenesisConfigInfo {
|
||||
genesis_config,
|
||||
@ -366,12 +379,16 @@ mod tests {
|
||||
let bank = Bank::new(&genesis_config);
|
||||
let blockhash = bank.last_blockhash();
|
||||
let bank_forks = Arc::new(RwLock::new(BankForks::new(0, bank)));
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
|
||||
let rpc = RpcSolPubSubImpl {
|
||||
subscriptions: Arc::new(RpcSubscriptions::new(
|
||||
&Arc::new(AtomicBool::new(false)),
|
||||
Arc::new(RwLock::new(BlockCommitmentCache::new_for_tests())),
|
||||
Arc::new(RwLock::new(
|
||||
BlockCommitmentCache::new_for_tests_with_blockstore(blockstore),
|
||||
)),
|
||||
)),
|
||||
..RpcSolPubSubImpl::default()
|
||||
uid: Arc::new(atomic::AtomicUsize::default()),
|
||||
};
|
||||
|
||||
// Test signature subscriptions
|
||||
@ -385,7 +402,7 @@ mod tests {
|
||||
|
||||
// Test signature confirmation notification
|
||||
let (response, _) = robust_poll_or_panic(receiver);
|
||||
let expected_res: Option<transaction::Result<()>> = Some(Ok(()));
|
||||
let expected_res = RpcSignatureResult { err: None };
|
||||
let expected = json!({
|
||||
"jsonrpc": "2.0",
|
||||
"method": "signatureNotification",
|
||||
@ -401,6 +418,7 @@ mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn test_signature_unsubscribe() {
|
||||
let GenesisConfigInfo {
|
||||
genesis_config,
|
||||
@ -411,11 +429,13 @@ mod tests {
|
||||
let bank = Bank::new(&genesis_config);
|
||||
let arc_bank = Arc::new(bank);
|
||||
let blockhash = arc_bank.last_blockhash();
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
|
||||
|
||||
let session = create_session();
|
||||
|
||||
let mut io = PubSubHandler::default();
|
||||
let rpc = RpcSolPubSubImpl::default();
|
||||
let rpc = RpcSolPubSubImpl::default_with_blockstore(blockstore);
|
||||
io.extend_with(rpc.to_delegate());
|
||||
|
||||
let tx = system_transaction::transfer(&alice, &bob_pubkey, 20, blockhash);
|
||||
@ -449,6 +469,7 @@ mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn test_account_subscribe() {
|
||||
let GenesisConfigInfo {
|
||||
mut genesis_config,
|
||||
@ -469,13 +490,17 @@ mod tests {
|
||||
let bank = Bank::new(&genesis_config);
|
||||
let blockhash = bank.last_blockhash();
|
||||
let bank_forks = Arc::new(RwLock::new(BankForks::new(0, bank)));
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
|
||||
|
||||
let rpc = RpcSolPubSubImpl {
|
||||
subscriptions: Arc::new(RpcSubscriptions::new(
|
||||
&Arc::new(AtomicBool::new(false)),
|
||||
Arc::new(RwLock::new(BlockCommitmentCache::new_for_tests())),
|
||||
Arc::new(RwLock::new(
|
||||
BlockCommitmentCache::new_for_tests_with_blockstore(blockstore),
|
||||
)),
|
||||
)),
|
||||
..RpcSolPubSubImpl::default()
|
||||
uid: Arc::new(atomic::AtomicUsize::default()),
|
||||
};
|
||||
let session = create_session();
|
||||
let (subscriber, _id_receiver, receiver) = Subscriber::new_test("accountNotification");
|
||||
@ -559,12 +584,15 @@ mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn test_account_unsubscribe() {
|
||||
let bob_pubkey = Pubkey::new_rand();
|
||||
let session = create_session();
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
|
||||
|
||||
let mut io = PubSubHandler::default();
|
||||
let rpc = RpcSolPubSubImpl::default();
|
||||
let rpc = RpcSolPubSubImpl::default_with_blockstore(blockstore);
|
||||
|
||||
io.extend_with(rpc.to_delegate());
|
||||
|
||||
@ -608,13 +636,17 @@ mod tests {
|
||||
let bank = Bank::new(&genesis_config);
|
||||
let blockhash = bank.last_blockhash();
|
||||
let bank_forks = Arc::new(RwLock::new(BankForks::new(0, bank)));
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
|
||||
let bob = Keypair::new();
|
||||
|
||||
let mut rpc = RpcSolPubSubImpl::default();
|
||||
let mut rpc = RpcSolPubSubImpl::default_with_blockstore(blockstore.clone());
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let subscriptions = RpcSubscriptions::new(
|
||||
&exit,
|
||||
Arc::new(RwLock::new(BlockCommitmentCache::new_for_tests())),
|
||||
Arc::new(RwLock::new(
|
||||
BlockCommitmentCache::new_for_tests_with_blockstore(blockstore),
|
||||
)),
|
||||
);
|
||||
rpc.subscriptions = Arc::new(subscriptions);
|
||||
let session = create_session();
|
||||
@ -645,11 +677,15 @@ mod tests {
|
||||
let bank = Bank::new(&genesis_config);
|
||||
let blockhash = bank.last_blockhash();
|
||||
let bank_forks = Arc::new(RwLock::new(BankForks::new(0, bank)));
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
|
||||
let bob = Keypair::new();
|
||||
|
||||
let mut rpc = RpcSolPubSubImpl::default();
|
||||
let mut rpc = RpcSolPubSubImpl::default_with_blockstore(blockstore.clone());
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::new_for_tests()));
|
||||
let block_commitment_cache = Arc::new(RwLock::new(
|
||||
BlockCommitmentCache::new_for_tests_with_blockstore(blockstore.clone()),
|
||||
));
|
||||
|
||||
let subscriptions = RpcSubscriptions::new(&exit, block_commitment_cache.clone());
|
||||
rpc.subscriptions = Arc::new(subscriptions);
|
||||
@ -676,8 +712,14 @@ mod tests {
|
||||
cache0.increase_confirmation_stake(1, 10);
|
||||
let mut block_commitment = HashMap::new();
|
||||
block_commitment.entry(0).or_insert(cache0.clone());
|
||||
let mut new_block_commitment =
|
||||
BlockCommitmentCache::new(block_commitment, 10, bank1.clone(), 0);
|
||||
let mut new_block_commitment = BlockCommitmentCache::new(
|
||||
block_commitment,
|
||||
0,
|
||||
10,
|
||||
bank1.clone(),
|
||||
blockstore.clone(),
|
||||
0,
|
||||
);
|
||||
let mut w_block_commitment_cache = block_commitment_cache.write().unwrap();
|
||||
std::mem::swap(&mut *w_block_commitment_cache, &mut new_block_commitment);
|
||||
drop(w_block_commitment_cache);
|
||||
@ -691,7 +733,8 @@ mod tests {
|
||||
cache0.increase_confirmation_stake(2, 10);
|
||||
let mut block_commitment = HashMap::new();
|
||||
block_commitment.entry(0).or_insert(cache0.clone());
|
||||
let mut new_block_commitment = BlockCommitmentCache::new(block_commitment, 10, bank2, 0);
|
||||
let mut new_block_commitment =
|
||||
BlockCommitmentCache::new(block_commitment, 0, 10, bank2, blockstore.clone(), 0);
|
||||
let mut w_block_commitment_cache = block_commitment_cache.write().unwrap();
|
||||
std::mem::swap(&mut *w_block_commitment_cache, &mut new_block_commitment);
|
||||
drop(w_block_commitment_cache);
|
||||
@ -719,8 +762,11 @@ mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn test_slot_subscribe() {
|
||||
let rpc = RpcSolPubSubImpl::default();
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
|
||||
let rpc = RpcSolPubSubImpl::default_with_blockstore(blockstore);
|
||||
let session = create_session();
|
||||
let (subscriber, _id_receiver, receiver) = Subscriber::new_test("slotNotification");
|
||||
rpc.slot_subscribe(session, subscriber);
|
||||
@ -743,8 +789,11 @@ mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn test_slot_unsubscribe() {
|
||||
let rpc = RpcSolPubSubImpl::default();
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
|
||||
let rpc = RpcSolPubSubImpl::default_with_blockstore(blockstore);
|
||||
let session = create_session();
|
||||
let (subscriber, _id_receiver, receiver) = Subscriber::new_test("slotNotification");
|
||||
rpc.slot_subscribe(session, subscriber);
|
||||
|
@ -73,6 +73,7 @@ impl PubSubService {
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::commitment::BlockCommitmentCache;
|
||||
use solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path};
|
||||
use std::{
|
||||
net::{IpAddr, Ipv4Addr},
|
||||
sync::RwLock,
|
||||
@ -82,9 +83,13 @@ mod tests {
|
||||
fn test_pubsub_new() {
|
||||
let pubsub_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 0);
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
|
||||
let subscriptions = Arc::new(RpcSubscriptions::new(
|
||||
&exit,
|
||||
Arc::new(RwLock::new(BlockCommitmentCache::new_for_tests())),
|
||||
Arc::new(RwLock::new(
|
||||
BlockCommitmentCache::new_for_tests_with_blockstore(blockstore),
|
||||
)),
|
||||
));
|
||||
let pubsub_service = PubSubService::new(&subscriptions, pubsub_addr, &exit);
|
||||
let thread = pubsub_service.thread_hdl.thread();
|
||||
|
@ -15,8 +15,9 @@ use solana_ledger::{
|
||||
blockstore::Blockstore,
|
||||
snapshot_utils,
|
||||
};
|
||||
use solana_sdk::hash::Hash;
|
||||
use solana_sdk::{hash::Hash, pubkey::Pubkey};
|
||||
use std::{
|
||||
collections::HashSet,
|
||||
net::SocketAddr,
|
||||
path::{Path, PathBuf},
|
||||
sync::{mpsc::channel, Arc, RwLock},
|
||||
@ -24,6 +25,10 @@ use std::{
|
||||
};
|
||||
use tokio::prelude::Future;
|
||||
|
||||
// If trusted validators are specified, consider this validator healthy if its latest account hash
|
||||
// is no further behind than this distance from the latest trusted validator account hash
|
||||
const HEALTH_CHECK_SLOT_DISTANCE: u64 = 150;
|
||||
|
||||
pub struct JsonRpcService {
|
||||
thread_hdl: JoinHandle<()>,
|
||||
|
||||
@ -37,15 +42,27 @@ struct RpcRequestMiddleware {
|
||||
ledger_path: PathBuf,
|
||||
snapshot_archive_path_regex: Regex,
|
||||
snapshot_config: Option<SnapshotConfig>,
|
||||
cluster_info: Arc<RwLock<ClusterInfo>>,
|
||||
trusted_validators: Option<HashSet<Pubkey>>,
|
||||
bank_forks: Arc<RwLock<BankForks>>,
|
||||
}
|
||||
|
||||
impl RpcRequestMiddleware {
|
||||
pub fn new(ledger_path: PathBuf, snapshot_config: Option<SnapshotConfig>) -> Self {
|
||||
pub fn new(
|
||||
ledger_path: PathBuf,
|
||||
snapshot_config: Option<SnapshotConfig>,
|
||||
cluster_info: Arc<RwLock<ClusterInfo>>,
|
||||
trusted_validators: Option<HashSet<Pubkey>>,
|
||||
bank_forks: Arc<RwLock<BankForks>>,
|
||||
) -> Self {
|
||||
Self {
|
||||
ledger_path,
|
||||
snapshot_archive_path_regex: Regex::new(r"/snapshot-\d+-[[:alnum:]]+\.tar\.bz2$")
|
||||
.unwrap(),
|
||||
snapshot_config,
|
||||
cluster_info,
|
||||
trusted_validators,
|
||||
bank_forks,
|
||||
}
|
||||
}
|
||||
|
||||
@ -71,7 +88,7 @@ impl RpcRequestMiddleware {
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
fn is_get_path(&self, path: &str) -> bool {
|
||||
fn is_file_get_path(&self, path: &str) -> bool {
|
||||
match path {
|
||||
"/genesis.tar.bz2" => true,
|
||||
_ => {
|
||||
@ -84,7 +101,7 @@ impl RpcRequestMiddleware {
|
||||
}
|
||||
}
|
||||
|
||||
fn get(&self, path: &str) -> RequestMiddlewareAction {
|
||||
fn process_file_get(&self, path: &str) -> RequestMiddlewareAction {
|
||||
let filename = self.ledger_path.join(
|
||||
path.split_at(1).1, // Drop leading '/' from path
|
||||
);
|
||||
@ -104,6 +121,58 @@ impl RpcRequestMiddleware {
|
||||
),
|
||||
}
|
||||
}
|
||||
|
||||
fn health_check(&self) -> &'static str {
|
||||
let response = if let Some(trusted_validators) = &self.trusted_validators {
|
||||
let (latest_account_hash_slot, latest_trusted_validator_account_hash_slot) = {
|
||||
let cluster_info = self.cluster_info.read().unwrap();
|
||||
(
|
||||
cluster_info
|
||||
.get_accounts_hash_for_node(&cluster_info.id())
|
||||
.map(|hashes| hashes.iter().max_by(|a, b| a.0.cmp(&b.0)))
|
||||
.flatten()
|
||||
.map(|slot_hash| slot_hash.0)
|
||||
.unwrap_or(0),
|
||||
trusted_validators
|
||||
.iter()
|
||||
.map(|trusted_validator| {
|
||||
cluster_info
|
||||
.get_accounts_hash_for_node(&trusted_validator)
|
||||
.map(|hashes| hashes.iter().max_by(|a, b| a.0.cmp(&b.0)))
|
||||
.flatten()
|
||||
.map(|slot_hash| slot_hash.0)
|
||||
.unwrap_or(0)
|
||||
})
|
||||
.max()
|
||||
.unwrap_or(0),
|
||||
)
|
||||
};
|
||||
|
||||
// This validator is considered healthy if its latest account hash slot is within
|
||||
// `HEALTH_CHECK_SLOT_DISTANCE` of the latest trusted validator's account hash slot
|
||||
if latest_account_hash_slot > 0
|
||||
&& latest_trusted_validator_account_hash_slot > 0
|
||||
&& latest_account_hash_slot
|
||||
> latest_trusted_validator_account_hash_slot
|
||||
.saturating_sub(HEALTH_CHECK_SLOT_DISTANCE)
|
||||
{
|
||||
"ok"
|
||||
} else {
|
||||
warn!(
|
||||
"health check: me={}, latest trusted_validator={}",
|
||||
latest_account_hash_slot, latest_trusted_validator_account_hash_slot
|
||||
);
|
||||
"behind"
|
||||
}
|
||||
} else {
|
||||
// No trusted validator point of reference available, so this validator is healthy
|
||||
// because it's running
|
||||
"ok"
|
||||
};
|
||||
|
||||
info!("health check: {}", response);
|
||||
response
|
||||
}
|
||||
}
|
||||
|
||||
impl RequestMiddleware for RpcRequestMiddleware {
|
||||
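The trusted-validator health rule above reduces to a single slot comparison; a sketch of it in isolation (function and parameter names are illustrative):
// Healthy when this node's latest accounts-hash slot is within
// HEALTH_CHECK_SLOT_DISTANCE of the best slot reported by any trusted validator.
fn is_healthy(my_slot: u64, best_trusted_slot: u64) -> bool {
    my_slot > 0
        && best_trusted_slot > 0
        && my_slot > best_trusted_slot.saturating_sub(HEALTH_CHECK_SLOT_DISTANCE)
}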
@ -136,8 +205,29 @@ impl RequestMiddleware for RpcRequestMiddleware {
|
||||
};
|
||||
}
|
||||
}
|
||||
if self.is_get_path(request.uri().path()) {
|
||||
self.get(request.uri().path())
|
||||
|
||||
if let Some(result) = process_rest(&self.bank_forks, request.uri().path()) {
|
||||
RequestMiddlewareAction::Respond {
|
||||
should_validate_hosts: true,
|
||||
response: Box::new(jsonrpc_core::futures::future::ok(
|
||||
hyper::Response::builder()
|
||||
.status(hyper::StatusCode::OK)
|
||||
.body(hyper::Body::from(result))
|
||||
.unwrap(),
|
||||
)),
|
||||
}
|
||||
} else if self.is_file_get_path(request.uri().path()) {
|
||||
self.process_file_get(request.uri().path())
|
||||
} else if request.uri().path() == "/health" {
|
||||
RequestMiddlewareAction::Respond {
|
||||
should_validate_hosts: true,
|
||||
response: Box::new(jsonrpc_core::futures::future::ok(
|
||||
hyper::Response::builder()
|
||||
.status(hyper::StatusCode::OK)
|
||||
.body(hyper::Body::from(self.health_check()))
|
||||
.unwrap(),
|
||||
)),
|
||||
}
|
||||
} else {
|
||||
RequestMiddlewareAction::Proceed {
|
||||
should_continue_on_invalid_cors: false,
|
||||
@ -147,6 +237,26 @@ impl RequestMiddleware for RpcRequestMiddleware {
|
||||
}
|
||||
}
|
||||
|
||||
fn process_rest(bank_forks: &Arc<RwLock<BankForks>>, path: &str) -> Option<String> {
|
||||
match path {
|
||||
"/v0/circulating-supply" => {
|
||||
let r_bank_forks = bank_forks.read().unwrap();
|
||||
let bank = r_bank_forks.root_bank();
|
||||
let total_supply = bank.capitalization();
|
||||
let non_circulating_supply =
|
||||
crate::non_circulating_supply::calculate_non_circulating_supply(&bank).lamports;
|
||||
Some(format!("{}", total_supply - non_circulating_supply))
|
||||
}
|
||||
"/v0/total-supply" => {
|
||||
let r_bank_forks = bank_forks.read().unwrap();
|
||||
let bank = r_bank_forks.root_bank();
|
||||
let total_supply = bank.capitalization();
|
||||
Some(format!("{}", total_supply))
|
||||
}
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
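Both REST endpoints return a single decimal number as the response body. A small sketch of the underlying arithmetic, with invented lamport amounts standing in for bank.capitalization() and the non-circulating total (circulating supply is simply capitalization minus non-circulating lamports):

// Illustrative only; the figures are made up and do not come from a real bank.
fn circulating_supply(capitalization: u64, non_circulating_lamports: u64) -> u64 {
    capitalization.saturating_sub(non_circulating_lamports)
}

fn main() {
    let capitalization: u64 = 10_127;
    let non_circulating: u64 = 100;
    // Bodies served by the endpoints above, under these assumed inputs:
    println!("/v0/total-supply       -> {}", capitalization); // "10127"
    println!(
        "/v0/circulating-supply -> {}",
        circulating_supply(capitalization, non_circulating) // "10027"
    );
}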
impl JsonRpcService {
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub fn new(
|
||||
@ -161,12 +271,13 @@ impl JsonRpcService {
|
||||
ledger_path: &Path,
|
||||
storage_state: StorageState,
|
||||
validator_exit: Arc<RwLock<Option<ValidatorExit>>>,
|
||||
trusted_validators: Option<HashSet<Pubkey>>,
|
||||
) -> Self {
|
||||
info!("rpc bound to {:?}", rpc_addr);
|
||||
info!("rpc configuration: {:?}", config);
|
||||
let request_processor = Arc::new(RwLock::new(JsonRpcRequestProcessor::new(
|
||||
config,
|
||||
bank_forks,
|
||||
bank_forks.clone(),
|
||||
block_commitment_cache,
|
||||
blockstore,
|
||||
storage_state,
|
||||
@ -186,20 +297,36 @@ impl JsonRpcService {
|
||||
let rpc = RpcSolImpl;
|
||||
io.extend_with(rpc.to_delegate());
|
||||
|
||||
let server =
|
||||
ServerBuilder::with_meta_extractor(io, move |_req: &hyper::Request<hyper::Body>| Meta {
|
||||
let request_middleware = RpcRequestMiddleware::new(
|
||||
ledger_path,
|
||||
snapshot_config,
|
||||
cluster_info.clone(),
|
||||
trusted_validators,
|
||||
bank_forks.clone(),
|
||||
);
|
||||
let server = ServerBuilder::with_meta_extractor(
|
||||
io,
|
||||
move |_req: &hyper::Request<hyper::Body>| Meta {
|
||||
request_processor: request_processor.clone(),
|
||||
cluster_info: cluster_info.clone(),
|
||||
genesis_hash
|
||||
}).threads(4)
|
||||
.cors(DomainsValidation::AllowOnly(vec![
|
||||
AccessControlAllowOrigin::Any,
|
||||
]))
|
||||
.cors_max_age(86400)
|
||||
.request_middleware(RpcRequestMiddleware::new(ledger_path, snapshot_config))
|
||||
.start_http(&rpc_addr);
|
||||
genesis_hash,
|
||||
},
|
||||
)
|
||||
.threads(num_cpus::get())
|
||||
.cors(DomainsValidation::AllowOnly(vec![
|
||||
AccessControlAllowOrigin::Any,
|
||||
]))
|
||||
.cors_max_age(86400)
|
||||
.request_middleware(request_middleware)
|
||||
.start_http(&rpc_addr);
|
||||
|
||||
if let Err(e) = server {
|
||||
warn!("JSON RPC service unavailable error: {:?}. \nAlso, check that port {} is not already in use by another application", e, rpc_addr.port());
|
||||
warn!(
|
||||
"JSON RPC service unavailable error: {:?}. \n\
|
||||
Also, check that port {} is not already in use by another application",
|
||||
e,
|
||||
rpc_addr.port()
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
@ -240,6 +367,7 @@ mod tests {
|
||||
use super::*;
|
||||
use crate::{
|
||||
contact_info::ContactInfo,
|
||||
crds_value::{CrdsData, CrdsValue, SnapshotHash},
|
||||
genesis_utils::{create_genesis_config, GenesisConfigInfo},
|
||||
rpc::tests::create_validator_exit,
|
||||
};
|
||||
@ -268,21 +396,24 @@ mod tests {
|
||||
solana_net_utils::find_available_port_in_range(ip_addr, (10000, 65535)).unwrap(),
|
||||
);
|
||||
let bank_forks = Arc::new(RwLock::new(BankForks::new(bank.slot(), bank)));
|
||||
let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default()));
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
let blockstore = Blockstore::open(&ledger_path).unwrap();
|
||||
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
|
||||
let block_commitment_cache = Arc::new(RwLock::new(
|
||||
BlockCommitmentCache::default_with_blockstore(blockstore.clone()),
|
||||
));
|
||||
let mut rpc_service = JsonRpcService::new(
|
||||
rpc_addr,
|
||||
JsonRpcConfig::default(),
|
||||
None,
|
||||
bank_forks,
|
||||
block_commitment_cache,
|
||||
Arc::new(blockstore),
|
||||
blockstore,
|
||||
cluster_info,
|
||||
Hash::default(),
|
||||
&PathBuf::from("farf"),
|
||||
StorageState::default(),
|
||||
validator_exit,
|
||||
None,
|
||||
);
|
||||
let thread = rpc_service.thread_hdl.thread();
|
||||
assert_eq!(thread.name().unwrap(), "solana-jsonrpc");
|
||||
@ -301,9 +432,41 @@ mod tests {
|
||||
rpc_service.join().unwrap();
|
||||
}
|
||||
|
||||
fn create_bank_forks() -> Arc<RwLock<BankForks>> {
|
||||
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
|
||||
let bank = Bank::new(&genesis_config);
|
||||
Arc::new(RwLock::new(BankForks::new(bank.slot(), bank)))
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_is_get_path() {
|
||||
let rrm = RpcRequestMiddleware::new(PathBuf::from("/"), None);
|
||||
fn test_process_rest_api() {
|
||||
let bank_forks = create_bank_forks();
|
||||
|
||||
assert_eq!(None, process_rest(&bank_forks, "not-a-supported-rest-api"));
|
||||
assert_eq!(
|
||||
Some("10127".to_string()),
|
||||
process_rest(&bank_forks, "/v0/circulating-supply")
|
||||
);
|
||||
assert_eq!(
|
||||
Some("10127".to_string()),
|
||||
process_rest(&bank_forks, "/v0/total-supply")
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_is_file_get_path() {
|
||||
let cluster_info = Arc::new(RwLock::new(ClusterInfo::new_with_invalid_keypair(
|
||||
ContactInfo::default(),
|
||||
)));
|
||||
let bank_forks = create_bank_forks();
|
||||
|
||||
let rrm = RpcRequestMiddleware::new(
|
||||
PathBuf::from("/"),
|
||||
None,
|
||||
cluster_info.clone(),
|
||||
None,
|
||||
bank_forks.clone(),
|
||||
);
|
||||
let rrm_with_snapshot_config = RpcRequestMiddleware::new(
|
||||
PathBuf::from("/"),
|
||||
Some(SnapshotConfig {
|
||||
@ -311,25 +474,127 @@ mod tests {
|
||||
snapshot_package_output_path: PathBuf::from("/"),
|
||||
snapshot_path: PathBuf::from("/"),
|
||||
}),
|
||||
cluster_info,
|
||||
None,
|
||||
bank_forks,
|
||||
);
|
||||
|
||||
assert!(rrm.is_get_path("/genesis.tar.bz2"));
|
||||
assert!(!rrm.is_get_path("genesis.tar.bz2"));
|
||||
assert!(rrm.is_file_get_path("/genesis.tar.bz2"));
|
||||
assert!(!rrm.is_file_get_path("genesis.tar.bz2"));
|
||||
|
||||
assert!(!rrm.is_get_path("/snapshot.tar.bz2")); // This is a redirect
|
||||
assert!(!rrm.is_file_get_path("/snapshot.tar.bz2")); // This is a redirect
|
||||
|
||||
assert!(
|
||||
!rrm.is_get_path("/snapshot-100-AvFf9oS8A8U78HdjT9YG2sTTThLHJZmhaMn2g8vkWYnr.tar.bz2")
|
||||
);
|
||||
assert!(rrm_with_snapshot_config
|
||||
.is_get_path("/snapshot-100-AvFf9oS8A8U78HdjT9YG2sTTThLHJZmhaMn2g8vkWYnr.tar.bz2"));
|
||||
assert!(!rrm.is_file_get_path(
|
||||
"/snapshot-100-AvFf9oS8A8U78HdjT9YG2sTTThLHJZmhaMn2g8vkWYnr.tar.bz2"
|
||||
));
|
||||
assert!(rrm_with_snapshot_config.is_file_get_path(
|
||||
"/snapshot-100-AvFf9oS8A8U78HdjT9YG2sTTThLHJZmhaMn2g8vkWYnr.tar.bz2"
|
||||
));
|
||||
|
||||
assert!(!rrm.is_get_path(
|
||||
assert!(!rrm.is_file_get_path(
|
||||
"/snapshot-notaslotnumber-AvFf9oS8A8U78HdjT9YG2sTTThLHJZmhaMn2g8vkWYnr.tar.bz2"
|
||||
));
|
||||
|
||||
assert!(!rrm.is_get_path("/"));
|
||||
assert!(!rrm.is_get_path(".."));
|
||||
assert!(!rrm.is_get_path("🎣"));
|
||||
assert!(!rrm.is_file_get_path("/"));
|
||||
assert!(!rrm.is_file_get_path(".."));
|
||||
assert!(!rrm.is_file_get_path("🎣"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_health_check_with_no_trusted_validators() {
|
||||
let cluster_info = Arc::new(RwLock::new(ClusterInfo::new_with_invalid_keypair(
|
||||
ContactInfo::default(),
|
||||
)));
|
||||
|
||||
let rm = RpcRequestMiddleware::new(
|
||||
PathBuf::from("/"),
|
||||
None,
|
||||
cluster_info.clone(),
|
||||
None,
|
||||
create_bank_forks(),
|
||||
);
|
||||
assert_eq!(rm.health_check(), "ok");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_health_check_with_trusted_validators() {
|
||||
let cluster_info = Arc::new(RwLock::new(ClusterInfo::new_with_invalid_keypair(
|
||||
ContactInfo::default(),
|
||||
)));
|
||||
|
||||
let trusted_validators = vec![Pubkey::new_rand(), Pubkey::new_rand(), Pubkey::new_rand()];
|
||||
let rm = RpcRequestMiddleware::new(
|
||||
PathBuf::from("/"),
|
||||
None,
|
||||
cluster_info.clone(),
|
||||
Some(trusted_validators.clone().into_iter().collect()),
|
||||
create_bank_forks(),
|
||||
);
|
||||
|
||||
// No account hashes for this node or any trusted validators == "behind"
|
||||
assert_eq!(rm.health_check(), "behind");
|
||||
|
||||
// No account hashes for any trusted validators == "behind"
|
||||
{
|
||||
let mut cluster_info = cluster_info.write().unwrap();
|
||||
cluster_info
|
||||
.push_accounts_hashes(vec![(1000, Hash::default()), (900, Hash::default())]);
|
||||
}
|
||||
assert_eq!(rm.health_check(), "behind");
|
||||
|
||||
// This node is ahead of the trusted validators == "ok"
|
||||
{
|
||||
let mut cluster_info = cluster_info.write().unwrap();
|
||||
cluster_info
|
||||
.gossip
|
||||
.crds
|
||||
.insert(
|
||||
CrdsValue::new_unsigned(CrdsData::AccountsHashes(SnapshotHash::new(
|
||||
trusted_validators[0].clone(),
|
||||
vec![
|
||||
(1, Hash::default()),
|
||||
(1001, Hash::default()),
|
||||
(2, Hash::default()),
|
||||
],
|
||||
))),
|
||||
1,
|
||||
)
|
||||
.unwrap();
|
||||
}
|
||||
assert_eq!(rm.health_check(), "ok");
|
||||
|
||||
// Node is slightly behind the trusted validators == "ok"
|
||||
{
|
||||
let mut cluster_info = cluster_info.write().unwrap();
|
||||
cluster_info
|
||||
.gossip
|
||||
.crds
|
||||
.insert(
|
||||
CrdsValue::new_unsigned(CrdsData::AccountsHashes(SnapshotHash::new(
|
||||
trusted_validators[1].clone(),
|
||||
vec![(1000 + HEALTH_CHECK_SLOT_DISTANCE - 1, Hash::default())],
|
||||
))),
|
||||
1,
|
||||
)
|
||||
.unwrap();
|
||||
}
|
||||
assert_eq!(rm.health_check(), "ok");
|
||||
|
||||
// Node is far behind the trusted validators == "behind"
|
||||
{
|
||||
let mut cluster_info = cluster_info.write().unwrap();
|
||||
cluster_info
|
||||
.gossip
|
||||
.crds
|
||||
.insert(
|
||||
CrdsValue::new_unsigned(CrdsData::AccountsHashes(SnapshotHash::new(
|
||||
trusted_validators[2].clone(),
|
||||
vec![(1000 + HEALTH_CHECK_SLOT_DISTANCE, Hash::default())],
|
||||
))),
|
||||
1,
|
||||
)
|
||||
.unwrap();
|
||||
}
|
||||
assert_eq!(rm.health_check(), "behind");
|
||||
}
|
||||
}
|
||||
|
@ -8,8 +8,10 @@ use jsonrpc_pubsub::{
|
||||
SubscriptionId,
|
||||
};
|
||||
use serde::Serialize;
|
||||
use solana_client::rpc_response::{Response, RpcAccount, RpcKeyedAccount, RpcResponseContext};
|
||||
use solana_ledger::bank_forks::BankForks;
|
||||
use solana_client::rpc_response::{
|
||||
Response, RpcAccount, RpcKeyedAccount, RpcResponseContext, RpcSignatureResult,
|
||||
};
|
||||
use solana_ledger::{bank_forks::BankForks, blockstore::Blockstore};
|
||||
use solana_runtime::bank::Bank;
|
||||
use solana_sdk::{
|
||||
account::Account, clock::Slot, pubkey::Pubkey, signature::Signature, transaction,
|
||||
@ -66,7 +68,7 @@ type RpcProgramSubscriptions = RwLock<
|
||||
type RpcSignatureSubscriptions = RwLock<
|
||||
HashMap<
|
||||
Signature,
|
||||
HashMap<SubscriptionId, (Sink<Response<transaction::Result<()>>>, Confirmations)>,
|
||||
HashMap<SubscriptionId, (Sink<Response<RpcSignatureResult>>, Confirmations)>,
|
||||
>,
|
||||
>;
|
||||
type RpcSlotSubscriptions = RwLock<HashMap<SubscriptionId, Sink<SlotInfo>>>;
|
||||
@ -207,11 +209,15 @@ fn filter_account_result(
|
||||
Box::new(iter::empty())
|
||||
}
|
||||
|
||||
fn filter_signature_result<S>(result: Option<S>, _root: Slot) -> Box<dyn Iterator<Item = S>>
|
||||
where
|
||||
S: 'static + Clone + Serialize,
|
||||
{
|
||||
Box::new(result.into_iter())
|
||||
fn filter_signature_result(
|
||||
result: Option<transaction::Result<()>>,
|
||||
_root: Slot,
|
||||
) -> Box<dyn Iterator<Item = RpcSignatureResult>> {
|
||||
Box::new(
|
||||
result
|
||||
.into_iter()
|
||||
.map(|result| RpcSignatureResult { err: result.err() }),
|
||||
)
|
||||
}
|
||||
|
||||
fn filter_program_results(
|
||||
@ -240,15 +246,6 @@ pub struct RpcSubscriptions {
|
||||
exit: Arc<AtomicBool>,
|
||||
}
|
||||
|
||||
impl Default for RpcSubscriptions {
|
||||
fn default() -> Self {
|
||||
Self::new(
|
||||
&Arc::new(AtomicBool::new(false)),
|
||||
Arc::new(RwLock::new(BlockCommitmentCache::default())),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for RpcSubscriptions {
|
||||
fn drop(&mut self) {
|
||||
self.shutdown().unwrap_or_else(|err| {
|
||||
@ -318,6 +315,15 @@ impl RpcSubscriptions {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn default_with_blockstore(blockstore: Arc<Blockstore>) -> Self {
|
||||
Self::new(
|
||||
&Arc::new(AtomicBool::new(false)),
|
||||
Arc::new(RwLock::new(BlockCommitmentCache::default_with_blockstore(
|
||||
blockstore,
|
||||
))),
|
||||
)
|
||||
}
|
||||
|
||||
fn check_account(
|
||||
pubkey: &Pubkey,
|
||||
bank_forks: &Arc<RwLock<BankForks>>,
|
||||
@ -430,7 +436,7 @@ impl RpcSubscriptions {
|
||||
signature: Signature,
|
||||
confirmations: Option<Confirmations>,
|
||||
sub_id: SubscriptionId,
|
||||
subscriber: Subscriber<Response<transaction::Result<()>>>,
|
||||
subscriber: Subscriber<Response<RpcSignatureResult>>,
|
||||
) {
|
||||
let mut subscriptions = self.signature_subscriptions.write().unwrap();
|
||||
add_subscription(
|
||||
@ -616,7 +622,9 @@ pub(crate) mod tests {
|
||||
};
|
||||
use jsonrpc_core::futures::{self, stream::Stream};
|
||||
use jsonrpc_pubsub::typed::Subscriber;
|
||||
use serial_test_derive::serial;
|
||||
use solana_budget_program;
|
||||
use solana_ledger::get_tmp_ledger_path;
|
||||
use solana_sdk::{
|
||||
signature::{Keypair, Signer},
|
||||
system_transaction,
|
||||
@ -650,12 +658,15 @@ pub(crate) mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn test_check_account_subscribe() {
|
||||
let GenesisConfigInfo {
|
||||
genesis_config,
|
||||
mint_keypair,
|
||||
..
|
||||
} = create_genesis_config(100);
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
|
||||
let bank = Bank::new(&genesis_config);
|
||||
let blockhash = bank.last_blockhash();
|
||||
let bank_forks = Arc::new(RwLock::new(BankForks::new(0, bank)));
|
||||
@ -682,7 +693,9 @@ pub(crate) mod tests {
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let subscriptions = RpcSubscriptions::new(
|
||||
&exit,
|
||||
Arc::new(RwLock::new(BlockCommitmentCache::new_for_tests())),
|
||||
Arc::new(RwLock::new(
|
||||
BlockCommitmentCache::new_for_tests_with_blockstore(blockstore),
|
||||
)),
|
||||
);
|
||||
subscriptions.add_account_subscription(alice.pubkey(), None, sub_id.clone(), subscriber);
|
||||
|
||||
@ -722,12 +735,15 @@ pub(crate) mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn test_check_program_subscribe() {
|
||||
let GenesisConfigInfo {
|
||||
genesis_config,
|
||||
mint_keypair,
|
||||
..
|
||||
} = create_genesis_config(100);
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
|
||||
let bank = Bank::new(&genesis_config);
|
||||
let blockhash = bank.last_blockhash();
|
||||
let bank_forks = Arc::new(RwLock::new(BankForks::new(0, bank)));
|
||||
@ -754,7 +770,9 @@ pub(crate) mod tests {
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let subscriptions = RpcSubscriptions::new(
|
||||
&exit,
|
||||
Arc::new(RwLock::new(BlockCommitmentCache::new_for_tests())),
|
||||
Arc::new(RwLock::new(
|
||||
BlockCommitmentCache::new_for_tests_with_blockstore(blockstore),
|
||||
)),
|
||||
);
|
||||
subscriptions.add_program_subscription(
|
||||
solana_budget_program::id(),
|
||||
@ -802,12 +820,15 @@ pub(crate) mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn test_check_signature_subscribe() {
|
||||
let GenesisConfigInfo {
|
||||
genesis_config,
|
||||
mint_keypair,
|
||||
..
|
||||
} = create_genesis_config(100);
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
|
||||
let bank = Bank::new(&genesis_config);
|
||||
let blockhash = bank.last_blockhash();
|
||||
let mut bank_forks = BankForks::new(0, bank);
|
||||
@ -846,7 +867,8 @@ pub(crate) mod tests {
|
||||
let mut block_commitment = HashMap::new();
|
||||
block_commitment.entry(0).or_insert(cache0.clone());
|
||||
block_commitment.entry(1).or_insert(cache1.clone());
|
||||
let block_commitment_cache = BlockCommitmentCache::new(block_commitment, 10, bank1, 0);
|
||||
let block_commitment_cache =
|
||||
BlockCommitmentCache::new(block_commitment, 0, 10, bank1, blockstore, 0);
|
||||
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let subscriptions =
|
||||
@ -892,7 +914,7 @@ pub(crate) mod tests {
|
||||
}
|
||||
|
||||
subscriptions.notify_subscribers(1, &bank_forks);
|
||||
let expected_res: Option<transaction::Result<()>> = Some(Ok(()));
|
||||
let expected_res = RpcSignatureResult { err: None };
|
||||
|
||||
struct Notification {
|
||||
slot: Slot,
|
||||
@ -943,14 +965,19 @@ pub(crate) mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn test_check_slot_subscribe() {
|
||||
let (subscriber, _id_receiver, transport_receiver) =
|
||||
Subscriber::new_test("slotNotification");
|
||||
let sub_id = SubscriptionId::Number(0 as u64);
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
|
||||
let subscriptions = RpcSubscriptions::new(
|
||||
&exit,
|
||||
Arc::new(RwLock::new(BlockCommitmentCache::new_for_tests())),
|
||||
Arc::new(RwLock::new(
|
||||
BlockCommitmentCache::new_for_tests_with_blockstore(blockstore),
|
||||
)),
|
||||
);
|
||||
subscriptions.add_slot_subscription(sub_id.clone(), subscriber);
|
||||
|
||||
@ -984,14 +1011,19 @@ pub(crate) mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn test_check_root_subscribe() {
|
||||
let (subscriber, _id_receiver, mut transport_receiver) =
|
||||
Subscriber::new_test("rootNotification");
|
||||
let sub_id = SubscriptionId::Number(0 as u64);
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
|
||||
let subscriptions = RpcSubscriptions::new(
|
||||
&exit,
|
||||
Arc::new(RwLock::new(BlockCommitmentCache::new_for_tests())),
|
||||
Arc::new(RwLock::new(
|
||||
BlockCommitmentCache::new_for_tests_with_blockstore(blockstore),
|
||||
)),
|
||||
);
|
||||
subscriptions.add_root_subscription(sub_id.clone(), subscriber);
|
||||
|
||||
@ -1024,6 +1056,7 @@ pub(crate) mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn test_add_and_remove_subscription() {
|
||||
let mut subscriptions: HashMap<u64, HashMap<SubscriptionId, (Sink<()>, Confirmations)>> =
|
||||
HashMap::new();
|
||||
|
@ -1,17 +1,21 @@
|
||||
use crate::packet::limited_deserialize;
|
||||
use crate::streamer::{PacketReceiver, PacketSender};
|
||||
use crate::{
|
||||
cluster_info::{ClusterInfo, ClusterInfoError},
|
||||
contact_info::ContactInfo,
|
||||
packet::Packet,
|
||||
repair_response,
|
||||
repair_service::RepairStats,
|
||||
result::{Error, Result},
|
||||
};
|
||||
use bincode::serialize;
|
||||
use rand::{thread_rng, Rng};
|
||||
use solana_ledger::blockstore::Blockstore;
|
||||
use solana_ledger::{
|
||||
blockstore::Blockstore,
|
||||
shred::{Nonce, Shred},
|
||||
};
|
||||
use solana_measure::measure::Measure;
|
||||
use solana_measure::thread_mem_usage;
|
||||
use solana_metrics::{datapoint_debug, inc_new_counter_debug};
|
||||
use solana_perf::packet::{Packets, PacketsRecycler};
|
||||
use solana_perf::packet::{limited_deserialize, Packets, PacketsRecycler};
|
||||
use solana_sdk::{
|
||||
clock::Slot,
|
||||
signature::{Keypair, Signer},
|
||||
@ -27,6 +31,7 @@ use std::{
|
||||
|
||||
/// the number of slots to respond with when responding to `Orphan` requests
|
||||
pub const MAX_ORPHAN_REPAIR_RESPONSES: usize = 10;
|
||||
pub const DEFAULT_NONCE: u32 = 42;
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq)]
|
||||
pub enum RepairType {
|
||||
@ -45,12 +50,25 @@ impl RepairType {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Default)]
pub struct ServeRepairStats {
pub total_packets: usize,
pub processed: usize,
pub self_repair: usize,
pub window_index: usize,
pub highest_window_index: usize,
pub orphan: usize,
}

/// Window protocol messages
#[derive(Serialize, Deserialize, Debug)]
enum RepairProtocol {
WindowIndex(ContactInfo, u64, u64),
HighestWindowIndex(ContactInfo, u64, u64),
Orphan(ContactInfo, u64),
WindowIndexWithNonce(ContactInfo, u64, u64, Nonce),
HighestWindowIndexWithNonce(ContactInfo, u64, u64, Nonce),
OrphanWithNonce(ContactInfo, u64, Nonce),
}
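Each new variant mirrors an existing request type but carries an extra Nonce so the responder can echo it back and the requester can tie replies to its own outstanding requests. A toy version of that idea, assuming only the serde derive and bincode crates (the real ContactInfo and Nonce types are simplified away):

use serde::{Deserialize, Serialize};

type Nonce = u32;

// Toy stand-in for the protocol above: the *WithNonce variant is what a peer
// past the nonce feature gate sends, and it expects the nonce echoed in replies.
#[derive(Serialize, Deserialize, Debug, PartialEq)]
enum ToyRepair {
    Orphan(u64),                 // legacy request, nothing to verify in the reply
    OrphanWithNonce(u64, Nonce), // reply packets must carry this nonce
}

fn main() {
    let req = ToyRepair::OrphanWithNonce(42, 9);
    let bytes = bincode::serialize(&req).unwrap();
    assert_eq!(bincode::deserialize::<ToyRepair>(&bytes).unwrap(), req);
}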
#[derive(Clone)]
|
||||
@ -94,6 +112,9 @@ impl ServeRepair {
|
||||
RepairProtocol::WindowIndex(ref from, _, _) => from,
|
||||
RepairProtocol::HighestWindowIndex(ref from, _, _) => from,
|
||||
RepairProtocol::Orphan(ref from, _) => from,
|
||||
RepairProtocol::WindowIndexWithNonce(ref from, _, _, _) => from,
|
||||
RepairProtocol::HighestWindowIndexWithNonce(ref from, _, _, _) => from,
|
||||
RepairProtocol::OrphanWithNonce(ref from, _, _) => from,
|
||||
}
|
||||
}
|
||||
|
||||
@ -103,6 +124,7 @@ impl ServeRepair {
|
||||
from_addr: &SocketAddr,
|
||||
blockstore: Option<&Arc<Blockstore>>,
|
||||
request: RepairProtocol,
|
||||
stats: &mut ServeRepairStats,
|
||||
) -> Option<Packets> {
|
||||
let now = Instant::now();
|
||||
|
||||
@ -110,18 +132,14 @@ impl ServeRepair {
|
||||
let my_id = me.read().unwrap().keypair.pubkey();
|
||||
let from = Self::get_repair_sender(&request);
|
||||
if from.id == my_id {
|
||||
warn!(
|
||||
"{}: Ignored received repair request from ME {}",
|
||||
my_id, from.id,
|
||||
);
|
||||
inc_new_counter_debug!("serve_repair-handle-repair--eq", 1);
|
||||
stats.self_repair += 1;
|
||||
return None;
|
||||
}
|
||||
|
||||
let (res, label) = {
|
||||
match &request {
|
||||
RepairProtocol::WindowIndex(from, slot, shred_index) => {
|
||||
inc_new_counter_debug!("serve_repair-request-window-index", 1);
|
||||
stats.window_index += 1;
|
||||
(
|
||||
Self::run_window_request(
|
||||
recycler,
|
||||
@ -131,13 +149,14 @@ impl ServeRepair {
|
||||
&me.read().unwrap().my_info,
|
||||
*slot,
|
||||
*shred_index,
|
||||
None,
|
||||
),
|
||||
"WindowIndex",
|
||||
)
|
||||
}
|
||||
|
||||
RepairProtocol::HighestWindowIndex(_, slot, highest_index) => {
|
||||
inc_new_counter_debug!("serve_repair-request-highest-window-index", 1);
|
||||
stats.highest_window_index += 1;
|
||||
(
|
||||
Self::run_highest_window_request(
|
||||
recycler,
|
||||
@ -145,12 +164,13 @@ impl ServeRepair {
|
||||
blockstore,
|
||||
*slot,
|
||||
*highest_index,
|
||||
None,
|
||||
),
|
||||
"HighestWindowIndex",
|
||||
)
|
||||
}
|
||||
RepairProtocol::Orphan(_, slot) => {
|
||||
inc_new_counter_debug!("serve_repair-request-orphan", 1);
|
||||
stats.orphan += 1;
|
||||
(
|
||||
Self::run_orphan(
|
||||
recycler,
|
||||
@ -158,10 +178,55 @@ impl ServeRepair {
|
||||
blockstore,
|
||||
*slot,
|
||||
MAX_ORPHAN_REPAIR_RESPONSES,
|
||||
None,
|
||||
),
|
||||
"Orphan",
|
||||
)
|
||||
}
|
||||
RepairProtocol::WindowIndexWithNonce(_, slot, shred_index, nonce) => {
|
||||
stats.window_index += 1;
|
||||
(
|
||||
Self::run_window_request(
|
||||
recycler,
|
||||
from,
|
||||
&from_addr,
|
||||
blockstore,
|
||||
&me.read().unwrap().my_info,
|
||||
*slot,
|
||||
*shred_index,
|
||||
Some(*nonce),
|
||||
),
|
||||
"WindowIndexWithNonce",
|
||||
)
|
||||
}
|
||||
RepairProtocol::HighestWindowIndexWithNonce(_, slot, highest_index, nonce) => {
|
||||
stats.highest_window_index += 1;
|
||||
(
|
||||
Self::run_highest_window_request(
|
||||
recycler,
|
||||
&from_addr,
|
||||
blockstore,
|
||||
*slot,
|
||||
*highest_index,
|
||||
Some(*nonce),
|
||||
),
|
||||
"HighestWindowIndexWithNonce",
|
||||
)
|
||||
}
|
||||
RepairProtocol::OrphanWithNonce(_, slot, nonce) => {
|
||||
stats.orphan += 1;
|
||||
(
|
||||
Self::run_orphan(
|
||||
recycler,
|
||||
&from_addr,
|
||||
blockstore,
|
||||
*slot,
|
||||
MAX_ORPHAN_REPAIR_RESPONSES,
|
||||
Some(*nonce),
|
||||
),
|
||||
"OrphanWithNonce",
|
||||
)
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
@@ -184,15 +249,62 @@ impl ServeRepair {
blockstore: Option<&Arc<Blockstore>>,
requests_receiver: &PacketReceiver,
response_sender: &PacketSender,
max_packets: &mut usize,
stats: &mut ServeRepairStats,
) -> Result<()> {
//TODO cache connections
let timeout = Duration::new(1, 0);
let reqs = requests_receiver.recv_timeout(timeout)?;
let mut reqs_v = vec![requests_receiver.recv_timeout(timeout)?];
let mut total_packets = reqs_v[0].packets.len();

Self::handle_packets(obj, &recycler, blockstore, reqs, response_sender);
while let Ok(more) = requests_receiver.try_recv() {
total_packets += more.packets.len();
if total_packets < *max_packets {
// Drop the rest in the channel in case of dos
reqs_v.push(more);
}
}

let mut time = Measure::start("repair::handle_packets");
for reqs in reqs_v {
Self::handle_packets(obj, &recycler, blockstore, reqs, response_sender, stats);
}
time.stop();
if total_packets >= *max_packets {
if time.as_ms() > 1000 {
*max_packets = (*max_packets * 9) / 10;
} else {
*max_packets = (*max_packets * 10) / 9;
}
}
Ok(())
}
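The per-iteration packet budget above is adjusted adaptively. A minimal sketch of just that adjustment, lifting the 9/10 and 10/9 factors straight from the diff (the helper name and sample numbers are illustrative):

// Shrink the batch limit when serving a full batch took longer than a second,
// grow it slightly when the node kept up, and leave it alone otherwise.
fn adjust_max_packets(max_packets: usize, total_packets: usize, elapsed_ms: u64) -> usize {
    if total_packets >= max_packets {
        if elapsed_ms > 1000 {
            (max_packets * 9) / 10 // fell behind: accept fewer packets next round
        } else {
            (max_packets * 10) / 9 // kept up: allow a little more
        }
    } else {
        max_packets // under the limit: no change
    }
}

fn main() {
    assert_eq!(adjust_max_packets(1024, 1024, 1500), 921);
    assert_eq!(adjust_max_packets(1024, 1024, 200), 1137);
    assert_eq!(adjust_max_packets(1024, 10, 200), 1024);
}

The asymmetry means the budget only moves when the limit was actually hit, ratcheting up while the node keeps up and backing off as soon as a full batch takes more than a second to serve.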
fn report_reset_stats(me: &Arc<RwLock<Self>>, stats: &mut ServeRepairStats) {
|
||||
if stats.self_repair > 0 {
|
||||
let my_id = me.read().unwrap().keypair.pubkey();
|
||||
warn!(
|
||||
"{}: Ignored received repair requests from ME: {}",
|
||||
my_id, stats.self_repair,
|
||||
);
|
||||
inc_new_counter_debug!("serve_repair-handle-repair--eq", stats.self_repair);
|
||||
}
|
||||
|
||||
debug!(
|
||||
"repair_listener: total_packets: {} passed: {}",
|
||||
stats.total_packets, stats.processed
|
||||
);
|
||||
|
||||
inc_new_counter_debug!("serve_repair-request-window-index", stats.window_index);
|
||||
inc_new_counter_debug!(
|
||||
"serve_repair-request-highest-window-index",
|
||||
stats.highest_window_index
|
||||
);
|
||||
inc_new_counter_debug!("serve_repair-request-orphan", stats.orphan);
|
||||
|
||||
*stats = ServeRepairStats::default();
|
||||
}
|
||||
|
||||
pub fn listen(
|
||||
me: Arc<RwLock<Self>>,
|
||||
blockstore: Option<Arc<Blockstore>>,
|
||||
@ -204,22 +316,33 @@ impl ServeRepair {
|
||||
let recycler = PacketsRecycler::default();
|
||||
Builder::new()
|
||||
.name("solana-repair-listen".to_string())
|
||||
.spawn(move || loop {
|
||||
let result = Self::run_listen(
|
||||
&me,
|
||||
&recycler,
|
||||
blockstore.as_ref(),
|
||||
&requests_receiver,
|
||||
&response_sender,
|
||||
);
|
||||
match result {
|
||||
Err(Error::RecvTimeoutError(_)) | Ok(_) => {}
|
||||
Err(err) => info!("repair listener error: {:?}", err),
|
||||
};
|
||||
if exit.load(Ordering::Relaxed) {
|
||||
return;
|
||||
.spawn(move || {
|
||||
let mut max_packets = 1024;
|
||||
let mut last_print = Instant::now();
|
||||
let mut stats = ServeRepairStats::default();
|
||||
loop {
|
||||
let result = Self::run_listen(
|
||||
&me,
|
||||
&recycler,
|
||||
blockstore.as_ref(),
|
||||
&requests_receiver,
|
||||
&response_sender,
|
||||
&mut max_packets,
|
||||
&mut stats,
|
||||
);
|
||||
match result {
|
||||
Err(Error::RecvTimeoutError(_)) | Ok(_) => {}
|
||||
Err(err) => info!("repair listener error: {:?}", err),
|
||||
};
|
||||
if exit.load(Ordering::Relaxed) {
|
||||
return;
|
||||
}
|
||||
if last_print.elapsed().as_secs() > 2 {
|
||||
Self::report_reset_stats(&me, &mut stats);
|
||||
last_print = Instant::now();
|
||||
}
|
||||
thread_mem_usage::datapoint("solana-repair-listen");
|
||||
}
|
||||
thread_mem_usage::datapoint("solana-repair-listen");
|
||||
})
|
||||
.unwrap()
|
||||
}
|
||||
@ -230,6 +353,7 @@ impl ServeRepair {
|
||||
blockstore: Option<&Arc<Blockstore>>,
|
||||
packets: Packets,
|
||||
response_sender: &PacketSender,
|
||||
stats: &mut ServeRepairStats,
|
||||
) {
|
||||
// iter over the packets, collect pulls separately and process everything else
|
||||
let allocated = thread_mem_usage::Allocatedp::default();
|
||||
@ -239,7 +363,9 @@ impl ServeRepair {
|
||||
limited_deserialize(&packet.data[..packet.meta.size])
|
||||
.into_iter()
|
||||
.for_each(|request| {
|
||||
let rsp = Self::handle_repair(me, recycler, &from_addr, blockstore, request);
|
||||
stats.processed += 1;
|
||||
let rsp =
|
||||
Self::handle_repair(me, recycler, &from_addr, blockstore, request, stats);
|
||||
if let Some(rsp) = rsp {
|
||||
let _ignore_disconnect = response_sender.send(rsp);
|
||||
}
|
||||
@ -251,27 +377,59 @@ impl ServeRepair {
|
||||
});
|
||||
}
|
||||
|
||||
fn window_index_request_bytes(&self, slot: Slot, shred_index: u64) -> Result<Vec<u8>> {
|
||||
let req = RepairProtocol::WindowIndex(self.my_info.clone(), slot, shred_index);
|
||||
fn window_index_request_bytes(
|
||||
&self,
|
||||
slot: Slot,
|
||||
shred_index: u64,
|
||||
nonce: Option<Nonce>,
|
||||
) -> Result<Vec<u8>> {
|
||||
let req = if let Some(nonce) = nonce {
|
||||
RepairProtocol::WindowIndexWithNonce(self.my_info.clone(), slot, shred_index, nonce)
|
||||
} else {
|
||||
RepairProtocol::WindowIndex(self.my_info.clone(), slot, shred_index)
|
||||
};
|
||||
let out = serialize(&req)?;
|
||||
Ok(out)
|
||||
}
|
||||
|
||||
fn window_highest_index_request_bytes(&self, slot: Slot, shred_index: u64) -> Result<Vec<u8>> {
|
||||
let req = RepairProtocol::HighestWindowIndex(self.my_info.clone(), slot, shred_index);
|
||||
fn window_highest_index_request_bytes(
|
||||
&self,
|
||||
slot: Slot,
|
||||
shred_index: u64,
|
||||
nonce: Option<Nonce>,
|
||||
) -> Result<Vec<u8>> {
|
||||
let req = if let Some(nonce) = nonce {
|
||||
RepairProtocol::HighestWindowIndexWithNonce(
|
||||
self.my_info.clone(),
|
||||
slot,
|
||||
shred_index,
|
||||
nonce,
|
||||
)
|
||||
} else {
|
||||
RepairProtocol::HighestWindowIndex(self.my_info.clone(), slot, shred_index)
|
||||
};
|
||||
let out = serialize(&req)?;
|
||||
Ok(out)
|
||||
}
|
||||
|
||||
fn orphan_bytes(&self, slot: Slot) -> Result<Vec<u8>> {
|
||||
let req = RepairProtocol::Orphan(self.my_info.clone(), slot);
|
||||
fn orphan_bytes(&self, slot: Slot, nonce: Option<Nonce>) -> Result<Vec<u8>> {
|
||||
let req = if let Some(nonce) = nonce {
|
||||
RepairProtocol::OrphanWithNonce(self.my_info.clone(), slot, nonce)
|
||||
} else {
|
||||
RepairProtocol::Orphan(self.my_info.clone(), slot)
|
||||
};
|
||||
let out = serialize(&req)?;
|
||||
Ok(out)
|
||||
}
|
||||
|
||||
pub fn repair_request(&self, repair_request: &RepairType) -> Result<(SocketAddr, Vec<u8>)> {
pub fn repair_request(
&self,
repair_request: &RepairType,
repair_stats: &mut RepairStats,
) -> Result<(SocketAddr, Vec<u8>)> {
// find a peer that appears to be accepting replication and has the desired slot, as indicated
// by a valid tvu port location
let slot = repair_request.slot();
let valid: Vec<_> = self
.cluster_info
.read()
@@ -282,32 +440,38 @@ impl ServeRepair {
}
let n = thread_rng().gen::<usize>() % valid.len();
let addr = valid[n].serve_repair; // send the request to the peer's serve_repair port
let out = self.map_repair_request(repair_request)?;
let nonce = if Shred::is_nonce_unlocked(slot) {
Some(DEFAULT_NONCE)
} else {
None
};
let out = self.map_repair_request(&repair_request, repair_stats, nonce)?;

Ok((addr, out))
}
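The nonce is only attached once the requested slot is past an activation point. A hedged sketch of that gating, where UNLOCK_SLOT and the helper stand in for Shred::is_nonce_unlocked and DEFAULT_NONCE (the real comparison and values may differ):

type Nonce = u32;
const UNLOCK_SLOT: u64 = 1_000; // illustrative activation slot
const DEFAULT_NONCE: Nonce = 42;

fn nonce_for_slot(slot: u64) -> Option<Nonce> {
    if slot > UNLOCK_SLOT {
        Some(DEFAULT_NONCE) // peers on the new protocol echo this back
    } else {
        None // stay on the legacy, nonce-less request format
    }
}

fn main() {
    assert_eq!(nonce_for_slot(999), None);
    assert_eq!(nonce_for_slot(1_001), Some(42));
}

Keeping both request shapes alive presumably lets a node interoperate with peers that still serve slots from before the activation point.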
pub fn map_repair_request(&self, repair_request: &RepairType) -> Result<Vec<u8>> {
|
||||
pub fn map_repair_request(
|
||||
&self,
|
||||
repair_request: &RepairType,
|
||||
repair_stats: &mut RepairStats,
|
||||
nonce: Option<Nonce>,
|
||||
) -> Result<Vec<u8>> {
|
||||
let slot = repair_request.slot();
|
||||
if Shred::is_nonce_unlocked(slot) {
|
||||
assert!(nonce.is_some());
|
||||
}
|
||||
match repair_request {
|
||||
RepairType::Shred(slot, shred_index) => {
|
||||
datapoint_debug!(
|
||||
"serve_repair-repair",
|
||||
("repair-slot", *slot, i64),
|
||||
("repair-ix", *shred_index, i64)
|
||||
);
|
||||
Ok(self.window_index_request_bytes(*slot, *shred_index)?)
|
||||
repair_stats.shred.update(*slot);
|
||||
Ok(self.window_index_request_bytes(*slot, *shred_index, nonce)?)
|
||||
}
|
||||
RepairType::HighestShred(slot, shred_index) => {
|
||||
datapoint_info!(
|
||||
"serve_repair-repair_highest",
|
||||
("repair-highest-slot", *slot, i64),
|
||||
("repair-highest-ix", *shred_index, i64)
|
||||
);
|
||||
Ok(self.window_highest_index_request_bytes(*slot, *shred_index)?)
|
||||
repair_stats.highest_shred.update(*slot);
|
||||
Ok(self.window_highest_index_request_bytes(*slot, *shred_index, nonce)?)
|
||||
}
|
||||
RepairType::Orphan(slot) => {
|
||||
datapoint_info!("serve_repair-repair_orphan", ("repair-orphan", *slot, i64));
|
||||
Ok(self.orphan_bytes(*slot)?)
|
||||
repair_stats.orphan.update(*slot);
|
||||
Ok(self.orphan_bytes(*slot, nonce)?)
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -320,12 +484,19 @@ impl ServeRepair {
|
||||
me: &ContactInfo,
|
||||
slot: Slot,
|
||||
shred_index: u64,
|
||||
nonce: Option<Nonce>,
|
||||
) -> Option<Packets> {
|
||||
if let Some(blockstore) = blockstore {
|
||||
// Try to find the requested index in one of the slots
|
||||
let packet = Self::get_data_shred_as_packet(blockstore, slot, shred_index, from_addr);
|
||||
let packet = repair_response::repair_response_packet(
|
||||
blockstore,
|
||||
slot,
|
||||
shred_index,
|
||||
from_addr,
|
||||
nonce,
|
||||
);
|
||||
|
||||
if let Ok(Some(packet)) = packet {
|
||||
if let Some(packet) = packet {
|
||||
inc_new_counter_debug!("serve_repair-window-request-ledger", 1);
|
||||
return Some(Packets::new_with_recycler_data(
|
||||
recycler,
|
||||
@ -353,15 +524,20 @@ impl ServeRepair {
|
||||
blockstore: Option<&Arc<Blockstore>>,
|
||||
slot: Slot,
|
||||
highest_index: u64,
|
||||
nonce: Option<Nonce>,
|
||||
) -> Option<Packets> {
|
||||
let blockstore = blockstore?;
|
||||
// Try to find the requested index in one of the slots
|
||||
let meta = blockstore.meta(slot).ok()??;
|
||||
if meta.received > highest_index {
|
||||
// meta.received must be at least 1 by this point
|
||||
let packet =
|
||||
Self::get_data_shred_as_packet(blockstore, slot, meta.received - 1, from_addr)
|
||||
.ok()??;
|
||||
let packet = repair_response::repair_response_packet(
|
||||
blockstore,
|
||||
slot,
|
||||
meta.received - 1,
|
||||
from_addr,
|
||||
nonce,
|
||||
)?;
|
||||
return Some(Packets::new_with_recycler_data(
|
||||
recycler,
|
||||
"run_highest_window_request",
|
||||
@ -377,6 +553,7 @@ impl ServeRepair {
|
||||
blockstore: Option<&Arc<Blockstore>>,
|
||||
mut slot: Slot,
|
||||
max_responses: usize,
|
||||
nonce: Option<Nonce>,
|
||||
) -> Option<Packets> {
|
||||
let mut res = Packets::new_with_recycler(recycler.clone(), 64, "run_orphan");
|
||||
if let Some(blockstore) = blockstore {
|
||||
@ -385,9 +562,19 @@ impl ServeRepair {
|
||||
if meta.received == 0 {
|
||||
break;
|
||||
}
|
||||
let packet =
|
||||
Self::get_data_shred_as_packet(blockstore, slot, meta.received - 1, from_addr);
|
||||
if let Ok(Some(packet)) = packet {
|
||||
let nonce = if Shred::is_nonce_unlocked(slot) {
|
||||
nonce
|
||||
} else {
|
||||
None
|
||||
};
|
||||
let packet = repair_response::repair_response_packet(
|
||||
blockstore,
|
||||
slot,
|
||||
meta.received - 1,
|
||||
from_addr,
|
||||
nonce,
|
||||
);
|
||||
if let Some(packet) = packet {
|
||||
res.packets.push(packet);
|
||||
}
|
||||
if meta.is_parent_set() && res.packets.len() <= max_responses {
|
||||
@ -402,41 +589,31 @@ impl ServeRepair {
|
||||
}
|
||||
Some(res)
|
||||
}
|
||||
|
||||
fn get_data_shred_as_packet(
|
||||
blockstore: &Arc<Blockstore>,
|
||||
slot: Slot,
|
||||
shred_index: u64,
|
||||
dest: &SocketAddr,
|
||||
) -> Result<Option<Packet>> {
|
||||
let data = blockstore.get_data_shred(slot, shred_index)?;
|
||||
Ok(data.map(|data| {
|
||||
let mut packet = Packet::default();
|
||||
packet.meta.size = data.len();
|
||||
packet.meta.set_addr(dest);
|
||||
packet.data.copy_from_slice(&data);
|
||||
packet
|
||||
}))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::result::Error;
|
||||
use crate::{repair_response, result::Error};
|
||||
use solana_ledger::get_tmp_ledger_path;
|
||||
use solana_ledger::{
|
||||
blockstore::make_many_slot_entries,
|
||||
blockstore_processor::fill_blockstore_slot_with_ticks,
|
||||
shred::{
|
||||
max_ticks_per_n_shreds, CodingShredHeader, DataShredHeader, Shred, ShredCommonHeader,
|
||||
NONCE_SHRED_PAYLOAD_SIZE, UNLOCK_NONCE_SLOT,
|
||||
},
|
||||
};
|
||||
use solana_sdk::{hash::Hash, pubkey::Pubkey, timing::timestamp};
|
||||
|
||||
/// test run_window_requestwindow requests respond with the right shred, and do not overrun
|
||||
#[test]
|
||||
fn run_highest_window_request() {
|
||||
fn test_run_highest_window_request() {
|
||||
run_highest_window_request(UNLOCK_NONCE_SLOT + 3, 3, Some(9));
|
||||
run_highest_window_request(UNLOCK_NONCE_SLOT, 3, None);
|
||||
}
|
||||
|
||||
/// test run_window_request responds with the right shred, and do not overrun
|
||||
fn run_highest_window_request(slot: Slot, num_slots: u64, nonce: Option<Nonce>) {
|
||||
let recycler = PacketsRecycler::default();
|
||||
solana_logger::setup();
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
@ -448,41 +625,51 @@ mod tests {
|
||||
Some(&blockstore),
|
||||
0,
|
||||
0,
|
||||
nonce,
|
||||
);
|
||||
assert!(rv.is_none());
|
||||
|
||||
let _ = fill_blockstore_slot_with_ticks(
|
||||
&blockstore,
|
||||
max_ticks_per_n_shreds(1) + 1,
|
||||
2,
|
||||
1,
|
||||
max_ticks_per_n_shreds(1, None) + 1,
|
||||
slot,
|
||||
slot - num_slots + 1,
|
||||
Hash::default(),
|
||||
);
|
||||
|
||||
let index = 1;
|
||||
let rv = ServeRepair::run_highest_window_request(
|
||||
&recycler,
|
||||
&socketaddr_any!(),
|
||||
Some(&blockstore),
|
||||
2,
|
||||
1,
|
||||
);
|
||||
slot,
|
||||
index,
|
||||
nonce,
|
||||
)
|
||||
.expect("packets");
|
||||
|
||||
let rv: Vec<Shred> = rv
|
||||
.expect("packets")
|
||||
.packets
|
||||
.into_iter()
|
||||
.filter_map(|b| Shred::new_from_serialized_shred(b.data.to_vec()).ok())
|
||||
.filter_map(|b| {
|
||||
if nonce.is_some() {
|
||||
assert_eq!(repair_response::nonce(&b.data[..]), nonce);
|
||||
}
|
||||
Shred::new_from_serialized_shred(b.data.to_vec()).ok()
|
||||
})
|
||||
.collect();
|
||||
assert!(!rv.is_empty());
|
||||
let index = blockstore.meta(2).unwrap().unwrap().received - 1;
|
||||
let index = blockstore.meta(slot).unwrap().unwrap().received - 1;
|
||||
assert_eq!(rv[0].index(), index as u32);
|
||||
assert_eq!(rv[0].slot(), 2);
|
||||
assert_eq!(rv[0].slot(), slot);
|
||||
|
||||
let rv = ServeRepair::run_highest_window_request(
|
||||
&recycler,
|
||||
&socketaddr_any!(),
|
||||
Some(&blockstore),
|
||||
2,
|
||||
slot,
|
||||
index + 1,
|
||||
nonce,
|
||||
);
|
||||
assert!(rv.is_none());
|
||||
}
|
||||
@ -490,9 +677,14 @@ mod tests {
|
||||
Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
|
||||
}
|
||||
|
||||
/// test window requests respond with the right shred, and do not overrun
|
||||
#[test]
|
||||
fn run_window_request() {
|
||||
fn test_run_window_request() {
|
||||
run_window_request(UNLOCK_NONCE_SLOT + 1, Some(9));
|
||||
run_window_request(UNLOCK_NONCE_SLOT - 3, None);
|
||||
}
|
||||
|
||||
/// test window requests respond with the right shred, and do not overrun
|
||||
fn run_window_request(slot: Slot, nonce: Option<Nonce>) {
|
||||
let recycler = PacketsRecycler::default();
|
||||
solana_logger::setup();
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
@ -519,12 +711,13 @@ mod tests {
|
||||
&socketaddr_any!(),
|
||||
Some(&blockstore),
|
||||
&me,
|
||||
slot,
|
||||
0,
|
||||
0,
|
||||
nonce,
|
||||
);
|
||||
assert!(rv.is_none());
|
||||
let mut common_header = ShredCommonHeader::default();
|
||||
common_header.slot = 2;
|
||||
common_header.slot = slot;
|
||||
common_header.index = 1;
|
||||
let mut data_header = DataShredHeader::default();
|
||||
data_header.parent_offset = 1;
|
||||
@ -532,30 +725,37 @@ mod tests {
|
||||
common_header,
|
||||
data_header,
|
||||
CodingShredHeader::default(),
|
||||
NONCE_SHRED_PAYLOAD_SIZE,
|
||||
);
|
||||
|
||||
blockstore
|
||||
.insert_shreds(vec![shred_info], None, false)
|
||||
.expect("Expect successful ledger write");
|
||||
|
||||
let index = 1;
|
||||
let rv = ServeRepair::run_window_request(
|
||||
&recycler,
|
||||
&me,
|
||||
&socketaddr_any!(),
|
||||
Some(&blockstore),
|
||||
&me,
|
||||
2,
|
||||
1,
|
||||
);
|
||||
assert!(!rv.is_none());
|
||||
slot,
|
||||
index,
|
||||
nonce,
|
||||
)
|
||||
.expect("packets");
|
||||
let rv: Vec<Shred> = rv
|
||||
.expect("packets")
|
||||
.packets
|
||||
.into_iter()
|
||||
.filter_map(|b| Shred::new_from_serialized_shred(b.data.to_vec()).ok())
|
||||
.filter_map(|b| {
|
||||
if nonce.is_some() {
|
||||
assert_eq!(repair_response::nonce(&b.data[..]), nonce);
|
||||
}
|
||||
Shred::new_from_serialized_shred(b.data.to_vec()).ok()
|
||||
})
|
||||
.collect();
|
||||
assert_eq!(rv[0].index(), 1);
|
||||
assert_eq!(rv[0].slot(), 2);
|
||||
assert_eq!(rv[0].slot(), slot);
|
||||
}
|
||||
|
||||
Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
|
||||
@ -566,7 +766,7 @@ mod tests {
|
||||
let me = ContactInfo::new_localhost(&Pubkey::new_rand(), timestamp());
|
||||
let cluster_info = Arc::new(RwLock::new(ClusterInfo::new_with_invalid_keypair(me)));
|
||||
let serve_repair = ServeRepair::new(cluster_info.clone());
|
||||
let rv = serve_repair.repair_request(&RepairType::Shred(0, 0));
|
||||
let rv = serve_repair.repair_request(&RepairType::Shred(0, 0), &mut RepairStats::default());
|
||||
assert_matches!(rv, Err(Error::ClusterInfoError(ClusterInfoError::NoPeers)));
|
||||
|
||||
let serve_repair_addr = socketaddr!([127, 0, 0, 1], 1243);
|
||||
@ -587,7 +787,7 @@ mod tests {
|
||||
};
|
||||
cluster_info.write().unwrap().insert_info(nxt.clone());
|
||||
let rv = serve_repair
|
||||
.repair_request(&RepairType::Shred(0, 0))
|
||||
.repair_request(&RepairType::Shred(0, 0), &mut RepairStats::default())
|
||||
.unwrap();
|
||||
assert_eq!(nxt.serve_repair, serve_repair_addr);
|
||||
assert_eq!(rv.0, nxt.serve_repair);
|
||||
@ -614,7 +814,7 @@ mod tests {
|
||||
while !one || !two {
|
||||
//this randomly picks an option, so eventually it should pick both
|
||||
let rv = serve_repair
|
||||
.repair_request(&RepairType::Shred(0, 0))
|
||||
.repair_request(&RepairType::Shred(0, 0), &mut RepairStats::default())
|
||||
.unwrap();
|
||||
if rv.0 == serve_repair_addr {
|
||||
one = true;
|
||||
@ -627,52 +827,85 @@ mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn run_orphan() {
|
||||
fn test_run_orphan() {
|
||||
run_orphan(UNLOCK_NONCE_SLOT + 1, 3, Some(9));
|
||||
// Test where the response will be for some slots <= UNLOCK_NONCE_SLOT,
|
||||
// and some of the response will be for some slots > UNLOCK_NONCE_SLOT.
|
||||
// Should not panic.
|
||||
run_orphan(UNLOCK_NONCE_SLOT, 3, None);
|
||||
run_orphan(UNLOCK_NONCE_SLOT, 3, Some(9));
|
||||
}
|
||||
|
||||
fn run_orphan(slot: Slot, num_slots: u64, nonce: Option<Nonce>) {
|
||||
solana_logger::setup();
|
||||
let recycler = PacketsRecycler::default();
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
{
|
||||
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
|
||||
let rv =
|
||||
ServeRepair::run_orphan(&recycler, &socketaddr_any!(), Some(&blockstore), 2, 0);
|
||||
let rv = ServeRepair::run_orphan(
|
||||
&recycler,
|
||||
&socketaddr_any!(),
|
||||
Some(&blockstore),
|
||||
slot,
|
||||
0,
|
||||
nonce,
|
||||
);
|
||||
assert!(rv.is_none());
|
||||
|
||||
// Create slots 1, 2, 3 with 5 shreds apiece
|
||||
let (shreds, _) = make_many_slot_entries(1, 3, 5);
|
||||
// Create slots [slot, slot + num_slots) with 5 shreds apiece
|
||||
let (shreds, _) = make_many_slot_entries(slot, num_slots, 5);
|
||||
|
||||
blockstore
|
||||
.insert_shreds(shreds, None, false)
|
||||
.expect("Expect successful ledger write");
|
||||
|
||||
// We don't have slot 4, so we don't know how to service this requeset
|
||||
let rv =
|
||||
ServeRepair::run_orphan(&recycler, &socketaddr_any!(), Some(&blockstore), 4, 5);
|
||||
// We don't have slot `slot + num_slots`, so we don't know how to service this request
|
||||
let rv = ServeRepair::run_orphan(
|
||||
&recycler,
|
||||
&socketaddr_any!(),
|
||||
Some(&blockstore),
|
||||
slot + num_slots,
|
||||
5,
|
||||
nonce,
|
||||
);
|
||||
assert!(rv.is_none());
|
||||
|
||||
// For slot 3, we should return the highest shreds from slots 3, 2, 1 respectively
|
||||
// for this request
|
||||
let rv: Vec<_> =
|
||||
ServeRepair::run_orphan(&recycler, &socketaddr_any!(), Some(&blockstore), 3, 5)
|
||||
.expect("run_orphan packets")
|
||||
.packets
|
||||
.iter()
|
||||
.map(|b| b.clone())
|
||||
.collect();
|
||||
let expected: Vec<_> = (1..=3)
|
||||
// For a orphan request for `slot + num_slots - 1`, we should return the highest shreds
|
||||
// from slots in the range [slot, slot + num_slots - 1]
|
||||
let rv: Vec<_> = ServeRepair::run_orphan(
|
||||
&recycler,
|
||||
&socketaddr_any!(),
|
||||
Some(&blockstore),
|
||||
slot + num_slots - 1,
|
||||
5,
|
||||
nonce,
|
||||
)
|
||||
.expect("run_orphan packets")
|
||||
.packets
|
||||
.iter()
|
||||
.map(|b| b.clone())
|
||||
.collect();
|
||||
|
||||
// Verify responses
|
||||
let expected: Vec<_> = (slot..slot + num_slots)
|
||||
.rev()
|
||||
.map(|slot| {
|
||||
.filter_map(|slot| {
|
||||
let nonce = if Shred::is_nonce_unlocked(slot) {
|
||||
nonce
|
||||
} else {
|
||||
None
|
||||
};
|
||||
let index = blockstore.meta(slot).unwrap().unwrap().received - 1;
|
||||
ServeRepair::get_data_shred_as_packet(
|
||||
repair_response::repair_response_packet(
|
||||
&blockstore,
|
||||
slot,
|
||||
index,
|
||||
&socketaddr_any!(),
|
||||
nonce,
|
||||
)
|
||||
.unwrap()
|
||||
.unwrap()
|
||||
})
|
||||
.collect();
|
||||
assert_eq!(rv, expected)
|
||||
assert_eq!(rv, expected);
|
||||
}
|
||||
|
||||
Blockstore::destroy(&ledger_path).expect("Expected successful database destruction");
|
||||
|
@ -663,6 +663,7 @@ mod tests {
|
||||
use super::*;
|
||||
use crate::genesis_utils::{create_genesis_config, GenesisConfigInfo};
|
||||
use rayon::prelude::*;
|
||||
use solana_ledger::get_tmp_ledger_path;
|
||||
use solana_runtime::bank::Bank;
|
||||
use solana_sdk::{
|
||||
hash::Hasher,
|
||||
@ -690,7 +691,11 @@ mod tests {
|
||||
&[bank.clone()],
|
||||
vec![0],
|
||||
)));
|
||||
let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default()));
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
|
||||
let block_commitment_cache = Arc::new(RwLock::new(
|
||||
BlockCommitmentCache::default_with_blockstore(blockstore),
|
||||
));
|
||||
let (_slot_sender, slot_receiver) = channel();
|
||||
let storage_state = StorageState::new(
|
||||
&bank.last_blockhash(),
|
||||
|
@ -70,9 +70,14 @@ impl TransactionStatusService {
|
||||
}
|
||||
.expect("FeeCalculator must exist");
|
||||
let fee = fee_calculator.calculate_fee(transaction.message());
|
||||
let (writable_keys, readonly_keys) =
|
||||
transaction.message.get_account_keys_by_lock_type();
|
||||
blockstore
|
||||
.write_transaction_status(
|
||||
(slot, transaction.signatures[0]),
|
||||
slot,
|
||||
transaction.signatures[0],
|
||||
writable_keys,
|
||||
readonly_keys,
|
||||
&TransactionStatusMeta {
|
||||
status,
|
||||
fee,
|
||||
|
@ -26,6 +26,7 @@ use solana_ledger::{
|
||||
snapshot_package::SnapshotPackageSender,
|
||||
};
|
||||
use solana_sdk::{
|
||||
genesis_config::GenesisConfig,
|
||||
pubkey::Pubkey,
|
||||
signature::{Keypair, Signer},
|
||||
};
|
||||
@ -67,6 +68,7 @@ pub struct TvuConfig {
|
||||
pub halt_on_trusted_validators_accounts_hash_mismatch: bool,
|
||||
pub trusted_validators: Option<HashSet<Pubkey>>,
|
||||
pub accounts_hash_fault_injection_slots: u64,
|
||||
pub genesis_config: GenesisConfig,
|
||||
}
|
||||
|
||||
impl Tvu {
|
||||
@ -185,6 +187,7 @@ impl Tvu {
|
||||
block_commitment_cache: block_commitment_cache.clone(),
|
||||
transaction_status_sender,
|
||||
rewards_recorder_sender,
|
||||
genesis_config: tvu_config.genesis_config,
|
||||
};
|
||||
|
||||
let (replay_stage, root_bank_receiver) = ReplayStage::new(
|
||||
@ -264,11 +267,13 @@ pub mod tests {
|
||||
use crate::banking_stage::create_test_recorder;
|
||||
use crate::cluster_info::{ClusterInfo, Node};
|
||||
use crate::genesis_utils::{create_genesis_config, GenesisConfigInfo};
|
||||
use serial_test_derive::serial;
|
||||
use solana_ledger::create_new_tmp_ledger;
|
||||
use solana_runtime::bank::Bank;
|
||||
use std::sync::atomic::Ordering;
|
||||
|
||||
#[test]
|
||||
#[serial]
|
||||
fn test_tvu_exit() {
|
||||
solana_logger::setup();
|
||||
let leader = Node::new_localhost();
|
||||
@ -296,7 +301,9 @@ pub mod tests {
|
||||
let voting_keypair = Keypair::new();
|
||||
let storage_keypair = Arc::new(Keypair::new());
|
||||
let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank));
|
||||
let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default()));
|
||||
let block_commitment_cache = Arc::new(RwLock::new(
|
||||
BlockCommitmentCache::default_with_blockstore(blockstore.clone()),
|
||||
));
|
||||
let tvu = Tvu::new(
|
||||
&voting_keypair.pubkey(),
|
||||
Some(Arc::new(voting_keypair)),
|
||||
@ -315,10 +322,7 @@ pub mod tests {
|
||||
&StorageState::default(),
|
||||
None,
|
||||
l_receiver,
|
||||
&Arc::new(RpcSubscriptions::new(
|
||||
&exit,
|
||||
Arc::new(RwLock::new(BlockCommitmentCache::default())),
|
||||
)),
|
||||
&Arc::new(RpcSubscriptions::new(&exit, block_commitment_cache.clone())),
|
||||
&poh_recorder,
|
||||
&leader_schedule_cache,
|
||||
&exit,
|
||||
|
@ -202,7 +202,6 @@ impl Validator {
|
||||
}
|
||||
|
||||
let bank_forks = Arc::new(RwLock::new(bank_forks));
|
||||
let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default()));
|
||||
|
||||
let mut validator_exit = ValidatorExit::default();
|
||||
let exit_ = exit.clone();
|
||||
@ -238,6 +237,9 @@ impl Validator {
|
||||
);
|
||||
|
||||
let blockstore = Arc::new(blockstore);
|
||||
let block_commitment_cache = Arc::new(RwLock::new(
|
||||
BlockCommitmentCache::default_with_blockstore(blockstore.clone()),
|
||||
));
|
||||
|
||||
let subscriptions = Arc::new(RpcSubscriptions::new(&exit, block_commitment_cache.clone()));
|
||||
|
||||
@ -262,6 +264,7 @@ impl Validator {
|
||||
ledger_path,
|
||||
storage_state.clone(),
|
||||
validator_exit.clone(),
|
||||
config.trusted_validators.clone(),
|
||||
),
|
||||
PubSubService::new(
|
||||
&subscriptions,
|
||||
@ -311,12 +314,19 @@ impl Validator {
|
||||
);
|
||||
|
||||
if config.dev_halt_at_slot.is_some() {
|
||||
// Simulate a confirmed root to avoid RPC errors with CommitmentmentConfig::max() and
|
||||
// to ensure RPC endpoints like getConfirmedBlock, which require a confirmed root, work
|
||||
block_commitment_cache
|
||||
.write()
|
||||
.unwrap()
|
||||
.set_get_largest_confirmed_root(bank_forks.read().unwrap().root());
|
||||
|
||||
// Park with the RPC service running, ready for inspection!
|
||||
warn!("Validator halted");
|
||||
std::thread::park();
|
||||
}
|
||||
|
||||
let poh_config = Arc::new(genesis_config.poh_config);
|
||||
let poh_config = Arc::new(genesis_config.poh_config.clone());
|
||||
let (mut poh_recorder, entry_receiver) = PohRecorder::new_with_clear_signal(
|
||||
bank.tick_height(),
|
||||
bank.last_blockhash(),
|
||||
@ -443,6 +453,7 @@ impl Validator {
|
||||
shred_version: node.info.shred_version,
|
||||
trusted_validators: config.trusted_validators.clone(),
|
||||
accounts_hash_fault_injection_slots: config.accounts_hash_fault_injection_slots,
|
||||
genesis_config,
|
||||
},
|
||||
);
|
||||
|
||||
|
@ -1,31 +1,37 @@
|
||||
//! `window_service` handles the data plane incoming shreds, storing them in
|
||||
//! blockstore and retransmitting where required
|
||||
//!
|
||||
use crate::cluster_info::ClusterInfo;
|
||||
use crate::packet::Packets;
|
||||
use crate::repair_service::{RepairService, RepairStrategy};
|
||||
use crate::result::{Error, Result};
|
||||
use crate::streamer::PacketSender;
|
||||
use crate::{
|
||||
cluster_info::ClusterInfo,
|
||||
packet::Packets,
|
||||
repair_response,
|
||||
repair_service::{RepairService, RepairStrategy},
|
||||
result::{Error, Result},
|
||||
serve_repair::DEFAULT_NONCE,
|
||||
streamer::PacketSender,
|
||||
};
|
||||
use crossbeam_channel::{
|
||||
unbounded, Receiver as CrossbeamReceiver, RecvTimeoutError, Sender as CrossbeamSender,
|
||||
};
|
||||
use rayon::iter::IntoParallelRefMutIterator;
|
||||
use rayon::iter::ParallelIterator;
|
||||
use rayon::ThreadPool;
|
||||
use solana_ledger::bank_forks::BankForks;
|
||||
use solana_ledger::blockstore::{self, Blockstore, MAX_DATA_SHREDS_PER_SLOT};
|
||||
use solana_ledger::leader_schedule_cache::LeaderScheduleCache;
|
||||
use solana_ledger::shred::Shred;
|
||||
use solana_ledger::{
|
||||
bank_forks::BankForks,
|
||||
blockstore::{self, Blockstore, MAX_DATA_SHREDS_PER_SLOT},
|
||||
leader_schedule_cache::LeaderScheduleCache,
|
||||
shred::{Nonce, Shred},
|
||||
};
|
||||
use solana_metrics::{inc_new_counter_debug, inc_new_counter_error};
|
||||
use solana_rayon_threadlimit::get_thread_count;
|
||||
use solana_runtime::bank::Bank;
|
||||
use solana_sdk::pubkey::Pubkey;
|
||||
use solana_sdk::timing::duration_as_ms;
|
||||
use std::net::UdpSocket;
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::sync::{Arc, RwLock};
|
||||
use std::thread::{self, Builder, JoinHandle};
|
||||
use std::time::{Duration, Instant};
|
||||
use solana_sdk::{packet::PACKET_DATA_SIZE, pubkey::Pubkey, timing::duration_as_ms};
|
||||
use std::{
|
||||
net::{SocketAddr, UdpSocket},
|
||||
sync::atomic::{AtomicBool, Ordering},
|
||||
sync::{Arc, RwLock},
|
||||
thread::{self, Builder, JoinHandle},
|
||||
time::{Duration, Instant},
|
||||
};
|
||||
|
||||
fn verify_shred_slot(shred: &Shred, root: u64) -> bool {
|
||||
if shred.is_data() {
|
||||
@ -102,8 +108,15 @@ fn run_check_duplicate(
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn verify_repair(_shred: &Shred, repair_info: &Option<RepairMeta>) -> bool {
|
||||
repair_info
|
||||
.as_ref()
|
||||
.map(|repair_info| repair_info.nonce == DEFAULT_NONCE)
|
||||
.unwrap_or(true)
|
||||
}
|
||||
|
||||
fn run_insert<F>(
|
||||
shred_receiver: &CrossbeamReceiver<Vec<Shred>>,
|
||||
shred_receiver: &CrossbeamReceiver<(Vec<Shred>, Vec<Option<RepairMeta>>)>,
|
||||
blockstore: &Arc<Blockstore>,
|
||||
leader_schedule_cache: &Arc<LeaderScheduleCache>,
|
||||
handle_duplicate: F,
|
||||
@ -112,12 +125,16 @@ where
|
||||
F: Fn(Shred) -> (),
|
||||
{
|
||||
let timer = Duration::from_millis(200);
|
||||
let mut shreds = shred_receiver.recv_timeout(timer)?;
|
||||
|
||||
while let Ok(mut more_shreds) = shred_receiver.try_recv() {
|
||||
shreds.append(&mut more_shreds)
|
||||
let (mut shreds, mut repair_infos) = shred_receiver.recv_timeout(timer)?;
|
||||
while let Ok((more_shreds, more_repair_infos)) = shred_receiver.try_recv() {
|
||||
shreds.extend(more_shreds);
|
||||
repair_infos.extend(more_repair_infos);
|
||||
}
|
||||
|
||||
assert_eq!(shreds.len(), repair_infos.len());
|
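// Walk repair_infos in lock step with `retain`, dropping any shred whose repair nonce fails `verify_repair`.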
||||
let mut i = 0;
|
||||
shreds.retain(|shred| (verify_repair(&shred, &repair_infos[i]), i += 1).0);
|
||||
|
||||
let blockstore_insert_metrics = blockstore.insert_shreds_handle_duplicate(
|
||||
shreds,
|
||||
Some(leader_schedule_cache),
|
||||
@ -131,7 +148,7 @@ where
|
||||
|
||||
fn recv_window<F>(
|
||||
blockstore: &Arc<Blockstore>,
|
||||
insert_shred_sender: &CrossbeamSender<Vec<Shred>>,
|
||||
insert_shred_sender: &CrossbeamSender<(Vec<Shred>, Vec<Option<RepairMeta>>)>,
|
||||
my_pubkey: &Pubkey,
|
||||
verified_receiver: &CrossbeamReceiver<Vec<Packets>>,
|
||||
retransmit: &PacketSender,
|
||||
@ -155,7 +172,7 @@ where
|
||||
inc_new_counter_debug!("streamer-recv_window-recv", total_packets);
|
||||
|
||||
let last_root = blockstore.last_root();
|
||||
let shreds: Vec<_> = thread_pool.install(|| {
|
||||
let (shreds, repair_infos): (Vec<_>, Vec<_>) = thread_pool.install(|| {
|
||||
packets
|
||||
.par_iter_mut()
|
||||
.flat_map(|packets| {
|
||||
@ -164,34 +181,59 @@ where
|
||||
.iter_mut()
|
||||
.filter_map(|packet| {
|
||||
if packet.meta.discard {
|
||||
inc_new_counter_debug!("streamer-recv_window-invalid_signature", 1);
|
||||
inc_new_counter_debug!(
|
||||
"streamer-recv_window-invalid_or_unnecessary_packet",
|
||||
1
|
||||
);
|
||||
None
|
||||
} else if let Ok(shred) =
|
||||
Shred::new_from_serialized_shred(packet.data.to_vec())
|
||||
{
|
||||
if shred_filter(&shred, last_root) {
|
||||
// Mark slot as dead if the current shred is on the boundary
|
||||
// of max shreds per slot. However, let the current shred
|
||||
// get retransmitted. It'll allow peer nodes to see this shred
|
||||
// and trigger them to mark the slot as dead.
|
||||
if shred.index() >= (MAX_DATA_SHREDS_PER_SLOT - 1) as u32 {
|
||||
let _ = blockstore.set_dead_slot(shred.slot());
|
||||
} else {
|
||||
// shred fetch stage should be sending packets
|
||||
// with sufficiently large buffers. Needed to ensure
|
||||
// call to `new_from_serialized_shred` is safe.
|
||||
assert_eq!(packet.data.len(), PACKET_DATA_SIZE);
|
||||
let serialized_shred = packet.data.to_vec();
|
||||
if let Ok(shred) = Shred::new_from_serialized_shred(serialized_shred) {
|
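// For repair packets on nonce-enabled slots, parse the trailing nonce so the response can be verified; packets without a parsable nonce are dropped.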
||||
let repair_info = {
|
||||
if packet.meta.repair && Shred::is_nonce_unlocked(shred.slot())
|
||||
{
|
||||
if let Some(nonce) = repair_response::nonce(&packet.data) {
|
||||
let repair_info = RepairMeta {
|
||||
_from_addr: packet.meta.addr(),
|
||||
nonce,
|
||||
};
|
||||
Some(repair_info)
|
||||
} else {
|
||||
// If can't parse the nonce, dump the packet
|
||||
return None;
|
||||
}
|
||||
} else {
|
||||
None
|
||||
}
|
||||
};
|
||||
if shred_filter(&shred, last_root) {
|
||||
// Mark slot as dead if the current shred is on the boundary
|
||||
// of max shreds per slot. However, let the current shred
|
||||
// get retransmitted. It'll allow peer nodes to see this shred
|
||||
// and trigger them to mark the slot as dead.
|
||||
if shred.index() >= (MAX_DATA_SHREDS_PER_SLOT - 1) as u32 {
|
||||
let _ = blockstore.set_dead_slot(shred.slot());
|
||||
}
|
||||
packet.meta.slot = shred.slot();
|
||||
packet.meta.seed = shred.seed();
|
||||
Some((shred, repair_info))
|
||||
} else {
|
||||
packet.meta.discard = true;
|
||||
None
|
||||
}
|
||||
packet.meta.slot = shred.slot();
|
||||
packet.meta.seed = shred.seed();
|
||||
Some(shred)
|
||||
} else {
|
||||
packet.meta.discard = true;
|
||||
None
|
||||
}
|
||||
} else {
|
||||
packet.meta.discard = true;
|
||||
None
|
||||
}
|
||||
})
|
||||
.collect::<Vec<_>>()
|
||||
})
|
||||
.collect()
|
||||
.unzip()
|
||||
});
|
||||
|
||||
trace!("{:?} shreds from packets", shreds.len());
|
||||
@ -205,7 +247,7 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
insert_shred_sender.send(shreds)?;
|
||||
insert_shred_sender.send((shreds, repair_infos))?;
|
||||
|
||||
trace!(
|
||||
"Elapsed processing time in recv_window(): {}",
|
||||
@ -215,6 +257,11 @@ where
|
||||
Ok(())
|
||||
}
|
||||
|
||||
struct RepairMeta {
|
||||
_from_addr: SocketAddr,
|
||||
nonce: Nonce,
|
||||
}
|
||||
|
||||
// Implement a destructor for the window_service thread to signal it exited
|
||||
// even on panics
|
||||
struct Finalizer {
|
||||
@ -336,7 +383,7 @@ impl WindowService {
|
||||
exit: &Arc<AtomicBool>,
|
||||
blockstore: &Arc<Blockstore>,
|
||||
leader_schedule_cache: &Arc<LeaderScheduleCache>,
|
||||
insert_receiver: CrossbeamReceiver<Vec<Shred>>,
|
||||
insert_receiver: CrossbeamReceiver<(Vec<Shred>, Vec<Option<RepairMeta>>)>,
|
||||
duplicate_sender: CrossbeamSender<Shred>,
|
||||
) -> JoinHandle<()> {
|
||||
let exit = exit.clone();
|
||||
@ -377,7 +424,7 @@ impl WindowService {
|
||||
id: Pubkey,
|
||||
exit: &Arc<AtomicBool>,
|
||||
blockstore: &Arc<Blockstore>,
|
||||
insert_sender: CrossbeamSender<Vec<Shred>>,
|
||||
insert_sender: CrossbeamSender<(Vec<Shred>, Vec<Option<RepairMeta>>)>,
|
||||
verified_receiver: CrossbeamReceiver<Vec<Packets>>,
|
||||
shred_filter: F,
|
||||
bank_forks: Option<Arc<RwLock<BankForks>>>,
|
||||
@ -483,12 +530,11 @@ mod test {
|
||||
repair_service::RepairSlotRange,
|
||||
};
|
||||
use rand::thread_rng;
|
||||
use solana_ledger::shred::DataShredHeader;
|
||||
use solana_ledger::{
|
||||
blockstore::{make_many_slot_entries, Blockstore},
|
||||
entry::{create_ticks, Entry},
|
||||
get_tmp_ledger_path,
|
||||
shred::Shredder,
|
||||
shred::{DataShredHeader, Shredder, NONCE_SHRED_PAYLOAD_SIZE},
|
||||
};
|
||||
use solana_sdk::{
|
||||
clock::Slot,
|
||||
@ -562,8 +608,12 @@ mod test {
|
||||
|
||||
// If it's a coding shred, test that slot >= root
|
||||
let (common, coding) = Shredder::new_coding_shred_header(5, 5, 5, 6, 6, 0, 0);
|
||||
let mut coding_shred =
|
||||
Shred::new_empty_from_header(common, DataShredHeader::default(), coding);
|
||||
let mut coding_shred = Shred::new_empty_from_header(
|
||||
common,
|
||||
DataShredHeader::default(),
|
||||
coding,
|
||||
NONCE_SHRED_PAYLOAD_SIZE,
|
||||
);
|
||||
Shredder::sign_shred(&leader_keypair, &mut coding_shred);
|
||||
assert_eq!(
|
||||
should_retransmit_and_persist(&coding_shred, Some(bank.clone()), &cache, &me_id, 0, 0),
|
||||
|
@ -137,7 +137,7 @@ mod tests {
|
||||
// and to allow snapshotting of bank and the purging logic on status_cache to
|
||||
// kick in
|
||||
if slot % set_root_interval == 0 || slot == last_slot - 1 {
|
||||
bank_forks.set_root(bank.slot(), &sender);
|
||||
bank_forks.set_root(bank.slot(), &sender, None);
|
||||
}
|
||||
}
|
||||
// Generate a snapshot package for last bank
|
||||
@ -377,9 +377,11 @@ mod tests {
|
||||
snapshot_test_config.bank_forks.insert(new_bank);
|
||||
current_bank = snapshot_test_config.bank_forks[new_slot].clone();
|
||||
}
|
||||
snapshot_test_config
|
||||
.bank_forks
|
||||
.set_root(current_bank.slot(), &snapshot_sender);
|
||||
snapshot_test_config.bank_forks.set_root(
|
||||
current_bank.slot(),
|
||||
&snapshot_sender,
|
||||
None,
|
||||
);
|
||||
}
|
||||
|
||||
let num_old_slots = num_set_roots * *add_root_interval - MAX_CACHE_ENTRIES + 1;
|
||||
|
@ -6,6 +6,7 @@ use solana_core::{
|
||||
commitment::BlockCommitmentCache, rpc_pubsub_service::PubSubService,
|
||||
rpc_subscriptions::RpcSubscriptions, validator::TestValidator,
|
||||
};
|
||||
use solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path};
|
||||
use solana_sdk::{
|
||||
commitment_config::CommitmentConfig, pubkey::Pubkey, rpc_port, signature::Signer,
|
||||
system_transaction,
|
||||
@ -39,7 +40,7 @@ fn test_rpc_client() {
|
||||
|
||||
assert_eq!(
|
||||
client.get_version().unwrap().solana_core,
|
||||
solana_clap_utils::version!()
|
||||
solana_version::version!()
|
||||
);
|
||||
|
||||
assert!(client.get_account(&bob_pubkey).is_err());
|
||||
@ -58,7 +59,7 @@ fn test_rpc_client() {
|
||||
let now = Instant::now();
|
||||
while now.elapsed().as_secs() <= 20 {
|
||||
let response = client
|
||||
.confirm_transaction_with_commitment(signature.as_str(), CommitmentConfig::default())
|
||||
.confirm_transaction_with_commitment(&signature, CommitmentConfig::default())
|
||||
.unwrap();
|
||||
|
||||
if response.value {
|
||||
@ -85,9 +86,13 @@ fn test_slot_subscription() {
|
||||
rpc_port::DEFAULT_RPC_PUBSUB_PORT,
|
||||
);
|
||||
let exit = Arc::new(AtomicBool::new(false));
|
||||
let ledger_path = get_tmp_ledger_path!();
|
||||
let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap());
|
||||
let subscriptions = Arc::new(RpcSubscriptions::new(
|
||||
&exit,
|
||||
Arc::new(RwLock::new(BlockCommitmentCache::default())),
|
||||
Arc::new(RwLock::new(BlockCommitmentCache::default_with_blockstore(
|
||||
blockstore,
|
||||
))),
|
||||
));
|
||||
let pubsub_service = PubSubService::new(&subscriptions, pubsub_addr, &exit);
|
||||
std::thread::sleep(Duration::from_millis(400));
|
||||
|
@ -9,15 +9,12 @@ use reqwest::{self, header::CONTENT_TYPE};
|
||||
use serde_json::{json, Value};
|
||||
use solana_client::{
|
||||
rpc_client::{get_rpc_request_str, RpcClient},
|
||||
rpc_response::Response,
|
||||
rpc_response::{Response, RpcSignatureResult},
|
||||
};
|
||||
use solana_core::{rpc_pubsub::gen_client::Client as PubsubClient, validator::TestValidator};
|
||||
use solana_sdk::{
|
||||
commitment_config::CommitmentConfig,
|
||||
hash::Hash,
|
||||
pubkey::Pubkey,
|
||||
system_transaction,
|
||||
transaction::{self, Transaction},
|
||||
commitment_config::CommitmentConfig, hash::Hash, pubkey::Pubkey, system_transaction,
|
||||
transaction::Transaction,
|
||||
};
|
||||
use std::{
|
||||
collections::HashSet,
|
||||
@ -224,7 +221,7 @@ fn test_rpc_subscriptions() {
|
||||
let mut rt = Runtime::new().unwrap();
|
||||
let rpc_pubsub_url = format!("ws://{}/", leader_data.rpc_pubsub);
|
||||
|
||||
let (status_sender, status_receiver) = channel::<(String, Response<transaction::Result<()>>)>();
|
||||
let (status_sender, status_receiver) = channel::<(String, Response<RpcSignatureResult>)>();
|
||||
let status_sender = Arc::new(Mutex::new(status_sender));
|
||||
let (sent_sender, sent_receiver) = channel::<()>();
|
||||
let sent_sender = Arc::new(Mutex::new(sent_sender));
|
||||
@ -296,7 +293,7 @@ fn test_rpc_subscriptions() {
|
||||
let timeout = deadline.saturating_duration_since(Instant::now());
|
||||
match status_receiver.recv_timeout(timeout) {
|
||||
Ok((sig, result)) => {
|
||||
assert!(result.value.is_ok());
|
||||
assert!(result.value.err.is_none());
|
||||
assert!(signature_set.remove(&sig));
|
||||
}
|
||||
Err(_err) => {
|
||||
|
@ -59,7 +59,9 @@ mod tests {
|
||||
&[bank.clone()],
|
||||
vec![0],
|
||||
)));
|
||||
let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default()));
|
||||
let block_commitment_cache = Arc::new(RwLock::new(
|
||||
BlockCommitmentCache::default_with_blockstore(blockstore.clone()),
|
||||
));
|
||||
let cluster_info = test_cluster_info(&keypair.pubkey());
|
||||
|
||||
let (bank_sender, bank_receiver) = channel();
|
||||
@ -180,7 +182,9 @@ mod tests {
|
||||
&[bank.clone()],
|
||||
vec![0],
|
||||
)));
|
||||
let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default()));
|
||||
let block_commitment_cache = Arc::new(RwLock::new(
|
||||
BlockCommitmentCache::default_with_blockstore(blockstore.clone()),
|
||||
));
|
||||
|
||||
let cluster_info = test_cluster_info(&keypair.pubkey());
|
||||
let (bank_sender, bank_receiver) = channel();
|
||||
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-crate-features"
|
||||
version = "1.0.13"
|
||||
version = "1.0.23"
|
||||
description = "Solana Crate Features"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -12,17 +12,20 @@ edition = "2018"
|
||||
backtrace = { version = "0.3.33", features = ["serialize-serde"] }
|
||||
bytes = { version = "0.4.12", features = ["either"] }
|
||||
cc = { version = "1.0.45", features = ["jobserver", "num_cpus", "parallel"]}
|
||||
curve25519-dalek = { version = "1.1.3" }
|
||||
curve25519-dalek = { version = "2" }
|
||||
either= { version = "1.5.2" }
|
||||
failure = { version = "0.1.5" }
|
||||
lazy_static = { version = "1.4.0", features = ["spin", "spin_no_std"] }
|
||||
libc = { version = "0.2.62", features = ["extra_traits"] }
|
||||
rand_chacha = { version = "0.1.1" }
|
||||
rand_chacha = { version = "0.2.2" }
|
||||
regex-syntax = { version = "0.6.12" }
|
||||
reqwest = { version = "0.9.20", default-features = false, features = ["rustls-tls"] }
|
||||
serde = { version = "1.0.100", features = ["rc"] }
|
||||
ed25519-dalek = { version = "=1.0.0-pre.1", features = ["serde"] }
|
||||
ed25519-dalek = { version = "=1.0.0-pre.3", features = ["serde"] }
|
||||
syn_0_15 = { package = "syn", version = "0.15.42", features = ["extra-traits", "fold", "full"] }
|
||||
syn_1_0 = { package = "syn", version = "1.0.3", features = ["extra-traits", "fold", "full"] }
|
||||
tokio = { version = "0.1.22",features=["bytes", "codec", "default", "fs", "io", "mio", "num_cpus", "reactor", "rt-full", "sync", "tcp", "timer", "tokio-codec", "tokio-current-thread", "tokio-executor", "tokio-io", "tokio-io", "tokio-reactor", "tokio-tcp", "tokio-tcp", "tokio-threadpool", "tokio-timer", "tokio-udp", "tokio-uds", "udp", "uds"] }
|
||||
winapi = { version = "0.3.8", features=["basetsd", "consoleapi", "errhandlingapi", "fileapi", "handleapi", "impl-debug", "impl-default", "knownfolders", "libloaderapi", "memoryapi", "minwinbase", "minwindef", "ntdef", "ntsecapi", "ntstatus", "objbase", "processenv", "processthreadsapi", "profileapi", "shlobj", "std", "synchapi", "sysinfoapi", "timezoneapi", "utilapiset", "winbase", "wincon", "windef", "winerror", "winnls", "winnt", "winreg", "winsock2", "winuser", "ws2def", "ws2ipdef", "ws2tcpip", "wtypesbase"] }
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
||||
|
@ -3,7 +3,9 @@ set -e
|
||||
|
||||
cd "$(dirname "$0")"
|
||||
|
||||
usage=$(cargo -q run -p solana-cli -- -C ~/.foo --help | sed 's|'"$HOME"'|~|g')
|
||||
: "${rust_stable:=}" # Pacify shellcheck
|
||||
|
||||
usage=$(cargo +"$rust_stable" -q run -p solana-cli -- -C ~/.foo --help | sed -e 's|'"$HOME"'|~|g' -e 's/[[:space:]]\+$//')
|
||||
|
||||
out=${1:-src/cli/usage.md}
|
||||
|
||||
@ -29,6 +31,6 @@ in_subcommands=0
|
||||
while read -r subcommand rest; do
|
||||
[[ $subcommand == "SUBCOMMANDS:" ]] && in_subcommands=1 && continue
|
||||
if ((in_subcommands)); then
|
||||
section "$(cargo -q run -p solana-cli -- help "$subcommand" | sed 's|'"$HOME"'|~|g')" "####" >> "$out"
|
||||
section "$(cargo +"$rust_stable" -q run -p solana-cli -- help "$subcommand" | sed 's|'"$HOME"'|~|g')" "####" >> "$out"
|
||||
fi
|
||||
done <<<"$usage">>"$out"
|
||||
|
Binary file not shown (Before: 25 KiB)
Binary file not shown (Before: 120 KiB)
Binary file not shown (Before: 30 KiB)
Binary file not shown (Before: 83 KiB)
@ -14,7 +14,6 @@ To interact with a Solana node inside a JavaScript application, use the [solana-
|
||||
|
||||
## Methods
|
||||
|
||||
* [confirmTransaction](jsonrpc-api.md#confirmtransaction)
|
||||
* [getAccountInfo](jsonrpc-api.md#getaccountinfo)
|
||||
* [getBalance](jsonrpc-api.md#getbalance)
|
||||
* [getBlockCommitment](jsonrpc-api.md#getblockcommitment)
|
||||
@ -22,18 +21,21 @@ To interact with a Solana node inside a JavaScript application, use the [solana-
|
||||
* [getClusterNodes](jsonrpc-api.md#getclusternodes)
|
||||
* [getConfirmedBlock](jsonrpc-api.md#getconfirmedblock)
|
||||
* [getConfirmedBlocks](jsonrpc-api.md#getconfirmedblocks)
|
||||
* [getConfirmedSignaturesForAddress](jsonrpc-api.md#getconfirmedsignaturesforaddress)
|
||||
* [getConfirmedTransaction](jsonrpc-api.md#getconfirmedtransaction)
|
||||
* [getEpochInfo](jsonrpc-api.md#getepochinfo)
|
||||
* [getEpochSchedule](jsonrpc-api.md#getepochschedule)
|
||||
* [getFeeCalculatorForBlockhash](jsonrpc-api.md#getfeecalculatorforblockhash)
|
||||
* [getFeeRateGovernor](jsonrpc-api.md#getfeerategovernor)
|
||||
* [getFirstAvailableBlock](jsonrpc-api.md#getfirstavailableblock)
|
||||
* [getGenesisHash](jsonrpc-api.md#getgenesishash)
|
||||
* [getIdentity](jsonrpc-api.md#getidentity)
|
||||
* [getInflation](jsonrpc-api.md#getinflation)
|
||||
* [getLargestAccounts](jsonrpc-api.md#getlargestaccounts)
|
||||
* [getLeaderSchedule](jsonrpc-api.md#getleaderschedule)
|
||||
* [getMinimumBalanceForRentExemption](jsonrpc-api.md#getminimumbalanceforrentexemption)
|
||||
* [getProgramAccounts](jsonrpc-api.md#getprogramaccounts)
|
||||
* [getRecentBlockhash](jsonrpc-api.md#getrecentblockhash)
|
||||
* [getSignatureStatus](jsonrpc-api.md#getsignaturestatus)
|
||||
* [getSignatureStatuses](jsonrpc-api.md#getsignaturestatuses)
|
||||
* [getSlot](jsonrpc-api.md#getslot)
|
||||
* [getSlotLeader](jsonrpc-api.md#getslotleader)
|
||||
@ -41,13 +43,14 @@ To interact with a Solana node inside a JavaScript application, use the [solana-
|
||||
* [getStoragePubkeysForSlot](jsonrpc-api.md#getstoragepubkeysforslot)
|
||||
* [getStorageTurn](jsonrpc-api.md#getstorageturn)
|
||||
* [getStorageTurnRate](jsonrpc-api.md#getstorageturnrate)
|
||||
* [getSupply](jsonrpc-api.md#getsupply)
|
||||
* [getTransactionCount](jsonrpc-api.md#gettransactioncount)
|
||||
* [getTotalSupply](jsonrpc-api.md#gettotalsupply)
|
||||
* [getVersion](jsonrpc-api.md#getversion)
|
||||
* [getVoteAccounts](jsonrpc-api.md#getvoteaccounts)
|
||||
* [minimumLedgerSlot](jsonrpc-api.md#minimumledgerslot)
|
||||
* [requestAirdrop](jsonrpc-api.md#requestairdrop)
|
||||
* [sendTransaction](jsonrpc-api.md#sendtransaction)
|
||||
* [simulateTransaction](jsonrpc-api.md#simulatetransaction)
|
||||
* [setLogFilter](jsonrpc-api.md#setlogfilter)
|
||||
* [validatorExit](jsonrpc-api.md#validatorexit)
|
||||
* [Subscription Websocket](jsonrpc-api.md#subscription-websocket)
|
||||
@ -94,7 +97,8 @@ Requests can be sent in batches by sending an array of JSON-RPC request objects
|
||||
|
||||
Solana nodes choose which bank state to query based on a commitment requirement
|
||||
set by the client. Clients may specify either:
|
||||
* `{"commitment":"max"}` - the node will query the most recent bank having reached `MAX_LOCKOUT_HISTORY` confirmations
|
||||
* `{"commitment":"max"}` - the node will query the most recent bank confirmed by the cluster as having reached `MAX_LOCKOUT_HISTORY` confirmations
|
||||
* `{"commitment":"root"}` - the node will query the most recent bank having reached `MAX_LOCKOUT_HISTORY` confirmations on this node
|
||||
* `{"commitment":"recent"}` - the node will query its most recent bank state
|
||||
|
||||
The commitment parameter should be included as the last element in the `params` array:
|
||||
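For illustration, here is what a `getBalance` request looks like with the commitment object as the final element of `params` (a sketch only; the address is borrowed from the `getConfirmedBlock` example further down this page):

```bash
// Request with a commitment parameter
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getBalance", "params":["6H94zdiaYfRfPfKjYLjyr2VFBg6JHXygy84r3qhc3NsC", {"commitment":"max"}]}' http://localhost:8899
```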
@ -115,31 +119,18 @@ Many methods that take a commitment parameter return an RpcResponse JSON object
|
||||
* `value` : The value returned by the operation itself.
|
||||
|
||||
|
||||
## Health Check
|
||||
Although not a JSON RPC API, a `GET /health` request at the RPC HTTP Endpoint provides a
|
||||
health-check mechanism for use by load balancers or other network
|
||||
infrastructure. This request will always return an HTTP 200 OK response with a body of
|
||||
"ok" or "behind" based on the following conditions:
|
||||
1. If one or more `--trusted-validator` arguments are provided to `solana-validator`, "ok" is returned
|
||||
when the node is within `HEALTH_CHECK_SLOT_DISTANCE` slots of the highest trusted validator,
|
||||
otherwise "behind" is returned.
|
||||
2. "ok" is always returned if no trusted validators are provided.
|
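For example, a load balancer can probe the endpoint directly (a minimal sketch, assuming the node's RPC service is listening on the default port 8899):

```bash
// Request
curl http://localhost:8899/health

// Result
ok
```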
||||
|
||||
## JSON RPC API Reference
|
||||
|
||||
### confirmTransaction
|
||||
|
||||
Returns a transaction receipt. This method only searches the recent status cache of signatures, which retains all active slots plus `MAX_RECENT_BLOCKHASHES` rooted slots.
|
||||
|
||||
#### Parameters:
|
||||
|
||||
* `<string>` - Signature of Transaction to confirm, as base-58 encoded string
|
||||
* `<object>` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
|
||||
|
||||
#### Results:
|
||||
|
||||
* `RpcResponse<bool>` - RpcResponse JSON object with `value` field set to Transaction status, boolean true if Transaction is confirmed
|
||||
|
||||
#### Example:
|
||||
|
||||
```bash
|
||||
// Request
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"confirmTransaction", "params":["5VERv8NMvzbJMEkV8xnrLkEaWRtSz9CosKDYjCJjBRnbJLgp8uirBgmQpjKhoR4tjF3ZpRzrFmBV6UjKdiSZkQUW"]}' http://localhost:8899
|
||||
|
||||
// Result
|
||||
{"jsonrpc":"2.0","result":{"context":{"slot":1},"value":true},"id":1}
|
||||
```
|
||||
|
||||
### getAccountInfo
|
||||
|
||||
Returns all information associated with the account of the provided Pubkey
|
||||
@ -207,7 +198,7 @@ The result field will be a JSON object containing:
|
||||
|
||||
* `commitment` - commitment, comprising either:
|
||||
* `<null>` - Unknown block
|
||||
* `<array>` - commitment, array of u64 integers logging the amount of cluster stake in lamports that has voted on the block at each depth from 0 to `MAX_LOCKOUT_HISTORY`
|
||||
* `<array>` - commitment, array of u64 integers logging the amount of cluster stake in lamports that has voted on the block at each depth from 0 to `MAX_LOCKOUT_HISTORY` + 1
|
||||
* `totalStake` - total active stake, in lamports, of the current epoch
|
||||
|
||||
#### Example:
|
||||
@ -217,7 +208,7 @@ The result field will be a JSON object containing:
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getBlockCommitment","params":[5]}' http://localhost:8899
|
||||
|
||||
// Result
|
||||
{"jsonrpc":"2.0","result":{"commitment":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,10,32],"totalStake": 42},"id":1}
|
||||
{"jsonrpc":"2.0","result":{"commitment":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,10,32],"totalStake": 42},"id":1}
|
||||
```
|
||||
|
||||
### getBlockTime
|
||||
@ -268,7 +259,8 @@ The result field will be an array of JSON objects, each with the following sub f
|
||||
* `pubkey: <string>` - Node public key, as base-58 encoded string
|
||||
* `gossip: <string>` - Gossip network address for the node
|
||||
* `tpu: <string>` - TPU network address for the node
|
||||
* `rpc: <string>` - JSON RPC network address for the node, or `null` if the JSON RPC service is not enabled
|
||||
* `rpc: <string>|null` - JSON RPC network address for the node, or `null` if the JSON RPC service is not enabled
|
||||
* `version: <string>|null` - The software version of the node, or `null` if the version information is not available
|
||||
|
||||
#### Example:
|
||||
|
||||
@ -277,7 +269,7 @@ The result field will be an array of JSON objects, each with the following sub f
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getClusterNodes"}' http://localhost:8899
|
||||
|
||||
// Result
|
||||
{"jsonrpc":"2.0","result":[{"gossip":"10.239.6.48:8001","pubkey":"9QzsJf7LPLj8GkXbYT3LFDKqsj2hHG7TA3xinJHu8epQ","rpc":"10.239.6.48:8899","tpu":"10.239.6.48:8856"}],"id":1}
|
||||
{"jsonrpc":"2.0","result":[{"gossip":"10.239.6.48:8001","pubkey":"9QzsJf7LPLj8GkXbYT3LFDKqsj2hHG7TA3xinJHu8epQ","rpc":"10.239.6.48:8899","tpu":"10.239.6.48:8856"},"version":"1.0.0 c375ce1f"],"id":1}
|
||||
```
|
||||
|
||||
### getConfirmedBlock
|
||||
@ -293,21 +285,24 @@ Returns identity and transaction information about a confirmed block in the ledg
|
||||
|
||||
The result field will be an object with the following fields:
|
||||
|
||||
* `blockhash: <string>` - the blockhash of this block, as base-58 encoded string
|
||||
* `previousBlockhash: <string>` - the blockhash of this block's parent, as base-58 encoded string
|
||||
* `parentSlot: <u64>` - the slot index of this block's parent
|
||||
* `transactions: <array>` - an array of JSON objects containing:
|
||||
* `transaction: <object|string>` - [Transaction](#transaction-structure) object, either in JSON format or base-58 encoded binary data, depending on encoding parameter
|
||||
* `meta: <object>` - transaction status metadata object, containing `null` or:
|
||||
* `status: <object>` - Transaction status:
|
||||
* `"Ok": null` - Transaction was successful
|
||||
* `"Err": <ERR>` - Transaction failed with TransactionError [TransactionError definitions](https://github.com/solana-labs/solana/blob/master/sdk/src/transaction.rs#L18)
|
||||
* `fee: <u64>` - fee this transaction was charged, as u64 integer
|
||||
* `preBalances: <array>` - array of u64 account balances from before the transaction was processed
|
||||
* `postBalances: <array>` - array of u64 account balances after the transaction was processed
|
||||
* `rewards: <array>` - an array of JSON objects containing:
|
||||
* `pubkey: <string>` - The public key, as base-58 encoded string, of the account that received the reward
|
||||
* `lamports: <i64>`- number of reward lamports credited or debited by the account, as a i64
|
||||
* `<null>` - if specified block is not confirmed
|
||||
* `<object>` - if block is confirmed, an object with the following fields:
|
||||
* `blockhash: <string>` - the blockhash of this block, as base-58 encoded string
|
||||
* `previousBlockhash: <string>` - the blockhash of this block's parent, as base-58 encoded string; if the parent block is not available due to ledger cleanup, this field will return "11111111111111111111111111111111"
|
||||
* `parentSlot: <u64>` - the slot index of this block's parent
|
||||
* `transactions: <array>` - an array of JSON objects containing:
|
||||
* `transaction: <object|string>` - [Transaction](#transaction-structure) object, either in JSON format or base-58 encoded binary data, depending on encoding parameter
|
||||
* `meta: <object>` - transaction status metadata object, containing `null` or:
|
||||
* `err: <object | null>` - Error if transaction failed, null if transaction succeeded. [TransactionError definitions](https://github.com/solana-labs/solana/blob/master/sdk/src/transaction.rs#L14)
|
||||
* `fee: <u64>` - fee this transaction was charged, as u64 integer
|
||||
* `preBalances: <array>` - array of u64 account balances from before the transaction was processed
|
||||
* `postBalances: <array>` - array of u64 account balances after the transaction was processed
|
||||
* DEPRECATED: `status: <object>` - Transaction status
|
||||
* `"Ok": <null>` - Transaction was successful
|
||||
* `"Err": <ERR>` - Transaction failed with TransactionError
|
||||
* `rewards: <array>` - an array of JSON objects containing:
|
||||
* `pubkey: <string>` - The public key, as base-58 encoded string, of the account that received the reward
|
||||
* `lamports: <i64>`- number of reward lamports credited or debited by the account, as a i64
|
||||
|
||||
#### Example:
|
||||
|
||||
@ -316,18 +311,18 @@ The result field will be an object with the following fields:
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc": "2.0","id":1,"method":"getConfirmedBlock","params":[430, "json"]}' localhost:8899
|
||||
|
||||
// Result
|
||||
{"jsonrpc":"2.0","result":{"blockhash":"Gp3t5bfDsJv1ovP8cB1SuRhXVuoTqDv7p3tymyubYg5","parentSlot":429,"previousBlockhash":"EFejToxii1L5aUF2NrK9dsbAEmZSNyN5nsipmZHQR1eA","transactions":[{"transaction":{"message":{"accountKeys":["6H94zdiaYfRfPfKjYLjyr2VFBg6JHXygy84r3qhc3NsC","39UAy8hsoYPywGPGdmun747omSr79zLSjqvPJN3zetoH","SysvarS1otHashes111111111111111111111111111","SysvarC1ock11111111111111111111111111111111","Vote111111111111111111111111111111111111111"],"header":{"numReadonlySignedAccounts":0,"numReadonlyUnsignedAccounts":3,"numRequiredSignatures":2},"instructions":[{"accounts":[1,2,3],"data":"29z5mr1JoRmJYQ6ynmk3pf31cGFRziAF1M3mT3L6sFXf5cKLdkEaMXMT8AqLpD4CpcupHmuMEmtZHpomrwfdZetSomNy3d","programIdIndex":4}],"recentBlockhash":"EFejToxii1L5aUF2NrK9dsbAEmZSNyN5nsipmZHQR1eA"},"signatures":["35YGay1Lwjwgxe9zaH6APSHbt9gYQUCtBWTNL3aVwVGn9xTFw2fgds7qK5AL29mP63A9j3rh8KpN1TgSR62XCaby","4vANMjSKiwEchGSXwVrQkwHnmsbKQmy9vdrsYxWdCup1bLsFzX8gKrFTSVDCZCae2dbxJB9mPNhqB2sD1vvr4sAD"]},"meta":{"fee":1000,"postBalances":[499999972500,15298080,1,1,1],"preBalances":[499999990500,15298080,1,1,1],"status":{"Ok":null}}}]},"id":1}
|
||||
{"jsonrpc":"2.0","result":{"blockhash":"Gp3t5bfDsJv1ovP8cB1SuRhXVuoTqDv7p3tymyubYg5","parentSlot":429,"previousBlockhash":"EFejToxii1L5aUF2NrK9dsbAEmZSNyN5nsipmZHQR1eA","transactions":[{"transaction":{"message":{"accountKeys":["6H94zdiaYfRfPfKjYLjyr2VFBg6JHXygy84r3qhc3NsC","39UAy8hsoYPywGPGdmun747omSr79zLSjqvPJN3zetoH","SysvarS1otHashes111111111111111111111111111","SysvarC1ock11111111111111111111111111111111","Vote111111111111111111111111111111111111111"],"header":{"numReadonlySignedAccounts":0,"numReadonlyUnsignedAccounts":3,"numRequiredSignatures":2},"instructions":[{"accounts":[1,2,3],"data":"29z5mr1JoRmJYQ6ynmk3pf31cGFRziAF1M3mT3L6sFXf5cKLdkEaMXMT8AqLpD4CpcupHmuMEmtZHpomrwfdZetSomNy3d","programIdIndex":4}],"recentBlockhash":"EFejToxii1L5aUF2NrK9dsbAEmZSNyN5nsipmZHQR1eA"},"signatures":["35YGay1Lwjwgxe9zaH6APSHbt9gYQUCtBWTNL3aVwVGn9xTFw2fgds7qK5AL29mP63A9j3rh8KpN1TgSR62XCaby","4vANMjSKiwEchGSXwVrQkwHnmsbKQmy9vdrsYxWdCup1bLsFzX8gKrFTSVDCZCae2dbxJB9mPNhqB2sD1vvr4sAD"]},"meta":{"err":null,"fee":18000,"postBalances":[499999972500,15298080,1,1,1],"preBalances":[499999990500,15298080,1,1,1],"status":{"Ok":null}}}]},"id":1}
|
||||
|
||||
// Request
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc": "2.0","id":1,"method":"getConfirmedBlock","params":[430, "binary"]}' localhost:8899
|
||||
|
||||
// Result
|
||||
{"jsonrpc":"2.0","result":{"blockhash":"Gp3t5bfDsJv1ovP8cB1SuRhXVuoTqDv7p3tymyubYg5","parentSlot":429,"previousBlockhash":"EFejToxii1L5aUF2NrK9dsbAEmZSNyN5nsipmZHQR1eA","transactions":[{"transaction":"81UZJt4dh4Do66jDhrgkQudS8J2N6iG3jaVav7gJrqJSFY4Ug53iA9JFJZh2gxKWcaFdLJwhHx9mRdg9JwDAWB4ywiu5154CRwXV4FMdnPLg7bhxRLwhhYaLsVgMF5AyNRcTzjCVoBvqFgDU7P8VEKDEiMvD3qxzm1pLZVxDG1LTQpT3Dz4Uviv4KQbFQNuC22KupBoyHFB7Zh6KFdMqux4M9PvhoqcoJsJKwXjWpKu7xmEKnnrSbfLadkgjBmmjhW3fdTrFvnhQdTkhtdJxUL1xS9GMuJQer8YgSKNtUXB1eXZQwXU8bU2BjYkZE6Q5Xww8hu9Z4E4Mo4QsooVtHoP6BM3NKw8zjVbWfoCQqxTrwuSzrNCWCWt58C24LHecH67CTt2uXbYSviixvrYkK7A3t68BxTJcF1dXJitEPTFe2ceTkauLJqrJgnER4iUrsjr26T8YgWvpY9wkkWFSviQW6wV5RASTCUasVEcrDiaKj8EQMkgyDoe9HyKitSVg67vMWJFpUXpQobseWJUs5FTWWzmfHmFp8FZ","meta":{"fee":1000,"postBalances":[499999972500,15298080,1,1,1],"preBalances":[499999990500,15298080,1,1,1],"status":{"Ok":null}}}]},"id":1}
|
||||
{"jsonrpc":"2.0","result":{"blockhash":"Gp3t5bfDsJv1ovP8cB1SuRhXVuoTqDv7p3tymyubYg5","parentSlot":429,"previousBlockhash":"EFejToxii1L5aUF2NrK9dsbAEmZSNyN5nsipmZHQR1eA","transactions":[{"transaction":"81UZJt4dh4Do66jDhrgkQudS8J2N6iG3jaVav7gJrqJSFY4Ug53iA9JFJZh2gxKWcaFdLJwhHx9mRdg9JwDAWB4ywiu5154CRwXV4FMdnPLg7bhxRLwhhYaLsVgMF5AyNRcTzjCVoBvqFgDU7P8VEKDEiMvD3qxzm1pLZVxDG1LTQpT3Dz4Uviv4KQbFQNuC22KupBoyHFB7Zh6KFdMqux4M9PvhoqcoJsJKwXjWpKu7xmEKnnrSbfLadkgjBmmjhW3fdTrFvnhQdTkhtdJxUL1xS9GMuJQer8YgSKNtUXB1eXZQwXU8bU2BjYkZE6Q5Xww8hu9Z4E4Mo4QsooVtHoP6BM3NKw8zjVbWfoCQqxTrwuSzrNCWCWt58C24LHecH67CTt2uXbYSviixvrYkK7A3t68BxTJcF1dXJitEPTFe2ceTkauLJqrJgnER4iUrsjr26T8YgWvpY9wkkWFSviQW6wV5RASTCUasVEcrDiaKj8EQMkgyDoe9HyKitSVg67vMWJFpUXpQobseWJUs5FTWWzmfHmFp8FZ","meta":{"err":null,"fee":18000,"postBalances":[499999972500,15298080,1,1,1],"preBalances":[499999990500,15298080,1,1,1],"status":{"Ok":null}}}]},"id":1}
|
||||
```
|
||||
|
||||
#### Transaction Structure
|
||||
|
||||
Transactions are quite different from those on other blockchains. Be sure to review [Anatomy of a Transaction](transaction.md) to learn about transactions on Solana.
|
||||
Transactions are quite different from those on other blockchains. Be sure to review [Anatomy of a Transaction](../transaction.md) to learn about transactions on Solana.
|
||||
|
||||
The JSON structure of a transaction is defined as follows:
|
||||
|
||||
@ -369,6 +364,72 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc": "2.0","id":1,"m
|
||||
{"jsonrpc":"2.0","result":[5,6,7,8,9,10],"id":1}
|
||||
```
|
||||
|
||||
### getConfirmedSignaturesForAddress
|
||||
|
||||
Returns a list of all the confirmed signatures for transactions involving an address, within a specified Slot range. Max range allowed is 10_000 Slots.
|
||||
|
||||
#### Parameters:
|
||||
|
||||
* `<string>` - account address as base-58 encoded string
|
||||
* `<u64>` - start slot, inclusive
|
||||
* `<u64>` - end slot, inclusive
|
||||
|
||||
#### Results:
|
||||
|
||||
The result field will be an array of:
|
||||
* `<string>` - transaction signature as base-58 encoded string
|
||||
|
||||
The signatures will be ordered based on the Slot in which they were confirmed, from lowest to highest Slot
|
||||
|
||||
#### Example:
|
||||
|
||||
```bash
|
||||
// Request
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc": "2.0","id":1,"method":"getConfirmedSignaturesForAddress","params":["6H94zdiaYfRfPfKjYLjyr2VFBg6JHXygy84r3qhc3NsC", 0, 100]}' localhost:8899
|
||||
|
||||
// Result
|
||||
{"jsonrpc":"2.0","result":{["35YGay1Lwjwgxe9zaH6APSHbt9gYQUCtBWTNL3aVwVGn9xTFw2fgds7qK5AL29mP63A9j3rh8KpN1TgSR62XCaby","4bJdGN8Tt2kLWZ3Fa1dpwPSEkXWWTSszPSf1rRVsCwNjxbbUdwTeiWtmi8soA26YmwnKD4aAxNp8ci1Gjpdv4gsr","4LQ14a7BYY27578Uj8LPCaVhSdJGLn9DJqnUJHpy95FMqdKf9acAhUhecPQNjNUy6VoNFUbvwYkPociFSf87cWbG"]},"id":1}
|
||||
```
|
||||
|
||||
### getConfirmedTransaction
|
||||
|
||||
Returns transaction details for a confirmed transaction
|
||||
|
||||
#### Parameters:
|
||||
|
||||
* `<string>` - transaction signature as base-58 encoded string
|
||||
* `<string>` - (optional) encoding for the returned Transaction, either "json" or "binary". If not provided, the default encoding is JSON.
|
||||
|
||||
#### Results:
|
||||
|
||||
The result field will be an object with the following fields:
|
||||
* `slot: <u64>` - the slot this transaction was processed in
|
||||
* `transaction: <object|string>` - [Transaction](#transaction-structure) object, either in JSON format or base-58 encoded binary data, depending on encoding parameter
|
||||
* `meta: <object>` - transaction status metadata object, containing `null` or:
|
||||
* `err: <object | null>` - Error if transaction failed, null if transaction succeeded. [TransactionError definitions](https://github.com/solana-labs/solana/blob/master/sdk/src/transaction.rs#L14)
|
||||
* `fee: <u64>` - fee this transaction was charged, as u64 integer
|
||||
* `preBalances: <array>` - array of u64 account balances from before the transaction was processed
|
||||
* `postBalances: <array>` - array of u64 account balances after the transaction was processed
|
||||
* DEPRECATED: `status: <object>` - Transaction status
|
||||
* `"Ok": <null>` - Transaction was successful
|
||||
* `"Err": <ERR>` - Transaction failed with TransactionError
|
||||
|
||||
#### Example:
|
||||
|
||||
```bash
|
||||
// Request
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc": "2.0","id":1,"method":"getConfirmedTransaction","params":["35YGay1Lwjwgxe9zaH6APSHbt9gYQUCtBWTNL3aVwVGn9xTFw2fgds7qK5AL29mP63A9j3rh8KpN1TgSR62XCaby", "json"]}' localhost:8899
|
||||
|
||||
// Result
|
||||
{"jsonrpc":"2.0","result":{"slot":430,"transaction":{"message":{"accountKeys":["6H94zdiaYfRfPfKjYLjyr2VFBg6JHXygy84r3qhc3NsC","39UAy8hsoYPywGPGdmun747omSr79zLSjqvPJN3zetoH","SysvarS1otHashes111111111111111111111111111","SysvarC1ock11111111111111111111111111111111","Vote111111111111111111111111111111111111111"],"header":{"numReadonlySignedAccounts":0,"numReadonlyUnsignedAccounts":3,"numRequiredSignatures":2},"instructions":[{"accounts":[1,2,3],"data":"29z5mr1JoRmJYQ6ynmk3pf31cGFRziAF1M3mT3L6sFXf5cKLdkEaMXMT8AqLpD4CpcupHmuMEmtZHpomrwfdZetSomNy3d","programIdIndex":4}],"recentBlockhash":"EFejToxii1L5aUF2NrK9dsbAEmZSNyN5nsipmZHQR1eA"},"signatures":["35YGay1Lwjwgxe9zaH6APSHbt9gYQUCtBWTNL3aVwVGn9xTFw2fgds7qK5AL29mP63A9j3rh8KpN1TgSR62XCaby","4vANMjSKiwEchGSXwVrQkwHnmsbKQmy9vdrsYxWdCup1bLsFzX8gKrFTSVDCZCae2dbxJB9mPNhqB2sD1vvr4sAD"]},"meta":{"err":null,"fee":18000,"postBalances":[499999972500,15298080,1,1,1],"preBalances":[499999990500,15298080,1,1,1],"status":{"Ok":null}}},"id":1}
|
||||
|
||||
// Request
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc": "2.0","id":1,"method":"getConfirmedTransaction","params":["35YGay1Lwjwgxe9zaH6APSHbt9gYQUCtBWTNL3aVwVGn9xTFw2fgds7qK5AL29mP63A9j3rh8KpN1TgSR62XCaby", "binary"]}' localhost:8899
|
||||
|
||||
// Result
|
||||
{"jsonrpc":"2.0","result":{"slot":430,"transaction":"81UZJt4dh4Do66jDhrgkQudS8J2N6iG3jaVav7gJrqJSFY4Ug53iA9JFJZh2gxKWcaFdLJwhHx9mRdg9JwDAWB4ywiu5154CRwXV4FMdnPLg7bhxRLwhhYaLsVgMF5AyNRcTzjCVoBvqFgDU7P8VEKDEiMvD3qxzm1pLZVxDG1LTQpT3Dz4Uviv4KQbFQNuC22KupBoyHFB7Zh6KFdMqux4M9PvhoqcoJsJKwXjWpKu7xmEKnnrSbfLadkgjBmmjhW3fdTrFvnhQdTkhtdJxUL1xS9GMuJQer8YgSKNtUXB1eXZQwXU8bU2BjYkZE6Q5Xww8hu9Z4E4Mo4QsooVtHoP6BM3NKw8zjVbWfoCQqxTrwuSzrNCWCWt58C24LHecH67CTt2uXbYSviixvrYkK7A3t68BxTJcF1dXJitEPTFe2ceTkauLJqrJgnER4iUrsjr26T8YgWvpY9wkkWFSviQW6wV5RASTCUasVEcrDiaKj8EQMkgyDoe9HyKitSVg67vMWJFpUXpQobseWJUs5FTWWzmfHmFp8FZ","meta":{"err":null,"fee":18000,"postBalances":[499999972500,15298080,1,1,1],"preBalances":[499999990500,15298080,1,1,1],"status":{"Ok":null}}},"id":1}
|
||||
```
|
||||
|
||||
### getEpochInfo
|
||||
|
||||
Returns information about the current epoch
|
||||
@ -476,6 +537,28 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "m
|
||||
{"jsonrpc":"2.0","result":{"context":{"slot":54},"value":{"feeRateGovernor":{"burnPercent":50,"maxLamportsPerSignature":100000,"minLamportsPerSignature":5000,"targetLamportsPerSignature":10000,"targetSignaturesPerSlot":20000}}},"id":1}
|
||||
```
|
||||
|
||||
### getFirstAvailableBlock
|
||||
|
||||
Returns the slot of the lowest confirmed block that has not been purged from the ledger
|
||||
|
||||
#### Parameters:
|
||||
|
||||
None
|
||||
|
||||
#### Results:
|
||||
|
||||
* `<u64>` - Slot
|
||||
|
||||
#### Example:
|
||||
|
||||
```bash
|
||||
// Request
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getFirstAvailableBlock"}' http://localhost:8899
|
||||
|
||||
// Result
|
||||
{"jsonrpc":"2.0","result":250000,"id":1}
|
||||
```
|
||||
|
||||
### getGenesisHash
|
||||
|
||||
Returns the genesis hash
|
||||
@ -550,6 +633,32 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "m
|
||||
{"jsonrpc":"2.0","result":{"foundation":0.05,"foundationTerm":7.0,"initial":0.15,"storage":0.1,"taper":0.15,"terminal":0.015},"id":1}
|
||||
```
|
||||
|
||||
### getLargestAccounts
|
||||
|
||||
Returns the 20 largest accounts, by lamport balance
|
||||
|
||||
#### Parameters:
|
||||
|
||||
* `<object>` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
|
||||
|
||||
#### Results:
|
||||
|
||||
The result will be an RpcResponse JSON object with `value` equal to an array of:
|
||||
|
||||
* `<object>` - a JSON object containing:
|
||||
* `address: <string>`, base-58 encoded address of the account
|
||||
* `lamports: <u64>`, number of lamports in the account, as a u64
|
||||
|
||||
#### Example:
|
||||
|
||||
```bash
|
||||
// Request
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getLargestAccounts"}' http://localhost:8899
|
||||
|
||||
// Result
|
||||
{"jsonrpc":"2.0","result":{"context":{"slot":54},"value":[{"lamports":999974,"address":"99P8ZgtJYe1buSK8JXkvpLh8xPsCFuLYhz9hQFNw93WJ"},{"lamports":42,"address":"uPwWLo16MVehpyWqsLkK3Ka8nLowWvAHbBChqv2FZeL"},{"lamports":42,"address":"aYJCgU7REfu3XF8b3QhkqgqQvLizx8zxuLBHA25PzDS"},{"lamports":42,"address":"CTvHVtQ4gd4gUcw3bdVgZJJqApXE9nCbbbP4VTS5wE1D"},{"lamports":20,"address":"4fq3xJ6kfrh9RkJQsmVd5gNMvJbuSHfErywvEjNQDPxu"},{"lamports":4,"address":"AXJADheGVp9cruP8WYu46oNkRbeASngN5fPCMVGQqNHa"},{"lamports":2,"address":"8NT8yS6LiwNprgW4yM1jPPow7CwRUotddBVkrkWgYp24"},{"lamports":1,"address":"SysvarEpochSchedu1e111111111111111111111111"},{"lamports":1,"address":"11111111111111111111111111111111"},{"lamports":1,"address":"Stake11111111111111111111111111111111111111"},{"lamports":1,"address":"SysvarC1ock11111111111111111111111111111111"},{"lamports":1,"address":"StakeConfig11111111111111111111111111111111"},{"lamports":1,"address":"SysvarRent111111111111111111111111111111111"},{"lamports":1,"address":"Config1111111111111111111111111111111111111"},{"lamports":1,"address":"SysvarStakeHistory1111111111111111111111111"},{"lamports":1,"address":"SysvarRecentB1ockHashes11111111111111111111"},{"lamports":1,"address":"SysvarFees111111111111111111111111111111111"},{"lamports":1,"address":"Vote111111111111111111111111111111111111111"}]},"id":1}
|
||||
```
|
||||
|
||||
### getLeaderSchedule
|
||||
|
||||
Returns the leader schedule for an epoch
|
||||
@ -655,41 +764,18 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "m
|
||||
{"jsonrpc":"2.0","result":{"context":{"slot":1},"value":{"blockhash":"CSymwgTNX1j3E4qhKfJAUE41nBWEwXufoYryPbkde5RR","feeCalculator":{"burnPercent":50,"lamportsPerSignature":5000,"maxLamportsPerSignature":10000,"minLamportsPerSignature":5000,"targetLamportsPerSignature":1000,"targetSignaturesPerSlot":20000}}},"id":1}
|
||||
```
|
||||
|
||||
### getSignatureStatus
|
||||
|
||||
Returns the status of a given signature. This method is similar to [confirmTransaction](jsonrpc-api.md#confirmtransaction) but provides more resolution for error events. This method only searches the recent status cache of signatures, which retains all active slots plus `MAX_RECENT_BLOCKHASHES` rooted slots.
|
||||
|
||||
#### Parameters:
|
||||
|
||||
* `<string>` - Signature of Transaction to confirm, as base-58 encoded string
|
||||
* `<object>` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
|
||||
|
||||
#### Results:
|
||||
|
||||
* `<null>` - Unknown transaction
|
||||
* `<object>` - Transaction status:
|
||||
* `"Ok": <null>` - Transaction was successful
|
||||
* `"Err": <ERR>` - Transaction failed with TransactionError [TransactionError definitions](https://github.com/solana-labs/solana/blob/master/sdk/src/transaction.rs#L14)
|
||||
|
||||
#### Example:
|
||||
|
||||
```bash
|
||||
// Request
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getSignatureStatus", "params":["5VERv8NMvzbJMEkV8xnrLkEaWRtSz9CosKDYjCJjBRnbJLgp8uirBgmQpjKhoR4tjF3ZpRzrFmBV6UjKdiSZkQUW"]}' http://localhost:8899
|
||||
|
||||
// Result
|
||||
{"jsonrpc":"2.0","result":{"Ok": null},"id":1}
|
||||
```
|
||||
|
||||
### getSignatureStatuses
|
||||
|
||||
Returns the statuses of a list of signatures. This method is similar to [confirmTransaction](jsonrpc-api.md#confirmtransaction) but provides more resolution for error events. This method only searches the recent status cache of signatures, which retains all active slots plus `MAX_RECENT_BLOCKHASHES` rooted slots.
|
||||
Returns the statuses of a list of signatures. Unless the
|
||||
`searchTransactionHistory` configuration parameter is included, this method only
|
||||
searches the recent status cache of signatures, which retains statuses for all
|
||||
active slots plus `MAX_RECENT_BLOCKHASHES` rooted slots.
|
||||
|
||||
#### Parameters:
|
||||
|
||||
* `<array>` - An array of transaction signatures to confirm, as base-58 encoded strings
|
||||
* `<object>` - (optional) Extended Rpc configuration, containing the following optional fields:
|
||||
* `commitment: <string>` - [Commitment](jsonrpc-api.md#configuring-state-commitment)
|
||||
* `<object>` - (optional) Configuration object containing the following field:
|
||||
* `searchTransactionHistory: <bool>` - if true, a Solana node will search its ledger cache for any signatures not found in the recent status cache
|
||||
|
||||
#### Results:
|
||||
|
||||
@ -702,22 +788,26 @@ An array of:
|
||||
* `<null>` - Unknown transaction
|
||||
* `<object>`
|
||||
* `slot: <u64>` - The slot the transaction was processed
|
||||
* `confirmations: <usize | null>` - Number of blocks since signature confirmation, null if rooted
|
||||
* `status: <object>` - Transaction status
|
||||
* `confirmations: <usize | null>` - Number of blocks since signature confirmation; null if the transaction has been rooted, as well as finalized by a supermajority of the cluster
|
||||
* `err: <object | null>` - Error if transaction failed, null if transaction succeeded. [TransactionError definitions](https://github.com/solana-labs/solana/blob/master/sdk/src/transaction.rs#L14)
|
||||
* DEPRECATED: `status: <object>` - Transaction status
|
||||
* `"Ok": <null>` - Transaction was successful
|
||||
* `"Err": <ERR>` - Transaction failed with TransactionError [TransactionError definitions](https://github.com/solana-labs/solana/blob/master/sdk/src/transaction.rs#L14)
|
||||
* `"Err": <ERR>` - Transaction failed with TransactionError
|
||||
|
||||
#### Example:
|
||||
|
||||
```bash
|
||||
// Request
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getSignatureStatus", "params":[["5VERv8NMvzbJMEkV8xnrLkEaWRtSz9CosKDYjCJjBRnbJLgp8uirBgmQpjKhoR4tjF3ZpRzrFmBV6UjKdiSZkQUW", "5j7s6NiJS3JAkvgkoc18WVAsiSaci2pxB2A6ueCJP4tprA2TFg9wSyTLeYouxPBJEMzJinENTkpA52YStRW5Dia7"]]]}' http://localhost:8899
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getSignatureStatuses", "params":[["5VERv8NMvzbJMEkV8xnrLkEaWRtSz9CosKDYjCJjBRnbJLgp8uirBgmQpjKhoR4tjF3ZpRzrFmBV6UjKdiSZkQUW", "5j7s6NiJS3JAkvgkoc18WVAsiSaci2pxB2A6ueCJP4tprA2TFg9wSyTLeYouxPBJEMzJinENTkpA52YStRW5Dia7"]]}' http://localhost:8899
|
||||
|
||||
// Request with configuration
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getSignatureStatuses", "params":[["5VERv8NMvzbJMEkV8xnrLkEaWRtSz9CosKDYjCJjBRnbJLgp8uirBgmQpjKhoR4tjF3ZpRzrFmBV6UjKdiSZkQUW"], {"searchTransactionHistory": true}]}' http://localhost:8899
|
||||
|
||||
// Result
|
||||
{"jsonrpc":"2.0","result":{"context":{"slot":82},"value":[{"slot": 72, "confirmations": 10, "status": {"Ok": null}}, null]},"id":1}
|
||||
{"jsonrpc":"2.0","result":{"context":{"slot":82},"value":[{"slot": 72, "confirmations": 10, "err": null, "status": {"Ok": null}}, null]},"id":1}
|
||||
|
||||
// Result, first transaction rooted
|
||||
{"jsonrpc":"2.0","result":{"context":{"slot":82},"value":[{"slot": 48, "confirmations": null, "status": {"Ok": null}}, null]},"id":1}
|
||||
{"jsonrpc":"2.0","result":{"context":{"slot":82},"value":[{"slot": 48, "confirmations": null, "err": null, "status": {"Ok": null}}, null]},"id":1}
|
||||
```
|
||||
|
||||
### getSlot
|
||||
@ -851,6 +941,32 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "m
|
||||
{"jsonrpc":"2.0","result":1024,"id":1}
|
||||
```
|
||||
|
||||
### getSupply
|
||||
|
||||
Returns information about the current supply.
|
||||
|
||||
#### Parameters:
|
||||
|
||||
* `<object>` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
|
||||
|
||||
#### Results:
|
||||
|
||||
The result will be an RpcResponse JSON object with `value` equal to a JSON object containing:
|
||||
|
||||
* `total: <u64>` - Total supply in lamports
|
||||
* `circulating: <u64>` - Circulating supply in lamports
|
||||
* `nonCirculating: <u64>` - Non-circulating supply in lamports
|
||||
* `nonCirculatingAccounts: <array>` - an array of account addresses of non-circulating accounts, as strings
|
||||
|
||||
#### Example:
|
||||
|
||||
```bash
|
||||
// Request
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0", "id":1, "method":"getSupply"}' http://localhost:8899
|
||||
// Result
|
||||
{"jsonrpc":"2.0","result":{"context":{"slot":1114},"value":{"circulating":16000,"nonCirculating":1000000,"nonCirculatingAccounts":["FEy8pTbP5fEoqMV1GdTz83byuA8EKByqYat1PKDgVAq5","9huDUZfxoJ7wGMTffUE7vh1xePqef7gyrLJu9NApncqA","3mi1GmwEE3zo2jmfDuzvjSX9ovRXsDUKHvsntpkhuLJ9","BYxEJTDerkaRWBem3XgnVcdhppktBXa2HbkHPKj2Ui4Z],total:1016000}},"id":1}
|
||||
```
|
||||
|
||||
### getTransactionCount
|
||||
|
||||
Returns the current Transaction count from the ledger
|
||||
@ -873,28 +989,6 @@ curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "m
|
||||
{"jsonrpc":"2.0","result":268,"id":1}
|
||||
```
|
||||
|
||||
### getTotalSupply
|
||||
|
||||
Returns the current total supply in lamports
|
||||
|
||||
#### Parameters:
|
||||
|
||||
* `<object>` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
|
||||
|
||||
#### Results:
|
||||
|
||||
* `<u64>` - Total supply
|
||||
|
||||
#### Example:
|
||||
|
||||
```bash
|
||||
// Request
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getTotalSupply"}' http://localhost:8899
|
||||
|
||||
// Result
|
||||
{"jsonrpc":"2.0","result":10126,"id":1}
|
||||
```
|
||||
|
||||
### getVersion
|
||||
|
||||
Returns the current Solana version running on the node
|
||||
@ -915,7 +1009,7 @@ The result field will be a JSON object with the following fields:
|
||||
// Request
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"getVersion"}' http://localhost:8899
|
||||
// Result
|
||||
{"jsonrpc":"2.0","result":{"solana-core": "1.0.13"},"id":1}
|
||||
{"jsonrpc":"2.0","result":{"solana-core": "1.0.23"},"id":1}
|
||||
```
|
||||
|
||||
### getVoteAccounts
|
||||
@ -1011,10 +1105,34 @@ Creates new transaction
|
||||
|
||||
```bash
|
||||
// Request
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"sendTransaction", "params":["3gKEMTuxvm3DKEJc4UyiyoNz1sxwdVRW2pyDDXqaCvUjGApnsazGh2y4W92zuaSSdJhBbWLYAkZokBt4N5oW27R7zCVaLLpLxvATL2GgheEh9DmmDR1P9r1ZqirVXM2fF3z5cafmc4EtwWc1UErFdCWj1qYvy4bDGMLXRYLURxaKytEEqrxz6JXj8rUHhDpjTZeFxmC6iAW3hZr6cmaAzewQCQfiEv2HfydriwHDtN95u3Y1EF6SuXxcRqox2aTjGye2Ln9zFj4XbnAtjCmkZhR"]}' http://localhost:8899
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"sendTransaction", "params":["4hXTCkRzt9WyecNzV1XPgCDfGAZzQKNxLXgynz5QDuWWPSAZBZSHptvWRL3BjCvzUXRdKvHL2b7yGrRQcWyaqsaBCncVG7BFggS8w9snUts67BSh3EqKpXLUm5UMHfD7ZBe9GhARjbNQMLJ1QD3Spr6oMTBU6EhdB4RD8CP2xUxr2u3d6fos36PD98XS6oX8TQjLpsMwncs5DAMiD4nNnR8NBfyghGCWvCVifVwvA8B8TJxE1aiyiv2L429BCWfyzAme5sZW8rDb14NeCQHhZbtNqfXhcp2tAnaAT"]}' http://localhost:8899
|
||||
|
||||
// Result
|
||||
{"jsonrpc":"2.0","result":"2EBVM6cB8vAAD93Ktr6Vd8p67XPbQzCJX47MpReuiCXJAtcjaxpvWpcg9Ege1Nr5Tk3a2GFrByT7WPBjdsTycY9b","id":1}
|
||||
{"jsonrpc":"2.0","result":"2id3YC2jK9G5Wo2phDx4gJVAew8DcY5NAojnVuao8rkxwPYPe8cSwE5GzhEgJA2y8fVjDEo6iR6ykBvDxrTQrtpb","id":1}
|
||||
```
|
||||
|
||||
### simulateTransaction
|
||||
|
||||
Simulate sending a transaction
|
||||
|
||||
#### Parameters:
|
||||
|
||||
* `<string>` - Transaction, as base-58 encoded string. The transaction must have a valid blockhash, but is not required to be signed.
|
||||
* `<object>` - (optional) Configuration object containing the following field:
|
||||
* `sigVerify: <bool>` - if true the transaction signatures will be verified (default: false)
|
||||
|
||||
#### Results:
|
||||
|
||||
An RpcResponse containing a TransactionStatus object
|
||||
|
||||
#### Example:
|
||||
|
||||
```bash
|
||||
// Request
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","id":1, "method":"simulateTransaction", "params":["4hXTCkRzt9WyecNzV1XPgCDfGAZzQKNxLXgynz5QDuWWPSAZBZSHptvWRL3BjCvzUXRdKvHL2b7yGrRQcWyaqsaBCncVG7BFggS8w9snUts67BSh3EqKpXLUm5UMHfD7ZBe9GhARjbNQMLJ1QD3Spr6oMTBU6EhdB4RD8CP2xUxr2u3d6fos36PD98XS6oX8TQjLpsMwncs5DAMiD4nNnR8NBfyghGCWvCVifVwvA8B8TJxE1aiyiv2L429BCWfyzAme5sZW8rDb14NeCQHhZbtNqfXhcp2tAnaAT"]}' http://localhost:8899
|
||||
|
||||
// Result
|
||||
{"jsonrpc":"2.0","result":{"context":{"slot":218},"value":{"confirmations":0,"err":null,"slot":218,"status":{"Ok":null}}},"id":1}
|
||||
```
|
||||
|
||||
### setLogFilter
|
||||
@ -1226,7 +1344,7 @@ Subscribe to a transaction signature to receive notification when the transactio
|
||||
#### Notification Format:
|
||||
|
||||
```bash
|
||||
{"jsonrpc": "2.0","method": "signatureNotification", "params": {"result": "Confirmed","subscription":0}}
|
||||
{"jsonrpc": "2.0","method": "signatureNotification", "params": {"result": {"err": null}, "subscription":0}}
|
||||
```
|
||||
|
||||
### signatureUnsubscribe
|
||||
|
@ -171,7 +171,7 @@ $ solana send-timestamp <PUBKEY> <PROCESS_ID> --date 2018-12-24T23:59:00
|
||||
## Usage
|
||||
### solana-cli
|
||||
```text
|
||||
solana-cli 1.0.13 [channel=unknown commit=unknown]
|
||||
solana-cli 1.0.23 [channel=unknown commit=unknown]
|
||||
Blockchain, Rebuilt for Scale
|
||||
|
||||
USAGE:
|
||||
|
@ -5,50 +5,18 @@ simplest way for most users to get started with a Solana wallet.
|
||||
## Install Trust Wallet
|
||||
|
||||
#### iOS
|
||||
|
||||
- Open the App Store
|
||||
- Download “Trust: Crypto & Bitcoin Wallet” from Six Days LLC
|
||||
- Requires iOS 13.0 or higher
|
||||
- Open Trust Wallet and follow the app prompts to get started
|
||||
|
||||
***
|
||||
|
||||
#### Android
|
||||
|
||||
**NOTE: At this time, Solana's SOL tokens are only supported in the Beta version
|
||||
of Trust Wallet for Android. The following steps explain how to install this
|
||||
Beta version to start using your Solana wallet. Check back here or check the
|
||||
latest official Trust Wallet release notes for when support is added to their
|
||||
official Android release.**
|
||||
|
||||
- Open the Play Store
|
||||
- Download the official version of Trust Wallet
|
||||
- “Trust: Crypto & Bitcoin Wallet” from Six Days LLC
|
||||
- Download “Trust Crypto Wallet” from Six Days LLC
|
||||
- Requires Android 6.0 or higher
|
||||
|
||||

|
||||
|
||||
##### Enable Beta version of Trust Wallet
|
||||
- Make sure you already have the official version installed
|
||||
- Open Play Store and view Trust Wallet's app page
|
||||
- Scroll down to the bottom to the "Beta" section and tap "Join"
|
||||
- It may take a few minutes for your device to get access to the Beta version
|
||||
|
||||

|
||||
|
||||
##### Upgrade to the Beta version
|
||||
- Open Play Store .
|
||||
- Tap Menu --> My apps and games --> Beta.
|
||||
- Tap Trust Wallet
|
||||
- Tap Upgrade when brought back to the Trust Wallet (Beta) install page
|
||||
|
||||

|
||||
|
||||
***
|
||||
|
||||

|
||||
|
||||
##### Beta Install Support for Android
|
||||
- [Google's Official Help for Installing Beta Versions of Apps](https://support.google.com/googleplay/answer/7003180?hl=en)
|
||||
- Open Trust Wallet and follow the app prompts to get started
|
||||
|
||||
## Add Solana (SOL) tokens to your wallet
|
||||
- From the main page, go to the “Tokens” tab at the top of the screen
|
||||
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "solana-faucet"
|
||||
version = "1.0.13"
|
||||
version = "1.0.23"
|
||||
description = "Solana Faucet"
|
||||
authors = ["Solana Maintainers <maintainers@solana.com>"]
|
||||
repository = "https://github.com/solana-labs/solana"
|
||||
@ -19,10 +19,10 @@ clap = "2.33"
|
||||
log = "0.4.8"
|
||||
serde = "1.0.104"
|
||||
serde_derive = "1.0.103"
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.0.13" }
|
||||
solana-logger = { path = "../logger", version = "1.0.13" }
|
||||
solana-metrics = { path = "../metrics", version = "1.0.13" }
|
||||
solana-sdk = { path = "../sdk", version = "1.0.13" }
|
||||
solana-clap-utils = { path = "../clap-utils", version = "1.0.23" }
|
||||
solana-logger = { path = "../logger", version = "1.0.23" }
|
||||
solana-metrics = { path = "../metrics", version = "1.0.23" }
|
||||
solana-sdk = { path = "../sdk", version = "1.0.23" }
|
||||
tokio = "0.1"
|
||||
tokio-codec = "0.1"
|
||||
|
||||
@ -33,3 +33,6 @@ name = "solana_faucet"
|
||||
[[bin]]
|
||||
name = "solana-faucet"
|
||||
path = "src/bin/faucet.rs"
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
||||
|
@ -21,10 +21,20 @@ if [[ ! -f target/perf-libs/.$VERSION ]]; then
(
set -x
cd target/perf-libs
curl -L --retry 5 --retry-delay 2 --retry-connrefused -o solana-perf.tgz \
https://github.com/solana-labs/solana-perf-libs/releases/download/$PERF_LIBS_VERSION/solana-perf.tgz

if [[ -r ~/.cache/solana-perf-$PERF_LIBS_VERSION.tgz ]]; then
cp ~/.cache/solana-perf-$PERF_LIBS_VERSION.tgz solana-perf.tgz
else
curl -L --retry 5 --retry-delay 2 --retry-connrefused -o solana-perf.tgz \
https://github.com/solana-labs/solana-perf-libs/releases/download/$PERF_LIBS_VERSION/solana-perf.tgz
fi
tar zxvf solana-perf.tgz
rm -f solana-perf.tgz

if [[ ! -r ~/.cache/solana-perf-$PERF_LIBS_VERSION.tgz ]]; then
# Save it for next time
mkdir -p ~/.cache
mv solana-perf.tgz ~/.cache/solana-perf-$PERF_LIBS_VERSION.tgz
fi
touch .$VERSION
)

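The script change above adds a local cache: if a previously downloaded tarball exists under `~/.cache`, it is copied into place instead of being fetched again, and a fresh download is saved back to the cache for next time. Below is a minimal Rust sketch of the same check-cache, else-download-then-save flow; it assumes `curl` is available on `PATH`, and the version string and paths are illustrative, not taken from the script.

```rust
// Sketch of the cache-or-download pattern the script above adopts.
// The release URL shape, version string, and paths are illustrative.
use std::fs;
use std::path::PathBuf;
use std::process::Command;

fn fetch_perf_libs(version: &str) -> std::io::Result<()> {
    let cache = cache_dir().join(format!("solana-perf-{}.tgz", version));
    let target = PathBuf::from("solana-perf.tgz");

    if cache.is_file() {
        // Cache hit: reuse the previously downloaded tarball.
        fs::copy(&cache, &target)?;
    } else {
        // Cache miss: download with curl, then save a copy for next time.
        let url = format!(
            "https://github.com/solana-labs/solana-perf-libs/releases/download/{}/solana-perf.tgz",
            version
        );
        let status = Command::new("curl")
            .args(&["-L", "--retry", "5", "-o"])
            .arg(&target)
            .arg(&url)
            .status()?;
        assert!(status.success(), "download failed");
        fs::create_dir_all(cache.parent().unwrap())?;
        fs::copy(&target, &cache)?;
    }
    Ok(())
}

fn cache_dir() -> PathBuf {
    // ~/.cache, falling back to the current directory if HOME is unset.
    std::env::var_os("HOME")
        .map(PathBuf::from)
        .unwrap_or_default()
        .join(".cache")
}

fn main() {
    // "v0.19.1" stands in for whatever $PERF_LIBS_VERSION the script uses.
    fetch_perf_libs("v0.19.1").expect("fetch perf libs");
}
```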
@ -1,6 +1,6 @@
[package]
name = "solana-genesis-programs"
version = "1.0.13"
version = "1.0.23"
description = "Solana genesis programs"
authors = ["Solana Maintainers <maintainers@solana.com>"]
repository = "https://github.com/solana-labs/solana"
@ -10,17 +10,20 @@ edition = "2018"

[dependencies]
log = { version = "0.4.8" }
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "1.0.13" }
solana-budget-program = { path = "../programs/budget", version = "1.0.13" }
solana-config-program = { path = "../programs/config", version = "1.0.13" }
solana-exchange-program = { path = "../programs/exchange", version = "1.0.13" }
solana-runtime = { path = "../runtime", version = "1.0.13" }
solana-sdk = { path = "../sdk", version = "1.0.13" }
solana-stake-program = { path = "../programs/stake", version = "1.0.13" }
solana-storage-program = { path = "../programs/storage", version = "1.0.13" }
solana-vest-program = { path = "../programs/vest", version = "1.0.13" }
solana-vote-program = { path = "../programs/vote", version = "1.0.13" }
solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "1.0.23" }
solana-budget-program = { path = "../programs/budget", version = "1.0.23" }
solana-config-program = { path = "../programs/config", version = "1.0.23" }
solana-exchange-program = { path = "../programs/exchange", version = "1.0.23" }
solana-runtime = { path = "../runtime", version = "1.0.23" }
solana-sdk = { path = "../sdk", version = "1.0.23" }
solana-stake-program = { path = "../programs/stake", version = "1.0.23" }
solana-storage-program = { path = "../programs/storage", version = "1.0.23" }
solana-vest-program = { path = "../programs/vest", version = "1.0.23" }
solana-vote-program = { path = "../programs/vote", version = "1.0.23" }

[lib]
crate-type = ["lib"]
name = "solana_genesis_programs"

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
@ -3,7 +3,7 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-genesis"
description = "Blockchain, Rebuilt for Scale"
version = "1.0.13"
version = "1.0.23"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"
@ -15,13 +15,13 @@ chrono = "0.4"
serde = "1.0.104"
serde_json = "1.0.46"
serde_yaml = "0.8.11"
solana-clap-utils = { path = "../clap-utils", version = "1.0.13" }
solana-genesis-programs = { path = "../genesis-programs", version = "1.0.13" }
solana-ledger = { path = "../ledger", version = "1.0.13" }
solana-sdk = { path = "../sdk", version = "1.0.13" }
solana-stake-program = { path = "../programs/stake", version = "1.0.13" }
solana-storage-program = { path = "../programs/storage", version = "1.0.13" }
solana-vote-program = { path = "../programs/vote", version = "1.0.13" }
solana-clap-utils = { path = "../clap-utils", version = "1.0.23" }
solana-genesis-programs = { path = "../genesis-programs", version = "1.0.23" }
solana-ledger = { path = "../ledger", version = "1.0.23" }
solana-sdk = { path = "../sdk", version = "1.0.23" }
solana-stake-program = { path = "../programs/stake", version = "1.0.23" }
solana-storage-program = { path = "../programs/storage", version = "1.0.23" }
solana-vote-program = { path = "../programs/vote", version = "1.0.23" }
tempfile = "3.1.0"

[[bin]]
@ -31,3 +31,6 @@ path = "src/main.rs"

[lib]
name = "solana_genesis"

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
@ -3,19 +3,22 @@ authors = ["Solana Maintainers <maintainers@solana.com>"]
edition = "2018"
name = "solana-gossip"
description = "Blockchain, Rebuilt for Scale"
version = "1.0.13"
version = "1.0.23"
repository = "https://github.com/solana-labs/solana"
license = "Apache-2.0"
homepage = "https://solana.com/"

[dependencies]
clap = "2.33.0"
solana-clap-utils = { path = "../clap-utils", version = "1.0.13" }
solana-core = { path = "../core", version = "1.0.13" }
solana-client = { path = "../client", version = "1.0.13" }
solana-logger = { path = "../logger", version = "1.0.13" }
solana-net-utils = { path = "../net-utils", version = "1.0.13" }
solana-sdk = { path = "../sdk", version = "1.0.13" }
solana-clap-utils = { path = "../clap-utils", version = "1.0.23" }
solana-core = { path = "../core", version = "1.0.23" }
solana-client = { path = "../client", version = "1.0.23" }
solana-logger = { path = "../logger", version = "1.0.23" }
solana-net-utils = { path = "../net-utils", version = "1.0.23" }
solana-sdk = { path = "../sdk", version = "1.0.23" }

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
@ -15,6 +15,13 @@ use std::process::exit;
fn main() -> Result<(), Box<dyn error::Error>> {
solana_logger::setup_with_default("solana=info");

let shred_version_arg = Arg::with_name("shred_version")
.long("shred-version")
.value_name("VERSION")
.takes_value(true)
.default_value("0")
.help("Filter gossip nodes by this shred version");

let matches = App::new(crate_name!())
.about(crate_description!())
.version(solana_clap_utils::version!())
@ -53,6 +60,7 @@ fn main() -> Result<(), Box<dyn error::Error>> {
.default_value("5")
.help("Timeout in seconds"),
)
.arg(&shred_version_arg)
.setting(AppSettings::DisableVersion),
)
.subcommand(
@ -110,6 +118,7 @@ fn main() -> Result<(), Box<dyn error::Error>> {
.validator(is_pubkey)
.help("Public key of a specific node to wait for"),
)
.arg(&shred_version_arg)
.arg(
Arg::with_name("timeout")
.long("timeout")
@ -167,6 +176,7 @@ fn main() -> Result<(), Box<dyn error::Error>> {
let pubkey = matches
.value_of("node_pubkey")
.map(|pubkey_str| pubkey_str.parse::<Pubkey>().unwrap());
let shred_version = value_t_or_exit!(matches, "shred_version", u16);

let entrypoint_addr = parse_entrypoint(&matches);

@ -212,6 +222,7 @@ fn main() -> Result<(), Box<dyn error::Error>> {
pubkey,
None,
Some(&gossip_addr),
shred_version,
)?;

if timeout.is_some() {
@ -251,6 +262,7 @@ fn main() -> Result<(), Box<dyn error::Error>> {
let all = matches.is_present("all");
let entrypoint_addr = parse_entrypoint(&matches);
let timeout = value_t_or_exit!(matches, "timeout", u64);
let shred_version = value_t_or_exit!(matches, "shred_version", u16);
let (nodes, _archivers) = discover(
entrypoint_addr.as_ref(),
Some(1),
@ -258,6 +270,7 @@ fn main() -> Result<(), Box<dyn error::Error>> {
None,
entrypoint_addr.as_ref(),
None,
shred_version,
)?;

let rpc_addrs: Vec<_> = nodes
@ -298,6 +311,7 @@ fn main() -> Result<(), Box<dyn error::Error>> {
Some(pubkey),
None,
None,
0,
)?;
let node = nodes.iter().find(|x| x.id == pubkey).unwrap();

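The `main.rs` hunks above define a single `--shred-version` argument and reuse it across the gossip subcommands, reading it back with clap's `value_t_or_exit!` macro. The self-contained sketch below shows that same definition-and-parse pattern in isolation, assuming clap 2.x; the binary name and about text are illustrative, not from the diff.

```rust
// Standalone sketch of the `--shred-version` flag handling shown above,
// using clap 2.x. Binary name and about text are illustrative.
use clap::{value_t_or_exit, App, Arg};

fn main() {
    let shred_version_arg = Arg::with_name("shred_version")
        .long("shred-version")
        .value_name("VERSION")
        .takes_value(true)
        .default_value("0")
        .help("Filter gossip nodes by this shred version");

    let matches = App::new("gossip-demo")
        .about("Demonstrates parsing a u16 shred-version filter")
        .arg(&shred_version_arg)
        .get_matches();

    // value_t_or_exit! converts the string argument to u16, printing a clap
    // error and exiting if the value does not parse.
    let shred_version = value_t_or_exit!(matches, "shred_version", u16);
    println!("filtering gossip nodes by shred version {}", shred_version);
}
```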
Some files were not shown because too many files have changed in this diff.